commit 57d6f85eed1d15b7b742b7179501fec3ca21e160 Author: Ahmed Nassar Date: Mon Mar 17 10:25:52 2025 +0000 Duplicate from gradio-templates/text-to-image-gradio-template Co-authored-by: Sylvain Filoni diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..a6344aa --- /dev/null +++ b/.gitattributes @@ -0,0 +1,35 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text diff --git a/README.md b/README.md new file mode 100644 index 0000000..3ad7f56 --- /dev/null +++ 
"""Gradio text-to-image demo around a diffusers pipeline (sdxl-turbo by default).

Loads the pipeline once at import time, then serves a Blocks UI whose
"Run" button / prompt submit both call `infer`.
"""

import random

import gradio as gr
import numpy as np
import torch

# import spaces  # [uncomment to use ZeroGPU]
from diffusers import DiffusionPipeline

# Decide device once and derive the dtype from it: float16 halves GPU memory,
# while CPU inference requires float32.  (The original checked
# torch.cuda.is_available() twice; a single check keeps the two consistent.)
device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if device == "cuda" else torch.float32

model_repo_id = "stabilityai/sdxl-turbo"  # Replace with the model you would like to use

pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
pipe = pipe.to(device)

MAX_SEED = np.iinfo(np.int32).max  # seeds are kept in int32 range for the UI slider
MAX_IMAGE_SIZE = 1024


# @spaces.GPU  # [uncomment to use ZeroGPU]
def infer(
    prompt,
    negative_prompt,
    seed,
    randomize_seed,
    width,
    height,
    guidance_scale,
    num_inference_steps,
    progress=gr.Progress(track_tqdm=True),
):
    """Generate one image from *prompt* with the module-level pipeline.

    Args:
        prompt: Text prompt to render.
        negative_prompt: Text to steer away from (ignored by the pipeline
            when classifier-free guidance is off, e.g. guidance_scale=0).
        seed: RNG seed; replaced by a random one when *randomize_seed* is set.
        randomize_seed: Draw a fresh seed in [0, MAX_SEED] instead of *seed*.
        width, height: Output resolution in pixels.
        guidance_scale: Classifier-free guidance strength.
        num_inference_steps: Denoising steps (turbo models need very few).
        progress: Gradio progress tracker; track_tqdm mirrors diffusers' bars.

    Returns:
        (PIL.Image.Image, int): the generated image and the seed actually used,
        so the UI can display a randomized seed back to the user.
    """
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # int(): Gradio sliders may deliver floats; manual_seed requires an int.
    generator = torch.Generator().manual_seed(int(seed))

    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
    ).images[0]

    return image, seed


examples = [
    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
    "An astronaut riding a green horse",
    "A delicious ceviche cheesecake slice",
]

# Center the app column and cap its width.
css = """
#col-container {
    margin: 0 auto;
    max-width: 640px;
}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(" # Text-to-Image Gradio Template")

        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )

            run_button = gr.Button("Run", scale=0, variant="primary")

        result = gr.Image(label="Result", show_label=False)

        with gr.Accordion("Advanced Settings", open=False):
            # Hidden by default; flip visible=True for models that honor it.
            negative_prompt = gr.Text(
                label="Negative prompt",
                max_lines=1,
                placeholder="Enter a negative prompt",
                visible=False,
            )

            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )

            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,  # Replace with defaults that work for your model
                )

                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,  # Replace with defaults that work for your model
                )

            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=0.0,
                    maximum=10.0,
                    step=0.1,
                    value=0.0,  # Replace with defaults that work for your model
                )

                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=2,  # Replace with defaults that work for your model
                )

        gr.Examples(examples=examples, inputs=[prompt])
    # One handler serves both the button click and Enter in the prompt box.
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[
            prompt,
            negative_prompt,
            seed,
            randomize_seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
        ],
        outputs=[result, seed],
    )

if __name__ == "__main__":
    demo.launch()