diff --git a/frontend/core/pipelines/stable-diffusion-2/README.md b/frontend/core/pipelines/stable-diffusion-2/README.md new file mode 100644 index 00000000..700c2571 --- /dev/null +++ b/frontend/core/pipelines/stable-diffusion-2/README.md @@ -0,0 +1,23 @@ +# Stable Diffusion Image Generation Pipeline + +This pipeline provides a Python function `compute` that generates images from textual descriptions using the Stable Diffusion 2 model. The function utilizes the `diffusers` library and GPU acceleration for efficient image generation. + +## Features + +- Generates high-quality images from textual prompts. +- Supports inference using the Euler Discrete Scheduler for diffusion. +- Utilizes GPU acceleration with `float16` for enhanced performance. + +--- + +## Usage +The `compute` function accepts a textual description (`prompt`) and the number of inference steps, and generates an image. The generated image is saved as `result.png` in the current working directory. + +## Function Parameters +- `prompt` (str): Text description of the desired image. +- `inference_steps` (int): Number of diffusion steps for the generation process. +## Return Value +The function returns a dictionary containing the path to the generated image: `{"generated_image_path": "result.png"}`. + +## Model Used +- https://huggingface.co/stabilityai/stable-diffusion-2 diff --git a/frontend/core/pipelines/stable-diffusion-2/cover-image.png b/frontend/core/pipelines/stable-diffusion-2/cover-image.png new file mode 100644 index 00000000..f276cee8 Binary files /dev/null and b/frontend/core/pipelines/stable-diffusion-2/cover-image.png differ diff --git a/frontend/core/pipelines/stable-diffusion-2/new-python-1k08gnfkag96/Dockerfile b/frontend/core/pipelines/stable-diffusion-2/new-python-1k08gnfkag96/Dockerfile new file mode 100644 index 00000000..cc1854cd --- /dev/null +++ b/frontend/core/pipelines/stable-diffusion-2/new-python-1k08gnfkag96/Dockerfile @@ -0,0 +1,8 @@ +FROM python:3.9 + +WORKDIR /app + +RUN pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121 +RUN pip install diffusers transformers accelerate scipy safetensors + +COPY computations.py . 
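For a quick sanity check outside the pipeline runner, here is one way the block's `compute` function could be exercised directly. This is a minimal illustrative sketch, not part of the diff: it assumes the working directory is the block folder `new-python-1k08gnfkag96` (so `computations.py` is importable), that a CUDA-capable GPU is present, and that the dependencies from the Dockerfile above are installed; the prompt text and step count are arbitrary examples.

```python
# Illustrative local driver for the stable-diffusion-2 block (see assumptions above).
from computations import compute  # the block's compute() from computations.py

result = compute(
    prompt="A watercolor painting of a lighthouse at dawn",  # example prompt, not from the pipeline
    inference_steps=50,  # the pipeline's parameter block uses 100; fewer steps finish faster
)
print(result)  # expected: {"generated_image_path": "result.png"}
```

Note that the first call downloads the `stabilityai/stable-diffusion-2` weights from the Hugging Face Hub, so network access and several gigabytes of disk space are required.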
diff --git a/frontend/core/pipelines/stable-diffusion-2/new-python-1k08gnfkag96/chatHistory.json b/frontend/core/pipelines/stable-diffusion-2/new-python-1k08gnfkag96/chatHistory.json new file mode 100644 index 00000000..d92592a1 --- /dev/null +++ b/frontend/core/pipelines/stable-diffusion-2/new-python-1k08gnfkag96/chatHistory.json @@ -0,0 +1,25 @@ +{ + "index": 0, + "history": [ + { + "timestamp": 1731600898832, + "prompt": "Code Template", + "response": "def compute(in1, in2):\r\n \"\"\"A textual description of the compute function.\r\n\r\n Inputs:\r\n in1 (all): Textual description of in1\r\n in2 (all): Textual description of in2\r\n\r\n Outputs:\r\n out1 (all): Textual description of out1\r\n out2 (all): Textual description of out2\r\n\r\n Requirements:\r\n \"\"\"\r\n # some code\r\n out1 = 2 * in1\r\n out2 = \"This is the in2 string:\" + in2\r\n\r\n return {\"out1\": out1, \"out2\": out2}\r\n\r\n\r\ndef test():\r\n \"\"\"Test the compute function.\"\"\"\r\n\r\n print(\"Running test\")\r\n" + }, + { + "timestamp": 1731601223108, + "prompt": "Manual Edit of computations.py", + "response": "def compute(prompt, inference_steps):\n \"\"\"\n prompt: text description of image\n inference_steps: difussion steps\n\n output: generated image path\n \"\"\"\n from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler\n\n model_id = \"stabilityai/stable-diffusion-2\"\n \n # Use the Euler scheduler here instead\n scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder=\"scheduler\")\n pipe = StableDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, torch_dtype=torch.float16)\n pipe = pipe.to(\"cuda\")\n \n image = pipe(prompt, num_inference_steps=inference_steps).images[0]\n \n image.save(\"result.png\")\n\n return {\"generated_image_path\": \"result.png\"}\n\n\ndef test():\n \"\"\"Test the compute function.\"\"\"\n\n print(\"Running test\")\n" + }, + { + "timestamp": 1731601351634, + "prompt": "Manual Edit of computations.py", + "response": "def compute(prompt, inference_steps):\n \"\"\"\n prompt: text description of image\n inference_steps: difussion steps\n\n output: generated image path\n\n use GPU to run this pipeline, we are using float16 dtype\n \"\"\"\n from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler\n\n model_id = \"stabilityai/stable-diffusion-2\"\n \n # Use the Euler scheduler here instead\n scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder=\"scheduler\")\n pipe = StableDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, torch_dtype=torch.float16)\n pipe = pipe.to(\"cuda\")\n \n image = pipe(prompt, num_inference_steps=inference_steps).images[0]\n \n image.save(\"result.png\")\n\n return {\"generated_image_path\": \"result.png\"}\n\n\ndef test():\n \"\"\"Test the compute function.\"\"\"\n\n print(\"Running test\")\n" + }, + { + "timestamp": 1731603317095, + "prompt": "Manual Edit of computations.py", + "response": "def compute(prompt, inference_steps):\n \"\"\"\n prompt: text description of image\n inference_steps: difussion steps\n\n output: generated image path\n\n use GPU to run this pipeline, we are using float16 dtype\n \"\"\"\n from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler\n import torch \n model_id = \"stabilityai/stable-diffusion-2\"\n \n # Use the Euler scheduler here instead\n scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder=\"scheduler\")\n pipe = StableDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, 
torch_dtype=torch.float16)\n pipe = pipe.to(\"cuda\")\n \n image = pipe(prompt, num_inference_steps=inference_steps).images[0]\n \n image.save(\"result.png\")\n\n return {\"generated_image_path\": \"result.png\"}\n\n\ndef test():\n \"\"\"Test the compute function.\"\"\"\n\n print(\"Running test\")\n" + } + ] +} \ No newline at end of file diff --git a/frontend/core/pipelines/stable-diffusion-2/new-python-1k08gnfkag96/computations.py b/frontend/core/pipelines/stable-diffusion-2/new-python-1k08gnfkag96/computations.py new file mode 100644 index 00000000..e6a6a204 --- /dev/null +++ b/frontend/core/pipelines/stable-diffusion-2/new-python-1k08gnfkag96/computations.py @@ -0,0 +1,31 @@ +def compute(prompt, inference_steps): + """ + Pipeline to generate an image from a textual description. Takes a text prompt and the number of inference steps as inputs and returns the path of the generated image. + + prompt: text description of the image + inference_steps: number of diffusion steps + + output: path of the generated image + + Runs on a GPU and loads the model in float16 for faster inference. + """ + from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler + import torch + model_id = "stabilityai/stable-diffusion-2" + + # Use the Euler scheduler here instead + scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler") + pipe = StableDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, torch_dtype=torch.float16) + pipe = pipe.to("cuda") + + image = pipe(prompt, num_inference_steps=inference_steps).images[0] + + image.save("result.png") + + return {"generated_image_path": "result.png"} + + +def test(): + """Test the compute function.""" + + print("Running test") diff --git a/frontend/core/pipelines/stable-diffusion-2/new-python-1k08gnfkag96/cover-image.png b/frontend/core/pipelines/stable-diffusion-2/new-python-1k08gnfkag96/cover-image.png new file mode 100644 index 00000000..a33c4599 Binary files /dev/null and b/frontend/core/pipelines/stable-diffusion-2/new-python-1k08gnfkag96/cover-image.png differ diff --git a/frontend/core/pipelines/stable-diffusion-2/new-python-1k08gnfkag96/requirements.txt b/frontend/core/pipelines/stable-diffusion-2/new-python-1k08gnfkag96/requirements.txt new file mode 100644 index 00000000..e69de29b diff --git a/frontend/core/pipelines/stable-diffusion-2/new-python-1k08gnfkag96/specs.json b/frontend/core/pipelines/stable-diffusion-2/new-python-1k08gnfkag96/specs.json new file mode 100644 index 00000000..c1b7fafb --- /dev/null +++ b/frontend/core/pipelines/stable-diffusion-2/new-python-1k08gnfkag96/specs.json @@ -0,0 +1,92 @@ +{ + "information": { + "id": "stable-diffusion-2", + "name": "Stable Diffusion 2", + "description": "Pipeline to generate the images based on textual description. 
takes text and inference step as an inputs and return the genreated image.\nprompt: text description of image\ninference_steps: difussion steps\n\noutput: generated image path\n\nuse GPU to run this pipeline, we are using float16 dtype", + "system_versions": [ + "0.1" + ], + "block_version": "block version number", + "block_source": "core/blocks/stable-diffusion-2", + "block_type": "compute" + }, + "inputs": { + "prompt": { + "type": "Any", + "connections": [ + { + "block": "parameter-hhz20qr4vytg", + "variable": "parameter" + } + ] + }, + "inference_steps": { + "type": "Any", + "connections": [ + { + "block": "parameter-cuha2229jdsa", + "variable": "parameter" + } + ] + } + }, + "outputs": { + "generated_image_path": { + "type": "Any", + "connections": [ + { + "block": "view-images-gwly233ys03y", + "variable": "image_paths_view" + } + ] + } + }, + "action": { + "container": { + "image": "stable-diffusion-2", + "version": "stable-diffusion-2-1k08gnfkag96", + "command_line": [ + "python", + "-u", + "entrypoint.py" + ] + }, + "resources": { + "cpu": { + "request": "", + "limit": "" + }, + "memory": { + "request": "", + "limit": "" + }, + "gpu": { + "count": 1 + } + } + }, + "views": { + "node": { + "active": "True or False", + "title_bar": { + "background_color": "#6b2be0" + }, + "preview": {}, + "html": "", + "pos_x": "786", + "pos_y": "188", + "pos_z": "999", + "behavior": "modal", + "order": { + "input": [ + "prompt", + "inference_steps" + ], + "output": [ + "generated_image_path" + ] + } + } + }, + "events": {} +} \ No newline at end of file diff --git a/frontend/core/pipelines/stable-diffusion-2/pipeline.json b/frontend/core/pipelines/stable-diffusion-2/pipeline.json new file mode 100644 index 00000000..9cc47ec2 --- /dev/null +++ b/frontend/core/pipelines/stable-diffusion-2/pipeline.json @@ -0,0 +1,319 @@ +{ + "pipeline": { + "new-python-1k08gnfkag96": { + "information": { + "id": "stable-diffusion-2", + "name": "Stable Diffusion 2", + "description": "Pipeline to generate the images based on textual description. 
takes text and inference step as an inputs and return the genreated image.\nprompt: text description of image\ninference_steps: difussion steps\n\noutput: generated image path\n\nuse GPU to run this pipeline, we are using float16 dtype", + "system_versions": [ + "0.1" + ], + "block_version": "block version number", + "block_source": "core/blocks/stable-diffusion-2", + "block_type": "compute" + }, + "inputs": { + "prompt": { + "type": "Any", + "connections": [ + { + "block": "parameter-hhz20qr4vytg", + "variable": "parameter" + } + ] + }, + "inference_steps": { + "type": "Any", + "connections": [ + { + "block": "parameter-cuha2229jdsa", + "variable": "parameter" + } + ] + } + }, + "outputs": { + "generated_image_path": { + "type": "Any", + "connections": [ + { + "block": "view-images-gwly233ys03y", + "variable": "image_paths_view" + } + ] + } + }, + "action": { + "container": { + "image": "stable-diffusion-2", + "version": "stable-diffusion-2-1k08gnfkag96", + "command_line": [ + "python", + "-u", + "entrypoint.py" + ] + }, + "resources": { + "cpu": { + "request": "", + "limit": "" + }, + "memory": { + "request": "", + "limit": "" + }, + "gpu": { + "count": 1 + } + } + }, + "views": { + "node": { + "active": "True or False", + "title_bar": { + "background_color": "#6b2be0" + }, + "preview": {}, + "html": "", + "pos_x": "786", + "pos_y": "188", + "pos_z": "999", + "behavior": "modal", + "order": { + "input": [ + "prompt", + "inference_steps" + ], + "output": [ + "generated_image_path" + ] + } + } + }, + "events": {} + }, + "parameter-hhz20qr4vytg": { + "information": { + "id": "parameter", + "name": "Parameter", + "description": "Parameter input.", + "system_versions": [ + "0.1" + ], + "block_version": "block version number", + "block_source": "core/blocks", + "block_type": "entry" + }, + "inputs": {}, + "outputs": { + "parameter": { + "type": "Any", + "connections": [ + { + "block": "new-python-1k08gnfkag96", + "variable": "prompt" + } + ] + } + }, + "action": { + "container": { + "image": "", + "version": "", + "command_line": [] + }, + "parameters": { + "parameter": { + "value": "\"A photograph of a gigantic, hyper-realistic king frog in stunning 8K resolution. The frog has iridescent green and gold skin covered in intricate, jewel-like scales. Its eyes are large and intelligent, with vertical slit pupils. Delicate webbing stretches between the frog's massive, muscular legs. 
The frog is sitting on a lush, mossy log in a dense, primeval forest, with rays of golden light filtering through the canopy above.\"", + "type": "Any" + } + }, + "resources": { + "cpu": { + "request": "", + "limit": "" + }, + "memory": { + "request": "", + "limit": "" + }, + "gpu": { + "count": 0 + } + } + }, + "views": { + "node": { + "active": "true", + "title_bar": { + "background_color": "#909090" + }, + "preview": {}, + "html": "", + "pos_x": "256", + "pos_y": "139", + "pos_z": "999, this is the z-index for 2D canvas", + "order": { + "input": [], + "output": [ + "parameter" + ] + } + } + }, + "events": {} + }, + "parameter-cuha2229jdsa": { + "information": { + "id": "parameter", + "name": "Parameter", + "description": "Parameter input.", + "system_versions": [ + "0.1" + ], + "block_version": "block version number", + "block_source": "core/blocks", + "block_type": "entry" + }, + "inputs": {}, + "outputs": { + "parameter": { + "type": "Any", + "connections": [ + { + "block": "new-python-1k08gnfkag96", + "variable": "inference_steps" + } + ] + } + }, + "action": { + "container": { + "image": "", + "version": "", + "command_line": [] + }, + "parameters": { + "parameter": { + "value": "100", + "type": "Any" + } + }, + "resources": { + "cpu": { + "request": "", + "limit": "" + }, + "memory": { + "request": "", + "limit": "" + }, + "gpu": { + "count": 0 + } + } + }, + "views": { + "node": { + "active": "true", + "title_bar": { + "background_color": "#909090" + }, + "preview": {}, + "html": "", + "pos_x": "491", + "pos_y": "469", + "pos_z": "999, this is the z-index for 2D canvas", + "order": { + "input": [], + "output": [ + "parameter" + ] + } + } + }, + "events": {} + }, + "view-images-gwly233ys03y": { + "information": { + "id": "view-images", + "name": "View Images", + "description": "Generates an HTML file with a unique name and returns the file name.", + "system_versions": [ + "0.1" + ], + "block_version": "block version number", + "block_source": "core/blocks/view-images", + "block_type": "view" + }, + "inputs": { + "image_paths_view": { + "type": "List[file]", + "connections": [ + { + "block": "new-python-1k08gnfkag96", + "variable": "generated_image_path" + } + ] + } + }, + "outputs": { + "html": { + "type": "file", + "connections": [] + } + }, + "action": { + "container": { + "image": "view-images", + "version": "view-images-gwly233ys03y", + "command_line": [ + "python", + "-u", + "entrypoint.py" + ] + }, + "resources": { + "cpu": { + "request": "", + "limit": "" + }, + "memory": { + "request": "", + "limit": "" + }, + "gpu": { + "count": 0 + } + } + }, + "views": { + "node": { + "active": "True or False", + "title_bar": { + "background_color": "#D55908" + }, + "preview": { + "active": "true" + }, + "html": "", + "pos_x": "1219", + "pos_y": "407", + "pos_z": "999, this is the z-index for 2D canvas", + "order": { + "input": [ + "image_paths_view" + ], + "output": [ + "html" + ] + } + }, + "mode": "modal" + }, + "events": {} + } + }, + "name": "stable-diffusion-2", + "id": "pipeline-zavlrd78q1mt" +} \ No newline at end of file diff --git a/frontend/core/pipelines/stable-diffusion-2/view-images-gwly233ys03y/Dockerfile b/frontend/core/pipelines/stable-diffusion-2/view-images-gwly233ys03y/Dockerfile new file mode 100644 index 00000000..600ff699 --- /dev/null +++ b/frontend/core/pipelines/stable-diffusion-2/view-images-gwly233ys03y/Dockerfile @@ -0,0 +1,9 @@ +FROM python:3.9 + +WORKDIR /app + +COPY requirements.txt . 
+ +RUN pip install --no-cache-dir -r requirements.txt + +COPY computations.py . diff --git a/frontend/core/pipelines/stable-diffusion-2/view-images-gwly233ys03y/computations.py b/frontend/core/pipelines/stable-diffusion-2/view-images-gwly233ys03y/computations.py new file mode 100644 index 00000000..007a6468 --- /dev/null +++ b/frontend/core/pipelines/stable-diffusion-2/view-images-gwly233ys03y/computations.py @@ -0,0 +1,94 @@ +import json +import uuid + +def compute(image_paths_view): + """Generates an HTML file with a unique name and returns the file name. + + Inputs: + image_paths_view (str or list): A path or a list of image paths to display in the gallery. + + Outputs: + dict: A dictionary with the key 'html' and the value being the name of the generated HTML file. + """ + + html_template = """ + + + + + Gallery + + + + + + + + + + + + + + + + """ + # Ensure image_paths_view is always a list + if isinstance(image_paths_view, str): + image_paths_view = [image_paths_view] + + unique_id = str(uuid.uuid4()) + html_path = f"/files/viz_{unique_id}.html" + image_paths_view_str = json.dumps(image_paths_view) + html_code = html_template.replace("$image_paths", image_paths_view_str) + + # Write the file + with open(html_path, "w") as file: + file.write(html_code) + + return {"html": f"viz_{unique_id}.html"} + +def test(): + """Test the compute function.""" + print("Running test") \ No newline at end of file diff --git a/frontend/core/pipelines/stable-diffusion-2/view-images-gwly233ys03y/cover-image.png b/frontend/core/pipelines/stable-diffusion-2/view-images-gwly233ys03y/cover-image.png new file mode 100644 index 00000000..b928a01c Binary files /dev/null and b/frontend/core/pipelines/stable-diffusion-2/view-images-gwly233ys03y/cover-image.png differ diff --git a/frontend/core/pipelines/stable-diffusion-2/view-images-gwly233ys03y/requirements.txt b/frontend/core/pipelines/stable-diffusion-2/view-images-gwly233ys03y/requirements.txt new file mode 100644 index 00000000..e69de29b diff --git a/frontend/core/pipelines/stable-diffusion-2/view-images-gwly233ys03y/specs.json b/frontend/core/pipelines/stable-diffusion-2/view-images-gwly233ys03y/specs.json new file mode 100644 index 00000000..c8cf27a3 --- /dev/null +++ b/frontend/core/pipelines/stable-diffusion-2/view-images-gwly233ys03y/specs.json @@ -0,0 +1,79 @@ +{ + "information": { + "id": "view-images", + "name": "View Images", + "description": "Generates an HTML file with a unique name and returns the file name.", + "system_versions": [ + "0.1" + ], + "block_version": "block version number", + "block_source": "core/blocks/view-images", + "block_type": "view" + }, + "inputs": { + "image_paths_view": { + "type": "List[file]", + "connections": [ + { + "block": "new-python-1k08gnfkag96", + "variable": "generated_image_path" + } + ] + } + }, + "outputs": { + "html": { + "type": "file", + "connections": [] + } + }, + "action": { + "container": { + "image": "view-images", + "version": "view-images-gwly233ys03y", + "command_line": [ + "python", + "-u", + "entrypoint.py" + ] + }, + "resources": { + "cpu": { + "request": "", + "limit": "" + }, + "memory": { + "request": "", + "limit": "" + }, + "gpu": { + "count": 0 + } + } + }, + "views": { + "node": { + "active": "True or False", + "title_bar": { + "background_color": "#D55908" + }, + "preview": { + "active": "true" + }, + "html": "", + "pos_x": "1300", + "pos_y": "363", + "pos_z": "999, this is the z-index for 2D canvas", + "order": { + "input": [ + "image_paths_view" + ], + "output": [ + "html" + ] + } + 
}, + "mode": "modal" + }, + "events": {} +} \ No newline at end of file