Image Generation

ComfyUI API Integration

Run advanced Stable Diffusion workflows programmatically using ComfyUI running on a OneInfer GPU instance. Automate complex image pipelines with full node-graph control.

OneInfer GPU Instances · ComfyUI · Stable Diffusion · Python · WebSocket

Step-by-step guide

1

Launch a ComfyUI instance on OneInfer

python
import requests

headers = {"Authorization": "Bearer your-oneinfer-api-key"}

# Create a GPU instance with ComfyUI pre-installed.
response = requests.post(
    "https://api.oneinfer.ai/v1/instances",
    headers=headers,
    json={
        "template": "comfyui",   # instance template with ComfyUI baked in
        "gpu_type": "RTX4090",
        "disk_gb": 50,
    },
    timeout=30,  # fail fast instead of hanging forever on network issues
)
# Surface 4xx/5xx immediately — otherwise .json() below raises a confusing
# KeyError (or JSONDecodeError) far from the actual failure.
response.raise_for_status()
instance = response.json()
instance_url = instance["url"]  # e.g. https://abc123.instance.oneinfer.ai
print(f"ComfyUI running at: {instance_url}")
2

Submit a workflow via the ComfyUI API

python
import json        # NOTE(review): unused in this snippet — safe to drop if not needed later
import uuid
import websocket   # NOTE(review): unused here — only needed if you stream progress over /ws
import requests

COMFY_URL = "https://abc123.instance.oneinfer.ai"
# Identifies this client in ComfyUI's queue / websocket progress events.
CLIENT_ID = str(uuid.uuid4())

# A minimal txt2img workflow (simplified). Keys are node ids; list values
# like ["4", 1] are links: (source node id, source output index).
workflow = {
    "6": {"class_type": "CLIPTextEncode", "inputs": {"clip": ["4", 1], "text": "a futuristic city at sunset"}},
    "7": {"class_type": "CLIPTextEncode", "inputs": {"clip": ["4", 1], "text": "blurry, low quality"}},
    "3": {"class_type": "KSampler", "inputs": {
        "model": ["4", 0], "positive": ["6", 0], "negative": ["7", 0],
        "latent_image": ["5", 0], "seed": 42, "steps": 25, "cfg": 7.5,
        "sampler_name": "euler", "scheduler": "normal", "denoise": 1.0
    }},
    "4": {"class_type": "CheckpointLoaderSimple", "inputs": {"ckpt_name": "sd_xl_base_1.0.safetensors"}},
    "5": {"class_type": "EmptyLatentImage", "inputs": {"width": 1024, "height": 1024, "batch_size": 1}},
    "8": {"class_type": "VAEDecode", "inputs": {"samples": ["3", 0], "vae": ["4", 2]}},
    "9": {"class_type": "SaveImage", "inputs": {"images": ["8", 0], "filename_prefix": "output"}}
}

# Queue the workflow. ComfyUI validates the graph and returns a prompt_id.
prompt_response = requests.post(
    f"{COMFY_URL}/prompt",
    json={"prompt": workflow, "client_id": CLIENT_ID},
    timeout=30,  # don't hang indefinitely on an unreachable instance
)
# Fail loudly if the workflow was rejected (e.g. bad node graph) instead of
# crashing on the missing "prompt_id" key below.
prompt_response.raise_for_status()
prompt_id = prompt_response.json()["prompt_id"]
print(f"Queued prompt: {prompt_id}")
3

Wait for completion and download the image

python
import time
from PIL import Image
import io

def wait_for_result(prompt_id: str, poll_interval: float = 2.0, timeout=None) -> bytes:
    """Poll ComfyUI's /history endpoint until *prompt_id* completes, then
    download and return the first generated image as raw bytes.

    Args:
        prompt_id: The id returned by POST /prompt.
        poll_interval: Seconds to sleep between history polls (default 2, as before).
        timeout: Optional overall deadline in seconds; None (default) preserves
            the original wait-forever behavior.

    Raises:
        TimeoutError: If *timeout* seconds elapse without a finished result.
        requests.HTTPError: If the instance returns an HTTP error status.
    """
    deadline = None if timeout is None else time.monotonic() + timeout
    while True:
        resp = requests.get(f"{COMFY_URL}/history/{prompt_id}", timeout=30)
        resp.raise_for_status()  # don't silently poll against a broken endpoint
        history = resp.json()
        if prompt_id in history:
            outputs = history[prompt_id]["outputs"]
            for node_output in outputs.values():
                if "images" in node_output:
                    img_info = node_output["images"][0]
                    img_response = requests.get(
                        f"{COMFY_URL}/view",
                        params={
                            "filename": img_info["filename"],
                            "subfolder": img_info["subfolder"],
                            # BUG FIX: /view also requires "type"
                            # (output/input/temp); without it ComfyUI can
                            # fail to locate the saved image.
                            "type": img_info.get("type", "output"),
                        },
                        timeout=60,
                    )
                    img_response.raise_for_status()
                    return img_response.content
        if deadline is not None and time.monotonic() > deadline:
            raise TimeoutError(f"Prompt {prompt_id} did not finish within {timeout}s")
        time.sleep(poll_interval)

# Block until the render finishes, then decode and persist the PNG locally.
result_bytes = wait_for_result(prompt_id)
buffer = io.BytesIO(result_bytes)
Image.open(buffer).save("comfyui_output.png")
print("Saved comfyui_output.png")