From 15fada3eef6f0a9314c53ffb0dc9117c1f648084 Mon Sep 17 00:00:00 2001 From: RUFFY-369 Date: Sun, 29 Dec 2024 19:59:33 +0530 Subject: [PATCH 01/10] feat:initial implementation of i2i generic tasks integration --- runner/app/main.py | 8 + .../app/pipelines/image_to_image_generic.py | 283 ++++++++++++++++++ runner/app/routes/image_to_image_generic.py | 243 +++++++++++++++ runner/dl_checkpoints.sh | 7 + runner/gateway.openapi.yaml | 131 ++++++++ runner/gen_openapi.py | 2 + runner/openapi.yaml | 131 ++++++++ worker/docker.go | 21 +- worker/multipart.go | 103 +++++++ worker/runner.gen.go | 213 +++++++++++++ worker/worker.go | 58 ++++ 11 files changed, 1190 insertions(+), 10 deletions(-) create mode 100644 runner/app/pipelines/image_to_image_generic.py create mode 100644 runner/app/routes/image_to_image_generic.py diff --git a/runner/app/main.py b/runner/app/main.py index 0ba3ee3a..57acb6f8 100644 --- a/runner/app/main.py +++ b/runner/app/main.py @@ -78,6 +78,10 @@ def load_pipeline(pipeline: str, model_id: str) -> any: from app.pipelines.text_to_speech import TextToSpeechPipeline return TextToSpeechPipeline(model_id) + case "image-to-image-generic": + from app.pipelines.image_to_image_generic import ImageToImageGenericPipeline + + return ImageToImageGenericPipeline(model_id) case _: raise EnvironmentError( f"{pipeline} is not a valid pipeline for model {model_id}" @@ -128,6 +132,10 @@ def load_route(pipeline: str) -> any: from app.routes import text_to_speech return text_to_speech.router + case "image-to-image-generic": + from app.routes import image_to_image_generic + + return image_to_image_generic.router case _: raise EnvironmentError(f"{pipeline} is not a valid pipeline") diff --git a/runner/app/pipelines/image_to_image_generic.py b/runner/app/pipelines/image_to_image_generic.py new file mode 100644 index 00000000..8aeda564 --- /dev/null +++ b/runner/app/pipelines/image_to_image_generic.py @@ -0,0 +1,283 @@ +import logging +import numpy as np +import os +from enum import Enum +from typing import List, Optional, Tuple + +import PIL +import torch +from diffusers import ( + AutoPipelineForInpainting, + ControlNetModel, + StableDiffusionXLControlNetPipeline, + StableDiffusionXLInpaintPipeline, + EulerAncestralDiscreteScheduler, + AutoencoderKL, +) +from huggingface_hub import file_download +from PIL import Image, ImageOps + +from app.pipelines.base import Pipeline +from app.pipelines.utils import ( + LoraLoader, + get_model_dir, + get_torch_device, +) +from app.utils.errors import InferenceError + +logger = logging.getLogger(__name__) + +class TaskType(Enum): + """Enumeration for task types.""" + INPAINTING = "inpainting" + OUTPAINTING = "outpainting" + SKETCH_TO_IMAGE = "sketch_to_image" + + @classmethod + def list(cls): + return [task.value for task in cls] + + +class ImageToImageGenericPipeline(Pipeline): + def __init__(self, model_id: str, task: str): + + kwargs = {"cache_dir": get_model_dir(), "torch_dtype": torch.float16} + torch_device = get_torch_device() + + folder_name = file_download.repo_folder_name( + repo_id=model_id, repo_type="model" + ) + folder_path = os.path.join(get_model_dir(), folder_name) + # Load the fp16 variant if fp16 'safetensors' files are present in the cache. + # NOTE: Exception for SDXL-Lightning model: despite having fp16 'safetensors' + # files, they are not named according to the standard convention. 
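+        # The variant check below walks this model's folder in the local cache
+        # (populated by dl_checkpoints.sh) rather than querying the Hugging Face Hub.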
+ has_fp16_variant = ( + any( + ".fp16.safetensors" in fname + for _, _, files in os.walk(folder_path) + for fname in files + ) + ) + if torch_device.type != "cpu" and has_fp16_variant: + logger.info("ImageToImageGenericPipeline loading fp16 variant for %s", model_id) + + kwargs["torch_dtype"] = torch.float16 + kwargs["variant"] = "fp16" + + if task not in TaskType.list(): + raise ValueError(f"Unsupported task: {task}") + + self.task = task + + # Initialize pipelines based on task + if self.task == TaskType.INPAINTING.value: + self.pipeline = AutoPipelineForInpainting.from_pretrained( + model_id, **kwargs + ).to(torch_device) + self.pipeline.enable_model_cpu_offload() + + elif self.task == TaskType.OUTPAINTING.value: + self.controlnet = ControlNetModel.from_pretrained( + model_id, torch_dtype=torch.float16, variant="fp16" + ).to(torch_device), + self.vae = AutoencoderKL.from_pretrained( + "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16 + ).to(torch_device) + self.pipeline_stage1 = StableDiffusionXLControlNetPipeline.from_pretrained( + "SG161222/RealVisXL_V4.0", + controlnet=self.controlnet, + vae=self.vae, + safety_checker=None, + **kwargs + ).to(torch_device) + self.pipeline_stage2 = StableDiffusionXLInpaintPipeline.from_pretrained( + "OzzyGT/RealVisXL_V4.0_inpainting", + vae=self.vae, + **kwargs + ).to(torch_device) + + elif self.task == TaskType.SKETCH_TO_IMAGE.value: + self.controlnet = ControlNetModel.from_pretrained( + model_id, **kwargs + ).to(torch_device) + self.vae = AutoencoderKL.from_pretrained( + "madebyollin/sdxl-vae-fp16-fix", **kwargs + ).to(torch_device) + eulera_scheduler = EulerAncestralDiscreteScheduler.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler") + self.pipeline = StableDiffusionXLControlNetPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + controlnet=self.controlnet, + vae=self.vae, + safety_checker=None, + scheduler=eulera_scheduler, + **kwargs + ).to(torch_device) + + self._lora_loader = LoraLoader(self.pipeline) + + if self.task == TaskType.OUTPAINTING.value: + self._lora_loader1 = LoraLoader(self.pipeline_stage1) + self._lora_loader2 = LoraLoader(self.pipeline_stage2) + + def __call__( + self, + prompt: List[str], + image: PIL.Image.Image, + mask_image: Optional[PIL.Image.Image] = None, + **kwargs, + ) -> Tuple[List[PIL.Image], List[Optional[bool]]]: + + # Handle num_inference_steps and other model-specific settings + if "num_inference_steps" in kwargs and ( + kwargs["num_inference_steps"] is None or kwargs["num_inference_steps"] < 1 + ): + del kwargs["num_inference_steps"] + + # Extract parameters from kwargs + seed = kwargs.pop("seed", None) + safety_check = kwargs.pop("safety_check", True) + loras_json = kwargs.pop("loras", "") + guidance_scale = kwargs.pop("guidance_scale", None) + num_inference_steps = kwargs.pop("num_inference_steps", None) + controlnet_conditioning_scale = kwargs.pop("controlnet_conditioning_scale", None) + control_guidance_end = kwargs.pop("control_guidance_end", None) + strength = kwargs.pop("strength", None) + + if len(prompt) == 1: + prompt = prompt[0] + + # Handle seed initialization for reproducibility + if seed is not None: + if isinstance(seed, int): + kwargs["generator"] = torch.Generator(get_torch_device()).manual_seed(seed) + elif isinstance(seed, list): + kwargs["generator"] = [ + torch.Generator(get_torch_device()).manual_seed(s) for s in seed + ] + + # Dynamically (un)load LoRas. 
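+        # Outpainting runs two stages, so LoRAs must be enabled or disabled on both
+        # stage pipelines; the other tasks only touch the single pipeline's loader.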
+ if not loras_json: + if self.task == TaskType.OUTPAINTING.value: + self._lora_loader1.disable_loras() + self._lora_loader2.disable_loras() + else: + self._lora_loader.disable_loras() # Assuming _lora_loader is defined elsewhere + else: + if self.task == TaskType.OUTPAINTING.value: + self._lora_loader1.load_loras(loras_json) + self._lora_loader2.load_loras(loras_json) + else: + self._lora_loader.load_loras(loras_json) # Assuming _lora_loader is defined elsewhere + + # Handle num_inference_steps and other model-specific settings + if "num_inference_steps" in kwargs and ( + kwargs["num_inference_steps"] is None or kwargs["num_inference_steps"] < 1 + ): + del kwargs["num_inference_steps"] + + # Ensure proper inference configuration based on model + if self.task == TaskType.INPAINTING.value: + if mask_image is None: + raise ValueError("Mask image is required for inpainting.") + try: + outputs = self.pipeline( + prompt=prompt, + image=image, + mask_image=mask_image, + guidance_scale=guidance_scale[0], + strength=strength, + **kwargs + ).images[0] + except torch.cuda.OutOfMemoryError as e: + raise e + except Exception as e: + raise InferenceError(original_exception=e) + elif self.task == TaskType.OUTPAINTING.value: + try: + resized_image, white_bg_image = self._scale_and_paste(image) + temp_image = self.pipeline_stage1( + prompt=prompt[0], + image=white_bg_image, + guidance_scale=guidance_scale[0], + num_inference_steps=num_inference_steps[0], + controlnet_conditioning_scale=controlnet_conditioning_scale, + control_guidance_end=control_guidance_end, + **kwargs + ).images[0] + + x = (1024 - resized_image.width) // 2 + y = (1024 - resized_image.height) // 2 + temp_image.paste(resized_image, (x, y), resized_image) + + mask = Image.new("L", temp_image.size) + mask.paste(resized_image.split()[3], (x, y)) + mask = ImageOps.invert(mask) + final_mask = mask.point(lambda p: p > 128 and 255) + mask_blurred = self.pipeline_stage2.mask_processor.blur(final_mask, blur_factor=20) + + outputs = self.pipeline_stage2( + prompt[1], + image=temp_image, + mask_image=mask_blurred, + strength=strength, + guidance_scale=guidance_scale[1], + num_inference_steps=num_inference_steps[1], + **kwargs + ).images[0] + + x = (1024 - resized_image.width) // 2 + y = (1024 - resized_image.height) // 2 + outputs.paste(resized_image, (x, y), resized_image) + except torch.cuda.OutOfMemoryError as e: + raise e + except Exception as e: + raise InferenceError(original_exception=e) + elif self.task == TaskType.SKETCH_TO_IMAGE.value: + try: + # must resize to 1024*1024 or same resolution bucket to get the best performance + width, height = image.size + ratio = np.sqrt(1024. * 1024. 
/ (width * height)) + new_width, new_height = int(width * ratio), int(height * ratio) + image = image.resize((new_width, new_height)) + outputs = self.pipeline( + prompt=prompt, + image=image, + num_inference_steps=num_inference_steps[0], + controlnet_conditioning_scale=controlnet_conditioning_scale, + **kwargs + ).images[0] + except torch.cuda.OutOfMemoryError as e: + raise e + except Exception as e: + raise InferenceError(original_exception=e) + + # Safety check for NSFW content + if safety_check: + _, has_nsfw_concept = self._safety_checker.check_nsfw_images(outputs.images) + else: + has_nsfw_concept = [None] * len(outputs.images) + + return outputs, has_nsfw_concept # Return the first image in the output list + + + @staticmethod + def _scale_and_paste(original_image: PIL.Image.Image) -> Tuple[PIL.Image.Image, PIL.Image.Image]: + """Resize and paste the original image onto a 1024x1024 white canvas.""" + aspect_ratio = original_image.width / original_image.height + if original_image.width > original_image.height: + new_width = 1024 + new_height = round(new_width / aspect_ratio) + else: + new_height = 1024 + new_width = round(new_height * aspect_ratio) + + resized_original = original_image.resize((new_width, new_height), Image.LANCZOS) + white_background = Image.new("RGBA", (1024, 1024), "white") + x = (1024 - new_width) // 2 + y = (1024 - new_height) // 2 + white_background.paste(resized_original, (x, y), resized_original) + + return resized_original, white_background + + def __str__(self) -> str: + return f"ImageToImageGenericPipeline task={self.task}" diff --git a/runner/app/routes/image_to_image_generic.py b/runner/app/routes/image_to_image_generic.py new file mode 100644 index 00000000..0e85b509 --- /dev/null +++ b/runner/app/routes/image_to_image_generic.py @@ -0,0 +1,243 @@ +import logging +import os +import random +from typing import Annotated, Dict, Tuple, Union + +import torch +from fastapi import APIRouter, Depends, File, Form, UploadFile, status +from fastapi.responses import JSONResponse +from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer +from PIL import Image, ImageFile + +from app.dependencies import get_pipeline +from app.pipelines.base import Pipeline +from app.routes.utils import ( + HTTPError, + ImageResponse, + handle_pipeline_exception, + http_error, + image_to_data_url, + json_str_to_np_array, +) + +ImageFile.LOAD_TRUNCATED_IMAGES = True + +router = APIRouter() + +logger = logging.getLogger(__name__) + + +# Pipeline specific error handling configuration. +PIPELINE_ERROR_CONFIG: Dict[str, Tuple[Union[str, None], int]] = { + # Specific error types. + "OutOfMemoryError": ( + "Out of memory error. 
Try reducing input image resolution.", + status.HTTP_500_INTERNAL_SERVER_ERROR, + ) +} + +RESPONSES = { + status.HTTP_200_OK: { + "content": { + "application/json": { + "schema": { + "x-speakeasy-name-override": "data", + } + } + }, + }, + status.HTTP_400_BAD_REQUEST: {"model": HTTPError}, + status.HTTP_401_UNAUTHORIZED: {"model": HTTPError}, + status.HTTP_500_INTERNAL_SERVER_ERROR: {"model": HTTPError}, +} + + +# TODO: Make model_id and other None properties optional once Go codegen tool supports +# OAPI 3.1 https://github.com/deepmap/oapi-codegen/issues/373 +@router.post( + "/image-to-image-generic", + response_model=ImageResponse, + responses=RESPONSES, + description="Apply image transformations to a provided image according to the choice of tasks, i.e., outpainting, inpainting, sketch2image.", + operation_id="genImageToImageGeneric", + summary="Image To Image Generic", + tags=["generate"], + openapi_extra={"x-speakeasy-name-override": "imageToImageGeneric"}, +) +@router.post( + "/image-to-image-generic/", + response_model=ImageResponse, + responses=RESPONSES, + include_in_schema=False, +) +async def image_to_image_generic( + prompt: Annotated[ + str, + Form(description="Text prompt(s) to guide image generation."), + ], + image: Annotated[ + UploadFile, + File(description="Uploaded image to modify with the pipeline."), + ], + mask_image: Annotated[ + UploadFile, + File(description="Mask image to determine which regions of an image to fill in for inpainting task."), + ] = None, + model_id: Annotated[ + str, + Form(description="Hugging Face model ID used for image generation."), + ] = "", + loras: Annotated[ + str, + Form( + description=( + "A LoRA (Low-Rank Adaptation) model and its corresponding weight for " + 'image generation. Example: { "latent-consistency/lcm-lora-sdxl": ' + '1.0, "nerijs/pixel-art-xl": 1.2}.' + ) + ), + ] = "", + strength: Annotated[ + float, + Form( + description=( + "Degree of transformation applied to the reference image (0 to 1)." + ) + ), + ] = 0.8, + guidance_scale: Annotated[ + str, + Form( + description=( + "Encourages model to generate images closely linked to the text prompt " + "(higher values may reduce image quality)." + ) + ), + ] = "[6.5, 10.0]", + negative_prompt: Annotated[ + str, + Form( + description=( + "Text prompt(s) to guide what to exclude from image generation. " + "Ignored if guidance_scale < 1." + ) + ), + ] = "", + safety_check: Annotated[ + bool, + Form( + description=( + "Perform a safety check to estimate if generated images could be " + "offensive or harmful." + ) + ), + ] = True, + seed: Annotated[int, Form(description="Seed for random number generation.")] = None, + num_inference_steps: Annotated[ + str, + Form( + description=( + "Number of denoising steps. More steps usually lead to higher quality " + "images but slower inference. Modulated by strength." + ) + ), + ] = "[30, 25]", + controlnet_conditioning_scale: Annotated[ + float, + Form( + description=( + "Determines how much weight to assign to the conditioning inputs." + ) + ), + ] = 0.5, + control_guidance_end: Annotated[ + float, + Form( + description=( + "The percentage of total steps at which the ControlNet stops applying." 
+ ) + ), + ] = 0.9, + num_images_per_prompt: Annotated[ + int, + Form(description="Number of images to generate per prompt."), + ] = 1, + pipeline: Pipeline = Depends(get_pipeline), + token: HTTPAuthorizationCredentials = Depends(HTTPBearer(auto_error=False)), +): + auth_token = os.environ.get("AUTH_TOKEN") + if auth_token: + if not token or token.credentials != auth_token: + return JSONResponse( + status_code=status.HTTP_401_UNAUTHORIZED, + headers={"WWW-Authenticate": "Bearer"}, + content=http_error("Invalid bearer token."), + ) + + if model_id != "" and model_id != pipeline.model_id: + return JSONResponse( + status_code=status.HTTP_400_BAD_REQUEST, + content=http_error( + f"pipeline configured with {pipeline.model_id} but called with " + f"{model_id}." + ), + ) + + seed = seed if seed is not None else random.randint(0, 2**32 - 1) + seeds = [seed + i for i in range(num_images_per_prompt)] + + image = Image.open(image.file).convert("RGB") + mask_image = Image.open(mask_image.file).convert("RGB") if mask_image else None + + try: + prompt = json_str_to_np_array(prompt, var_name="prompt") + guidance_scale = json_str_to_np_array(guidance_scale, var_name="guidance_scale") + num_inference_steps = json_str_to_np_array(num_inference_steps, var_name="num_inference_steps") + except ValueError as e: + return JSONResponse( + status_code=status.HTTP_400_BAD_REQUEST, + content=http_error(str(e)), + ) + + # TODO: Process one image at a time to avoid CUDA OEM errors. Can be removed again + # once LIV-243 and LIV-379 are resolved. + images = [] + has_nsfw_concept = [] + for seed in seeds: + try: + imgs, nsfw_checks = pipeline( + prompt=prompt, + image=image, + mask_image=mask_image, + strength=strength, + loras=loras, + guidance_scale=guidance_scale, + negative_prompt=negative_prompt, + safety_check=safety_check, + seed=seed, + num_images_per_prompt=1, + num_inference_steps=num_inference_steps, + controlnet_conditioning_scale=controlnet_conditioning_scale, + control_guidance_end=control_guidance_end, + ) + except Exception as e: + if isinstance(e, torch.cuda.OutOfMemoryError): + # TODO: Investigate why not all VRAM memory is cleared. + torch.cuda.empty_cache() + logger.error(f"ImageToImageGenericPipeline pipeline error: {e}") + return handle_pipeline_exception( + e, + default_error_message="Image-to-image-generic pipeline error.", + custom_error_config=PIPELINE_ERROR_CONFIG, + ) + images.extend(imgs) + has_nsfw_concept.extend(nsfw_checks) + + # TODO: Return None once Go codegen tool supports optional properties + # OAPI 3.1 https://github.com/deepmap/oapi-codegen/issues/373 + output_images = [ + {"url": image_to_data_url(img), "seed": sd, "nsfw": nsfw or False} + for img, sd, nsfw in zip(images, seeds, has_nsfw_concept) + ] + + return {"images": output_images} diff --git a/runner/dl_checkpoints.sh b/runner/dl_checkpoints.sh index 6ebb88ef..8b4c6ece 100755 --- a/runner/dl_checkpoints.sh +++ b/runner/dl_checkpoints.sh @@ -80,6 +80,13 @@ function download_all_models() { # Custom pipeline models. huggingface-cli download facebook/sam2-hiera-large --include "*.pt" "*.yaml" --cache-dir models + # Download image-to-image-generic models. 
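+    # Covers the fp16-safe SDXL VAE and the ControlNet / inpainting checkpoints
+    # used by the inpainting, outpainting and sketch-to-image tasks.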
+ huggingface-cli download madebyollin/sdxl-vae-fp16-fix --include "*.safetensors" "*.json" --cache-dir models + huggingface-cli download OzzyGT/RealVisXL_V4.0_inpainting --include "*.fp16.safetensors" "*.json" "*.txt" --exclude ".onnx" ".onnx_data" --cache-dir models + huggingface-cli download kandinsky-community/kandinsky-2-2-decoder-inpaint --include "*.safetensors" "*.json" --cache-dir models + huggingface-cli download destitech/controlnet-inpaint-dreamer-sdxl --include "*.fp16.safetensors" "*.json" "*.txt" --exclude ".onnx" ".onnx_data" --cache-dir models + huggingface-cli download xinsir/controlnet-scribble-sdxl-1.0 --include "*.safetensors" "*.json" --cache-dir models + download_live_models } diff --git a/runner/gateway.openapi.yaml b/runner/gateway.openapi.yaml index e19de55f..ec565f3b 100644 --- a/runner/gateway.openapi.yaml +++ b/runner/gateway.openapi.yaml @@ -509,6 +509,54 @@ paths: security: - HTTPBearer: [] x-speakeasy-name-override: textToSpeech + /image-to-image-generic: + post: + tags: + - generate + summary: Image To Image Generic + description: Apply image transformations to a provided image according to the choice of tasks, i.e., outpainting, inpainting, sketch2image. + operationId: genImageToImageGeneric + requestBody: + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/Body_genImageToImageGeneric' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/ImageResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: imageToImageGeneric components: schemas: APIError: @@ -841,6 +889,89 @@ components: - image - model_id title: Body_genUpscale + Body_genImageToImageGeneric: + properties: + prompt: + type: string + title: Prompt + description: Text prompt(s) to guide image generation. + image: + type: string + format: binary + title: Image + description: Uploaded image to modify with the pipeline. + mask_image: + type: string + format: binary + title: Mask Image + description: Mask image to determine which regions of an image to fill in for inpainting task. + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for image generation. + default: '' + loras: + type: string + title: Loras + description: 'A LoRA (Low-Rank Adaptation) model and its corresponding weight + for image generation. Example: { "latent-consistency/lcm-lora-sdxl": 1.0, + "nerijs/pixel-art-xl": 1.2}.' + default: '' + strength: + type: number + title: Strength + description: Degree of transformation applied to the reference image (0 + to 1). + default: 0.8 + guidance_scale: + type: string + title: Guidance Scale + description: Encourages model to generate images closely linked to the text + prompt (higher values may reduce image quality). + default: '[6.5, 10.0]' + negative_prompt: + type: string + title: Negative Prompt + description: Text prompt(s) to guide what to exclude from image generation. + Ignored if guidance_scale < 1. 
+ default: '' + safety_check: + type: boolean + title: Safety Check + description: Perform a safety check to estimate if generated images could + be offensive or harmful. + default: true + seed: + type: integer + title: Seed + description: Seed for random number generation. + num_inference_steps: + type: string + title: Num Inference Steps + description: Number of denoising steps. More steps usually lead to higher + quality images but slower inference. Modulated by strength. + default: '[30, 25]' + controlnet_conditioning_scale: + type: number + title: Controlnet Conditioning Scale + description: Determines how much weight to assign to the conditioning inputs. + default: 0.5 + control_guidance_end: + type: number + title: Control Guidance End + description: The percentage of total steps at which the ControlNet stops applying. + default: 0.9 + num_images_per_prompt: + type: integer + title: Num Images Per Prompt + description: Number of images to generate per prompt. + default: 1 + type: object + required: + - prompt + - image + - model_id + title: Body_genImageToImageGeneric Chunk: properties: timestamp: diff --git a/runner/gen_openapi.py b/runner/gen_openapi.py index 94072e04..68e89a38 100644 --- a/runner/gen_openapi.py +++ b/runner/gen_openapi.py @@ -20,6 +20,7 @@ text_to_image, text_to_speech, upscale, + image_to_image_generic, ) logging.basicConfig( @@ -114,6 +115,7 @@ def write_openapi(fname: str, entrypoint: str = "runner"): app.include_router(image_to_text.router) app.include_router(live_video_to_video.router) app.include_router(text_to_speech.router) + app.include_router(image_to_image_generic.router) logger.info(f"Generating OpenAPI schema for '{entrypoint}' entrypoint...") openapi = get_openapi( diff --git a/runner/openapi.yaml b/runner/openapi.yaml index 757e7ae1..f5a19f1f 100644 --- a/runner/openapi.yaml +++ b/runner/openapi.yaml @@ -509,6 +509,54 @@ paths: security: - HTTPBearer: [] x-speakeasy-name-override: textToSpeech + /image-to-image-generic: + post: + tags: + - generate + summary: Image To Image Generic + description: Apply image transformations to a provided image according to the choice of tasks, i.e., outpainting, inpainting, sketch2image. + operationId: genImageToImageGeneric + requestBody: + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/Body_genImageToImageGeneric' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/ImageResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: imageToImageGeneric /health: get: summary: Health @@ -872,6 +920,89 @@ components: - prompt - image title: Body_genUpscale + Body_genImageToImageGeneric: + properties: + prompt: + type: string + title: Prompt + description: Text prompt(s) to guide image generation. + image: + type: string + format: binary + title: Image + description: Uploaded image to modify with the pipeline. 
+ mask_image: + type: string + format: binary + title: Mask Image + description: Mask image to determine which regions of an image to fill in for inpainting task. + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for image generation. + default: '' + loras: + type: string + title: Loras + description: 'A LoRA (Low-Rank Adaptation) model and its corresponding weight + for image generation. Example: { "latent-consistency/lcm-lora-sdxl": 1.0, + "nerijs/pixel-art-xl": 1.2}.' + default: '' + strength: + type: number + title: Strength + description: Degree of transformation applied to the reference image (0 + to 1). + default: 0.8 + guidance_scale: + type: string + title: Guidance Scale + description: Encourages model to generate images closely linked to the text + prompt (higher values may reduce image quality). + default: '[6.5, 10.0]' + negative_prompt: + type: string + title: Negative Prompt + description: Text prompt(s) to guide what to exclude from image generation. + Ignored if guidance_scale < 1. + default: '' + safety_check: + type: boolean + title: Safety Check + description: Perform a safety check to estimate if generated images could + be offensive or harmful. + default: true + seed: + type: integer + title: Seed + description: Seed for random number generation. + num_inference_steps: + type: string + title: Num Inference Steps + description: Number of denoising steps. More steps usually lead to higher + quality images but slower inference. Modulated by strength. + default: '[30, 25]' + controlnet_conditioning_scale: + type: number + title: Controlnet Conditioning Scale + description: Determines how much weight to assign to the conditioning inputs. + default: 0.5 + control_guidance_end: + type: number + title: Control Guidance End + description: The percentage of total steps at which the ControlNet stops applying. + default: 0.9 + num_images_per_prompt: + type: integer + title: Num Images Per Prompt + description: Number of images to generate per prompt. + default: 1 + type: object + required: + - prompt + - image + - model_id + title: Body_genImageToImageGeneric Chunk: properties: timestamp: diff --git a/worker/docker.go b/worker/docker.go index 9d7720fd..2bb965ae 100644 --- a/worker/docker.go +++ b/worker/docker.go @@ -44,16 +44,17 @@ var maxHealthCheckFailures = 2 // This only works right now on a single GPU because if there is another container // using the GPU we stop it so we don't have to worry about having enough ports var containerHostPorts = map[string]string{ - "text-to-image": "8000", - "image-to-image": "8100", - "image-to-video": "8200", - "upscale": "8300", - "audio-to-text": "8400", - "llm": "8500", - "segment-anything-2": "8600", - "image-to-text": "8700", - "text-to-speech": "8800", - "live-video-to-video": "8900", + "text-to-image": "8000", + "image-to-image": "8100", + "image-to-video": "8200", + "upscale": "8300", + "audio-to-text": "8400", + "llm": "8500", + "segment-anything-2": "8600", + "image-to-text": "8700", + "text-to-speech": "8800", + "live-video-to-video": "8900", + "image-to-image-generic": "9000", } // Mapping for per pipeline container images. 
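A minimal usage sketch for the multipart writer introduced in the next file (illustration only, not part of the patch). It assumes imports of bytes and net/http, that the generated code lives in a package named worker, and the oapi-codegen runtime file type (aliased openapi_types) with its InitFromBytes helper; buildImageToImageGenericRequest, runnerURL and sketchBytes are hypothetical names supplied by the caller. The request-body type and writer are the ones added in this commit.

	// buildImageToImageGenericRequest assembles a multipart request for the
	// /image-to-image-generic route using the generated body type.
	func buildImageToImageGenericRequest(runnerURL string, sketchBytes []byte) (*http.Request, error) {
		// Wrap the raw image bytes in the oapi-codegen file type expected by the body.
		var img openapi_types.File
		img.InitFromBytes(sketchBytes, "sketch.png")

		modelID := "xinsir/controlnet-scribble-sdxl-1.0"
		body := worker.GenImageToImageGenericMultipartRequestBody{
			Prompt:  "a watercolor landscape, best quality",
			Image:   img,
			ModelId: &modelID, // pointer-typed in this revision of the generated code
		}

		// Encode the body as multipart/form-data with the writer added in this commit.
		var buf bytes.Buffer
		mw, err := worker.NewImageToImageGenericMultipartWriter(&buf, body)
		if err != nil {
			return nil, err
		}

		req, err := http.NewRequest(http.MethodPost, runnerURL+"/image-to-image-generic", &buf)
		if err != nil {
			return nil, err
		}
		req.Header.Set("Content-Type", mw.FormDataContentType())
		return req, nil
	}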
diff --git a/worker/multipart.go b/worker/multipart.go index 551b0af8..25b00341 100644 --- a/worker/multipart.go +++ b/worker/multipart.go @@ -413,3 +413,106 @@ func NewImageToTextMultipartWriter(w io.Writer, req GenImageToTextMultipartReque return mw, nil } + +func NewImageToImageGenericMultipartWriter(w io.Writer, req GenImageToImageGenericMultipartRequestBody) (*multipart.Writer, error) { + mw := multipart.NewWriter(w) + writer, err := mw.CreateFormFile("image", req.Image.Filename()) + if err != nil { + return nil, err + } + imageSize := req.Image.FileSize() + imageRdr, err := req.Image.Reader() + if err != nil { + return nil, err + } + copied, err := io.Copy(writer, imageRdr) + if err != nil { + return nil, err + } + if copied != imageSize { + return nil, fmt.Errorf("failed to copy image to multipart request imageBytes=%v copiedBytes=%v", imageSize, copied) + } + + if req.MaskImage != nil { + writer, err := mw.CreateFormFile("mask_image", req.MaskImage.Filename()) + if err != nil { + return nil, err + } + maskimageSize := req.MaskImage.FileSize() + maskimageRdr, err := req.MaskImage.Reader() + if err != nil { + return nil, err + } + copied, err := io.Copy(writer, maskimageRdr) + if err != nil { + return nil, err + } + if copied != maskimageSize { + return nil, fmt.Errorf("failed to copy mask_image to multipart request maskimageBytes=%v copiedBytes=%v", maskimageSize, copied) + } + + if err := mw.WriteField("prompt", req.Prompt); err != nil { + return nil, err + } + if req.ModelId != nil { + if err := mw.WriteField("model_id", *req.ModelId); err != nil { + return nil, err + } + } + if req.Loras != nil { + if err := mw.WriteField("loras", *req.Loras); err != nil { + return nil, err + } + } + if req.Strength != nil { + if err := mw.WriteField("strength", fmt.Sprintf("%f", *req.Strength)); err != nil { + return nil, err + } + } + if req.GuidanceScale != nil { + if err := mw.WriteField("guidance_scale", *req.GuidanceScale); err != nil { + return nil, err + } + } + if req.NegativePrompt != nil { + if err := mw.WriteField("negative_prompt", *req.NegativePrompt); err != nil { + return nil, err + } + } + if req.SafetyCheck != nil { + if err := mw.WriteField("safety_check", strconv.FormatBool(*req.SafetyCheck)); err != nil { + return nil, err + } + } + if req.Seed != nil { + if err := mw.WriteField("seed", strconv.Itoa(*req.Seed)); err != nil { + return nil, err + } + } + if req.NumImagesPerPrompt != nil { + if err := mw.WriteField("num_images_per_prompt", strconv.Itoa(*req.NumImagesPerPrompt)); err != nil { + return nil, err + } + } + if req.NumInferenceSteps != nil { + if err := mw.WriteField("num_inference_steps", *req.NumInferenceSteps); err != nil { + return nil, err + } + } + if req.ControlnetConditioningScale != nil { + if err := mw.WriteField("controlnet_conditioning_scale", fmt.Sprintf("%f", *req.ControlnetConditioningScale)); err != nil { + return nil, err + } + } + if req.ControlGuidanceEnd != nil { + if err := mw.WriteField("control_guidance_end", fmt.Sprintf("%f", *req.ControlGuidanceEnd)); err != nil { + return nil, err + } + } + + if err := mw.Close(); err != nil { + return nil, err + } + + return mw, nil +} diff --git a/worker/runner.gen.go b/worker/runner.gen.go index 9b6fa10f..86048421 100644 --- a/worker/runner.gen.go +++ b/worker/runner.gen.go @@ -206,6 +206,51 @@ type BodyGenUpscale struct { Seed *int `json:"seed,omitempty"` } +// BodyGenImageToImageGeneric defines model for Body_genImageToImageGeneric. 
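+// GuidanceScale and NumInferenceSteps are JSON-encoded arrays (for example "[6.5, 10.0]")
+// so the two-stage outpainting task can receive a separate value for each stage.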
+type BodyGenImageToImageGeneric struct { + // GuidanceScale Encourages model to generate images closely linked to the text prompt (higher values may reduce image quality). + GuidanceScale *string `json:"guidance_scale,omitempty"` + + // Image Uploaded image to modify with the pipeline. + Image openapi_types.File `json:"image"` + + // MaskImage Mask image to determine which regions of an image to fill in for inpainting task. + MaskImage openapi_types.File `json:"mask_image"` + + // Loras A LoRA (Low-Rank Adaptation) model and its corresponding weight for image generation. Example: { "latent-consistency/lcm-lora-sdxl": 1.0, "nerijs/pixel-art-xl": 1.2}. + Loras *string `json:"loras,omitempty"` + + // ModelId Hugging Face model ID used for image generation. + ModelId *string `json:"model_id,omitempty"` + + // NegativePrompt Text prompt(s) to guide what to exclude from image generation. Ignored if guidance_scale < 1. + NegativePrompt *string `json:"negative_prompt,omitempty"` + + // NumImagesPerPrompt Number of images to generate per prompt. + NumImagesPerPrompt *int `json:"num_images_per_prompt,omitempty"` + + // NumInferenceSteps Number of denoising steps. More steps usually lead to higher quality images but slower inference. Modulated by strength. + NumInferenceSteps *string `json:"num_inference_steps,omitempty"` + + // Prompt Text prompt(s) to guide image generation. + Prompt string `json:"prompt"` + + // SafetyCheck Perform a safety check to estimate if generated images could be offensive or harmful. + SafetyCheck *bool `json:"safety_check,omitempty"` + + // Seed Seed for random number generation. + Seed *int `json:"seed,omitempty"` + + // Strength Degree of transformation applied to the reference image (0 to 1). + Strength *float32 `json:"strength,omitempty"` + + // Determines how much weight to assign to the conditioning inputs. + ControlnetConditioningScale *float32 `json:"controlnet_conditioning_scale,omitempty"` + + // The percentage of total steps at which the ControlNet stops applying. + ControlGuidanceEnd *float32 `json:"control_guidance_end,omitempty"` +} + // Chunk A chunk of text with a timestamp. type Chunk struct { // Text The text of the chunk. @@ -464,6 +509,9 @@ type GenTextToSpeechJSONRequestBody = TextToSpeechParams // GenUpscaleMultipartRequestBody defines body for GenUpscale for multipart/form-data ContentType. type GenUpscaleMultipartRequestBody = BodyGenUpscale +// GenImageToImageGenericMultipartRequestBody defines body for GenImageToImageGeneric for multipart/form-data ContentType. 
+type GenImageToImageGenericMultipartRequestBody = BodyGenImageToImageGeneric + // AsValidationErrorLoc0 returns the union data inside the ValidationError_Loc_Item as a ValidationErrorLoc0 func (t ValidationError_Loc_Item) AsValidationErrorLoc0() (ValidationErrorLoc0, error) { var body ValidationErrorLoc0 @@ -645,6 +693,9 @@ type ClientInterface interface { // GenUpscaleWithBody request with any body GenUpscaleWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) + + // GenImageToImageGenericWithBody request with any body + GenImageToImageGenericWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) } func (c *Client) GenAudioToTextWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { @@ -851,6 +902,18 @@ func (c *Client) GenUpscaleWithBody(ctx context.Context, contentType string, bod return c.Client.Do(req) } +func (c *Client) GenImageToImageGenericWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewGenImageToImageGenericRequestWithBody(c.Server, contentType, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + // NewGenAudioToTextRequestWithBody generates requests for GenAudioToText with any type of body func NewGenAudioToTextRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) { var err error @@ -1266,6 +1329,35 @@ func NewGenUpscaleRequestWithBody(server string, contentType string, body io.Rea return req, nil } +// NewGenImageToImageGenericRequestWithBody generates requests for GenImageToImageGeneric with any type of body +func NewGenImageToImageGenericRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/image-to-image-generic") + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", queryURL.String(), body) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", contentType) + + return req, nil +} + func (c *Client) applyEditors(ctx context.Context, req *http.Request, additionalEditors []RequestEditorFn) error { for _, r := range c.RequestEditors { if err := r(ctx, req); err != nil { @@ -1355,6 +1447,9 @@ type ClientWithResponsesInterface interface { // GenUpscaleWithBodyWithResponse request with any body GenUpscaleWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*GenUpscaleResponse, error) + + // GenImageToImageGenericWithBodyWithResponse request with any body + GenImageToImageGenericWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*GenImageToImageGenericResponse, error) } type GenAudioToTextResponse struct { @@ -1686,6 +1781,32 @@ func (r GenUpscaleResponse) StatusCode() int { return 0 } +type GenImageToImageGenericResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *ImageResponse + JSON400 *HTTPError + JSON401 *HTTPError + JSON422 *HTTPValidationError + JSON500 *HTTPError +} + +// Status returns HTTPResponse.Status +func (r GenImageToImageGenericResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r GenImageToImageGenericResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + // GenAudioToTextWithBodyWithResponse request with arbitrary body returning *GenAudioToTextResponse func (c *ClientWithResponses) GenAudioToTextWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*GenAudioToTextResponse, error) { rsp, err := c.GenAudioToTextWithBody(ctx, contentType, body, reqEditors...) @@ -1835,6 +1956,15 @@ func (c *ClientWithResponses) GenUpscaleWithBodyWithResponse(ctx context.Context return ParseGenUpscaleResponse(rsp) } +// GenImageToImageGenericWithBodyWithResponse request with arbitrary body returning *GenImageToImageGenericResponse +func (c *ClientWithResponses) GenImageToImageGenericWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*GenImageToImageGenericResponse, error) { + rsp, err := c.GenImageToImageGenericWithBody(ctx, contentType, body, reqEditors...) 
+ if err != nil { + return nil, err + } + return ParseGenImageToImageGenericResponse(rsp) +} + // ParseGenAudioToTextResponse parses an HTTP response from a GenAudioToTextWithResponse call func ParseGenAudioToTextResponse(rsp *http.Response) (*GenAudioToTextResponse, error) { bodyBytes, err := io.ReadAll(rsp.Body) @@ -2474,6 +2604,60 @@ func ParseGenUpscaleResponse(rsp *http.Response) (*GenUpscaleResponse, error) { return response, nil } +// ParseGenImageToImageGenericResponse parses an HTTP response from a GenImageToImageGenericWithResponse call +func ParseGenImageToImageGenericResponse(rsp *http.Response) (*GenImageToImageGenericResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &GenImageToImageGenericResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest ImageResponse + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest HTTPError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 401: + var dest HTTPError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON401 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 422: + var dest HTTPValidationError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON422 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 500: + var dest HTTPError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON500 = &dest + + } + + return response, nil +} + // ServerInterface represents all server handlers. type ServerInterface interface { // Audio To Text @@ -2515,6 +2699,9 @@ type ServerInterface interface { // Upscale // (POST /upscale) GenUpscale(w http.ResponseWriter, r *http.Request) + // Image To Image Generic + // (POST /image-to-image-generic) + GenImageToImageGeneric(w http.ResponseWriter, r *http.Request) } // Unimplemented server implementation that returns http.StatusNotImplemented for each endpoint. @@ -2599,6 +2786,12 @@ func (_ Unimplemented) GenUpscale(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNotImplemented) } +// Image To Image Generic +// (POST /image-to-image-generic) +func (_ Unimplemented) GenImageToImageGeneric(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotImplemented) +} + // ServerInterfaceWrapper converts contexts to parameters. 
type ServerInterfaceWrapper struct { Handler ServerInterface @@ -2823,6 +3016,23 @@ func (siw *ServerInterfaceWrapper) GenUpscale(w http.ResponseWriter, r *http.Req handler.ServeHTTP(w, r.WithContext(ctx)) } +// GenImageToImageGeneric operation middleware +func (siw *ServerInterfaceWrapper) GenImageToImageGeneric(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + ctx = context.WithValue(ctx, HTTPBearerScopes, []string{}) + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.GenImageToImageGeneric(w, r) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) +} + type UnescapedCookieParamError struct { ParamName string Err error @@ -2975,6 +3185,9 @@ func HandlerWithOptions(si ServerInterface, options ChiServerOptions) http.Handl r.Group(func(r chi.Router) { r.Post(options.BaseURL+"/upscale", wrapper.GenUpscale) }) + r.Group(func(r chi.Router) { + r.Post(options.BaseURL+"/image-to-image-generic", wrapper.GenImageToImageGeneric) + }) return r } diff --git a/worker/worker.go b/worker/worker.go index e381e4be..7f0ae21e 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -659,6 +659,64 @@ func (w *Worker) LiveVideoToVideo(ctx context.Context, req GenLiveVideoToVideoJS return resp.JSON200, nil } +func (w *Worker) ImageToImageGeneric(ctx context.Context, req GenImageToImageGenericMultipartRequestBody) (*ImageResponse, error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, err := w.borrowContainer(ctx, "image-to-image-generic", *req.ModelId) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + mw, err := NewImageToImageGenericMultipartWriter(&buf, req) + if err != nil { + return nil, err + } + + resp, err := c.Client.GenImageToImageGenericWithBodyWithResponse(ctx, mw.FormDataContentType(), &buf) + if err != nil { + return nil, err + } + + if resp.JSON400 != nil { + val, err := json.Marshal(resp.JSON400) + if err != nil { + return nil, err + } + slog.Error("image-to-image-generic container returned 400", slog.String("err", string(val))) + return nil, errors.New("image-to-image-generic container returned 400: " + resp.JSON400.Detail.Msg) + } + + if resp.JSON401 != nil { + val, err := json.Marshal(resp.JSON401) + if err != nil { + return nil, err + } + slog.Error("image-to-image-generic container returned 401", slog.String("err", string(val))) + return nil, errors.New("image-to-image-generic container returned 401: " + resp.JSON401.Detail.Msg) + } + + if resp.JSON422 != nil { + val, err := json.Marshal(resp.JSON422) + if err != nil { + return nil, err + } + slog.Error("image-to-image-generic container returned 422", slog.String("err", string(val))) + return nil, errors.New("image-to-image-generic container returned 422: " + string(val)) + } + + if resp.JSON500 != nil { + val, err := json.Marshal(resp.JSON500) + if err != nil { + return nil, err + } + slog.Error("image-to-image-generic container returned 500", slog.String("err", string(val))) + return nil, errors.New("image-to-image-generic container returned 500: " + resp.JSON500.Detail.Msg) + } + + return resp.JSON200, nil +} + func (w *Worker) EnsureImageAvailable(ctx context.Context, pipeline string, modelID string) error { return w.manager.EnsureImageAvailable(ctx, pipeline, modelID) } From 658f5f9b62d0f9f9ab603df260ce7709d586ac02 Mon Sep 17 00:00:00 2001 From: RUFFY-369 Date: Sun, 29 Dec 2024 20:09:00 +0530 Subject: [PATCH 02/10] style:ruff formatting --- 
.../app/pipelines/image_to_image_generic.py | 97 +++++++++++-------- runner/app/routes/image_to_image_generic.py | 8 +- runner/gen_openapi.py | 4 +- 3 files changed, 62 insertions(+), 47 deletions(-) diff --git a/runner/app/pipelines/image_to_image_generic.py b/runner/app/pipelines/image_to_image_generic.py index 8aeda564..5986737d 100644 --- a/runner/app/pipelines/image_to_image_generic.py +++ b/runner/app/pipelines/image_to_image_generic.py @@ -27,8 +27,10 @@ logger = logging.getLogger(__name__) + class TaskType(Enum): """Enumeration for task types.""" + INPAINTING = "inpainting" OUTPAINTING = "outpainting" SKETCH_TO_IMAGE = "sketch_to_image" @@ -40,7 +42,6 @@ def list(cls): class ImageToImageGenericPipeline(Pipeline): def __init__(self, model_id: str, task: str): - kwargs = {"cache_dir": get_model_dir(), "torch_dtype": torch.float16} torch_device = get_torch_device() @@ -51,15 +52,15 @@ def __init__(self, model_id: str, task: str): # Load the fp16 variant if fp16 'safetensors' files are present in the cache. # NOTE: Exception for SDXL-Lightning model: despite having fp16 'safetensors' # files, they are not named according to the standard convention. - has_fp16_variant = ( - any( - ".fp16.safetensors" in fname - for _, _, files in os.walk(folder_path) - for fname in files - ) + has_fp16_variant = any( + ".fp16.safetensors" in fname + for _, _, files in os.walk(folder_path) + for fname in files ) if torch_device.type != "cpu" and has_fp16_variant: - logger.info("ImageToImageGenericPipeline loading fp16 variant for %s", model_id) + logger.info( + "ImageToImageGenericPipeline loading fp16 variant for %s", model_id + ) kwargs["torch_dtype"] = torch.float16 kwargs["variant"] = "fp16" @@ -77,9 +78,11 @@ def __init__(self, model_id: str, task: str): self.pipeline.enable_model_cpu_offload() elif self.task == TaskType.OUTPAINTING.value: - self.controlnet = ControlNetModel.from_pretrained( - model_id, torch_dtype=torch.float16, variant="fp16" - ).to(torch_device), + self.controlnet = ( + ControlNetModel.from_pretrained( + model_id, torch_dtype=torch.float16, variant="fp16" + ).to(torch_device), + ) self.vae = AutoencoderKL.from_pretrained( "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16 ).to(torch_device) @@ -88,29 +91,29 @@ def __init__(self, model_id: str, task: str): controlnet=self.controlnet, vae=self.vae, safety_checker=None, - **kwargs + **kwargs, ).to(torch_device) self.pipeline_stage2 = StableDiffusionXLInpaintPipeline.from_pretrained( - "OzzyGT/RealVisXL_V4.0_inpainting", - vae=self.vae, - **kwargs + "OzzyGT/RealVisXL_V4.0_inpainting", vae=self.vae, **kwargs ).to(torch_device) elif self.task == TaskType.SKETCH_TO_IMAGE.value: - self.controlnet = ControlNetModel.from_pretrained( - model_id, **kwargs - ).to(torch_device) + self.controlnet = ControlNetModel.from_pretrained(model_id, **kwargs).to( + torch_device + ) self.vae = AutoencoderKL.from_pretrained( "madebyollin/sdxl-vae-fp16-fix", **kwargs ).to(torch_device) - eulera_scheduler = EulerAncestralDiscreteScheduler.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler") + eulera_scheduler = EulerAncestralDiscreteScheduler.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler" + ) self.pipeline = StableDiffusionXLControlNetPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", controlnet=self.controlnet, vae=self.vae, safety_checker=None, scheduler=eulera_scheduler, - **kwargs + **kwargs, ).to(torch_device) self._lora_loader = LoraLoader(self.pipeline) @@ 
-118,7 +121,7 @@ def __init__(self, model_id: str, task: str): if self.task == TaskType.OUTPAINTING.value: self._lora_loader1 = LoraLoader(self.pipeline_stage1) self._lora_loader2 = LoraLoader(self.pipeline_stage2) - + def __call__( self, prompt: List[str], @@ -126,7 +129,6 @@ def __call__( mask_image: Optional[PIL.Image.Image] = None, **kwargs, ) -> Tuple[List[PIL.Image], List[Optional[bool]]]: - # Handle num_inference_steps and other model-specific settings if "num_inference_steps" in kwargs and ( kwargs["num_inference_steps"] is None or kwargs["num_inference_steps"] < 1 @@ -139,9 +141,11 @@ def __call__( loras_json = kwargs.pop("loras", "") guidance_scale = kwargs.pop("guidance_scale", None) num_inference_steps = kwargs.pop("num_inference_steps", None) - controlnet_conditioning_scale = kwargs.pop("controlnet_conditioning_scale", None) + controlnet_conditioning_scale = kwargs.pop( + "controlnet_conditioning_scale", None + ) control_guidance_end = kwargs.pop("control_guidance_end", None) - strength = kwargs.pop("strength", None) + strength = kwargs.pop("strength", None) if len(prompt) == 1: prompt = prompt[0] @@ -149,7 +153,9 @@ def __call__( # Handle seed initialization for reproducibility if seed is not None: if isinstance(seed, int): - kwargs["generator"] = torch.Generator(get_torch_device()).manual_seed(seed) + kwargs["generator"] = torch.Generator(get_torch_device()).manual_seed( + seed + ) elif isinstance(seed, list): kwargs["generator"] = [ torch.Generator(get_torch_device()).manual_seed(s) for s in seed @@ -167,7 +173,9 @@ def __call__( self._lora_loader1.load_loras(loras_json) self._lora_loader2.load_loras(loras_json) else: - self._lora_loader.load_loras(loras_json) # Assuming _lora_loader is defined elsewhere + self._lora_loader.load_loras( + loras_json + ) # Assuming _lora_loader is defined elsewhere # Handle num_inference_steps and other model-specific settings if "num_inference_steps" in kwargs and ( @@ -181,12 +189,12 @@ def __call__( raise ValueError("Mask image is required for inpainting.") try: outputs = self.pipeline( - prompt=prompt, - image=image, - mask_image=mask_image, - guidance_scale=guidance_scale[0], - strength=strength, - **kwargs + prompt=prompt, + image=image, + mask_image=mask_image, + guidance_scale=guidance_scale[0], + strength=strength, + **kwargs, ).images[0] except torch.cuda.OutOfMemoryError as e: raise e @@ -202,19 +210,21 @@ def __call__( num_inference_steps=num_inference_steps[0], controlnet_conditioning_scale=controlnet_conditioning_scale, control_guidance_end=control_guidance_end, - **kwargs - ).images[0] + **kwargs, + ).images[0] x = (1024 - resized_image.width) // 2 y = (1024 - resized_image.height) // 2 temp_image.paste(resized_image, (x, y), resized_image) - + mask = Image.new("L", temp_image.size) mask.paste(resized_image.split()[3], (x, y)) mask = ImageOps.invert(mask) final_mask = mask.point(lambda p: p > 128 and 255) - mask_blurred = self.pipeline_stage2.mask_processor.blur(final_mask, blur_factor=20) - + mask_blurred = self.pipeline_stage2.mask_processor.blur( + final_mask, blur_factor=20 + ) + outputs = self.pipeline_stage2( prompt[1], image=temp_image, @@ -222,7 +232,7 @@ def __call__( strength=strength, guidance_scale=guidance_scale[1], num_inference_steps=num_inference_steps[1], - **kwargs + **kwargs, ).images[0] x = (1024 - resized_image.width) // 2 @@ -235,8 +245,8 @@ def __call__( elif self.task == TaskType.SKETCH_TO_IMAGE.value: try: # must resize to 1024*1024 or same resolution bucket to get the best performance - width, height = 
image.size - ratio = np.sqrt(1024. * 1024. / (width * height)) + width, height = image.size + ratio = np.sqrt(1024.0 * 1024.0 / (width * height)) new_width, new_height = int(width * ratio), int(height * ratio) image = image.resize((new_width, new_height)) outputs = self.pipeline( @@ -244,7 +254,7 @@ def __call__( image=image, num_inference_steps=num_inference_steps[0], controlnet_conditioning_scale=controlnet_conditioning_scale, - **kwargs + **kwargs, ).images[0] except torch.cuda.OutOfMemoryError as e: raise e @@ -259,9 +269,10 @@ def __call__( return outputs, has_nsfw_concept # Return the first image in the output list - @staticmethod - def _scale_and_paste(original_image: PIL.Image.Image) -> Tuple[PIL.Image.Image, PIL.Image.Image]: + def _scale_and_paste( + original_image: PIL.Image.Image, + ) -> Tuple[PIL.Image.Image, PIL.Image.Image]: """Resize and paste the original image onto a 1024x1024 white canvas.""" aspect_ratio = original_image.width / original_image.height if original_image.width > original_image.height: diff --git a/runner/app/routes/image_to_image_generic.py b/runner/app/routes/image_to_image_generic.py index 0e85b509..408e6457 100644 --- a/runner/app/routes/image_to_image_generic.py +++ b/runner/app/routes/image_to_image_generic.py @@ -81,7 +81,9 @@ async def image_to_image_generic( ], mask_image: Annotated[ UploadFile, - File(description="Mask image to determine which regions of an image to fill in for inpainting task."), + File( + description="Mask image to determine which regions of an image to fill in for inpainting task." + ), ] = None, model_id: Annotated[ str, @@ -192,7 +194,9 @@ async def image_to_image_generic( try: prompt = json_str_to_np_array(prompt, var_name="prompt") guidance_scale = json_str_to_np_array(guidance_scale, var_name="guidance_scale") - num_inference_steps = json_str_to_np_array(num_inference_steps, var_name="num_inference_steps") + num_inference_steps = json_str_to_np_array( + num_inference_steps, var_name="num_inference_steps" + ) except ValueError as e: return JSONResponse( status_code=status.HTTP_400_BAD_REQUEST, diff --git a/runner/gen_openapi.py b/runner/gen_openapi.py index 68e89a38..af1ccf7d 100644 --- a/runner/gen_openapi.py +++ b/runner/gen_openapi.py @@ -166,8 +166,8 @@ def write_openapi(fname: str, entrypoint: str = "runner"): parser.add_argument( "--entrypoint", type=str, - choices=["gateway","runner"], - default=["gateway","runner"], + choices=["gateway", "runner"], + default=["gateway", "runner"], nargs="+", help=( "The entrypoint to generate the OpenAPI schema for, options are 'runner' " From 98100a6f4b512103bbaf96937c68c31dcee5b1ff Mon Sep 17 00:00:00 2001 From: RUFFY-369 Date: Sun, 29 Dec 2024 20:11:21 +0530 Subject: [PATCH 03/10] chore:make codegen --- worker/runner.gen.go | 568 ++++++++++++++++++++++--------------------- 1 file changed, 287 insertions(+), 281 deletions(-) diff --git a/worker/runner.gen.go b/worker/runner.gen.go index 86048421..adaa6f1d 100644 --- a/worker/runner.gen.go +++ b/worker/runner.gen.go @@ -99,6 +99,51 @@ type BodyGenImageToImage struct { Strength *float32 `json:"strength,omitempty"` } +// BodyGenImageToImageGeneric defines model for Body_genImageToImageGeneric. +type BodyGenImageToImageGeneric struct { + // ControlGuidanceEnd The percentage of total steps at which the ControlNet stops applying. + ControlGuidanceEnd *float32 `json:"control_guidance_end,omitempty"` + + // ControlnetConditioningScale Determines how much weight to assign to the conditioning inputs. 
+ ControlnetConditioningScale *float32 `json:"controlnet_conditioning_scale,omitempty"` + + // GuidanceScale Encourages model to generate images closely linked to the text prompt (higher values may reduce image quality). + GuidanceScale *string `json:"guidance_scale,omitempty"` + + // Image Uploaded image to modify with the pipeline. + Image openapi_types.File `json:"image"` + + // Loras A LoRA (Low-Rank Adaptation) model and its corresponding weight for image generation. Example: { "latent-consistency/lcm-lora-sdxl": 1.0, "nerijs/pixel-art-xl": 1.2}. + Loras *string `json:"loras,omitempty"` + + // MaskImage Mask image to determine which regions of an image to fill in for inpainting task. + MaskImage *openapi_types.File `json:"mask_image,omitempty"` + + // ModelId Hugging Face model ID used for image generation. + ModelId string `json:"model_id"` + + // NegativePrompt Text prompt(s) to guide what to exclude from image generation. Ignored if guidance_scale < 1. + NegativePrompt *string `json:"negative_prompt,omitempty"` + + // NumImagesPerPrompt Number of images to generate per prompt. + NumImagesPerPrompt *int `json:"num_images_per_prompt,omitempty"` + + // NumInferenceSteps Number of denoising steps. More steps usually lead to higher quality images but slower inference. Modulated by strength. + NumInferenceSteps *string `json:"num_inference_steps,omitempty"` + + // Prompt Text prompt(s) to guide image generation. + Prompt string `json:"prompt"` + + // SafetyCheck Perform a safety check to estimate if generated images could be offensive or harmful. + SafetyCheck *bool `json:"safety_check,omitempty"` + + // Seed Seed for random number generation. + Seed *int `json:"seed,omitempty"` + + // Strength Degree of transformation applied to the reference image (0 to 1). + Strength *float32 `json:"strength,omitempty"` +} + // BodyGenImageToText defines model for Body_genImageToText. type BodyGenImageToText struct { // Image Uploaded image to transform with the pipeline. @@ -206,51 +251,6 @@ type BodyGenUpscale struct { Seed *int `json:"seed,omitempty"` } -// BodyGenImageToImageGeneric defines model for Body_genImageToImageGeneric. -type BodyGenImageToImageGeneric struct { - // GuidanceScale Encourages model to generate images closely linked to the text prompt (higher values may reduce image quality). - GuidanceScale *string `json:"guidance_scale,omitempty"` - - // Image Uploaded image to modify with the pipeline. - Image openapi_types.File `json:"image"` - - // MaskImage Mask image to determine which regions of an image to fill in for inpainting task. - MaskImage openapi_types.File `json:"mask_image"` - - // Loras A LoRA (Low-Rank Adaptation) model and its corresponding weight for image generation. Example: { "latent-consistency/lcm-lora-sdxl": 1.0, "nerijs/pixel-art-xl": 1.2}. - Loras *string `json:"loras,omitempty"` - - // ModelId Hugging Face model ID used for image generation. - ModelId *string `json:"model_id,omitempty"` - - // NegativePrompt Text prompt(s) to guide what to exclude from image generation. Ignored if guidance_scale < 1. - NegativePrompt *string `json:"negative_prompt,omitempty"` - - // NumImagesPerPrompt Number of images to generate per prompt. - NumImagesPerPrompt *int `json:"num_images_per_prompt,omitempty"` - - // NumInferenceSteps Number of denoising steps. More steps usually lead to higher quality images but slower inference. Modulated by strength. - NumInferenceSteps *string `json:"num_inference_steps,omitempty"` - - // Prompt Text prompt(s) to guide image generation. 
- Prompt string `json:"prompt"` - - // SafetyCheck Perform a safety check to estimate if generated images could be offensive or harmful. - SafetyCheck *bool `json:"safety_check,omitempty"` - - // Seed Seed for random number generation. - Seed *int `json:"seed,omitempty"` - - // Strength Degree of transformation applied to the reference image (0 to 1). - Strength *float32 `json:"strength,omitempty"` - - // Determines how much weight to assign to the conditioning inputs. - ControlnetConditioningScale *float32 `json:"controlnet_conditioning_scale,omitempty"` - - // The percentage of total steps at which the ControlNet stops applying. - ControlGuidanceEnd *float32 `json:"control_guidance_end,omitempty"` -} - // Chunk A chunk of text with a timestamp. type Chunk struct { // Text The text of the chunk. @@ -485,6 +485,9 @@ type GenAudioToTextMultipartRequestBody = BodyGenAudioToText // GenImageToImageMultipartRequestBody defines body for GenImageToImage for multipart/form-data ContentType. type GenImageToImageMultipartRequestBody = BodyGenImageToImage +// GenImageToImageGenericMultipartRequestBody defines body for GenImageToImageGeneric for multipart/form-data ContentType. +type GenImageToImageGenericMultipartRequestBody = BodyGenImageToImageGeneric + // GenImageToTextMultipartRequestBody defines body for GenImageToText for multipart/form-data ContentType. type GenImageToTextMultipartRequestBody = BodyGenImageToText @@ -509,9 +512,6 @@ type GenTextToSpeechJSONRequestBody = TextToSpeechParams // GenUpscaleMultipartRequestBody defines body for GenUpscale for multipart/form-data ContentType. type GenUpscaleMultipartRequestBody = BodyGenUpscale -// GenImageToImageGenericMultipartRequestBody defines body for GenImageToImageGeneric for multipart/form-data ContentType. 
-type GenImageToImageGenericMultipartRequestBody = BodyGenImageToImageGeneric - // AsValidationErrorLoc0 returns the union data inside the ValidationError_Loc_Item as a ValidationErrorLoc0 func (t ValidationError_Loc_Item) AsValidationErrorLoc0() (ValidationErrorLoc0, error) { var body ValidationErrorLoc0 @@ -662,6 +662,9 @@ type ClientInterface interface { // GenImageToImageWithBody request with any body GenImageToImageWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) + // GenImageToImageGenericWithBody request with any body + GenImageToImageGenericWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) + // GenImageToTextWithBody request with any body GenImageToTextWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) @@ -693,9 +696,6 @@ type ClientInterface interface { // GenUpscaleWithBody request with any body GenUpscaleWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) - - // GenImageToImageGenericWithBody request with any body - GenImageToImageGenericWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) } func (c *Client) GenAudioToTextWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { @@ -758,6 +758,18 @@ func (c *Client) GenImageToImageWithBody(ctx context.Context, contentType string return c.Client.Do(req) } +func (c *Client) GenImageToImageGenericWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewGenImageToImageGenericRequestWithBody(c.Server, contentType, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + func (c *Client) GenImageToTextWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { req, err := NewGenImageToTextRequestWithBody(c.Server, contentType, body) if err != nil { @@ -902,18 +914,6 @@ func (c *Client) GenUpscaleWithBody(ctx context.Context, contentType string, bod return c.Client.Do(req) } -func (c *Client) GenImageToImageGenericWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { - req, err := NewGenImageToImageGenericRequestWithBody(c.Server, contentType, body) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - if err := c.applyEditors(ctx, req, reqEditors); err != nil { - return nil, err - } - return c.Client.Do(req) -} - // NewGenAudioToTextRequestWithBody generates requests for GenAudioToText with any type of body func NewGenAudioToTextRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) { var err error @@ -1053,6 +1053,35 @@ func NewGenImageToImageRequestWithBody(server string, contentType string, body i return req, nil } +// NewGenImageToImageGenericRequestWithBody generates requests for GenImageToImageGeneric with any type of body +func NewGenImageToImageGenericRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return 
nil, err + } + + operationPath := fmt.Sprintf("/image-to-image-generic") + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", queryURL.String(), body) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", contentType) + + return req, nil +} + // NewGenImageToTextRequestWithBody generates requests for GenImageToText with any type of body func NewGenImageToTextRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) { var err error @@ -1329,35 +1358,6 @@ func NewGenUpscaleRequestWithBody(server string, contentType string, body io.Rea return req, nil } -// NewGenImageToImageGenericRequestWithBody generates requests for GenImageToImageGeneric with any type of body -func NewGenImageToImageGenericRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) { - var err error - - serverURL, err := url.Parse(server) - if err != nil { - return nil, err - } - - operationPath := fmt.Sprintf("/image-to-image-generic") - if operationPath[0] == '/' { - operationPath = "." + operationPath - } - - queryURL, err := serverURL.Parse(operationPath) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("POST", queryURL.String(), body) - if err != nil { - return nil, err - } - - req.Header.Add("Content-Type", contentType) - - return req, nil -} - func (c *Client) applyEditors(ctx context.Context, req *http.Request, additionalEditors []RequestEditorFn) error { for _, r := range c.RequestEditors { if err := r(ctx, req); err != nil { @@ -1416,6 +1416,9 @@ type ClientWithResponsesInterface interface { // GenImageToImageWithBodyWithResponse request with any body GenImageToImageWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*GenImageToImageResponse, error) + // GenImageToImageGenericWithBodyWithResponse request with any body + GenImageToImageGenericWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*GenImageToImageGenericResponse, error) + // GenImageToTextWithBodyWithResponse request with any body GenImageToTextWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*GenImageToTextResponse, error) @@ -1447,9 +1450,6 @@ type ClientWithResponsesInterface interface { // GenUpscaleWithBodyWithResponse request with any body GenUpscaleWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*GenUpscaleResponse, error) - - // GenImageToImageGenericWithBodyWithResponse request with any body - GenImageToImageGenericWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*GenImageToImageGenericResponse, error) } type GenAudioToTextResponse struct { @@ -1572,6 +1572,32 @@ func (r GenImageToImageResponse) StatusCode() int { return 0 } +type GenImageToImageGenericResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *ImageResponse + JSON400 *HTTPError + JSON401 *HTTPError + JSON422 *HTTPValidationError + JSON500 *HTTPError +} + +// Status returns HTTPResponse.Status +func (r GenImageToImageGenericResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r 
GenImageToImageGenericResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + type GenImageToTextResponse struct { Body []byte HTTPResponse *http.Response @@ -1781,32 +1807,6 @@ func (r GenUpscaleResponse) StatusCode() int { return 0 } -type GenImageToImageGenericResponse struct { - Body []byte - HTTPResponse *http.Response - JSON200 *ImageResponse - JSON400 *HTTPError - JSON401 *HTTPError - JSON422 *HTTPValidationError - JSON500 *HTTPError -} - -// Status returns HTTPResponse.Status -func (r GenImageToImageGenericResponse) Status() string { - if r.HTTPResponse != nil { - return r.HTTPResponse.Status - } - return http.StatusText(0) -} - -// StatusCode returns HTTPResponse.StatusCode -func (r GenImageToImageGenericResponse) StatusCode() int { - if r.HTTPResponse != nil { - return r.HTTPResponse.StatusCode - } - return 0 -} - // GenAudioToTextWithBodyWithResponse request with arbitrary body returning *GenAudioToTextResponse func (c *ClientWithResponses) GenAudioToTextWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*GenAudioToTextResponse, error) { rsp, err := c.GenAudioToTextWithBody(ctx, contentType, body, reqEditors...) @@ -1852,6 +1852,15 @@ func (c *ClientWithResponses) GenImageToImageWithBodyWithResponse(ctx context.Co return ParseGenImageToImageResponse(rsp) } +// GenImageToImageGenericWithBodyWithResponse request with arbitrary body returning *GenImageToImageGenericResponse +func (c *ClientWithResponses) GenImageToImageGenericWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*GenImageToImageGenericResponse, error) { + rsp, err := c.GenImageToImageGenericWithBody(ctx, contentType, body, reqEditors...) + if err != nil { + return nil, err + } + return ParseGenImageToImageGenericResponse(rsp) +} + // GenImageToTextWithBodyWithResponse request with arbitrary body returning *GenImageToTextResponse func (c *ClientWithResponses) GenImageToTextWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*GenImageToTextResponse, error) { rsp, err := c.GenImageToTextWithBody(ctx, contentType, body, reqEditors...) @@ -1956,15 +1965,6 @@ func (c *ClientWithResponses) GenUpscaleWithBodyWithResponse(ctx context.Context return ParseGenUpscaleResponse(rsp) } -// GenImageToImageGenericWithBodyWithResponse request with arbitrary body returning *GenImageToImageGenericResponse -func (c *ClientWithResponses) GenImageToImageGenericWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*GenImageToImageGenericResponse, error) { - rsp, err := c.GenImageToImageGenericWithBody(ctx, contentType, body, reqEditors...) 
- if err != nil { - return nil, err - } - return ParseGenImageToImageGenericResponse(rsp) -} - // ParseGenAudioToTextResponse parses an HTTP response from a GenAudioToTextWithResponse call func ParseGenAudioToTextResponse(rsp *http.Response) (*GenAudioToTextResponse, error) { bodyBytes, err := io.ReadAll(rsp.Body) @@ -2165,22 +2165,22 @@ func ParseGenImageToImageResponse(rsp *http.Response) (*GenImageToImageResponse, return response, nil } -// ParseGenImageToTextResponse parses an HTTP response from a GenImageToTextWithResponse call -func ParseGenImageToTextResponse(rsp *http.Response) (*GenImageToTextResponse, error) { +// ParseGenImageToImageGenericResponse parses an HTTP response from a GenImageToImageGenericWithResponse call +func ParseGenImageToImageGenericResponse(rsp *http.Response) (*GenImageToImageGenericResponse, error) { bodyBytes, err := io.ReadAll(rsp.Body) defer func() { _ = rsp.Body.Close() }() if err != nil { return nil, err } - response := &GenImageToTextResponse{ + response := &GenImageToImageGenericResponse{ Body: bodyBytes, HTTPResponse: rsp, } switch { case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: - var dest ImageToTextResponse + var dest ImageResponse if err := json.Unmarshal(bodyBytes, &dest); err != nil { return nil, err } @@ -2200,13 +2200,6 @@ func ParseGenImageToTextResponse(rsp *http.Response) (*GenImageToTextResponse, e } response.JSON401 = &dest - case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 413: - var dest HTTPError - if err := json.Unmarshal(bodyBytes, &dest); err != nil { - return nil, err - } - response.JSON413 = &dest - case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 422: var dest HTTPValidationError if err := json.Unmarshal(bodyBytes, &dest); err != nil { @@ -2226,22 +2219,22 @@ func ParseGenImageToTextResponse(rsp *http.Response) (*GenImageToTextResponse, e return response, nil } -// ParseGenImageToVideoResponse parses an HTTP response from a GenImageToVideoWithResponse call -func ParseGenImageToVideoResponse(rsp *http.Response) (*GenImageToVideoResponse, error) { +// ParseGenImageToTextResponse parses an HTTP response from a GenImageToTextWithResponse call +func ParseGenImageToTextResponse(rsp *http.Response) (*GenImageToTextResponse, error) { bodyBytes, err := io.ReadAll(rsp.Body) defer func() { _ = rsp.Body.Close() }() if err != nil { return nil, err } - response := &GenImageToVideoResponse{ + response := &GenImageToTextResponse{ Body: bodyBytes, HTTPResponse: rsp, } switch { case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: - var dest VideoResponse + var dest ImageToTextResponse if err := json.Unmarshal(bodyBytes, &dest); err != nil { return nil, err } @@ -2261,6 +2254,13 @@ func ParseGenImageToVideoResponse(rsp *http.Response) (*GenImageToVideoResponse, } response.JSON401 = &dest + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 413: + var dest HTTPError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON413 = &dest + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 422: var dest HTTPValidationError if err := json.Unmarshal(bodyBytes, &dest); err != nil { @@ -2280,22 +2280,22 @@ func ParseGenImageToVideoResponse(rsp *http.Response) (*GenImageToVideoResponse, return response, nil } -// ParseGenLiveVideoToVideoResponse parses an HTTP response from a GenLiveVideoToVideoWithResponse call 
-func ParseGenLiveVideoToVideoResponse(rsp *http.Response) (*GenLiveVideoToVideoResponse, error) { +// ParseGenImageToVideoResponse parses an HTTP response from a GenImageToVideoWithResponse call +func ParseGenImageToVideoResponse(rsp *http.Response) (*GenImageToVideoResponse, error) { bodyBytes, err := io.ReadAll(rsp.Body) defer func() { _ = rsp.Body.Close() }() if err != nil { return nil, err } - response := &GenLiveVideoToVideoResponse{ + response := &GenImageToVideoResponse{ Body: bodyBytes, HTTPResponse: rsp, } switch { case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: - var dest LiveVideoToVideoResponse + var dest VideoResponse if err := json.Unmarshal(bodyBytes, &dest); err != nil { return nil, err } @@ -2334,22 +2334,22 @@ func ParseGenLiveVideoToVideoResponse(rsp *http.Response) (*GenLiveVideoToVideoR return response, nil } -// ParseGenLLMResponse parses an HTTP response from a GenLLMWithResponse call -func ParseGenLLMResponse(rsp *http.Response) (*GenLLMResponse, error) { +// ParseGenLiveVideoToVideoResponse parses an HTTP response from a GenLiveVideoToVideoWithResponse call +func ParseGenLiveVideoToVideoResponse(rsp *http.Response) (*GenLiveVideoToVideoResponse, error) { bodyBytes, err := io.ReadAll(rsp.Body) defer func() { _ = rsp.Body.Close() }() if err != nil { return nil, err } - response := &GenLLMResponse{ + response := &GenLiveVideoToVideoResponse{ Body: bodyBytes, HTTPResponse: rsp, } switch { case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: - var dest LLMResponse + var dest LiveVideoToVideoResponse if err := json.Unmarshal(bodyBytes, &dest); err != nil { return nil, err } @@ -2388,22 +2388,22 @@ func ParseGenLLMResponse(rsp *http.Response) (*GenLLMResponse, error) { return response, nil } -// ParseGenSegmentAnything2Response parses an HTTP response from a GenSegmentAnything2WithResponse call -func ParseGenSegmentAnything2Response(rsp *http.Response) (*GenSegmentAnything2Response, error) { +// ParseGenLLMResponse parses an HTTP response from a GenLLMWithResponse call +func ParseGenLLMResponse(rsp *http.Response) (*GenLLMResponse, error) { bodyBytes, err := io.ReadAll(rsp.Body) defer func() { _ = rsp.Body.Close() }() if err != nil { return nil, err } - response := &GenSegmentAnything2Response{ + response := &GenLLMResponse{ Body: bodyBytes, HTTPResponse: rsp, } switch { case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: - var dest MasksResponse + var dest LLMResponse if err := json.Unmarshal(bodyBytes, &dest); err != nil { return nil, err } @@ -2442,22 +2442,22 @@ func ParseGenSegmentAnything2Response(rsp *http.Response) (*GenSegmentAnything2R return response, nil } -// ParseGenTextToImageResponse parses an HTTP response from a GenTextToImageWithResponse call -func ParseGenTextToImageResponse(rsp *http.Response) (*GenTextToImageResponse, error) { +// ParseGenSegmentAnything2Response parses an HTTP response from a GenSegmentAnything2WithResponse call +func ParseGenSegmentAnything2Response(rsp *http.Response) (*GenSegmentAnything2Response, error) { bodyBytes, err := io.ReadAll(rsp.Body) defer func() { _ = rsp.Body.Close() }() if err != nil { return nil, err } - response := &GenTextToImageResponse{ + response := &GenSegmentAnything2Response{ Body: bodyBytes, HTTPResponse: rsp, } switch { case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: - var dest ImageResponse + var dest MasksResponse if err := json.Unmarshal(bodyBytes, 
&dest); err != nil { return nil, err } @@ -2496,22 +2496,22 @@ func ParseGenTextToImageResponse(rsp *http.Response) (*GenTextToImageResponse, e return response, nil } -// ParseGenTextToSpeechResponse parses an HTTP response from a GenTextToSpeechWithResponse call -func ParseGenTextToSpeechResponse(rsp *http.Response) (*GenTextToSpeechResponse, error) { +// ParseGenTextToImageResponse parses an HTTP response from a GenTextToImageWithResponse call +func ParseGenTextToImageResponse(rsp *http.Response) (*GenTextToImageResponse, error) { bodyBytes, err := io.ReadAll(rsp.Body) defer func() { _ = rsp.Body.Close() }() if err != nil { return nil, err } - response := &GenTextToSpeechResponse{ + response := &GenTextToImageResponse{ Body: bodyBytes, HTTPResponse: rsp, } switch { case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: - var dest AudioResponse + var dest ImageResponse if err := json.Unmarshal(bodyBytes, &dest); err != nil { return nil, err } @@ -2550,22 +2550,22 @@ func ParseGenTextToSpeechResponse(rsp *http.Response) (*GenTextToSpeechResponse, return response, nil } -// ParseGenUpscaleResponse parses an HTTP response from a GenUpscaleWithResponse call -func ParseGenUpscaleResponse(rsp *http.Response) (*GenUpscaleResponse, error) { +// ParseGenTextToSpeechResponse parses an HTTP response from a GenTextToSpeechWithResponse call +func ParseGenTextToSpeechResponse(rsp *http.Response) (*GenTextToSpeechResponse, error) { bodyBytes, err := io.ReadAll(rsp.Body) defer func() { _ = rsp.Body.Close() }() if err != nil { return nil, err } - response := &GenUpscaleResponse{ + response := &GenTextToSpeechResponse{ Body: bodyBytes, HTTPResponse: rsp, } switch { case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: - var dest ImageResponse + var dest AudioResponse if err := json.Unmarshal(bodyBytes, &dest); err != nil { return nil, err } @@ -2604,15 +2604,15 @@ func ParseGenUpscaleResponse(rsp *http.Response) (*GenUpscaleResponse, error) { return response, nil } -// ParseGenImageToImageGenericResponse parses an HTTP response from a GenImageToImageGenericWithResponse call -func ParseGenImageToImageGenericResponse(rsp *http.Response) (*GenImageToImageGenericResponse, error) { +// ParseGenUpscaleResponse parses an HTTP response from a GenUpscaleWithResponse call +func ParseGenUpscaleResponse(rsp *http.Response) (*GenUpscaleResponse, error) { bodyBytes, err := io.ReadAll(rsp.Body) defer func() { _ = rsp.Body.Close() }() if err != nil { return nil, err } - response := &GenImageToImageGenericResponse{ + response := &GenUpscaleResponse{ Body: bodyBytes, HTTPResponse: rsp, } @@ -2675,6 +2675,9 @@ type ServerInterface interface { // Image To Image // (POST /image-to-image) GenImageToImage(w http.ResponseWriter, r *http.Request) + // Image To Image Generic + // (POST /image-to-image-generic) + GenImageToImageGeneric(w http.ResponseWriter, r *http.Request) // Image To Text // (POST /image-to-text) GenImageToText(w http.ResponseWriter, r *http.Request) @@ -2699,9 +2702,6 @@ type ServerInterface interface { // Upscale // (POST /upscale) GenUpscale(w http.ResponseWriter, r *http.Request) - // Image To Image Generic - // (POST /image-to-image-generic) - GenImageToImageGeneric(w http.ResponseWriter, r *http.Request) } // Unimplemented server implementation that returns http.StatusNotImplemented for each endpoint. 
@@ -2738,6 +2738,12 @@ func (_ Unimplemented) GenImageToImage(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNotImplemented) } +// Image To Image Generic +// (POST /image-to-image-generic) +func (_ Unimplemented) GenImageToImageGeneric(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotImplemented) +} + // Image To Text // (POST /image-to-text) func (_ Unimplemented) GenImageToText(w http.ResponseWriter, r *http.Request) { @@ -2786,12 +2792,6 @@ func (_ Unimplemented) GenUpscale(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNotImplemented) } -// Image To Image Generic -// (POST /image-to-image-generic) -func (_ Unimplemented) GenImageToImageGeneric(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusNotImplemented) -} - // ServerInterfaceWrapper converts contexts to parameters. type ServerInterfaceWrapper struct { Handler ServerInterface @@ -2880,6 +2880,23 @@ func (siw *ServerInterfaceWrapper) GenImageToImage(w http.ResponseWriter, r *htt handler.ServeHTTP(w, r.WithContext(ctx)) } +// GenImageToImageGeneric operation middleware +func (siw *ServerInterfaceWrapper) GenImageToImageGeneric(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + ctx = context.WithValue(ctx, HTTPBearerScopes, []string{}) + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.GenImageToImageGeneric(w, r) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) +} + // GenImageToText operation middleware func (siw *ServerInterfaceWrapper) GenImageToText(w http.ResponseWriter, r *http.Request) { ctx := r.Context() @@ -3016,23 +3033,6 @@ func (siw *ServerInterfaceWrapper) GenUpscale(w http.ResponseWriter, r *http.Req handler.ServeHTTP(w, r.WithContext(ctx)) } -// GenImageToImageGeneric operation middleware -func (siw *ServerInterfaceWrapper) GenImageToImageGeneric(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - ctx = context.WithValue(ctx, HTTPBearerScopes, []string{}) - - handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - siw.Handler.GenImageToImageGeneric(w, r) - })) - - for _, middleware := range siw.HandlerMiddlewares { - handler = middleware(handler) - } - - handler.ServeHTTP(w, r.WithContext(ctx)) -} - type UnescapedCookieParamError struct { ParamName string Err error @@ -3161,6 +3161,9 @@ func HandlerWithOptions(si ServerInterface, options ChiServerOptions) http.Handl r.Group(func(r chi.Router) { r.Post(options.BaseURL+"/image-to-image", wrapper.GenImageToImage) }) + r.Group(func(r chi.Router) { + r.Post(options.BaseURL+"/image-to-image-generic", wrapper.GenImageToImageGeneric) + }) r.Group(func(r chi.Router) { r.Post(options.BaseURL+"/image-to-text", wrapper.GenImageToText) }) @@ -3185,9 +3188,6 @@ func HandlerWithOptions(si ServerInterface, options ChiServerOptions) http.Handl r.Group(func(r chi.Router) { r.Post(options.BaseURL+"/upscale", wrapper.GenUpscale) }) - r.Group(func(r chi.Router) { - r.Post(options.BaseURL+"/image-to-image-generic", wrapper.GenImageToImageGeneric) - }) return r } @@ -3195,86 +3195,92 @@ func HandlerWithOptions(si ServerInterface, options ChiServerOptions) http.Handl // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9a2/ctrb2XyH0voATYMa3Nu2Bgf3BSdPEOHZq+LLbIjVmc6Q1GsYSqU1SHk9z/N8P", - 
"uChpSImai2u7Pd3zKWOJl3V91iK5xHyNYpEXggPXKjr6Gql4CjnFn8fnJ++lFNL8TkDFkhWaCR4dmTcE", - "zCsiQRWCKyC5SCDbjQZRIUUBUjPAMXKVdrtfTaHqnoNSNAXTTzOdQXQUnanU/DUvzB9KS8bT6OFhEEn4", - "d8kkJNHRZxz1ZtGlIbTpJ8ZfINbRwyA6LhMmLioqu6RcePSTiZCEmh4kBQ6SmlZdprAF/siynybR0eev", - "0f+XMImOov+3t5DmXiXKvTNIGL2+OI0ebgYBSVQzQWJn3u1wa6dz+fV4CjD9ViTzUQocG16JK7jXhtwe", - "LnySrotM0KSmhkxYBkQLMgaiJeWm5RgSI5OJkDnV0VE0ZpzKedSir6vEQZSDpgnV1M46oWVm+n99iNpy", - "OU4SZn7SjHwRY8K4nYwJXtFSUKUgMX/oKZCCFZAx7ttRPVeIDqPsEUt8OjpUfCzTlPGU/Ejj2kBOfiCl", - "mdgYSi2PoraSZmrbNAlNLUGXko80y0FpmhfKp0HLEjp0XGAfsuhjp596KiEa7vUuuSyLQkhjTXc0K0Ed", - "kR0FXAOPYWdAdmZCJjsDYsycWKLIWIgMKCevdszkO+bdzoRmCnZe75IfLGWEKVK9frUY7/Vu3ZLkQLki", - "XDhE7lazVe/M7+GYotYWbRypVVxeLSSzCgY6jhGy+yXucZLTFK4E/tP1j7RkCeUxjFRMM/DU9P3um7aO", - "3vNYlJKmoCpL0Q2GAGE5vogzoSCbk4zx24XxGr2RQoq80OTVlKVTkJXuSE7nREJSxtUQ5N8lzZiev3bl", - "9qGik1winQ2/vMzHIA2/rGawx9Pt2FoYytlkTmZMTzt+1e/uVn4BW8dxR0vkeNCV4w+QSkBiZlMWWzIW", - "CGkpZYoUpZqiCGdUJgpbMc40o5lts9umj6wWUyYkVSsg4Ziciotj8upUzIYXlN+S44QWGpHpdaV4yhPC", - "tCKxkDY6JsbLZsDSqUbHtUw4AYa8v6d5kcER+Up+izKqgethLLhiyjjafC+L86GhbqiS++y36Igc7O4P", - "yG8RB8m+qL2C3UM2pFIP67eHD64ATpGxZ8PBDj9rQiGHlGp2ByNr/CuIuFq4ySv1Gt2rZAmQ2ZRq8xfc", - "x1mZAJlIkQdEfJJyIY0FTYhvkOS3cn//m5gcuGR/qkgj55a0EPVlPrJ+PSpAhng4aLPwCU2NiEkNCC5G", - "FCAr9jxCypyc2MbnIDvkMK4htdaL9PAJSEDWNLRCy8H+fj89CXDBlNExdtwlZ0KC/U1KVdLMoBZQxKwK", - "oiooqlkZl5qoTMxAkoYKM0xSZui547mJN8BTPe3wV7cnl0h1iDtXvOtYxTKb7NepohPQ81E8hfjWE54J", - "fW3pnYM0mGgCKXYj2A1NUWmWI+5P2thlYKHMEpPCiMkEuDJGJiSZUplPyswl89KO+g6JaYitojVSC5B0", - "JXIJlVtKyhORE4tvPaIwjYPyrnXlSWF/97964FpMbCqySNNoUWRsEeQk1Dq2mnm1b94ceIHssp6zg82t", - "uF/UCrSBLZAAeJF9dQYQTpDXDpsN608WOZ8wQW1Usi4s/yE07p+yz+taul2l0jVzun+yBERXpZMWKH4X", - "WpBNJM1BISAriAVP0Ly9POTODO9y92MPbk0x7Htzvvk+OKttSRgnGM7VGpN+tIOH5l3bdpv4Q+34GD//", - "VKu1ZGyeTuTCtB6Ny/gWdJuKg8Pv22Rc1xMaFeNq0xBlRE5zUXJtFGDHbJZbbkKBOrOh0LyqYNb8zE3s", - "rHrOWJYZsGccX3VUeGabvUWiPcbc0C6YghEt01EPLO8fdvLUhgXsTGiSLMDYY9imy+Sjt/CoFh0SFOTj", - "DNPm3r424eWxBKpqvr0QjwQclynpB/jV6cvhm//D2cs2r6glMWNJy3oP9g+/DeEhttwIDn/Gsbuzbhhh", - "bOhYEmJOT8+6kWXKlBZy7kPf5xsXrasWIeii9yMtboG3bf47BynoPbmybUKC7cXezUL+GjmylkBzbxrc", - "A/LzOJqHTWuuNOSjZlc4QOclNiHBbeBBpCEvjP5LCS0M/H4xxJXTaM1cMmAORs1LrOAS0hy4PuZzPWU8", - "PeyaxFjcB7bOSYYwQr4lVEo6Jym7A06oIpSMxX29EVShLWp1YLzgl19/+ZXYmOza/Ftx37vz0p38pI76", - "yhL/2DhP1e2I8aLUQf7EbChBiazE0GYaE2zcYkrPCxYjNuOSnZJCwh0TpTI/EhZjb6YrdBkscmtEx4P7", - "j/c/k1cf//HzPw7ffIfAdHl85q0nzszMJ0jmX27vIy8zg+XqdiRK3QhySVQ4MSusEgYLCdrcQlZ7w1Oz", - "DDMD2s1hmo9ZWhphWtFbs1IDIiYauPkzKWPc/QWtQVY99ZRyE3cYTzNw1OBxVVNOfrKUh/ycG6PK2O8w", - "ioWQidqMvUIwrgn2ZJxqUE0a1Yy7WFhSngL5vD84uKlMBHtX8xK4LyDWtvkYbAMJyjw0j6z6EpabiCm4", - "8vOWai7yzvIQYtSdrOsMn+4PKy8Xk4qrShEtX5hNQQIBGlfkE2YUR179Mvj19SIGesspbNamzIF0JCyj", - "Y8gChJ3i8yav9UirqTkgjCcsRvlT0xRSKUqeVK1N1rfvNRnT+NZt0iXXTrvkWCQTKdMbWIvtpkjJh8YD", - "1FRkJs9F87RjEcaVNrmfmBgSEePwfeDo4dTO3tXzuhlEJyYsiR/XRbMf/shthyferX8aQCwtW8njd4VX", - "LAS+f/MftI25ljS3+5mr1h0b7x/Wzhnw33fTkt+G8p7YvMBlilEmeiVdHHV2qwh0tenYXfrgANV6B0d1", - "WfQ3wJzMuJ6pZ8z6dWdgpiE3BD04czRjNRNhGOtIUrsNDWGOLK2gAhL8cH79TuRFqeGETwIVCGdNKUYC", - "mjJj/h/Or0ls+7jFAF2hWvhqsC6ce9EvtqhlkSh+cYtH3FUV5ELORxMJ4HXAx+RH83hJNy00zQL9rvB5", - "sCPjLdLwQXBTiOYeTZ/M3yt3V41AuG3pEemzWsuoJsjRakt5YfVea5ax31FFq1RsNFsumhOlqWZKs1g9", - "UrkvrLH11DCIHB5HlSW73RyJkUq+wencYSzNfaNYytfYDlnXJEIcBAnyraVtCwGL+Xh1dd5TaWZerVlq", - "ZsFi/bKspmqsW5b1Q407dmYPcdryq6Z1mF6w08PrP2nGEhyu4bqPlRqcl3LSHs9BcstJCMZdatsDhOim", - "MplRiV5fyWKtajrj30sROy1Kk+/Zerqm7Ovca7OM+RYgOZx9KErSZ3JusrvWDlmVW3t7ZPWzVahbLBo2", - 
"8w4WjLumE5DyEmVcaqrVWmqQQLOhidaokGUga8hS9biPVEjb51tKsWT/dbVi2Q+opZfwj0AzPX1X59m+", - "RM1wpQqnZVPsSGyTOjVzKANe5obYn/47GkTvLy5+uogG0ckPp+9d8i7tBKsYruhw+XLIDnCFS8WNamdD", - "y5TA6rZHGO21hZudrq6wda3Mlsysyl4rWm5a/ZZV1jqn0BsJBlP6ZXLpXw0spIIVniuXAu0UvZWWhzgI", - "MHp6euYy6BMrnTeLPZT2YM7aBM8tRqUCz7XtcQa5Vuss2qQzvjOcw5lLcogjdgd4tlMd8ZxTSa1p+czF", - "gmspslEpsxVbIdcXp6hcVY6xCJfxlNwxSq4ki29xA1VoEYus2hhJcJ+sOgnOzEIZj7CGWgzbZ96kQOJc", - "Zb+zZJFrmYVEDHfGIzYguijHGVNTQ7Pt2096jUc1TFGekEykHnnv7Rg91K25t2SS5zYIEi2ILHlXbuaF", - "/fFFjHfJJ6FZDERjOeCUKcIUMWltQurJ68P+uqzSrtGFnoIkUpQa1AD3f5gmiQBFuNC2bsnMRElw48vW", - "AcA9jbV99kq9JgkUwBNFBPc5YXmRQQ5cVyVSPCE5Fn2M8Vx9wtJS0nEGqAnT81/WDP5FqEzL+sBmrcDY", - "2HYjbbO+b+2iVgWz2Bg0SGdDOFBQX/lLwLMqU1oYn59LK8245djYXqVeUepU2J06CTQ3Iq6G8ea0j/rM", - "qnY8CE99KUoZgzsr47HI/VmbMYj2TpMvm+fBydux1aPEF4kLUWEMWgOtNoo2y8GlG3w2hzxEs7peZvl0", - "z4hhLvBqUePYMshaG7Ge1qaJFn+eVb+0US8LwmdU3aqNbNn2rU+sewzYPTdq50+Szgak5M7R4eJgU5FX", - "tuvrBvrwJNSvnvdPhfxz8JWpbGc8FEFQ77GQfakxymNH2TCR4CmDbY5048GhP6UHZXbglV/VVYSpunkl", - "1ZsW7Uv1i5l4YHc8Ny9qZRq8ocyWkDmfdtGxKHWrygf7dRXO1WTWnebnKei6IM9OOKOKTDKappAQqsin", - "yx9/9s5tzDDrn0UYTZg39rjLrZ5sZlyrCiro12Zw49T29HXBQky5SRBoHINS9tO7Zr9vDSe2rqssKSg2", - "V5+orj49Xl+chlSJ6CtFXn2h00ulr7GX5rnNpWEmwOjTr+TwYEWts5azZzDrL3PticpD64ilu8wdPPNq", - "clDzeOP3XgYM5n1VkN+38Pr7fHD3lFXfnc/ZllR9b79g237B9vf9gu3Nf/QHbOQSzEJdA8HawcJu0mAt", - "Ge5j7PzPjjEN1Xz/PZ4vKsy25SJ/Wpl6B7/XLFPvFiZ3Q2hvnL0sAOJpX6D1uHAh65jkBk9UAfQWJEnA", - "rOylMjrODPhncwL3hQSFejNhgnJUdWL6QDyty16M0aGtmscJtiyYjtFzOkvp+i8ju3pqs4TVAFW6Zf6y", - "44f16AzyjF/TrUPJsmixSMqWhwhbsYv7G8um6s3XfHvxTCFgMCsPozMReyfRlM+r0/U2h187Nn3z4Mbw", - "uHWg2aSr1fcAi3wdb4UJyhAfLJoizeTKPF2Vuho+7FRVS8e11jgA33wXbvW+m/3ScFWiXn+XZ9p6a4UN", - "z8baa4T600VLxIqzsopUV2bL93oQoeNSMj2/NKRYPj9eXZ2/BSpBNpcNIazbR80gU62L6MGMwYL1Q8fV", - "B8ZxcyeMLDk5Pmn2/dyNvlN2B4XBkuMTclFyjhMZXLNj7e/u7+4bgYgCOC1YdBR9s3uwu2+0RfUUyd7D", - "q0aGWgxrJy6ECkXz5j4W5/ocW+xdrbZEUVnDSWKWEu27SozIQem3IpnXG7PAcSIb9anUeybsDutrdKya", - "VxlB6GKUB1/FJsbjA6tQZPtwf79FhSP1vS/Kxo/1SPAWiDh3K3CXuNiflBlZNBtE3z4hCYvCnMD8b2lC", - "Lqz07bwHLzPvNaelngrJfocEJz745mUmrpgl77k2afCVEOSUytRK/eDNS3G/SFgRqSyWGxIOD5+UhE6R", - "VJeYRRPSFFK9eSn7O+EaJKcZuQR5B7KmwIFRjLkugH6+ebgZRKrMcyrn9b1b5EqQOjWgqTLYXYcSg973", - "Q5tiUTUfcprDUNyBlCxB5PfQYRDtTau6l70ahVNAEfgg5hYtRc+IIKHiqHWB5MGVUz2QrQ7zOW1Kn5ay", - "WhcCPTuvdqI/xmU9hmETC3762bOvn5Mvp+LocVxZEpEbXFqZoNx8MhOOysdFkc3r72a8CyqUPdovpDBJ", - "lrNY64Tp1o0izxynvdleOFD7NVDbSN0fqbcRatMIZT9AvhKk+QptwxDFfMdwQWCNzBw3rCwOrE7M/Qtn", - "Xsbh/4zEPFQQuPX6v3h+voWeR0PPI5Nj5nmoCzx3zV1TQeT5ELphaaOko76R5GUwyM72wiDkbyZt4Web", - "dDyD5zc3+zzO9WvHGER7GbuDoV/xuGr5EVx4ONXMtnbPvTFRl5JDQoAneJ2CCkJEu/huKUw8Xkc9hasv", - "jBK9lYZbwNgCxtMBhjEzCxZ/BDWytmda5MjyNVIFPGsssZ6BkozytDQQ1hzld1EA76paz/Hvh7PZbIh5", - "Qikz4LFI7EH6ZtmCmfKl3d/5wGfr8VuPf0KPt3e9berhWW6duipOH9Lq4p/hYb+PV3cEVaXQ+D0Y5UvW", - "AIE7hZ55HdCZ8YXd3C8y3zr61tGfztFr76uNmxw+wu9V10EG0Z6J2WscRnxo1SjjboBTkhxO853ar2fK", - "8LvVZdtzh63b/03cHuvq/sCxg3bcz3N2W6G31uaf38X977Xs/4pUfy1cbwvqRS0g5YlTlOn9n1M9SGGr", - "/p4VKrzCwhfGCv9/QNtixRYrnh4rGhd6HFhU3REtSueuzyBMVPcNNisBMp7XV+rjR5JakcWVykG3X9xY", - "+Myrg3qibXaw9fi/icc7t31u6Oql6wwKCVA4Xeu65boC+V0myoS8E3lecqbn5APVMKPzqPokGOue1dHe", - "XiKB5sPUvt3Nqu67semOhfY9419qzCr6hm0GUthujxZsbwya7jX8Ptw8/G8AAAD//5bIT20SdgAA", + "H4sIAAAAAAAC/+xde3Pbtpb/KhjuzjiZkWTZbdpdz9w/nDRNPOukHj9u20k9uhB5RCEmAV4AtKxm/d13", + "cMAHSIJ6uLbbTfVXZBKP8/ydA+AQ+RKEIs0EB65VcPQlUOEcUoo/j89O3koppPkdgQolyzQTPDgybwiY", + "V0SCygRXQFIRQTIKBkEmRQZSM8AxUhV3u1/OoeieglI0BtNPM51AcBR8ULH5a5mZP5SWjMfB/f0gkPDv", + 
"nEmIgqNPOOp13aUitOonpp8h1MH9IDjOIybOCyq7pJw36CczIQk1PUgMHCQ1rbpMYQv8kSQ/zYKjT1+C", + "/5QwC46C/9ivpblfiHL/A0SMXp2fBvfXA48kipkgsjOPOtza6Vx+Gzx5mH4touUkBo4NL8Ul3GlDbg8X", + "TZKuskTQqKSGzFgCRAsyBaIl5ablFCIjk5mQKdXBUTBlnMpl0KKvq8RBkIKmEdXUzjqjeWL6f7kP2nI5", + "jiJmftKEfBZTwridjAle0JJRpSAyf+g5kIxlkDDetKNyLh8dRtkTFjXp6FDxPo9jxmPyIw1LAzn5geRm", + "YmMopTyy0kqqqW3TyDe1BJ1LPtEsBaVpmqkmDVrm0KHjHPuQuo+dft5QCdFwp0fkIs8yIY013dIkB3VE", + "9hRwDTyEvQHZWwgZ7Q2IMXNiiSJTIRKgnLzYM5PvmXd7M5oo2Hs5Ij9YyghTpHj9oh7v5ahsSVKgXBEu", + "HCJHxWzFO/N7OKWotbqNI7WCy8taMutgoOMYPrtf4R4nKY3hUuA/Xf+IcxZRHsJEhTSBhpq+H71q6+gt", + "D0UuaQyqsBRdYQgQluKLMBEKkiVJGL+pjdfojWRSpJkmL+YsnoMsdEdSuiQSojwshiD/zmnC9PKlK7d3", + "BZ3kAums+OV5OgVp+GUlgz2ebsfWwlDOZkuyYHre8at+d7fy89g6jjtZIceDrhx/gFgCErOYs9CSUSOk", + "pZQpkuVqjiJcUBkpbMU404wmts2oTR9ZL6ZESKrWQMIxORXnx+TFqVgMzym/IccRzTQi08tC8ZRHhGlF", + "QiFtdIyMly2AxXONjmuZcAIMeXtH0yyBI/KF/BYkVAPXw1BwxZRxtOV+EqZDQ91QRXfJb8ERORiNB+S3", + "gINkn9V+xu4gGVKph+Xbw3tXAKfI2JPhYIefDaGQQ0w1u4WJNf41RFzWbvJCvUT3ylkEZDGn2vwFd2GS", + "R0BmUqQeEZ/EXEhjQTPSNEjyWz4efxOSA5fsjwVp5MyS5qM+TyfWrycZSB8PB20WPqKpETErAcHFiAxk", + "wV6DkDwlJ7bxGcgOOYxriK31Ij18BhKQNQ2t0HIwHvfTEwEXTBkdY8cR+SAk2N8kVzlNDGoBRcwqIKqA", + "opKVaa6JSsQCJKmoMMNEeYKeO12aeAM81vMOf2V7coFU+7hzxbuJVayyyX6dKjoDvZyEcwhvGsIzoa8t", + "vTOQBhNNIMVuBLuhKSrNUsT9WRu7DCzkSWRSGDGbAVfGyIQkcyrTWZ64ZF7YUd8gMRWxRbRGagGirkQu", + "oHBLSXkkUmLxrUcUprFX3qWuGlIYj/6rB67FzKYidZpGsyxhdZCTUOrYaubF2Lw5aASyi3LODja34n5W", + "KtAGNk8C0IjsG2YA74yEWNhNBELBtRRJHciARy25/Lcvsc9AhsC1YdfIR2iaFC5FtRPd3tjhP4ImSgvz", + "NsuSJeOxK5uiUR3F3vLIF8MKWjnoSWgijyGH8dgTe8e+2KtBpoyDInOxIGkezsu4pQWhSrGYlwp1RyeM", + "Z7lWHno5aMNf3bI3+K7IEoJP341eDcjBeDS+Dv76eVcr//kT8q6/YSJD1c2kR9wfqLqpRR2VNl54oISY", + "Ca6Mh1JeN5uxJCGMWzZ5RhnXhnlN1c1qpeBsvZrZJVx/n4Qr+PTNeEAOX3Ux66+ed9Uy36Vdu7SrL+1y", + "0GxNBlZmVusTMf9O5cZxtBLGo4XSR9wprJS0KVz/IZTun7LPD1vaXpdbb7i59k8WgeiqdNYCy+98CfRM", + "0hQUArUCk3CiwTc2hG7N8C53P/YsIOeYtjTmfPW9d1bb0gR/TEfUBpO+t4P75t3Ydqu4RO34GFf/VKu1", + "ZGyfZqTCtJ5M8/AGdJuKg8Pv22RclRM21hRG5DQVOddGAXbMat/bTTRQZzY2mlcF8JqfqQmmRc+FSeim", + "YNRqXnVU+ME2e41ENxhzQ75gCiY0jyc9QD0+7OTZFQvYmdAoquG5uYjCfUvyvrESKVYhEhSk0wSXG719", + "bcLOQwlUlXw3Yj4ScJzHpB/y16c1h6/+H28j7TKNUhILFrWs92B8+K0PD7HlVnD4M47dnXXLCGNDx4oQ", + "c3r6oRtZ5kxpIZetVPzaReuihXcdeTfR4gZ42+a/c1d4d+TStvEJthd7twv5G2TNWgJNG9PgYVwzs6Op", + "37SWSkM6qY7nPXReYBPiPY8fBBrSzOg/l+1tpe/rIS6dRhtmlx5zMGpeYQUXEKfA9TFf6jnj8WHXJKbi", + "zlPDQBKEEfItoVLSJYnZLXBCFaFkKu7KnaECbVGrA+MFv/z6y6/ExmTX5l+Luy22gE7KqK8s8Q+N87jt", + "wbNce/kTi6EEJZIcQ1uK+yCmcYspvcxYiNiMS3lKMgm3TOTK/IhYiL2ZLtBlUOfWiI4Hd+/vfiYv3v/j", + "538cvvoOgeni+ENjhWH3RJDMv9yeSJonBsvVzUTkuhLkiqhwYtZcOQxqCdrcQhaH9HOzMDMD2lN6mk5Z", + "nBthWtFbs1IDImYauPkzykM8hgetQRY99ZxyE3cYjxNw1NDgqqSc/GQp9/k5N0aVsN9hEgohI7Ude5lg", + "XBPsyTjVoKo0qhq3XmpSHgP5NB4cXBcmgr2LeQncZRBq23wKtoEEZR6aR1Z9EUtNxBRcNfOWYi7yxvLg", + "Y9SdrOsMH+8OCy8Xs4KrQhEtX1jMQQIBGhbkE2YUR178Mvj1ZR0DG8spbNamzIF0JCyhU0g8hJ3i8yqv", + "bZBWUnNAGI9YiPKnpinEUuQ8KlqbrG/caDKl4Y3bpEuunXZFfUoiYqa3sBbbTZGcD40HqLlITJ6L5mnH", + "IowrbXI/MTMkIsbhe08NyKmdvavnTTOITkxYET+usurI4YHbDo+8ff84gJhbtqKH7xavWQh8/+pvdJ68", + "kTR3O5zr1h1bH+SWzunx3zfznN/48p7QvMBlilEmeiWta8665Zy62HTsLn1wgGK9g6O6LDY3wJzMuJyp", + "Z8zydWdgpiE1BN07c1RjVRNhGOtIUrsNDWGOLK2gPBJ8d3b1RqRZruGEzzyloB+qmtgINGXG/N+dXZHQ", + "9nGrMrtCtfBVYZ0/96KfbXVxnSh+dqt43VUVpEIuJzMJ0OiAj8mP5vGKbnj27ul3ic+9HRlvkYYPvJtC", + "NG3Q9NH8vXZ31QiE25YNIpusljIqCXK02lKeX71XmiXsd1TROhUbzeZ1c6I01UxpFqoHKveZNbaZGgaB", + "w+OksGS3myMxUsjXO507jKW5bxRL+QbbIZuahI8DL0FNa2nbgsdi3l9envWU/JtXG9b8W7DYvD6+Kt/v", + 
"1sf/UOKOnbmBOG35FdM6TNfs9PD6T5qwCIeruO5jpQTnlZy0x3OQ3HLig3GX2vYAPrqpjBZUotcXstjo", + "swbj3ysRO85yk+/ZDxuq+vuzRptVzLcAyeHsXZaTPpNzk92NdsiK3LqxR1Y+W4e6Wd2wmndQM+6ajkfK", + "K5RxoalWG6lBAk2GJlqjQlaBrCFLleM+UCFtn28pxZL919WKZd+jll7C3wNN9PxNmWc3JWqGy5U/LZtj", + "R2KblKmZQxnwPDXE/vQ/wSB4e37+03kwCE5+OH3rkndhJ1jHcEGHy5dDtocrXCpu9RGTb5niWd32CKO9", + "tnCz0/WfOrlWZktp1mWvBS3XrX6rPnFyTqG3Egym9Kvk0r8aqKWCn9qsXQq0U/RWWu7jwMPo6ekHl8Em", + "sdJ5U++htAdz1iZ4bjHJFTRc2x5nkCu1yaJNOuM7wzmcuST7OGK3gGc7xRHPGZXUmpa/7jaXyZqtkKvz", + "U1Suyqf4NRTjMblllFxKFt7gBqrQIhRJsTES4T5ZcRKcmIUyHmENtRi2z7xJhsT5SnGvZOITMdwaj9iC", + "6CyfJkzNDc22bz/pJR6VMEV5RBIRN8h7a8fooW7DvSWTPLdBkGhBZM67cjMv7I/PYjoiH4VmIRCNZYJz", + "pghTxKS1ESknLw/7y7JQu0YXeg6SSJFrUAPc/2GaRAIU4ULbSiYsQybejS9bBwB3NNT22Qv1kkSQAY8U", + "EbzJCUuzBFLguiia4hFJsehjiufqMxbnkk4TQE2Ynv+yZvAvQmWclwc2GwXGyrYraZv1fWsXtfhyCRuD", + "BulsCHu+bCz8xeNZhSnVxtfMpZVm3HJsbK9Qr8h1LOxOnQSaGhEXwzTmtI/6zKp0PPBPfSFyGYI7K+Oh", + "SJuzVmMQ3ThNvqieeydvx9YGJU2RuBDlx6AN0GqraLMaXLrBZ3vIQzQr62VWT/eEGOYCrxYljq2CrI0R", + "63Ftmmjx51n1cxv1qiD8gaobtZUt277liXWPAbvnRu38SdLFgOTcOTqsDzYVeWG7vqygD09Cm9X/zVOh", + "5jn42lS2Mx6KwKv3UMi+1BjlsadsmIjwlME2R7rx4LA5ZQPK7MBrrzcoCFNl80Kq1y3aV+oXM3HP7nhq", + "XpTKNHhDWfENj1NFPBW5blX5YL+uwrmaLbrT/DwHXRbk2QkXVJFZQuMYIkIV+Xjx48+NcxszzOZnEUYT", + "5o097nKrJ6sZN6qC8vq1Gdw4tT19rVkIKTcJAg1DUMregVDt923gxNZ1lSUFxebqE9XVp8er81OfKhF9", + "pUiLT1l6qWxq7Ll5bnNpmPEw+vgrOTxYUZus5ewZzObLXHuict86YukucwdPvJoclDxeN3uvAgbzvqjL", + "71t4fT03Hzxm1XfnXoEVVd+7qwR2X7Z9vV+2vfpb3yRALsAs1DUQrB3M7CYN1pLhPsbe/+4Z01DVRTzT", + "ZV1htisX+dPK1Dv4vWGZercwuRtCe+PsRQYQzvsCbYMLF7KOSWrwRGVAb0CSCMzKXiqj48SAf7IkcJdJ", + "UKg3EyYoR1VHpg+E87LsxRgd2qp5HGHLjOkQPaezlC7/MrIrpzZLWA1QpFvmLzu+X4/OIE/4Nd0mlKyK", + "FnVStjpE2Ipd3N9YNVVvvta0l4YpeAxm7WF0IsLGSTTly+J0vc3hl45NX9+7MTxsHWhW6WrxPUCdr+P1", + "fF4Z4oO6KdJMLs3Tdamr4cNOVbR0XGuDA/Dtd+HW77vZLw3XJerld3mmbWOtsOXZWHuNUH66aIlYc1ZW", + "kOrKbPVeDyJ0mEumlxeGFMvn+8vLs9dAJcjq1keEdfuoGmSudRbcmzGYt37ouPjkOKwu55M5J8cn1b6f", + "u9F3ym4hM1hyfELOc85xIoNrdqzxaDwaG4GIDDjNWHAUfDM6GI2NtqieI9n7eOfbUIth6cSZUL5oXl2M", + "59xjaIu9i9WWyAprOInMUqJ9aZwROSj9WkTLcmMWOE5koz6Vet+E3WF5n6FV8zoj8N1Qd99UsYnx+MAq", + "FNk+HI9bVDhS3/+sbPzYjITGAhHnbgXuHBf7szwhdbNB8O0jklAX5njmf00jcm6lb+c9eJ55rzjN9VxI", + "9jtEOPHBN88zccEsecu1SYMvhSCnVMZW6gevnov7OmFFpLJYbkg4PHxUEjpFUl1i6iakKqR69Vz2d8I1", + "SE4TcgHyFmRJgQOjGHNdAP10fX89CFSeplQuywtQyaUgZWpAY2WwuwwlBr3vhjbFomo55DSFobgFKVmE", + "yN9Ah0GwPy/qXvZLFI4BRdAEMbdoKXhCBPEVR20KJPeunMqBbHVYk9Oq9Gklq2Uh0JPzaif6Y1yWYxg2", + "seCnnz37+in5ciqOHsaVJRG5waWVCcrVJzP+qHycZcmy/G6mcUGFskf7mRQmyXIWa50w3bra7YnjdGO2", + "Zw7UzRqoXaTuj9S7CLVthLIfIF+K+qqyLUMUazpGBwSGsXOd4mOAAaFhiN+hxtUNGHPBQlumRNWNGhA2", + "gtGAiFyX97UNnLvbBkTdgA7nh5tBS31n0TMiTDnpDmh2QPNVAg1xrgL7A4BT+4mLOxvsCOBGuYWT9RsC", + "zYuungcG/owNAV8h8g4E/uL7AjskejASPXBRzhoe6gLPbXXHnRd53vludttqsVPehPQ8GGRne2YQam5i", + "7+Bnl4M8gedXN4o9zPVLxxgE+wm7hWGz0nrdSse7xnG+orA1w+7drTqXHCICPMJrXJQXItpFvyth4uE6", + "6imYf2aU6K1w3gHGDjAeDzCMmVmw+COokbQ90yJHkm6QKmCNQ451VJQklMe5gbCqhKiLAnhH3maOfzdc", + "LBZDzBNymQAPRWQLeLbLFsyUz+3+zoeFO4/fefwjery9Y3JbD09S69TFRzFDWlw4Njzs9/HibrLiEwz8", + "DrX8nye8ru25y+yJ1wGdGZ/ZzZsft+wcfefoj+fopfeVxk0OH+D3qusgg2DfxOwNDkHftb6NwN0A51MI", + "f5rv1Jw+UYbfrWrdHUPs3P4rcXus5/0Dx53acb+Gs9vK4I02/5pd3P9f2f63uOUtBeW2oK5rkCmPnGLw", + "xn863IMUttr4SaGiUdD8zFjR/C+wd1ixw4rHx4rKhR4GFkV3RIvcuWPYCxPFPaf1/0E3XZb/lQd+nK0V", + "qa9y97p9fVPqE68Oyol22cHO478Sj3duGd7S1XPXGRQSoHC61jXv5ZcPbxKRR+SNSNOcM70k76iGBV0G", + "xVUE+L2FOtrfjyTQdBjbt6Ok6D4KTXf8wKdn/AuNWUXfsNVACtvt04ztT0HT/Yrf++v7/wsAAP//DDfM", + "rxOEAAA=", } // GetSwagger 
returns the content of the embedded swagger specification file From 7cd8af653ab5fce72727620e48a773028fa30073 Mon Sep 17 00:00:00 2001 From: RUFFY-369 Date: Sun, 29 Dec 2024 20:15:03 +0530 Subject: [PATCH 04/10] chore:make codegen --- worker/runner.gen.go | 167 ++++++++++++++++++++++--------------------- 1 file changed, 86 insertions(+), 81 deletions(-) diff --git a/worker/runner.gen.go b/worker/runner.gen.go index 1a06c48d..4414682e 100644 --- a/worker/runner.gen.go +++ b/worker/runner.gen.go @@ -3204,87 +3204,92 @@ func HandlerWithOptions(si ServerInterface, options ChiServerOptions) http.Handl // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+xdeW/jtrb/KoTeA5IB7GzttA8B7h+ZpTPBTaZBljst2sCXlo5lTiRSJakk7rx89wcu", - "kkiJsuU0Sft6/dc4Epez/s4heaj5GsUsLxgFKkV0+DUS8RxyrH8enR2/55xx9TsBEXNSSMJodKjeIFCv", - "EAdRMCoA5SyBbCcaRQVnBXBJQI+Ri7Tb/XIOtnsOQuAUVD9JZAbRYXQqUvXXolB/CMkJTaOHh1HE4beS", - "cEiiw1/0qNdNl5rQuh+bfoFYRg+j6KhMCDu3VHZJOffoRzPGEVY9UAoUOFatukzpFvpHlv04iw5/+Rr9", - "N4dZdBj9124jzV0ryt1TSAi+Oj+JHq5HAUnYmSAxM+90uDXTufx6PAWYfsOSxSQFqhtesku4l4rcHi58", - "kq6KjOGkogbNSAZIMjQFJDmmquUUEiWTGeM5ltFhNCUU80XUoq+rxFGUg8QJltjMOsNlpvp/fYjacjlK", - "EqJ+4gx9YVNEqJmMMGppKbAQkKg/5BxQQQrICPXtqJorRIdS9oQkPh0dKj6WaUpoin7AcWUgx+9QqSZW", - "hlLJo6ispJ7aNE1CU3OQJacTSXIQEueF8GmQvIQOHee6D2r6mOnnnkqQhHu5gy7KomBcWdMtzkoQh2hL", - "AJVAY9gaoa07xpOtEVJmjgxRaMpYBpii7S01+ZZ6tzXDmYCtVzvonaEMEYHs6+1mvFc7VUuUA6YCUeYQ", - "uWNns+/U7/EUa601bRypWS4vG8msgoGOY4Tsfol7HOc4hUum/+n6R1qSBNMYJiLGGXhq+n7ndVtH72nM", - "So5TENZSZI0hgEiuX8QZE5AtUEboTWO8Sm+o4CwvJNqek3QO3OoO5XiBOCRlbIdAv5U4I3LxypXbB0sn", - "utB01vzSMp8CV/ySisEeTzdjS6YoJ7MFuiNy3vGrfnc38gvYuh53skSO+105voOUgybmbk5iQ0aDkIZS", - "IlBRirkW4R3midCtCCWS4My02WnTh1aLKWMcixWQcIRO2PkR2j5hd+NzTG/QUYILqZHplVU8pgkiUqCY", - "cRMdE+Vld0DSudSOa5hwAgx6f4/zIoND9BX9GmVYApXjmFFBhHK0xW4W52NF3Vgk99mv0SHa39kboV8j", - "Cpx8EbsFuYdsjLkcV28PHlwBnGjGng0HO/wMhEIKKZbkFibG+FcQcdm4ybZ4pd2rJAmguzmW6i+4j7My", - "ATTjLA+I+DiljCsLmiHfINGv5d7eNzHad8n+ZElDZ4a0EPVlPjF+PSmAh3jYb7PwSZsaYrMKEFyMKIBb", - "9jxCyhwdm8ZnwDvkECohNdar6aEz4KBZk9AKLft7e/30JEAZEUrHuuMOOmUczG9UihJnCrUAa8yyEGWh", - "qGJlWkokMnYHHNVUqGGSMtOeO12oeAM0lfMOf1V7dKGpDnHnineIVSyzyX6dCjwDuZjEc4hvPOGp0NeW", - "3hlwhYkqkOpuSHfTpigkyTXuz9rYpWChzBKVwrDZDKhQRsY4mmOez8rMJfPCjPpWE1MTa6O1phYg6Urk", - "AqxbckwTliODbz2iUI2D8q505Ulhb+d/euCazUwq0qRpuCgy0gQ5DpWOjWa299SbfS+QXVRzdrC5FfeL", - "SoEmsAUSAC+yr84Awgny4LBZs/5kkfMJE9RaJUNh+Q+hcf+UfV7X0u0qlQ7M6f5FEmBdlc5aoPhdaEE2", - "4zgHoQFZQMxoos3by0Nu1fAudz/04NZch31vztffB2c1LRGhSIdzMWDSj2bw0LyDbbeOP9iMr+Pnn2q1", - "hoz104mcqdaTaRnfgGxTsX/wfZuMq2pCpWK92lREKZHjnJVUKgWYMevllptQaJ2ZUKheWZhVP3MVO23P", - "O5JlCuwJ1a86Kjw1zd5ooj3G3NDOiIAJLtNJDyzvHXTy1JoF3RnhJGnA2GPYpMvoo7fwsIsODgLyaabT", - "5t6+JuGlMQcsKr69EK8JOCpT1A/wq9OXg9f/j7OXTV5RSeKOJC3r3d87+DaEh7rlWnD4WY/dnXXNCGNC", - "x5IQcwFpDlQe0YWcE5oedMPMlN0HNk1Rpg0IfYsw53iBUnILFGGBMJqy+2oLwPqZxsWR4v+nn3/6GRk0", - "drl9w+5719zdyY8rvBeG+MciPBY3E0KLUgb5Y3djDoJlpQY11Rjpxi2m5KIgsfZKvVjDqOBwS1gp1I+E", - "xLo3kdauRk1Wpf1i//7j/We0/fEfn/9x8Po7bZIXR6deJnmqZj7WZP7lVr15mSkvFjcTVspakEvw4Fjl", - "1iWMGgmaqMLtruBcJeBqQLMtiPMpSUslTCN6Y1ZihNhMAlV/JmWs9/1ASuC2p5xjqhCH0DQDRw0eVxXl", - "6EdDeQg8qDKqjPwOk5gxnoj12CsYoRLpnoRiCaIOoPW4zZIC0xTQL3uj/WtrIrq3nRfBfQGxNM2nYBpw", - "EOqhemTUl5BcYSWjwo9Ydi701vAQYtSdrOsMn+4PrJezmeXKKqLlC3dz4IAAx5Z8RJTi0PZPo59fNejn", - "JdK6WZsyJ3/XhGV4ClmAsBP9vM5oPNIqavYRoQmJtfyxagopZyVNbGsV7/e8JlMc37hNuuSaaZdsiGcs", - "JXINazHdBCrpWHmAmLNMZTjaPM1YiFAhVdRnM0Wixjj9PrDpfGJm7+p5aOzoxIQl8eOqqHdCH7ngfOJ9", - 
"2qcBxNKwlTx+P3BFCvj96/+gDaxB0tzsZK3KONfeOaqcM+C/b+clvQnlPbF6oRNUpUztlbg55OqeH0u7", - "3dRNevUANtPVo7os+lsfja7rmXrGrF53BiYSckXQgzNHPVY9kQ5jHUlKt6EizJGlEVRAgh/Ort6yvCgl", - "HNNZ4Oz5tD6ET0Biosz/w9kVik0f9xi4K1QDXzXWhXMv/MWUMzSJ4he3bMDx1xxyxheTGQfwOujH6Af1", - "eEk3ySTOAv0u9fNgR0JbpOkHwe0AnHs0fVJ/r9xXUwKhpqVHpM9qJaOKIEerLeWF1XslSUZ+1ypapWKl", - "2bJpjoTEkghJYvFI5b6wxoapYRQ5PE6sJbvdHIkhK9/gdO4whua+UQzlAxbCQ00ixEGQIN9a2rYQsJiP", - "l5dnPTVG6tXAIiMDFsMLcup6oW5BzrsKd8zMHuK05WendZhu2Onh9V84I4kerua6j5UKnJdy0h7PQXLD", - "SQjGXWrbA4Toxjy5w1x7vZXFoDoq5d9LETstSpXvmUqquuDnzGuzjPkWIDmcfShK1GdybrI76DjE5tZu", - "+7Pq2SrULZqG9byjhnHXdAJSXqKMC4mlGKQGDjgbq2itFbIMZBVZohr3kQpp+3xLKYbsv65WDPsBtfQS", - "/hFwJudvqzzbl6garhThtGyuOyLTpErNHMqAlrki9sd/RqPo/fn5j+fRKDp+d/LeJe/CTLCKYUuHy5dD", - "doArvVRcq2oytEwJrG57hNFeW7jZ6eraStfKTLHEquzV0nLd6resptI5f1xLMDqlXyaX/tVAIxVd27dy", - "KdBO0VtpeYiDAKMnJ6enpiy3a9AxoxKodL3urX0U2tphmeeg5yxb7ZzcNKpmcuh3CAuTfQ6/lSACJ/k5", - "vp9IdgO0fab0nbtnfI8uTZtwmqhnFoNDs0Ptg1uVaodpG6iFos4WjAeGwZW+5IBzr5+uyfTrKnAeXHhL", - "yAtlYiWH1pHi966xNY0CJ3eSFRN/j2G873RmBfpnUKKqX9EumHK7na0sBKmV4ltJZQd9VtJ4b8u6OShv", - "86zbPgqesQ9ZlFRKXalH7pDV7E623dRZ9WtbnZTCJ9iYMLoSQ7ZDuDO+M9zILA1yS2ollraQl0AIuQV9", - "jGZP084wx8ZjunDCWTYpebZi7/Hq/ESjqSinut6Z0BTdEowuOYlv9IkFkyxmmd2JTPTGtD10z8itPXkf", - "SzZulxegQhPnoutbQxa64kFNwa1y9DWILsppRsRc0Wz69pNeJQBVXoBpgjKWeuS9N2P0UDdwM1etVttZ", - "B5IM8ZJ25aZemB9f2HQHfWKSxICkrrycE4GIQGodmaBq8qquoqpgNZtiTM6BI85KCWKkN1yJRAkDgSiT", - "pkRMzYRRcKfZlFzAPY6lebYtXqEECqCJQIz6nJC8yCAHKm01Gk1QrutrprqEYUbSkuNpBloTque/jRn8", - "G2GeltUJ6aBMtLbtWtpfHzrHFrY2WTcGCdw5gQncXbD+EvAsa0qN8fmLVyEJNRwr27PqZaVMmdkaV4FA", - "idgO481pHvWZVeV4EJ76gpU8BndWQmOW+7PWYyDpHdxf1M+Dk7eTWY8SXyQuRIUxaABarZXeLQeXbra3", - "PuRpNKtKk5ZP94wY5gKvZBWOLYOswYj1tDaNJPvzrPqljXpZED7F4kasZcumb1Ui0mPA7kFte8HC8d0I", - "ldQ5q28qCQTaNl1f1dCnSw/8iwr+MaxfeLJy7dgZT4sgqPeY8b61qJbHljBhItHHeqa5pluf1PtTelBm", - "Bl55gdESJqrmVqrXLdqX6lcvfQPHUbl6USlT4Q0mplrPuUWHp6yUrYIq3a+rcCpmd91pPs9BVrWPZsI7", - "LNAsw2kKCcICfbr44bN3UKqGGX74pzSh3pjzZbdQtZ5xUMFZ0K/V4MqpTblDw0KMqUoQcByDEOaWY73B", - "PsCJjesKQ4oWm6tPra4+PV6dn4RUqdGXs9xehuql0tfYS/Pc5lIxE2D06bdO9EmmGLJ5Yg49h+8rmSPM", - "h9aZZmjZ/rzbN6OKx2u/9zJgUO/t3Ye+hdff527jUxbYd24OLimw31wW3FwW/PteFnz9H31XEF2AWqhL", - "QLpYtzCbNLp4U+9jbP3vljINUV+1ny6aks5NfdafdiOgg98DbwRYg2mFWD+E9sbZiwIgnvcFWo8LF7KO", - "UK7wRBSAb4CjBNTKngul40yBf7ZAcF9wEFpvKkxgqlWdqD4Qz6s6M2V02lbV40S3LIiMted0ltLVX0p2", - "1dRqCSsBbLql/jLjh/XoDPKMFxeHULIsWjRJ2fIQYUrk9f7Gsql68zXfXjxTCBjMyuqPjMXe+RKmC1vO", - "0ubwa8emrx/cGB63KgiaUybzQZ7WeVRQhvpB01TTjC7V01Wpq+LDTGVbOq41oOJk/V241ftu5lLnqkS9", - "ugKp2nprhTUPo9trhOqWqCFixeG0JdWV2fK9Ho3QccmJXFwoUgyfHy8vz94A5sDr7zppWDeP6kHmUhbR", - "gxqDBAv2juxd7rj+/A4vKTo6rvf93I2+E3ILhcKSo2N0XlKqJ1K4Zsba29nb2VMCYQVQXJDoMPpmZ39n", - "T2kLy7kme1d/1WUs2bhy4oKJUDSvP33jfKnI3K6wqy1WWGs4TtRSov1ZGG5OCd+wZNE62jZRH3O5q8Lu", - "uPpikVHzKiMIfYPmwVexivHOiZ9m+2Bvr0WFI/XdL8LEj2EkeAtEPXcrcJd6sT8rM9Q0G0XfPiEJTSVc", - "YP43OEHVGa2ed/9l5r2iuJRzxsnvkOiJ9795mYkts+g9lSoNvmQMnWBuKgO+3X/9Utw3CatGKoPlioSD", - "gycloVOV2CWmaYLqysXXL2V/x1QCpzhDF8BvgVcUODCqY64LoL9cP1yPIlHmOeaL6hNn6JKhKjXAqVDY", - "XYUShd73Y5NiYbEYU5zDmN0C5yTRyO+hwyjandtCs90KhVPQIvBBzK0SjJ4RQULViEOB5MGVUzWQKcf0", - "Oa1rDZeyWlXePTuvZqI/xmU1hmJTV9j1s2dePydfTonf47gyJGpu9NJKBeX6jlo4Kh8VRbaoLqp53wIR", - "5mi/4EwlWc5irROmWx9veeY47c32woHaLzrcROr+SL2JUOtGKHPj/5Kh+trnmiGK+I7hgsCAzFxvWBkc", - "WJ2Y+9/2eRmH/zMS81AF7sbr/+L5+QZ6Hg09j0yOieehLvDc1p/1CiLPh9DHrNZKOqqPv7wMBpnZXhiE", - "/M2kDfxsko5n8Pz6I0qPc/3KMUbRbkZuYexXPK5afgQXHk41s6ndcz9OKUtOIUFAE/39EhGEiHbx3VKY", - 
"eLyOegpXXxgleisNN4CxAYynAwxlZgYs/ghqZG3PNMiR5QNSBX3WWOp6BowyTNNSQVh9lN9FgZPT53L8", - "5ubSSzu7c51n498b/35C/9besrY/Z7lxYVuKPsb2u1rjg36Ptp/gsoXP+vYXpksy/sAnu5456+/M+MJu", - "7peUbxx94+hP5+iV91XGjQ4e4fei6yCjaFdF6AFHDx9aFcl67e8UIIeTeqfS65nCereWbHPKsHH7v4nb", - "6yq6P3DIIB3385zd1OMN2urzu7j/b5n576aqu8HVJqBsKv8wTZwSTO8/8+pBClPj96xQ4ZURvjBW+P+1", - "3AYrNljx9FhRu9DjwMJ212hROp/SDcKE/ZxnvRJA00X1fxXoK5FSoOaL5UG3bz4I+syrg2qiTXaw8fi/", - "icc7H9Nd09VL1xmEJkDo6VpfM6/qjd9mrEzQW5bnJSVygT5gCXd4EdkLwLrKWRzu7iYccD5OzdudzHbf", - "iVV3XVbfM/6F1FlF37D1QEK328UF2Z2CxLs1vw/XD/8XAAD//+pb7T5rdwAA", + "H4sIAAAAAAAC/+xde3Pbtpb/KhjuziSZkeRHm3bXM/cPJ00Tz7VTjx+37aQeX4g8ohCTAAuAttWsv/sO", + "XiRAgpLs2m5vq7/iSHic5+8cAAfQlyRlZcUoUCmSvS+JSOdQYv3n/vHBO84ZV39nIFJOKkkYTfbUNwjU", + "V4iDqBgVgEqWQTFJRknFWQVcEtBjlCLvdz+bg+1eghA4B9VPEllAspcciVz9b1Gp/wjJCc2Tu7tRwuHX", + "mnDIkr1PetSLtktDaNOPTT9DKpO7UbJfZ4SdWCr7pJwE9KMZ4wirHigHChyrVn2mdAv9R1H8MEv2Pn1J", + "/pvDLNlL/murleaWFeXWEWQEn58cJncXo4gk7EyQmZknPW7NdD6/AU8Rpt+wbHGZA9UNz9gZ3EpF7gAX", + "IUnnVcFw5qhBM1IAkgxNAUmOqWo5hUzJZMZ4iWWyl0wJxXyRdOjrK3GUlCBxhiU2s85wXaj+X+6Srlz2", + "s4yoP3GBPrMpItRMRhi1tFRYCMjUf+QcUEUqKAgN7cjNFaNDKfuSZCEdPSo+1HlOaI6+x6kzkIPvUK0m", + "Vobi5FE5K2mmNk2z2NQcZM3ppSQlCInLSoQ0SF5Dj44T3Qe1fcz080AlSMKtnKDTuqoYV9Z0jYsaxB56", + "IYBKoCm8GKEXN4xnL0ZImTkyRKEpYwVgil6+UJO/UN+9mOFCwItXE/SdoQwRgezXL9vxXk1cS1QCpgJR", + "5hE5sbPZ79Tf4ynWWmvbeFKzXJ61klkFAz3HiNn9Evc4KHEOZ0z/0/ePvCYZpilcihQXEKjp28nrro7e", + "0ZTVHOcgrKXIBkMAkVJ/kRZMQLFABaFXrfEqvaGKs7KS6OWc5HPgVneoxAvEIatTOwT6tcYFkYtXvtze", + "WzrRqaaz4ZfW5RS44pc4Bgc83YwtmaKczBbohsh5z6+G3d3IL2LretzLJXLc6cvxO8g5aGJu5iQ1ZLQI", + "aSglAlW1mGsR3mCeCd2KUCIJLkybSZc+tFpMBeNYrICEfXTITvbRy0N2Mz7B9ArtZ7iSGpleWcVjmiEi", + "BUoZN9ExU152AySfS+24hgkvwKB3t7isCthDX9AvSYElUDlOGRVEKEdbbBVpOVbUjUV2W/yS7KGdyfYI", + "/ZJQ4OSz2KrILRRjzOXYfbt75wvgUDP2ZDjY42dNKKSQY0mu4dIY/woizlo3eSleafeqSQboZo6l+h/c", + "pkWdAZpxVkZEfJBTxpUFzVBokOiXenv7qxTt+GR/tKShY0NajPq6vDR+fVkBj/Gw02XhozY1xGYOEHyM", + "qIBb9gJC6hIdmMbHwHvkECohN9ar6aEz4KBZk9AJLTvb28P0ZEAZEUrHuuMEHTEO5m9UixoXCrUAa8yy", + "EGWhyLEyrSUSBbsBjhoq1DBZXWjPnS5UvAGay3mPP9cenWqqY9z54l3HKpbZ5LBOBZ6BXFymc0ivAuGp", + "0NeV3jFwhYkqkOpuSHfTpigkKTXuz7rYpWChLjKVwrDZDKhQRsY4mmNezurCJ/PUjPpWE9MQa6O1phYg", + "60vkFKxbckwzViKDbwOiUI2j8na6CqSwPfmfAbhmM5OKtGkarqqCtEGOg9Ox0czLbfXNThDITt2cPWzu", + "xP3KKdAEtkgCEET2NTOA90pCJO0nAimjkrOiDWRAs45c/jeW2FfAU6BSsavkwyQurEth6UW3t2b4jyCR", + "kEx9W1XFgtDcl41t1EaxdzSLxTBLKwV5marIo8ghNI/E3u1Y7JXAS0JBoDm7QWWdzl3ckgxhIUhOnUL9", + "0RGhVS1FhF4KUvHXthwMvkuyhOTTN5PXI7SzPdm+SP78eVcn//kD8q6/YSKDxdXlgLiPsLhqRZ05G7ce", + "yCEnjArloZi2zWakKBChhk1aYUKlYl5icbVcKXq2Qc1sEq6/T8KVfPpqe4R2X/cx68+ed7Uy36Rdm7Rr", + "KO3y0GxFBuYyq9WJWHyncu042gjj0ULpI+4UNkpaF65/F0oPTznkhx1tr8qt19xc+xfJgPVVOuuA5Tex", + "BHrGcQlCA7UAlXBqgw82hK7V8D533w8sIOc6bQnmfP1tdFbTUgV/nY6INSb9YAaPzbu27TZxCZvxdVz9", + "Q63WkHH/NKNkqvXltE6vQHap2Nn9tkvGuZswWFMokeOS1VQqBZgxm31vP9HQOjOxUX1lgVf9Wapganve", + "qIRuCkqt6queCo9Mszea6IAxP+QzIuAS1/nlAFBv7/by7IYF3RnhLGvhOVxE6X1L9CFYidhVCAcB5bTQ", + "y43BviZhpykHLBzfQczXBOzXORqG/NVpze7r/+BtpE2m4SRxQ7KO9e5s734dw0Pd8l5w+KMeuz/rPSOM", + "CR1LQswp5CVQuU8Xck5ovtsPM1N2Gzm9RoU2IPQ1wpzjBcrJNVCEBcJoym7dnoD1M42LI8X/Tz//9DMy", + "aOxz+4bd3mPxf+DwXhjiH4rwesFLq1pG+WM3Yw6CFbUGtVKvgFXjDlNyUZFUe6VexGFUcbgmrBbqj4yk", + "ujeR1q5GbVal/WLn9sPtj+jlh3/8+I/d199okzzdPwpyS7Ma1mT+6VbDZV0oLxZXl6yWjSCX4MGByrZr", + "GLUSNFGF2+PZuUrJ1YDmfBaXU5LXSphG9MasxAixmQSq/pvVqT6ABSmB255yjqlCHELzAjw1BFw5ytEP", + "hvIYeFBlVAX5DS5Txngm7sdexQiVSPckFEsQTQBtxm0XGZjmgD5tj3YurIno3nZeBLcVpNI0n4JpwEGo", + 
"D9VHRn0ZKRVWMirCiGXnQm8NDzFG/cn6zvDxdtd6OZtZrqwiOr5wMwcOCHBqyUdEKQ69/Gn086sW/YJE", + "WjfrUubl75qwAk+hiBB2qD9vMpqANEfNDiI0I6mWP1ZNIeesppltreL9dtBkitMrv0mfXDPtksqEguVE", + "3sNaTDeBajpWHiDmrFAZjjZPMxYiVEgV9dlMkagxTn8fOf0/NLP39bxu7OjFhCXx47xqNpsfuOB85I3b", + "xwHE2rCVPXyfcEUK+O3rv9FJ4lrS3Oxtrco4732E55wz4r9v5zW9iuU9qfpCJ6hKmdorcVtt1C/kk3a7", + "qZ/06gFspqtH9VkMtz5aXTczDYzpvu4NTCSUiqA7b45mrGYiHcZ6kpR+Q0WYJ0sjqIgE3x+fv2VlVUs4", + "oLNIEeBRUw2ZgcREmf/743OUmj5+PV5fqAa+GqyL5174s6krbRPFz379puevJZSMLy5nHCDooD9G36uP", + "l3TTp66Rfmf682hHQjuk6Q+i2wG4DGj6qP6/cl9NCYSalgGRIatORo4gT6sd5cXVey5JQX7TKlqlYqXZ", + "um2OhMSSCElS8UDlPrPG1lPDKPF4vLSW7HfzJIasfKPT+cMYmodGMZSvsRBe1yRiHEQJCq2lawsRi/lw", + "dnY8UOytvlqz2tuAxfqV0U3hdr8y+juHO2bmAHG68rPTeky37Azw+i9ckEwP13A9xIoD56WcdMfzkNxw", + "EoNxn9ruADG6Mc9uMNdeb2WxVkG78u+liJ1Xtcr3TEl7U3l9HLRZxnwHkDzO3lc1GjI5P9ld6zjE5tZ+", + "+2P32SrUrdqGzbyjlnHfdCJSXqKMU4mlWEsNHHAxVtFaK2QZyCqyhBv3gQrp+nxHKYbsP69WDPsRtQwS", + "/gFwIedvXZ4dSlQNV4t4WjbXHZFp4lIzjzKgdamI/eGfySh5d3Lyw0kySg6+O3znk3dqJljFsKXD58sj", + "O8KVXire6/pKbJkSWd0OCKO7tvCz09WXXHwrM0UUq7JXS8tFp9+yyy3e+eO9BKNT+mVyGV4NtFLRlyxW", + "LgW6KXonLY9xEGH08PDoyNyPitckApW+1721H8W2dlgROOgJK1Y7JzeN3Ewe/R5hcbJP4NcaROQkv8S3", + "l5JdAe2eKX3j7xnfojPTJp4m6pnF2qHZo/bOvx5kh+kaqIWi3hZMAIbRlb7kgMugn74cE1Za4DK68JZQ", + "VsrEat6t0vzWN7a2UeTkTrLqMtxjGO94nVmF/hmVqOpXdQup/G7HK0tDGqWEVuLsYMhKWu/tWDcH5W2B", + "dduPomfs6yxKnFJX6pF7ZLW7k1039Vb92lYvaxESbEwYnYt1tkO4N7433MgsDUpLqhNLV8hLIIRcgz5G", + "s6dpx5hj4zHxEueaFyv2Hs9PDjWainqqL54RmqNrgtEZJ+mVPrFgkqWssDuRmd6YtofuBbm2J+9jycbd", + "8gJUaeJiVc/nPKopuFaOfg+iq3paEDFXNJu+w6S7BMDlBZhmqGB5QN47M8YAdWtu5qrVajfrQJIhXtO+", + "3NQX5o/PbDpBH5kkKSCpKzLnRCAikFpHZshN7uoqXAWu2RRjcg4ccVZLECO94UokyhgIRJk0RWO64htF", + "d5pNyQXc4lSaz16KVyiDCmgmEKMhJ6SsCiiBSlufRjNU6vqaqS5hmJG85nhagNaE6vlvYwb/RpjntTsh", + "XSsTbWy7kfaXu96xhb0kphuDBO6dwEQukVp/iXiWNaXW+MLFq5CEGo6V7Vn1slrmzGyNq0CgRGyHCeY0", + "Hw2ZlXM8iE99ymqegj8roSkrw1mbMZAMDu5Pm8+jk3eT2YCSUCQ+RMUxaA20uld6txxc+tne/SFPo5kr", + "TVo+3RNimA+8kjkcWwZZayPW49o0kuyPs+rnNuplQfgIiytxL1s2fV2JyIAB+we13QULxzcjVFPvrL6t", + "JBDopen6qoE+XXoQXrQIj2HDwpOVa8feeFoEUb2njA+tRbU8XggTJjJ9rGeaa7r1SX04ZQBlZuCVL0lY", + "woRrbqV60aF9qX710jdyHFWqL5wyFd5gYq9LeQXbU1bLTkGV7tdXOBWzm/40P85ButpHM+ENFmhW4DyH", + "DGGBPp5+/2NwUKqGWf/wT2lCfWPOl/1C1WbGtQrOon6tBldObcodWhZSTFWCgNMUhDDPTTQb7Gs4sXFd", + "YUjRYvP1qdU1pMfzk8OYKjX6clbaW0ODVIYae26eu1wqZiKMPv7WiT7JFOtsnphDz/X3lcwR5l3nTDO2", + "bH/a7ZuR4/Ei7L0MGNT39grE0MLrr/PIxGMW2PeecFhSYL95tWFzifCve4nw9d/60QZ0CmqhLgHpYt3K", + "bNLo4k29j/Hi/14o0xDNm0fTRVvSuanP+sNuBPTwe80bAdZgOiE2DKGDcfa0AkjnQ4E24MKHrH1UKjwR", + "FeAr4CgDtbLnQum4UOBfLBDcVhyE1psKE5hqVWeqD6RzV2emjE7bqvo40y0rIlPtOb2ltPufkp2bWi1h", + "JYBNt9T/zPhxPXqDPOHFxXUoWRYt2qRseYgwJfJ6f2PZVIP5WmgvgSlEDGZl9UfB0uB8CdOFLWfpcvil", + "Z9MXd34MTzsVBO0pk3kZsXMeFZWh/qBtqmlGZ+rTVamr4sNMZVt6rrVGxcn9d+FW77uZS52rEnV3BVK1", + "DdYK9zyM7q4R3C1RQ8SKw2lLqi+z5Xs9GqHTmhO5OFWkGD4/nJ0dvwHMgTcPbGpYNx81g8ylrJI7NQaJ", + "Fuzt29vdafMOIq8p2j9o9v38jb5Dcg2VwpL9A3RSU6onUrhmxtqebE+2lUBYBRRXJNlLvprsTLaVtrCc", + "a7K39PN6Y8nGzokrJmLRvHmD0Hsy0tyusKstVllrOMjUUqL7Ph83p4RvWLboHG2bqI+53FJhd+yejjRq", + "XmUEsccA70IVqxjvnfhptne3tztUeFLf+ixM/FiPhGCBqOfuBO5aL/ZndYHaZqPk60ckoa2Ei8z/BmfI", + "ndHqeXeeZ95zims5Z5z8BpmeeOer55nYMoveUanS4DPG0CHmpjLg653Xz8V9m7BqpDJYrkjY3X1UEnpV", + "iX1i2iaoqVx8/Vz2d0AlcIoLdAr8GrijwINRHXN9AP10cXcxSkRdlpgv3Fuz6IwhlxrgXCjsdqFEofft", + "2KRYWCzGFJcwZtfAOck08gfoMEq25rbQbMuhcA5aBCGI+VWCyRMiSKwacV0gufPl5AYy5Zghp02t4VJW", + "XeXdk/NqJvp9XLoxFJu6wm6YPfP1U/Lllfg9jCtDouZGL61UUG7uqMWj8n5VFQt3US14C0SYo/2KM5Vk", + 
"eYu1XpjuvKL3xHE6mO2ZA3VYdLiJ1MORehOh7huhzI3/M9a+CnfPEEVCx+iBwDj3Xq58DDBAOE31xe+8", + "eWxkzkhqypSwuBIjRCYwGSFWS/c03sh7Jm+ExBXIdL67HrS0z0M9I8K4STdAswGavyTQIO/Vtd8BOK2f", + "+Lizxo6A3ig3cLJ6QyB8U+x5YOCP2BCIVf5vQOBPvi+wQaIHI9EDF+Uk8FAfeK6b5wSjyPM+9ojevRY7", + "7tGp58EgM9szg1C4ib2Bn00O8gSe3zze9jDXd44xSrYKcg3jsNJ61UonusbxblGYmmH/mVxZcwoZAprp", + "d5NEFCK6Rb9LYeLhOhoomH9mlBiscN4AxgYwHg8wlJkZsPg9qFF0PdMgR1GukSroGoda11FhVGCa1wrC", + "mhKiPgocHj2V47c3Jp/b2b1rhBv/3vj3I/q39pZ7+3NRGhe2V2DG2L7nN94d9mj79J+9cKFvnbqf9Ig6", + "cuSpwCfO+nszPrObh1dZNo6+cfTHc3Tnfc640e4D/F70HWSUbKkIvcaR5/vOTQi99vcuPsSTeq/C9InC", + "er+GdXPosHH7v4jb6+rd33G4KT33C5zd1AGvtdUXdvF/uNr83rB7k8BtAsq24hjTzCv9Dn7NeQApTG3x", + "k0JFUL78zFgR/rb4Bis2WPH4WNG40MPAwnbXaFF7T3hHYcI+I9z+uN904X4jRV/FlgK1v5QQdfv2IeIn", + "Xh24iTbZwcbj/yIe7z3ifU9Xr31nEJoAoafr/IqCu+fwtmB1ht6ysqwpkQv0Hku4wYvEPjygb1eIva2t", + "jAMux7n5dlLY7pNUddfXeQbGP5U6qxgathlI6HZbuCJbU5B4q+H37uLu/wMAAP//QMTBbWyFAAA=", } // GetSwagger returns the content of the embedded swagger specification file From 441e48c78b1e490d4f28ec661cbfaae516804019 Mon Sep 17 00:00:00 2001 From: RUFFY-369 Date: Mon, 30 Dec 2024 01:28:30 +0530 Subject: [PATCH 05/10] chore:enable model cpu offloading in all tasks pipelines --- runner/app/pipelines/image_to_image_generic.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/runner/app/pipelines/image_to_image_generic.py b/runner/app/pipelines/image_to_image_generic.py index 5986737d..881d9d7d 100644 --- a/runner/app/pipelines/image_to_image_generic.py +++ b/runner/app/pipelines/image_to_image_generic.py @@ -93,9 +93,11 @@ def __init__(self, model_id: str, task: str): safety_checker=None, **kwargs, ).to(torch_device) + self.pipeline_stage1.enable_model_cpu_offload() self.pipeline_stage2 = StableDiffusionXLInpaintPipeline.from_pretrained( "OzzyGT/RealVisXL_V4.0_inpainting", vae=self.vae, **kwargs ).to(torch_device) + self.pipeline_stage1.enable_model_cpu_offload() elif self.task == TaskType.SKETCH_TO_IMAGE.value: self.controlnet = ControlNetModel.from_pretrained(model_id, **kwargs).to( @@ -115,6 +117,7 @@ def __init__(self, model_id: str, task: str): scheduler=eulera_scheduler, **kwargs, ).to(torch_device) + self.pipeline.enable_model_cpu_offload() self._lora_loader = LoraLoader(self.pipeline) From cab7133e8921d55226c4eb59a30bceff654b22da Mon Sep 17 00:00:00 2001 From: RUFFY-369 Date: Mon, 30 Dec 2024 20:24:13 +0530 Subject: [PATCH 06/10] chore:add task param to env var for access to pipeline --- runner/app/main.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/runner/app/main.py b/runner/app/main.py index 57acb6f8..62647487 100644 --- a/runner/app/main.py +++ b/runner/app/main.py @@ -23,7 +23,9 @@ async def lifespan(app: FastAPI): pipeline = os.environ["PIPELINE"] model_id = os.environ["MODEL_ID"] - app.pipeline = load_pipeline(pipeline, model_id) + task = os.environ["TASK"] if pipeline == "image-to-image-generic" else None + + app.pipeline = load_pipeline(pipeline, model_id, task) app.include_router(load_route(pipeline)) app.hardware_info_service.log_gpu_compute_info() @@ -34,7 +36,7 @@ async def lifespan(app: FastAPI): logger.info("Shutting down") -def load_pipeline(pipeline: str, model_id: str) -> any: +def load_pipeline(pipeline: str, model_id: str, task: str) -> any: match pipeline: case "text-to-image": from app.pipelines.text_to_image import TextToImagePipeline @@ -81,7 +83,7 @@ def load_pipeline(pipeline: str, model_id: str) -> any: case "image-to-image-generic": from app.pipelines.image_to_image_generic import 
ImageToImageGenericPipeline - return ImageToImageGenericPipeline(model_id) + return ImageToImageGenericPipeline(model_id, task) case _: raise EnvironmentError( f"{pipeline} is not a valid pipeline for model {model_id}" From e1773828be8c653c2607fa929885585118b22e09 Mon Sep 17 00:00:00 2001 From: RUFFY-369 Date: Thu, 2 Jan 2025 19:38:44 +0530 Subject: [PATCH 07/10] chore:make mask_image as string to avoid conflicts in go livepeer --- runner/app/routes/image_to_image_generic.py | 13 ++++++++---- runner/gateway.openapi.yaml | 4 ++-- runner/openapi.yaml | 4 ++-- worker/multipart.go | 23 +++++---------------- worker/runner.gen.go | 4 ++-- 5 files changed, 20 insertions(+), 28 deletions(-) diff --git a/runner/app/routes/image_to_image_generic.py b/runner/app/routes/image_to_image_generic.py index 408e6457..6ccf3a32 100644 --- a/runner/app/routes/image_to_image_generic.py +++ b/runner/app/routes/image_to_image_generic.py @@ -80,9 +80,12 @@ async def image_to_image_generic( File(description="Uploaded image to modify with the pipeline."), ], mask_image: Annotated[ - UploadFile, - File( - description="Mask image to determine which regions of an image to fill in for inpainting task." + str, + Form( + description=( + "Mask image to determine which regions of an image to fill in" + "for inpainting task with the form HxW." + ) ), ] = None, model_id: Annotated[ @@ -189,7 +192,6 @@ async def image_to_image_generic( seeds = [seed + i for i in range(num_images_per_prompt)] image = Image.open(image.file).convert("RGB") - mask_image = Image.open(mask_image.file).convert("RGB") if mask_image else None try: prompt = json_str_to_np_array(prompt, var_name="prompt") @@ -197,6 +199,9 @@ async def image_to_image_generic( num_inference_steps = json_str_to_np_array( num_inference_steps, var_name="num_inference_steps" ) + if mask_image: + mask_image = json_str_to_np_array(mask_image, var_name="mask_image") + mask_image = Image.fromarray(mask_image) except ValueError as e: return JSONResponse( status_code=status.HTTP_400_BAD_REQUEST, diff --git a/runner/gateway.openapi.yaml b/runner/gateway.openapi.yaml index c6ae67cb..dffa39d1 100644 --- a/runner/gateway.openapi.yaml +++ b/runner/gateway.openapi.yaml @@ -868,9 +868,9 @@ components: description: Uploaded image to modify with the pipeline. mask_image: type: string - format: binary title: Mask Image - description: Mask image to determine which regions of an image to fill in for inpainting task. + description: Mask image to determine which regions of an image to fill in for + inpainting task with the form HxW. model_id: type: string title: Model Id diff --git a/runner/openapi.yaml b/runner/openapi.yaml index fe6e13a0..89a67987 100644 --- a/runner/openapi.yaml +++ b/runner/openapi.yaml @@ -900,9 +900,9 @@ components: description: Uploaded image to modify with the pipeline. mask_image: type: string - format: binary title: Mask Image - description: Mask image to determine which regions of an image to fill in for inpainting task. + description: Mask image to determine which regions of an image to fill in for + inpainting task with the form HxW. 
model_id: type: string title: Model Id diff --git a/worker/multipart.go b/worker/multipart.go index f8b93844..565fe589 100644 --- a/worker/multipart.go +++ b/worker/multipart.go @@ -383,24 +383,6 @@ func NewImageToImageGenericMultipartWriter(w io.Writer, req GenImageToImageGener return nil, fmt.Errorf("failed to copy image to multipart request imageBytes=%v copiedBytes=%v", imageSize, copied) } - if req.MaskImage != nil { - writer, err := mw.CreateFormFile("mask_image", req.MaskImage.Filename()) - if err != nil { - return nil, err - } - maskimageSize := req.MaskImage.FileSize() - maskimageRdr, err := req.MaskImage.Reader() - if err != nil { - return nil, err - } - copied, err := io.Copy(writer, maskimageRdr) - if err != nil { - return nil, err - } - if copied != maskimageSize { - return nil, fmt.Errorf("failed to copy mask_image to multipart request maskimageBytes=%v copiedBytes=%v", maskimageSize, copied) - } - if err := mw.WriteField("prompt", req.Prompt); err != nil { return nil, err } @@ -409,6 +391,11 @@ func NewImageToImageGenericMultipartWriter(w io.Writer, req GenImageToImageGener return nil, err } } + if req.MaskImage != nil { + if err := mw.WriteField("mask_image", *req.MaskImage); err != nil { + return nil, err + } + } if req.Loras != nil { if err := mw.WriteField("loras", *req.Loras); err != nil { return nil, err diff --git a/worker/runner.gen.go b/worker/runner.gen.go index 4414682e..b0552dfd 100644 --- a/worker/runner.gen.go +++ b/worker/runner.gen.go @@ -116,8 +116,8 @@ type BodyGenImageToImageGeneric struct { // Loras A LoRA (Low-Rank Adaptation) model and its corresponding weight for image generation. Example: { "latent-consistency/lcm-lora-sdxl": 1.0, "nerijs/pixel-art-xl": 1.2}. Loras *string `json:"loras,omitempty"` - // MaskImage Mask image to determine which regions of an image to fill in for inpainting task. - MaskImage *openapi_types.File `json:"mask_image,omitempty"` + // MaskImage Mask image to determine which regions of an image to fill in for inpainting task with the form HxW. + MaskImage *string `json:"mask_image,omitempty"` // ModelId Hugging Face model ID used for image generation. 
ModelId string `json:"model_id"` From 7845b6f9482dddce79559b9fa0b0408bff691b4b Mon Sep 17 00:00:00 2001 From: RUFFY-369 Date: Thu, 2 Jan 2025 19:56:04 +0530 Subject: [PATCH 08/10] chore:make codegen --- worker/runner.gen.go | 126 +++++++++++++++++++++---------------------- 1 file changed, 63 insertions(+), 63 deletions(-) diff --git a/worker/runner.gen.go b/worker/runner.gen.go index b0552dfd..bb8ec9ee 100644 --- a/worker/runner.gen.go +++ b/worker/runner.gen.go @@ -3227,69 +3227,69 @@ var swaggerSpec = []string{ "xP3KKdAEtkgCEET2NTOA90pCJO0nAimjkrOiDWRAs45c/jeW2FfAU6BSsavkwyQurEth6UW3t2b4jyCR", "kEx9W1XFgtDcl41t1EaxdzSLxTBLKwV5marIo8ghNI/E3u1Y7JXAS0JBoDm7QWWdzl3ckgxhIUhOnUL9", "0RGhVS1FhF4KUvHXthwMvkuyhOTTN5PXI7SzPdm+SP78eVcn//kD8q6/YSKDxdXlgLiPsLhqRZ05G7ce", - "yCEnjArloZi2zWakKBChhk1aYUKlYl5icbVcKXq2Qc1sEq6/T8KVfPpqe4R2X/cx68+ed7Uy36Rdm7Rr", - "KO3y0GxFBuYyq9WJWHyncu042gjj0ULpI+4UNkpaF65/F0oPTznkhx1tr8qt19xc+xfJgPVVOuuA5Tex", - "BHrGcQlCA7UAlXBqgw82hK7V8D533w8sIOc6bQnmfP1tdFbTUgV/nY6INSb9YAaPzbu27TZxCZvxdVz9", - "Q63WkHH/NKNkqvXltE6vQHap2Nn9tkvGuZswWFMokeOS1VQqBZgxm31vP9HQOjOxUX1lgVf9Wapganve", - "qIRuCkqt6queCo9Mszea6IAxP+QzIuAS1/nlAFBv7/by7IYF3RnhLGvhOVxE6X1L9CFYidhVCAcB5bTQ", - "y43BviZhpykHLBzfQczXBOzXORqG/NVpze7r/+BtpE2m4SRxQ7KO9e5s734dw0Pd8l5w+KMeuz/rPSOM", - "CR1LQswp5CVQuU8Xck5ovtsPM1N2Gzm9RoU2IPQ1wpzjBcrJNVCEBcJoym7dnoD1M42LI8X/Tz//9DMy", - "aOxz+4bd3mPxf+DwXhjiH4rwesFLq1pG+WM3Yw6CFbUGtVKvgFXjDlNyUZFUe6VexGFUcbgmrBbqj4yk", - "ujeR1q5GbVal/WLn9sPtj+jlh3/8+I/d199okzzdPwpyS7Ma1mT+6VbDZV0oLxZXl6yWjSCX4MGByrZr", - "GLUSNFGF2+PZuUrJ1YDmfBaXU5LXSphG9MasxAixmQSq/pvVqT6ABSmB255yjqlCHELzAjw1BFw5ytEP", - "hvIYeFBlVAX5DS5Txngm7sdexQiVSPckFEsQTQBtxm0XGZjmgD5tj3YurIno3nZeBLcVpNI0n4JpwEGo", - "D9VHRn0ZKRVWMirCiGXnQm8NDzFG/cn6zvDxdtd6OZtZrqwiOr5wMwcOCHBqyUdEKQ69/Gn086sW/YJE", - "WjfrUubl75qwAk+hiBB2qD9vMpqANEfNDiI0I6mWP1ZNIeesppltreL9dtBkitMrv0mfXDPtksqEguVE", - "3sNaTDeBajpWHiDmrFAZjjZPMxYiVEgV9dlMkagxTn8fOf0/NLP39bxu7OjFhCXx47xqNpsfuOB85I3b", - "xwHE2rCVPXyfcEUK+O3rv9FJ4lrS3Oxtrco4732E55wz4r9v5zW9iuU9qfpCJ6hKmdorcVtt1C/kk3a7", - "qZ/06gFspqtH9VkMtz5aXTczDYzpvu4NTCSUiqA7b45mrGYiHcZ6kpR+Q0WYJ0sjqIgE3x+fv2VlVUs4", - "oLNIEeBRUw2ZgcREmf/743OUmj5+PV5fqAa+GqyL5174s6krbRPFz379puevJZSMLy5nHCDooD9G36uP", - "l3TTp66Rfmf682hHQjuk6Q+i2wG4DGj6qP6/cl9NCYSalgGRIatORo4gT6sd5cXVey5JQX7TKlqlYqXZ", - "um2OhMSSCElS8UDlPrPG1lPDKPF4vLSW7HfzJIasfKPT+cMYmodGMZSvsRBe1yRiHEQJCq2lawsRi/lw", - "dnY8UOytvlqz2tuAxfqV0U3hdr8y+juHO2bmAHG68rPTeky37Azw+i9ckEwP13A9xIoD56WcdMfzkNxw", - "EoNxn9ruADG6Mc9uMNdeb2WxVkG78u+liJ1Xtcr3TEl7U3l9HLRZxnwHkDzO3lc1GjI5P9ld6zjE5tZ+", - "+2P32SrUrdqGzbyjlnHfdCJSXqKMU4mlWEsNHHAxVtFaK2QZyCqyhBv3gQrp+nxHKYbsP69WDPsRtQwS", - "/gFwIedvXZ4dSlQNV4t4WjbXHZFp4lIzjzKgdamI/eGfySh5d3Lyw0kySg6+O3znk3dqJljFsKXD58sj", - "O8KVXire6/pKbJkSWd0OCKO7tvCz09WXXHwrM0UUq7JXS8tFp9+yyy3e+eO9BKNT+mVyGV4NtFLRlyxW", - "LgW6KXonLY9xEGH08PDoyNyPitckApW+1721H8W2dlgROOgJK1Y7JzeN3Ewe/R5hcbJP4NcaROQkv8S3", - "l5JdAe2eKX3j7xnfojPTJp4m6pnF2qHZo/bOvx5kh+kaqIWi3hZMAIbRlb7kgMugn74cE1Za4DK68JZQ", - "VsrEat6t0vzWN7a2UeTkTrLqMtxjGO94nVmF/hmVqOpXdQup/G7HK0tDGqWEVuLsYMhKWu/tWDcH5W2B", - "dduPomfs6yxKnFJX6pF7ZLW7k1039Vb92lYvaxESbEwYnYt1tkO4N7433MgsDUpLqhNLV8hLIIRcgz5G", - "s6dpx5hj4zHxEueaFyv2Hs9PDjWainqqL54RmqNrgtEZJ+mVPrFgkqWssDuRmd6YtofuBbm2J+9jycbd", - "8gJUaeJiVc/nPKopuFaOfg+iq3paEDFXNJu+w6S7BMDlBZhmqGB5QN47M8YAdWtu5qrVajfrQJIhXtO+", - "3NQX5o/PbDpBH5kkKSCpKzLnRCAikFpHZshN7uoqXAWu2RRjcg4ccVZLECO94UokyhgIRJk0RWO64htF", - "d5pNyQXc4lSaz16KVyiDCmgmEKMhJ6SsCiiBSlufRjNU6vqaqS5hmJG85nhagNaE6vlvYwb/RpjntTsh", - 
"XSsTbWy7kfaXu96xhb0kphuDBO6dwEQukVp/iXiWNaXW+MLFq5CEGo6V7Vn1slrmzGyNq0CgRGyHCeY0", - "Hw2ZlXM8iE99ymqegj8roSkrw1mbMZAMDu5Pm8+jk3eT2YCSUCQ+RMUxaA20uld6txxc+tne/SFPo5kr", - "TVo+3RNimA+8kjkcWwZZayPW49o0kuyPs+rnNuplQfgIiytxL1s2fV2JyIAB+we13QULxzcjVFPvrL6t", - "JBDopen6qoE+XXoQXrQIj2HDwpOVa8feeFoEUb2njA+tRbU8XggTJjJ9rGeaa7r1SX04ZQBlZuCVL0lY", - "woRrbqV60aF9qX710jdyHFWqL5wyFd5gYq9LeQXbU1bLTkGV7tdXOBWzm/40P85ButpHM+ENFmhW4DyH", - "DGGBPp5+/2NwUKqGWf/wT2lCfWPOl/1C1WbGtQrOon6tBldObcodWhZSTFWCgNMUhDDPTTQb7Gs4sXFd", - "YUjRYvP1qdU1pMfzk8OYKjX6clbaW0ODVIYae26eu1wqZiKMPv7WiT7JFOtsnphDz/X3lcwR5l3nTDO2", - "bH/a7ZuR4/Ei7L0MGNT39grE0MLrr/PIxGMW2PeecFhSYL95tWFzifCve4nw9d/60QZ0CmqhLgHpYt3K", - "bNLo4k29j/Hi/14o0xDNm0fTRVvSuanP+sNuBPTwe80bAdZgOiE2DKGDcfa0AkjnQ4E24MKHrH1UKjwR", - "FeAr4CgDtbLnQum4UOBfLBDcVhyE1psKE5hqVWeqD6RzV2emjE7bqvo40y0rIlPtOb2ltPufkp2bWi1h", - "JYBNt9T/zPhxPXqDPOHFxXUoWRYt2qRseYgwJfJ6f2PZVIP5WmgvgSlEDGZl9UfB0uB8CdOFLWfpcvil", - "Z9MXd34MTzsVBO0pk3kZsXMeFZWh/qBtqmlGZ+rTVamr4sNMZVt6rrVGxcn9d+FW77uZS52rEnV3BVK1", - "DdYK9zyM7q4R3C1RQ8SKw2lLqi+z5Xs9GqHTmhO5OFWkGD4/nJ0dvwHMgTcPbGpYNx81g8ylrJI7NQaJ", - "Fuzt29vdafMOIq8p2j9o9v38jb5Dcg2VwpL9A3RSU6onUrhmxtqebE+2lUBYBRRXJNlLvprsTLaVtrCc", - "a7K39PN6Y8nGzokrJmLRvHmD0Hsy0tyusKstVllrOMjUUqL7Ph83p4RvWLboHG2bqI+53FJhd+yejjRq", - "XmUEsccA70IVqxjvnfhptne3tztUeFLf+ixM/FiPhGCBqOfuBO5aL/ZndYHaZqPk60ckoa2Ei8z/BmfI", - "ndHqeXeeZ95zims5Z5z8BpmeeOer55nYMoveUanS4DPG0CHmpjLg653Xz8V9m7BqpDJYrkjY3X1UEnpV", - "iX1i2iaoqVx8/Vz2d0AlcIoLdAr8GrijwINRHXN9AP10cXcxSkRdlpgv3Fuz6IwhlxrgXCjsdqFEofft", - "2KRYWCzGFJcwZtfAOck08gfoMEq25rbQbMuhcA5aBCGI+VWCyRMiSKwacV0gufPl5AYy5Zghp02t4VJW", - "XeXdk/NqJvp9XLoxFJu6wm6YPfP1U/Lllfg9jCtDouZGL61UUG7uqMWj8n5VFQt3US14C0SYo/2KM5Vk", - "eYu1XpjuvKL3xHE6mO2ZA3VYdLiJ1MORehOh7huhzI3/M9a+CnfPEEVCx+iBwDj3Xq58DDBAOE31xe+8", - "eWxkzkhqypSwuBIjRCYwGSFWS/c03sh7Jm+ExBXIdL67HrS0z0M9I8K4STdAswGavyTQIO/Vtd8BOK2f", - "+Lizxo6A3ig3cLJ6QyB8U+x5YOCP2BCIVf5vQOBPvi+wQaIHI9EDF+Uk8FAfeK6b5wSjyPM+9ojevRY7", - "7tGp58EgM9szg1C4ib2Bn00O8gSe3zze9jDXd44xSrYKcg3jsNJ61UonusbxblGYmmH/mVxZcwoZAprp", - "d5NEFCK6Rb9LYeLhOhoomH9mlBiscN4AxgYwHg8wlJkZsPg9qFF0PdMgR1GukSroGoda11FhVGCa1wrC", - "mhKiPgocHj2V47c3Jp/b2b1rhBv/3vj3I/q39pZ7+3NRGhe2V2DG2L7nN94d9mj79J+9cKFvnbqf9Ig6", - "cuSpwCfO+nszPrObh1dZNo6+cfTHc3Tnfc640e4D/F70HWSUbKkIvcaR5/vOTQi99vcuPsSTeq/C9InC", - "er+GdXPosHH7v4jb6+rd33G4KT33C5zd1AGvtdUXdvF/uNr83rB7k8BtAsq24hjTzCv9Dn7NeQApTG3x", - "k0JFUL78zFgR/rb4Bis2WPH4WNG40MPAwnbXaFF7T3hHYcI+I9z+uN904X4jRV/FlgK1v5QQdfv2IeIn", - "Xh24iTbZwcbj/yIe7z3ifU9Xr31nEJoAoafr/IqCu+fwtmB1ht6ysqwpkQv0Hku4wYvEPjygb1eIva2t", - "jAMux7n5dlLY7pNUddfXeQbGP5U6qxgathlI6HZbuCJbU5B4q+H37uLu/wMAAP//QMTBbWyFAAA=", + "yCEnjArloZi2zWakKBChhk1aYUKlYl6qsRo1afj/cPtjkPCoFoOa2SRcf5+EK/n01fYI7b7uY9afPe9q", + "Zb5JuzZp11Da5aHZigzMZVarE7H4TuXacbQRxqOF0kfcKWyUtC5c/y6UHp5yyA872l6VW6+5ufYvkgHr", + "q3TWActvYgn0jOMShAZqASrh1AYfbAhdq+F97r4fWEDOddoSzPn62+ispqUK/jodEWtM+sEMHpt3bdtt", + "4hI24+u4+odarSHj/mlGyVTry2mdXoHsUrGz+22XjHM3YbCmUCLHJaupVAowYzb73n6ioXVmYqP6ygKv", + "+rNUwdT2vFEJ3RSUWtVXPRUemWZvNNEBY37IZ0TAJa7zywGg3t7t5dkNC7ozwlnWwnO4iNL7luhDsBKx", + "qxAOAsppoZcbg31Nwk5TDlg4voOYrwnYr3M0DPmr05rd1//B20ibTMNJ4oZkHevd2d79OoaHuuW94PBH", + "PXZ/1ntGGBM6loSYU8hLoHKfLuSc0Hy3H2am7DZyeo0KbUDoa4Q5xwuUk2ugCAuE0ZTduj0B62caF0eK", + "/59+/ulnZNDY5/YNu73H4v/A4b0wxD8U4fWCl1a1jPLHbsYcBCtqDWqlXgGrxh2m5KIiqfZKvYjDqOJw", + "TVgt1B8ZSXVvIq1djTor353bD7c/opcf/vHjP3Zff6NN8nT/6FV/NazJ/NOthsu6UF4sri5ZLRtBLsGD", + "A5Vt1zBqJWiiCrfHs3OVkqsBzfksLqckr5UwjeiNWYkRYjMJVP03q1N9AAtSArc95RxThTiE5gV4agi4", + 
"cpSjHwzlMfCgyqgK8htcpozxTNyPvYoRKpHuSSiWIJoA2ozbLjIwzQF92h7tXFgT0b3tvAhuK0ilaT4F", + "04CDUB+qj4z6MlIqrGRUhBHLzoXeGh5ijPqT9Z3h4+2u9XI2s1xZRXR84WYOHBDg1JKPiFIcevnT6OdX", + "LfoFibRu1qXMy981YQWeQhEh7FB/3mQ0AWmOmh1EaEZSLX+smkLOWU0z21rF++2gyRSnV36TPrlm2iWV", + "CQXLibyHtZhuAtV0rDxAzFmhMhxtnmYsRKiQKuqzmSJRY5z+PnL6f2hm7+t53djRiwlL4sd51Ww2P3DB", + "+cgbt48DiLVhK3v4PuGKFPDb13+jk8S1pLnZ21qVcd77CM85Z8R/385rehXLe1L1hU5QlTK1V+K22qhf", + "yCftdlM/6dUD2ExXj+qzGG59tLpuZhoY033dG5hIKBVBd94czVjNRDqM9SQp/YaKME+WRlARCb4/Pn/L", + "yqqWcEBnkSLAo6YaMgOJiTL/98fnKDV9/Hq8vlANfDVYF8+98GdTV9omip/9+k3PX0soGV9czjhA0EF/", + "jL5XHy/ppk9dI/3O9OfRjoR2SNMfRLcDcBnQ9FH9f+W+mhIINS0DIkNWnYwcQZ5WO8qLq/dckoL8plW0", + "SsVKs3XbHAmJJRGSpOKByn1mja2nhlHi8XhpLdnv5kkMWflGp/OHMTQPjWIoX2MhvK5JxDiIEhRaS9cW", + "Ihbz4ezseKDYW321ZrW3AYv1K6Obwu1+ZfR3DnfMzAHidOVnp/WYbtkZ4PVfuCCZHq7heogVB85LOemO", + "5yG54SQG4z613QFidGOe3WCuvd7KYq2CduXfSxE7r2qV75mS9qby+jhos4z5DiB5nL2vajRkcn6yu9Zx", + "iM2t/fbH7rNVqFu1DZt5Ry3jvulEpLxEGacSS7GWGjjgYqyitVbIMpBVZAk37gMV0vX5jlIM2X9erRj2", + "I2oZJPwD4ELO37o8O5SoGq4W8bRsrjsi08SlZh5lQOtSEfvDP5NR8u7k5IeTZJQcfHf4zifv1EywimFL", + "h8+XR3aEK71UvNf1ldgyJbK6HRBGd23hZ6erL7n4VmaKKFZlr5aWi06/ZZdbvPPHewlGp/TL5DK8Gmil", + "oi9ZrFwKdFP0Tloe4yDC6OHh0ZG5HxWvSQQqfa97az+Kbe2wInDQE1asdk5uGrmZPPo9wuJkn8CvNYjI", + "SX6Jby8luwLaPVP6xt8zvkVnpk08TdQzi7VDs0ftnX89yA7TNVALRb0tmAAMoyt9yQGXQT99OSastMBl", + "dOEtoayUidW8W6X5rW9sbaPIyZ1k1WW4xzDe8TqzCv0zKlHVr+oWUvndjleWhjRKCa3E2cGQlbTe27Fu", + "DsrbAuu2H0XP2NdZlDilrtQj98hqdye7buqt+rWtXtYiJNiYMDoX62yHcG98b7iRWRqUllQnlq6Ql0AI", + "uQZ9jGZP044xx8Zj4iXONS9W7D2enxxqNBX1VF88IzRH1wSjM07SK31iwSRLWWF3IjO9MW0P3QtybU/e", + "x5KNu+UFqNLExaqez3lUU3CtHP0eRFf1tCBirmg2fYdJdwmAywswzVDB8oC8d2aMAerW3MxVq9Vu1oEk", + "Q7ymfbmpL8wfn9l0gj4ySVJAUldkzolARCC1jsyQm9zVVbgKXLMpxuQcOOKsliBGesOVSJQxEIgyaYrG", + "dMU3iu40m5ILuMWpNJ+9FK9QBhXQTCBGQ05IWRVQApW2Po1mqNT1NVNdwjAjec3xtACtCdXz38YM/o0w", + "z2t3QrpWJtrYdiPtL3e9Ywt7SUw3BgncO4GJXCK1/hLxLGtKrfGFi1chCTUcK9uz6mW1zJnZGleBQInY", + "DhPMaT4aMivneBCf+pTVPAV/VkJTVoazNmMgGRzcnzafRyfvJrMBJaFIfIiKY9AaaHWv9G45uPSzvftD", + "nkYzV5q0fLonxDAfeCVzOLYMstZGrMe1aSTZH2fVz23Uy4LwERZX4l62bPq6EpEBA/YParsLFo5vRqim", + "3ll9W0kg0EvT9VUDfbr0ILxoER7DhoUnK9eOvfG0CKJ6TxkfWotqebwQJkxk+ljPNNd065P6cMoAyszA", + "K1+SsIQJ19xK9aJD+1L96qVv5DiqVF84ZSq8wcRel/IKtqeslp2CKt2vr3AqZjf9aX6cg3S1j2bCGyzQ", + "rMB5DhnCAn08/T64ofJRDbP+4Z/ShPrGnC/7harNjGsVnEX9Wg2unNqUO7QspJiqBAGnKQhhnptoNtjX", + "cGLjusKQosXm61Ora0iP5yeHMVVq9OWstLeGBqkMNfbcPHe5VMxEGH38rRN9kinW2Twxh57r7yuZI8y7", + "zplmbNn+tNs3I8fjRdh7GTCo7+0ViKGF11/nkYnHLLDvPeGwpMB+82rD5hLhX/cS4eu/9aMN6BTUQl0C", + "0sW6ldmk0cWbeh/jxf+9UKYhmjePpou2pHNTn/WH3Qjo4feaNwKswXRCbBhCB+PsaQWQzocCbcCFD1n7", + "qFR4IirAV8BRBmplz4XScaHAv1gguK04CK03FSYw1arOVB9I567OTBmdtlX1caZbVkSm2nN6S2n3PyU7", + "N7VawkoAm26p/5nx43r0BnnCi4vrULIsWrRJ2fIQYUrk9f7GsqkG87XQXgJTiBjMyuqPgqXB+RKmC1vO", + "0uXwS8+mL+78GJ52KgjaUybzMmLnPCoqQ/1B21TTjM7Up6tSV8WHmcq29FxrjYqT++/Crd53M5c6VyXq", + "7gqkahusFe55GN1dI7hbooaIFYfTllRfZsv3ejRCpzUncnGqSDF8fjg7O34DmANvHtjUsG4+agaZS1kl", + "d2oMEi3Y27e3u9PmHUReU7R/0Oz7+Rt9h+QaKoUl+wfopKZUT6RwzYy1PdmebCuBsAoorkiyl3w12Zls", + "K21hOddkb+nn9caSjZ0TV0zEonnzBqH3ZKS5XWFXW6yy1nCQqaVE930+bk4J37Bs0TnaNlEfc7mlwu7Y", + "PR1p1LzKCGKPAd6FKlYx3jvx02zvbm93qPCkvvVZmPixHgnBAlHP3QnctV7sz+oCtc1GydePSEJbCReZ", + "/w3OkDuj1fPuPM+85xTXcs44+Q0yPfHOV88zsWUWvaNSpcFnjKFDzE1lwNc7r5+L+zZh1UhlsFyRsLv7", + "qCT0qhL7xLRNUFO5+Pq57O+ASuAUF+gU+DVwR4EHozrm+gD66eLuYpSIuiwxX7i3ZtEZQy41wLlQ2O1C", + "iULv27FJsbBYjCkuYcyugXOSaeQP0GGUbM1todmWQ+EctAhCEPOrBJMnRJBYNeK6QHLny8kNZMoxQ06b", + 
"WsOlrLrKuyfn1Uz0+7h0Yyg2dYXdMHvm66fkyyvxexhXhkTNjV5aqaDc3FGLR+X9qioW7qJa8BaIMEf7", + "FWcqyfIWa70w3XlF74njdDDbMwfqsOhwE6mHI/UmQt03Qpkb/2esfRXuniGKhI7RA4Fx7r1c+RhggHCa", + "6ovfefPYyJyR1JQpYXElRohMYDJCrJbuabyR90zeCIkrkOl8dz1oaZ+HekaEcZNugGYDNH9JoEHeq2u/", + "A3BaP/FxZ40dAb1RbuBk9YZA+KbY88DAH7EhEKv834DAn3xfYINED0aiBy7KSeChPvBcN88JRpHnfewR", + "vXstdtyjU8+DQWa2ZwahcBN7Az+bHOQJPL95vO1hru8cY5RsFeQaxmGl9aqVTnSN492iMDXD/jO5suYU", + "MgQ00+8miShEdIt+l8LEw3U0UDD/zCgxWOG8AYwNYDweYCgzM2Dxe1Cj6HqmQY6iXCNV0DUOta6jwqjA", + "NK8VhDUlRH0UODx6Ksdvb0w+t7N71wg3/r3x70f0b+0t9/bnojQubK/AjLF9z2+8O+zR9uk/e+FC3zp1", + "P+kRdeTIU4FPnPX3ZnxmNw+vsmwcfePoj+fozvuccaPdB/i96DvIKNlSEXqNI8/3nZsQeu3vXXyIJ/Ve", + "hekThfV+Devm0GHj9n8Rt9fVu7/jcFN67hc4u6kDXmurL+zi/3C1+b1h9yaB2wSUbcUxpplX+h38mvMA", + "Upja4ieFiqB8+ZmxIvxt8Q1WbLDi8bGicaGHgYXtrtGi9p7wjsKEfUa4/XG/6cL9Roq+ii0Fan8pIer2", + "7UPET7w6cBNtsoONx/9FPN57xPuerl77ziA0AUJP1/kVBXfP4W3B6gy9ZWVZUyIX6D2WcIMXiX14QN+u", + "EHtbWxkHXI5z8+2ksN0nqequr/MMjH8qdVYxNGwzkNDttnBFtqYg8VbD793F3f8HAAD//44auThshQAA", } // GetSwagger returns the content of the embedded swagger specification file From ba02f000b203bbb4861d1054fd38af5f10a035a8 Mon Sep 17 00:00:00 2001 From: RUFFY-369 Date: Thu, 2 Jan 2025 20:47:59 +0530 Subject: [PATCH 09/10] chore:handle deafault model_id case/model_id being a dict case --- runner/app/pipelines/image_to_image_generic.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/runner/app/pipelines/image_to_image_generic.py b/runner/app/pipelines/image_to_image_generic.py index 881d9d7d..3bc1e387 100644 --- a/runner/app/pipelines/image_to_image_generic.py +++ b/runner/app/pipelines/image_to_image_generic.py @@ -1,3 +1,4 @@ +import json import logging import numpy as np import os @@ -45,6 +46,16 @@ def __init__(self, model_id: str, task: str): kwargs = {"cache_dir": get_model_dir(), "torch_dtype": torch.float16} torch_device = get_torch_device() + # Check if the model_id is a dictionary in string format in the default value case of model_id on go livepeer side + if model_id.startswith("{") and model_id.endswith("}"): + try: + # Perform json parsing of the string into a dictionary + model_id_dict = json.loads(model_id.replace("'", '"')) # Replace single quotes to make it JSON compliant + if isinstance(model_id_dict, dict) and task in model_id_dict: + model_id = model_id_dict[task] + except json.JSONDecodeError: + pass + folder_name = file_download.repo_folder_name( repo_id=model_id, repo_type="model" ) From a31f6fbfd801f2417fc026950bcd8b8a803a74e7 Mon Sep 17 00:00:00 2001 From: RUFFY-369 Date: Tue, 7 Jan 2025 20:19:41 +0530 Subject: [PATCH 10/10] fix:bug in inference request to pipeline --- .../app/pipelines/image_to_image_generic.py | 27 +++++++------------ runner/app/routes/image_to_image_generic.py | 10 +++++-- 2 files changed, 18 insertions(+), 19 deletions(-) diff --git a/runner/app/pipelines/image_to_image_generic.py b/runner/app/pipelines/image_to_image_generic.py index 3bc1e387..681ba2c8 100644 --- a/runner/app/pipelines/image_to_image_generic.py +++ b/runner/app/pipelines/image_to_image_generic.py @@ -85,38 +85,36 @@ def __init__(self, model_id: str, task: str): if self.task == TaskType.INPAINTING.value: self.pipeline = AutoPipelineForInpainting.from_pretrained( model_id, **kwargs - ).to(torch_device) + ) self.pipeline.enable_model_cpu_offload() elif self.task == TaskType.OUTPAINTING.value: self.controlnet = ( ControlNetModel.from_pretrained( model_id, torch_dtype=torch.float16, variant="fp16" - ).to(torch_device), + ), ) self.vae = 
AutoencoderKL.from_pretrained( "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16 - ).to(torch_device) + ) self.pipeline_stage1 = StableDiffusionXLControlNetPipeline.from_pretrained( "SG161222/RealVisXL_V4.0", controlnet=self.controlnet, vae=self.vae, safety_checker=None, **kwargs, - ).to(torch_device) + ) self.pipeline_stage1.enable_model_cpu_offload() self.pipeline_stage2 = StableDiffusionXLInpaintPipeline.from_pretrained( "OzzyGT/RealVisXL_V4.0_inpainting", vae=self.vae, **kwargs - ).to(torch_device) + ) self.pipeline_stage1.enable_model_cpu_offload() elif self.task == TaskType.SKETCH_TO_IMAGE.value: - self.controlnet = ControlNetModel.from_pretrained(model_id, **kwargs).to( - torch_device - ) + self.controlnet = ControlNetModel.from_pretrained(model_id, **kwargs) self.vae = AutoencoderKL.from_pretrained( "madebyollin/sdxl-vae-fp16-fix", **kwargs - ).to(torch_device) + ) eulera_scheduler = EulerAncestralDiscreteScheduler.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler" ) @@ -127,7 +125,7 @@ def __init__(self, model_id: str, task: str): safety_checker=None, scheduler=eulera_scheduler, **kwargs, - ).to(torch_device) + ) self.pipeline.enable_model_cpu_offload() self._lora_loader = LoraLoader(self.pipeline) @@ -145,8 +143,9 @@ def __call__( ) -> Tuple[List[PIL.Image], List[Optional[bool]]]: # Handle num_inference_steps and other model-specific settings if "num_inference_steps" in kwargs and ( - kwargs["num_inference_steps"] is None or kwargs["num_inference_steps"] < 1 + kwargs["num_inference_steps"] is None or any(x < 1 for x in kwargs["num_inference_steps"]) ): + logger.warning("Invalid num_inference_steps found. Deleting it from kwargs.") del kwargs["num_inference_steps"] # Extract parameters from kwargs @@ -191,12 +190,6 @@ def __call__( loras_json ) # Assuming _lora_loader is defined elsewhere - # Handle num_inference_steps and other model-specific settings - if "num_inference_steps" in kwargs and ( - kwargs["num_inference_steps"] is None or kwargs["num_inference_steps"] < 1 - ): - del kwargs["num_inference_steps"] - # Ensure proper inference configuration based on model if self.task == TaskType.INPAINTING.value: if mask_image is None: diff --git a/runner/app/routes/image_to_image_generic.py b/runner/app/routes/image_to_image_generic.py index 6ccf3a32..7d77f7bd 100644 --- a/runner/app/routes/image_to_image_generic.py +++ b/runner/app/routes/image_to_image_generic.py @@ -1,6 +1,9 @@ +import base64 import logging +import numpy as np import os import random +import zlib from typing import Annotated, Dict, Tuple, Union import torch @@ -200,8 +203,11 @@ async def image_to_image_generic( num_inference_steps, var_name="num_inference_steps" ) if mask_image: - mask_image = json_str_to_np_array(mask_image, var_name="mask_image") - mask_image = Image.fromarray(mask_image) + mask_image = base64.b64decode(mask_image) + mask_image = zlib.decompress(mask_image) + mask_image = np.frombuffer(mask_image, dtype=np.uint8) + mask_image = mask_image.reshape((image.size)) + mask_image = Image.fromarray(mask_image, mode="L") except ValueError as e: return JSONResponse( status_code=status.HTTP_400_BAD_REQUEST,
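
For reference, the mask_image contract introduced in [PATCH 07/10] and [PATCH 10/10] above expects the form field to carry a zlib-compressed, base64-encoded buffer of uint8 mask values, which the route decompresses, reshapes to image.size, and reads as a single-channel ("L") image. Below is a minimal client-side sketch of the matching encoding step; the helper name encode_mask is illustrative rather than part of the patch, and a square mask (e.g. 1024x1024) is assumed so that the reshape to image.size, which PIL reports as (width, height), round-trips cleanly.

import base64
import zlib

import numpy as np
from PIL import Image


def encode_mask(mask: Image.Image) -> str:
    # Flatten the mask to raw uint8 bytes, compress with zlib, then base64-encode,
    # mirroring the decode path added to runner/app/routes/image_to_image_generic.py above.
    arr = np.array(mask.convert("L"), dtype=np.uint8)
    return base64.b64encode(zlib.compress(arr.tobytes())).decode("utf-8")

A caller would pass the returned string as the mask_image form field when invoking the image-to-image-generic route for the inpainting task, alongside the uploaded image it masks.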