diff --git a/.gitignore b/.gitignore index 763624e..47af30d 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,4 @@ -__pycache__/* \ No newline at end of file +__pycache__/* +*.iml +.idea/** +/.idea/ diff --git a/Compositor3.py b/Compositor3.py index 6115cf6..be515cc 100644 --- a/Compositor3.py +++ b/Compositor3.py @@ -20,11 +20,12 @@ async def receivedDone(request): class Compositor3: file = "new.png" result = None + configCache = None @classmethod def IS_CHANGED(cls, **kwargs): fabricData = kwargs.get("fabricData") - print(fabricData) + # print(fabricData) return fabricData @classmethod @@ -35,6 +36,9 @@ def INPUT_TYPES(cls): "fabricData": ("STRING", {"default": "{}"}), "imageName": ("STRING", {"default": "new.png"}), }, + "optional": { + "tools": ("BOOLEAN", {"forceInput": True, "default": True}), + }, "hidden": { "extra_pnginfo": "EXTRA_PNGINFO", "node_id": "UNIQUE_ID", @@ -50,6 +54,7 @@ def composite(self, **kwargs): # https://blog.miguelgrinberg.com/post/how-to-make-python-wait node_id = kwargs.pop('node_id', None) + imageName = kwargs.get('imageName', "new.png") config = kwargs.get('config', "default") @@ -58,9 +63,17 @@ def composite(self, **kwargs): width = config["width"] height = config["height"] config_node_id = config["node_id"] + onConfigChanged = config["onConfigChanged"] names = config["names"] fabricData = kwargs.get("fabricData") + configChanged = self.configCache != config + # print(configChanged) + # print(config) + # print(self.configCache) + + + self.configCache = config ui = { "test": ("value",), "padding": [padding], @@ -71,6 +84,8 @@ def composite(self, **kwargs): "names": names, "fabricData": [fabricData], "awaited": [self.result], + "configChanged": [configChanged], + "onConfigChanged": [onConfigChanged], } # break and send a message to the gui as if it was "executed" below @@ -78,7 +93,8 @@ def composite(self, **kwargs): PromptServer.instance.send_sync("compositor_init", detail) imageExists = folder_paths.exists_annotated_filepath(imageName) - if imageName == "new.png" or not imageExists: + # block when config changed + if imageName == "new.png" or not imageExists or configChanged: return { "ui": ui, "result": (ExecutionBlocker(None), ExecutionBlocker(None)) @@ -91,7 +107,7 @@ def composite(self, **kwargs): i = i.point(lambda i: i * (1 / 255)) image = i.convert("RGB") image = np.array(image).astype(np.float32) / 255.0 - image = torch.from_numpy(image)[None,] + image = torch.from_numpy(image)[None, ] return { "ui": ui, diff --git a/CompositorConfig.py b/CompositorConfig.py deleted file mode 100644 index 2d1fad1..0000000 --- a/CompositorConfig.py +++ /dev/null @@ -1,200 +0,0 @@ -# author: erosdiffusionai@gmail.com -import nodes -import numpy as np -import base64 -from io import BytesIO -from PIL import Image -import torch -import folder_paths -from server import PromptServer - -MAX_RESOLUTION = nodes.MAX_RESOLUTION - - -# these probably exist elsewhere as utils -def tensor2pil(image): - return Image.fromarray(np.clip(255. 
* image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) - - -# these probably exist elsewhere as utils -def toBase64ImgUrl(img): - bytesIO = BytesIO() - img.save(bytesIO, format="PNG") - img_types = bytesIO.getvalue() - img_base64 = base64.b64encode(img_types) - return f"data:image/png;base64,{img_base64.decode('utf-8')}" - - -class CompositorConfig: - #OUTPUT_NODE = True - NOT_IDEMPOTENT = True - - # @classmethod - # def IS_CHANGED(cls, **kwargs): - # return float("NaN") - - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "width": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 32}), - "height": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 32}), - "padding": ("INT", {"default": 100, "min": 0, "max": MAX_RESOLUTION, "step": 1}), - "capture_on_queue": ("BOOLEAN", {"default": True}), - "pause": ("BOOLEAN", {"default": True}), - "storeTransforms": ("BOOLEAN", {"default": False}), - }, - "optional": { - "image1": ("IMAGE",), - "mask1": ("MASK",), - "image2": ("IMAGE",), - "mask2": ("MASK",), - "image3": ("IMAGE",), - "mask3": ("MASK",), - "image4": ("IMAGE",), - "mask4": ("MASK",), - "image5": ("IMAGE",), - "mask5": ("MASK",), - "image6": ("IMAGE",), - "mask6": ("MASK",), - "image7": ("IMAGE",), - "mask7": ("MASK",), - "image8": ("IMAGE",), - "mask8": ("MASK",), - "use_alignment_controls": ("BOOLEAN", {"forceInput": True}), - - }, - "hidden": { - "prompt": "PROMPT", - "extra_pnginfo": "EXTRA_PNGINFO", - "node_id": "UNIQUE_ID", - }, - } - - RETURN_TYPES = ("COMPOSITOR_CONFIG",) - RETURN_NAMES = ("config",) - - FUNCTION = "configure" - - CATEGORY = "image" - DESCRIPTION = """ -The compositor node -- pass up to 8 images -- optionally pass their masks (invert them) -- masks are automatically applied and internally the compositor is passed an rgba -- use the sizing controls to configure the compositor, it will be resized on run -- set the flag to pause to allow yourself time to build your composition (pause acts on compositor, not the config node) -""" - - def configure(self, **kwargs): - # extract the images - # convert them from tensor to pil and then to base 64 - # send as custom to be able to be used by ui - # finally return the resulting image (the composite "image" is seen as input but it's actually the output) - - # image = kwargs.pop('image', None) - image1 = kwargs.pop('image1', None) - image2 = kwargs.pop('image2', None) - image3 = kwargs.pop('image3', None) - image4 = kwargs.pop('image4', None) - image5 = kwargs.pop('image5', None) - image6 = kwargs.pop('image6', None) - image7 = kwargs.pop('image7', None) - image8 = kwargs.pop('image8', None) - mask1 = kwargs.pop('mask1', None) - mask2 = kwargs.pop('mask2', None) - mask3 = kwargs.pop('mask3', None) - mask4 = kwargs.pop('mask4', None) - mask5 = kwargs.pop('mask5', None) - mask6 = kwargs.pop('mask6', None) - mask7 = kwargs.pop('mask7', None) - mask8 = kwargs.pop('mask8', None) - pause = kwargs.pop('pause', False) - capture_on_queue = kwargs.pop('capture_on_queue', True) - padding = kwargs.pop('padding', 100) - width = kwargs.pop('width', 512) - height = kwargs.pop('height', 512) - node_id = kwargs.pop('node_id', None) - storeTransforms = kwargs.pop('storeTransforms') - use_alignment_controls = kwargs.pop('use_alignment_controls') - - images = [image1, image2, image3, image4, image5, image6, image7, image8, ] - masks = [mask1, mask2, mask3, mask4, mask5, mask6, mask7, mask8, ] - input_images = [] - - # apply the masks to the images if any so that we get a rgba - # then pass the 
rgba in the return value
-
-        for (img, mask) in zip(images, masks):
-            if img is not None:
-                if mask is not None:
-                    # apply the mask and return
-                    masked = self.apply_mask(img, mask)
-                    # self.masked = masked[0]
-
-                    i = tensor2pil(masked[0])
-                    input_images.append(toBase64ImgUrl(i))
-                else:
-                    # no need to apply the mask
-                    i = tensor2pil(img)
-                    input_images.append(toBase64ImgUrl(i))
-            else:
-                # input is None, forward
-                input_images.append(img)
-
-        # this can act as broadcast to another node, in this case it will be received
-        # by the compositor node, where it should be filtered by it's config node id and
-        # discard messages not coming from config
-        # PromptServer.instance.send_sync(
-        #     "compositor.config", {"names": input_images,
-        #                           "config_node_id": node_id,
-        #                           "width": width,
-        #                           "height": height,
-        #                           "padding": padding,
-        #                           "capture_on_queue": capture_on_queue,
-        #                           "pause": pause
-        #                           }
-        # )
-
-        self.ensureEmpty()
-
-        res = {
-            "node_id": node_id,
-            "width": width,
-            "height": height,
-            "padding": padding,
-            "capture_on_queue": capture_on_queue,
-            "pause": pause,
-            # the image names
-            # "images": input_images,
-            "names": input_images,
-            "storeTransforms": storeTransforms,
-            "use_alignment_controls": use_alignment_controls,
-        }
-        print(f"compositor config {node_id} executed")
-        # return (res, self.masked, )
-        return (res,)
-
-    def apply_mask(self, image: torch.Tensor, alpha: torch.Tensor):
-        batch_size = min(len(image), len(alpha))
-        out_images = []
-
-        alpha = 1.0 - resize_mask(alpha, image.shape[1:])
-        for i in range(batch_size):
-            out_images.append(torch.cat((image[i][:, :, :3], alpha[i].unsqueeze(2)), dim=2))
-
-        result = (torch.stack(out_images),)
-        return result
-
-    # ensures empty.png exists
-    def ensureEmpty(self):
-        image = "test_empty.png"
-        if not folder_paths.exists_annotated_filepath(image):
-            print("it does not exist")
-            img = Image.new('RGB', (512,512), 'white')
-            img.save(folder_paths.get_annotated_filepath(image))
-
-
-def resize_mask(mask, shape):
-    return torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])),
-                                           size=(shape[0], shape[1]), mode="bilinear").squeeze(1)
diff --git a/CompositorConfig3.py b/CompositorConfig3.py
index 9c0aabe..a9bcef5 100644
--- a/CompositorConfig3.py
+++ b/CompositorConfig3.py
@@ -3,9 +3,11 @@
 import base64
 from io import BytesIO
 from PIL import Image
-import torch
 import folder_paths
-
+import torch
+import torch.nn.functional as F
+import math
+from comfy.utils import common_upscale
 MAX_RESOLUTION = nodes.MAX_RESOLUTION
@@ -34,6 +36,8 @@ def INPUT_TYPES(cls):
             "width": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 32}),
             "height": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 32}),
             "padding": ("INT", {"default": 100, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
+            "normalizeHeight": ("BOOLEAN", {"default": False}),
+            "onConfigChanged": ("BOOLEAN", {"label_off": "stop", "label_on": "Grab and Continue", "default": False}),
             "invertMask": ("BOOLEAN", {"default": False}),
             "initialized": ("STRING", {"default": ""}),
         },
@@ -104,6 +108,9 @@ def configure(self, **kwargs):
         width = kwargs.pop('width', 512)
         height = kwargs.pop('height', 512)
         invertMask = kwargs.pop('invertMask', False)
+        normalizeHeight = kwargs.pop('normalizeHeight', False)
+        # grabAndContinue, stop
+        onConfigChanged = kwargs.pop('onConfigChanged', False)
         node_id = kwargs.pop('node_id', None)
 
         images = [image1, image2, image3, image4, image5, image6, image7, image8, ]
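# --- editor's note (annotation, not part of the patch) ----------------------
# A minimal sketch of what the values popped above are for: configure() packs
# them into the COMPOSITOR_CONFIG dict it returns, and Compositor3 (see the
# Compositor3.py hunk earlier in this diff) caches that dict and compares it
# with plain `!=` to decide whether to block the run. Values are illustrative.
cached = {"width": 1280, "height": 768, "padding": 110, "onConfigChanged": False}
incoming = {"width": 1280, "height": 768, "padding": 100, "onConfigChanged": False}
config_changed = cached != incoming  # True here: padding differs, so the run blocks
# -----------------------------------------------------------------------------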
@@ -112,10 +119,18 @@ def configure(self, **kwargs):
         # apply the masks to the images if any so that we get a rgba
         # then pass the rgba in the return value
-
         for (img, mask) in zip(images, masks):
             if img is not None:
+
+                if normalizeHeight:
+                    processor = ImageProcessor()
+                    img = processor.scale_image(img, height)
+
                 if mask is not None:
+                    # if normalizeHeight:
+                    #     mask = prepare_mask(mask, foo_is_batch=True)
+                    #     mask = processor.scale_image(mask, height)
+
                     # apply the mask and return
                     masked = self.apply_mask(img, mask, invertMask)
                     # self.masked = masked[0]
@@ -130,7 +145,6 @@ def configure(self, **kwargs):
                 # input is None, forward
                 input_images.append(img)
 
-
         self.ensureEmpty()
 
         res = {
@@ -138,10 +152,12 @@ def configure(self, **kwargs):
             "width": width,
             "height": height,
             "padding": padding,
-            "invertMask": invertMask,
             "names": input_images,
+            "onConfigChanged": onConfigChanged,
+            "normalizeHeight": normalizeHeight,
+            "invertMask": invertMask,
         }
-        print(f"compositor config {node_id} executed")
+        # print(f"compositor config {node_id} executed")
         # return (res, self.masked, )
         return (res,)
@@ -164,11 +180,104 @@ def apply_mask(self, image: torch.Tensor, alpha: torch.Tensor, invertMask=False)
     def ensureEmpty(self):
         image = "test_empty.png"
         if not folder_paths.exists_annotated_filepath(image):
-            print("it does not exist")
-            img = Image.new('RGB', (512,512), 'white')
+            # print("it does not exist")
+            img = Image.new('RGB', (512, 512), 'white')
             img.save(folder_paths.get_annotated_filepath(image))
 
+    def upscale(self, image, upscale_method, side_length: int, side: str, crop):
+        samples = image.movedim(-1, 1)
+
+        size = get_image_size(image)
+
+        width_B = int(size[0])
+        height_B = int(size[1])
+
+        width = width_B
+        height = height_B
+
+        def determineSide(_side: str) -> tuple[int, int]:
+            width, height = 0, 0
+            if _side.lower() == "width":
+                height_ratio = height_B / width_B
+                width = side_length
+                height = height_ratio * width
+            elif _side.lower() == "height":
+                width_ratio = width_B / height_B
+                height = side_length
+                width = width_ratio * height
+            return width, height
+
+        if side.lower() == "longest":
+            if width > height:
+                width, height = determineSide("width")
+            else:
+                width, height = determineSide("height")
+        elif side.lower() == "shortest":
+            if width < height:
+                width, height = determineSide("width")
+            else:
+                width, height = determineSide("height")
+        else:
+            width, height = determineSide(side)
+
+        width = math.ceil(width)
+        height = math.ceil(height)
+
+        scaled = common_upscale(samples, width, height, upscale_method, crop)
+        scaled = scaled.movedim(1, -1)
+        return (scaled,)
+
+
+def get_image_size(IMAGE) -> tuple[int, int]:
+    samples = IMAGE.movedim(-1, 1)
+    size = samples.shape[3], samples.shape[2]
+    return size
+
 
 def resize_mask(mask, shape):
     return torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])),
                                            size=(shape[0], shape[1]), mode="bilinear").squeeze(1)
+
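# --- editor's note (annotation, not part of the patch) ----------------------
# resize_mask above folds a [batch, height, width] MASK into [batch, 1, H, W]
# so F.interpolate can treat it as a batch of single-channel images. A quick
# shape check with illustrative sizes (assumes torch is installed):
import torch
m = torch.rand(2, 64, 48)
resized = torch.nn.functional.interpolate(
    m.reshape((-1, 1, m.shape[-2], m.shape[-1])),
    size=(128, 96), mode="bilinear").squeeze(1)
assert resized.shape == (2, 128, 96)
# -----------------------------------------------------------------------------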
+class ImageProcessor:
+    def scale_image(self, image_tensor, new_height):
+        # Ensure the input tensor is in the format [batch_size, height, width, channels]
+        if image_tensor.ndim != 4:
+            raise ValueError("Expected image tensor to have shape [batch_size, height, width, channels]")
+
+        batch_size, original_height, original_width, channels = image_tensor.shape
+
+        if channels not in (1, 3, 4):
+            raise ValueError("Image tensor must have 1 (grayscale), 3 (RGB), or 4 (RGBA) channels")
+
+        # Calculate the new width to maintain the aspect ratio
+        aspect_ratio = original_width / original_height
+        new_width = int(new_height * aspect_ratio)
+
+        # Permute to match PyTorch's expected format [batch_size, channels, height, width]
+        image_tensor = image_tensor.permute(0, 3, 1, 2)
+
+        # Resize images to the new dimensions (new_height, new_width)
+        resized_images = F.interpolate(image_tensor, size=(new_height, new_width), mode='bilinear', align_corners=False)
+
+        # Permute back to the original format [batch_size, height, width, channels]
+        resized_images = resized_images.permute(0, 2, 3, 1)
+
+        return resized_images
+
+
+def prepare_mask(mask, foo_is_batch):
+    """
+    Prepares the mask tensor to have shape [batch_size, height, width, channels].
+
+    Arguments:
+    mask: Tensor of shape [foo, height, width]
+    foo_is_batch: Bool, True if `foo` represents the batch size, False if it represents the channel.
+    """
+    if foo_is_batch:
+        # `foo` is the batch size: add a channel dimension, [B, H, W] -> [B, H, W, 1]
+        mask = mask.unsqueeze(3)
+    else:
+        # `foo` is the channel dimension: add a batch dim and permute, [C, H, W] -> [1, H, W, C]
+        mask = mask.unsqueeze(0).permute(0, 2, 3, 1)
+
+    return mask
\ No newline at end of file
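# --- editor's note (annotation, not part of the patch) ----------------------
# ImageProcessor.scale_image keeps the aspect ratio by deriving the new width
# from the requested height. A minimal usage sketch with illustrative sizes
# (ComfyUI IMAGE layout is [batch, height, width, channels]; assumes the
# module's own imports above):
import torch
batch = torch.rand(1, 768, 1365, 3)
scaled = ImageProcessor().scale_image(batch, new_height=512)
assert scaled.shape == (1, 512, 910, 3)  # int(512 * 1365 / 768) == 910
# -----------------------------------------------------------------------------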
diff --git a/Alignment.py b/CompositorTools3.py
similarity index 75%
rename from Alignment.py
rename to CompositorTools3.py
index 3c68071..14d2c54 100644
--- a/Alignment.py
+++ b/CompositorTools3.py
@@ -1,4 +1,4 @@
-class Alignment:
+class CompositorTools3:
     @classmethod
     def INPUT_TYPES(cls):
         return {
@@ -8,12 +8,12 @@ def INPUT_TYPES(cls):
         }
 
     RETURN_TYPES = ("BOOLEAN",)
-    RETURN_NAMES = ("composite",)
+    RETURN_NAMES = ("tools",)
     FUNCTION = "run"
 
     CATEGORY = "image"
     DESCRIPTION = """
-frontend communication only with feature flag
+experimental node: frontend communication only with feature flag, needs page reload to fill controls
 """
 
     def run(self, **kwargs):
diff --git a/TransformsOut.py b/CompositorTransformsOut3.py
similarity index 71%
rename from TransformsOut.py
rename to CompositorTransformsOut3.py
index 303f53b..6f0c17a 100644
--- a/TransformsOut.py
+++ b/CompositorTransformsOut3.py
@@ -27,8 +27,8 @@ def INPUT_TYPES(cls):
         },
     }
 
-    RETURN_TYPES = ("INT", "INT", "INT", "INT", "FLOAT")
-    RETURN_NAMES = ("x", "y", "width", "height", "angle")
+    RETURN_TYPES = ("INT", "INT", "INT", "INT", "INT", "INT", "INT", "INT", "INT")
+    RETURN_NAMES = ("x", "y", "width", "height", "angle", "bbox x", "bbox y", "bbox width", "bbox height")
 
     FUNCTION = "run"
     CATEGORY = "image"
@@ -41,19 +41,25 @@ def run(self, **kwargs):
         # print(transforms)
         data = json.loads(transforms)
         padding = data["padding"]
-        # extract transforms
-        # remap as it's 0 based, scale size as the area is final
         t = data["transforms"]
         width = t[channel - 1]["xwidth"] * t[channel - 1]["scaleX"]
         height = t[channel - 1]["xheight"] * t[channel - 1]["scaleY"]
-        angle = t[channel - 1]["angle"]
         # remove the padding as transforms are padding based
         x = t[channel - 1]["left"] - padding
         y = t[channel - 1]["top"] - padding
+        # angle
+        angle = t[channel - 1]["angle"]
+
+        # bounding box out
+        b = data["bboxes"]
+        bwidth = b[channel - 1]["xwidth"]
+        bheight = b[channel - 1]["xheight"]
+        # remove the padding as transforms are padding based
+        bx = b[channel - 1]["left"] - padding
+        by = b[channel - 1]["top"] - padding
 
         if forceInt:
-            return (int(x), int(y), int(width), int(height), int(angle))
+            return (int(x), int(y), int(width), int(height), int(angle), int(bx), int(by), int(bwidth), int(bheight))
         else:
-
-            return (x, y, width, height, angle)
+            return (x, y, width, height, angle, bx, by, bwidth, bheight)
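# --- editor's note (annotation, not part of the patch) ----------------------
# What the transforms-output node computes per channel, assuming the JSON shape
# the compositor emits (padding plus per-image transforms/bboxes; the values
# below are illustrative, not from a real run):
import json
data = json.loads('{"padding": 110, "transforms": [{"left": 246.2, "top": 152.3, '
                  '"scaleX": 0.9, "scaleY": 0.9, "angle": 0.0, "xwidth": 576, "xheight": 768}], '
                  '"bboxes": [{"left": 246.2, "top": 152.3, "xwidth": 694.3, "xheight": 520.7}]}')
t = data["transforms"][0]
x, y = t["left"] - data["padding"], t["top"] - data["padding"]         # un-padded position
width, height = t["xwidth"] * t["scaleX"], t["xheight"] * t["scaleY"]  # scaled size
# -----------------------------------------------------------------------------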
diff --git a/README.md b/README.md
index 1ecbc0c..83e7341 100644
--- a/README.md
+++ b/README.md
@@ -18,33 +18,45 @@ With the Compositor Node you can:
 - Flip an image via negative scaling (drag a corner towards and past the inside of the image)
 - Mask your images quickly
 - Precisely move selections with keyboard
-- Use the information about tranforms in other nodes (like conditioning set area)
+- Use the information about transforms in other nodes (like conditioning set area)
 
 ## Changelog
 
+- v **3.1.0** - 20.09.2024
+  - _new feature_: **onConfigChange action toggle** when you change the configuration (or any of the attached nodes) you can now choose if:
+    - you want to stop the flow to allow edits
+    - or you want to grab a capture and continue the flow
+  - _enhancement_: output transforms now give you back the angle and bounding box coordinates
+  - _enhancement_: you can force transform output values to be integers (as some nodes require it)
+  - _new feature_: **normalize height** when this is activated your images will all be the same height as the canvas (this can lower image quality)
+  - _new feature_ (experimental and limited): **Tools**! it allows controlling some aspects of the compositor.
+    - **precise selection toggle** ignore transparent pixels and select the first image below the mouse
+    - **center selected** puts the selected images in the center of the canvas
+    - **reset transforms** zeroes out the changes to images, bringing them to their original size, angle and 0,0 location (top left)
+    - limitations: saving discards the selection (and saving happens on mouse out),
+      so you might need to re-select to use centering and reset
+
+![the compositor node](/assets/v3.1.PNG)
+
 - v **3.0.8** - 18.09.2024
-    - _new feature_: **invert mask** option. the implementation of mask was not correct. now it's possible to invert the mask via toggle.
-    - _new feature_: **angle output** the angle of rotation is now accessible in the output (and soon the bounding box x,y, width and height).
-    - _bugfix_: **fix cut images on swap due to wrongly preserved width and height**
-    - _new feature: **added force int** to allow the outputs to be used with set area conditioning (that requires int)
+  - _new feature_: **invert mask** option. the implementation of mask was not correct. now it's possible to invert the mask via toggle.
+  - _new feature_: **angle output** the angle of rotation is now accessible in the output (and soon the bounding box x,y, width and height).
+  - _bugfix_: **fix cut images on swap due to wrongly preserved width and height**
+  - _new feature_: **added force int** to allow the outputs to be used with set area conditioning (that requires int)
 - v **3.0.4** - 18.09.2024 - **bugfix**: the width and height stored in transforms were swapped and the output node would report them incorrectly. thanks @sky958958 for spotting it
 - v **3.0.2** - 17.09.2024 - **friendly transforms** for area prompting!
   With the goal of being able to do regional area prompting, now you can easily output each input x,y coordinates and their scaled width and height with the help of the new **Transform Output** node!
-  select the channel corresponding the input and the node will output the values for you.
-  - _enhancement_: a **new node** outputs x,y,width,height othe images into a convenient node to be attached to the transforms output
-  - _enhancement_: save and restore skew from transform (now you can distort your images to help fake perspective)
+  select the channel corresponding to the input and the node will output the values for you.
+  - _enhancement_: a **new node** outputs the x, y, width, height of the images into a convenient node to be attached to the transforms output
+  - _enhancement_: save and restore skew from transform (now you can distort your images to help fake perspective)
 - v **3.0.0** - 16.09.2024 - this release is a full rewrite of the code and fixes:
-  - issues #45 , #34, #18
-    also, and adds **new features**:
-  - _enhancement_: **simplified control panel** (cature on queue, save transform, pause are removed as not needed anymore)
-  - _new feature_: **automatic upload** of the output **on mouse out** of the canvas area (no need to click capture)
-  - _new feature_: **flash on save** (once the image is uplodaded the composition area green border briefly flashes in orange)
-  - _new feature_: **preliminary work for optional control panels** (they will contain alignment controls, and other tools)
-  - _enhancement_: enqueue with **continue**, on the first run, if necessary information is missing (like output) the flow will stop, make your composition, and click continue to re-enqueue the flash finishes.
-
-
-![the compositor node](/assets/v3.PNG)
-
+  - issues #45 , #34, #18
+    also, and adds **new features**:
+  - _enhancement_: **simplified control panel** (capture on queue, save transform, pause are removed as not needed anymore)
+  - _new feature_: **automatic upload** of the output **on mouse out** of the canvas area (no need to click capture)
+  - _new feature_: **flash on save** (once the image is uploaded the composition area green border briefly flashes in orange)
+  - _new feature_: **preliminary work for optional control panels** (they will contain alignment controls, and other tools)
+  - _enhancement_: enqueue with **continue**: on the first run, if necessary information is missing (like the output) the flow will stop; make your composition, then click continue to re-enqueue once the flash finishes.
 - v **2.0.4** - 06.09.2024 - _enhancement_: You can now **scale the selected image via mouse wheel**!
 - v **2.0.1** - 05.09.2024 - **V2 is HERE!**
   - _enhancement_: An all **new widget layout** with maximized working area and less clutter
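# --- editor's note (annotation, not part of the patch) ----------------------
# On the invert-mask toggle mentioned in the changelog entries around here:
# ComfyUI MASKs are float tensors in [0, 1] shaped [batch, height, width], so
# inversion amounts to the complement (a sketch, not the node's exact code):
import torch
mask = torch.rand(1, 768, 1280)
inverted = 1.0 - mask
# -----------------------------------------------------------------------------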
@@ -52,7 +64,7 @@ With the Compositor Node you can:
   - _enhancement_: More control! it's now possible to select an image or group and then "**alt+drag**" to **center scale and rotate**
   - _new feature_: More control! it's now possible to **nudge a selection** by one pixel by using keyboard arrows, and while holding shift the movement is 10px! pixel perfect alignments!
   - _new feature_: the node now **remembers the transforms** you have applied, on the new run it will re-apply the stored transforms (storing transforms is controlled in the config)
-  - _new feature_: **masks are here**! you can now pass masks and they will be applied automatically! (depending on the results you might want still to invert them)
+  - _new feature_: **masks are here**! you can now pass masks, and they will be applied automatically! (depending on the results you might still want to invert them)
   - _regression_: a bit annoying but is_changed is not being observed so flows are re-triggered even on fixed
   - _regression_: img in workflow saved is not visible anymore
 - V **1.0.9** - 30.08.2024 - Huge refactoring!
@@ -61,7 +73,7 @@ With the Compositor Node you can:
   - _bugfix_: when **saving a png with the workflow** the **compositor content is now visible** (will not be restored...yet)
   - _enhancement_: the node **does not re-trigger** the execution of the flow if the image is not changed
   - _performance_: the node is **now more efficient** and correctly implements the is_changed check via **checksum**, avoiding re-triggering flows downstream if the composition has not changed
-  - _mantainability_: the node is now refactored and better engineered, with a lot of comments. could be a good use case for those learning to code comfy extensions.
+  - _maintainability_: the node is now refactored and better engineered, with a lot of comments. could be a good use case for those learning to code comfy extensions.
 - V **1.0.8** - 28.08.2024 - _new feature_: **safe area indication** - a green border is overlaid on top of the composition to indicate the exported area
 - V **1.0.7** - 28.08.2024 - _new feature_: **preserve stacking order**. when selecting a node, its z-order is preserved, image1 being the background/farthest and image8 the foreground/closest.
   - the first connected node will be the most distant from camera (background)
@@ -109,7 +121,7 @@ and set the security to weak (at your risk)
 - the painter node is great and works better and does a million things more, but it misses some of these features.
 - continue compositing your image like caveman using pixel coordinates
 - well...photoshop and import via million clicks
-- use krita or photoshop integrations with comfy (inversion of control)
+- use Krita or photoshop integrations with comfy (inversion of control)
 
 ### How to use
 
@@ -132,7 +144,8 @@ and set the security to weak (at your risk)
 - anything in the dark gray area is rendered
 - use up to 8 images, optionally pass masks
 - background will be at first slot on top
-- in v 1.0.9 and later the z-index is fixed, reconnect an input or move stuff around. it should be simpler to handle depth stacking
+- in v 1.0.9 and later the z-index is fixed, reconnect an input or move stuff around.
+  it should be simpler to handle depth stacking
 
 ### Advanced
 
@@ -142,7 +155,8 @@ and set the security to weak (at your risk)
 - drag selected to move (can also rescale the group)
 - shift click to select multiple
 - shift click to unselect selected in a group select
-- click "capture" to see what is the real order in memory before running (after the first run where images are generated/associated to the editor)
+- if you choose to stop on config change, hit continue to re-enqueue (capture happens on mouse out of the composition)
+- if you choose grabAndContinue, capture and re-enqueue happen automatically
 - scroll up or down to scale a single image selection
 
 ### Supporting nodes I use with this one
@@ -158,12 +172,13 @@ Just throw the worst possible images you find on the internet or that you can ge
 and you will get:
 ![v3.PNG](assets%2Fv3.PNG)
 
-with the [V3 workflow in json format](assets%2Fv3.json) you are in pixel perfect positioning control of your scene and content !
-images to replicate are in the assets folder. after composition is set move the images a bit and continue.
+with the [V3 workflow in json format](assets%2Fv3.1.json) you are in pixel perfect positioning control of your scene and content!
+images to replicate are in the assets folder.
 
 ### Final words and limitations
 
 - **limitation** you need to run the flow once for the compositor to show images
+- **tools** new tools only show up on load, so if you add them, reload the page in the browser
 - **known issue**: the compositing is not scaled, so if you want a 5k image well... I hope you have a big enough monitor, but it's not (yet) the goal of this node...
 - upcoming **known issue** the new tooling will require graph changed events to set up frontend-only widgets, so reload the workflow if the GUI is not coming up in the tool node
diff --git a/__init__.py b/__init__.py
index f3138a8..681f2fd 100644
--- a/__init__.py
+++ b/__init__.py
@@ -1,28 +1,21 @@
 # author: erosdiffusionai@gmail.com
-# from .nodes_compositor import Compositor
-# from .CompositorConfig import CompositorConfig
-# from .CompositorConfig import CompositorConfig
 from .Compositor3 import Compositor3
 from .CompositorConfig3 import CompositorConfig3
-from .Alignment import Alignment
-from .TransformsOut import CompositorTransformsOutV3
+from .CompositorTools3 import CompositorTools3
+from .CompositorTransformsOut3 import CompositorTransformsOutV3
 
 NODE_CLASS_MAPPINGS = {
-    # "Compositor": Compositor,
-    # "CompositorConfig": CompositorConfig,
     "Compositor3": Compositor3,
     "CompositorConfig3": CompositorConfig3,
-    "Alignment": Alignment,
+    "CompositorTools3": CompositorTools3,
    "CompositorTransformsOutV3": CompositorTransformsOutV3,
 }
 
 NODE_DISPLAY_NAME_MAPPINGS = {
-    #"Compositor": "Compositor",
-    #"CompositorConfig": "CompositorConfig",
-    "Compositor3": "πŸ’œ Compositor V3",
-    "CompositorConfig3": "πŸ’œ Compositor Config V3",
-    "Alignment": "Alignment",
-    "CompositorTransformsOutV3": "πŸ’œ Compositor Transforms Output V3",
+    "Compositor3": "πŸ’œ Compositor (V3)",
+    "CompositorConfig3": "πŸ’œ Compositor Config (V3)",
+    "CompositorTools3": "πŸ’œ Compositor Tools (V3) Experimental",
+    "CompositorTransformsOutV3": "πŸ’œ Compositor Transforms Output (V3)",
 }
 
 EXTENSION_NAME = "Enrico"
diff --git a/assets/v3.2.PNG b/assets/v3.0.2.PNG
similarity index 100%
rename from assets/v3.2.PNG
rename to assets/v3.0.2.PNG
diff --git a/assets/v3.1.PNG b/assets/v3.1.PNG
new file mode 100644
index 0000000..18c68b0
Binary files /dev/null and b/assets/v3.1.PNG differ
diff --git a/assets/v3.1.json b/assets/v3.1.json
new file mode 100644
index 0000000..9c91ef9
--- /dev/null
+++ b/assets/v3.1.json
@@ -0,0 +1,1304 @@
+{
+  "last_node_id": 352,
+  "last_link_id": 823,
+  "nodes": [
+    {
+      "id": 254,
+      "type": "LoadImage",
+      "pos": {
+        "0": -50,
+        "1": -850
+      },
+      "size": {
+        "0": 315,
+        "1": 314
+      },
+      "flags": {},
+      "order": 0,
+      "mode": 0,
+      "inputs": [],
+      "outputs": [
+        {
+          "name": "IMAGE",
+          "type": "IMAGE",
+          "links": [
+            819
+          ],
+          "slot_index": 0,
+          "shape": 3
+        },
+        {
+          "name": "MASK",
+          "type": "MASK",
+          "links": null,
+          "shape": 3
+        }
+      ],
+      "properties": {
+        "Node name for S&R": "LoadImage"
+      },
+      "widgets_values": [
+        "warrior.jpg",
+        "image"
+      ]
+    },
+    {
+      "id": 350,
+      "type": "LoadImage",
+      "pos": {
+        "0": -50,
+        "1": -1200
+      },
+      "size": {
+        "0": 315,
+        "1": 314
+      },
+      "flags": {},
+      "order": 1,
+      "mode": 0,
+      "inputs": [],
+      "outputs": [
+        {
+          "name": "IMAGE",
+          "type": "IMAGE",
+          "links": [
+            821
+          ],
+          "slot_index": 0,
+          "shape": 3
+        },
+        {
+          "name": "MASK",
+          "type": "MASK",
+          "links": null,
+          "shape": 3
+        }
+      ],
+      "properties": {
+        "Node name for S&R":
"LoadImage" + }, + "widgets_values": [ + "forest.jpg", + "image" + ] + }, + { + "id": 351, + "type": "Image Remove Background (rembg)", + "pos": { + "0": -50, + "1": -450 + }, + "size": [ + 260.3999938964844, + 28.730128519557297 + ], + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 819 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 820 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Image Remove Background (rembg)" + } + }, + { + "id": 120, + "type": "CheckpointLoaderSimple", + "pos": { + "0": 300, + "1": -1200 + }, + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 822 + ], + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 224, + 225 + ], + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": [ + 227, + 230 + ], + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "dreamshaper_8.safetensors" + ] + }, + { + "id": 122, + "type": "CLIPTextEncode", + "pos": { + "0": 700, + "1": -1200 + }, + "size": { + "0": 422.84503173828125, + "1": 164.31304931640625 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 224 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 410 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "cinematic photograph of a viking warrior in a forest and a black wolf AND color grading and film grain AND cinematic AND 4K AND HDR" + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 345, + "type": "CompositorConfig3", + "pos": { + "0": 300, + "1": -800 + }, + "size": { + "0": 315, + "1": 502 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "image1", + "type": "IMAGE", + "link": 821 + }, + { + "name": "mask1", + "type": "MASK", + "link": null + }, + { + "name": "image2", + "type": "IMAGE", + "link": 820 + }, + { + "name": "mask2", + "type": "MASK", + "link": null + }, + { + "name": "image3", + "type": "IMAGE", + "link": 818 + }, + { + "name": "mask3", + "type": "MASK", + "link": null + }, + { + "name": "image4", + "type": "IMAGE", + "link": null + }, + { + "name": "mask4", + "type": "MASK", + "link": null + }, + { + "name": "image5", + "type": "IMAGE", + "link": null + }, + { + "name": "mask5", + "type": "MASK", + "link": null + }, + { + "name": "image6", + "type": "IMAGE", + "link": null + }, + { + "name": "mask6", + "type": "MASK", + "link": null + }, + { + "name": "image7", + "type": "IMAGE", + "link": null + }, + { + "name": "mask7", + "type": "MASK", + "link": null + }, + { + "name": "image8", + "type": "IMAGE", + "link": null + }, + { + "name": "mask8", + "type": "MASK", + "link": null + } + ], + "outputs": [ + { + "name": "config", + "type": "COMPOSITOR_CONFIG", + "links": [ + 805 + ], + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CompositorConfig3" + }, + "widgets_values": [ + 1280, + 768, + 110, + true, + false, + false, + 1726855452501 + ] + }, + { + "id": 286, + "type": "EmptyImage", + "pos": { + "0": 300, + "1": -1050 + }, + "size": { + "0": 315, + "1": 130 + }, + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [], + 
"slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "EmptyImage" + }, + "widgets_values": [ + 512, + 512, + 1, + 1680 + ] + }, + { + "id": 304, + "type": "LoadImage", + "pos": { + "0": -50, + "1": -350 + }, + "size": { + "0": 315, + "1": 314 + }, + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 816 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "MASK", + "type": "MASK", + "links": [], + "slot_index": 1, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "wolf.jpg", + "image" + ] + }, + { + "id": 197, + "type": "AIO_Preprocessor", + "pos": { + "0": 700, + "1": -950 + }, + "size": [ + 428.3140313976878, + 82 + ], + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 672 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 425, + 426 + ], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "AIO_Preprocessor" + }, + "widgets_values": [ + "DepthAnythingV2Preprocessor", + 512 + ] + }, + { + "id": 123, + "type": "CLIPTextEncode", + "pos": { + "0": 1150, + "1": -1200 + }, + "size": [ + 417.50327552934823, + 154.69677890005437 + ], + "flags": { + "collapsed": false + }, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 225 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 411 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "lifeless, horror, painting, cgi, illustration, low_quality, blurry, vampire, unrealistic, drawing, text, watermark, bad_quality" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 194, + "type": "ControlNetLoader", + "pos": { + "0": 1150, + "1": -950 + }, + "size": [ + 459.9102911854518, + 58 + ], + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "CONTROL_NET", + "type": "CONTROL_NET", + "links": [ + 423 + ], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "ControlNetLoader" + }, + "widgets_values": [ + "control_v11f1p_sd15_depth_fp16.safetensors" + ] + }, + { + "id": 298, + "type": "ImageScale", + "pos": { + "0": 1600, + "1": -1200 + }, + "size": { + "0": 315, + "1": 130 + }, + "flags": {}, + "order": 15, + "mode": 4, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 803 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 672, + 673 + ], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "ImageScale" + }, + "widgets_values": [ + "nearest-exact", + 1024, + 1536, + "disabled" + ] + }, + { + "id": 198, + "type": "PreviewImage", + "pos": { + "0": 1950, + "1": -1200 + }, + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 426 + } + ], + "outputs": [], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 126, + "type": "VAEEncode", + "pos": { + "0": 2200, + "1": -1200 + }, + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "pixels", + "type": "IMAGE", + "link": 673 + }, + { + "name": "vae", + "type": "VAE", + "link": 230 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 231 + ], + 
"slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VAEEncode" + } + }, + { + "id": 124, + "type": "VAEDecode", + "pos": { + "0": 2200, + "1": -1100 + }, + "size": { + "0": 140, + "1": 46 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 226 + }, + { + "name": "vae", + "type": "VAE", + "link": 227 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 670 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 193, + "type": "ControlNetApplyAdvanced", + "pos": { + "0": 2250, + "1": -450 + }, + "size": [ + 240.27142998000954, + 166 + ], + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "positive", + "type": "CONDITIONING", + "link": 410 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 411 + }, + { + "name": "control_net", + "type": "CONTROL_NET", + "link": 423 + }, + { + "name": "image", + "type": "IMAGE", + "link": 425 + } + ], + "outputs": [ + { + "name": "positive", + "type": "CONDITIONING", + "links": [ + 413 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "negative", + "type": "CONDITIONING", + "links": [ + 414 + ], + "slot_index": 1, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "ControlNetApplyAdvanced" + }, + "widgets_values": [ + 0.4, + 0, + 0.9 + ] + }, + { + "id": 344, + "type": "Compositor3", + "pos": { + "0": 700, + "1": -800 + }, + "size": [ + 1521, + 1079 + ], + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "config", + "type": "COMPOSITOR_CONFIG", + "link": 805 + }, + { + "name": "tools", + "type": "BOOLEAN", + "link": 807, + "widget": { + "name": "tools" + } + } + ], + "outputs": [ + { + "name": "transforms", + "type": "STRING", + "links": [ + 806 + ], + "shape": 3 + }, + { + "name": "image", + "type": "IMAGE", + "links": [ + 803 + ], + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Compositor3" + }, + "widgets_values": [ + 
"{\"width\":1280,\"height\":768,\"padding\":110,\"transforms\":[{\"left\":67.29333589365456,\"top\":117.9468021136131,\"scaleX\":0.9909220644610665,\"scaleY\":0.9909220644610665,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"left\",\"originY\":\"top\",\"xwidth\":1365,\"xheight\":768,\"skewY\":0,\"skewX\":0},{\"left\":246.16651289095125,\"top\":152.3262279478957,\"scaleX\":0.9039852323783351,\"scaleY\":0.9039852323783351,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"left\",\"originY\":\"top\",\"xwidth\":576,\"xheight\":768,\"skewY\":0,\"skewX\":0},{\"left\":667.8645917323958,\"top\":442.16978259209634,\"scaleX\":0.6638480948156427,\"scaleY\":0.6270438236132133,\"angle\":0,\"flipX\":true,\"flipY\":false,\"originX\":\"left\",\"originY\":\"top\",\"xwidth\":1152,\"xheight\":768,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"left\",\"originY\":\"top\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"left\",\"originY\":\"top\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"left\",\"originY\":\"top\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"left\",\"originY\":\"top\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"left\",\"originY\":\"top\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0}],\"bboxes\":[{\"left\":67.29333589365456,\"top\":117.9468021136131,\"xwidth\":761.028145506099,\"xheight\":1352.6086179893557},{\"left\":246.16651289095125,\"top\":152.3262279478957,\"xwidth\":694.2606584665614,\"xheight\":520.695493849921},{\"left\":667.8645917323958,\"top\":442.16978259209634,\"xwidth\":481.56965653494785,\"xheight\":764.7530052276205},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0}]}", + "compositor/1726856927860.png [temp]", + true, + null, + "continue" + ] + }, + { + "id": 347, + "type": "CompositorTools3", + "pos": { + "0": 300, + "1": -250 + }, + "size": { + "0": 310.79998779296875, + "1": 106 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "tools", + "type": "BOOLEAN", + "links": [ + 807 + ], + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CompositorTools3" + } + }, + { + "id": 349, + "type": "Image Remove Background (rembg)", + "pos": { + "0": -50, + "1": 50 + }, + "size": [ + 260.3999938964844, + 26.64310432639809 + ], + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 816 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 818 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Image Remove Background (rembg)" + } + }, + { + "id": 119, + "type": "KSampler", + "pos": { + "0": 2250, + "1": -800 + }, + "size": [ + 256.93020611648535, + 262 + ], + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 823 + }, + { 
+ "name": "positive", + "type": "CONDITIONING", + "link": 413 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 414 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 231 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 226 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 977862159117587, + "fixed", + 35, + 4, + "deis", + "beta", + 1 + ] + }, + { + "id": 352, + "type": "PerturbedAttentionGuidance", + "pos": { + "0": 2250, + "1": -200 + }, + "size": [ + 218.39999389648438, + 58 + ], + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 822 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 823 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "PerturbedAttentionGuidance" + }, + "widgets_values": [ + 3 + ] + }, + { + "id": 297, + "type": "PreviewImage", + "pos": { + "0": 2550, + "1": -700 + }, + "size": [ + 927.4722345560549, + 825.2888724063173 + ], + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 670 + } + ], + "outputs": [], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 346, + "type": "CompositorTransformsOutV3", + "pos": { + "0": 2550, + "1": -1200 + }, + "size": { + "0": 453.5999755859375, + "1": 266 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "transforms", + "type": "STRING", + "link": 806, + "widget": { + "name": "transforms" + } + } + ], + "outputs": [ + { + "name": "x", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "y", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "width", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "height", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "angle", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "bbox x", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "bbox y", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "bbox width", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "bbox height", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CompositorTransformsOutV3" + }, + "widgets_values": [ + "", + 1, + true + ] + } + ], + "links": [ + [ + 224, + 120, + 1, + 122, + 0, + "CLIP" + ], + [ + 225, + 120, + 1, + 123, + 0, + "CLIP" + ], + [ + 226, + 119, + 0, + 124, + 0, + "LATENT" + ], + [ + 227, + 120, + 2, + 124, + 1, + "VAE" + ], + [ + 230, + 120, + 2, + 126, + 1, + "VAE" + ], + [ + 231, + 126, + 0, + 119, + 3, + "LATENT" + ], + [ + 410, + 122, + 0, + 193, + 0, + "CONDITIONING" + ], + [ + 411, + 123, + 0, + 193, + 1, + "CONDITIONING" + ], + [ + 413, + 193, + 0, + 119, + 1, + "CONDITIONING" + ], + [ + 414, + 193, + 1, + 119, + 2, + "CONDITIONING" + ], + [ + 423, + 194, + 0, + 193, + 2, + "CONTROL_NET" + ], + [ + 425, + 197, + 0, + 193, + 3, + "IMAGE" + ], + [ + 426, + 197, + 0, + 198, + 0, + "IMAGE" + ], + [ + 670, + 124, + 0, + 297, + 0, + "IMAGE" + ], + [ + 672, + 298, + 0, + 197, + 0, + "IMAGE" + ], + [ + 673, + 298, + 0, + 126, + 0, + "IMAGE" + ], + [ + 803, + 344, + 1, + 298, + 0, + "IMAGE" + ], + [ + 805, + 345, + 0, + 344, + 0, + "COMPOSITOR_CONFIG" + ], + [ + 806, + 344, + 0, + 346, + 0, + "STRING" + ], + [ + 807, + 347, + 0, + 344, + 1, + "BOOLEAN" + ], + [ + 816, + 304, + 0, + 349, + 
0, + "IMAGE" + ], + [ + 818, + 349, + 0, + 345, + 4, + "IMAGE" + ], + [ + 819, + 254, + 0, + 351, + 0, + "IMAGE" + ], + [ + 820, + 351, + 0, + 345, + 2, + "IMAGE" + ], + [ + 821, + 350, + 0, + 345, + 0, + "IMAGE" + ], + [ + 822, + 120, + 0, + 352, + 0, + "MODEL" + ], + [ + 823, + 352, + 0, + 119, + 0, + "MODEL" + ] + ], + "groups": [], + "config": {}, + "extra": { + "ds": { + "scale": 0.6588450000000085, + "offset": [ + 236.6341619559252, + 1311.8220381206384 + ] + }, + "groupNodes": {} + }, + "version": 0.4 +} \ No newline at end of file diff --git a/assets/workflows/v3.1.0_multiple_instances_with_lettering.json b/assets/workflows/v3.1.0_multiple_instances_with_lettering.json new file mode 100644 index 0000000..9fd9a93 --- /dev/null +++ b/assets/workflows/v3.1.0_multiple_instances_with_lettering.json @@ -0,0 +1,1630 @@ +{ + "last_node_id": 360, + "last_link_id": 831, + "nodes": [ + { + "id": 254, + "type": "LoadImage", + "pos": { + "0": -50, + "1": -850 + }, + "size": { + "0": 315, + "1": 314 + }, + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 819 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "warrior.jpg", + "image" + ] + }, + { + "id": 350, + "type": "LoadImage", + "pos": { + "0": -50, + "1": -1200 + }, + "size": { + "0": 315, + "1": 314 + }, + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 821 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "forest.jpg", + "image" + ] + }, + { + "id": 351, + "type": "Image Remove Background (rembg)", + "pos": { + "0": -50, + "1": -450 + }, + "size": { + "0": 260.3999938964844, + "1": 28.73012924194336 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 819 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 820 + ], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Image Remove Background (rembg)" + } + }, + { + "id": 120, + "type": "CheckpointLoaderSimple", + "pos": { + "0": 300, + "1": -1200 + }, + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 822 + ], + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 224, + 225 + ], + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": [ + 227, + 230 + ], + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "dreamshaper_8.safetensors" + ] + }, + { + "id": 286, + "type": "EmptyImage", + "pos": { + "0": 300, + "1": -1050 + }, + "size": { + "0": 315, + "1": 130 + }, + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "EmptyImage" + }, + "widgets_values": [ + 512, + 512, + 1, + 1680 + ] + }, + { + "id": 304, + "type": "LoadImage", + "pos": { + "0": -50, + "1": -350 + }, + "size": { + "0": 315, + "1": 314 + }, + "flags": {}, + "order": 4, + 
"mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 816 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "MASK", + "type": "MASK", + "links": [], + "slot_index": 1, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "wolf.jpg", + "image" + ] + }, + { + "id": 197, + "type": "AIO_Preprocessor", + "pos": { + "0": 700, + "1": -950 + }, + "size": { + "0": 428.31402587890625, + "1": 82 + }, + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 672 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 425, + 426 + ], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "AIO_Preprocessor" + }, + "widgets_values": [ + "DepthAnythingV2Preprocessor", + 512 + ] + }, + { + "id": 123, + "type": "CLIPTextEncode", + "pos": { + "0": 1150, + "1": -1200 + }, + "size": { + "0": 417.5032653808594, + "1": 154.69677734375 + }, + "flags": { + "collapsed": false + }, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 225 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 411 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "lifeless, horror, painting, cgi, illustration, low_quality, blurry, vampire, unrealistic, drawing, text, watermark, bad_quality" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 194, + "type": "ControlNetLoader", + "pos": { + "0": 1150, + "1": -950 + }, + "size": { + "0": 459.9102783203125, + "1": 58 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "CONTROL_NET", + "type": "CONTROL_NET", + "links": [ + 423 + ], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "ControlNetLoader" + }, + "widgets_values": [ + "control_v11f1p_sd15_depth_fp16.safetensors" + ] + }, + { + "id": 298, + "type": "ImageScale", + "pos": { + "0": 1600, + "1": -1200 + }, + "size": { + "0": 315, + "1": 130 + }, + "flags": {}, + "order": 17, + "mode": 4, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 803 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 672, + 673 + ], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "ImageScale" + }, + "widgets_values": [ + "nearest-exact", + 1024, + 1536, + "disabled" + ] + }, + { + "id": 198, + "type": "PreviewImage", + "pos": { + "0": 1950, + "1": -1200 + }, + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 426 + } + ], + "outputs": [], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 126, + "type": "VAEEncode", + "pos": { + "0": 2200, + "1": -1200 + }, + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "pixels", + "type": "IMAGE", + "link": 673 + }, + { + "name": "vae", + "type": "VAE", + "link": 230 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 231 + ], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VAEEncode" + } + }, + { + "id": 349, + "type": "Image Remove Background (rembg)", + "pos": { + "0": -50, + "1": 50 + }, + "size": { + "0": 260.3999938964844, + "1": 26.643104553222656 + }, + "flags": {}, 
+ "order": 13, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 816 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 818 + ], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Image Remove Background (rembg)" + } + }, + { + "id": 352, + "type": "PerturbedAttentionGuidance", + "pos": { + "0": 2250, + "1": -200 + }, + "size": { + "0": 218.39999389648438, + "1": 58 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 822 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 823 + ], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "PerturbedAttentionGuidance" + }, + "widgets_values": [ + 3 + ] + }, + { + "id": 346, + "type": "CompositorTransformsOutV3", + "pos": { + "0": 2550, + "1": -1200 + }, + "size": { + "0": 453.5999755859375, + "1": 266 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "transforms", + "type": "STRING", + "link": 806, + "widget": { + "name": "transforms" + } + } + ], + "outputs": [ + { + "name": "x", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "y", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "width", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "height", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "angle", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "bbox x", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "bbox y", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "bbox width", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "bbox height", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CompositorTransformsOutV3" + }, + "widgets_values": [ + "", + 1, + true + ] + }, + { + "id": 119, + "type": "KSampler", + "pos": { + "0": 2250, + "1": -800 + }, + "size": { + "0": 256.9302062988281, + "1": 262 + }, + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 823 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 413 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 414 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 231 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 226 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 977862159117587, + "fixed", + 35, + 4.5, + "deis", + "beta", + 1 + ] + }, + { + "id": 297, + "type": "PreviewImage", + "pos": { + "0": 2550, + "1": -750 + }, + "size": { + "0": 1300, + "1": 1050 + }, + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 670 + } + ], + "outputs": [], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 122, + "type": "CLIPTextEncode", + "pos": { + "0": 700, + "1": -1200 + }, + "size": { + "0": 422.84503173828125, + "1": 164.31304931640625 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 224 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 410 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "cinematic photograph of a viking warrior 
in a forest AMD a black wolf AND color grading and film grain AND cinematic AND 4K AND HDR" + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 193, + "type": "ControlNetApplyAdvanced", + "pos": { + "0": 2250, + "1": -450 + }, + "size": { + "0": 240.27142333984375, + "1": 166 + }, + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "positive", + "type": "CONDITIONING", + "link": 410 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 411 + }, + { + "name": "control_net", + "type": "CONTROL_NET", + "link": 423 + }, + { + "name": "image", + "type": "IMAGE", + "link": 425 + } + ], + "outputs": [ + { + "name": "positive", + "type": "CONDITIONING", + "links": [ + 413 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "negative", + "type": "CONDITIONING", + "links": [ + 414 + ], + "slot_index": 1, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "ControlNetApplyAdvanced" + }, + "widgets_values": [ + 0.5, + 0, + 0.8300000000000001 + ] + }, + { + "id": 347, + "type": "CompositorTools3", + "pos": { + "0": 300, + "1": -250 + }, + "size": { + "0": 310.79998779296875, + "1": 106 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "tools", + "type": "BOOLEAN", + "links": [ + 807 + ], + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CompositorTools3" + } + }, + { + "id": 344, + "type": "Compositor3", + "pos": { + "0": 700, + "1": -800 + }, + "size": [ + 1521, + 1079 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "config", + "type": "COMPOSITOR_CONFIG", + "link": 805 + }, + { + "name": "tools", + "type": "BOOLEAN", + "link": 807, + "widget": { + "name": "tools" + } + } + ], + "outputs": [ + { + "name": "transforms", + "type": "STRING", + "links": [ + 806 + ], + "shape": 3 + }, + { + "name": "image", + "type": "IMAGE", + "links": [ + 803 + ], + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Compositor3" + }, + "widgets_values": [ + 
"{\"width\":1280,\"height\":768,\"padding\":110,\"transforms\":[{\"left\":764.4231481843307,\"top\":495.35725483911654,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":1365,\"xheight\":768,\"skewY\":0,\"skewX\":0},{\"left\":487.1252625219706,\"top\":504.49345571201695,\"scaleX\":0.9474614514269661,\"scaleY\":0.9474614514269661,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":576,\"xheight\":768,\"skewY\":0,\"skewX\":0},{\"left\":932.9173507499621,\"top\":679.8975765515524,\"scaleX\":0.7098708725895773,\"scaleY\":0.7098708725895773,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":1152,\"xheight\":768,\"skewY\":0,\"skewX\":0},{\"left\":1431.0722293068018,\"top\":1018.5045283243194,\"scaleX\":0.4964347777779789,\"scaleY\":0.4964347777779789,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0}],\"bboxes\":[{\"left\":81.92314818433067,\"top\":111.35725483911654,\"xwidth\":768,\"xheight\":1365},{\"left\":214.2563645110044,\"top\":140.66825836406196,\"xwidth\":727.6503946959099,\"xheight\":545.7377960219324},{\"left\":524.0317281383657,\"top\":407.30716147715475,\"xwidth\":545.1808301487954,\"xheight\":817.7712452231929},{\"left\":1431.0722293068018,\"top\":1018.5045283243194,\"xwidth\":0,\"xheight\":0},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0}]}", + "compositor/1726858015532.png [temp]", + true, + null, + "continue" + ] + }, + { + "id": 124, + "type": "VAEDecode", + "pos": { + "0": 2200, + "1": -1100 + }, + "size": { + "0": 140, + "1": 46 + }, + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 226 + }, + { + "name": "vae", + "type": "VAE", + "link": 227 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 670, + 826 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 345, + "type": "CompositorConfig3", + "pos": { + "0": 300, + "1": -800 + }, + "size": { + "0": 315, + "1": 502 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "image1", + "type": "IMAGE", + "link": 821 + }, + { + "name": "mask1", + "type": "MASK", + "link": null + }, + { + "name": "image2", + "type": "IMAGE", + "link": 820 + }, + { + "name": "mask2", + "type": "MASK", + "link": null + }, + { + "name": "image3", + "type": "IMAGE", + "link": 818 + }, + { + "name": "mask3", + "type": "MASK", + "link": null + }, + { + "name": "image4", + 
"type": "IMAGE", + "link": null + }, + { + "name": "mask4", + "type": "MASK", + "link": null + }, + { + "name": "image5", + "type": "IMAGE", + "link": null + }, + { + "name": "mask5", + "type": "MASK", + "link": null + }, + { + "name": "image6", + "type": "IMAGE", + "link": null + }, + { + "name": "mask6", + "type": "MASK", + "link": null + }, + { + "name": "image7", + "type": "IMAGE", + "link": null + }, + { + "name": "mask7", + "type": "MASK", + "link": null + }, + { + "name": "image8", + "type": "IMAGE", + "link": null + }, + { + "name": "mask8", + "type": "MASK", + "link": null + } + ], + "outputs": [ + { + "name": "config", + "type": "COMPOSITOR_CONFIG", + "links": [ + 805 + ], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CompositorConfig3" + }, + "widgets_values": [ + 1280, + 768, + 110, + true, + true, + false, + 1726858146143 + ] + }, + { + "id": 359, + "type": "SaveImage", + "pos": { + "0": 2300, + "1": 350 + }, + "size": [ + 1550, + 1050 + ], + "flags": {}, + "order": 27, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 828 + } + ], + "outputs": [], + "properties": {}, + "widgets_values": [ + "ComfyUI" + ] + }, + { + "id": 353, + "type": "Text_Image_Zho", + "pos": { + "0": 35, + "1": 499 + }, + "size": { + "0": 210, + "1": 466 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 831 + ], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Text_Image_Zho" + }, + "widgets_values": [ + "V 3.1.0", + "NotoSans-Regular", + "left", + 0, + 22, + "#000103", + 0, + "#ffffff", + 0, + 0, + 200, + 200, + false, + false, + 100, + 180, + 360 + ] + }, + { + "id": 357, + "type": "CompositorConfig3", + "pos": { + "0": 350, + "1": 350 + }, + "size": { + "0": 315, + "1": 502 + }, + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "image1", + "type": "IMAGE", + "link": 826 + }, + { + "name": "mask1", + "type": "MASK", + "link": null + }, + { + "name": "image2", + "type": "IMAGE", + "link": null + }, + { + "name": "mask2", + "type": "MASK", + "link": null + }, + { + "name": "image3", + "type": "IMAGE", + "link": null + }, + { + "name": "mask3", + "type": "MASK", + "link": null + }, + { + "name": "image4", + "type": "IMAGE", + "link": 831 + }, + { + "name": "mask4", + "type": "MASK", + "link": null + }, + { + "name": "image5", + "type": "IMAGE", + "link": null + }, + { + "name": "mask5", + "type": "MASK", + "link": null + }, + { + "name": "image6", + "type": "IMAGE", + "link": null + }, + { + "name": "mask6", + "type": "MASK", + "link": null + }, + { + "name": "image7", + "type": "IMAGE", + "link": null + }, + { + "name": "mask7", + "type": "MASK", + "link": null + }, + { + "name": "image8", + "type": "IMAGE", + "link": null + }, + { + "name": "mask8", + "type": "MASK", + "link": null + } + ], + "outputs": [ + { + "name": "config", + "type": "COMPOSITOR_CONFIG", + "links": [ + 825 + ], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CompositorConfig3" + }, + "widgets_values": [ + 1280, + 768, + 110, + true, + true, + false, + 1726858146143 + ] + }, + { + "id": 360, + "type": "CompositorTools3", + "pos": { + "0": 350, + "1": 950 + }, + "size": { + "0": 310.79998779296875, + "1": 106 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "tools", + "type": "BOOLEAN", + "links": [ + 829 + ], + "slot_index": 0, + "shape": 3 + } + ], + 
"properties": { + "Node name for S&R": "CompositorTools3" + } + }, + { + "id": 354, + "type": "Compositor3", + "pos": { + "0": 700, + "1": 350 + }, + "size": [ + 1521, + 1079 + ], + "flags": {}, + "order": 26, + "mode": 0, + "inputs": [ + { + "name": "config", + "type": "COMPOSITOR_CONFIG", + "link": 825 + }, + { + "name": "tools", + "type": "BOOLEAN", + "link": 829, + "widget": { + "name": "tools" + } + } + ], + "outputs": [ + { + "name": "transforms", + "type": "STRING", + "links": [], + "slot_index": 0, + "shape": 3 + }, + { + "name": "image", + "type": "IMAGE", + "links": [ + 828 + ], + "slot_index": 1, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Compositor3" + }, + "widgets_values": [ + "{\"width\":1280,\"height\":768,\"padding\":110,\"transforms\":[{\"left\":750,\"top\":494,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":1280,\"xheight\":768,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0},{\"left\":1415.4241902320905,\"top\":978.2955456708843,\"scaleX\":0.3496149970870165,\"scaleY\":0.3496149970870165,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":768,\"xheight\":768,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0},{\"left\":0,\"top\":0,\"scaleX\":1,\"scaleY\":1,\"angle\":0,\"flipX\":false,\"flipY\":false,\"originX\":\"top\",\"originY\":\"left\",\"xwidth\":0,\"xheight\":0,\"skewY\":0,\"skewX\":0}],\"bboxes\":[{\"left\":110,\"top\":110,\"xwidth\":768,\"xheight\":1280},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0},{\"left\":1281.1720313506762,\"top\":844.0433867894699,\"xwidth\":268.5043177628288,\"xheight\":268.5043177628286},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0},{\"left\":0,\"top\":0,\"xwidth\":0,\"xheight\":0}]}", + "compositor/1726858264446.png [temp]", + true, + null, + "continue" + ] + } + ], + "links": [ + [ + 224, + 120, + 1, + 122, + 0, + "CLIP" + ], + [ + 225, + 120, + 1, + 123, + 0, + "CLIP" + ], + [ + 226, + 119, + 0, + 124, + 0, + "LATENT" + ], + [ + 227, + 120, + 2, + 124, + 1, + "VAE" + ], + [ + 230, + 120, + 2, + 126, + 1, + "VAE" + ], + [ + 231, + 126, + 0, + 119, + 3, + "LATENT" + ], + [ + 410, + 122, + 0, + 193, + 0, + "CONDITIONING" + ], + [ + 411, + 123, + 0, + 193, + 1, + "CONDITIONING" + ], + [ + 413, + 193, + 0, + 119, + 1, + "CONDITIONING" + ], + [ + 414, + 193, + 1, + 119, + 2, + "CONDITIONING" + ], + [ + 423, + 194, + 0, + 193, + 2, + "CONTROL_NET" + ], + [ + 425, + 197, + 0, + 193, + 3, + 
"IMAGE" + ], + [ + 426, + 197, + 0, + 198, + 0, + "IMAGE" + ], + [ + 670, + 124, + 0, + 297, + 0, + "IMAGE" + ], + [ + 672, + 298, + 0, + 197, + 0, + "IMAGE" + ], + [ + 673, + 298, + 0, + 126, + 0, + "IMAGE" + ], + [ + 803, + 344, + 1, + 298, + 0, + "IMAGE" + ], + [ + 805, + 345, + 0, + 344, + 0, + "COMPOSITOR_CONFIG" + ], + [ + 806, + 344, + 0, + 346, + 0, + "STRING" + ], + [ + 807, + 347, + 0, + 344, + 1, + "BOOLEAN" + ], + [ + 816, + 304, + 0, + 349, + 0, + "IMAGE" + ], + [ + 818, + 349, + 0, + 345, + 4, + "IMAGE" + ], + [ + 819, + 254, + 0, + 351, + 0, + "IMAGE" + ], + [ + 820, + 351, + 0, + 345, + 2, + "IMAGE" + ], + [ + 821, + 350, + 0, + 345, + 0, + "IMAGE" + ], + [ + 822, + 120, + 0, + 352, + 0, + "MODEL" + ], + [ + 823, + 352, + 0, + 119, + 0, + "MODEL" + ], + [ + 825, + 357, + 0, + 354, + 0, + "COMPOSITOR_CONFIG" + ], + [ + 826, + 124, + 0, + 357, + 0, + "IMAGE" + ], + [ + 828, + 354, + 1, + 359, + 0, + "IMAGE" + ], + [ + 829, + 360, + 0, + 354, + 1, + "BOOLEAN" + ], + [ + 831, + 353, + 0, + 357, + 6, + "IMAGE" + ] + ], + "groups": [], + "config": {}, + "extra": { + "ds": { + "scale": 0.544500000000007, + "offset": [ + 381.334151528598, + 906.0090720330776 + ] + }, + "groupNodes": {} + }, + "version": 0.4 +} \ No newline at end of file diff --git a/assets/v3.json b/assets/workflows/v3.json similarity index 100% rename from assets/v3.json rename to assets/workflows/v3.json diff --git a/nodes_compositor.py b/nodes_compositor.py deleted file mode 100644 index edcf2d7..0000000 --- a/nodes_compositor.py +++ /dev/null @@ -1,193 +0,0 @@ -import nodes -import folder_paths -from server import PromptServer -# from aiohttp import web -import numpy as np -import base64 -from io import BytesIO -from PIL import Image, ImageOps -from comfy_execution.graph import ExecutionBlocker -from pathlib import Path -import random - -MAX_RESOLUTION = nodes.MAX_RESOLUTION - - -# these probably exist elsewhere as utils -def tensor2pil(image): - return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) - - -# these probably exist elsewhere as utils -def toBase64ImgUrl(img): - bytesIO = BytesIO() - img.save(bytesIO, format="PNG") - img_types = bytesIO.getvalue() - img_base64 = base64.b64encode(img_types) - return f"data:image/png;base64,{img_base64.decode('utf-8')}" - - -# compositor type is a customized LoadImage. the image is the output! -# basically we pretend we have loaded a composite image and return it. -# other stuff supports the gui -# ideally we would support any number of inputs to be composited -# it should not be necessary to pass b64 but just the names of the uploaded images -# author: erosdiffusionai@gmail.com - -class Compositor(nodes.LoadImage): - #OUTPUT_NODE = False - NOT_IDEMPOTENT = True - counter = 1 - - # By default, Comfy considers that a node has changed if any of its inputs or widgets have changed. - # This is normally correct, but you may need to override this if, for instance, - # the node uses a random number (and does not specify a seed - it’s best practice to have a seed input in this case - # so that the user can control reproducability and avoid unecessary execution), - # or loads an input that may have changed externally, or sometimes ignores inputs - # (so doesn’t need to execute just because those inputs changed). - # - # Despite the name, IS_CHANGED should not return a bool - # IS_CHANGED is passed the same arguments as the main function defined by FUNCTION, - # and can return any Python object. 
This object is compared with the one returned in the previous run (if any) - # and the node will be considered to have changed if is_changed != is_changed_old - # (this code is in execution.py if you need to dig). - # could also be @staticmethod but need to try, or not annotated - @classmethod - def IS_CHANGED(cls, **kwargs): - # it seems that for the image, it's ignored as something else changed ??? - file = kwargs.get("hash") - print(file) - return file - - # @classmethod - # def VALIDATE_INPUTS(cls, image, config): - # # YOLO, anything goes! - # return True - - # def check_lazy_status(self, image, config): - # needed = [] - # if image is None and other_condition: - # needed.append("image1") - # return needed - - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - # about forceInput, lazy and other flags: https://docs.comfy.org/essentials/custom_node_datatypes - "image": ("COMPOSITOR", {"default": "test_empty.png"}), - "config": ("COMPOSITOR_CONFIG", {"forceInput": True}), - "fabricData": ("STRING", {"default": "{}"}), - "hash": ("STRING", {"default": "first run"}), - - }, - "hidden": { - "extra_pnginfo": "EXTRA_PNGINFO", - "node_id": "UNIQUE_ID", - }, - } - - RETURN_TYPES = ("IMAGE",) - RETURN_NAMES = ("composite",) - FUNCTION = "composite" - CATEGORY = "image" - - DESCRIPTION = """ -The compositor node -- drag click to select multiple -- shift click to add/remove from selected -- once you have a multiselection you can move/scale/rotate all the items in the selection together -- use the buffer zone to manipulate big items or park them -- z-index is no intuitive make sure your graph has the items in the same exact vertical sequence on how they are connected -- regenerating shows you the sequence, if something does not stack correctly regenerate to see where it goes -- using the pause button should stop the flow but somenodes don't interpret correctly the break and throw an error. it's irreleveant, just close it -- use "join image with alpha" to apply a mask (hand drawn or extracted via sam or other way) and get and rgba to pass to the node -- use Image remove background (rembg) from comfyui-rembg-node to extract an rgba image with no background -""" - - def composite(self, **kwargs): - image = kwargs.get('image', None) - config = kwargs.get('config', "default") - pause = config["pause"] - padding = config["padding"] - capture_on_queue = config["capture_on_queue"] - width = config["width"] - height = config["height"] - config_node_id = config["node_id"] - # images = config["images"] - names = config["names"] - fabricData = kwargs.get("fabricData") - storeTransforms = kwargs.get("storeTransforms") - use_alignment_controls = config["use_alignment_controls"] - - node_id = kwargs.pop('node_id', None) - # additional stuff we might send - # prompt - # extra_pnginfo - # EXTRA_PNGINFO <- will need to save x,y, scale, rotate, skew, etc inside here to be able to re-load - # EXTRA_PNGINFO is a dictionary that will be copied into the metadata of any .png files saved. - # Custom nodes can store additional information in this dictionary for saving - # (or as a way to communicate with a downstream node). 
- - images = [] - - # test progress callback - # self.progress("test1") - # self.progress("test2") - # self.progress("test3") - - # not needed for now, config controls the node - # PromptServer.instance.send_sync( - # "compositor.images", {"names": images, "node": node_id} - # ) - - # values to send the gui for update, includes base64 images - ui = { - "test": ("value",), - "pause": [pause], - "padding": [padding], - "capture_on_queue": [capture_on_queue], - "width": [width], - "height": [height], - "config_node_id": [config_node_id], - "node_id": [node_id], - # "images": images, - "names": names, - "image": [image], - "fabricData": [fabricData], - "storeTransforms": [storeTransforms], - "use_alignment_controls": [use_alignment_controls], - } - - invalidImage = self.imageDoesNotExist(image) - isPippo = self.imageIsPippo(image) - # print(image is None) - # if pause or image is None: - if pause or image is None or invalidImage or isPippo: - # at the end of my main method - # awkward return types, can't assign variable need tuple (val,) or list [val] - print( - f"compositor {node_id} with config {config_node_id} executed, with pause {pause} or image {image} is None {image is None} or invalidImage {invalidImage}]") - print(f"pause {pause}") - return {"ui": ui, "result": (ExecutionBlocker(None),)} - - else: - print( - f"compositor {node_id} with config {config_node_id} executed, else clause: image {image} is None ? {image is None} or invalidImage {invalidImage}") - return {"ui": ui, "result": super().load_image(folder_paths.get_annotated_filepath(image))} - - # example of progress feedback, not sure about the details dictionary signature: - # we're supposed to teg node and prompt_id - def progress(self, a): - # node (node id), prompt_id, value, max - # print(a) - self.counter = self.counter + 1 - PromptServer.instance.send_sync( - "progress", {"value": self.counter, "node": None, "prompt_id": None, "max": 10} - ) - - def imageDoesNotExist(self, image): - return not folder_paths.exists_annotated_filepath(image) - - def imageIsPippo(self, image): - return image == "test_empty.png" diff --git a/pyproject.toml b/pyproject.toml index 69e13eb..165e2d2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ [project] name = "comfyui-enricos-nodes" description = "pass up to 8 images and visually place, rotate and scale them to build the perfect composition. group move and group rescale. remember their position and scaling value across generations to easy swap images. 
use the buffer zone to to park an asset you don't want to use or easily reach transformations controls" -version = "3.0.8" +version = "3.1.0" license = {text = "MIT License"} @@ -12,4 +12,4 @@ Repository = "https://github.com/erosDiffusion/ComfyUI-enricos-nodes" [tool.comfy] PublisherId = "erosdiffusion" DisplayName = "ComfyUI-enricos-nodes" -Icon = "" +Icon = "πŸ’œ" diff --git a/web/compositor3.js b/web/compositor3.js index 590a275..1a7d1f1 100644 --- a/web/compositor3.js +++ b/web/compositor3.js @@ -16,6 +16,41 @@ function getCompositorWidget(node, widgetName) { return node.widgets.find((w) => w.name === widgetName); } +function handleTogglePreciseSelection(e, currentNode) { + const optionValue = e.data.value; + currentNode.compositorInstance.preciseSelection = optionValue; + const c = currentNode.compositorInstance.fcanvas; + c.getObjects().map(function (i) { + return i.set('perPixelTargetFind', optionValue); + }); +} + +function handleResetOldTransform(e, currentNode) { + const optionValue = e.data.value; + const instance = currentNode.compositorInstance; + + const c = instance.fcanvas; + c.getObjects().forEach(function (image, index) { + instance.resetOldTransform(index); + }); +} + +function centerSelected(e, currentNode) { + const optionValue = e.data.value; + const instance = currentNode.compositorInstance; + + const c = instance.fcanvas; + // get the selected and set the + instance.needsUpload = true; + c.getActiveObjects().forEach((o)=>o.center()); + c.renderAll(); + instance.uploadIfNeeded(instance); + // c.getObjects().forEach(function (image, index) { + // instance.resetOldTransform(index); + // + // }); +} + /** * registering an extension gives the possibility to tap into lifecycle methods * here is the sequence from the docs: @@ -107,10 +142,10 @@ app.registerExtension({ /** example of arbitrary messages - PromptServer.instance.send_sync("my.custom.message", {"node": node_id, "other_things": etc}) - in api.ts search for "case 'executing'": for all events emitted or "new CustomEvent('executing'" - example of built-in, this should be when a node is about to start processing (in the back?) - */ + PromptServer.instance.send_sync("my.custom.message", {"node": node_id, "other_things": etc}) + in api.ts search for "case 'executing'": for all events emitted or "new CustomEvent('executing'" + example of built-in, this should be when a node is about to start processing (in the back?) 
+ */ function executingMessageHandler(event) { //console.log("executingMessageHandler", event, arguments); const current = app.graph.getNodeById(event.detail); @@ -152,10 +187,10 @@ app.registerExtension({ const nodeId = event.detail.node; const node = Editor.hook(nodeId); if (node.type != "Compositor3") { - console.log(node.type); + // console.log(node.type); return; } - + const instance = node.compositorInstance; // console.log("hasResult,awaitedResult", e.hasResult[0], e.awaited[0]); node.compositorInstance.w.value = e.width[0]; @@ -169,7 +204,13 @@ app.registerExtension({ const images = [...e.names]; const restore = Editor.deserializeStuff(node.fabricDataWidget.value); - const shouldRestore = Editor.getConfigWidgetValue(node, 3); + const shouldRestore = true; // Editor.getConfigWidgetValue(node, 3); + const normalizeHeight = Editor.getConfigWidgetValue(node, 3); + const onConfigChanged = Editor.getConfigWidgetValue(node, 4); + + instance.normalizeHeigh = normalizeHeight; + instance.onConfigChanged = onConfigChanged; + instance.configChanged = e.configChanged[0]; images.map((b64, index) => { function fromUrlCallback(oImg) { @@ -181,22 +222,68 @@ app.registerExtension({ * http://fabricjs.com/docs/fabric.Image.html */ fabric.Image.fromURL(b64, fromUrlCallback); + }); + if(instance.configChanged) { + + instance.needsUpload = true; + + // true: grab and continue + if(onConfigChanged){ + console.log("upload and continue") + + const reEnqueue = () => { + console.log("upload and continue") + interrupt(); + instance.continue(); + } + + instance.uploadIfNeeded(instance, reEnqueue); + + + }else{ + // False, stop + instance.uploadIfNeeded(instance) + console.log("stopped, just upload") + } + + } + + // console.log("configChanged",instance.configChanged); + // // if(instance.onConfigChanged == "grabAndContinue"){ + // // console.log("grabAndContinue") + // // instance.needsUpload = true; + // // console.log("set instance needs upload to ", instance.needsUpload) + // // const reEneuque = () => { + // // app.queuePrompt(0, 1); + // // } + // // instance.uploadIfNeeded(instance,reEneuque); + // // }else{ + // + // console.log("stop:config") + // instance.needsUpload = true; + // console.log("set instance needs upload to ", instance.needsUpload) + // + // }else{ + // instance.needsUpload = false; + // console.log("config did not hcange, needs upload set to ", instance.needsUpload) + // } + } /** important messaging considerations https://docs.comfy.org/essentials/comms_messages */ function configureHandler() { - console.log("configurehanlder", arguments); + //console.log("configurehanlder", arguments); } function executionStartHandler() { - console.log("executionStartHandler", arguments); + //console.log("executionStartHandler", arguments); } function executionCachedHandler() { - console.log("executionCachedHandler", arguments); + //console.log("executionCachedHandler", arguments); } function graphChangedHandler() { @@ -204,7 +291,7 @@ app.registerExtension({ } function changeWorkflowHandler() { - console.log("changeWorkflowHandler", arguments); + //console.log("changeWorkflowHandler", arguments); } @@ -324,19 +411,49 @@ app.registerExtension({ // console.log("afterConfigureGraph", args); + // reset the config timestamp, to ensure re-triggering const configs = app.graph.findNodesByType("CompositorConfig3"); configs.forEach((c) => { const initialized = getCompositorWidget(c, "initialized"); initialized.value = Date.now(); }) + // enable nodes to talk to each other without running the frontend through a 
dedicated + // broadcast channel + // + // setup broadcast channel, also needs to be done on node created or connection change... const nodes = app.graph.findNodesByType("Compositor3"); // probably too late here as it's already running in the back - // nodes.forEach((current) => { - // const config = current.getInputNode(0); - // // console.log("looping afterconfiguregraph compostior node", current); - // // console.log("looping afterconfiguregraph compostior node configs", config); - // }) + nodes.forEach((currentNode) => { + const tools = currentNode.getInputNode(1); + //const tools = Editor.getToolWidget(this); + const CHANNELNAME = `Tools${tools.id}`; + //console.log(CHANNELNAME) + const channel = new BroadcastChannel(CHANNELNAME); + channel.addEventListener("message", (e) => { + switch (e.data.action) { + case "togglePreciseSelection": + handleTogglePreciseSelection(e, currentNode); + break; + case "resetTransforms": + handleResetOldTransform(e, currentNode); + break; + case "centerSelected": + centerSelected(e, currentNode); + break; + default: + console.log("unknown broadcast event", e); + } + + + }); + + currentNode.channel = channel; + + + // console.log("looping afterconfiguregraph compostior node", current); + // console.log("looping afterconfiguregraph compostior node configs", config); + }) app.graph.setDirtyCanvas(true, true); }, /** @@ -357,8 +474,8 @@ app.registerExtension({ node.imageNameWidget = getCompositorWidget(node, "imageName"); const originalCallback = node.imageNameWidget.callback; node.imageNameWidget.callback = () => { - debugger; - console.log("callback of imageNameWidget with ", arguments); + //debugger; + //console.log("callback of imageNameWidget with ", arguments); originalCallback(arguments); } node.imageNameWidget.computeSize = () => [0, 0]; @@ -395,7 +512,7 @@ app.registerExtension({ node.continue = node.addWidget("button", "continue", "continue", compositorInstance.continue.bind(compositorInstance)); node.onMouseOut = function (e, pos, canvas) { - console.log("mouseout") + // console.log("mouseout") const original_onMouseDown = node.onMouseOut; return original_onMouseDown?.apply(this, arguments); } @@ -455,6 +572,7 @@ class Editor { compositionArea; compositionBorder; + preciseSelection = false; /** (widget) references / config params*/ p; @@ -483,7 +601,7 @@ class Editor { * this is currently called on capture (regardless of the flag) */ static serializeStuff(node) { - // console.log("serializeStuff"); + console.log("serializeStuff"); const instance = node.compositorInstance; const result = { // or the widget ? 
boh @@ -500,9 +618,19 @@ class Editor { return undefined; } }); - result.transforms = res; + const bboxes = [0, 1, 2, 3, 4, 5, 6, 7].map((i) => { + try { + let t = instance.getBoundingBox(i); + return t; + } catch (e) { + return undefined; + } + }); + + result.bboxes = bboxes; + return JSON.stringify(result); } @@ -518,6 +646,13 @@ class Editor { return connected.widgets[slot].value; } + static getToolWidget(instance) { + // console.log(node, slot); + return instance.node.getInputNode(1); + + } + + /** * in CompositorConfig * - 4 is pause >removed @@ -618,6 +753,7 @@ class Editor { static createCanvasElement() { const canvas = document.createElement("canvas"); + canvas.id = Editor.getRandomCompositorUniqueId(); return canvas; } @@ -663,23 +799,64 @@ class Editor { } getOldTransform(index) { + const ref = this.inputImages[this.imageNameAt(index)]; return { - left: this.inputImages[this.imageNameAt(index)].left, - top: this.inputImages[this.imageNameAt(index)].top, - scaleX: this.inputImages[this.imageNameAt(index)].scaleX, - scaleY: this.inputImages[this.imageNameAt(index)].scaleY, - angle: this.inputImages[this.imageNameAt(index)].angle, - flipX: this.inputImages[this.imageNameAt(index)].flipX, - flipY: this.inputImages[this.imageNameAt(index)].flipY, - originX: this.inputImages[this.imageNameAt(index)].originX, - originY: this.inputImages[this.imageNameAt(index)].originY, - xwidth: this.inputImages[this.imageNameAt(index)].width, - xheight: this.inputImages[this.imageNameAt(index)].height, - skewY: this.inputImages[this.imageNameAt(index)].skewY, - skewX: this.inputImages[this.imageNameAt(index)].skewX, + left: ref.left, + top: ref.top, + scaleX: ref.scaleX, + scaleY: ref.scaleY, + angle: ref.angle, + flipX: ref.flipX, + flipY: ref.flipY, + originX: ref.originX, + originY: ref.originY, + xwidth: ref.width, + xheight: ref.height, + skewY: ref.skewY, + skewX: ref.skewX, }; } + getBoundingBox(index) { + const ref = this.inputImages[this.imageNameAt(index)].getBoundingRect(); + return { + left: ref.left, + top: ref.top, + scaleX: ref.scaleX, + scaleY: ref.scaleY, + angle: ref.angle, + flipX: ref.flipX, + flipY: ref.flipY, + originX: ref.originX, + originY: ref.originY, + xwidth: ref.height, + xheight: ref.width, + skewY: ref.skewY, + skewX: ref.skewX, + }; + } + + resetOldTransform(index) { + this.inputImages[this.imageNameAt(index)].left = 0; + this.inputImages[this.imageNameAt(index)].top = 0; + this.inputImages[this.imageNameAt(index)].scaleX = 1; + this.inputImages[this.imageNameAt(index)].scaleY = 1; + this.inputImages[this.imageNameAt(index)].angle = 0; + this.inputImages[this.imageNameAt(index)].flipX = false; + this.inputImages[this.imageNameAt(index)].flipY = false; + this.inputImages[this.imageNameAt(index)].originX = "top"; + this.inputImages[this.imageNameAt(index)].originY = "left"; + // this.inputImages[this.imageNameAt(index)].height; + // this.inputImages[this.imageNameAt(index)].width; + this.inputImages[this.imageNameAt(index)].skewY = 0; + this.inputImages[this.imageNameAt(index)].skewX = 0; + this.inputImages[this.imageNameAt(index)].perPixelTargetFind = this.preciseSelection; + this.fcanvas.renderAll(); + } + + + + /** * checks if the reference at index for an image is not null * references are stored in "inputImages" @@ -707,6 +884,7 @@ class Editor { const oldTransform = this.getOldTransform(index); // Remove the old image from the canvas this.fcanvas.remove(this.inputImages[this.imageNameAt(index)]); + // this breaks if we have width and height so renamed to xwidth and 
xheight theImage.set(oldTransform); this.fcanvas.add(theImage); this.inputImages[this.imageNameAt(index)] = theImage; @@ -768,6 +946,7 @@ class Editor { altSelectionKey: "ctrlKey", altActionKey: "ctrlKey", centeredKey: "altKey", + // uniScaleTransform: false, // selectable:true, // evented:true, // centeredRotation: true, @@ -827,6 +1006,7 @@ class Editor { node.setDirtyCanvas(true, true); if (callback) callback() + // deprecated, not really needed anymore if (setDone) api.fetchApi("/compositor/done", {method: "POST", body}); }, () => { @@ -839,9 +1019,12 @@ class Editor { return this.cblob == undefined } - /** this can't be async so resort to promise resolving and callbacks */ - grabUploadAndSetOutput(setDone, callback) { - console.log("capture"); + /** this can't be async so resort to promise resolving and callbacks + * @params setDone **deprecated** when setDone is true, it will raise a /compositor/done event for the backend + * @params callback will be passed to uploadImage and called when the upload has finished + * */ + grabUploadAndSetOutput(instance, setDone, callback) { + // console.log("capture"); // console.log("grap upload and set output") // prepare the image const img = new Image(); @@ -893,16 +1076,7 @@ class Editor { } continue(setDone) { - // console.log("continue"); - // const body = new FormData(); - // const node_id = this.node.id; - // body.append('node_id', node_id); - // body.append('filename', this.node.imageNameWidget.value); - // body.append('overwrite', "true"); - // if (setDone) api.fetchApi("/compositor/done", {method: "POST", body}); - // return; - app.queuePrompt(0,1); - + app.queuePrompt(0, 1); } @@ -986,16 +1160,16 @@ class Editor { }); this.fcanvas.on('mouse:out', function (opt) { - console.log("mouseout") + // console.log("mouseout") // moving outside editor, this might fail to be intercepted depending on how full the // canvas is - if (opt.target === null || opt.target === undefined || opt.target && opt.nextTarget===undefined) { + if (opt.target === null || opt.target === undefined || opt.target && opt.nextTarget === undefined) { compositorInstance.uploadIfNeeded(compositorInstance); } }); this.fcanvas.on('object:modified', function (opt) { - console.log(this, compositorInstance); + // console.log(this, compositorInstance); // mark as needing upload so when we mouse out we doit then reset // mouse out is flimsy, sometimes it's not triggering compositorInstance.needsUpload = true; @@ -1039,15 +1213,15 @@ class Editor { }.bind(this)); } - uploadIfNeeded(compositorInstance) { + uploadIfNeeded(compositorInstance,callback = ()=>{console.log("upload if needed")}) { if (compositorInstance.needsUpload) { compositorInstance.needsUpload = false; const serialized = Editor.serializeStuff(compositorInstance.node); compositorInstance.node.fabricDataWidget.value = serialized; - const callback = () => { - alert("done"); - } + // const callback = () => { + // + // } compositorInstance.grabUploadAndSetOutput(compositorInstance, false, callback) } else { console.log("no upload needed to be done"); @@ -1133,7 +1307,9 @@ class Editor { //this.canvasEl.id = 'test'; // ditor.getRandomCompositorUniqueId(); this.canvasEl.id = Editor.getRandomCompositorUniqueId(); this.containerDiv.appendChild(this.canvasEl); - this.containerDiv.style.overflow ="hidden"; + + + this.containerDiv.style.overflow = "hidden"; this.canvasEl.width = this.w.value + 2 * this.p.value; this.canvasEl.height = this.h.value + 2 * this.p.value; @@ -1182,3 +1358,13 @@ class Editor { } +async function interrupt() 
{ + const response = await fetch('/interrupt', { + method: 'POST', + cache: 'no-cache', + headers: { + 'Content-Type': 'text/html' + }, + }); + return await response.json(); +} \ No newline at end of file diff --git a/web/tools.js b/web/tools.js new file mode 100644 index 0000000..acb79a1 --- /dev/null +++ b/web/tools.js @@ -0,0 +1,74 @@ +import {app} from "../../scripts/app.js"; +import {api} from "../../scripts/api.js"; + + +function isType(comfyClass, node) { + return node.constructor.comfyClass == comfyClass; +} + +function getWidget(node, widgetName) { + return node.widgets.find((w) => w.name === widgetName); +} + +app.registerExtension({ + name: "Comfy.CompositorTools3", + async getCustomWidgets(app) { + }, + async setup(app) { + }, + async init(args) { + }, + async beforeRegisterNodeDef(nodeType, nodeData, app) { + }, + + async loadedGraphNode(node, app) { + }, + async afterConfigureGraph(args) { + // not enough to do here only we also in node created (for later or connection changed) + console.log("after configure graph") + // To do something when a workflow has loaded, use afterConfigureGraph, not setup + // console.log("afterConfigureGraph", args); + + + const tools = app.graph.findNodesByType("CompositorTools3"); + tools.forEach((node) => { + const CHANNELNAME = `Tools${node.id}`; + console.log(CHANNELNAME) + const channel = new BroadcastChannel(CHANNELNAME); + + node["togglePreciseSelection"] = () => { + //console.log(arguments); + channel.postMessage({action:"togglePreciseSelection",value: node.preciseSelection.value, nodeId: node.id}); + } + + node["centerSelected"] = () => { + //console.log(arguments); + channel.postMessage({action:"centerSelected",value: true, nodeId: node.id}); + } + + node["resetTransforms"] = () => { + //console.log(arguments); + channel.postMessage({action:"resetTransforms",value: true, nodeId: node.id}); + } + + node.centerSelected = node.addWidget("button", "centerSelected", false, node.centerSelected); + node.preciseSelection = node.addWidget("toggle", "preciseSelection", false, node.togglePreciseSelection); + node.resetTransforms = node.addWidget("button", "resetTransforms", false, node.resetTransforms); + //node.preciseSelection.serialize = ()=>{} + node.setDirtyCanvas(true, true); + }) + }, + async nodeCreated(node) { + if (!isType("CompositorTools3", node)) return; + // console.log("better log it"); + node.serialize_widgets = false; + node.isVirtualNode = true; + }, +}); + + + + + + +
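
Note on the node-to-node messaging introduced above: the tools node and the compositor canvas never call each other directly. Each CompositorTools3 node posts an {action, value, nodeId} message on a browser BroadcastChannel named `Tools${node.id}` (web/tools.js), and the Compositor3 node wired to it opens a channel with the same name and dispatches on e.data.action (web/compositor3.js). A minimal, self-contained sketch of that pattern, runnable in any browser console; the channel-name scheme and message shape come from the patch, while the node id is a hypothetical value:

// sender side: what a tools-node button widget does when clicked
const toolsNodeId = 360; // hypothetical CompositorTools3 node id
const tx = new BroadcastChannel(`Tools${toolsNodeId}`);

// receiver side: what the compositor registers in afterConfigureGraph
const rx = new BroadcastChannel(`Tools${toolsNodeId}`);
rx.addEventListener("message", (e) => {
    switch (e.data.action) {
        case "togglePreciseSelection":
            console.log("precise selection is now", e.data.value);
            break;
        default:
            console.log("unknown broadcast event", e);
    }
});

// a message posted on tx is delivered to every other channel object with the same name
tx.postMessage({action: "togglePreciseSelection", value: true, nodeId: toolsNodeId});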
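
On the receiving end, togglePreciseSelection just flips fabric.js's perPixelTargetFind flag on every object on the canvas, so selection clicks register only on non-transparent pixels instead of the whole bounding box. The same toggle in isolation, as a sketch (it assumes a fabric.Canvas instance like the compositor's fcanvas; the helper name is illustrative):

function setPreciseSelection(canvas, enabled) {
    // with perPixelTargetFind on, transparent areas of an image no longer catch clicks
    canvas.getObjects().forEach((obj) => obj.set("perPixelTargetFind", enabled));
    canvas.renderAll();
}

// mirrors handleTogglePreciseSelection, fed by the broadcast value:
// setPreciseSelection(currentNode.compositorInstance.fcanvas, e.data.value);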
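
The configChanged handling in executedMessageHandler reduces to one decision: always re-capture the canvas, and only re-queue automatically when onConfigChanged is set. A sketch of that flow as a hypothetical wrapper (uploadIfNeeded, interrupt and continue are the functions from the patch; the wrapper name is illustrative and not part of the patch):

function onBackendConfigChanged(instance, continueOnChange) {
    instance.needsUpload = true;
    if (continueOnChange) {
        // capture the composite, then cancel the current prompt and queue a fresh run
        instance.uploadIfNeeded(instance, () => {
            interrupt();         // POST to ComfyUI's built-in /interrupt endpoint, via the helper above
            instance.continue(); // app.queuePrompt(0, 1)
        });
    } else {
        // capture only; the user re-queues manually when ready
        instance.uploadIfNeeded(instance);
    }
}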