diff --git a/topics/imaging/images/voronoi-segmentation/BIAD634_IM276.png b/topics/imaging/images/voronoi-segmentation/BIAD634_IM276.png
new file mode 100644
index 00000000000000..c8b2abdcd2dfaf
Binary files /dev/null and b/topics/imaging/images/voronoi-segmentation/BIAD634_IM276.png differ
diff --git a/topics/imaging/tutorials/voronoi-segmentation/faqs/index.md b/topics/imaging/tutorials/voronoi-segmentation/faqs/index.md
new file mode 100644
index 00000000000000..9ce3fe4fce824b
--- /dev/null
+++ b/topics/imaging/tutorials/voronoi-segmentation/faqs/index.md
@@ -0,0 +1,3 @@
+---
+layout: faq-page
+---
diff --git a/topics/imaging/tutorials/voronoi-segmentation/tutorial.bib b/topics/imaging/tutorials/voronoi-segmentation/tutorial.bib
new file mode 100644
index 00000000000000..9206b0b6e4cae4
--- /dev/null
+++ b/topics/imaging/tutorials/voronoi-segmentation/tutorial.bib
@@ -0,0 +1,42 @@
+
+# This is the bibliography file for your tutorial.
+#
+# To add bibliography (bibtex) entries here, follow these steps:
+# 1) Find the DOI for the article you want to cite
+# 2) Go to https://doi2bib.org and fill in the DOI
+# 3) Copy the resulting bibtex entry into this file
+#
+# To cite the example below, in your tutorial.md file
+# use {% cite Batut2018 %}
+#
+# If you want to cite an online resource (website etc)
+# you can use the 'online' format (see below)
+#
+# You can remove the examples below
+
+@article{Batut2018,
+ doi = {10.1016/j.cels.2018.05.012},
+ url = {https://doi.org/10.1016/j.cels.2018.05.012},
+ year = {2018},
+ month = jun,
+ publisher = {Elsevier {BV}},
+ volume = {6},
+ number = {6},
+ pages = {752--758.e1},
+ author = {B{\'{e}}r{\'{e}}nice Batut and Saskia Hiltemann and Andrea Bagnacani and Dannon Baker and Vivek Bhardwaj and
+ Clemens Blank and Anthony Bretaudeau and Loraine Brillet-Gu{\'{e}}guen and Martin {\v{C}}ech and John Chilton
+ and Dave Clements and Olivia Doppelt-Azeroual and Anika Erxleben and Mallory Ann Freeberg and Simon Gladman and
+ Youri Hoogstrate and Hans-Rudolf Hotz and Torsten Houwaart and Pratik Jagtap and Delphine Larivi{\`{e}}re and
+ Gildas Le Corguill{\'{e}} and Thomas Manke and Fabien Mareuil and Fidel Ram{\'{i}}rez and Devon Ryan and
+ Florian Christoph Sigloch and Nicola Soranzo and Joachim Wolff and Pavankumar Videm and Markus Wolfien and
+ Aisanjiang Wubuli and Dilmurat Yusuf and James Taylor and Rolf Backofen and Anton Nekrutenko and Bj\"{o}rn Gr\"{u}ning},
+ title = {Community-Driven Data Analysis Training for Biology},
+ journal = {Cell Systems}
+}
+
+@online{gtn-website,
+ author = {GTN community},
+ title = {GTN Training Materials: Collection of tutorials developed and maintained by the worldwide Galaxy community},
+ url = {https://training.galaxyproject.org},
+ urldate = {2021-03-24}
+}
diff --git a/topics/imaging/tutorials/voronoi-segmentation/tutorial.md b/topics/imaging/tutorials/voronoi-segmentation/tutorial.md
new file mode 100644
index 00000000000000..4c076bb49f028d
--- /dev/null
+++ b/topics/imaging/tutorials/voronoi-segmentation/tutorial.md
@@ -0,0 +1,436 @@
+---
+layout: tutorial_hands_on
+
+title: Voronoi Segmentation
+zenodo_link: 'https://doi.org/10.5281/zenodo.5494629'
+questions:
+  - How can Voronoi segmentation be performed in Galaxy?
+ - How should images be prepared before applying Voronoi segmentation?
+ - How can Voronoi segmentation be used to analyze spatial relationships and divide an image into distinct regions based on proximity?
+objectives:
+  - Identify and use the Galaxy tools needed to perform each step of a Voronoi segmentation
+time_estimation: 3H
+key_points:
+  - Images need to be prepared (channel extraction, smoothing, thresholding) before Voronoi segmentation can be applied
+  - Voronoi segmentation partitions an image into regions around seed points and can be used to identify distinct regions in an image
+contributors:
+- annefou
+
+---
+
+
+# Introduction
+
+
+
+Voronoi segmentation is a technique used to divide an image or space into regions
+based on the proximity to a set of defined points, called seeds or sites. Each
+region, known as a Voronoi cell, contains all locations that are closer to its
+seed than to any other. This approach is especially useful when analyzing spatial
+relationships, as it reveals how different areas relate in terms of distance and
+distribution. Voronoi segmentation is widely applicable for tasks where it's
+important to understand the proximity or neighborhood structure of points, such
+as organizing space, studying clustering patterns, or identifying regions of
+influence around each point in various types of data.
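+
+To make the idea concrete, here is a minimal Python sketch (assuming NumPy and SciPy are available; the seed coordinates and grid size are made up for illustration) that partitions a small grid into Voronoi cells by assigning every pixel the label of its nearest seed:
+
+```python
+import numpy as np
+from scipy.spatial import cKDTree
+
+# Three hypothetical seed coordinates (row, column) on a 100x100 grid.
+seeds = np.array([[20, 30], [70, 15], [50, 80]])
+
+# Coordinates of every pixel of the grid.
+rows, cols = np.indices((100, 100))
+pixels = np.stack([rows.ravel(), cols.ravel()], axis=1)
+
+# Assign every pixel the index of its nearest seed: this is its Voronoi cell.
+_, nearest = cKDTree(seeds).query(pixels)
+voronoi_labels = nearest.reshape(100, 100) + 1  # labels 1, 2, 3
+```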
+
+
+## Voronoi Segmentation for bioimage analysis
+
+In bioimage analysis, Voronoi segmentation is a valuable tool for studying the
+spatial organization of cells, tissues, or other biological structures within an
+image. By dividing an image into regions around each identified cell or structure,
+Voronoi segmentation enables researchers to analyze how different cell types are
+distributed, measure distances between cells, and examine clustering patterns. This
+can provide insights into cellular interactions, tissue organization, and functional
+relationships within biological samples, such as identifying the proximity of immune
+cells to tumor cells or mapping neuron distributions within brain tissue.
+
+## Voronoi Segmentation for Earth Observation
+
+In Earth observation, Voronoi segmentation is used to analyze spatial patterns and distributions in satellite or aerial images. By creating regions based on proximity to specific points, such as cities, vegetation clusters, or monitoring stations, Voronoi segmentation helps in studying how features are organized across a landscape. This method is particularly useful for mapping resource distribution, analyzing urban growth, monitoring vegetation patterns, or assessing land use changes. For instance, it can help divide an area into regions of influence around weather stations or identify how different land cover types interact spatially, aiding in environmental monitoring and planning.
+
+
+> <agenda-title></agenda-title>
+>
+> In this tutorial, we will cover:
+>
+> 1. TOC
+> {:toc}
+>
+{: .agenda}
+
+# Get data
+
+
+## Bioimage data
+
+This tutorial uses an image dataset from the [BioImage Archive](https://www.ebi.ac.uk/bioimage-archive/). The dataset was specifically prepared for training nuclear segmentation methods.
+
+The images are stored in the BioImage Archive and can be uploaded to the Galaxy server with the corresponding BioImage Archive retrieval tool.
+
+![S-BIAD634:IM276 dataset](../../images/voronoi-segmentation/BIAD634_IM276.png "An annotated fluorescence image dataset for training nuclear segmentation methods")
+
+> <hands-on-title>Data upload with the BioImage Archive tool</hands-on-title>
+>
+> 1. Create a new history for this tutorial.
+> When you log in for the first time, an empty, unnamed history is created by default. You can simply rename it.
+>
+> {% snippet faqs/galaxy/histories_create_new.md %}
+>
+> 2. {% tool [FTP Link for BioImage Archive](toolshed.g2.bx.psu.edu/repos/bgruening/bia_download/bia_download/0.1.0+galaxy0) %} with the following parameters:
+>    - {% icon param-select %} *"Storage mode"*: `fire` (the storage mode is always `fire`)
+> - *"The path of accession"*: `S-BIAD/634/S-BIAD634`
+>    > <comment-title>BioImage Archive</comment-title>
+>    >
+>    > This tool uploads all files of the accession into your Galaxy history, which can be inconvenient for large datasets.
+>    > In that case, delete the data files you do not plan to use for your analysis.
+> {: .comment}
+>
+> 3. Rename {% icon galaxy-pencil %} the file `Neuroblastoma_0.tif` to `input_image.tif`
+> 4. Check that the datatype is `tiff`
+>
+>    {% snippet faqs/galaxy/datasets_change_datatype.md datatype="tiff" %}
+>
+> 5. Add to each dataset a tag corresponding to `input`
+>
+> {% snippet faqs/galaxy/datasets_add_tag.md %}
+>
+>
+{: .hands_on}
+
+## Earth Observation (EO) data
+
+![Datasets of SH's AI4ER MRes Project](https://edsbook.org/_images/7d3b3ce159046d8da12d413a00c69137e4a073dcf1ee27d7cd4e33af6d93d526.png "A top-down RGB image of a forest, captured by drone, aircraft, or satellite.")
+
+> <hands-on-title>EO data upload</hands-on-title>
+>
+> 1. Create a new history for this tutorial.
+> When you log in for the first time, an empty, unnamed history is created by default. You can simply rename it.
+>
+> {% snippet faqs/galaxy/histories_create_new.md %}
+>
+> 2. Import the files from [Zenodo]({{ page.zenodo_link }}) or from
+> the shared data library (`GTN - Material` -> `{{ page.topic_name }}`
+> -> `{{ page.title }}`):
+>
+> - **Important:** If setting the type to 'Auto-detect', make sure that after upload, the datatype is set to tiff.
+>
+> ```
+> https://zenodo.org/records/5494629/files/Sep_2014_RGB_602500_646500.tif
+> ```
+>
+> {% snippet faqs/galaxy/datasets_import_via_link.md %}
+>
+> {% snippet faqs/galaxy/datasets_import_from_data_library.md %}
+>
+> 3. Rename {% icon galaxy-pencil %} the file `Sep_2014_RGB_602500_646500.tif` to `input_image.tif`
+> 4. Check that the datatype is `tiff`
+>
+>    {% snippet faqs/galaxy/datasets_change_datatype.md datatype="tiff" %}
+>
+> 5. Add to each dataset a tag corresponding to `input`
+>
+> {% snippet faqs/galaxy/datasets_add_tag.md %}
+>
+{: .hands_on}
+
+# Data preparation
+
+## Extract a single channel with **Convert image format**
+
+> <hands-on-title>Select the channel for Voronoi segmentation</hands-on-title>
+>
+> 1. {% tool [Convert image format](toolshed.g2.bx.psu.edu/repos/imgteam/bfconvert/ip_convertimage/6.7.0+galaxy3) %} with the following parameters:
+>    - {% icon param-file %} *"Input Image"*: `input_image.tif` (the uploaded input dataset)
+> - *"Extract series"*: `All series`
+> - *"Extract timepoint"*: `All timepoints`
+> - *"Extract channel"*: `Extract channel`
+>    - *"Channel id"*: the index of the channel to segment (in the workflow, this is the *"Select Channel for Voronoi Segmentation"* input parameter)
+> - *"Extract z-slice"*: `All z-slices`
+> - *"Extract range"*: `All images`
+> - *"Extract crop"*: `Full image`
+> - *"Tile image"*: `No tiling`
+> - *"Pyramid image"*: `No Pyramid`
+>
+>    > <comment-title>Why do we need to select a single channel?</comment-title>
+>    >
+>    > The segmentation is computed on a single channel of the input image, so we first extract the channel of interest. Note that some tools number channels starting from 1, while others start from 0.
+> {: .comment}
+>
+{: .hands_on}
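+
+For reference, the channel extraction can also be sketched in Python. The snippet below assumes the `tifffile` package, that the channels lie on the first axis of the array, and uses channel index 1 purely as an example; the Galaxy tool handles many more formats and axis layouts:
+
+```python
+import tifffile
+
+# Multi-channel image uploaded earlier in this tutorial.
+image = tifffile.imread("input_image.tif")
+print(image.shape)  # check which axis holds the channels
+
+# Assumption: channels are on the first axis; index 1 selects the second
+# channel (0-based here, while some tools count channels from 1).
+channel = image[1]
+tifffile.imwrite("channel.tif", channel)
+```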
+
+
+## Create a label map from the seeds with **Convert binary image to label map**
+
+> <hands-on-title>Label the segmentation seeds</hands-on-title>
+>
+> 1. {% tool [Convert binary image to label map](toolshed.g2.bx.psu.edu/repos/imgteam/binary2labelimage/ip_binary_to_labelimage/0.5+galaxy0) %} with the following parameters:
+>    - {% icon param-file %} *"Binary image"*: the binary image containing the segmentation seeds (Input dataset)
+> - *"Mode"*: `Connected component analysis`
+>
+>    > <comment-title>Connected component analysis</comment-title>
+>    >
+>    > Connected component analysis assigns a unique integer label to each connected group of foreground pixels, so every seed in the binary image receives its own label. These labels are what the Voronoi tessellation later expands into regions.
+> {: .comment}
+>
+{: .hands_on}
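+
+Conceptually, this step is a connected component labelling, which you could reproduce with scikit-image as in the rough sketch below (file names are placeholders; the Galaxy tool's implementation may differ):
+
+```python
+import numpy as np
+from skimage.io import imread, imsave
+from skimage.measure import label
+
+# Binary seed image: background is 0, seeds are > 0.
+seeds = imread("seeds.tif")
+
+# Connected component analysis: every connected group of foreground
+# pixels (every seed) receives its own integer label 1..N.
+label_map = label(seeds > 0)
+imsave("seed_labels.tif", label_map.astype(np.uint16))
+```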
+
+
+## Smooth the image with **Filter 2-D image**
+
+> <hands-on-title>Apply a Gaussian filter</hands-on-title>
+>
+> 1. {% tool [Filter 2-D image](toolshed.g2.bx.psu.edu/repos/imgteam/2d_simple_filter/ip_filter_standard/1.12.0+galaxy0) %} with the following parameters:
+> - {% icon param-file %} *"Input image"*: `output` (output of **Convert image format** {% icon tool %})
+> - *"Filter type"*: `Gaussian`
+>
+>    > <comment-title>Why smooth before thresholding?</comment-title>
+>    >
+>    > The Gaussian filter reduces noise in the extracted channel, which makes the subsequent thresholding step more robust.
+> {: .comment}
+>
+{: .hands_on}
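+
+The Gaussian filter is a standard smoothing operation; a rough scikit-image equivalent could look like the sketch below (the `sigma` value of 3 and the file names are placeholders, and the Galaxy tool's *size* parameter may be interpreted slightly differently):
+
+```python
+import numpy as np
+from skimage.filters import gaussian
+from skimage.io import imread, imsave
+
+intensity = imread("channel.tif")
+
+# Smooth with a Gaussian kernel; a larger sigma means stronger smoothing.
+smoothed = gaussian(intensity, sigma=3, preserve_range=True)
+imsave("smoothed.tif", smoothed.astype(np.float32))
+```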
+
+
+# Voronoi segmentation
+
+## Compute the tessellation with **Compute Voronoi tessellation**
+
+> <hands-on-title>Expand the seeds into Voronoi regions</hands-on-title>
+>
+> 1. {% tool [Compute Voronoi tessellation](toolshed.g2.bx.psu.edu/repos/imgteam/voronoi_tesselation/voronoi_tessellation/0.22.0+galaxy1) %} with the following parameters:
+> - {% icon param-file %} *"Input image"*: `output` (output of **Convert binary image to label map** {% icon tool %})
+>
+>    > <comment-title>What does the tessellation contain?</comment-title>
+>    >
+>    > Each pixel of the output is assigned the label of the seed it is closest to, so the whole image is partitioned into one region per seed.
+> {: .comment}
+>
+{: .hands_on}
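+
+One common way to compute such a tessellation from a label map is a distance transform that reports, for each pixel, the position of the nearest labelled pixel. The SciPy sketch below (with a placeholder file name) illustrates the idea; the Galaxy tool may implement it differently:
+
+```python
+import numpy as np
+from scipy.ndimage import distance_transform_edt
+from skimage.io import imread
+
+# Label map from the previous step: 0 = background, 1..N = seed labels.
+label_map = imread("seed_labels.tif")
+
+# For every pixel, find the indices of the nearest labelled pixel and copy
+# its label: each seed grows into its Voronoi region.
+nearest = distance_transform_edt(label_map == 0,
+                                 return_distances=False,
+                                 return_indices=True)
+tessellation = label_map[tuple(nearest)]
+```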
+
+> <question-title></question-title>
+>
+> 1. Which pixels belong to the Voronoi region of a given seed?
+>
+> > <solution-title></solution-title>
+> >
+> > 1. All pixels that are closer to that seed than to any other seed in the image.
+> >
+> {: .solution}
+>
+{: .question}
+
+## Create a foreground mask with **Threshold image**
+
+> <hands-on-title>Apply a manual threshold</hands-on-title>
+>
+> 1. {% tool [Threshold image](toolshed.g2.bx.psu.edu/repos/imgteam/2d_auto_threshold/ip_threshold/0.18.1+galaxy2) %} with the following parameters:
+> - {% icon param-file %} *"Input image"*: `output` (output of **Filter 2-D image** {% icon tool %})
+> - *"Thresholding method"*: `Manual`
+>    - *"Threshold value"*: the threshold to apply (in the workflow, this is the *"Threshold Value"* input parameter)
+>
+>    > <comment-title>Choosing the threshold</comment-title>
+>    >
+>    > The threshold separates foreground from background in the smoothed image. The resulting binary mask is later used to restrict the Voronoi regions to the foreground.
+> {: .comment}
+>
+{: .hands_on}
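+
+Manual thresholding simply compares every pixel with a fixed value; as a rough Python sketch (the threshold value of 30 and the file names are placeholders):
+
+```python
+import numpy as np
+from skimage.io import imread, imsave
+
+smoothed = imread("smoothed.tif")
+
+# Pixels above the threshold become foreground (255), the rest background (0).
+threshold = 30  # placeholder; in the workflow this is a user-supplied parameter
+mask = np.where(smoothed > threshold, 255, 0).astype(np.uint8)
+imsave("mask.tif", mask)
+```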
+
+
+## Combine the results with **Process images using arithmetic expressions**
+
+> <hands-on-title>Restrict the Voronoi regions to the foreground</hands-on-title>
+>
+> 1. {% tool [Process images using arithmetic expressions](toolshed.g2.bx.psu.edu/repos/imgteam/image_math/image_math/1.26.4+galaxy1) %} with the following parameters:
+> - *"Expression"*: `tessellation * (mask / 255) * (1 - seeds / 255)`
+> - In *"Input images"*:
+> - {% icon param-repeat %} *"Insert Input images"*
+> - {% icon param-file %} *"Image"*: `result` (output of **Compute Voronoi tessellation** {% icon tool %})
+> - *"Variable for representation of the image within the expression"*: `tessellation`
+> - {% icon param-repeat %} *"Insert Input images"*
+> - {% icon param-file %} *"Image"*: `output` (Input dataset)
+> - *"Variable for representation of the image within the expression"*: `seeds`
+> - {% icon param-repeat %} *"Insert Input images"*
+> - {% icon param-file %} *"Image"*: `output` (output of **Threshold image** {% icon tool %})
+> - *"Variable for representation of the image within the expression"*: `mask`
+>
+>    > <comment-title>What does the expression do?</comment-title>
+>    >
+>    > `mask / 255` is 1 on the thresholded foreground and 0 elsewhere, and `1 - seeds / 255` is 0 on the seed pixels and 1 elsewhere. Multiplying the tessellation by both factors therefore keeps the Voronoi labels only on the foreground and sets the seed pixels themselves to zero.
+> {: .comment}
+>
+{: .hands_on}
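+
+Because all three inputs are images of the same size, the expression is plain element-wise arithmetic. The tiny NumPy sketch below reproduces it on hand-made 3x3 arrays so you can see the effect of each factor:
+
+```python
+import numpy as np
+
+# Toy inputs: Voronoi labels, a 0/255 foreground mask, and a 0/255 seed image.
+tessellation = np.array([[1, 1, 2],
+                         [1, 2, 2],
+                         [1, 2, 2]])
+mask = np.array([[255, 255,   0],
+                 [255, 255, 255],
+                 [  0, 255, 255]])
+seeds = np.array([[255,   0,   0],
+                  [  0,   0, 255],
+                  [  0,   0,   0]])
+
+# Same expression as in the Galaxy tool: labels survive only on the
+# foreground, and the seed pixels themselves are set to zero.
+result = tessellation * (mask / 255) * (1 - seeds / 255)
+print(result)
+```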
+
+> <question-title></question-title>
+>
+> 1. Why are the mask and the seed image divided by 255 in the expression?
+>
+> > <solution-title></solution-title>
+> >
+> > 1. Both are binary images with values 0 and 255. Dividing by 255 turns them into 0/1 factors, so multiplying them with the tessellation keeps the labels on the foreground mask and zeroes out the seed pixels without changing the label values themselves.
+> >
+> {: .solution}
+>
+{: .question}
+
+# Visualization
+
+## Colorize the regions with **Colorize label map**
+
+> <hands-on-title>Colorize the Voronoi regions</hands-on-title>
+>
+> 1. {% tool [Colorize label map](toolshed.g2.bx.psu.edu/repos/imgteam/colorize_labels/colorize_labels/3.2.1+galaxy1) %} with the following parameters:
+> - {% icon param-file %} *"Input image (label map)"*: `result` (output of **Process images using arithmetic expressions** {% icon tool %})
+>
+>    > <comment-title>Why colorize?</comment-title>
+>    >
+>    > The combined result is a label map in which each Voronoi region has its own integer label. Colorizing the labels makes the regions easy to tell apart by eye, with neighbouring regions receiving different colors.
+> {: .comment}
+>
+{: .hands_on}
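+
+Assigning colors to integer labels is, for example, what `skimage.color.label2rgb` does; a minimal sketch with placeholder file names:
+
+```python
+import numpy as np
+from skimage.color import label2rgb
+from skimage.io import imread, imsave
+
+labels = imread("result.tif").astype(int)
+
+# Give every label a distinct color; keep the background (label 0) black.
+rgb = label2rgb(labels, bg_label=0, bg_color=(0, 0, 0))
+imsave("colorized.png", (rgb * 255).astype(np.uint8))
+```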
+
+
+
+
+# Conclusion
+
+In this tutorial, we performed Voronoi segmentation in Galaxy: we extracted a single channel from the input image, smoothed it with a Gaussian filter, and thresholded it to obtain a foreground mask; in parallel, we converted the binary seed image into a label map and expanded each labelled seed into its Voronoi region. We then combined the tessellation with the mask and the seeds using an arithmetic expression and colorized the resulting label map to visualize the regions. The same workflow can be applied to bioimages, such as nuclei in fluorescence microscopy, and to Earth observation data.
+
+
diff --git a/topics/imaging/tutorials/voronoi-segmentation/workflows/index.md b/topics/imaging/tutorials/voronoi-segmentation/workflows/index.md
new file mode 100644
index 00000000000000..e092e0ae66ddd4
--- /dev/null
+++ b/topics/imaging/tutorials/voronoi-segmentation/workflows/index.md
@@ -0,0 +1,3 @@
+---
+layout: workflow-list
+---
diff --git a/topics/imaging/tutorials/voronoi-segmentation/workflows/main_workflow.ga b/topics/imaging/tutorials/voronoi-segmentation/workflows/main_workflow.ga
new file mode 100644
index 00000000000000..978594b5dc8e45
--- /dev/null
+++ b/topics/imaging/tutorials/voronoi-segmentation/workflows/main_workflow.ga
@@ -0,0 +1,524 @@
+{
+ "a_galaxy_workflow": "true",
+ "annotation": "Generic workflow to perform voronoi segmentation and their quantitative analysis (Black Background)",
+ "comments": [
+ {
+ "child_steps": [
+ 3,
+ 2,
+ 0,
+ 1
+ ],
+ "color": "orange",
+ "data": {
+ "title": "User Input"
+ },
+ "id": 0,
+ "position": [
+ 0,
+ 45
+ ],
+ "size": [
+ 251.2,
+ 660.6
+ ],
+ "type": "frame"
+ },
+ {
+ "child_steps": [
+ 10
+ ],
+ "color": "blue",
+ "data": {
+ "title": "Visualisation Image features"
+ },
+ "id": 3,
+ "position": [
+ 2526.9,
+ 421.2
+ ],
+ "size": [
+ 240,
+ 411
+ ],
+ "type": "frame"
+ },
+ {
+ "child_steps": [
+ 9,
+ 5,
+ 7,
+ 8
+ ],
+ "color": "lime",
+ "data": {
+ "title": "Voronoi segmentation"
+ },
+ "id": 2,
+ "position": [
+ 974.3,
+ 257.2
+ ],
+ "size": [
+ 1254.2,
+ 900.9
+ ],
+ "type": "frame"
+ },
+ {
+ "child_steps": [
+ 6,
+ 4
+ ],
+ "color": "yellow",
+ "data": {
+ "title": "Preprocessing (only for bio-images)"
+ },
+ "id": 1,
+ "position": [
+ 360.3,
+ 0
+ ],
+ "size": [
+ 401,
+ 429
+ ],
+ "type": "frame"
+ }
+ ],
+ "creator": [
+ {
+ "class": "Person",
+ "identifier": "",
+ "name": "Riccardo Massei"
+ }
+ ],
+ "format-version": "0.1",
+ "license": "MIT",
+ "name": "Voronoi Segmentation BiA (Tutorial)",
+ "report": {
+ "markdown": "\n# Workflow Execution Report\n\n## Workflow Inputs\n```galaxy\ninvocation_inputs()\n```\n\n## Workflow Outputs\n```galaxy\ninvocation_outputs()\n```\n\n## Workflow\n```galaxy\nworkflow_display()\n```\n"
+ },
+ "steps": {
+ "0": {
+ "annotation": "Image to analyze",
+ "content_id": null,
+ "errors": null,
+ "id": 0,
+ "input_connections": {},
+ "inputs": [
+ {
+ "description": "Image to analyze",
+ "name": "Input Image"
+ }
+ ],
+ "label": "Input Image",
+ "name": "Input dataset",
+ "outputs": [],
+ "position": {
+ "left": 19.896066009909354,
+ "top": 85.02096026201994
+ },
+ "tool_id": null,
+ "tool_state": "{\"optional\": false, \"tag\": null}",
+ "tool_version": null,
+ "type": "data_input",
+ "uuid": "57297a4a-3c64-41bc-bf4a-70aa07221187",
+ "when": null,
+ "workflow_outputs": []
+ },
+ "1": {
+ "annotation": "Channel where to perform the Voronoi Segmentation",
+ "content_id": null,
+ "errors": null,
+ "id": 1,
+ "input_connections": {},
+ "inputs": [
+ {
+ "description": "Channel where to perform the Voronoi Segmentation",
+ "name": "Select Channel for Voronoi Segmentation"
+ }
+ ],
+ "label": "Select Channel for Voronoi Segmentation",
+ "name": "Input parameter",
+ "outputs": [],
+ "position": {
+ "left": 23.89588290444067,
+ "top": 265.0208381917074
+ },
+ "tool_id": null,
+ "tool_state": "{\"parameter_type\": \"integer\", \"optional\": false}",
+ "tool_version": null,
+ "type": "parameter_input",
+ "uuid": "a375cf18-e81c-4ecd-af0e-dd52f64d5578",
+ "when": null,
+ "workflow_outputs": []
+ },
+ "2": {
+ "annotation": "The threshold in Voronoi segmentation sets a distance limit for each region's size and influence around its seed point.",
+ "content_id": null,
+ "errors": null,
+ "id": 2,
+ "input_connections": {},
+ "inputs": [
+ {
+ "description": "The threshold in Voronoi segmentation sets a distance limit for each region's size and influence around its seed point.",
+ "name": "Threshold Value"
+ }
+ ],
+ "label": "Threshold Value",
+ "name": "Input parameter",
+ "outputs": [],
+ "position": {
+ "left": 21.89588290444067,
+ "top": 449.5208381917074
+ },
+ "tool_id": null,
+ "tool_state": "{\"parameter_type\": \"float\", \"optional\": false}",
+ "tool_version": null,
+ "type": "parameter_input",
+ "uuid": "372a34f0-8d8b-4bb2-80f7-64990cb65abd",
+ "when": null,
+ "workflow_outputs": []
+ },
+ "3": {
+ "annotation": "Binary Image with seeds",
+ "content_id": null,
+ "errors": null,
+ "id": 3,
+ "input_connections": {},
+ "inputs": [
+ {
+ "description": "Binary Image with seeds",
+ "name": "Input seeds for segmentation"
+ }
+ ],
+ "label": "Input seeds for segmentation",
+ "name": "Input dataset",
+ "outputs": [],
+ "position": {
+ "left": 21.453422385751928,
+ "top": 603.3870754969164
+ },
+ "tool_id": null,
+ "tool_state": "{\"optional\": false, \"tag\": null}",
+ "tool_version": null,
+ "type": "data_input",
+ "uuid": "d45104a3-8567-4369-94e5-61149142bc00",
+ "when": null,
+ "workflow_outputs": []
+ },
+ "4": {
+ "annotation": "",
+ "content_id": "toolshed.g2.bx.psu.edu/repos/imgteam/bfconvert/ip_convertimage/6.7.0+galaxy3",
+ "errors": null,
+ "id": 4,
+ "input_connections": {
+ "channel_options|channel": {
+ "id": 1,
+ "output_name": "output"
+ },
+ "input_file": {
+ "id": 0,
+ "output_name": "output"
+ }
+ },
+ "inputs": [
+ {
+ "description": "runtime parameter for tool Convert image format",
+ "name": "channel_options"
+ }
+ ],
+ "label": null,
+ "name": "Convert image format",
+ "outputs": [
+ {
+ "name": "output",
+ "type": "tiff"
+ }
+ ],
+ "position": {
+ "left": 384.06302675863685,
+ "top": 50.600695401694594
+ },
+ "post_job_actions": {},
+ "tool_id": "toolshed.g2.bx.psu.edu/repos/imgteam/bfconvert/ip_convertimage/6.7.0+galaxy3",
+ "tool_shed_repository": {
+ "changeset_revision": "fcadded98e61",
+ "name": "bfconvert",
+ "owner": "imgteam",
+ "tool_shed": "toolshed.g2.bx.psu.edu"
+ },
+ "tool_state": "{\"bigtiff\": false, \"channel_options\": {\"extract\": \"True\", \"__current_case__\": 1, \"channel\": {\"__class__\": \"ConnectedValue\"}}, \"compression\": \"False\", \"crop_options\": {\"extract\": \"False\", \"__current_case__\": 0}, \"input_file\": {\"__class__\": \"ConnectedValue\"}, \"noflat\": false, \"out_format\": \"tiff\", \"pyramid_options\": {\"generate\": \"False\", \"__current_case__\": 1}, \"range_options\": {\"extract\": \"False\", \"__current_case__\": 0}, \"series_options\": {\"extract\": \"False\", \"__current_case__\": 0}, \"tile_options\": {\"extract\": \"False\", \"__current_case__\": 0}, \"timepoint_options\": {\"extract\": \"False\", \"__current_case__\": 0}, \"z_options\": {\"extract\": \"False\", \"__current_case__\": 0}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+ "tool_version": "6.7.0+galaxy3",
+ "type": "tool",
+ "uuid": "ba289013-f43c-48e3-a280-605ae8a0b063",
+ "when": null,
+ "workflow_outputs": []
+ },
+ "5": {
+ "annotation": "",
+ "content_id": "toolshed.g2.bx.psu.edu/repos/imgteam/binary2labelimage/ip_binary_to_labelimage/0.5+galaxy0",
+ "errors": null,
+ "id": 5,
+ "input_connections": {
+ "input": {
+ "id": 3,
+ "output_name": "output"
+ }
+ },
+ "inputs": [],
+ "label": null,
+ "name": "Convert binary image to label map",
+ "outputs": [
+ {
+ "name": "output",
+ "type": "tiff"
+ }
+ ],
+ "position": {
+ "left": 1110.8141593247035,
+ "top": 886.4280635640689
+ },
+ "post_job_actions": {},
+ "tool_id": "toolshed.g2.bx.psu.edu/repos/imgteam/binary2labelimage/ip_binary_to_labelimage/0.5+galaxy0",
+ "tool_shed_repository": {
+ "changeset_revision": "984358e43242",
+ "name": "binary2labelimage",
+ "owner": "imgteam",
+ "tool_shed": "toolshed.g2.bx.psu.edu"
+ },
+ "tool_state": "{\"input\": {\"__class__\": \"ConnectedValue\"}, \"mode\": {\"mode_selector\": \"cca\", \"__current_case__\": 0}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+ "tool_version": "0.5+galaxy0",
+ "type": "tool",
+ "uuid": "94e9da42-3cce-42b7-a997-7cec28ea2ab2",
+ "when": null,
+ "workflow_outputs": []
+ },
+ "6": {
+ "annotation": "",
+ "content_id": "toolshed.g2.bx.psu.edu/repos/imgteam/2d_simple_filter/ip_filter_standard/1.12.0+galaxy0",
+ "errors": null,
+ "id": 6,
+ "input_connections": {
+ "input": {
+ "id": 4,
+ "output_name": "output"
+ }
+ },
+ "inputs": [],
+ "label": null,
+ "name": "Filter 2-D image",
+ "outputs": [
+ {
+ "name": "output",
+ "type": "tiff"
+ }
+ ],
+ "position": {
+ "left": 531.3741290823522,
+ "top": 270.986691759329
+ },
+ "post_job_actions": {},
+ "tool_id": "toolshed.g2.bx.psu.edu/repos/imgteam/2d_simple_filter/ip_filter_standard/1.12.0+galaxy0",
+ "tool_shed_repository": {
+ "changeset_revision": "6c4b22ef2b81",
+ "name": "2d_simple_filter",
+ "owner": "imgteam",
+ "tool_shed": "toolshed.g2.bx.psu.edu"
+ },
+ "tool_state": "{\"filter\": {\"filter_type\": \"gaussian\", \"__current_case__\": 0, \"size\": \"3.0\"}, \"input\": {\"__class__\": \"ConnectedValue\"}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+ "tool_version": "1.12.0+galaxy0",
+ "type": "tool",
+ "uuid": "9a596b52-d436-4027-a235-d364419fa359",
+ "when": null,
+ "workflow_outputs": []
+ },
+ "7": {
+ "annotation": "",
+ "content_id": "toolshed.g2.bx.psu.edu/repos/imgteam/voronoi_tesselation/voronoi_tessellation/0.22.0+galaxy1",
+ "errors": null,
+ "id": 7,
+ "input_connections": {
+ "input": {
+ "id": 5,
+ "output_name": "output"
+ }
+ },
+ "inputs": [],
+ "label": null,
+ "name": "Compute Voronoi tessellation",
+ "outputs": [
+ {
+ "name": "result",
+ "type": "tiff"
+ }
+ ],
+ "position": {
+ "left": 1383.3013280258754,
+ "top": 901.2780391500064
+ },
+ "post_job_actions": {},
+ "tool_id": "toolshed.g2.bx.psu.edu/repos/imgteam/voronoi_tesselation/voronoi_tessellation/0.22.0+galaxy1",
+ "tool_shed_repository": {
+ "changeset_revision": "e7fdea8385f0",
+ "name": "voronoi_tesselation",
+ "owner": "imgteam",
+ "tool_shed": "toolshed.g2.bx.psu.edu"
+ },
+ "tool_state": "{\"input\": {\"__class__\": \"ConnectedValue\"}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+ "tool_version": "0.22.0+galaxy1",
+ "type": "tool",
+ "uuid": "070b08bc-7959-496e-8c7b-a095ef6f89a4",
+ "when": null,
+ "workflow_outputs": []
+ },
+ "8": {
+ "annotation": "",
+ "content_id": "toolshed.g2.bx.psu.edu/repos/imgteam/2d_auto_threshold/ip_threshold/0.18.1+galaxy2",
+ "errors": null,
+ "id": 8,
+ "input_connections": {
+ "input": {
+ "id": 6,
+ "output_name": "output"
+ },
+ "th_method|threshold": {
+ "id": 2,
+ "output_name": "output"
+ }
+ },
+ "inputs": [
+ {
+ "description": "runtime parameter for tool Threshold image",
+ "name": "th_method"
+ }
+ ],
+ "label": null,
+ "name": "Threshold image",
+ "outputs": [
+ {
+ "name": "output",
+ "type": "tiff"
+ }
+ ],
+ "position": {
+ "left": 1129.085474681118,
+ "top": 329.595615762486
+ },
+ "post_job_actions": {},
+ "tool_id": "toolshed.g2.bx.psu.edu/repos/imgteam/2d_auto_threshold/ip_threshold/0.18.1+galaxy2",
+ "tool_shed_repository": {
+ "changeset_revision": "e5c8e7e72373",
+ "name": "2d_auto_threshold",
+ "owner": "imgteam",
+ "tool_shed": "toolshed.g2.bx.psu.edu"
+ },
+ "tool_state": "{\"input\": {\"__class__\": \"ConnectedValue\"}, \"invert_output\": false, \"th_method\": {\"method_id\": \"manual\", \"__current_case__\": 0, \"threshold\": {\"__class__\": \"ConnectedValue\"}, \"block_size\": \"0\", \"offset\": \"0\"}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+ "tool_version": "0.18.1+galaxy2",
+ "type": "tool",
+ "uuid": "761a9d93-2d70-498a-b79f-c70de1363f6c",
+ "when": null,
+ "workflow_outputs": []
+ },
+ "9": {
+ "annotation": "",
+ "content_id": "toolshed.g2.bx.psu.edu/repos/imgteam/image_math/image_math/1.26.4+galaxy1",
+ "errors": null,
+ "id": 9,
+ "input_connections": {
+ "inputs_0|image": {
+ "id": 7,
+ "output_name": "result"
+ },
+ "inputs_1|image": {
+ "id": 3,
+ "output_name": "output"
+ },
+ "inputs_2|image": {
+ "id": 8,
+ "output_name": "output"
+ }
+ },
+ "inputs": [],
+ "label": null,
+ "name": "Process images using arithmetic expressions",
+ "outputs": [
+ {
+ "name": "result",
+ "type": "tiff"
+ }
+ ],
+ "position": {
+ "left": 1935.0085912670663,
+ "top": 583.5575492040355
+ },
+ "post_job_actions": {},
+ "tool_id": "toolshed.g2.bx.psu.edu/repos/imgteam/image_math/image_math/1.26.4+galaxy1",
+ "tool_shed_repository": {
+ "changeset_revision": "f8b7770cbca5",
+ "name": "image_math",
+ "owner": "imgteam",
+ "tool_shed": "toolshed.g2.bx.psu.edu"
+ },
+ "tool_state": "{\"expression\": \"tessellation * (mask / 255) * (1 - seeds / 255)\", \"inputs\": [{\"__index__\": 0, \"image\": {\"__class__\": \"ConnectedValue\"}, \"name\": \"tessellation\"}, {\"__index__\": 1, \"image\": {\"__class__\": \"ConnectedValue\"}, \"name\": \"seeds\"}, {\"__index__\": 2, \"image\": {\"__class__\": \"ConnectedValue\"}, \"name\": \"mask\"}], \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+ "tool_version": "1.26.4+galaxy1",
+ "type": "tool",
+ "uuid": "b81e3146-4bcd-477e-8499-9bd1f9a5bed6",
+ "when": null,
+ "workflow_outputs": []
+ },
+ "10": {
+ "annotation": "Visualize the different regions",
+ "content_id": "toolshed.g2.bx.psu.edu/repos/imgteam/colorize_labels/colorize_labels/3.2.1+galaxy1",
+ "errors": null,
+ "id": 10,
+ "input_connections": {
+ "input": {
+ "id": 9,
+ "output_name": "result"
+ }
+ },
+ "inputs": [],
+ "label": "Visualisation",
+ "name": "Colorize label map",
+ "outputs": [
+ {
+ "name": "output",
+ "type": "png"
+ }
+ ],
+ "position": {
+ "left": 2545.469463466948,
+ "top": 600.5368843142134
+ },
+ "post_job_actions": {},
+ "tool_id": "toolshed.g2.bx.psu.edu/repos/imgteam/colorize_labels/colorize_labels/3.2.1+galaxy1",
+ "tool_shed_repository": {
+ "changeset_revision": "3aa2d054848a",
+ "name": "colorize_labels",
+ "owner": "imgteam",
+ "tool_shed": "toolshed.g2.bx.psu.edu"
+ },
+ "tool_state": "{\"bg_color\": \"#000000\", \"bg_label\": \"0\", \"input\": {\"__class__\": \"ConnectedValue\"}, \"radius\": \"10\", \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+ "tool_version": "3.2.1+galaxy1",
+ "type": "tool",
+ "uuid": "80875ea7-b51a-40aa-95af-7835c061d53f",
+ "when": null,
+ "workflow_outputs": [
+ {
+ "label": "Image",
+ "output_name": "output",
+ "uuid": "69596a0e-4c77-4407-b2a2-0b978afd1876"
+ }
+ ]
+ }
+ },
+ "tags": [
+    "imageanalysis",
+ "Segmentation",
+ "voronoi"
+ ],
+ "uuid": "0e074432-8100-4013-a5d5-b441eafca0ce",
+ "version": 9
+}
\ No newline at end of file