# HG changeset patch
# User iuc
# Date 1770400232 0
# Node ID 356d58ae85fa78b26d9b2298980d41ffea5d031f
planemo upload for repository https://github.com/galaxyproject/tools-iuc/tree/main/tools/biapy commit 63860b5c6c21e0b76b1c55a5e71cafcb77d6cc84
diff -r 000000000000 -r 356d58ae85fa biapy.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/biapy.xml Fri Feb 06 17:50:32 2026 +0000
@@ -0,0 +1,623 @@
+
+ Accessible deep learning on bioimages
+
+ macros.xml
+
+
+ topic_3474
+
+
+
+ operation_2945
+ operation_3925
+ operation_3443
+
+ operation_2946
+
+
+
+ operation_2944
+
+
+
+
+
+
+
+ 0
+ #set $test_gt_avail = 'test_gt_yes'
+ #end if
+ #end if
+
+ ## Define output directory
+ mkdir -p output &&
+
+ ## Define checkpoint directory in case it is needed
+ mkdir -p '$checkpoint_dir' &&
+
+ ########## Reuse provided yaml file and update paths ##########
+ #if $mode_selection.selected_mode == 'custom_cfg':
+ #if $mode_selection.get('test_sec') and $mode_selection['test_sec'].get('gt_test'):
+ #set $test_gt_avail = 'test_gt_yes'
+ #end if
+ #set $mpath = $mode_selection.get('biapy_model_path')
+ #if $mpath and str($mpath) not in ['None', '']
+ ln -fs '$mpath' ${checkpoint_file} &&
+ #end if
+ python3 '$__tool_directory__/create_yaml.py'
+ --input_config_path '$mode_selection.config_path'
+ --num_cpus "\${GALAXY_SLOTS:-1}"
+ ${common_yaml_args}
+ ## Optionally override data paths with the staged dirs if user provided inputs
+ #if $selected_phase in ['train_test', 'train'] and $mode_selection.get('train_sec') and $mode_selection['train_sec'].get('raw_train')
+ --raw_train '$train_raw_dir'
+ #if $mode_selection['train_sec'].get('gt_train')
+ --gt_train '$train_gt_dir'
+ #end if
+ #end if
+ #if $selected_phase in ['train_test', 'test'] and $mode_selection.get('test_sec') and $mode_selection['test_sec'].get('raw_test')
+ --test_raw_path '$test_raw_dir'
+ #if $test_gt_avail == 'test_gt_yes' and $mode_selection['test_sec'].get('gt_test')
+ --test_gt_path '$test_gt_dir'
+ #end if
+ #end if
+ #if $mpath and str($mpath) not in ['None', '']
+ --model '$checkpoint_file'
+ --model_source 'biapy'
+ #end if
+ #else
+ ########## Create new yaml file ##########
+ #set $pm = $mode_selection["pretrained_model"]
+ python3 '$__tool_directory__/create_yaml.py'
+ --new_config
+ --num_cpus "\${GALAXY_SLOTS:-1}"
+ ${common_yaml_args}
+ --workflow '$mode_selection["workflow_selection"]["workflow"]'
+ --dims '$mode_selection["dimensionality"]["is_3d"]'
+ --obj_slices '$mode_selection["dimensionality"].get("obj_slices")'
+ --obj_size '$mode_selection["obj_size"]'
+ --img_channel '$mode_selection["img_channel"]'
+ #if $pm["model_source"] == 'biapy'
+ --model_source 'biapy'
+ #elif $pm["model_source"] == 'biapy_pretrained'
+ --model '$checkpoint_file'
+ --model_source 'biapy'
+ #elif $pm.get("model_source") == 'bmz_pretrained'
+ --model_source 'bmz'
+ --model '$pm.get("bmz_model_name", "")'
+ #end if
+ #if $selected_phase == 'train_test'
+ --raw_train '$train_raw_dir'
+ #if $gt_train
+ --gt_train '$train_gt_dir'
+ #end if
+ --test_raw_path '$test_raw_dir'
+ #if $test_gt_avail == 'test_gt_yes'
+ --test_gt_path '$test_gt_dir'
+ #end if
+ #elif $selected_phase == 'train'
+ --raw_train '$train_raw_dir'
+ #if $gt_train
+ --gt_train '$train_gt_dir'
+ #end if
+ #elif $selected_phase == 'test'
+ --test_raw_path '$test_raw_dir'
+ #if $test_gt_avail == 'test_gt_yes'
+ --test_gt_path '$test_gt_dir'
+ #end if
+ #end if
+
+ #if $pm["model_source"] == 'biapy_pretrained'
+ && ln -fs '$pm["biapy_model_path"]' ${checkpoint_file}
+ #end if
+ #end if
+
+ &&
+
+ ## Copy the training data
+ #if $selected_phase in ['train_test', 'train']:
+ mkdir -p '$train_raw_dir' &&
+ #for $i, $image in enumerate($raw_train)
+ #set $ext = $image.ext
+ ln -fs '$image' ${train_raw_dir}/training-${i}.${ext} &&
+ #end for
+ #if $gt_train and len($gt_train) > 0
+ mkdir -p '$train_gt_dir' &&
+ #for $i, $image in enumerate($gt_train)
+ #set $ext = $image.ext
+ ln -fs '$image' ${train_gt_dir}/training-gt-${i}.${ext} &&
+ #end for
+ #end if
+ #end if
+
+ ## Copy the test data
+ #if $selected_phase in ['train_test', 'test']:
+ mkdir -p '$test_raw_dir' &&
+ #for $i, $image in enumerate($raw_test)
+ #set $ext = $image.ext
+ ln -fs '$image' ${test_raw_dir}/test-${i}.${ext} &&
+ #end for
+ #if $test_gt_avail == 'test_gt_yes':
+ mkdir -p '$test_gt_dir' &&
+ #for $i, $image in enumerate($gt_test)
+ #set $ext = $image.ext
+ ln -fs '$image' ${test_gt_dir}/test-gt-${i}.${ext} &&
+ #end for
+ #end if
+ #end if
+
+ ########## Run BiaPy ##########
+ biapy
+ --config '$config_file'
+ --result_dir './output'
+ --name 'my_experiment'
+ --run_id 1
+ --gpu \${GALAXY_BIAPY_GPU_STRING:-""}
+
+ #set $outs = $selected_outputs or []
+
+ ## Copy the selected output to the correct place
+ #if $selected_phase in ['train_test', 'test']:
+
+ #if 'raw' in $outs
+ ########
+ ## RAW #
+ ########
+ && mkdir -p raw && {
+ ## Instance segmentation
+ if [ -d "output/my_experiment/results/my_experiment_1/per_image_instances" ]; then
+ mv output/my_experiment/results/my_experiment_1/per_image_instances/* raw/;
+
+ ## Instance segmentation
+ elif [ -d "output/my_experiment/results/my_experiment_1/full_image_instances" ]; then
+ mv output/my_experiment/results/my_experiment_1/full_image_instances/* raw/;
+
+ ## Semantic segmentation
+ elif [ -d "output/my_experiment/results/my_experiment_1/per_image_binarized" ]; then
+ mv output/my_experiment/results/my_experiment_1/per_image_binarized/* raw/;
+
+ ## Semantic segmentation
+ elif [ -d "output/my_experiment/results/my_experiment_1/full_image_binarized" ]; then
+ mv output/my_experiment/results/my_experiment_1/full_image_binarized/* raw/;
+
+ ## I2I
+ elif [ -d "output/my_experiment/results/my_experiment_1/full_image" ]; then
+ mv output/my_experiment/results/my_experiment_1/full_image/* raw/;
+
+ ## Detection
+ elif [ -d "output/my_experiment/results/my_experiment_1/per_image_local_max_check" ]; then
+ mv output/my_experiment/results/my_experiment_1/per_image_local_max_check/* raw/;
+
+ ## Detection, Denoising, I2I, SSL, SR
+ elif [ -d "output/my_experiment/results/my_experiment_1/per_image" ]; then
+ mv output/my_experiment/results/my_experiment_1/per_image/* raw/;
+
+ ## Classification
+ elif [ -f "output/my_experiment/results/my_experiment_1/predictions.csv" ]; then
+ mv output/my_experiment/results/my_experiment_1/predictions.csv raw/;
+ fi;
+ }
+ #end if
+
+ #if 'post_proc' in $outs
+ ##############
+ ## POST-PROC #
+ ##############
+ && mkdir -p post_proc && {
+ ## Instance segmentation
+ if [ -d "output/my_experiment/results/my_experiment_1/per_image_post_processing" ]; then
+ mv output/my_experiment/results/my_experiment_1/per_image_post_processing/* post_proc/;
+
+ ## Instance segmentation
+ elif [ -d "output/my_experiment/results/my_experiment_1/full_image_post_processing" ]; then
+ mv output/my_experiment/results/my_experiment_1/full_image_post_processing/* post_proc/;
+
+ ## Detection
+ elif [ -d "output/my_experiment/results/my_experiment_1/per_image_local_max_check_post_proc" ]; then
+ mv output/my_experiment/results/my_experiment_1/per_image_local_max_check_post_proc/* post_proc/;
+ fi;
+ }
+ #end if
+
+ #if 'metrics' in $outs and $test_gt_avail == "test_gt_yes":
+ && mkdir -p metrics &&
+ mv output/my_experiment/results/my_experiment_1/test_results_metrics.csv metrics/ 2>/dev/null || true
+ #end if
+ #end if
+ #if $selected_phase in ['train_test', 'train']:
+ #if 'tcharts' in $outs
+ && mkdir -p train_charts
+ #end if
+ #if 'tlogs' in $outs
+ && mkdir -p train_logs
+ #end if
+ #end if
+ #if 'checkpoint' in $outs
+ && mkdir -p checkpoints
+ #end if
+ ]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ^[A-Za-z]+(?:-[A-Za-z]+)+$
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff -r 000000000000 -r 356d58ae85fa create_yaml.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/create_yaml.py Fri Feb 06 17:50:32 2026 +0000
@@ -0,0 +1,241 @@
+import argparse
+import sys
+
+import requests
+import yaml
+
+
+def download_yaml_template(workflow, dims, biapy_version=""):
+    """Download a BiaPy workflow YAML template from the BiaPy GitHub repo.
+
+    workflow: a key of template_dir_map (e.g. "SEMANTIC_SEG"); anything
+        else raises ValueError.
+    dims: "2D" or "3D" — lowercased to form the template file name.
+    biapy_version: release tag used in the raw.githubusercontent.com URL.
+        NOTE(review): an empty version builds ".../refs/tags/v/templates/..."
+        which will 404 (caught below) — confirm callers always pass a version.
+    Returns the parsed template as a dict ({} for an empty file).
+    Exits the process with status 1 on any download/HTTP error.
+    """
+    template_dir_map = {
+        "SEMANTIC_SEG": "semantic_segmentation",
+        "INSTANCE_SEG": "instance_segmentation",
+        "DETECTION": "detection",
+        "DENOISING": "denoising",
+        "SUPER_RESOLUTION": "super-resolution",
+        "CLASSIFICATION": "classification",
+        "SELF_SUPERVISED": "self-supervised",
+        "IMAGE_TO_IMAGE": "image-to-image",
+    }
+
+    # Use .get() to avoid KeyError if workflow is unexpected
+    dir_name = template_dir_map.get(workflow)
+    if not dir_name:
+        raise ValueError(f"Unknown workflow: {workflow}")
+
+    # Template files follow the pattern <dir>/<2d|3d>_<dir>.yaml in the repo.
+    template_name = f"{dir_name}/{dims.lower()}_{dir_name}.yaml"
+    url = f"https://raw.githubusercontent.com/BiaPyX/BiaPy/refs/tags/v{biapy_version}/templates/{template_name}"
+
+    print(f"Downloading YAML template from {url}")
+    try:
+        response = requests.get(url, timeout=10)  # Added timeout
+        response.raise_for_status()  # Automatically raises HTTPError for 4xx/5xx
+        # safe_load returns None for an empty document; normalise to {}.
+        return yaml.safe_load(response.text) or {}
+    except requests.exceptions.RequestException as e:
+        print(f"Error: Could not download template. {e}")
+        sys.exit(1)  # Exit gracefully rather than crashing with a stack trace
+
+
+def tuple_to_list(obj):
+    """Convert tuples to lists recursively.
+
+    Walks dicts and lists and returns a copy in which every tuple, at any
+    nesting depth, has been replaced by a list so the structure can be
+    serialized cleanly by yaml.dump. Any other object is returned as-is.
+    """
+    if isinstance(obj, tuple):
+        return list(obj)
+    if isinstance(obj, dict):
+        return {k: tuple_to_list(v) for k, v in obj.items()}
+    if isinstance(obj, list):
+        return [tuple_to_list(v) for v in obj]
+    return obj
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Generate a YAML configuration from given arguments."
+ )
+ parser.add_argument(
+ '--input_config_path', default='', type=str,
+ help="Input configuration file to reuse"
+ )
+ parser.add_argument(
+ '--new_config', action='store_true',
+ help="Whether to create a new config or reuse an existing one."
+ )
+ parser.add_argument(
+ '--out_config_path', required=True, type=str,
+ help="Path to save the generated YAML configuration."
+ )
+ parser.add_argument(
+ '--workflow', default='semantic', type=str,
+ choices=['semantic', 'instance', 'detection', 'denoising',
+ 'sr', 'cls', 'sr2', 'i2i'],
+ )
+ parser.add_argument(
+ '--dims', default='2d', type=str,
+ choices=['2d_stack', '2d', '3d'],
+ help="Number of dimensions for the problem"
+ )
+ parser.add_argument(
+ '--obj_slices', default='', type=str,
+ choices=['', '1-5', '5-10', '10-20', '20-60', '60+'],
+ help="Number of slices for the objects in the images"
+ )
+ parser.add_argument(
+ '--obj_size', default='0-25', type=str,
+ choices=['0-25', '25-100', '100-200', '200-500', '500+'],
+ help="Size of the objects in the images"
+ )
+ parser.add_argument(
+ '--img_channel', default=1, type=int,
+ help="Number of channels in the input images"
+ )
+ parser.add_argument(
+ '--model_source', default='biapy',
+ choices=['biapy', 'bmz', 'torchvision'],
+ help="Source of the model."
+ )
+ parser.add_argument(
+ '--model', default='', type=str,
+ help=("Path to the model file if using a pre-trained model "
+ "from BiaPy or name of the model within BioImage "
+ "Model Zoo or TorchVision.")
+ )
+ parser.add_argument(
+ '--raw_train', default='', type=str,
+ help="Path to the training raw data."
+ )
+ parser.add_argument(
+ '--gt_train', default='', type=str,
+ help="Path to the training ground truth data."
+ )
+ parser.add_argument(
+ '--test_raw_path', default='', type=str,
+ help="Path to the testing raw data."
+ )
+ parser.add_argument(
+ '--test_gt_path', default='', type=str,
+ help="Path to the testing ground truth data."
+ )
+ parser.add_argument(
+ '--biapy_version', default='', type=str,
+ help="BiaPy version to use."
+ )
+ parser.add_argument(
+ '--num_cpus', default="1", type=str,
+ help="Number of CPUs to allocate."
+ )
+ args = parser.parse_args()
+
+ if args.new_config:
+ workflow_map = {
+ "semantic": "SEMANTIC_SEG",
+ "instance": "INSTANCE_SEG",
+ "detection": "DETECTION",
+ "denoising": "DENOISING",
+ "sr": "SUPER_RESOLUTION",
+ "cls": "CLASSIFICATION",
+ "sr2": "SELF_SUPERVISED",
+ "i2i": "IMAGE_TO_IMAGE",
+ }
+ workflow_type = workflow_map[args.workflow]
+
+ ndim = "3D" if args.dims == "3d" else "2D"
+ as_stack = args.dims in ["2d_stack", "2d"]
+
+ config = download_yaml_template(workflow_type, ndim, biapy_version=args.biapy_version)
+
+ # Initialization using setdefault to prevent KeyErrors
+ config.setdefault("PROBLEM", {})
+ config["PROBLEM"].update({"TYPE": workflow_type, "NDIM": ndim})
+
+ config.setdefault("TEST", {})["ANALIZE_2D_IMGS_AS_3D_STACK"] = as_stack
+
+ # Handle MODEL and PATHS
+ model_cfg = config.setdefault("MODEL", {})
+ if args.model_source == "biapy":
+ model_cfg["SOURCE"] = "biapy"
+ is_loading = bool(args.model)
+ model_cfg["LOAD_CHECKPOINT"] = is_loading
+ model_cfg["LOAD_MODEL_FROM_CHECKPOINT"] = is_loading
+ if is_loading:
+ config.setdefault("PATHS", {})["CHECKPOINT_FILE"] = args.model
+ elif args.model_source == "bmz":
+ model_cfg["SOURCE"] = "bmz"
+ model_cfg.setdefault("BMZ", {})["SOURCE_MODEL_ID"] = args.model
+ elif args.model_source == "torchvision":
+ model_cfg["SOURCE"] = "torchvision"
+ model_cfg["TORCHVISION_MODEL_NAME"] = args.model
+
+ # PATCH_SIZE Logic
+ obj_size_map = {
+ "0-25": (256, 256), "25-100": (256, 256),
+ "100-200": (512, 512), "200-500": (512, 512), "500+": (1024, 1024),
+ }
+ obj_size = obj_size_map[args.obj_size]
+
+ obj_slices_map = {"": -1, "1-5": 5, "5-10": 10, "10-20": 20, "20-60": 40, "60+": 80}
+ obj_slices = obj_slices_map.get(args.obj_slices, -1)
+
+ if ndim == "2D":
+ patch_size = obj_size + (args.img_channel,)
+ else:
+ if obj_slices == -1:
+ print("Error: For 3D problems, obj_slices must be specified.")
+ sys.exit(1)
+ patch_size = (obj_slices,) + obj_size + (args.img_channel,)
+
+ config.setdefault("DATA", {})["PATCH_SIZE"] = str(patch_size)
+ config["DATA"]["REFLECT_TO_COMPLETE_SHAPE"] = True
+
+ else:
+ if not args.input_config_path:
+ print("Error: Input configuration path must be specified.")
+ sys.exit(1)
+ try:
+ with open(args.input_config_path, 'r', encoding='utf-8') as f:
+ config = yaml.safe_load(f) or {}
+ except FileNotFoundError:
+ print(f"Error: File {args.input_config_path} not found.")
+ sys.exit(1)
+
+ # Always set NUM_CPUS
+ config.setdefault("SYSTEM", {})
+ try:
+ num_cpus = max(int(args.num_cpus), 1)
+ except BaseException:
+ num_cpus = 1
+ config["SYSTEM"].update({"NUM_CPUS": num_cpus})
+
+ # Global overrides (Train/Test)
+ config.setdefault("TRAIN", {})
+ config.setdefault("DATA", {})
+
+ if args.raw_train:
+ config["TRAIN"]["ENABLE"] = True
+ config["DATA"].setdefault("TRAIN", {}).update({
+ "PATH": args.raw_train,
+ "GT_PATH": args.gt_train
+ })
+ else:
+ config["TRAIN"]["ENABLE"] = False
+
+ test_cfg = config.setdefault("TEST", {})
+ if args.test_raw_path:
+ test_cfg["ENABLE"] = True
+ data_test = config["DATA"].setdefault("TEST", {})
+ data_test["PATH"] = args.test_raw_path
+ data_test["LOAD_GT"] = bool(args.test_gt_path)
+ if args.test_gt_path:
+ data_test["GT_PATH"] = args.test_gt_path
+ else:
+ test_cfg["ENABLE"] = False
+
+ config.setdefault("MODEL", {})["OUT_CHECKPOINT_FORMAT"] = "safetensors"
+
+ # Final cleanup and save
+ config = tuple_to_list(config)
+ with open(args.out_config_path, 'w', encoding='utf-8') as f:
+ yaml.dump(config, f, default_flow_style=False)
+
+ print(f"Success: YAML configuration written to {args.out_config_path}")
+
+
+if __name__ == "__main__":
+ main()
diff -r 000000000000 -r 356d58ae85fa macros.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/macros.xml Fri Feb 06 17:50:32 2026 +0000
@@ -0,0 +1,120 @@
+
+ 3.6.8
+ 0
+ 25.0
+
+
+
+ biapyx/biapy:@TOOL_VERSION@-11.8
+
+
+
+
+
+
+
+
+
+ 10.1038/s41592-025-02699-y
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff -r 000000000000 -r 356d58ae85fa test-data/example.yaml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/example.yaml Fri Feb 06 17:50:32 2026 +0000
@@ -0,0 +1,53 @@
+# BiaPy version: 3.6.2
+
+SYSTEM:
+ NUM_CPUS: -1
+
+PROBLEM:
+ TYPE: SEMANTIC_SEG
+ NDIM: 2D
+
+DATA:
+ PATCH_SIZE: (256, 256, 1)
+ TRAIN:
+ PATH: /path/to/data
+ GT_PATH: /path/to/data
+ IN_MEMORY: True
+ VAL:
+ SPLIT_TRAIN: 0.1
+ TEST:
+ PATH: /path/to/data
+ GT_PATH: /path/to/data
+ IN_MEMORY: True
+ LOAD_GT: True
+ PADDING: (32,32)
+
+AUGMENTOR:
+ ENABLE: True
+ AUG_SAMPLES: False
+ RANDOM_ROT: True
+ VFLIP: True
+ HFLIP: True
+
+MODEL:
+ ARCHITECTURE: unet
+ FEATURE_MAPS: [16, 32, 64, 128, 256]
+ LOAD_CHECKPOINT: False
+ SOURCE: "bmz"
+ BMZ:
+ SOURCE_MODEL_ID: 'sensible-cat'
+
+TRAIN:
+ ENABLE: False
+ OPTIMIZER: ADAMW
+ LR: 1.E-3
+ BATCH_SIZE: 6
+ EPOCHS: 20
+ PATIENCE: 20
+ LR_SCHEDULER:
+ NAME: 'onecycle' # use one-cycle learning rate scheduler
+
+TEST:
+ ENABLE: True
+ AUGMENTATION: False
+ FULL_IMG: False
diff -r 000000000000 -r 356d58ae85fa test-data/im_0000.png
Binary file test-data/im_0000.png has changed
diff -r 000000000000 -r 356d58ae85fa test-data/mask_0000.png
Binary file test-data/mask_0000.png has changed