diff --git a/extensions/chaeo/accessors.py b/extensions/chaeo/accessors.py
index 82f726ba86fddf817583e349027a83b134d483e2..60cc2c70e285d173021ea7e81b4b900862dd874c 100644
--- a/extensions/chaeo/accessors.py
+++ b/extensions/chaeo/accessors.py
@@ -82,7 +82,11 @@ class Multichannel3dPatchStack(InMemoryDataAccessor):
         """
 
         if isinstance(data, list):  # list of YXCZ patches
-            nda = np.array(data)
+            # pad ragged patches to the largest Z depth; assumes all patches share Y, X and C dimensions
+            nda = np.zeros((len(data), *np.array([e.shape for e in data]).max(axis=0)), dtype=data[0].dtype)
+            for i in range(0, len(data)):
+                nzi = data[i].shape[-1]
+                nda[i, :, :, :, 0:nzi] = data[i]
             assert nda.ndim == 5
             # self._data = np.moveaxis( # pos-YXCZ
             #         nda,
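
Note on the accessors.py change above: np.array over a list of YXCZ patches only yields a dense 5d array when every patch has the same shape; with ragged Z depths it either raises or falls back to an object array (depending on the numpy version), so the ndim == 5 assertion cannot hold. The replacement instead allocates a pos-YXCZ array sized to the largest patch and zero-pads the trailing Z planes. A minimal standalone sketch of the same idea, using hypothetical shapes and assuming (as the new code does) that patches agree in Y, X and C and differ only in Z:

    import numpy as np

    # two hypothetical YXCZ patches that differ only in Z depth
    patches = [
        np.ones((16, 16, 1, 3), dtype='uint16'),
        np.ones((16, 16, 1, 5), dtype='uint16'),
    ]

    # allocate a pos-YXCZ array sized to the largest patch in each dimension
    max_shape = np.array([p.shape for p in patches]).max(axis=0)
    nda = np.zeros((len(patches), *max_shape), dtype=patches[0].dtype)

    # copy each patch in, leaving any trailing Z planes zero-filled
    for i, p in enumerate(patches):
        nda[i, :, :, :, 0:p.shape[-1]] = p

    assert nda.shape == (2, 16, 16, 1, 5)
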
diff --git a/extensions/chaeo/tests/test_zstack.py b/extensions/chaeo/tests/test_zstack.py
index a78fbea4e9b4721fb19d10c60c86688cbdc15edb..f472c7734bff93eedb880d1581e5bc84cf503c84 100644
--- a/extensions/chaeo/tests/test_zstack.py
+++ b/extensions/chaeo/tests/test_zstack.py
@@ -209,6 +209,7 @@ class TestZStackDerivedDataProducts(unittest.TestCase):
             DummyInstanceSegmentationModel(),
         ]
         export_params = RoiSetExportParams(**{
+            'expand_box_by': [128, 2],
             'pixel_probabilities': True,
             'patches_3d': {},
             'patches_2d_for_annotation': {
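
The only functional change to the test is the new 'expand_box_by' entry in the export parameters. By analogy with the zmask_expand_box_by kwarg removed from workflows.py in this same diff, the pair is presumably the padding applied around each ROI's bounding box, in pixels (XY) and planes (Z), before patches are exported. A minimal, hypothetical construction, assuming RoiSetExportParams tolerates a partial set of keys (the real test passes the full dict shown above):

    from extensions.chaeo.params import RoiSetExportParams

    # [128, 2] is read here as 128 px of XY padding and 2 z-planes of padding
    # around each ROI bounding box; this interpretation is an assumption
    export_params = RoiSetExportParams(**{
        'expand_box_by': [128, 2],
        'pixel_probabilities': True,
        'patches_3d': {},
    })
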
diff --git a/extensions/chaeo/workflows.py b/extensions/chaeo/workflows.py
index 91b09c9d45178659a740efa85551b9caa6486776..8dd7554fe6657e19e21a6d3f0852927cfb160151 100644
--- a/extensions/chaeo/workflows.py
+++ b/extensions/chaeo/workflows.py
@@ -9,223 +9,15 @@ from skimage.measure import label, regionprops_table
 from skimage.morphology import dilation
 from sklearn.model_selection import train_test_split
 
-from extensions.chaeo.accessors import MonoPatchStack
-from extensions.chaeo.annotators import draw_boxes_on_3d_image
-from extensions.chaeo.models import PatchStackObjectClassifier
 from extensions.chaeo.params import RoiSetExportParams
 from extensions.chaeo.process import mask_largest_object
-from extensions.chaeo.products import export_patches_from_zstack, export_patch_masks_from_zstack, export_multichannel_patches_from_zstack, get_patches_from_zmask_meta, get_patch_masks_from_zmask_meta
-from extensions.chaeo.zmask import project_stack_from_focal_points, RoiSet
-from extensions.ilastik.models import IlastikPixelClassifierModel
+from extensions.chaeo.zmask import RoiSet
 
 from model_server.accessors import generate_file_accessor, InMemoryDataAccessor, write_accessor_data_to_file
 from model_server.models import Model, InstanceSegmentationModel, SemanticSegmentationModel
 from model_server.process import rescale
 from model_server.workflows import Timer
 
-# def get_zmask_meta(
-#     input_file_path: str,
-#     ilastik_pixel_classifier: IlastikPixelClassifierModel,
-#     segmentation_channel: int,
-#     pxmap_threshold: float,
-#     pxmap_foreground_channel: int = 0,
-#     zmask_zindex: int = None,
-#     zmask_clip: int = None,
-#     zmask_filters: Dict = None,
-#     zmask_type: str = 'boxes',
-#     **kwargs,
-# ) -> tuple:
-#     ti = Timer()
-#     stack = generate_file_accessor(Path(input_file_path))
-#     fstem = Path(input_file_path).stem
-#     ti.click('file_input')
-#
-#     # MIP if no zmask z-index is given, then classify pixels
-#     if isinstance(zmask_zindex, int):
-#         assert 0 < zmask_zindex < stack.nz
-#         zmask_data = stack.get_one_channel_data(channel=segmentation_channel).data[:, :, :, zmask_zindex]
-#     else:
-#         zmask_data = stack.get_one_channel_data(channel=segmentation_channel).data.max(axis=-1, keepdims=True)
-#     if zmask_clip:
-#         zmask_data = rescale(zmask_data, zmask_clip)
-#     mip = InMemoryDataAccessor(
-#         zmask_data,
-#     )
-#     pxmap, _ = ilastik_pixel_classifier.infer(mip)
-#     ti.click('infer_pixel_probability')
-#
-#     obmask = InMemoryDataAccessor(
-#         pxmap.data > pxmap_threshold
-#     )
-#     ti.click('threshold_pixel_mask')
-#
-#     # make zmask
-#     obj_table = ZMaskObjectTable(
-#         obmask.get_one_channel_data(pxmap_foreground_channel),
-#         stack.get_one_channel_data(segmentation_channel),
-#         mask_type=zmask_type,
-#         filters=zmask_filters,
-#         expand_box_by=kwargs['zmask_expand_box_by'],
-#     )
-#     ti.click('generate_zmasks')
-#
-#     # record pixel scale
-#     obj_table.df['pixel_scale_in_micrometers'] = float(stack.pixel_scale_in_micrometers.get('X'))
-#
-#     return ti, stack, fstem, obmask, pxmap, obj_table
-
-
-# # called by batch runners
-# def export_patches_from_multichannel_zstack(
-#         input_file_path: str,
-#         output_folder_path: str,
-#         models: List[Model],
-#         pxmap_threshold: float,
-#         pxmap_foreground_channel: int,
-#         segmentation_channel: int,
-#         patches_channel: int,
-#         zmask_zindex: int = None,  # None for MIP,
-#         zmask_clip: int = None,
-#         zmask_type: str = 'boxes',
-#         zmask_filters: Dict = None,
-#         zmask_expand_box_by: int = None,
-#         export_pixel_probabilities=True,
-#         export_2d_patches_for_training=True,
-#         export_2d_patches_for_annotation=True,
-#         draw_bounding_box_on_2d_patch=True,
-#         draw_contour_on_2d_patch=False,
-#         draw_mask_on_2d_patch=False,
-#         export_3d_patches=True,
-#         export_annotated_zstack=True,
-#         draw_label_on_zstack=False,
-#         export_patch_masks=True,
-#         rgb_overlay_channels=(None, None, None),
-#         rgb_overlay_weights=(1.0, 1.0, 1.0),
-# ) -> Dict:
-#     pixel_classifier = models[0]
-#
-#     # ti, stack, fstem, obmask, pxmap, obj_table = get_zmask_meta(
-#     #     input_file_path,
-#     #     pixel_classifier,
-#     #     segmentation_channel,
-#     #     pxmap_threshold,
-#     #     pxmap_foreground_channel=pxmap_foreground_channel,
-#     #     zmask_zindex=zmask_zindex,
-#     #     zmask_clip=zmask_clip,
-#     #     zmask_expand_box_by=zmask_expand_box_by,
-#     #     zmask_filters=zmask_filters,
-#     #     zmask_type=zmask_type,
-#     # )
-#
-#     # obj_table = ZMaskObjectTable(
-#     #     obmask.get_one_channel_data(pxmap_foreground_channel),
-#     #     stack.get_one_channel_data(segmentation_channel),
-#     #     mask_type=zmask_type,
-#     #     filters=zmask_filters,
-#     #     expand_box_by=kwargs['zmask_expand_box_by'],
-#     # )
-#
-#     if export_pixel_probabilities:
-#         write_accessor_data_to_file(
-#             Path(output_folder_path) / 'pixel_probabilities' / (fstem + '.tif'),
-#             pxmap
-#         )
-#         ti.click('export_pixel_probability')
-#
-#     if export_3d_patches and len(zmask_meta) > 0:
-#         files = export_patches_from_zstack(
-#             Path(output_folder_path) / '3d_patches',
-#             stack.get_one_channel_data(patches_channel),
-#             zmask_meta,
-#             prefix=fstem,
-#             draw_bounding_box=False,
-#             rescale_clip=0.001,
-#             make_3d=True,
-#         )
-#         ti.click('export_3d_patches')
-#
-#     if export_2d_patches_for_annotation and len(zmask_meta) > 0:
-#         files = export_multichannel_patches_from_zstack(
-#             Path(output_folder_path) / '2d_patches_annotation',
-#             stack,
-#             zmask_meta,
-#             prefix=fstem,
-#             rescale_clip=0.001,
-#             make_3d=False,
-#             focus_metric='max_sobel',
-#             ch_white=patches_channel,
-#             ch_rgb_overlay=rgb_overlay_channels,
-#             draw_bounding_box=draw_bounding_box_on_2d_patch,
-#             bounding_box_channel=1,
-#             bounding_box_linewidth=2,
-#             draw_contour=draw_contour_on_2d_patch,
-#             draw_mask=draw_mask_on_2d_patch,
-#             overlay_gain=rgb_overlay_weights,
-#         )
-#         df_patches = pd.DataFrame(files)
-#         ti.click('export_2d_patches')
-#         # associate 2d patches, dropping labeled objects that were not exported as patches
-#         df = pd.merge(df, df_patches, left_index=True, right_on='df_index').drop(columns='df_index')
-#         # prepopulate patch UUID
-#         df['patch_id'] = df.apply(lambda _: uuid4(), axis=1)
-#
-#     if export_2d_patches_for_training and len(zmask_meta) > 0:
-#         files = export_multichannel_patches_from_zstack(
-#             Path(output_folder_path) / '2d_patches_training',
-#             stack.get_one_channel_data(patches_channel),
-#             zmask_meta,
-#             prefix=fstem,
-#             rescale_clip=0.001,
-#             make_3d=False,
-#             focus_metric='max_sobel',
-#         )
-#         ti.click('export_2d_patches')
-#
-#     if export_patch_masks and len(zmask_meta) > 0:
-#         files = export_patch_masks_from_zstack(
-#             Path(output_folder_path) / 'patch_masks',
-#             stack.get_one_channel_data(patches_channel),
-#             zmask_meta,
-#             prefix=fstem,
-#         )
-#
-#     if export_annotated_zstack:
-#         annotated = InMemoryDataAccessor(
-#             draw_boxes_on_3d_image(
-#                 stack.get_one_channel_data(patches_channel).data,
-#                 zmask_meta,
-#                 add_label=draw_label_on_zstack,
-#             )
-#         )
-#         write_accessor_data_to_file(
-#             Path(output_folder_path) / 'annotated_zstacks' / (fstem + '.tif'),
-#             annotated
-#         )
-#         ti.click('export_annotated_zstack')
-#
-#     # generate multichannel projection from label centroids
-#     dff = df[df['keeper']]
-#     if len(zmask_meta) > 0:
-#         interm['projected'] = project_stack_from_focal_points(
-#             dff['centroid-0'].to_numpy(),
-#             dff['centroid-1'].to_numpy(),
-#             dff['zi'].to_numpy(),
-#             stack,
-#             degree=4,
-#         )
-#     else: # else just return MIP
-#         interm['projected'] = stack.data.max(axis=-1)
-#
-#     return {
-#         'pixel_model_id': pixel_classifier.model_id,
-#         'input_filepath': input_file_path,
-#         'number_of_objects': len(zmask_meta),
-#         'pixeL_scale_in_micrometers': stack.pixel_scale_in_micrometers,
-#         'success': True,
-#         'timer_results': ti.events,
-#         'dataframe': df[df['keeper'] == True],
-#         'interm': interm,
-#     }
 
 def infer_object_map_from_zstack(
         input_file_path: str,
@@ -239,9 +31,7 @@ def infer_object_map_from_zstack(
         zmask_clip: int = None,
         zmask_type: str = 'boxes',
         zmask_filters: Dict = None,
-        # zmask_expand_box_by: int = None,
         exports: RoiSetExportParams = RoiSetExportParams(),
-        **kwargs,
 ) -> Dict:
     assert len(models) == 2
     pixel_classifier = models[0]
@@ -290,54 +80,6 @@ def infer_object_map_from_zstack(
     )
     ti.click('generate_zmasks')
 
-    # ti, stack, fstem, obmask, pxmap, obj_table = get_zmask_meta(
-    #     input_file_path,
-    #     pixel_classifier,
-    #     segmentation_channel,
-    #     pxmap_threshold,
-    #     pxmap_foreground_channel=pxmap_foreground_channel,
-    #     zmask_zindex=zmask_zindex,
-    #     zmask_clip=zmask_clip,
-    #     # zmask_expand_box_by=zmask_expand_box_by,
-    #     zmask_filters=zmask_filters,
-    #     zmask_type=zmask_type,
-    #     **kwargs
-    # )
-
-    # # extract patches to accessor
-    # patches_acc = get_patches_from_zmask_meta(
-    #     stack.get_one_channel_data(patches_channel),
-    #     obj_table.zmask_meta,
-    #     rescale_clip=zmask_clip,
-    #     make_3d=False,
-    #     focus_metric='max_sobel',
-    #     **kwargs
-    # )
-    #
-    # # extract masks
-    # patch_masks_acc = get_patch_masks_from_zmask_meta(
-    #     stack,
-    #     obj_table.zmask_meta,
-    #     **kwargs
-    # )
-
-    # # send patches and mask stacks to object classifier
-    # result_acc, _ = object_classifier.infer(patches_acc, patch_masks_acc)
-
-    # labels_map = obj_table.interm['label_map']
-    # output_map = np.zeros(labels_map.shape, dtype=labels_map.dtype)
-    # assert labels_map.shape == obj_table.get_label_map().shape
-    # assert labels_map.dtype == obj_table.get_label_map().dtype
-    #
-    # # assign labels to object map:
-    # meta = []
-    # for ii in range(0, len(obj_table.zmask_meta)):
-    #     object_id = obj_table.zmask_meta[ii]['info'].label
-    #     result_patch = mask_largest_object(result_acc.iat(ii))
-    #     object_class = np.unique(result_patch)[1]
-    #     output_map[labels_map == object_id] = object_class
-    #     meta.append({'object_id': ii, 'object_class': object_id})
-
     object_class_map = rois.classify_by(patches_channel, object_classifier)
 
     # TODO: add ZMaskObjectTable method to export object map
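
With the commented-out scaffolding removed, the classification step reduces to the single surviving call rois.classify_by(patches_channel, object_classifier). The deleted comments above sketch what that step amounts to: for each ROI, keep only the largest connected object in the classifier's patch result, read off its class value, and paint that class over the ROI's pixels in the label map. A hedged reconstruction of that per-object loop, following the names in the deleted comments rather than a confirmed RoiSet API:

    import numpy as np

    from extensions.chaeo.process import mask_largest_object

    def object_map_from_patch_results(labels_map, zmask_meta, result_acc):
        # start from an empty map with the same shape/dtype as the label image
        output_map = np.zeros(labels_map.shape, dtype=labels_map.dtype)
        for ii in range(len(zmask_meta)):
            object_id = zmask_meta[ii]['info'].label
            # keep only the largest connected object in the classifier output patch
            result_patch = mask_largest_object(result_acc.iat(ii))
            # assumes the patch contains background (0) plus a single class value
            object_class = np.unique(result_patch)[1]
            output_map[labels_map == object_id] = object_class
        return output_map
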
diff --git a/extensions/chaeo/zmask.py b/extensions/chaeo/zmask.py
index 31bf51702d539affc825cea8e0e437ca28b00e8d..76e96bf8bb91b45b7031fbef2a2ac510393e7870 100644
--- a/extensions/chaeo/zmask.py
+++ b/extensions/chaeo/zmask.py
@@ -173,6 +173,16 @@ class RoiSet(object):
     def get_object_map(self, filters: RoiFilter):
         pass
 
+    def run_exports(self, where, channel, params):
+        names = [
+            'pixel_probabilities',
+            'patches_3d',
+            'patches_2d_for_annotation',
+            'patches_2d_for_training',
+            'patch_masks',
+            'annotated_z_stack',
+        ]
+
 
 def build_zmask_from_object_mask(
         obmask: GenericImageDataAccessor,
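
The run_exports method added to RoiSet is still a stub: it enumerates the product names that mirror the keys of RoiSetExportParams (compare the test change above) but does not yet write anything. One plausible shape for the eventual dispatch, offered purely as a sketch under the assumption that params exposes one attribute per product name, not as the committed behavior:

    from pathlib import Path

    # hypothetical helper: map each enabled export product to the subfolder
    # it would be written into; the attribute-per-product layout is assumed
    def planned_exports(where: Path, params) -> dict:
        names = [
            'pixel_probabilities',
            'patches_3d',
            'patches_2d_for_annotation',
            'patches_2d_for_training',
            'patch_masks',
            'annotated_z_stack',
        ]
        return {n: Path(where) / n for n in names if getattr(params, n, None)}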