diff --git a/.gitignore b/.gitignore
index 36a2805f7974aed48458a8127918b60d0e03f951..08aaa8ee46cef6362c9395208a3ab9ba1f2b3f48 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,10 @@
-*/.idea/*
 *__pycache__*
+
+# IDE profile files
+.idea/*
+
+# build and conda-build artifacts
+build/*
+conda-bld/*
+dist/*
+*.egg-info/*
\ No newline at end of file
diff --git a/.idea/.gitignore b/.idea/.gitignore
deleted file mode 100644
index 26d33521af10bcc7fd8cea344038eaaeb78d0ef5..0000000000000000000000000000000000000000
--- a/.idea/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-# Default ignored files
-/shelf/
-/workspace.xml
diff --git a/.idea/.name b/.idea/.name
deleted file mode 100644
index 7d20512f8a792e102d449514ad755b9eca6287ee..0000000000000000000000000000000000000000
--- a/.idea/.name
+++ /dev/null
@@ -1 +0,0 @@
-model_server
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml
deleted file mode 100644
index 105ce2da2d6447d11dfe32bfb846c3d5b199fc99..0000000000000000000000000000000000000000
--- a/.idea/inspectionProfiles/profiles_settings.xml
+++ /dev/null
@@ -1,6 +0,0 @@
-<component name="InspectionProjectProfileManager">
-  <settings>
-    <option name="USE_PROJECT_PROFILE" value="false" />
-    <version value="1.0" />
-  </settings>
-</component>
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
deleted file mode 100644
index 1a4d1ba605abd69ba93129b21071e29168076cdb..0000000000000000000000000000000000000000
--- a/.idea/misc.xml
+++ /dev/null
@@ -1,4 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project version="4">
-  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.9 (model_server_env)" project-jdk-type="Python SDK" />
-</project>
\ No newline at end of file
diff --git a/.idea/model_server.iml b/.idea/model_server.iml
deleted file mode 100644
index 167262fe593bfe12ad8b4ccee58da6af496a3f21..0000000000000000000000000000000000000000
--- a/.idea/model_server.iml
+++ /dev/null
@@ -1,8 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<module type="PYTHON_MODULE" version="4">
-  <component name="NewModuleRootManager">
-    <content url="file://$MODULE_DIR$" />
-    <orderEntry type="jdk" jdkName="Python 3.9 (model_server_env)" jdkType="Python SDK" />
-    <orderEntry type="sourceFolder" forTests="false" />
-  </component>
-</module>
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
deleted file mode 100644
index 1f65a314e4138ce057f347daca726d72621cf5d4..0000000000000000000000000000000000000000
--- a/.idea/modules.xml
+++ /dev/null
@@ -1,8 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project version="4">
-  <component name="ProjectModuleManager">
-    <modules>
-      <module fileurl="file://$PROJECT_DIR$/.idea/model_server.iml" filepath="$PROJECT_DIR$/.idea/model_server.iml" />
-    </modules>
-  </component>
-</project>
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
deleted file mode 100644
index 94a25f7f4cb416c083d265558da75d457237d671..0000000000000000000000000000000000000000
--- a/.idea/vcs.xml
+++ /dev/null
@@ -1,6 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project version="4">
-  <component name="VcsDirectoryMappings">
-    <mapping directory="$PROJECT_DIR$" vcs="Git" />
-  </component>
-</project>
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..8a8248f5388c9569d58aeab76bcce62261bf226c
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,7 @@
+Copyright (c) 2024 European Molecular Biology Laboratory
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..2acdf5e372fc7f97223ab6004992aec8e9d9b5b9
--- /dev/null
+++ b/README.md
@@ -0,0 +1,36 @@
+# model_server
+
+Serving Vision to Living Things.
+
+## Summary
+
+model_server is a service for on-demand computer vision, adapted specifically to image-based feedback in microscopy 
+and other life sciences applications.  It abstracts image data access, persists machine learning models, 
+and exposes an extensible API to facilitate low-latency analysis.  
+
+## Install Miniforge
+1. Install Miniforge for environment management:<br>https://github.com/conda-forge/miniforge/releases
+2. Under the Start menu, open `Miniforge3 > Miniforge Prompt`
+
+## Option 1: install model_server as a package:
+1. (optional) Activate the target conda environment: `mamba activate <target_environment>`
+2. From the package repository https://git.embl.de/rhodes/model_server/-/packages/ download:
+   - The most recent requirements.yml
+   - The most recent .tar.bz2 file containing the built conda package (e.g. https://git.embl.de/rhodes/model_server/-/packages/1283)
+3. In a text editor, open requirements.yml and remove all but the "channels" and "dependencies" blocks, then save.
+4. Change directories to the location of 'requirements.yml' and install dependencies:<br>`mamba env update -f requirements.yml`
+5. Change directories to the downloaded .tar.bz2 file and install the model_server package: `mamba install model_server-<version>-py_0.tar.bz2`
+
+## Option 2: install model_server from source:
+1. Install Git:<br>https://git-scm.com/download/win
+2. In the new terminal, clone the model_server repository:<br>`cd %userprofile%`<br>`git clone https://almf-staff:KJmFvyPRbpzoVZDqfMzV@git.embl.de/rhodes/model_server.git`
+3. Change into the cloned directory and create the target environment:<br>`cd model_server`<br>`mamba env create --file requirements.yml --name model_server_env`
+4. Activate the target environment: `mamba activate model_server_env`
+5. Add the project source as a Python package: `pip install --no-deps -e .`
+
+## To start the server:
+1. From the Miniforge prompt, run `mamba activate <target_environment>`
+2. Then run `python -m scripts.run_server --port 6221`
+3. A browser window should appear, with basic status information.
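+4. To verify that the server is reachable (assuming the default host and the port chosen above), query the status endpoint from another prompt, e.g. `curl http://127.0.0.1:6221/status`, or open the interactive API documentation at `http://127.0.0.1:6221/docs`.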
diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7a71c05a02dabd7701bd4480e5ace864e87dc657
--- /dev/null
+++ b/conda-recipe/meta.yaml
@@ -0,0 +1,49 @@
+{% set name = "model_server" %}
+{% set version = "2024.7.26" %}
+{% set pyproject = load_file_data('../pyproject.toml', from_recipe_dir=True) %}
+{% set pp = pyproject.get('project') %}
+
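+# Local build sketch (assumed invocation; run from the repository root so that artifacts
+# land in ./conda-bld, where conda-recipe/publish.py expects them):
+#   conda build conda-recipe/ --output-folder conda-bld/
+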
+debug:
+  {{ pyproject|pprint }}
+
+package:
+  name: {{ pp.get('name') }}
+  version: {{ pp.get('version') }}
+
+source:
+  - path: ..
+  - path: ../tests
+    folder: tests
+
+build:
+  noarch: python
+  script: {{ PYTHON }} -m pip install -vv --no-deps .
+  number: 0
+
+requirements:
+  host:
+    - python >=3.9
+    - setuptools >=61.0
+    - pip
+  run:
+    - python >=3.9
+    {% for dep in pp.get('dependencies') %}
+    - {{ dep.lower() }}
+    {% endfor %}
+    - pip
+
+
+test:
+  imports:
+    - model_server
+  commands:
+    - python -m unittest discover
+  requires:
+    - pip
+  source_files:
+    - tests
+
+about:
+  summary: Service for analyzing microscope images
+  license: 'MIT'
+  license_file: ../LICENSE
\ No newline at end of file
diff --git a/conda-recipe/publish.py b/conda-recipe/publish.py
new file mode 100644
index 0000000000000000000000000000000000000000..071d9a5f52e6950848653e4d78e010eea65993b4
--- /dev/null
+++ b/conda-recipe/publish.py
@@ -0,0 +1,51 @@
+"""
+Automate registration of conda build artifacts to EMBL GitLab;
+assumes an API access token is recorded in the ~/.pypirc configuration file
+"""
+from configparser import ConfigParser
+import json
+from pathlib import Path
+import requests
+
+id = '5668'
+proj = 'model_server'
+root = Path('../conda-bld/')
+
+# get authentication info from local config file
+cfg = ConfigParser()
+cfg.read(Path.home() / '.pypirc')
+user = cfg['gitlab-model-server']['username']
+pwd = cfg['gitlab-model-server']['password']
+
+with open(root / 'channeldata.json', 'r') as fh:
+    chdata = json.load(fh)
+
+# upload to GitLab API
+res = {}
+for sd in ['noarch', 'win-64']:
+    with open(root / sd / 'repodata.json', 'r') as fh:
+        dd = json.load(fh)
+    pkgname = f'conda_{sd}'
+
+    if len(dd['packages']) == 0:
+        continue
+
+    # put each .tar.bz2
+    for fn in dd['packages'].keys():
+        ver = dd['packages'][fn]['version']
+        stem = fn.split('.tar.bz2')[0]
+        res[(sd, fn)] = requests.put(
+            f'https://git.embl.de/api/v4/projects/{id}/packages/generic/{pkgname}/{ver}/{fn}?status=default',
+            files={'file': open(root / sd / fn, 'rb')},
+            headers={'PRIVATE-TOKEN': pwd},
+        )
+
+    # put requirements.yml
+    fn = 'requirements.yml'
+    res[(sd, fn)] = requests.put(
+        f'https://git.embl.de/api/v4/projects/{id}/packages/generic/{pkgname}/{ver}/{fn}?status=default',
+        files={'file': open(root.parent / fn, 'r')},
+        headers={'PRIVATE-TOKEN': pwd, 'Content-Type': 'text/html'},
+    )
+print('Finished')
+print(res)
\ No newline at end of file
diff --git a/model_server/base/accessors.py b/model_server/base/accessors.py
index 86b6afe42c62b8865f1b7826f89501e0c8ecb754..a9366156bb6a5fcd63267cd3fb6e319d13bfbe37 100644
--- a/model_server/base/accessors.py
+++ b/model_server/base/accessors.py
@@ -8,8 +8,8 @@ from skimage.io import imread, imsave
 import czifile
 import tifffile
 
-from model_server.base.process import make_rgb
-from model_server.base.process import is_mask
+from .process import make_rgb
+from .process import is_mask
 
 class GenericImageDataAccessor(ABC):
 
@@ -49,13 +49,51 @@ class GenericImageDataAccessor(ABC):
             nda = self.data.take(indices=carr, axis=self._ga('C'))
             return self._derived_accessor(nda)
 
+
+    def get_zi(self, zi: int):
+        """
+        Return a new accessor of a specific z-coordinate
+        """
+        return self._derived_accessor(
+            self.data.take(
+                indices=[zi],
+                axis=self._ga('Z')
+            )
+        )
+
+    def get_mip(self):
+        """
+        Return a new accessor of maximum intensity projection (MIP) along z-axis
+        """
+        return self.apply(lambda x: x.max(axis=self._ga('Z'), keepdims=True))
+
     def get_mono(self, channel: int, mip: bool = False):
         return self.get_channels([channel], mip=mip)
 
+    def get_z_argmax(self):
+        return self.apply(lambda x: x.argmax(axis=self.get_axis('Z')))
+
+    def get_focus_vector(self):
+        return self.data.sum(axis=(0, 1, 2))
+
+    @property
+    def data_xy(self) -> np.ndarray:
+        if not (self.chroma == 1 and self.nz == 1):
+            raise InvalidDataShape('Can only return XY array from accessors with a single channel and single z-level')
+        else:
+            return self.data[:, :, 0, 0]
+
+    @property
+    def data_xyz(self) -> np.ndarray:
+        if not self.chroma == 1:
+            raise InvalidDataShape('Can only return XYZ array from accessors with a single channel')
+        else:
+            return self.data[:, :, 0, :]
+
     def _gc(self, channels):
         return self.get_channels(list(channels))
 
-    def _unique(self):
+    def unique(self):
         return np.unique(self.data, return_counts=True)
 
     @property
@@ -75,6 +113,15 @@ class GenericImageDataAccessor(ABC):
     def _ga(self, arg):
         return self.get_axis(arg)
 
+    def crop_hw(self, yxhw: tuple):
+        """
+        Return subset of data cropped in X and Y
+        :param yxhw: tuple (Y, X, H, W)
+        :return: InMemoryDataAccessor of size (H x W), starting at (Y, X)
+        """
+        y, x, h, w = yxhw
+        return InMemoryDataAccessor(self.data[y: (y + h), x: (x + w), :, :])
+
     @property
     def hw(self):
         """
@@ -120,6 +167,14 @@ class GenericImageDataAccessor(ABC):
             func(self.data)
         )
 
+    @property
+    def info(self):
+        return {
+            'shape_dict': self.shape_dict,
+            'dtype': str(self.dtype),
+            'filepath': '',
+        }
+
 class InMemoryDataAccessor(GenericImageDataAccessor):
     def __init__(self, data):
         self._data = self.conform_data(data)
@@ -139,6 +194,12 @@ class GenericImageFileAccessor(GenericImageDataAccessor): # image data is loaded
     def read(fp: Path):
         return generate_file_accessor(fp)
 
+    @property
+    def info(self):
+        d = super().info
+        d['filepath'] = self.fpath.__str__()
+        return d
+
 class TifSingleSeriesFileAccessor(GenericImageFileAccessor):
     def __init__(self, fpath: Path):
         super().__init__(fpath)
@@ -240,7 +301,7 @@ class CziImageFileAccessor(GenericImageFileAccessor):
 
 def write_accessor_data_to_file(fpath: Path, acc: GenericImageDataAccessor, mkdir=True) -> bool:
     """
-    Export an image accessor to file.
+    Export an image accessor to file
     :param fpath: complete path including filename and extension
     :param acc: image accessor to be written
     :param mkdir: create any needed subdirectories in fpath if True
@@ -287,7 +348,7 @@ def write_accessor_data_to_file(fpath: Path, acc: GenericImageDataAccessor, mkdi
 def generate_file_accessor(fpath):
     """
     Given an image file path, return an image accessor, assuming the file is a supported format and represents
-    a single position array, which may be single or multi-channel, single plane or z-stack.
+    a single position array, which may be single or multichannel, single plane or z-stack.
     """
     if str(fpath).upper().endswith('.TIF') or str(fpath).upper().endswith('.TIFF'):
         return TifSingleSeriesFileAccessor(fpath)
@@ -379,6 +440,11 @@ class PatchStack(InMemoryDataAccessor):
         else:
             tifffile.imwrite(fpath, tzcyx, imagej=True)
 
+    def write(self, fp: Path, mkdir=True):
+        if mkdir:
+            fp.parent.mkdir(parents=True, exist_ok=True)
+        self.export_pyxcz(fp)
+
     @property
     def shape_dict(self):
         return dict(zip(('P', 'Y', 'X', 'C', 'Z'), self.data.shape))
@@ -437,7 +503,6 @@ def make_patch_stack_from_file(fpath):  # interpret t-dimension as patch positio
     return PatchStack(pyxcz)
 
 
-
 class Error(Exception):
     pass
 
diff --git a/model_server/base/annotators.py b/model_server/base/annotators.py
index 3b3d568aa1d9516db5e777aba5076fea17c14a6a..7626d76e72fdfe9e8f2c74b072d5764c9a568d1a 100644
--- a/model_server/base/annotators.py
+++ b/model_server/base/annotators.py
@@ -1,11 +1,23 @@
 import numpy as np
+from matplotlib import font_manager
 from PIL import Image, ImageDraw, ImageFont
 
-from model_server.base.process import rescale
+from .process import rescale
+
+def _get_font(font_size=18):
+    return ImageFont.truetype(
+        font_manager.findfont(
+            font_manager.FontProperties(
+                family='sans-serif',
+                weight='bold'
+            )
+        ),
+        size=font_size,
+    )
+
 
 def draw_boxes_on_3d_image(roiset, draw_full_depth=False, **kwargs):
     h, w, chroma, nz = roiset.acc_raw.shape
-    font_size = kwargs.get('font_size', 18)
     linewidth = kwargs.get('linewidth', 4)
 
     if ck := kwargs.get('channel'):
@@ -25,7 +37,7 @@ def draw_boxes_on_3d_image(roiset, draw_full_depth=False, **kwargs):
         for ci in range(0, len(channels)):
             pilimg = Image.fromarray(roiset.acc_raw.data[:, :, channels[ci], zi])
             draw = ImageDraw.Draw(pilimg)
-            draw.font = ImageFont.truetype(font="arial.ttf", size=font_size)
+            draw.font = _get_font()
 
             for roi in subset.itertuples('Roi'):
                 xm = round((roi.x0 + roi.x1) / 2)
diff --git a/model_server/base/api.py b/model_server/base/api.py
index 13abef8ee4d76157d91dd7e4c5227238161366c9..608738a166768fb1ff8420a8cd0b4a58b170a3be 100644
--- a/model_server/base/api.py
+++ b/model_server/base/api.py
@@ -1,101 +1,113 @@
+from typing import Union
+
 from fastapi import FastAPI, HTTPException
-from pydantic import BaseModel
+from .accessors import generate_file_accessor
+from .session import session, AccessorIdError, InvalidPathError, WriteAccessorError
 
-from model_server.base.models import DummyInstanceSegmentationModel, DummySemanticSegmentationModel
-from model_server.base.session import Session, InvalidPathError
-from model_server.base.validators import validate_workflow_inputs
-from model_server.base.workflows import classify_pixels
-from model_server.extensions.ilastik.workflows import infer_px_then_ob_model
 
 app = FastAPI(debug=True)
-session = Session()
 
-import model_server.extensions.ilastik.router
-app.include_router(model_server.extensions.ilastik.router.router)
+from .pipelines.router import router
+app.include_router(router)
+
 
 @app.on_event("startup")
 def startup():
     pass
 
+
 @app.get('/')
 def read_root():
     return {'success': True}
 
-class BounceBackParams(BaseModel):
-    par1: str
-    par2: list
-
-@app.put('/bounce_back')
-def list_bounce_back(params: BounceBackParams):
-    return {'success': True, 'params': {'par1': params.par1, 'par2': params.par2}}
 
 @app.get('/paths')
 def list_session_paths():
     return session.get_paths()
 
+
 @app.get('/status')
 def show_session_status():
     return {
         'status': 'running',
         'models': session.describe_loaded_models(),
         'paths': session.get_paths(),
+        'accessors': session.list_accessors(),
     }
 
-def change_path(key, path):
+
+def _change_path(key, path):
     try:
-        if session.get_paths()[key] == path:
-            return session.get_paths()
         session.set_data_directory(key, path)
     except InvalidPathError as e:
-        raise HTTPException(
-            status_code=404,
-            detail=e.__str__(),
-        )
-    session.log_info(f'Change {key} path to {path}')
-    return session.get_paths()
+        raise HTTPException(404, f'Did not find valid folder at: {path}')
+    session.log_info(f'Change {key} path to {path}')
+    return session.get_paths()
+
 
 @app.put('/paths/watch_input')
 def watch_input_path(path: str):
-    return change_path('inbound_images', path)
+    return _change_path('inbound_images', path)
+
 
 @app.put('/paths/watch_output')
 def watch_output_path(path: str):
-    return change_path('outbound_images', path)
+    return _change_path('outbound_images', path)
+
 
 @app.get('/session/restart')
 def restart_session(root: str = None) -> dict:
     session.restart(root=root)
     return session.describe_loaded_models()
 
+
 @app.get('/session/logs')
 def list_session_log() -> list:
     return session.get_log_data()
 
+
 @app.get('/models')
 def list_active_models():
     return session.describe_loaded_models()
 
-@app.put('/models/dummy_semantic/load/')
-def load_dummy_model() -> dict:
-    mid = session.load_model(DummySemanticSegmentationModel)
-    session.log_info(f'Loaded model {mid}')
-    return {'model_id': mid}
-
-@app.put('/models/dummy_instance/load/')
-def load_dummy_model() -> dict:
-    mid = session.load_model(DummyInstanceSegmentationModel)
-    session.log_info(f'Loaded model {mid}')
-    return {'model_id': mid}
-
-@app.put('/workflows/segment')
-def infer_img(model_id: str, input_filename: str, channel: int = None) -> dict:
-    inpath = session.paths['inbound_images'] / input_filename
-    validate_workflow_inputs([model_id], [inpath])
-    record = classify_pixels(
-        inpath,
-        session.models[model_id]['object'],
-        session.paths['outbound_images'],
-        channel=channel,
-    )
-    session.log_info(f'Completed segmentation of {input_filename}')
-    return record
\ No newline at end of file
+
+@app.get('/accessors')
+def list_accessors():
+    return session.list_accessors()
+
+
+def _session_accessor(func, acc_id):
+    try:
+        return func(acc_id)
+    except AccessorIdError as e:
+        raise HTTPException(404, f'Did not find accessor with ID {acc_id}')
+
+
+@app.get('/accessors/{accessor_id}')
+def get_accessor(accessor_id: str):
+    return _session_accessor(session.get_accessor_info, accessor_id)
+
+
+@app.get('/accessors/delete/{accessor_id}')
+def delete_accessor(accessor_id: str):
+    if accessor_id == '*':
+        return session.del_all_accessors()
+    else:
+        return _session_accessor(session.del_accessor, accessor_id)
+
+
+@app.put('/accessors/read_from_file/{filename}')
+def read_accessor_from_file(filename: str, accessor_id: Union[str, None] = None):
+    fp = session.paths['inbound_images'] / filename
+    if not fp.exists():
+        raise HTTPException(status_code=404, detail=f'Could not find file:\n{filename}')
+    acc = generate_file_accessor(fp)
+    return session.add_accessor(acc, accessor_id=accessor_id)
+
+
+@app.put('/accessors/write_to_file/{accessor_id}')
+def write_accessor_to_file(accessor_id: str, filename: Union[str, None] = None) -> str:
+    try:
+        return session.write_accessor(accessor_id, filename)
+    except AccessorIdError as e:
+        raise HTTPException(404, f'Did not find accessor with ID {accessor_id}')
+    except WriteAccessorError as e:
+        raise HTTPException(409, str(e))
\ No newline at end of file
diff --git a/model_server/base/czi_util.py b/model_server/base/czi_util.py
index 6c8c5db8c27eeefea2e4bad32122d34f306bf16e..f58d624759b7da71bc03fb422d8658f953483d43 100644
--- a/model_server/base/czi_util.py
+++ b/model_server/base/czi_util.py
@@ -5,7 +5,7 @@ import czifile
 import numpy as np
 import pandas as pd
 
-from model_server.base.accessors import InMemoryDataAccessor
+from .accessors import InMemoryDataAccessor
 
 
 def dump_czi_subblock_table(czif: czifile.CziFile, where: Path):
diff --git a/model_server/base/models.py b/model_server/base/models.py
index 228bcb66a7451837eaa6fd3b23a4fe489c335b95..eaded2c9f24c8793600019d86a1dcbb5cdab7c52 100644
--- a/model_server/base/models.py
+++ b/model_server/base/models.py
@@ -1,11 +1,10 @@
 from abc import ABC, abstractmethod
 from math import floor
-from typing import Union
 
 import numpy as np
 from pydantic import BaseModel
 
-from model_server.base.accessors import GenericImageDataAccessor, InMemoryDataAccessor, PatchStack
+from .accessors import GenericImageDataAccessor, InMemoryDataAccessor, PatchStack
 
 
 class Model(ABC):
@@ -49,6 +48,11 @@ class Model(ABC):
     def reload(self):
         self.load()
 
+    @property
+    def name(self):
+        return self.__class__.__name__
+
 
 class ImageToImageModel(Model):
     """
@@ -125,48 +129,20 @@ class InstanceSegmentationModel(ImageToImageModel):
         return PatchStack(data)
 
 
-class DummySemanticSegmentationModel(SemanticSegmentationModel):
+class BinaryThresholdSegmentationModel(SemanticSegmentationModel):
 
-    model_id = 'dummy_make_white_square'
-
-    def load(self):
-        return True
+    def __init__(self, tr: float = 0.5):
+        self.tr = tr
 
     def infer(self, img: GenericImageDataAccessor) -> (GenericImageDataAccessor, dict):
-        super().infer(img)
-        w = img.shape_dict['X']
-        h = img.shape_dict['Y']
-        result = np.zeros([h, w], dtype='uint8')
-        result[floor(0.25 * h) : floor(0.75 * h), floor(0.25 * w) : floor(0.75 * w)] = 255
-        return InMemoryDataAccessor(data=result), {'success': True}
-
-    def label_pixel_class(
-            self, img: GenericImageDataAccessor, **kwargs) -> GenericImageDataAccessor:
-        mask, _ = self.infer(img)
-        return mask
-
-class DummyInstanceSegmentationModel(InstanceSegmentationModel):
+        return img.apply(lambda x: x > self.tr), {'success': True}
 
-    model_id = 'dummy_pass_input_mask'
+    def label_pixel_class(self, img: GenericImageDataAccessor, **kwargs) -> GenericImageDataAccessor:
+        return self.infer(img, **kwargs)[0]
 
     def load(self):
-        return True
-
-    def infer(
-            self, img: GenericImageDataAccessor, mask: GenericImageDataAccessor
-    ) -> (GenericImageDataAccessor, dict):
-        return img.__class__(
-            (mask.data / mask.data.max()).astype('uint16')
-        )
+        pass
 
-    def label_instance_class(
-            self, img: GenericImageDataAccessor, mask: GenericImageDataAccessor, **kwargs
-    ) -> GenericImageDataAccessor:
-        """
-        Returns a trivial segmentation, i.e. the input mask with value 1
-        """
-        super(DummyInstanceSegmentationModel, self).label_instance_class(img, mask, **kwargs)
-        return self.infer(img, mask)
 
 class Error(Exception):
     pass
diff --git a/model_server/clients/__init__.py b/model_server/base/pipelines/__init__.py
similarity index 100%
rename from model_server/clients/__init__.py
rename to model_server/base/pipelines/__init__.py
diff --git a/model_server/base/pipelines/roiset_obmap.py b/model_server/base/pipelines/roiset_obmap.py
new file mode 100644
index 0000000000000000000000000000000000000000..037fb7fc015c5646152854f5a7fa4f2258d9808d
--- /dev/null
+++ b/model_server/base/pipelines/roiset_obmap.py
@@ -0,0 +1,121 @@
+from typing import Dict, Union
+
+from pydantic import BaseModel, Field
+
+from ..accessors import GenericImageDataAccessor
+from .router import router
+from .segment_zproj import segment_zproj_pipeline
+from .shared import call_pipeline, PipelineParams, PipelineRecord, PipelineTrace
+from ..roiset import get_label_ids, RoiSet, RoiSetMetaParams, RoiSetExportParams
+from ..session import session
+
+from ..models import Model, InstanceSegmentationModel
+
+
+class RoiSetObjectMapParams(PipelineParams):
+    class _SegmentationParams(BaseModel):
+        channel: int = Field(
+            None,
+            description='Channel of input image to use for solving segmentation; use all channels if empty'
+        )
+        zi: Union[int, None] = Field(
+            None,
+            description='z coordinate to use on input image when solving segmentation; apply MIP if empty',
+        )
+
+    accessor_id: str = Field(
+        description='ID(s) of previously loaded accessor(s) to use as pipeline input'
+    )
+    pixel_classifier_segmentation_model_id: str = Field(
+        description='Pixel classifier applied to segmentation_channel(s) to segment objects'
+    )
+    object_classifier_model_id: Union[str, None] = Field(
+        None,
+        description='Object classifier used to classify segmented objects'
+    )
+    pixel_classifier_derived_model_id: Union[str, None] = Field(
+        None,
+        description='Pixel classifier used to derive channel(s) as additional inputs to object classification'
+    )
+    patches_channel: int = Field(
+        description='Channel of input image used in patches sent to object classifier'
+    )
+    segmentation: _SegmentationParams = Field(
+        _SegmentationParams(),
+        description='Parameters used to solve segmentation'
+    )
+    roi_params: RoiSetMetaParams = RoiSetMetaParams(**{
+        'mask_type': 'boxes',
+        'filters': {
+            'area': {'min': 1e3, 'max': 1e8}
+        },
+        'expand_box_by': [128, 2],
+        'deproject_channel': None,
+    })
+    export_params: RoiSetExportParams = RoiSetExportParams()
+    derived_channels_input_channel: Union[int, None] = Field(
+        None,
+        description='Channel of input image from which to compute derived channels; use all if empty'
+    )
+    derived_channels_output_channels: Union[int, list] = Field(
+        None,
+        description='Derived channels to send to object classifier; use all if empty'
+    )
+    export_label_interm: bool = False
+
+
+class RoiSetToObjectMapRecord(PipelineRecord):
+    roiset_table: dict
+
+@router.put('/roiset_to_obmap/infer')
+def roiset_object_map(p: RoiSetObjectMapParams) -> RoiSetToObjectMapRecord:
+    """
+    Compute a RoiSet from 2d segmentation, apply to z-stack, and optionally apply object classification.
+    """
+    record, rois = call_pipeline(roiset_object_map_pipeline, p)
+
+    table = rois.get_serializable_dataframe()
+
+    session.write_to_table('RoiSet', {'input_filename': p.accessor_id}, table)
+    ret = RoiSetToObjectMapRecord(
+        roiset_table=table.to_dict(),
+        **record.dict()
+    )
+    return ret
+
+
+def roiset_object_map_pipeline(
+        accessors: Dict[str, GenericImageDataAccessor],
+        models: Dict[str, Model],
+        **k
+) -> (PipelineTrace, RoiSet):
+    d = PipelineTrace(accessors['accessor'])
+
+    d['mask'] = segment_zproj_pipeline(
+        accessors,
+        {'model': models['pixel_classifier_segmentation_model']},
+        **k['segmentation'],
+    ).last
+
+    d['labeled'] = get_label_ids(d.last)
+    rois = RoiSet.from_object_ids(d['input'], d['labeled'], RoiSetMetaParams(**k['roi_params']))
+
+    # optionally append RoiSet products
+    for ki, vi in rois.get_export_product_accessors(k['patches_channel'], RoiSetExportParams(**k['export_params'])).items():
+        d[ki] = vi
+
+    # optionally run an object classifier if specified
+    if obmod := models.get('object_classifier_model'):
+        obmod_name = k['object_classifier_model_id']
+        assert isinstance(obmod, InstanceSegmentationModel)
+        rois.classify_by(
+            obmod_name,
+            [k['patches_channel']],
+            obmod,
+        )
+        d[obmod_name] = rois.get_object_class_map(obmod_name)
+    else:
+        d['objects_unclassified'] = d.last.apply(lambda x: ((x > 0) * 1).astype('uint16'))
+    return d, rois
diff --git a/model_server/base/pipelines/router.py b/model_server/base/pipelines/router.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc7850da0c7101db8bdfb04487a2498ec5cfdcfd
--- /dev/null
+++ b/model_server/base/pipelines/router.py
@@ -0,0 +1,9 @@
+from fastapi import APIRouter
+
+router = APIRouter(
+    prefix='/pipelines',
+    tags=['pipelines'],
+)
+
+# this completes routing in individual pipeline modules
+from . import roiset_obmap, segment, segment_zproj
\ No newline at end of file
diff --git a/model_server/base/pipelines/segment.py b/model_server/base/pipelines/segment.py
new file mode 100644
index 0000000000000000000000000000000000000000..fac1835111872ab563b71d6795982e170d52e61f
--- /dev/null
+++ b/model_server/base/pipelines/segment.py
@@ -0,0 +1,46 @@
+from typing import Dict
+
+from .shared import call_pipeline, IncompatibleModelsError, PipelineTrace, PipelineParams, PipelineRecord
+from ..accessors import GenericImageDataAccessor
+from ..models import Model, SemanticSegmentationModel
+from ..process import smooth
+from .router import router
+
+from pydantic import Field
+
+
+class SegmentParams(PipelineParams):
+    accessor_id: str = Field(description='ID(s) of previously loaded accessor(s) to use as pipeline input')
+    model_id: str = Field(description='ID(s) of previously loaded segmentation model(s)')
+    channel: int = Field(None, description='Channel to use for segmentation; use all channels if empty.')
+
+
+class SegmentRecord(PipelineRecord):
+    pass
+
+
+@router.put('/segment')
+def segment(p: SegmentParams) -> SegmentRecord:
+    """
+    Run a semantic segmentation model to compute a binary mask from an input image
+    """
+    return call_pipeline(segment_pipeline, p)
+
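+# The pipeline function below can also be called directly, outside the HTTP API. A sketch with
+# illustrative names, assuming `acc` is a loaded accessor and `model` is a SemanticSegmentationModel:
+#   trace = segment_pipeline({'accessor': acc}, {'model': model}, channel=0)
+#   mask = trace.last
+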
+def segment_pipeline(
+        accessors: Dict[str, GenericImageDataAccessor],
+        models: Dict[str, Model],
+        **k
+) -> PipelineTrace:
+    d = PipelineTrace(accessors.get('accessor'))
+    model = models.get('model')
+
+    if not isinstance(model, SemanticSegmentationModel):
+        raise IncompatibleModelsError('Expecting a pixel classification model')
+
+    if (ch := k.get('channel')) is not None:
+        d['mono'] = d['input'].get_mono(ch)
+    d['inference'] = model.label_pixel_class(d.last)
+    if (sm := k.get('smooth')) is not None:
+        d['smooth'] = d.last.apply(lambda x: smooth(x, sm))
+    d['output'] = d.last
+    return d
\ No newline at end of file
diff --git a/model_server/base/pipelines/segment_zproj.py b/model_server/base/pipelines/segment_zproj.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f5ed9ba755433ef8445198b80885412e2161d98
--- /dev/null
+++ b/model_server/base/pipelines/segment_zproj.py
@@ -0,0 +1,40 @@
+from typing import Dict
+
+from .router import router
+from .segment import SegmentParams, SegmentRecord, segment_pipeline
+from .shared import call_pipeline, PipelineTrace
+from ..accessors import GenericImageDataAccessor
+from ..models import Model
+
+from pydantic import Field
+
+class SegmentZStackParams(SegmentParams):
+    zi: int = Field(None, description='z coordinate to use on input stack; apply MIP if empty')
+
+
+class SegmentZStackRecord(SegmentRecord):
+    pass
+
+
+@router.put('/segment_zproj')
+def segment_zproj(p: SegmentZStackParams) -> SegmentZStackRecord:
+    """
+    Run a semantic segmentation model to compute a binary mask from a projected input zstack
+    """
+    return call_pipeline(segment_zproj_pipeline, p)
+
+
+def segment_zproj_pipeline(
+        accessors: Dict[str, GenericImageDataAccessor],
+        models: Dict[str, Model],
+        **k
+) -> PipelineTrace:
+    d = PipelineTrace(accessors.get('accessor'))
+
+    if isinstance(k.get('zi'), int):
+        assert 0 <= k['zi'] < d.last.nz
+        d['mip'] = d.last.get_zi(k['zi'])
+    else:
+        d['mip'] = d.last.get_mip()
+    return segment_pipeline({'accessor': d.last}, models, **k)
+
diff --git a/model_server/base/pipelines/shared.py b/model_server/base/pipelines/shared.py
new file mode 100644
index 0000000000000000000000000000000000000000..df43cd3c6b55af8b99a2425be568c69786fb1ba9
--- /dev/null
+++ b/model_server/base/pipelines/shared.py
@@ -0,0 +1,218 @@
+from collections import OrderedDict
+from pathlib import Path
+from time import perf_counter
+from typing import List, Union
+
+from fastapi import HTTPException
+from pydantic import BaseModel, Field, root_validator
+
+from ..accessors import GenericImageDataAccessor
+from ..session import session, AccessorIdError
+
+
+class PipelineParams(BaseModel):
+    keep_interm: bool = Field(False, description='Keep accessors to intermediate images in session')
+    api: bool = Field(True, description='Validate parameters against server session and map HTTP errors if True')
+
+    @root_validator(pre=False)
+    def models_are_loaded(cls, dd):
+        for k, v in dd.items():
+            if dd['api'] and k.endswith('model_id') and v is not None:
+                if v not in session.describe_loaded_models().keys():
+                    raise HTTPException(status_code=409, detail=f'Model with {k} = {v} has not been loaded')
+        return dd
+
+    @root_validator(pre=False)
+    def accessors_are_loaded(cls, dd):
+        for k, v in dd.items():
+            if dd['api'] and k.endswith('accessor_id'):
+                try:
+                    info = session.get_accessor_info(v)
+                except AccessorIdError as e:
+                    raise HTTPException(status_code=409, detail=str(e))
+                if not info['loaded']:
+                    raise HTTPException(status_code=409, detail=f'Accessor with {k} = {v} has not been loaded')
+        return dd
+
+
+class PipelineRecord(BaseModel):
+    output_accessor_id: str
+    interm_accessor_ids: Union[List[str], None]
+    success: bool
+    timer: dict
+
+
+def call_pipeline(func, p: PipelineParams) -> PipelineRecord:
+    # match accessor IDs to loaded accessor objects
+    accessors_in = {}
+    for k, v in p.dict().items():
+        if k.endswith('accessor_id'):
+            accessors_in[k.split('_id')[0]] = session.get_accessor(v, pop=True)
+    if len(accessors_in) == 0:
+        raise NoAccessorsFoundError('Expecting at least one valid accessor to run pipeline')
+
+    # use first validated accessor ID to name log file entries and derived accessors
+    input_description = [p.dict()[pk] for pk in p.dict().keys() if pk.endswith('accessor_id')][0]
+
+    models = {}
+    for k, v in p.dict().items():
+        if k.endswith('model_id') and v is not None:
+            models[k.split('_id')[0]] = session.models[v]['object']
+
+    # call the actual pipeline; expect a single PipelineTrace or a tuple where first element is PipelineTrace
+    ret = func(
+        accessors_in,
+        models,
+        **p.dict(),
+    )
+    if isinstance(ret, PipelineTrace):
+        steps = ret
+        misc = None
+    elif isinstance(ret, tuple) and isinstance(ret[0], PipelineTrace):
+        steps = ret[0]
+        misc = ret[1:]
+    else:
+        raise UnexpectedPipelineReturnError(
+            f'{func.__name__} returned unexpected value of {type(ret)}'
+        )
+    session.log_info(f'Completed {func.__name__} on {input_description}.')
+
+    # map intermediate data accessors to accessor IDs
+    if p.keep_interm:
+        interm_ids = []
+        acc_interm = steps.accessors(skip_first=True, skip_last=True).items()
+        for i, item in enumerate(acc_interm):
+            stk, acc = item
+            interm_ids.append(
+                session.add_accessor(
+                    acc,
+                    accessor_id=f'{input_description}_{func.__name__}_step{(i + 1):02d}_{stk}'
+                )
+            )
+    else:
+        interm_ids = None
+
+    # map final result to an accessor ID
+    result_id = session.add_accessor(
+        steps.last,
+        accessor_id=f'{input_description}_{func.__name__}_result'
+    )
+
+    record = PipelineRecord(
+        output_accessor_id=result_id,
+        interm_accessor_ids=interm_ids,
+        success=True,
+        timer=steps.times
+    )
+
+    # return miscellaneous objects if pipeline returns these
+    if misc:
+        return record, *misc
+    else:
+        return record
+
+
+class PipelineTrace(OrderedDict):
+    tfunc = perf_counter
+
+    def __init__(self, start_acc: GenericImageDataAccessor = None, enforce_accessors=True, allow_overwrite=False):
+        """
+        A container and timer for data at each stage of a pipeline.
+        :param start_acc: (optional) accessor to initialize as 'input' step
+        :param enforce_accessors: if True, only allow accessors to be appended as items
+        :param allow_overwrite: if True, allow an item to be overwritten
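+
+        Example (sketch; `acc_in` and `model` are illustrative):
+            trace = PipelineTrace(acc_in)
+            trace['mask'] = model.label_pixel_class(trace.last)
+            trace.times  # incremental time spent producing each item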
+        """
+        self.enforce_accessors = enforce_accessors
+        self.allow_overwrite = allow_overwrite
+        self.last_time = self.tfunc()
+        self.markers = None
+        self.timer = OrderedDict()
+        super().__init__()
+        if start_acc is not None:
+            self['input'] = start_acc
+
+    def __setitem__(self, key, value: GenericImageDataAccessor):
+        if self.enforce_accessors:
+            assert isinstance(value, GenericImageDataAccessor), f'Pipeline trace expects data accessor type'
+        if not self.allow_overwrite and key in self.keys():
+            raise KeyAlreadyExists(f'key {key} already exists in pipeline trace')
+        self.timer.__setitem__(key, self.tfunc() - self.last_time)
+        self.last_time = self.tfunc()
+        return super().__setitem__(key, value)
+
+    @property
+    def times(self):
+        """
+        Return an ordered dictionary of incremental times for each item that is appended
+        """
+        return {k: self.timer[k] for k in self.keys()}
+
+    @property
+    def last(self):
+        """
+        Return most recently appended item
+        :return:
+        """
+        return list(self.values())[-1]
+
+    def accessors(self, skip_first=True, skip_last=True) -> dict:
+        """
+        Return a subset of the trace as an ordered dictionary whose items are guaranteed to be accessors
+        :param skip_first: if True, exclude first item in trace
+        :param skip_last: if True, exclude last item in trace
+        :return: dictionary of accessors that meet input criteria
+        """
+        res = OrderedDict()
+        for i, item in enumerate(self.items()):
+            k, v = item
+            if not isinstance(v, GenericImageDataAccessor):
+                continue
+            if skip_first and k == list(self.keys())[0]:
+                continue
+            if skip_last and k == list(self.keys())[-1]:
+                continue
+            res[k] = v
+        return res
+
+    def write_interm(
+            self,
+            where: Path,
+            prefix: str = 'interm',
+            skip_first=True,
+            skip_last=True,
+            debug=False
+    ) -> List[Path]:
+        """
+        Write accessor data to TIF files under specified path
+        :param where: directory in which to write image files
+        :param prefix: (optional) file prefix
+        :param skip_first: if True, do not write first item in trace
+        :param skip_last: if True, do not write last item in trace
+        :param debug: if True, report destination filepaths but do not write files
+        :return: list of destination filepaths
+        """
+        paths = []
+        accessors = self.accessors(skip_first=skip_first, skip_last=skip_last)
+        for i, item in enumerate(accessors.items()):
+            k, v = item
+            fp = where / f'{prefix}_{i:02d}_{k}.tif'
+            paths.append(fp)
+            if not debug:
+                v.write(fp)
+        return paths
+
+
+class Error(Exception):
+    pass
+
+class IncompatibleModelsError(Error):
+    pass
+
+class KeyAlreadyExists(Error):
+    pass
+
+class NoAccessorsFoundError(Error):
+    pass
+
+class UnexpectedPipelineReturnError(Error):
+    pass
diff --git a/model_server/base/process.py b/model_server/base/process.py
index d475b063ca641922e4c08707bdc2bfc36049f9ea..dac347cd8c2e7a43e1ef82c4c00d9d119fdf303b 100644
--- a/model_server/base/process.py
+++ b/model_server/base/process.py
@@ -18,7 +18,7 @@ def is_mask(img):
         return True
     elif img.dtype == 'uint8':
         unique = np.unique(img)
-        if unique.shape[0] == 2 and np.all(unique == [0, 255]):
+        if unique.shape[0] <= 2 and np.all(np.isin(unique, [0, 255])):
             return True
     return False
 
@@ -136,7 +136,14 @@ def smooth(img: np.ndarray, sig: float) -> np.ndarray:
     :param sig: threshold parameter
     :return: smoothed image
     """
-    return gaussian(img, sig)
+    ga = gaussian(img, sig, preserve_range=True)
+    if is_mask(img):
+        if img.dtype == 'bool':
+            return ga > ga.mean()
+        elif img.dtype == 'uint8':
+            return (255 * (ga > ga.mean())).astype('uint8')
+    else:
+        return ga
 
 class Error(Exception):
     pass
diff --git a/model_server/base/roiset.py b/model_server/base/roiset.py
index 68ee831a58a7088d74e8959e8e47e5734780ac6e..cfeec9eec6d4de1e2d56a358055140a58893b4a6 100644
--- a/model_server/base/roiset.py
+++ b/model_server/base/roiset.py
@@ -1,7 +1,9 @@
+from collections import OrderedDict
+import itertools
 from math import sqrt, floor
 from pathlib import Path
-import re
-from typing import List, Union
+from typing import Dict, List, Union
+from typing_extensions import Self
 from uuid import uuid4
 
 import numpy as np
@@ -10,16 +12,16 @@ from pydantic import BaseModel
 from scipy.stats import moment
 from skimage.filters import sobel
 
-from skimage.measure import label, regionprops_table, shannon_entropy, find_contours
-from sklearn.preprocessing import PolynomialFeatures
-from sklearn.linear_model import LinearRegression
+from skimage import draw
+from skimage.measure import approximate_polygon, find_contours, label, points_in_poly, regionprops, regionprops_table, shannon_entropy
+from skimage.morphology import binary_dilation, disk
 
-from model_server.base.accessors import GenericImageDataAccessor, InMemoryDataAccessor, write_accessor_data_to_file
-from model_server.base.models import InstanceSegmentationModel
-from model_server.base.process import get_safe_contours, pad, rescale, resample_to_8bit, make_rgb
-from model_server.base.annotators import draw_box_on_patch, draw_contours_on_patch, draw_boxes_on_3d_image
-from model_server.base.accessors import generate_file_accessor, PatchStack
-from model_server.base.process import mask_largest_object
+from .accessors import GenericImageDataAccessor, InMemoryDataAccessor, write_accessor_data_to_file
+from .models import InstanceSegmentationModel
+from .process import get_safe_contours, pad, rescale, resample_to_8bit, make_rgb
+from .annotators import draw_box_on_patch, draw_contours_on_patch, draw_boxes_on_3d_image
+from .accessors import generate_file_accessor, PatchStack
+from .process import mask_largest_object
 
 
 class PatchParams(BaseModel):
@@ -51,6 +53,7 @@ class RoiFilter(BaseModel):
 class RoiSetMetaParams(BaseModel):
     filters: Union[RoiFilter, None] = None
     expand_box_by: List[int] = [128, 0]
+    deproject_channel: Union[int, None] = None
 
 
 class RoiSetExportParams(BaseModel):
@@ -62,8 +65,7 @@ class RoiSetExportParams(BaseModel):
     derived_channels: bool = False
 
 
-
-def _get_label_ids(acc_seg_mask: GenericImageDataAccessor, allow_3d=False, connect_3d=True) -> InMemoryDataAccessor:
+def get_label_ids(acc_seg_mask: GenericImageDataAccessor, allow_3d=False, connect_3d=True) -> InMemoryDataAccessor:
     """
     Convert binary segmentation mask into either a 2D or 3D object identities map
     :param acc_seg_mask: binary segmentation mask (mono) of either two or three dimensions
@@ -73,7 +75,7 @@ def _get_label_ids(acc_seg_mask: GenericImageDataAccessor, allow_3d=False, conne
     """
     if allow_3d and connect_3d:
         nda_la = label(
-            acc_seg_mask.data[:, :, 0, :],
+            acc_seg_mask.data_xyz,
             connectivity=3,
         ).astype('uint16')
         return InMemoryDataAccessor(np.expand_dims(nda_la, 2))
@@ -82,7 +84,7 @@ def _get_label_ids(acc_seg_mask: GenericImageDataAccessor, allow_3d=False, conne
         la_3d = np.zeros((*acc_seg_mask.hw, 1, acc_seg_mask.nz), dtype='uint16')
         for zi in range(0, acc_seg_mask.nz):
             la_2d = label(
-                acc_seg_mask.data[:, :, 0, zi],
+                acc_seg_mask.data_xyz[:, :, zi],
                 connectivity=2,
             ).astype('uint16')
             la_2d[la_2d > 0] = la_2d[la_2d > 0] + nla
@@ -92,13 +94,13 @@ def _get_label_ids(acc_seg_mask: GenericImageDataAccessor, allow_3d=False, conne
     else:
         return InMemoryDataAccessor(
             label(
-                acc_seg_mask.data[:, :, 0, :].max(axis=-1),
+                acc_seg_mask.get_mip().data_xy,
                 connectivity=1,
             ).astype('uint16')
         )
 
 
-def _focus_metrics():
+def focus_metrics():
     return {
         'max_intensity': lambda x: np.max(x),
         'stdev': lambda x: np.std(x),
@@ -109,7 +111,215 @@ def _focus_metrics():
     }
 
 
-def _safe_add(a, g, b):
+def filter_df(df: pd.DataFrame, filters: RoiFilter = None) -> pd.DataFrame:
+    query_str = 'label > 0'  # always true
+    if filters is not None:  # parse filters
+        for k, val in filters.dict(exclude_unset=True).items():
+            assert k in ('area',)
+            vmin = val['min']
+            vmax = val['max']
+            assert vmin >= 0
+            query_str = query_str + f' & {k} > {vmin} & {k} < {vmax}'
+    return df.loc[df.query(query_str).index, :]
+
+
+def filter_df_overlap_bbox(df1: pd.DataFrame, df2: pd.DataFrame = None) -> pd.DataFrame:
+    """
+    If passed a single DataFrame, return the subset whose bounding boxes overlap in 3D space.  If passed two DataFrames,
+    return the subset where a ROI in the first overlaps a ROI in the second.  May return duplicate entries where a ROI
+    overlaps with multiple neighbors.
+    :param df1: DataFrame with potentially overlapping bounding boxes
+    :param df2: (optional) second DataFrame
+    :return DataFrame describing subset of overlapping ROIs
+        overlaps_with: index of ROI that overlaps
+        bbox_intersec: pixel area of intersecting region
+    """
+
+    def _compare(r0, r1):
+        olx = (r0.x0 < r1.x1) and (r0.x1 > r1.x0)
+        oly = (r0.y0 < r1.y1) and (r0.y1 > r1.y0)
+        olz = (r0.zi == r1.zi)
+        return olx and oly and olz
+
+    def _intersec(r0, r1):
+        return (r0.x1 - r1.x0) * (r0.y1 - r1.y0)
+
+    first = []
+    second = []
+    intersec = []
+
+    if df2 is not None:
+        for pair in itertools.product(df1.index, df2.index):
+            if _compare(df1.iloc[pair[0]], df2.iloc[pair[1]]):
+                first.append(pair[0])
+                second.append(pair[1])
+                intersec.append(
+                    _intersec(df1.iloc[pair[0]], df2.iloc[pair[1]])
+                )
+    else:
+        for pair in itertools.combinations(df1.index, 2):
+            if _compare(df1.iloc[pair[0]], df1.iloc[pair[1]]):
+                first.append(pair[0])
+                second.append(pair[1])
+                first.append(pair[1])
+                second.append(pair[0])
+                isc = _intersec(df1.iloc[pair[0]], df1.iloc[pair[1]])
+                intersec.append(isc)
+                intersec.append(isc)
+
+    sdf = df1.iloc[first].copy()
+    sdf.loc[:, 'overlaps_with'] = second
+    sdf.loc[:, 'bbox_intersec'] = intersec
+    return sdf
+
+
+def filter_df_overlap_seg(df1: pd.DataFrame, df2: pd.DataFrame = None) -> pd.DataFrame:
+    """
+    If passed a single DataFrame, return the subset whose segmentations overlap in 3D space.  If passed two DataFrames,
+    return the subset where a ROI in the first overlaps a ROI in the second.  May return duplicate entries where a ROI
+    overlaps with multiple neighbors.
+    :param df1: DataFrame with potentially overlapping bounding boxes
+    :param df2: (optional) second DataFrame
+    :return DataFrame describing subset of overlapping ROIs
+        overlaps_with: index of ROI that overlaps
+        seg_overlaps: True if the ROIs' segmented pixels overlap
+        seg_intersec: pixel area of intersecting region
+        seg_iou: intersection over union
+    """
+
+    dfbb = filter_df_overlap_bbox(df1, df2)
+
+    def _overlap_seg(r):
+        roi1 = df1.loc[r.name]
+        if df2 is not None:
+            roi2 = df2.loc[r.overlaps_with]
+        else:
+            roi2 = df1.loc[r.overlaps_with]
+        ex0 = min(roi1.x0, roi2.x0, roi1.x1, roi2.x1)
+        ew = max(roi1.x0, roi2.x0, roi1.x1, roi2.x1) - ex0
+        ey0 = min(roi1.y0, roi2.y0, roi1.y1, roi2.y1)
+        eh = max(roi1.y0, roi2.y0, roi1.y1, roi2.y1) - ey0
+        emask = np.zeros((eh, ew), dtype='uint8')
+        sl1 = np.s_[(roi1.y0 - ey0): (roi1.y1 - ey0), (roi1.x0 - ex0): (roi1.x1 - ex0)]
+        sl2 = np.s_[(roi2.y0 - ey0): (roi2.y1 - ey0), (roi2.x0 - ex0): (roi2.x1 - ex0)]
+        emask[sl1] = roi1.binary_mask
+        emask[sl2] = emask[sl2] + roi2.binary_mask
+        return emask
+
+    emasks = dfbb.apply(_overlap_seg, axis=1)
+    dfbb['seg_overlaps'] = emasks.apply(lambda x: np.any(x > 1))
+    dfbb['seg_intersec'] = emasks.apply(lambda x: (x == 2).sum())
+    dfbb['seg_iou'] = emasks.apply(lambda x: (x == 2).sum() / (x > 0).sum())
+    return dfbb
+
+
+def make_df_from_object_ids(acc_raw, acc_obj_ids, expand_box_by, deproject_channel=None) -> pd.DataFrame:
+    """
+    Build a dataframe that associates object IDs with summary stats;
+    :param acc_raw: accessor to raw image data
+    :param acc_obj_ids: accessor to map of object IDs
+    :param expand_box_by: number of pixels to expand bounding box in all directions (without exceeding image boundary)
+    :param deproject_channel: if objects' z-coordinates are not specified, compute them based on argmax of this channel
+    :return: pd.DataFrame
+    """
+    # build dataframe of objects, assign z index to each object
+
+    if acc_obj_ids.nz == 1 and acc_raw.nz > 1:
+
+        if deproject_channel is None or deproject_channel >= acc_raw.chroma or deproject_channel < 0:
+            if acc_raw.chroma == 1:
+                deproject_channel = 0
+            else:
+                raise NoDeprojectChannelSpecifiedError(
+                    f'When labeling objects, either their z-coordinates or a valid deprojection channel are required.'
+                )
+
+        zi_map = acc_raw.get_mono(deproject_channel).get_z_argmax().data_xy.astype('uint16')
+        assert len(zi_map.shape) == 2
+        df = pd.DataFrame(regionprops_table(
+            acc_obj_ids.data_xy,
+            intensity_image=zi_map,
+            properties=('label', 'area', 'intensity_mean', 'bbox')
+        )).rename(columns={'bbox-0': 'y0', 'bbox-1': 'x0', 'bbox-2': 'y1', 'bbox-3': 'x1'})
+        df['zi'] = df['intensity_mean'].round().astype('int')
+
+    else:  # objects' z-coordinates come from arg of max count in object identities map
+        df = pd.DataFrame(regionprops_table(
+            acc_obj_ids.data_xyz,
+            properties=('label', 'area', 'bbox')
+        )).rename(columns={
+            'bbox-0': 'y0', 'bbox-1': 'x0', 'bbox-2': 'z0', 'bbox-3': 'y1', 'bbox-4': 'x1', 'bbox-5': 'z1'
+        })
+
+        def _get_zi_from_label(la):
+            return acc_obj_ids.apply(lambda x: x == la).get_focus_vector().argmax()
+
+        df['zi'] = df['label'].apply(_get_zi_from_label)
+
+    df = df_insert_slices(df, acc_raw.shape_dict, expand_box_by)
+
+    def _make_binary_mask(r):
+        acc = InMemoryDataAccessor(acc_obj_ids.data == r.label)
+        cropped = acc.get_mono(0, mip=True).crop_hw((r.y0, r.x0, (r.y1 - r.y0), (r.x1 - r.x0))).data_xy
+        return cropped
+
+    df['binary_mask'] = df.apply(
+        _make_binary_mask,
+        axis=1,
+        result_type='reduce',
+    )
+    return df
+
+
+def df_insert_slices(df: pd.DataFrame, sd: dict, expand_box_by) -> pd.DataFrame:
+    h = sd['Y']
+    w = sd['X']
+    nz = sd['Z']
+
+    df['h'] = df['y1'] - df['y0']
+    df['w'] = df['x1'] - df['x0']
+    ebxy, ebz = expand_box_by
+    df['ebb_y0'] = (df.y0 - ebxy).apply(lambda x: max(x, 0))
+    df['ebb_y1'] = (df.y1 + ebxy).apply(lambda x: min(x, h))
+    df['ebb_x0'] = (df.x0 - ebxy).apply(lambda x: max(x, 0))
+    df['ebb_x1'] = (df.x1 + ebxy).apply(lambda x: min(x, w))
+    df['ebb_z0'] = (df.zi - ebz).apply(lambda x: max(x, 0))
+    df['ebb_z1'] = (df.zi + ebz).apply(lambda x: min(x, nz))
+    df['ebb_h'] = df['ebb_y1'] - df['ebb_y0']
+    df['ebb_w'] = df['ebb_x1'] - df['ebb_x0']
+    df['ebb_nz'] = df['ebb_z1'] - df['ebb_z0'] + 1
+
+    # compute relative bounding boxes
+    df['rel_y0'] = df.y0 - df.ebb_y0
+    df['rel_y1'] = df.y1 - df.ebb_y0
+    df['rel_x0'] = df.x0 - df.ebb_x0
+    df['rel_x1'] = df.x1 - df.ebb_x0
+
+    assert np.all(df['rel_x1'] <= (df['ebb_x1'] - df['ebb_x0']))
+    assert np.all(df['rel_y1'] <= (df['ebb_y1'] - df['ebb_y0']))
+
+    df['slice'] = df.apply(
+        lambda r:
+        np.s_[int(r.y0): int(r.y1), int(r.x0): int(r.x1), :, int(r.zi): int(r.zi + 1)],
+        axis=1,
+        result_type='reduce',
+    )
+    df['expanded_slice'] = df.apply(
+        lambda r:
+        np.s_[int(r.ebb_y0): int(r.ebb_y1), int(r.ebb_x0): int(r.ebb_x1), :, int(r.ebb_z0): int(r.ebb_z1) + 1],
+        axis=1,
+        result_type='reduce',
+    )
+    df['relative_slice'] = df.apply(
+        lambda r:
+        np.s_[int(r.rel_y0): int(r.rel_y1), int(r.rel_x0): int(r.rel_x1), :, :],
+        axis=1,
+        result_type='reduce',
+    )
+    return df
+
+
+def safe_add(a, g, b):
     assert a.dtype == b.dtype
     assert a.shape == b.shape
     assert g >= 0.0
@@ -120,45 +330,135 @@ def _safe_add(a, g, b):
         np.iinfo(a.dtype).max
     ).astype(a.dtype)
 
+def make_object_ids_from_df(df: pd.DataFrame, sd: dict) -> InMemoryDataAccessor:
+    id_mask = np.zeros((sd['Y'], sd['X'], 1, sd['Z']), dtype='uint16')
+    if 'binary_mask' not in df.columns:
+        raise MissingSegmentationError('RoiSet dataframe does not contain segmentation')
+
+    def _label_obj(r):
+        sl = np.s_[r.y0:r.y1, r.x0:r.x1, :, r.zi:r.zi + 1]
+        mask = np.expand_dims(r.binary_mask, (2, 3))
+        id_mask[sl] = id_mask[sl] + r.label * mask
+
+    df.apply(_label_obj, axis=1)
+    return InMemoryDataAccessor(id_mask)
+
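+# Editor's sketch (hypothetical values): make_object_ids_from_df needs per-row bounding
+# boxes plus 'label', 'zi' and a 2D 'binary_mask'; a minimal single-object dataframe
+# and the shape dict of a 64x64 single-z stack might look like:
+#
+#   df = pd.DataFrame([{
+#       'label': 1, 'zi': 0, 'y0': 5, 'y1': 15, 'x0': 20, 'x1': 30,
+#       'binary_mask': np.ones((10, 10), dtype=bool),
+#   }])
+#   acc_ids = make_object_ids_from_df(df, {'Y': 64, 'X': 64, 'C': 1, 'Z': 1})
+#   # acc_ids.data has shape (64, 64, 1, 1) with value 1 inside the box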
 
 class RoiSet(object):
 
     def __init__(
             self,
             acc_raw: GenericImageDataAccessor,
-            acc_obj_ids: GenericImageDataAccessor,
+            df: pd.DataFrame,
             params: RoiSetMetaParams = RoiSetMetaParams(),
     ):
         """
         A set of regions of interest, referenced by their positions and contours in the YXCZ space of stack acc_raw.
         RoiSet contains their internal state, which may be exported as patches, maps, and other products by export methods.
         :param acc_raw: accessor to a generally a multichannel z-stack
-        :param acc_obj_ids: accessor to a 2D single-channel object identities map, where each pixel's intensity
-            labels its membership in a connected object
+        :param df: dataframe containing at minimum bounding box and segmentation mask information
         :param params: optional arguments that influence the definition and representation of ROIs
         """
-        assert acc_obj_ids.chroma == 1
-        self.acc_obj_ids = acc_obj_ids
         self.acc_raw = acc_raw
         self.accs_derived = []
         self.params = params
 
-        self._df = self.filter_df(
-            self.make_df(
-                self.acc_raw, self.acc_obj_ids, expand_box_by=params.expand_box_by
-            ),
-            params.filters,
-        )
-
+        self._df = df
         self.count = len(self._df)
-        self.object_class_maps = {}  # classification results
 
     def __iter__(self):
         """Expose ROI meta information via the Pandas.DataFrame API"""
         return self._df.itertuples(name='Roi')
 
-    @staticmethod
-    def from_segmentation(
+    @classmethod
+    def from_object_ids(
+            cls,
+            acc_raw: GenericImageDataAccessor,
+            acc_obj_ids: GenericImageDataAccessor,
+            params: RoiSetMetaParams = RoiSetMetaParams(),
+    ):
+        """
+        Create an RoiSet from an object identities map
+        :param acc_raw: accessor to a generally multichannel z-stack
+        :param acc_obj_ids: accessor to a 2D single-channel object identities map, where each pixel's intensity
+            labels its membership in a connected object
+        :param params: optional arguments that influence the definition and representation of ROIs
+        :return: RoiSet object
+        """
+        assert acc_obj_ids.chroma == 1
+
+        df = filter_df(
+            make_df_from_object_ids(
+                acc_raw, acc_obj_ids,
+                expand_box_by=params.expand_box_by,
+                deproject_channel=params.deproject_channel,
+            ),
+            params.filters,
+        )
+
+        return cls(acc_raw, df, params)
+
+    @classmethod
+    def from_bounding_boxes(
+        cls,
+        acc_raw: GenericImageDataAccessor,
+        bbox_yxhw: List[Dict],
+        bbox_zi: Union[List[int], int] = None,
+        params: RoiSetMetaParams = RoiSetMetaParams()
+    ):
+        """
+        Create an RoiSet from bounding boxes
+        :param acc_raw: accessor to a generally multichannel z-stack
+        :param bbox_yxhw: list of bounding boxes, each a dict with keys y, x (top-left corner), h (height), and w (width)
+        :param bbox_zi: z-coordinate(s) of the objects, as a single int or one per box; if None, deproject from
+            params.deproject_channel
+        :param params: optional arguments that influence the definition and representation of ROIs
+        :return: RoiSet object
+        """
+        bbox_df = pd.DataFrame(bbox_yxhw)
+        if list(bbox_df.columns.str.upper().sort_values()) != ['H', 'W', 'X', 'Y']:
+            raise BoundingBoxError(f'Expecting bounding box coordinates Y, X, H, and W, not {list(bbox_df.columns)}')
+
+        # deproject if zi is not specified
+        if bbox_zi is None:
+            dch = params.deproject_channel
+            if dch is None or dch >= acc_raw.chroma or dch < 0:
+                if acc_raw.chroma == 1:
+                    dch = 0
+                else:
+                    raise NoDeprojectChannelSpecifiedError(
+                        'When labeling objects, either their z-coordinates or a valid deprojection channel are required.'
+                    )
+            bbox_df['zi'] = acc_raw.get_mono(dch).get_focus_vector().argmax()
+        else:
+            bbox_df['zi'] = bbox_zi
+
+        bbox_df['y0'] = bbox_df['y']
+        bbox_df['x0'] = bbox_df['x']
+        bbox_df['y1'] = bbox_df['y0'] + bbox_df['h']
+        bbox_df['x1'] = bbox_df['x0'] + bbox_df['w']
+        bbox_df['label'] = bbox_df.index
+
+        df = df_insert_slices(
+            bbox_df[['y0', 'x0', 'y1', 'x1', 'zi', 'label']],
+            acc_raw.shape_dict,
+            params.expand_box_by,
+        )
+
+        def _make_binary_mask(r):
+            return np.ones((r.h, r.w), dtype=bool)
+
+        df['binary_mask'] = df.apply(
+            _make_binary_mask,
+            axis=1,
+            result_type='reduce',
+        )
+        return cls(acc_raw, df, params)
+
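+    # Editor's sketch (hypothetical coordinates): build an RoiSet directly from box
+    # dicts; bbox_zi can be given per box, or left as None to deproject from
+    # params.deproject_channel:
+    #
+    #   roiset = RoiSet.from_bounding_boxes(
+    #       acc_raw,
+    #       bbox_yxhw=[{'y': 10, 'x': 20, 'h': 30, 'w': 30}],
+    #       bbox_zi=[0],
+    #   )
+    #   roiset.get_df()[['y0', 'x0', 'y1', 'x1', 'zi']]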
+
+    @classmethod
+    def from_binary_mask(
+            cls,
             acc_raw: GenericImageDataAccessor,
             acc_seg: GenericImageDataAccessor,
             allow_3d=False,
@@ -169,102 +469,44 @@ class RoiSet(object):
         Create a RoiSet from a binary segmentation mask (either 2D or 3D)
         :param acc_raw: accessor to a generally a multichannel z-stack
         :param acc_seg: accessor of a binary segmentation mask (mono) of either two or three dimensions
-        :param allow_3d: return a 3D map if True; return a 2D map of the mask's maximum intensity project if False
+        :param allow_3d: use a 3D map if True; use a 2D map of the mask's maximum intensity projection if False
         :param connect_3d: objects can span multiple z-positions if True; objects are unique to a single z if False
         :param params: optional arguments that influence the definition and representation of ROIs
-        :return: object identities map
-        """
-        return RoiSet(acc_raw, _get_label_ids(acc_seg, allow_3d=allow_3d, connect_3d=connect_3d), params)
-
-    @staticmethod
-    def make_df(acc_raw, acc_obj_ids, expand_box_by) -> pd.DataFrame:
-        """
-        Build dataframe associate object IDs with summary stats
-        :param acc_raw: accessor to raw image data
-        :param acc_obj_ids: accessor to map of object IDs
-        :param expand_box_by: number of pixels to expand bounding box in all directions (without exceeding image boundary)
-        # :param deproject: assign object's z-position based on argmax of raw data if True
-        :return: pd.DataFrame
-        """
-        # build dataframe of objects, assign z index to each object
-
-        if acc_obj_ids.nz == 1:  # deproject objects' z-coordinates from argmax of raw image
-            df = pd.DataFrame(regionprops_table(
-                acc_obj_ids.data[:, :, 0, 0],
-                intensity_image=acc_raw.data.argmax(axis=3, keepdims=True)[:, :, 0, 0].astype('uint16'),
-                properties=('label', 'area', 'intensity_mean', 'bbox', 'centroid')
-            )).rename(columns={'bbox-0': 'y0', 'bbox-1': 'x0', 'bbox-2': 'y1', 'bbox-3': 'x1'})
-            df['zi'] = df['intensity_mean'].round().astype('int')
-
-        else:  # objects' z-coordinates come from arg of max count in object identities map
-            df = pd.DataFrame(regionprops_table(
-                acc_obj_ids.data[:, :, 0, :],
-                properties=('label', 'area', 'bbox', 'centroid')
-            )).rename(columns={
-                'bbox-0': 'y0', 'bbox-1': 'x0', 'bbox-2': 'z0', 'bbox-3': 'y1', 'bbox-4': 'x1', 'bbox-5': 'z1'
-            })
-            df['zi'] = df['label'].apply(lambda x: (acc_obj_ids.data == x).sum(axis=(0, 1, 2)).argmax())
-
-        # compute expanded bounding boxes
-        h, w, c, nz = acc_raw.shape
-        df['h'] = df['y1'] - df['y0']
-        df['w'] = df['x1'] - df['x0']
-        ebxy, ebz = expand_box_by
-        df['ebb_y0'] = (df.y0 - ebxy).apply(lambda x: max(x, 0))
-        df['ebb_y1'] = (df.y1 + ebxy).apply(lambda x: min(x, h))
-        df['ebb_x0'] = (df.x0 - ebxy).apply(lambda x: max(x, 0))
-        df['ebb_x1'] = (df.x1 + ebxy).apply(lambda x: min(x, w))
-        df['ebb_z0'] = (df.zi - ebz).apply(lambda x: max(x, 0))
-        df['ebb_z1'] = (df.zi + ebz).apply(lambda x: min(x, nz))
-        df['ebb_h'] = df['ebb_y1'] - df['ebb_y0']
-        df['ebb_w'] = df['ebb_x1'] - df['ebb_x0']
-        df['ebb_nz'] = df['ebb_z1'] - df['ebb_z0'] + 1
-
-        # compute relative bounding boxes
-        df['rel_y0'] = df.y0 - df.ebb_y0
-        df['rel_y1'] = df.y1 - df.ebb_y0
-        df['rel_x0'] = df.x0 - df.ebb_x0
-        df['rel_x1'] = df.x1 - df.ebb_x0
-
-        assert np.all(df['rel_x1'] <= (df['ebb_x1'] - df['ebb_x0']))
-        assert np.all(df['rel_y1'] <= (df['ebb_y1'] - df['ebb_y0']))
-
-        df['slice'] = df.apply(
-            lambda r:
-            np.s_[int(r.y0): int(r.y1), int(r.x0): int(r.x1), :, int(r.zi): int(r.zi + 1)],
-            axis=1,
-            result_type='reduce',
-        )
-        df['expanded_slice'] = df.apply(
-            lambda r:
-            np.s_[int(r.ebb_y0): int(r.ebb_y1), int(r.ebb_x0): int(r.ebb_x1), :, int(r.ebb_z0): int(r.ebb_z1) + 1],
-            axis=1,
-            result_type='reduce',
-        )
-        df['relative_slice'] = df.apply(
-            lambda r:
-            np.s_[int(r.rel_y0): int(r.rel_y1), int(r.rel_x0): int(r.rel_x1), :, :],
-            axis=1,
-            result_type='reduce',
+        """
+        return cls.from_object_ids(
+            acc_raw,
+            get_label_ids(
+                acc_seg,
+                allow_3d=allow_3d,
+                connect_3d=connect_3d
+            ),
+            params
         )
-        df['binary_mask'] = df.apply(
-            lambda r: (acc_obj_ids.data == r.label).max(axis=-1)[r.y0: r.y1, r.x0: r.x1, 0],
-            axis=1,
-            result_type='reduce',
+
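+    # Editor's sketch (assumed parameter values): a typical entry point, starting from a
+    # mono binary mask accessor seg of the same YX size as acc_raw; area filters and box
+    # expansion are taken from RoiSetMetaParams:
+    #
+    #   roiset = RoiSet.from_binary_mask(
+    #       acc_raw, seg, allow_3d=False, connect_3d=False,
+    #       params=RoiSetMetaParams(
+    #           expand_box_by=(64, 2),
+    #           filters={'area': {'min': 50, 'max': 10000}},
+    #       ),
+    #   )
+    #   patches = roiset.get_patches_acc(channels=[0])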
+    @classmethod
+    def from_polygons_2d(
+            cls,
+            acc_raw,
+            polygons: List[np.ndarray],
+            params: RoiSetMetaParams = RoiSetMetaParams()
+    ):
+        """
+        Create a RoiSet where objects are defined from a list of polygon coordinates
+        :param acc_raw: accessor to a generally multichannel z-stack
+        :param polygons: list of (variable x 2) np.ndarrays describing (x, y) polygon vertex coordinates
+        :param params: optional arguments that influence the definition and representation of ROIs
+        """
+        mask = np.zeros(acc_raw.get_mono(0, mip=True).shape, dtype=bool)
+        for p in polygons:
+            sl = draw.polygon(p[:, 1], p[:, 0])
+            mask[sl] = True
+        return cls.from_binary_mask(
+            acc_raw,
+            InMemoryDataAccessor(mask),
+            allow_3d=False,
+            connect_3d=False,
+            params=params,
         )
-        return df
-
-    @staticmethod
-    def filter_df(df: pd.DataFrame, filters: RoiFilter = None) -> pd.DataFrame:
-        query_str = 'label > 0'  # always true
-        if filters is not None:  # parse filters
-            for k, val in filters.dict(exclude_unset=True).items():
-                assert k in ('area')
-                vmin = val['min']
-                vmax = val['max']
-                assert vmin >= 0
-                query_str = query_str + f' & {k} > {vmin} & {k} < {vmax}'
-        return df.loc[df.query(query_str).index, :]
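+    # Editor's sketch (hypothetical vertices): each polygon is an (N x 2) array of
+    # (x, y) points, rasterized onto a single 2D mask before labeling:
+    #
+    #   triangle = np.array([[10, 10], [40, 12], [22, 38]])
+    #   roiset = RoiSet.from_polygons_2d(acc_raw, [triangle])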
 
     def get_df(self) -> pd.DataFrame:
         return self._df
@@ -275,19 +517,6 @@ class RoiSet(object):
     def add_df_col(self, name, se: pd.Series) -> None:
         self._df[name] = se
 
-    def get_multichannel_projection(self):
-        if self.count:
-            projected = project_stack_from_focal_points(
-                self._df['centroid-0'].to_numpy(),
-                self._df['centroid-1'].to_numpy(),
-                self._df['zi'].to_numpy(),
-                self.acc_raw,
-                degree=4,
-            )
-        else:  # else just return MIP
-            projected = self.acc_raw.data.max(axis=-1)
-        return projected
-
     def get_patches_acc(self, channels: list = None, **kwargs) -> PatchStack:  # padded, un-annotated 2d patches
         if channels and len(channels) == 1:
             patches_df = self.get_patches(white_channel=channels[0], **kwargs)
@@ -301,7 +530,7 @@ class RoiSet(object):
         write_accessor_data_to_file(fp, annotated)
         return (prefix + '.tif')
 
-    def get_zmask(self, mask_type='boxes'):
+    def get_zmask(self, mask_type='boxes') -> np.ndarray:
         """
         Return a mask of same dimensionality as raw data
 
@@ -341,43 +570,21 @@ class RoiSet(object):
     def classify_by(
             self, name: str, channels: list[int],
             object_classification_model: InstanceSegmentationModel,
-            derived_channel_functions: list[callable] = None
     ):
         """
         Insert a column in RoiSet data table that associates each ROI with an integer class, determined by passing
-        specified inputs through an instance segmentation classifier.  Optionally derive additional inputs for object
-        classification by passing a raw input channel through one or more functions.
+        specified inputs through an instance segmentation classifier.
 
         :param name: name of column to insert
         :param channels: list of nc raw input channels to send to classifier
         :param object_classification_model: InstanceSegmentation model object
-        :param derived_channel_functions: list of functions that each receive a PatchStack accessor with nc channels and
-            return a single-channel PatchStack accessor of the same shape
         :return: None
         """
+        if self.count == 0:
+            self._df['classify_by_' + name] = None
+            return
 
-        raw_acc = self.get_patches_acc(channels=channels, expanded=False, pad_to=None)  # all channels
-        if derived_channel_functions is not None:
-            mono_data = [raw_acc.get_mono(c).data for c in range(0, raw_acc.chroma)]
-            for fcn in derived_channel_functions:
-                der = fcn(raw_acc) # returns patch stack
-                if der.shape != mono_data[0].shape or der.dtype not in ['uint8', 'uint16']:
-                    raise DerivedChannelError(
-                        f'Error processing derived channel {der} with shape {der.shape_dict} and dtype {der.dtype}'
-                    )
-                self.accs_derived.append(der)
-
-            # combine channels
-            data_derived = [acc.data for acc in self.accs_derived]
-            input_acc = PatchStack(
-                np.concatenate(
-                    [*mono_data, *data_derived],
-                    axis=raw_acc._ga('C')
-                )
-            )
-
-        else:
-            input_acc = raw_acc
+        input_acc = self.get_patches_acc(channels=channels, expanded=False, pad_to=None)  # all channels
 
         # do this on a patch basis, i.e. only one object per frame
         obmap_patches = object_classification_model.label_patch_stack(
@@ -385,28 +592,76 @@ class RoiSet(object):
             self.get_patch_masks_acc(expanded=False, pad_to=None)
         )
 
-        om = np.zeros(self.acc_obj_ids.shape, self.acc_obj_ids.dtype)
-
-        self._df['classify_by_' + name] = pd.Series(dtype='Int64')
+        se = pd.Series(dtype='Int64', index=self._df.index)
 
-        # assign labels to object map:
         for i, roi in enumerate(self):
             oc = np.unique(
                 mask_largest_object(
                     obmap_patches.iat(i).data
                 )
             )[-1]
-            self._df.loc[roi.Index, 'classify_by_' + name] = oc
-            om[self.acc_obj_ids.data == roi.label] = oc
-        self.object_class_maps[name] = InMemoryDataAccessor(om)
+            se[roi.Index] = oc
+        self.set_classification(f'classify_by_{name}', se)
+
+
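+    # Editor's sketch (assumed model object): classify ROIs with any loaded
+    # InstanceSegmentationModel and read the result back as an object class map:
+    #
+    #   roiset.classify_by('nuc', channels=[0], object_classification_model=model)
+    #   class_map = roiset.get_object_class_map('nuc')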
+    def get_instance_classification(self, roiset_from: Self, iou_min: float = 0.5) -> pd.DataFrame:
+        """
+        Transfer instance classification labels from another RoiSet based on intersection over union (IOU) similarity
+        :param roiset_from: RoiSet source of classification labels, same shape as this RoiSet
+        :param iou_min: threshold IOU below which a label is not transferred
+        :return: DataFrame of the source RoiSet, including overlaps with this RoiSet and the IOU metric
+        """
+        if self.acc_raw.shape != roiset_from.acc_raw.shape:
+            raise ShapeMismatchError(
+                f'Expecting two RoiSets of same shape: {self.acc_raw.shape} != {roiset_from.acc_raw.shape}')
+
+        columns = [f'classify_by_{c}' for c in roiset_from.classification_columns]
+
+        if len(columns) == 0:
+            raise MissingInstanceLabelsError('Expecting at least one instance classification channel but none found')
+
+        df_overlaps = filter_df_overlap_seg(
+            roiset_from.get_df(),
+            self.get_df()
+        )
+        df_overlaps['transfer'] = df_overlaps.seg_iou > iou_min
+        df_merge = pd.merge(
+            roiset_from.get_df()[columns],
+            df_overlaps.loc[df_overlaps.transfer, ['overlaps_with']],
+            left_index=True,
+            right_index=True,
+            how='inner',
+        ).set_index('overlaps_with')
+        for col in columns:
+            self.set_classification(col, df_merge[col])
+
+        return df_overlaps
+
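+    # Editor's sketch (assumed RoiSets of identical raw shape): transfer labels from
+    # roiset_a into roiset_b, keeping only matches whose IOU exceeds the threshold:
+    #
+    #   overlaps = roiset_b.get_instance_classification(roiset_a, iou_min=0.6)
+    #   roiset_b.classification_columns  # now includes roiset_a's result names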
+    def get_object_class_map(self, name: str) -> InMemoryDataAccessor:
+        """
+        For a given classification result, return a map where object IDs are replaced by each object's class
+        :param name: name of the classification result, same as passed to RoiSet.classify_by()
+        :return: accessor of object class map
+        """
+        colname = ('classify_by_' + name)
+        assert colname in self._df.columns
+        obj_ids = self.acc_obj_ids
+        om = np.zeros(obj_ids.shape, obj_ids.dtype)
 
+        def _label_object_class(roi):
+            om[self.acc_obj_ids.data == roi.label] = roi[colname]
+        self._df.apply(_label_object_class, axis=1)
+
+        return InMemoryDataAccessor(om)
+
+    def get_serializable_dataframe(self) -> pd.DataFrame:
+        return self._df.drop(['expanded_slice', 'slice', 'relative_slice', 'binary_mask'], axis=1)
 
     def export_dataframe(self, csv_path: Path) -> str:
         csv_path.parent.mkdir(parents=True, exist_ok=True)
-        self._df.drop(['expanded_slice', 'slice', 'relative_slice', 'binary_mask'], axis=1).to_csv(csv_path, index=False)
+        self.get_serializable_dataframe().to_csv(csv_path, index=False)
         return csv_path.name
 
-
     def export_patch_masks(self, where: Path, pad_to: int = None, prefix='mask', expanded=False) -> pd.DataFrame:
         patches_df = self.get_patch_masks(pad_to=pad_to, expanded=expanded).copy()
 
@@ -446,15 +701,13 @@ class RoiSet(object):
                 patch = np.zeros((roi.ebb_h, roi.ebb_w, 1, 1), dtype='uint8')
                 patch[roi.relative_slice][:, :, 0, 0] = roi.binary_mask * 255
             else:
-                patch = np.zeros((roi.y1 - roi.y0, roi.x1 - roi.x0, 1, 1), dtype='uint8')
-                patch[:, :, 0, 0] = roi.binary_mask * 255
-
+                patch = (roi.binary_mask * 255).astype('uint8')
             if pad_to:
                 patch = pad(patch, pad_to)
-            return patch
+            return np.expand_dims(patch, (2, 3))
 
         dfe = self._df.copy()
-        dfe['patch_mask'] = dfe.apply(lambda r: _make_patch_mask(r), axis=1)
+        dfe['patch_mask'] = dfe.apply(_make_patch_mask, axis=1)
         return dfe
 
     def get_patch_masks_acc(self, **kwargs) -> PatchStack:
@@ -482,7 +735,8 @@ class RoiSet(object):
 
             if white_channel:
                 assert white_channel < raw.chroma
-                stack = raw.data[:, :, [white_channel, white_channel, white_channel], :]
+                mono = raw.get_mono(white_channel).data_xyz
+                stack = np.stack([mono, mono, mono], axis=2)
             else:
                 stack = np.zeros([*raw.shape[0:2], 3, raw.shape[3]], dtype=raw.dtype)
 
@@ -491,10 +745,10 @@ class RoiSet(object):
                     continue
                 assert isinstance(ci, int)
                 assert ci < raw.chroma
-                stack[:, :, ii, :] = _safe_add(
+                stack[:, :, ii, :] = safe_add(
                     stack[:, :, ii, :],  # either black or grayscale channel
                     rgb_overlay_weights[ii],
-                    raw.data[:, :, ci, :]
+                    raw.get_mono(ci).data_xyz
                 )
         else:
             if white_channel is not None:  # interpret as just a single channel
@@ -509,9 +763,10 @@ class RoiSet(object):
                         annotate_rgb = True
                         break
                 if annotate_rgb:  # make RGB patches anyway to include annotation color
-                    stack = raw.data[:, :, [white_channel, white_channel, white_channel], :]
+                    mono = raw.get_mono(white_channel).data_xyz
+                    stack = np.stack([mono, mono, mono], axis=2)
                 else:  # make monochrome patches
-                    stack = raw.data[:, :, [white_channel], :]
+                    stack = raw.get_mono(white_channel).data
             elif kwargs.get('channels'):
                 stack = raw.get_channels(kwargs['channels']).data
             else:
@@ -533,7 +788,7 @@ class RoiSet(object):
 
             # make a 2d patch, find optimal z-position determined by focus_metric function on each channel separately
             elif focus_metric is not None:
-                foc = _focus_metrics()[focus_metric]
+                foc = focus_metrics()[focus_metric]
 
                 patch = np.zeros([ph, pw, pc, 1], dtype=patch3d.dtype)
 
@@ -593,6 +848,22 @@ class RoiSet(object):
         dfe['patch'] = dfe.apply(lambda r: _make_patch(r), axis=1)
         return dfe
 
+    @property
+    def classification_columns(self):
+        """
+        Return list of columns that describe instance classification results
+        """
+        pr = 'classify_by_'
+        return [c.split(pr)[1] for c in self._df.columns if c.startswith(pr)]
+
+    def set_classification(self, colname: str, se: pd.Series):
+        """
+        Set instance classification result as a column addition on dataframe
+        :param colname: name of classification result
+        :param se: series containing class information
+        """
+        self._df[colname] = se
+
     def run_exports(self, where: Path, channel, prefix, params: RoiSetExportParams) -> dict:
         """
         Export various representations of ROIs, e.g. patches, annotated stacks, and object maps.
@@ -633,10 +904,10 @@ class RoiSet(object):
             if k == 'annotated_zstacks':
                 record[k] = str(Path(k) / self.export_annotated_zstack(subdir, prefix=pr, **kp))
             if k == 'object_classes':
-                for kc, acc in self.object_class_maps.items():
-                    fp = subdir / kc / (pr + '.tif')
-                    write_accessor_data_to_file(fp, acc)
-                    record[f'{k}_{kc}'] = str(fp)
+                for n in self.classification_columns:
+                    fp = subdir / n / (pr + '.tif')
+                    write_accessor_data_to_file(fp, self.get_object_class_map(n))
+                    record[f'{k}_{n}'] = str(fp)
             if k == 'derived_channels':
                 record[k] = []
                 for di, dacc in enumerate(self.accs_derived):
@@ -650,7 +921,40 @@ class RoiSet(object):
 
         return record
 
-    def serialize(self, where: Path, prefix='') -> dict:
+    def get_export_product_accessors(self, channel, params: RoiSetExportParams) -> dict:
+        """
+        Return various representations of ROIs, e.g. patches, annotated stacks, and object maps, as accessors
+        :param channel: color channel of products to export
+        :param params: RoiSetExportParams object describing which products to export and with which parameters
+        :return: ordered dict of accessors containing the specified products
+        """
+        interm = OrderedDict()
+        if not self.count:
+            return interm
+
+        for k, kp in params.dict().items():
+            if kp is None:
+                continue
+            if k == 'patches_3d':
+                interm[k] = self.get_patches_acc([channel], make_3d=True, **kp)
+            if k == 'annotated_patches_2d':
+                interm[k] = self.get_patches_acc(
+                    make_3d=False, white_channel=channel,
+                    bounding_box_channel=1, bounding_box_linewidth=2, **kp
+                )
+            if k == 'patches_2d':
+                interm[k] = self.get_patches_acc(make_3d=False, white_channel=channel, **kp)
+            if k == 'annotated_zstacks':
+                interm[k] = InMemoryDataAccessor(draw_boxes_on_3d_image(self, **kp))
+            if k == 'object_classes':
+                for n in self.classification_columns:
+                    interm[f'{k}_{n}'] = self.get_object_class_map(n)
+
+        return interm
+
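+    # Editor's sketch (assumed export parameters): unlike run_exports(), nothing is
+    # written to disk; the same product keys map to in-memory accessors:
+    #
+    #   products = roiset.get_export_product_accessors(channel=0, params=export_params)
+    #   patches_2d = products.get('patches_2d')  # PatchStack, if requested in export_params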
+    def serialize(self, where: Path, prefix='roiset') -> dict:
         """
         Export the minimal information needed to recreate RoiSet object, i.e. CSV data file and tight patch masks
         :param where: path of directory in which to write files
@@ -658,94 +962,220 @@ class RoiSet(object):
         :return: nested dict of Path objects describing the locations of export products
         """
         record = {}
-        df_exp = self.export_patch_masks(
-            where / 'tight_patch_masks',
-            prefix=prefix,
-            pad_to=None,
-            expanded=False
-        )
-        se_pa = df_exp.patch_mask_path.apply(
-            lambda x: str(Path('tight_patch_masks') / x)
-        ).rename('tight_patch_masks_path')
-        self._df = self._df.join(se_pa)
-        df_fn = self.export_dataframe(where / 'dataframe' / (prefix + '.csv'))
-        record['dataframe'] = str(Path('dataframe') / df_fn)
-        record['tight_patch_masks'] = list(se_pa)
+        if not self._df.binary_mask.apply(lambda x: np.all(x)).all():  # binary masks aren't just all True
+            df_exp = self.export_patch_masks(
+                where / 'tight_patch_masks',
+                prefix=prefix,
+                pad_to=None,
+                expanded=False
+            )
+            # record patch masks paths to dataframe, then save static columns to CSV
+            se_pa = df_exp.patch_mask_path.apply(
+                lambda x: str(Path('tight_patch_masks') / x)
+            ).rename('tight_patch_masks_path')
+            self._df = self._df.join(se_pa)
+            record['tight_patch_masks'] = list(se_pa)
+
+        csv_path = where / 'dataframe' / (prefix + '.csv')
+        csv_path.parent.mkdir(parents=True, exist_ok=True)
+        self.export_dataframe(csv_path)
+
+        record['dataframe'] = str(Path('dataframe') / csv_path.name)
+
         return record
 
-    @staticmethod
-    def deserialize(acc_raw: GenericImageDataAccessor, where: Path, prefix=''):
+
+    def get_polygons(self, poly_threshold=0, dilation_radius=1) -> pd.Series:
+        """
+        Fit polygons to all object boundaries in the RoiSet
+        :param poly_threshold: threshold distance for polygon fit; a smaller number follows sharp features more closely
+        :param dilation_radius: radius of binary dilation to apply before fitting polygon
+        :return: Series of (variable x 2) np.ndarrays describing (x, y) polygon vertex coordinates
+        """
+
+        pad_to = 1
+
+        def _poly_from_mask(roi):
+            mask = roi.binary_mask
+            if len(mask.shape) != 2:
+                raise PatchMaskShapeError('Patch mask must be two-dimensional to fit a polygon')
+
+            # label and fill holes
+            labeled = label(mask)
+            filled = [rp.image_filled for rp in regionprops(labeled)]
+            assert (np.unique(labeled)[-1] == 1) and (len(filled) == 1), 'Cannot fit multiple polygons in a single patch mask'
+
+            closed = binary_dilation(filled[0], footprint=disk(dilation_radius))
+            padded = np.pad(closed, pad_to) * 1.0
+            all_contours = find_contours(padded)
+
+            nc = len(all_contours)
+            for j in range(0, nc):
+                if all([points_in_poly(all_contours[k], all_contours[j]).all() for k in range(0, nc)]):
+                    contour = all_contours[j]
+                    break
+
+            rel_polygon = approximate_polygon(contour[:, [1, 0]], poly_threshold) - [pad_to, pad_to]
+            return rel_polygon + [roi.x0, roi.y0]
+
+        return self._df.apply(_poly_from_mask, axis=1)
+
+
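+    # Editor's sketch: fitted polygons are absolute (x, y) vertices, so they can seed
+    # a new RoiSet on the same raw stack:
+    #
+    #   polys = list(roiset.get_polygons(poly_threshold=0, dilation_radius=1))
+    #   roiset_again = RoiSet.from_polygons_2d(roiset.acc_raw, polys)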
+    @property
+    def acc_obj_ids(self):
+        return make_object_ids_from_df(self._df, self.acc_raw.shape_dict)
+
+    @classmethod
+    def deserialize(cls, acc_raw: GenericImageDataAccessor, where: Path, prefix='roiset') -> Self:
+        """
+        Create an RoiSet object from saved files and an image accessor
+        :param acc_raw: accessor to image that contains ROIs
+        :param where: path to directory containing RoiSet serialization files, i.e. a dataframe subdirectory with the
+            CSV data file and, optionally, a tight_patch_masks subdirectory of segmentation masks
+        :param prefix: starting prefix of patch mask filenames
+        :return: RoiSet object
+        """
         df = pd.read_csv(where / 'dataframe' / (prefix + '.csv'))[['label', 'zi', 'y0', 'y1', 'x0', 'x1']]
+        pa_masks = where / 'tight_patch_masks'
+
+        if pa_masks.exists():  # import segmentation masks
+            def _read_binary_mask(r):
+                ext = 'png'
+                fname = f'{prefix}-la{r.label:04d}-zi{r.zi:04d}.{ext}'
+                try:
+                    ma_acc = generate_file_accessor(pa_masks / fname)
+                    assert ma_acc.chroma == 1 and ma_acc.nz == 1
+                    mask_data = ma_acc.data_xy / np.iinfo(ma_acc.data.dtype).max
+                    return mask_data
+                except Exception as e:
+                    raise DeserializeRoiSet(e)
+
+            df['binary_mask'] = df.apply(_read_binary_mask, axis=1)
+            id_mask = make_object_ids_from_df(df, acc_raw.shape_dict)
+            return cls.from_object_ids(acc_raw, id_mask)
+
+        else:  # assume bounding boxes only
+            df['y'] = df['y0']
+            df['x'] = df['x0']
+            df['h'] = df['y1'] - df['y0']
+            df['w'] = df['x1'] - df['x0']
+            return cls.from_bounding_boxes(
+                acc_raw,
+                df[['y', 'x', 'h', 'w']].to_dict(orient='records'),
+                list(df['zi'])
+            )
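+    # Editor's sketch (hypothetical paths): serialize()/deserialize() round-trip an
+    # RoiSet through a CSV data file and, when masks are non-trivial, PNG patch masks:
+    #
+    #   record = roiset.serialize(Path('/tmp/roiset_out'), prefix='roiset')
+    #   restored = RoiSet.deserialize(acc_raw, Path('/tmp/roiset_out'), prefix='roiset')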
 
-        id_mask = np.zeros((*acc_raw.hw, 1, acc_raw.nz), dtype='uint16')
-        def _label_obj(r):
-            sl = np.s_[r.y0:r.y1, r.x0:r.x1, :, r.zi:r.zi + 1]
-            ext = 'png'
-            fname = f'{prefix}-la{r.label:04d}-zi{r.zi:04d}.{ext}'
-            try:
-                ma_acc = generate_file_accessor(where / 'tight_patch_masks' / fname)
-                bool_mask = ma_acc.data / np.iinfo(ma_acc.data.dtype).max
-                id_mask[sl] = id_mask[sl] + r.label * bool_mask
-            except Exception as e:
-                raise DeserializeRoiSet(e)
-
-        df.apply(_label_obj, axis=1)
-        return RoiSet(acc_raw, InMemoryDataAccessor(id_mask))
-
-
-def project_stack_from_focal_points(
-        xx: np.ndarray,
-        yy: np.ndarray,
-        zz: np.ndarray,
-        stack: GenericImageDataAccessor,
-        degree: int = 2,
-) -> np.ndarray:
-    """
-    Given a set of 3D points, project a multichannel z-stack based on a surface fit of the provided points
-    :param xx: vector of point x-coordinates
-    :param yy: vector of point y-coordinates
-    :param zz: vector of point z-coordinates
-    :param stack: z-stack to project
-    :param degree: order of polynomial to fit
-    :return: multichannel 2d projected image array
-    """
-    assert xx.shape == yy.shape
-    assert xx.shape == zz.shape
-
-    poly = PolynomialFeatures(degree=degree)
-    X = np.stack([xx, yy]).T
-    features = poly.fit_transform(X, zz)
-    model = LinearRegression(fit_intercept=False)
-    model.fit(features, zz)
-
-    xy_indices = np.indices(stack.hw).reshape(2, -1).T
-    xy_features = np.dot(
-        poly.fit_transform(xy_indices, zz),
-        model.coef_
-    )
-    zi_image = xy_features.reshape(
-        stack.hw
-    ).round().clip(
-        0, (stack.nz - 1)
-    ).astype('uint16')
-
-    return np.take_along_axis(
-        stack.data,
-        np.repeat(
-            np.expand_dims(zi_image, (2, 3)),
-            stack.chroma,
-            axis=2
-        ),
-        axis=3
-    )
 
+class RoiSetWithDerivedChannelsExportParams(RoiSetExportParams):
+    derived_channels: bool = False
+
+class RoiSetWithDerivedChannels(RoiSet):
+
+    def __init__(self, *a, **k):
+        self.accs_derived = []
+        super().__init__(*a, **k)
+
+    def classify_by(
+            self, name: str, channels: list[int],
+            object_classification_model: InstanceSegmentationModel,
+            derived_channel_functions: list[callable] = None
+    ):
+        """
+        Insert a column in RoiSet data table that associates each ROI with an integer class, determined by passing
+        specified inputs through an instance segmentation classifier. Optionally derive additional inputs for object
+        classification by passing a raw input channel through one or more functions.
+
+        :param name: name of column to insert
+        :param channels: list of nc raw input channels to send to classifier
+        :param object_classification_model: InstanceSegmentation model object
+        :param derived_channel_functions: list of functions that each receive a PatchStack accessor with nc channels and
+            that return a single-channel PatchStack accessor of the same shape
+        :return: None
+        """
+
+        raw_acc = self.get_patches_acc(channels=channels, expanded=False, pad_to=None)  # all channels
+        if derived_channel_functions is not None:
+            mono_data = [raw_acc.get_mono(c).data for c in range(0, raw_acc.chroma)]
+            for fcn in derived_channel_functions:
+                der = fcn(raw_acc) # returns patch stack
+                if der.shape != mono_data[0].shape or der.dtype not in ['uint8', 'uint16']:
+                    raise DerivedChannelError(
+                        f'Error processing derived channel {der} with shape {der.shape_dict} and dtype {der.dtype}'
+                    )
+                self.accs_derived.append(der)
+
+            # combine channels
+            data_derived = [acc.data for acc in self.accs_derived]
+            input_acc = PatchStack(
+                np.concatenate(
+                    [*mono_data, *data_derived],
+                    axis=raw_acc._ga('C')
+                )
+            )
+
+        else:
+            input_acc = raw_acc
+
+        # do this on a patch basis, i.e. only one object per frame
+        obmap_patches = object_classification_model.label_patch_stack(
+            input_acc,
+            self.get_patch_masks_acc(expanded=False, pad_to=None)
+        )
 
+        self._df['classify_by_' + name] = pd.Series(dtype='Int64')
+
+        for i, roi in enumerate(self):
+            oc = np.unique(
+                mask_largest_object(
+                    obmap_patches.iat(i).data
+                )
+            )[-1]
+            self._df.loc[roi.Index, 'classify_by_' + name] = oc
+
+    def run_exports(self, where: Path, channel, prefix, params: RoiSetWithDerivedChannelsExportParams) -> dict:
+        """
+        Export various representations of ROIs, e.g. patches, annotated stacks, and object maps.
+        :param where: path of directory in which to write all export products
+        :param channel: color channel of products to export
+        :param prefix: prefix of the name of each product's file or subfolder
+        :param params: RoiSetExportParams object describing which products to export and with which parameters
+        :return: nested dict of Path objects describing the location of export products
+        """
+        record = super().run_exports(where, channel, prefix, params)
+
+        k = 'derived_channels'
+        if k in params.dict().keys():
+            record[k] = []
+            for di, dacc in enumerate(self.accs_derived):
+                fp = where / k / f'dc{di:01d}.tif'
+                fp.parent.mkdir(exist_ok=True, parents=True)
+                dacc.export_pyxcz(fp)
+                record[k].append(str(fp))
+        return record
 
 class Error(Exception):
     pass
 
+class BoundingBoxError(Error):
+    pass
+
 class DeserializeRoiSet(Error):
     pass
 
+class NoDeprojectChannelSpecifiedError(Error):
+    pass
+
 class DerivedChannelError(Error):
+    pass
+
+class MissingSegmentationError(Error):
+    pass
+
+class PatchMaskShapeError(Error):
+    pass
+
+class ShapeMismatchError(Error):
+    pass
+
+class MissingInstanceLabelsError(Error):
     pass
\ No newline at end of file
diff --git a/model_server/base/session.py b/model_server/base/session.py
index 9b7b1acfa89b5d502b5579cc03c14525027d14a2..7e06003f8be9407f6a8ee4f1816b70fac12f1900 100644
--- a/model_server/base/session.py
+++ b/model_server/base/session.py
@@ -1,27 +1,20 @@
+from collections import OrderedDict
 import logging
 import os
 
-from pathlib import Path
+from pathlib import Path, PureWindowsPath
 from pydantic import BaseModel
 from time import strftime, localtime
 from typing import Union
 
 import pandas as pd
 
-import model_server.conf.defaults
-from model_server.base.models import Model
+from ..conf import defaults
+from .accessors import GenericImageDataAccessor, PatchStack
+from .models import Model
 
 logger = logging.getLogger(__name__)
 
-
-class Singleton(type):
-    _instances = {}
-
-    def __call__(cls, *args, **kwargs):
-        if cls not in cls._instances:
-            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
-        return cls._instances[cls]
-
 class CsvTable(object):
     def __init__(self, fpath: Path):
         self.path = fpath
@@ -38,7 +31,7 @@ class CsvTable(object):
         self.empty = False
         return True
 
-class Session(object, metaclass=Singleton):
+class _Session(object):
     """
     Singleton class for a server session that persists data between API calls
     """
@@ -46,9 +39,9 @@ class Session(object, metaclass=Singleton):
     log_format = '%(asctime)s - %(levelname)s - %(message)s'
 
     def __init__(self, root: str = None):
-        print('Initializing session')
         self.models = {} # model_id : model object
         self.paths = self.make_paths(root)
+        self.accessors = OrderedDict()
 
         self.logfile = self.paths['logs'] / f'session.log'
         logging.basicConfig(filename=self.logfile, level=logging.INFO, force=True, format=self.log_format)
@@ -89,6 +82,111 @@ class Session(object, metaclass=Singleton):
             raise InvalidPathError(f'Could not find {path}')
         self.paths[key] = Path(path)
 
+    def add_accessor(self, acc: GenericImageDataAccessor, accessor_id: str = None) -> str:
+        """
+        Add an accessor to session context
+        :param acc: accessor to add
+        :param accessor_id: unique ID, or autogenerate if None
+        :return: ID of accessor
+        """
+        if accessor_id in self.accessors.keys():
+            raise AccessorIdError(f'Accessor with ID {accessor_id} already exists')
+        if accessor_id is None:
+            idx = len(self.accessors)
+            accessor_id = f'auto_{idx:06d}'
+        self.accessors[accessor_id] = {'loaded': True, 'object': acc, **acc.info}
+        return accessor_id
+
+    def del_accessor(self, accessor_id: str) -> str:
+        """
+        Remove accessor object but retain its info dictionary
+        :param accessor_id: accessor's ID
+        :return: ID of accessor
+        """
+        if accessor_id not in self.accessors.keys():
+            raise AccessorIdError(f'No accessor with ID {accessor_id} is registered')
+        v = self.accessors[accessor_id]
+        if isinstance(v, dict) and v['loaded'] is False:
+            logger.warning(f'Accessor {accessor_id} is already deleted')
+        else:
+            assert isinstance(v['object'], GenericImageDataAccessor)
+            v['loaded'] = False
+            v['object'] = None
+        return accessor_id
+
+    def del_all_accessors(self) -> list[str]:
+        """
+        Remove (unload) all accessors but keep their info in dictionary
+        :return: list of removed accessor IDs
+        """
+        res = []
+        for k, v in self.accessors.items():
+            if v['loaded']:
+                v['object'] = None
+                v['loaded'] = False
+                res.append(k)
+        return res
+
+
+    def list_accessors(self) -> dict:
+        """
+        List information about all accessors in JSON-readable format
+        """
+        if len(self.accessors):
+            return pd.DataFrame(self.accessors).drop('object').to_dict()
+        else:
+            return {}
+
+    def get_accessor_info(self, acc_id: str) -> dict:
+        """
+        Get information about a single accessor
+        """
+        if acc_id not in self.accessors.keys():
+            raise AccessorIdError(f'No accessor with ID {acc_id} is registered')
+        return self.list_accessors()[acc_id]
+
+    def get_accessor(self, acc_id: str, pop: bool = True) -> GenericImageDataAccessor:
+        """
+        Return an accessor object
+        :param acc_id: accessor's ID
+        :param pop: remove object from session accessor registry if True
+        :return: accessor object
+        """
+        if acc_id not in self.accessors.keys():
+            raise AccessorIdError(f'No accessor with ID {acc_id} is registered')
+        acc = self.accessors[acc_id]['object']
+        if pop:
+            self.del_accessor(acc_id)
+        return acc
+
+    def write_accessor(self, acc_id: str, filename: Union[str, None] = None) -> str:
+        """
+        Write an accessor to file and unload it from the session
+        :param acc_id: accessor's ID
+        :param filename: force use of a specific filename, raise InvalidPathError if this already exists
+        :return: name of file
+        """
+        if filename is None:
+            fp = self.paths['outbound_images'] / f'{acc_id}.tif'
+        else:
+            fp = self.paths['outbound_images'] / filename
+            if fp.exists():
+                raise InvalidPathError(f'Cannot overwrite file {filename} when writing accessor')
+        # validate before unloading the accessor, so a failed write does not discard it
+        if acc_id not in self.accessors.keys():
+            raise AccessorIdError(f'No accessor with ID {acc_id} is registered')
+        old_fp = self.accessors[acc_id]['filepath']
+        if old_fp != '':
+            raise WriteAccessorError(
+                f'Cannot overwrite accessor that is already written to {old_fp}'
+            )
+        acc = self.get_accessor(acc_id, pop=True)
+
+        if isinstance(acc, PatchStack):
+            acc.export_pyxcz(fp)
+        else:
+            acc.write(fp)
+        self.accessors[acc_id]['filepath'] = fp.__str__()
+        return fp.name
+
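+    # Editor's sketch (assumed accessor object): typical lifecycle of an accessor in
+    # the session registry:
+    #
+    #   acc_id = session.add_accessor(acc)  # e.g. 'auto_000000'
+    #   session.get_accessor_info(acc_id)   # JSON-safe metadata only
+    #   session.write_accessor(acc_id)      # writes '<acc_id>.tif' and unloads it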
     @staticmethod
     def make_paths(root: str = None) -> dict:
         """
@@ -97,13 +195,13 @@ class Session(object, metaclass=Singleton):
         :return: dictionary of session paths
         """
         if root is None:
-            root_path = Path(model_server.conf.defaults.root)
+            root_path = Path(defaults.root)
         else:
             root_path = Path(root)
-        sid = Session.create_session_id(root_path)
+        sid = _Session.create_session_id(root_path)
         paths = {'root': root_path}
         for pk in ['inbound_images', 'outbound_images', 'logs', 'tables']:
-            pa = root_path / sid / model_server.conf.defaults.subdirectories[pk]
+            pa = root_path / sid / defaults.subdirectories[pk]
             paths[pk] = pa
             try:
                 pa.mkdir(parents=True, exist_ok=True)
@@ -140,10 +238,16 @@ class Session(object, metaclass=Singleton):
     def log_error(self, msg):
         logger.error(msg)
 
-    def load_model(self, ModelClass: Model, params: Union[BaseModel, None] = None) -> dict:
+    def load_model(
+            self,
+            ModelClass: Model,
+            key: Union[str, None] = None,
+            params: Union[BaseModel, None] = None,
+    ) -> dict:
         """
         Load an instance of a given model class and attach to this session's model registry
         :param ModelClass: subclass of Model
+        :param key: unique identifier of model, or autogenerate if None
         :param params: optional parameters that are passed to the model's construct
         :return: model_id of loaded model
         """
@@ -151,13 +255,17 @@ class Session(object, metaclass=Singleton):
         assert mi.loaded, f'Error loading instance of {ModelClass.__name__}'
         ii = 0
 
-        def mid(i):
-            return f'{ModelClass.__name__}_{i:02d}'
+        if key is None:
+            def mid(i):
+                return f'{mi.name}_{i:02d}'
 
-        while mid(ii) in self.models.keys():
-            ii += 1
+            while mid(ii) in self.models.keys():
+                ii += 1
+
+            key = mid(ii)
+        elif key in self.models.keys():
+            raise CouldNotInstantiateModelError(f'Model with key {key} already exists.')
 
-        key = mid(ii)
         self.models[key] = {
             'object': mi,
             'params': getattr(mi, 'params', None)
@@ -183,7 +291,7 @@ class Session(object, metaclass=Singleton):
         models = self.describe_loaded_models()
         for mid, det in models.items():
             if is_path:
-                if Path(det.get('params').get(key)) == Path(value):
+                if PureWindowsPath(det.get('params').get(key)).as_posix() == Path(value).as_posix():
                     return mid
             else:
                 if det.get('params').get(key) == value:
@@ -193,6 +301,11 @@ class Session(object, metaclass=Singleton):
     def restart(self, **kwargs):
         self.__init__(**kwargs)
 
+
+# create singleton instance
+session = _Session()
+
+
 class Error(Exception):
     pass
 
@@ -202,6 +315,12 @@ class InferenceRecordError(Error):
 class CouldNotInstantiateModelError(Error):
     pass
 
+class AccessorIdError(Error):
+    pass
+
+class WriteAccessorError(Error):
+    pass
+
 class CouldNotCreateDirectory(Error):
     pass
 
diff --git a/model_server/base/util.py b/model_server/base/util.py
index 112118832acb0d15caed1ef29118c3bcc43ea7df..c1556cb5f4c30004d789b05cb539a7a6b770d67e 100644
--- a/model_server/base/util.py
+++ b/model_server/base/util.py
@@ -1,12 +1,14 @@
+from collections import OrderedDict
 from math import ceil
 from pathlib import Path
 import re
 from time import localtime, strftime
 from typing import List
+from time import perf_counter
 
 import pandas as pd
 
-from model_server.base.accessors import InMemoryDataAccessor, write_accessor_data_to_file
+from model_server.base.accessors import GenericImageDataAccessor, InMemoryDataAccessor, write_accessor_data_to_file
 from model_server.base.models import Model
 
 def autonumber_new_directory(where: str, prefix: str) -> str:
@@ -163,4 +165,5 @@ def loop_workflow(
                 )
 
     if len(failures) > 0:
-        pd.DataFrame(failures).to_csv(Path(output_folder_path) / 'failures.csv')
\ No newline at end of file
+        pd.DataFrame(failures).to_csv(Path(output_folder_path) / 'failures.csv')
+
diff --git a/model_server/base/validators.py b/model_server/base/validators.py
deleted file mode 100644
index b4142b0f6ef99904d3bdb13fd7c5494e5df4fe18..0000000000000000000000000000000000000000
--- a/model_server/base/validators.py
+++ /dev/null
@@ -1,19 +0,0 @@
-from fastapi import HTTPException
-
-from model_server.base.session import Session
-
-session = Session()
-
-def validate_workflow_inputs(model_ids, inpaths):
-    for mid in model_ids:
-        if mid not in session.describe_loaded_models().keys():
-            raise HTTPException(
-                status_code=409,
-                detail=f'Model {mid} has not been loaded'
-            )
-    for inpa in inpaths:
-        if not inpa.exists():
-            raise HTTPException(
-                status_code=404,
-                detail=f'Could not find file:\n{inpa}'
-            )
\ No newline at end of file
diff --git a/model_server/base/workflows.py b/model_server/base/workflows.py
deleted file mode 100644
index 9ff6f57c04c4278a59fd683040d42ce6111cae52..0000000000000000000000000000000000000000
--- a/model_server/base/workflows.py
+++ /dev/null
@@ -1,61 +0,0 @@
-"""
-Implementation of image analysis work behind API endpoints, without knowledge of persistent data in server session.
-"""
-from collections import OrderedDict
-from pathlib import Path
-from time import perf_counter
-from typing import Dict
-
-from model_server.base.accessors import generate_file_accessor, write_accessor_data_to_file
-from model_server.base.models import SemanticSegmentationModel
-
-from pydantic import BaseModel
-
-class Timer(object):
-    tfunc = perf_counter
-
-    def __init__(self):
-        self.events = OrderedDict()
-        self.last = self.tfunc()
-
-    def click(self, key):
-        self.events[key] = self.tfunc() - self.last
-        self.last = self.tfunc()
-
-class WorkflowRunRecord(BaseModel):
-    model_id: str
-    input_filepath: str
-    output_filepath: str
-    success: bool
-    timer_results: Dict[str, float]
-
-
-def classify_pixels(fpi: Path, model: SemanticSegmentationModel, where_output: Path, **kwargs) -> WorkflowRunRecord:
-    """
-    Run a semantic segmentation model to compute a binary mask from an input image
-
-    :param fpi: Path object that references input image file
-    :param model: semantic segmentation model instance
-    :param where_output: Path object that references output image directory
-    :param kwargs: variable-length keyword arguments
-    :return: record object
-    """
-    ti = Timer()
-    ch = kwargs.get('channel')
-    img = generate_file_accessor(fpi).get_mono(ch)
-    ti.click('file_input')
-
-    outdata = model.label_pixel_class(img)
-    ti.click('inference')
-
-    outpath = where_output / (model.model_id + '_' + fpi.stem + '.tif')
-    write_accessor_data_to_file(outpath, outdata)
-    ti.click('file_output')
-
-    return WorkflowRunRecord(
-        model_id=model.model_id,
-        input_filepath=str(fpi),
-        output_filepath=str(outpath),
-        success=True,
-        timer_results=ti.events,
-    )
\ No newline at end of file
diff --git a/model_server/clients/ilastik_map_objects.py b/model_server/clients/ilastik_map_objects.py
deleted file mode 100644
index c395a31fb704821ced3a970dcf58bd07d04079ff..0000000000000000000000000000000000000000
--- a/model_server/clients/ilastik_map_objects.py
+++ /dev/null
@@ -1,80 +0,0 @@
-from os.path import basename, dirname
-
-
-def main(request_func, in_abspath, params):
-	"""
-	Execute a sequence of client requests that load ilastik pixel and object classifiers, then infer on an image file
-	:param request_func: (func) function that implements HTTP client, dependent on which environment request are called from
-	:param in_abspath: (str) absolute path to image file to infer
-	:param params:
-		pixel_classifier_path: (str) absolute path to ilastik project file that defines a pixel classifier
-		object_classifier_path: (str) absolute path to ilastik project file that defines an object classifier
-		channel (optional): (int) channel of the input image to process, use all channels if not specified
-	:return: (str) absolute path where a new object map is written
-	"""
-
-	where = dirname(in_abspath)
-	in_file = basename(in_abspath)
-	
-	px_ilp = params['pixel_classifier_path']
-	ob_ilp = params['object_classifier_path']
-	channel = params.get('channel', None)
-	mip = params.get('mip', False)
-	
-	# configure input and output paths
-	resp = request_func(
-		'PUT',
-		'/paths/watch_input',
-		{
-			'path': where,
-		}
-	)
-	assert resp['status'] == 200, 'Error setting up image directory'
-	
-	resp = request_func(
-		'PUT',
-		'/paths/watch_output',
-		{
-			'path': where,
-		}
-	)
-	assert resp['status'] == 200, 'Error setting up image directory'
-	
-	# load pixel classifier
-	resp = request_func(
-		'PUT',
-		'/ilastik/seg/load/',
-		body={
-			'project_file': px_ilp,
-			'duplicate': False,
-		},
-	)
-	assert resp['status'], 'Error loading classifier: ' + px_ilp
-	id_px_mod = resp['content']['model_id']
-	
-	# load object classifier
-	resp = request_func(
-		'PUT', '/ilastik/pxmap_to_obj/load/',
-		body={
-			'project_file': ob_ilp,
-			'duplicate': False,
-		},
-	)
-	assert resp['status'] == 200, 'Error loading object classifier: ' + {ob_ilp}
-	id_ob_mod = resp['content']['model_id']
-	
-	# run inference
-	resp = request_func(
-		'PUT',
-		'/ilastik/pixel_then_object_classification/infer',
-		{
-			'px_model_id': id_px_mod,
-			'ob_model_id': id_ob_mod,
-			'input_filename': in_file,
-			'channel': channel,
-			'mip': mip,
-		}
-	)
-	assert resp['status'] == 200, 'Error calling workfow'
-	return resp['content']['object_map_filepath']
-	
diff --git a/model_server/clients/imagej/adapter.py b/model_server/clients/imagej/adapter.py
deleted file mode 100644
index 237aa7ab51bef9c6dd8813ae1a8669b5f9bc70e6..0000000000000000000000000000000000000000
--- a/model_server/clients/imagej/adapter.py
+++ /dev/null
@@ -1,82 +0,0 @@
-"""
-Functionality needed to run a client request sequence (clients.*.main) in the ImageJ model_server 2.7 script environment
-"""
-
-import httplib
-import json
-import urllib
-
-from ij import IJ
-from ij import ImagePlus
-
-HOST = '127.0.0.1'
-PORT = 6221
-uri = 'http://{}:{}/'.format(HOST, PORT)
-
-def hit_endpoint(method, endpoint, params=None, body=None, drop_none=True):
-    """
-    Python 2.7 implementation of HTTP client
-    :param method: (str) either 'GET' or 'PUT'
-    :param endpoint: (str) endpoint of HTTP request
-    :param params: (dict) of parameters that are embedded in client request URL
-    :param body: (dict) of parameters that JSON-encoded and attached as payload in request
-    :param drop_none: (bool) remove (presumably optional) parameters with value equal to None
-    :return: (dict) of response status and content, formatted as dict if request is successful
-    """
-    connection = httplib.HTTPConnection(HOST, PORT)
-    if not method in ['GET', 'PUT']:
-        raise Exception('Can only handle GET and PUT requests')
-    k_pop = []
-    if drop_none and params is not None:
-        for k, v in params.items():
-            if v is None:
-                k_pop.append(k)
-        for ki in k_pop:
-            params.pop(ki)
-
-    if params:
-        url = endpoint + '?' + urllib.urlencode(params)
-    else:
-        url = endpoint
-    connection.request(method, url, body=json.dumps(body))
-    resp = connection.getresponse()
-    resp_str = resp.read()
-    try:
-        content = json.loads(resp_str)
-    except Exception:
-        content = {'str': str(resp_str)}
-    return {'status': resp.status, 'content': content}
-
-
-def verify_server(popup=True):
-    try:
-        resp = hit_endpoint('GET', '/')
-    except Exception as e:
-        print(e)
-        msg = 'Could not find server at: ' + uri
-        IJ.log(msg)
-        if popup:
-            IJ.error(msg)
-            raise e
-        return False
-    if resp['status'] != 200:
-        msg = 'Unknown error verifying server at: ' + uri
-        if popup:
-            IJ.error(msg)
-            raise Exception(msg)
-        return False
-    else:
-        IJ.log('Verified server is online at: ' + uri)
-        return True
-
-def run_request_sequence(imp, func, params):
-    """
-    Execute a sequence of client requests in the ImageJ scripting environment
-    :param imp: (ij.ImagePlus) input image
-    :param func: (func) function that implements client request sequence
-    :param params: (dict) parameters specific to client request
-    :return: (ij.ImagePlus) output image
-    """
-    in_path = imp.getProp('Location')
-    out_path = func(hit_endpoint, in_path, params)
-    return ImagePlus(out_path)
\ No newline at end of file
diff --git a/model_server/clients/util.py b/model_server/clients/util.py
deleted file mode 100644
index 423e73808d6ee10eaec739c7a726025aeb0de744..0000000000000000000000000000000000000000
--- a/model_server/clients/util.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import requests
-
-def get_client(host='127.0.0.1', port=8000):
-    """Return a client wrapper for testing in same model_server 3.9 environment as server"""
-    uri = f'http://{host}:{port}'
-
-    def hit_endpoint(method, endpoint, params=None):
-        if method == 'GET':
-            resp = requests.get(uri + endpoint)
-        elif method == 'PUT':
-            resp = requests.put(uri + endpoint, params=params)
-        else:
-            raise Exception('Can only handle GET and PUT requests')
-        if resp.status_code != 200:
-            return {'status': resp.status_code, 'content': resp.text}
-        else:
-            return {'status': resp.status_code, 'content': resp.json()}
-    return hit_endpoint
\ No newline at end of file
diff --git a/model_server/conf/defaults.py b/model_server/conf/defaults.py
index bdf7cfd0cf2786783b8f4c16dafdf7418af05976..ff4f9040ceb9d9d14d947bbbce19185491d490fd 100644
--- a/model_server/conf/defaults.py
+++ b/model_server/conf/defaults.py
@@ -8,8 +8,7 @@ subdirectories = {
     'outbound_images': 'images/outbound',
     'tables': 'tables',
 }
-
 server_conf = {
     'host': '127.0.0.1',
     'port': 8000,
-}
\ No newline at end of file
+}
diff --git a/model_server/conf/fastapi.py b/model_server/conf/fastapi.py
new file mode 100644
index 0000000000000000000000000000000000000000..53edadd80cda52c565a4d97ca99923bac92996e1
--- /dev/null
+++ b/model_server/conf/fastapi.py
@@ -0,0 +1,7 @@
+import importlib
+
+from ..base.api import app
+
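+# attach routers from optional extensions (currently only ilastik) to the base app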
+for ex in ['ilastik']:
+    m = importlib.import_module(f'..extensions.{ex}.router', package=__package__)
+    app.include_router(m.router)
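+
+# Illustrative invocation (assumed, mirroring scripts/run_server.py, which passes this module
+# to uvicorn as 'model_server.conf.fastapi:app'); a manual equivalent would be roughly:
+#   uvicorn model_server.conf.fastapi:app --host 127.0.0.1 --port 8000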
diff --git a/model_server/conf/testing.py b/model_server/conf/testing.py
index 5fdea52941b420f18849aef78454a3b9a200da45..042a95512bb4417efd0e09f1359db11a37e4ff45 100644
--- a/model_server/conf/testing.py
+++ b/model_server/conf/testing.py
@@ -1,85 +1,181 @@
+import json
+import os
+import unittest
+from math import floor
+from multiprocessing import Process
 from pathlib import Path
+from shutil import copyfile
 
-root = Path.home() / 'model_server' / 'testing'
-
-filename = 'D3-selection-01.czi'
-czifile = {
-    'filename': filename,
-    'path': root / filename,
-    'w': 1274,
-    'h': 1274,
-    'c': 5,
-    'z': 1,
-    'um_per_pixel': 1/3.9881,
-}
-
-filename = 'rgb.png'
-rgbpngfile = {
-    'filename': filename,
-    'path': root / filename,
-    'w': 64,
-    'h': 128,
-    'c': 3,
-    'z': 1
-}
-
-filename = 'mono.png'
-monopngfile = {
-    'filename': filename,
-    'path': root / filename,
-    'w': 64,
-    'h': 128,
-    'c': 1,
-    'z': 1
-}
-
-filename = 'zmask-test-stack.tif'
-tifffile = {
-    'filename': filename,
-    'path': root / filename,
-    'w': 512,
-    'h': 512,
-    'c': 2,
-    'z': 7,
-}
-
-filename = 'mono_zstack_mask.tif'
-monozstackmask = {
-    'filename': filename,
-    'path': root / filename,
-    'w': 256,
-    'h': 256,
-    'c': 1,
-    'z': 85
-}
-
-ilastik_classifiers = {
-    'px': root / 'ilastik' / 'demo_px.ilp',
-    'pxmap_to_obj': root / 'ilastik' / 'demo_obj.ilp',
-    'seg_to_obj': root / 'ilastik' / 'demo_obj_seg.ilp',
-    'px_color_zstack': root / 'ilastik' / 'px-3d-color.ilp',
-    'ob_pxmap_color_zstack': root / 'ilastik' / 'ob-pxmap-color-zstack.ilp',
-    'ob_seg_color_zstack': root / 'ilastik' / 'ob-seg-color-zstack.ilp',
-}
-
-roiset_test_data = {
-    'multichannel_zstack': {
-        'path': root / 'zmask-test-stack-chlorophyl.tif',
-        'w': 512,
-        'h': 512,
-        'c': 5,
-        'z': 7,
-        'mask_path': root / 'zmask-test-stack-mask.tif',
-        'mask_path_3d': root / 'zmask-test-stack-mask-3d.tif',
-    },
-    'pipeline_params': {
-        'segmentation_channel': 0,
-        'patches_channel': 4,
-        'pxmap_channel': 0,
-        'pxmap_threshold': 0.6,
-    },
-    'pixel_classifier': root / 'zmask' / 'AF405-bodies_boundaries.ilp',
-}
-
-output_path = root / 'testing_output'
-output_path.mkdir(parents=True, exist_ok=True)
\ No newline at end of file
+import numpy as np
+import requests
+from urllib3 import Retry
+
+from ..base.accessors import GenericImageDataAccessor, InMemoryDataAccessor
+from ..base.models import SemanticSegmentationModel, InstanceSegmentationModel
+
+from ..base.accessors import generate_file_accessor
+
+class TestServerBaseClass(unittest.TestCase):
+    """
+    Base class for unit tests of API functionality; implements both the test server and a client.
+    """
+
+    app_name = 'model_server.base.api:app'
+
+    def setUp(self) -> None:
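+        # start the app under test in a separate uvicorn process so tests exercise real HTTP requests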
+        import uvicorn
+        host = '127.0.0.1'
+        port = 5001
+
+        self.server_process = Process(
+            target=uvicorn.run,
+            args=(self.app_name, ),
+            kwargs={'host': host, 'port': port, 'log_level': 'critical'},
+            daemon=True
+        )
+        self.uri = f'http://{host}:{port}/'
+        self.server_process.start()
+
+    def _get_sesh(self):
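+        # retry briefly on connection errors, since the uvicorn subprocess may still be starting up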
+        sesh = requests.Session()
+        retries = Retry(
+            total=5,
+            backoff_factor=0.1,
+        )
+        sesh.mount('http://', requests.adapters.HTTPAdapter(max_retries=retries))
+        return sesh
+
+    def _get(self, endpoint):
+        return self._get_sesh().get(self.uri + endpoint)
+
+    def _put(self, endpoint, query=None, body=None):
+        return self._get_sesh().put(
+            self.uri + endpoint,
+            params=query,
+            data=json.dumps(body)
+        )
+
+    def tearDown(self) -> None:
+        self.server_process.terminate()
+        self.server_process.join()
+
+    def copy_input_file_to_server(self):
+        resp = self._get('paths')
+        pa = resp.json()['inbound_images']
+        copyfile(
+            self.input_data['path'],
+            Path(pa) / self.input_data['name']
+        )
+        return self.input_data['name']
+
+    def get_accessor(self, accessor_id, filename=None):
+        resp = self._put(f'accessors/write_to_file/{accessor_id}', query={'filename': filename})
+        where_out = self._get('paths').json()['outbound_images']
+        fp_out = (Path(where_out) / resp.json())
+        self.assertTrue(fp_out.exists())
+        return generate_file_accessor(fp_out)
+
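+# Typical usage in a test module (illustrative sketch; class and test names are hypothetical):
+#
+#   import model_server.conf.testing as conf
+#
+#   class TestMyEndpoints(conf.TestServerBaseClass):
+#       app_name = 'model_server.base.api:app'
+#       input_data = conf.meta['image_files']['czifile']
+#
+#       def test_root_responds(self):
+#           self.assertEqual(self._get('').status_code, 200)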
+
+def setup_test_data():
+    """
+    Look for test data, create test output directory, parse and return meta information
+    :return:
+        meta (dict) of test data and paths
+    """
+
+    def _winpath(f):
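+        # map a POSIX-style mount path such as '/g/share/data' to its Windows form 'g:\share\data'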
+        if not isinstance(f, str):
+            return f
+        p = f.split('/')
+        if len(p) > 1:
+            p[1] = p[1] + ':'
+            return '\\'.join(p[1:])
+        else:
+            return f
+
+    # places to look for test data
+    data_paths = [
+        os.environ.get('UNITTEST_DATA_ROOT'),
+        _winpath(os.environ.get('UNITTEST_DATA_ROOT')),
+        Path.home() / 'model_server' / 'testing',
+        os.getcwd(),
+    ]
+    root = None
+
+    # look for first instance of summary.json
+    for dp in data_paths:
+        if dp is None:
+            continue
+        sf = (Path(dp) / 'summary.json')
+        if sf.exists():
+            with open(sf, 'r') as fh:
+                meta = json.load(fh)
+                root = dp
+                break
+
+    if root is None:
+        raise Exception('Could not find test data; try setting the environment variable UNITTEST_DATA_ROOT.')
+
+    meta['root'] = Path(root)
+    meta['output_path'] = meta['root'] / 'test_output'
+    meta['output_path'].mkdir(parents=True, exist_ok=True)
+
+    # resolve relative paths
+    def _resolve_paths(d):
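+        # wherever a 'name' key appears, add a matching absolute 'path' entry under the test data root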
+        keys = list(d.keys())
+        for k in keys:
+            if k == 'name':
+                d['path'] = meta['root'] / d['name']
+            elif isinstance(d[k], dict):
+                _resolve_paths(d[k])
+    _resolve_paths(meta)
+
+    return meta
+
+# object containing test data paths and metadata, for import into unittest modules
+meta = setup_test_data()
+
+
+class DummySemanticSegmentationModel(SemanticSegmentationModel):
+
+    model_id = 'dummy_make_white_square'
+
+    def load(self):
+        return True
+
+    def infer(self, img: GenericImageDataAccessor) -> (GenericImageDataAccessor, dict):
+        super().infer(img)
+        w = img.shape_dict['X']
+        h = img.shape_dict['Y']
+        result = np.zeros([h, w], dtype='uint8')
+        result[floor(0.25 * h) : floor(0.75 * h), floor(0.25 * w) : floor(0.75 * w)] = 255
+        return InMemoryDataAccessor(data=result), {'success': True}
+
+    def label_pixel_class(
+            self, img: GenericImageDataAccessor, **kwargs) -> GenericImageDataAccessor:
+        mask, _ = self.infer(img)
+        return mask
+
+
+class DummyInstanceSegmentationModel(InstanceSegmentationModel):
+
+    model_id = 'dummy_pass_input_mask'
+
+    def load(self):
+        return True
+
+    def infer(
+            self, img: GenericImageDataAccessor, mask: GenericImageDataAccessor
+    ) -> (GenericImageDataAccessor, dict):
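+        # rescale the input mask to values {0, 1} and return it as a label map of the same accessor type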
+        return img.__class__(
+            (mask.data / mask.data.max()).astype('uint16')
+        )
+
+    def label_instance_class(
+            self, img: GenericImageDataAccessor, mask: GenericImageDataAccessor, **kwargs
+    ) -> GenericImageDataAccessor:
+        """
+        Returns a trivial segmentation, i.e. the input mask with value 1
+        """
+        super(DummyInstanceSegmentationModel, self).label_instance_class(img, mask, **kwargs)
+        return self.infer(img, mask)
diff --git a/model_server/extensions/ilastik/models.py b/model_server/extensions/ilastik/models.py
index 6d0335568204c1ebf3230d9f95b29091762a3b45..d098e03983e02c978231ca555826b567726b8dd5 100644
--- a/model_server/extensions/ilastik/models.py
+++ b/model_server/extensions/ilastik/models.py
@@ -1,21 +1,26 @@
 import json
+from logging import getLogger
 import os
 from pathlib import Path
+from typing import Union
+import warnings
 
 import numpy as np
-from pydantic import BaseModel
-from skimage.filters import gaussian
+from pydantic import BaseModel, Field
 import vigra
 
 import model_server.extensions.ilastik.conf
-from model_server.base.accessors import PatchStack
-from model_server.base.accessors import GenericImageDataAccessor, InMemoryDataAccessor
-from model_server.base.process import smooth
-from model_server.base.models import Model, ImageToImageModel, InstanceSegmentationModel, InvalidInputImageError, ParameterExpectedError, SemanticSegmentationModel
+from ...base.accessors import PatchStack
+from ...base.accessors import GenericImageDataAccessor, InMemoryDataAccessor
+from ...base.models import Model, ImageToImageModel, InstanceSegmentationModel, InvalidInputImageError, ParameterExpectedError, SemanticSegmentationModel
 
 class IlastikParams(BaseModel):
-    project_file: str
-    duplicate: bool = True
+    project_file: str = Field(description='(*.ilp) ilastik project filename')
+    duplicate: bool = Field(
+        True,
+        description='Load another instance of the same project file if True; return existing one if False'
+    )
+    model_id: Union[str, None] = Field(None, description='Unique identifier of the model, or autogenerate if empty')
 
 class IlastikModel(Model):
 
@@ -44,8 +49,13 @@ class IlastikModel(Model):
         super().__init__(autoload, params)
 
     def load(self):
-        from ilastik import app
-        from ilastik.applets.dataSelection.opDataSelection import PreloadedArrayDatasetInfo
+        # suppress warnings when loading ilastik app
+        getLogger('ilastik.app').setLevel('ERROR')
+
+        with warnings.catch_warnings():
+            warnings.filterwarnings('ignore', category=DeprecationWarning)
+            from ilastik import app
+            from ilastik.applets.dataSelection.opDataSelection import PreloadedArrayDatasetInfo
 
         self.PreloadedArrayDatasetInfo = PreloadedArrayDatasetInfo
 
@@ -96,12 +106,11 @@ class IlastikModel(Model):
 class IlastikPixelClassifierParams(IlastikParams):
     px_class: int = 0
     px_prob_threshold: float = 0.5
-    px_smoothing: float = 0.0
 
 class IlastikPixelClassifierModel(IlastikModel, SemanticSegmentationModel):
     model_id = 'ilastik_pixel_classification'
     operations = ['segment', ]
-    
+
     def __init__(self, params: IlastikPixelClassifierParams, **kwargs):
         super(IlastikPixelClassifierModel, self).__init__(params, **kwargs)
 
@@ -161,13 +170,8 @@ class IlastikPixelClassifierModel(IlastikModel, SemanticSegmentationModel):
 
     def label_pixel_class(self, img: GenericImageDataAccessor, **kwargs):
         pxmap, _ = self.infer(img)
-        sig = self.params['px_smoothing']
-        if sig > 0.0:
-            proc = smooth(img.data, sig)
-        else:
-            proc = pxmap.data
-        mask = proc[:, :, self.params['px_class'], :] > self.params['px_prob_threshold']
-        return InMemoryDataAccessor(mask)
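+        # select the probability channel of the configured pixel class and threshold it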
+        mask = pxmap.get_mono(self.params['px_class']).apply(lambda x: x > self.params['px_prob_threshold'])
+        return mask
 
 
 class IlastikObjectClassifierFromSegmentationModel(IlastikModel, InstanceSegmentationModel):
@@ -301,7 +305,7 @@ class IlastikObjectClassifierFromPixelPredictionsModel(IlastikModel, ImageToImag
     def label_instance_class(self, img: GenericImageDataAccessor, pxmap: GenericImageDataAccessor, **kwargs):
         """
         Given an image and a map of pixel probabilities of the same shape, return a map where each connected object is
-        assigned a class.
+        assigned a class
         :param img: input image
         :param pxmap: map of pixel probabilities
         :param kwargs:
@@ -311,9 +315,11 @@ class IlastikObjectClassifierFromPixelPredictionsModel(IlastikModel, ImageToImag
         """
         if not img.shape == pxmap.shape:
             raise InvalidInputImageError('Expecting input image and pixel probabilities to be the same shape')
+        if not (pxmap.data.min() >= 0.0 and pxmap.data.max() <= 1.0):
+            raise InvalidInputImageError('Pixel probability values must be between 0.0 and 1.0')
         pxch = kwargs.get('pixel_classification_channel', 0)
         pxtr = kwargs.get('pixel_classification_threshold', 0.5)
-        mask = pxmap.get_mono(pxch).apply(lambda x: x > pxtr)
+        mask = InMemoryDataAccessor(pxmap.get_one_channel_data(pxch).data > pxtr)
         obmap, _ = self.infer(img, mask)
         return obmap
 
diff --git a/model_server/extensions/ilastik/pipelines/px_then_ob.py b/model_server/extensions/ilastik/pipelines/px_then_ob.py
new file mode 100644
index 0000000000000000000000000000000000000000..1aa64615479cad29f01b7d7e416d1fec2f3c9568
--- /dev/null
+++ b/model_server/extensions/ilastik/pipelines/px_then_ob.py
@@ -0,0 +1,69 @@
+from typing import Dict, Union
+
+from fastapi import APIRouter, HTTPException
+from pydantic import Field
+
+from ....base.accessors import GenericImageDataAccessor
+from ....base.models import Model
+from ....base.pipelines.shared import call_pipeline, PipelineTrace, PipelineParams, PipelineRecord
+
+from ..models import IlastikPixelClassifierModel, IlastikObjectClassifierFromPixelPredictionsModel
+
+router = APIRouter(
+    prefix='/pipelines',
+)
+
+class PxThenObParams(PipelineParams):
+    accessor_id: str = Field(description='ID(s) of previously loaded accessor(s) to use as pipeline input')
+    px_model_id: str = Field(description='ID of model for pixel classification')
+    ob_model_id: str = Field(description='ID of model for object classification')
+    channel: Union[int, None] = Field(None, description='Image channel to pass to pixel classification, or all channels if empty')
+    mip: bool = Field(False, description='Use maximum intensity projection of input image if True')
+
+
+class PxThenObRecord(PipelineRecord):
+    pass
+
+@router.put('/pixel_then_object_classification/infer')
+def pixel_then_object_classification(p: PxThenObParams) -> PxThenObRecord:
+    """
+    Workflow that specifically runs an ilastik pixel classifier, then passes results to an object classifier.
+    """
+
+    try:
+        return call_pipeline(pixel_then_object_classification_pipeline, p)
+    except IncompatibleModelsError as e:
+        raise HTTPException(status_code=409, detail=str(e))
+
+
+def pixel_then_object_classification_pipeline(
+        accessors: Dict[str, GenericImageDataAccessor],
+        models: Dict[str, Model],
+        **k
+) -> PxThenObRecord:
+
+    if not isinstance(models['px_model'], IlastikPixelClassifierModel):
+        raise IncompatibleModelsError(
+            'Expecting px_model to be an ilastik pixel classification model'
+        )
+    if not isinstance(models['ob_model'], IlastikObjectClassifierFromPixelPredictionsModel):
+        raise IncompatibleModelsError(
+            'Expecting ob_model to be an ilastik object classification from pixel predictions model'
+        )
+
+    d = PipelineTrace(accessors['accessor'])
+    if (ch := k.get('channel')) is not None:
+        channels = [ch]
+    else:
+        channels = range(0, d['input'].chroma)
+    d['select_channels'] = d.last.get_channels(channels, mip=k.get('mip', False))
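+    # pixel classification runs on the selected channels; its probability map is then fed to the object classifier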
+    d['pxmap'], _ = models['px_model'].infer(d.last)
+    d['ob_map'], _ = models['ob_model'].infer(d['select_channels'], d['pxmap'])
+
+    return d
+
+class Error(Exception):
+    pass
+
+class IncompatibleModelsError(Error):
+    pass
\ No newline at end of file
diff --git a/model_server/extensions/ilastik/router.py b/model_server/extensions/ilastik/router.py
index 491e7e40ab678c34426ef0ae8a846c2fe065ea2f..20a1ef24235c0c4ee3d8cc101ffba9e92d86560b 100644
--- a/model_server/extensions/ilastik/router.py
+++ b/model_server/extensions/ilastik/router.py
@@ -1,73 +1,55 @@
-from fastapi import APIRouter, HTTPException
+from fastapi import APIRouter
 
-from model_server.base.session import Session
-from model_server.base.validators import  validate_workflow_inputs
+from model_server.base.session import session
 
 from model_server.extensions.ilastik import models as ilm
-from model_server.base.models import ParameterExpectedError
-from model_server.extensions.ilastik.workflows import infer_px_then_ob_model
 
 router = APIRouter(
     prefix='/ilastik',
     tags=['ilastik'],
 )
 
-session = Session()
 
+from model_server.extensions.ilastik.pipelines import px_then_ob
+router.include_router(px_then_ob.router)
 
-def load_ilastik_model(model_class: ilm.IlastikModel, params: ilm.IlastikParams) -> dict:
+@router.put('/seg/load/')
+def load_px_model(p: ilm.IlastikPixelClassifierParams) -> dict:
     """
-    Load an ilastik model of a given class and project filename.
-    :param model_class:
-    :param project_file: (*.ilp) ilastik project filename
-    :param duplicate: load another instance of the same project file if True; return existing one if false
-    :return: dict containing model's ID
+    Load an ilastik pixel classifier model from its project file
     """
-    project_file = params.project_file
-    if not params.duplicate:
-        existing_model_id = session.find_param_in_loaded_models('project_file', project_file, is_path=True)
-        if existing_model_id is not None:
-            session.log_info(f'An ilastik model from {project_file} already existing exists; did not load a duplicate')
-            return {'model_id': existing_model_id}
-    result = session.load_model(model_class, params)
-    session.log_info(f'Loaded ilastik model {result} from {project_file}')
-    return {'model_id': result}
-
-@router.put('/seg/load/')
-def load_px_model(params: ilm.IlastikPixelClassifierParams) -> dict:
     return load_ilastik_model(
         ilm.IlastikPixelClassifierModel,
-        params,
+        p,
     )
 
 @router.put('/pxmap_to_obj/load/')
-def load_pxmap_to_obj_model(params: ilm.IlastikParams) -> dict:
+def load_pxmap_to_obj_model(p: ilm.IlastikParams) -> dict:
+    """
+    Load an ilastik object classifier from pixel predictions model from its project file
+    """
     return load_ilastik_model(
         ilm.IlastikObjectClassifierFromPixelPredictionsModel,
-        params,
+        p,
     )
 
 @router.put('/seg_to_obj/load/')
-def load_seg_to_obj_model(params: ilm.IlastikParams) -> dict:
+def load_seg_to_obj_model(p: ilm.IlastikParams) -> dict:
+    """
+    Load an ilastik object classifier from segmentation model from its project file
+    """
     return load_ilastik_model(
         ilm.IlastikObjectClassifierFromSegmentationModel,
-        params,
+        p,
     )
 
-@router.put('/pixel_then_object_classification/infer')
-def infer_px_then_ob_maps(px_model_id: str, ob_model_id: str, input_filename: str, channel: int = None, mip: bool = False) -> dict:
-    inpath = session.paths['inbound_images'] / input_filename
-    validate_workflow_inputs([px_model_id, ob_model_id], [inpath])
-    try:
-        record = infer_px_then_ob_model(
-            inpath,
-            session.models[px_model_id]['object'],
-            session.models[ob_model_id]['object'],
-            session.paths['outbound_images'],
-            channel=channel,
-            mip=mip,
-        )
-        session.log_info(f'Completed pixel and object classification of {input_filename}')
-    except AssertionError:
-        raise HTTPException(f'Incompatible models {px_model_id} and/or {ob_model_id}')
-    return record
\ No newline at end of file
+def load_ilastik_model(model_class: ilm.IlastikModel, p: ilm.IlastikParams) -> dict:
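+    """
+    Load an ilastik model of the given class from its project file; if duplicate is False, reuse a
+    previously loaded model from the same project file.  Returns a dict containing the model's ID.
+    """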
+    project_file = p.project_file
+    if not p.duplicate:
+        existing_model_id = session.find_param_in_loaded_models('project_file', project_file, is_path=True)
+        if existing_model_id is not None:
+            session.log_info(f'An ilastik model from {project_file} already exists; did not load a duplicate')
+            return {'model_id': existing_model_id}
+    result = session.load_model(model_class, key=p.model_id, params=p)
+    session.log_info(f'Loaded ilastik model {result} from {project_file}')
+    return {'model_id': result}
\ No newline at end of file
diff --git a/model_server/extensions/ilastik/workflows.py b/model_server/extensions/ilastik/workflows.py
deleted file mode 100644
index 6f913f652d013145fdcc2a9f91d3ed0db4234eba..0000000000000000000000000000000000000000
--- a/model_server/extensions/ilastik/workflows.py
+++ /dev/null
@@ -1,78 +0,0 @@
-"""
-Implementation of image analysis work behind API endpoints, without knowledge of persistent data in server session.
-"""
-from pathlib import Path
-from typing import Dict
-
-from pydantic import BaseModel
-
-from model_server.extensions.ilastik.models import IlastikPixelClassifierModel, IlastikObjectClassifierFromPixelPredictionsModel
-from model_server.base.accessors import generate_file_accessor, write_accessor_data_to_file
-from model_server.base.workflows import Timer
-
-
-class WorkflowRunRecord(BaseModel):
-    pixel_model_id: str
-    object_model_id: str
-    input_filepath: str
-    pixel_map_filepath: str
-    object_map_filepath: str
-    success: bool
-    timer_results: Dict[str, float]
-
-
-def infer_px_then_ob_model(
-        fpi: Path,
-        px_model: IlastikPixelClassifierModel,
-        ob_model: IlastikObjectClassifierFromPixelPredictionsModel,
-        where_output: Path,
-        channel: int = None,
-        **kwargs
-) -> WorkflowRunRecord:
-    """
-    Workflow that specifically runs an ilastik pixel classifier, then passes results to an object classifier,
-    saving intermediate images
-    :param fpi: Path object that references input image file
-    :param px_model: model instance for pixel classification
-    :param ob_model: model instance for object classification
-    :param where_output: Path object that references output image directory
-    :param channel: input image channel to pass to pixel classification, or all channels if None
-    :param kwargs: variable-length keyword arguments
-    :return:
-    """
-    assert isinstance(px_model, IlastikPixelClassifierModel)
-    assert isinstance(ob_model, IlastikObjectClassifierFromPixelPredictionsModel)
-
-    ti = Timer()
-    raw_acc = generate_file_accessor(fpi)
-    if channel is not None:
-        channels = [channel]
-    else:
-        channels = range(0, raw_acc.chroma)
-    img = raw_acc.get_channels(channels, mip=kwargs.get('mip', False))
-    ti.click('file_input')
-
-    px_map, _ = px_model.infer(img)
-    ti.click('pixel_probability_inference')
-
-    px_map_path = where_output / (px_model.model_id + '_pxmap_' + fpi.stem + '.tif')
-    write_accessor_data_to_file(px_map_path, px_map)
-    ti.click('pixel_map_output')
-
-    ob_map, _ = ob_model.infer(img, px_map)
-    ti.click('object_classification')
-
-    ob_map_path = where_output / (ob_model.model_id + '_obmap_' + fpi.stem + '.tif')
-    write_accessor_data_to_file(ob_map_path, ob_map)
-    ti.click('object_map_output')
-
-    return WorkflowRunRecord(
-        pixel_model_id=px_model.model_id,
-        object_model_id=ob_model.model_id,
-        input_filepath=str(fpi),
-        pixel_map_filepath=str(px_map_path),
-        object_map_filepath=str(ob_map_path),
-        success=True,
-        timer_results=ti.events,
-    )
-
diff --git a/model_server/scripts/__init__.py b/model_server/scripts/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/model_server/scripts/correct_ilastik_abspath.py b/model_server/scripts/correct_ilastik_abspath.py
deleted file mode 100644
index f915bf3f54bcbe01cb097ee7ea9e7b2915eb18c5..0000000000000000000000000000000000000000
--- a/model_server/scripts/correct_ilastik_abspath.py
+++ /dev/null
@@ -1,48 +0,0 @@
-from os.path import relpath
-from pathlib import Path
-import shutil
-
-import h5py
-
-def make_abspath_to_relpath(ilp_filename: str, my_root: Path, their_root: Path):
-    pa_ilp_old = my_root / ilp_filename
-    assert pa_ilp_old.exists()
-    pa_ilp_new = pa_ilp_old.parent / f'relpath_{pa_ilp_old.name}'
-    with h5py.File(shutil.copy(pa_ilp_old, pa_ilp_new), 'r+') as h5:
-        infos = h5['Input Data/infos']
-        for lane in infos.keys():
-            for role in infos[lane].keys():
-                if len(infos[lane][role]) == 0:
-                    continue
-                pa_img_abs = Path(infos[lane][role]['filePath'][()].decode())
-                my_ilp_dir = (my_root / ilp_filename).parent
-                their_ilp_dir = (their_root / ilp_filename).parent
-                pa_img_rel = Path(relpath(pa_img_abs, their_ilp_dir))
-                if pa_img_rel.parts[-2].upper().endswith('.H5'):
-                    assert (my_ilp_dir / Path(*pa_img_rel.parts[0:-1])).exists()
-                else:
-                    assert (my_ilp_dir / pa_img_rel).exists()
-                del infos[lane][role]['filePath']
-                infos[lane][role]['filePath'] = str(pa_img_rel)
-    return pa_ilp_new
-
-
-
-
-if __name__ == '__main__':
-    files = [
-        '01_ilastik_files/240301_LSM900_DNA_PC.ilp',
-        '01_ilastik_files/240320_LSM900_DNA_OC_new.ilp',
-        '01_ilastik_files/240301_LSM900_TM_PC.ilp',
-        '01_ilastik_files/240320_LSM900_TM_OC_new.ilp'
-    ]
-    for f in files:
-        new_ilp = make_abspath_to_relpath(
-            f,
-            Path('w:/03_analysis/Trial3_LSM900'),
-            Path('/g/cuylen/01_Share/Filemaker/01_Experiments/Experiments_1100/1156/03_analysis/Trial3_LSM900')
-        )
-        print(f'Finished converting {new_ilp}')
-
-
-
diff --git a/model_server/scripts/verify_multichannel_ilastik_inputs.py b/model_server/scripts/verify_multichannel_ilastik_inputs.py
deleted file mode 100644
index 1159b3f1ae42d9902474a854c7f85f1134748fe5..0000000000000000000000000000000000000000
--- a/model_server/scripts/verify_multichannel_ilastik_inputs.py
+++ /dev/null
@@ -1,111 +0,0 @@
-from pathlib import Path
-
-import h5py
-import numpy as np
-import pandas as pd
-
-from model_server.base.accessors import generate_file_accessor, write_accessor_data_to_file, InMemoryDataAccessor
-from model_server.extensions.ilastik.models import IlastikPixelClassifierModel, IlastikObjectClassifierFromPixelPredictionsModel
-
-def get_input_files(where_ilp: Path) -> list:
-    files = []
-    with h5py.File(where_ilp, 'r') as h5:
-        infos = h5['Input Data/infos']
-        for lane in infos.keys():
-            lane_dict = {}
-            for role in infos[lane].keys():
-                if len(infos[lane][role]) == 0:
-                    continue
-                rel_path = Path(infos[lane][role]['filePath'][()].decode())
-                lane_dict[role] = where_ilp.parent / rel_path
-            files.append(lane_dict)
-    return files
-
-if __name__ == '__main__':
-    where_out = Path('c:/Users/rhodes/projects/proj0015-model-server/issues/0032_multiple_input_channels/output')
-    root = Path('w:/03_analysis/Trial3_LSM900')
-    max_files = 1
-    ilps = [
-        '01_ilastik_files/relpath_240301_LSM900_DNA_PC.ilp',
-        '01_ilastik_files/relpath_240320_LSM900_DNA_OC_new.ilp',
-        '01_ilastik_files/relpath_240301_LSM900_TM_PC.ilp',
-        '01_ilastik_files/relpath_240320_LSM900_TM_OC_new.ilp'
-    ]
-    records = []
-    for f in ilps:
-        ilp = root / f
-        assert ilp.exists()
-        outdir = where_out / ilp.stem
-        outdir.mkdir(parents=True, exist_ok=True)
-
-        if ilp.stem.upper().endswith('_PC'):
-            mod = IlastikPixelClassifierModel(
-                params={'project_file': str(ilp)},
-                enforce_embedded=False
-            )
-            infiles = get_input_files(ilp)
-            for ln in infiles[0:max_files]:
-                acc_raw = generate_file_accessor(root / ln['Raw Data'])
-                pxmap = mod.infer(acc_raw)[0]
-                pxmap_fn = 'pxmap_' + ln['Raw Data'].stem + '.tif'
-                write_accessor_data_to_file(outdir / pxmap_fn, pxmap)
-                record = {
-                    'classifier': str(ilp.relative_to(root)),
-                    'input_raw_data': str(ln['Raw Data'].relative_to(root)),
-                    'input_raw_data_chroma': acc_raw.chroma,
-                    'input_raw_data_dtype': acc_raw.dtype,
-                    'input_raw_data_shape_dict': acc_raw.shape_dict,
-                    'output_file': pxmap_fn,
-                    'output_dtype': pxmap.dtype,
-                    'output_chroma': pxmap.chroma,
-                    'output_shape_dict': pxmap.shape_dict,
-                }
-                records.append(record)
-
-        elif ilp.stem.upper().endswith('_OC_NEW'):
-            mod = IlastikObjectClassifierFromPixelPredictionsModel(
-                params={'project_file': str(ilp)},
-                enforce_embedded=False
-            )
-            infiles = get_input_files(ilp)
-            for ln in infiles[0:max_files]:
-                acc_raw = generate_file_accessor(root / ln['Raw Data'])
-                pa_pxmap = root / ln['Prediction Maps']
-
-                if pa_pxmap.parts[-2].upper().endswith('.H5'):
-                    pa_h5f = root / Path(*pa_pxmap.parts[0:-1])
-                    h5_key = pa_pxmap.parts[-1]
-                    pxmap_data = h5py.File(pa_h5f)[h5_key][()] # C x Y x X ?
-                    pxmap_yxc = np.moveaxis(
-                        pxmap_data,
-                        [1, 2, 0],
-                        [0, 1, 2]
-                    )
-                    acc_pxmap = InMemoryDataAccessor(np.expand_dims(pxmap_yxc, -1))
-                else:
-                    acc_pxmap = generate_file_accessor(pa_pxmap)
-                obmap = mod.infer(acc_raw, acc_pxmap)[0]
-                obmap_fn = 'obmap_' + ln['Raw Data'].stem + '.tif'
-                write_accessor_data_to_file(outdir / obmap_fn, obmap)
-                record = {
-                    'classifier': str(ilp.relative_to(root)),
-                    'input_raw_data': str(ln['Raw Data'].relative_to(root)),
-                    'input_raw_data_chroma': acc_raw.chroma,
-                    'input_raw_data_dtype': acc_raw.dtype,
-                    'input_raw_data_shape_dict': acc_raw.shape_dict,
-                    'input_pxmap': str(ln['Prediction Maps'].relative_to(root)),
-                    'input_pxmap_chroma': acc_pxmap.chroma,
-                    'input_pxmap_dtype': acc_pxmap.dtype,
-                    'input_pxmap_shape_dict': acc_pxmap.shape_dict,
-                    'output_file': obmap_fn,
-                    'output_dtype': obmap.dtype,
-                    'output_chroma': obmap.chroma,
-                    'output_shape_dict': obmap.shape_dict,
-                }
-                records.append(record)
-
-        else:
-            raise Exception(f'unidentified project file {ilp}')
-
-    pd.DataFrame(records).to_csv(where_out / 'record.csv', index=False)
-    print('Finished')
diff --git a/pyproject.toml b/pyproject.toml
index 15b6a17c664e94f3c47256e16d5000d0cb1c5f79..882ad79c5a6e8bf5d99116212482db65478df8a3 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -2,15 +2,36 @@
 requires = ["setuptools>=61.0"]
 build-backend = "setuptools.build_meta"
 
+
 [project]
-name = "model_server_package_rhodes"
-version = "0.0.1"
+name = "model_server"
+license = {file = "LICENSE"}
+version = "2024.10.01"
 authors = [
   { name="Christopher Rhodes", email="christopher.rhodes@embl.de" },
 ]
 description = "Service for analyzing microscope images"
 readme = "README.md"
 requires-python = ">=3.9"
+dependencies = [
+  "czifile",
+  "fastapi >=0.101",
+  "ilastik ==1.4.1b6",
+  "imagecodecs",
+  "jupyterlab",
+  "matplotlib",
+  "numpy",
+  "pandas",
+  "pillow",
+  "protobuf ==4.25.3",
+  "pydantic ~=1.10.1",
+  "pytorch ==1.*",
+  "scikit-image >=0.21.0",
+  "scikit-learn >=1.5.0",
+  "tifffile",
+  "uvicorn >=0.23.0",
+  "zstd",
+]
 
 [project.urls]
 Homepage = "https://git.embl.de/rhodes/model_server"
diff --git a/requirements.yml b/requirements.yml
index b1447ee900919d005cefd6345e77b169ce925c80..fa5b53ab6818d693eeb428cc3008de839412a8c1 100644
--- a/requirements.yml
+++ b/requirements.yml
@@ -1,272 +1,22 @@
-name: model_server_env
+name: model_server
 channels:
   - pytorch
   - ilastik-forge
   - conda-forge
-  - defaults
 dependencies:
-  - affogato=0.3.3
-  - annotated-types=0.5.0
-  - anyio=3.7.1
-  - aom=3.5.0
-  - appdirs=1.4.4
-  - attrs=23.1.0
-  - bioimageio.core=0.5.8
-  - bioimageio.spec=0.4.9
-  - blas=2.118
-  - blas-devel=3.9.0
-  - blosc=1.21.5
-  - boost=1.74.0
-  - boost-cpp=1.74.0
-  - brotli=1.0.9
-  - brotli-bin=1.0.9
-  - brotli-python=1.0.9
-  - bzip2=1.0.8
-  - c-blosc2=2.10.2
-  - ca-certificates=2023.7.22
-  - cached-property=1.5.2
-  - cached_property=1.5.2
-  - cachetools=5.3.1
-  - cairo=1.16.0
-  - certifi=2023.7.22
-  - cfitsio=4.2.0
-  - charls=2.3.4
-  - charset-normalizer=3.2.0
-  - click=8.1.7
-  - colorama=0.4.6
-  - contourpy=1.1.0
-  - cycler=0.11.0
-  - czifile=2019.7.2
-  - dav1d=1.2.1
-  - dill=0.3.7
-  - dpct=1.2.post39
-  - et_xmlfile=1.1.0
-  - exceptiongroup=1.1.3
-  - expat=2.5.0
-  - fastapi=0.101.1
-  - fastfilters=0.2.4.post83
-  - fftw=3.3.10
-  - font-ttf-dejavu-sans-mono=2.37
-  - font-ttf-inconsolata=3.000
-  - font-ttf-source-code-pro=2.038
-  - font-ttf-ubuntu=0.83
-  - fontconfig=2.14.2
-  - fonts-conda-ecosystem=1
-  - fonts-conda-forge=1
-  - fonttools=4.42.1
-  - freetype=2.12.1
-  - fribidi=1.0.10
-  - fs=2.4.16
-  - future=0.18.3
-  - getopt-win32=0.1
-  - gettext=0.21.1
-  - giflib=5.2.1
-  - glib=2.78.0
-  - glib-tools=2.78.0
-  - glpk=5.0
-  - graphite2=1.3.13
-  - graphviz=8.1.0
-  - greenlet=2.0.2
-  - grpcio=1.41.1
-  - gst-plugins-base=1.22.5
-  - gstreamer=1.22.5
-  - gts=0.7.6
-  - h11=0.14.0
-  - h5py=3.8.0
-  - harfbuzz=6.0.0
-  - hdf5=1.12.2
-  - hytra=1.1.5
-  - icu=70.1
-  - idna=3.4
-  - ilastik=1.4.1b6
-  - ilastik-core=1.4.1b6
-  - ilastik-feature-selection=0.2.0
-  - ilastikrag=0.1.4
-  - ilastiktools=0.2.post37
-  - imagecodecs=2022.9.26
-  - imagecodecs-lite=2019.12.3
-  - imageio=2.31.1
-  - imath=3.1.6
-  - importlib-resources=6.0.1
-  - importlib_resources=6.0.1
-  - inferno=v0.4.2
-  - intel-openmp=2023.2.0
-  - joblib=1.3.2
-  - jpeg=9e
-  - jsonschema=4.19.0
-  - jsonschema-specifications=2023.7.1
-  - jxrlib=1.1
-  - kiwisolver=1.4.5
-  - krb5=1.20.1
-  - lazy_loader=0.3
-  - lcms2=2.14
-  - lemon=1.3.1
-  - lerc=4.0.0
-  - libabseil=20230802.0
-  - libaec=1.0.6
-  - libavif=0.11.1
-  - libblas=3.9.0
-  - libbrotlicommon=1.0.9
-  - libbrotlidec=1.0.9
-  - libbrotlienc=1.0.9
-  - libcblas=3.9.0
-  - libclang=15.0.7
-  - libclang13=15.0.7
-  - libcurl=8.1.2
-  - libdeflate=1.14
-  - libexpat=2.5.0
-  - libffi=3.4.2
-  - libgd=2.3.3
-  - libglib=2.78.0
-  - libhwloc=2.9.2
-  - libiconv=1.17
-  - liblapack=3.9.0
-  - liblapacke=3.9.0
-  - libogg=1.3.4
-  - libpng=1.6.39
-  - libprotobuf=4.23.4
-  - libsqlite=3.43.0
-  - libssh2=1.11.0
-  - libtiff=4.4.0
-  - libuv=1.44.2
-  - libvorbis=1.3.7
-  - libwebp=1.3.1
-  - libwebp-base=1.3.1
-  - libxcb=1.13
-  - libxml2=2.11.5
-  - libzlib=1.2.13
-  - libzopfli=1.0.3
-  - llvmlite=0.40.1
-  - lz4-c=1.9.4
-  - m2w64-gcc-libgfortran=5.3.0
-  - m2w64-gcc-libs=5.3.0
-  - m2w64-gcc-libs-core=5.3.0
-  - m2w64-gmp=6.1.0
-  - m2w64-libwinpthread-git=5.0.0.4634.697f757
-  - mamutexport=0.2.1.post6
-  - marching_cubes=0.3.post9
-  - markdown-it-py=3.0.0
-  - marshmallow=3.20.1
-  - marshmallow-jsonschema=0.13.0
-  - marshmallow-union=0.1.15.post1
-  - matplotlib-base=3.7.2
-  - mdurl=0.1.0
-  - mkl=2022.1.0
-  - mkl-devel=2022.1.0
-  - mkl-include=2022.1.0
-  - mrcfile=1.4.3
-  - msys2-conda-epoch=20160418
-  - munkres=1.1.4
-  - ndstructs=0.0.5dev0
-  - networkx=3.1
-  - nifty=1.2.1
-  - numba=0.57.1
-  - numpy=1.22.4
-  - openexr=3.1.5
-  - openjpeg=2.5.0
-  - openpyxl=3.1.2
-  - openssl=3.1.2
-  - packaging=23.1
-  - pandas=1.5.3
-  - pango=1.50.14
-  - pcre2=10.40
-  - pillow=9.2.0
-  - pip=23.2.1
-  - pixman=0.40.0
-  - pkgutil-resolve-name=1.3.10
-  - platformdirs=3.10.0
-  - ply=3.11
-  - pooch=1.7.0
-  - protobuf=4.23.4
-  - psutil=5.9.5
-  - pthread-stubs=0.4
-  - pthreads-win32=2.9.1
-  - pydantic=1.10.2
-  - pydantic-core=2.6.3
-  - pygments=2.16.1
-  - pyopengl=3.1.6
-  - pyparsing=3.0.9
-  - pyqt=5.15.9
-  - pyqt5-sip=12.12.2
-  - pyqtgraph=0.13.3
-  - pysocks=1.7.1
-  - python=3.9.18
-  - python-dateutil=2.8.2
-  - python-elf=0.4.7
-  - python-stdnum=1.19
-  - python_abi=3.9
-  - pytorch=1.13.1
-  - pytorch-mutex=1.0
-  - pytz=2023.3.post1
-  - pywavelets=1.4.1
-  - pyyaml=6.0.1
-  - qimage2ndarray=1.8.3
-  - qt-main=5.15.8
-  - referencing=0.30.2
-  - requests=2.31.0
-  - rich=13.5.1
-  - rpds-py=0.10.2
-  - ruamel.yaml=0.17.32
-  - ruamel.yaml.clib=0.2.7
-  - scikit-image=0.21.0
-  - scikit-learn=1.3.0
-  - scipy=1.11.2
-  - setuptools=68.1.2
-  - shellingham=1.5.3
-  - sip=6.7.11
-  - six=1.16.0
-  - skan=0.11.0
-  - snappy=1.1.10
-  - sniffio=1.3.0
-  - starlette=0.27.0
-  - tbb=2021.10.0
-  - tensorboardx=2.6.2.2
-  - threadpoolctl=3.2.0
-  - tifffile=2022.10.10
-  - tiktorch=23.6.0
-  - tk=8.6.12
-  - toml=0.10.2
-  - tomli=2.0.1
-  - toolz=0.12.0
-  - torchvision=0.14.1
-  - tqdm=4.66.1
-  - typer=0.9.0
-  - typing-extensions=4.7.1
-  - typing_extensions=4.7.1
-  - tzdata=2023c
-  - ucrt=10.0.22621.0
-  - unicodedata2=15.0.0
-  - urllib3=2.0.4
-  - uvicorn=0.23.2
-  - vc=14.3
-  - vc14_runtime=14.36.32532
-  - vigra=1.11.1
-  - volumina=1.3.10
-  - vs2015_runtime=14.36.32532
-  - wheel=0.41.2
-  - win_inet_pton=1.1.0
-  - xarray=2023.8.0
-  - xorg-kbproto=1.0.7
-  - xorg-libice=1.0.10
-  - xorg-libsm=1.2.3
-  - xorg-libx11=1.8.4
-  - xorg-libxau=1.0.11
-  - xorg-libxdmcp=1.1.3
-  - xorg-libxext=1.3.4
-  - xorg-libxpm=3.5.16
-  - xorg-libxt=1.3.0
-  - xorg-xextproto=7.3.0
-  - xorg-xproto=7.0.31
-  - xz=5.2.6
-  - yaml=0.2.5
-  - yapsy=1.12.2
-  - z5py=2.0.16
-  - zfp=1.0.0
-  - zipp=3.16.2
-  - zlib=1.2.13
-  - zlib-ng=2.0.7
-  - zstd=1.5.5
-  - pip:
-      - build==1.0.3
-      - importlib-metadata==7.0.0
-      - pyproject-hooks==1.0.0
+  - czifile
+  - fastapi>=0.101
+  - ilastik=1.4.1b15
+  - imagecodecs
+  - jupyterlab
+  - matplotlib
+  - numpy
+  - pandas
+  - pillow
+  - pydantic=1.10.*
+  - pytorch=1.*
+  - scikit-image>=0.21.0
+  - scikit-learn>=1.5.0
+  - tifffile
+  - uvicorn>=0.23.0
+  - zstd
diff --git a/model_server/scripts/run_server.py b/scripts/run_server.py
similarity index 88%
rename from model_server/scripts/run_server.py
rename to scripts/run_server.py
index 2ca5e559019e3444c3f135450db749736e88bf29..4da04d5dde8e851e6c2e2aed586e8b94f2bb1c02 100644
--- a/model_server/scripts/run_server.py
+++ b/scripts/run_server.py
@@ -1,5 +1,6 @@
 import argparse
 from multiprocessing import Process
+from pathlib import Path
 import requests
 from requests.adapters import HTTPAdapter
 from urllib3 import Retry
@@ -8,10 +9,16 @@ import webbrowser
 
 from model_server.conf.defaults import server_conf
 
+
 def parse_args():
     parser = argparse.ArgumentParser(
         description='Start model server with optional arguments',
     )
+    parser.add_argument(
+        '--confpath',
+        default='model_server.conf.fastapi',
+        help='path to server startup configuration',
+    )
     parser.add_argument(
         '--host',
         default=server_conf['host'],
@@ -35,14 +42,15 @@ def parse_args():
     return parser.parse_args()
 
 
-def main(args, app_name='model_server.base.api:app') -> None:
+
+def main(args) -> None:
 
     print('CLI args:\n' + str(args))
     server_process = Process(
         target=uvicorn.run,
-        args=(app_name,),
+        args=(f'{args.confpath}:app',),
         kwargs={
-            'app_dir': '.',
+            'app_dir': '..',
             'host': args.host,
             'port': int(args.port),
             'log_level': 'debug',
diff --git a/start_server.bat b/start_server.bat
deleted file mode 100644
index c1df06b441881f96b02d9f53bf9ca69f99ae0a42..0000000000000000000000000000000000000000
--- a/start_server.bat
+++ /dev/null
@@ -1,11 +0,0 @@
-@echo off
-
-set actbat=%userprofile%\miniforge-pyp3\Scripts\activate.bat
-set pyscripts=%userprofile%\model_server
-
-call %actbat%
-call mamba activate model_server_env
-cd %pyscripts%
-call python -m model_server.scripts.run_server --port 6221
-
-pause
\ No newline at end of file
diff --git a/model_server/clients/imagej/__init__.py b/tests/base/__init__.py
similarity index 100%
rename from model_server/clients/imagej/__init__.py
rename to tests/base/__init__.py
diff --git a/tests/test_accessors.py b/tests/base/test_accessors.py
similarity index 72%
rename from tests/test_accessors.py
rename to tests/base/test_accessors.py
index 014eda2c283ce9297f3084513abad66497303cbd..1df5863c71f1d157d612e74ba83a822632d9f221 100644
--- a/tests/test_accessors.py
+++ b/tests/base/test_accessors.py
@@ -4,8 +4,11 @@ import numpy as np
 
 from model_server.base.accessors import PatchStack, make_patch_stack_from_file, FileNotFoundError
 
-from model_server.conf.testing import czifile, output_path, monopngfile, rgbpngfile, tifffile, monozstackmask
 from model_server.base.accessors import CziImageFileAccessor, DataShapeError, generate_file_accessor, InMemoryDataAccessor, PngFileAccessor, write_accessor_data_to_file, TifSingleSeriesFileAccessor
+import model_server.conf.testing as conf
+
+data = conf.meta['image_files']
+output_path = conf.meta['output_path']
 
 def _random_int(*args):
     return np.random.randint(0, 2 ** 8, size=args, dtype='uint8')
@@ -16,27 +19,27 @@ class TestCziImageFileAccess(unittest.TestCase):
         pass
 
     def test_tiffile_is_correct_shape(self):
-        tf = generate_file_accessor(tifffile['path'])
+        tf = generate_file_accessor(data['tifffile']['path'])
 
         self.assertIsInstance(tf, TifSingleSeriesFileAccessor)
-        self.assertEqual(tf.shape_dict['Y'], tifffile['h'])
-        self.assertEqual(tf.shape_dict['X'], tifffile['w'])
-        self.assertEqual(tf.chroma, tifffile['c'])
+        self.assertEqual(tf.shape_dict['Y'], data['tifffile']['h'])
+        self.assertEqual(tf.shape_dict['X'], data['tifffile']['w'])
+        self.assertEqual(tf.chroma, data['tifffile']['c'])
         self.assertTrue(tf.is_3d())
         self.assertEqual(len(tf.data.shape), 4)
-        self.assertEqual(tf.shape[0], tifffile['h'])
-        self.assertEqual(tf.shape[1], tifffile['w'])
+        self.assertEqual(tf.shape[0], data['tifffile']['h'])
+        self.assertEqual(tf.shape[1], data['tifffile']['w'])
         self.assertEqual(tf.get_axis('x'), 1)
 
     def test_czifile_is_correct_shape(self):
-        cf = CziImageFileAccessor(czifile['path'])
-        self.assertEqual(cf.shape_dict['Y'], czifile['h'])
-        self.assertEqual(cf.shape_dict['X'], czifile['w'])
-        self.assertEqual(cf.chroma, czifile['c'])
+        cf = CziImageFileAccessor(data['czifile']['path'])
+        self.assertEqual(cf.shape_dict['Y'], data['czifile']['h'])
+        self.assertEqual(cf.shape_dict['X'], data['czifile']['w'])
+        self.assertEqual(cf.chroma, data['czifile']['c'])
         self.assertFalse(cf.is_3d())
         self.assertEqual(len(cf.data.shape), 4)
-        self.assertEqual(cf.shape[0], czifile['h'])
-        self.assertEqual(cf.shape[1], czifile['w'])
+        self.assertEqual(cf.shape[0], data['czifile']['h'])
+        self.assertEqual(cf.shape[1], data['czifile']['w'])
 
     def test_get_single_channel_from_zstack(self):
         w = 256
@@ -58,9 +61,66 @@ class TestCziImageFileAccess(unittest.TestCase):
         sc = cf.get_mono(c, mip=True)
         self.assertEqual(sc.shape, (h, w, 1, 1))
 
+    def test_get_single_channel_argmax_from_zstack(self):
+        w = 256
+        h = 512
+        nc = 4
+        nz = 11
+        c = 3
+        cf = InMemoryDataAccessor(np.random.rand(h, w, nc, nz))
+        am = cf.get_mono(c).get_z_argmax()
+        self.assertEqual(am.shape, (h, w, 1, 1))
+        self.assertTrue(np.all(am.unique()[0] == range(0, nz)))
+
+    def test_get_single_channel_z_series_from_zstack(self):
+        w = 256
+        h = 512
+        nc = 4
+        nz = 11
+        c = 3
+        cf = InMemoryDataAccessor(np.random.rand(h, w, nc, nz))
+        zs = cf.get_mono(c).get_focus_vector()
+        self.assertEqual(zs.shape, (nz, ))
+
+    def test_get_zi(self):
+        w = 256
+        h = 512
+        nc = 4
+        nz = 11
+        zi = 5
+        cf = InMemoryDataAccessor(_random_int(h, w, nc, nz))
+        sz = cf.get_zi(zi)
+        self.assertEqual(sz.shape_dict['Z'], 1)
+
+        self.assertTrue(np.all(sz.data[:, :, :, 0] == cf.data[:, :, :, zi]))
+
+    def test_get_mip(self):
+        w = 256
+        h = 512
+        nc = 4
+        nz = 11
+        zi = 5
+        cf = InMemoryDataAccessor(_random_int(h, w, nc, nz))
+        sm = cf.get_mip()
+        self.assertEqual(sm.shape_dict['Z'], 1)
+        self.assertTrue(np.all(cf.data.max(axis=-1, keepdims=True) == sm.data))
+
+    def test_crop_yx(self):
+        w = 256
+        h = 512
+        nc = 4
+        nz = 11
+        cf = InMemoryDataAccessor(_random_int(h, w, nc, nz))
+
+        yxhw = (100, 200, 10, 20)
+        sc = cf.crop_hw(yxhw)
+        self.assertEqual(sc.shape_dict['Z'], nz)
+        self.assertEqual(sc.shape_dict['C'], nc)
+        self.assertEqual(sc.hw, yxhw[2:])
+
     def test_write_single_channel_tif(self):
         ch = 4
-        cf = CziImageFileAccessor(czifile['path'])
+        cf = CziImageFileAccessor(data['czifile']['path'])
         mono = cf.get_mono(ch)
         self.assertTrue(
             write_accessor_data_to_file(
@@ -73,7 +133,7 @@ class TestCziImageFileAccess(unittest.TestCase):
 
     def test_write_two_channel_png(self):
         from model_server.base.process import resample_to_8bit
-        cf = CziImageFileAccessor(czifile['path'])
+        cf = CziImageFileAccessor(data['czifile']['path'])
         acc = cf.get_channels([0, 1])
         opa = output_path / f'{cf.fpath.stem}_2ch.png'
         acc_out = acc.apply(resample_to_8bit)
@@ -129,23 +189,23 @@ class TestCziImageFileAccess(unittest.TestCase):
         fh_shape_dict = {se.axes[i]: se.shape[i] for i in range(0, len(se.shape))}
         self.assertEqual(fh_shape_dict, acc.shape_dict, 'Axes are not preserved in TIF output')
 
-    def test_read_png(self, pngfile=rgbpngfile):
+    def test_read_png(self, pngfile=data['rgbpngfile']):
         acc = PngFileAccessor(pngfile['path'])
         self.assertEqual(acc.hw, (pngfile['h'], pngfile['w']))
         self.assertEqual(acc.chroma, pngfile['c'])
         self.assertEqual(acc.nz, 1)
 
     def test_read_mono_png(self):
-        return self.test_read_png(pngfile=monopngfile)
+        return self.test_read_png(pngfile=data['monopngfile'])
 
     def test_read_zstack_mono_mask(self):
-        acc = generate_file_accessor(monozstackmask['path'])
+        acc = generate_file_accessor(data['monozstackmask']['path'])
         self.assertTrue(acc.is_mask())
 
     def test_read_in_pixel_scale_from_czi(self):
-        cf = CziImageFileAccessor(czifile['path'])
+        cf = CziImageFileAccessor(data['czifile']['path'])
         pxs = cf.pixel_scale_in_micrometers
-        self.assertAlmostEqual(pxs['X'], czifile['um_per_pixel'], places=3)
+        self.assertAlmostEqual(pxs['X'], data['czifile']['um_per_pixel'], places=3)
 
 
 class TestPatchStackAccessor(unittest.TestCase):
@@ -175,12 +235,12 @@ class TestPatchStackAccessor(unittest.TestCase):
 
 
     def test_make_patch_stack_from_file(self):
-        h = monozstackmask['h']
-        w = monozstackmask['w']
-        c = monozstackmask['c']
-        n = monozstackmask['z']
+        h = data['monozstackmask']['h']
+        w = data['monozstackmask']['w']
+        c = data['monozstackmask']['c']
+        n = data['monozstackmask']['z']
 
-        acc = make_patch_stack_from_file(monozstackmask['path'])
+        acc = make_patch_stack_from_file(data['monozstackmask']['path'])
         self.assertEqual(acc.hw, (h, w))
         self.assertEqual(acc.count, n)
         self.assertEqual(acc.pyxcz.shape, (n, h, w, c, 1))
diff --git a/tests/base/test_api.py b/tests/base/test_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..e31b4366f3d4baa9371fcd80c3338cc2c5fbb013
--- /dev/null
+++ b/tests/base/test_api.py
@@ -0,0 +1,262 @@
+from pathlib import Path
+
+from fastapi import APIRouter, FastAPI
+import numpy as np
+from pydantic import BaseModel
+
+import model_server.conf.testing as conf
+from model_server.base.accessors import InMemoryDataAccessor
+from model_server.base.api import app
+from model_server.base.session import session
+from model_server.conf.testing import DummySemanticSegmentationModel, DummyInstanceSegmentationModel
+
+czifile = conf.meta['image_files']['czifile']
+
+"""
+Configure additional endpoints for testing
+"""
+test_router = APIRouter(prefix='/testing', tags=['testing'])
+
+class BounceBackParams(BaseModel):
+    par1: str
+    par2: list
+
+@test_router.put('/bounce_back')
+def list_bounce_back(params: BounceBackParams):
+    return {'success': True, 'params': {'par1': params.par1, 'par2': params.par2}}
+
+@test_router.put('/accessors/dummy_accessor/load')
+def load_dummy_accessor() -> str:
+    acc = InMemoryDataAccessor(
+        np.random.randint(
+            0,
+            2 ** 8,
+            size=(512, 256, 3, 7),
+            dtype='uint8'
+        )
+    )
+    return session.add_accessor(acc)
+
+@test_router.put('/models/dummy_semantic/load/')
+def load_dummy_semantic_model() -> dict:
+    mid = session.load_model(DummySemanticSegmentationModel)
+    session.log_info(f'Loaded model {mid}')
+    return {'model_id': mid}
+
+@test_router.put('/models/dummy_instance/load/')
+def load_dummy_instance_model() -> dict:
+    mid = session.load_model(DummyInstanceSegmentationModel)
+    session.log_info(f'Loaded model {mid}')
+    return {'model_id': mid}
+
+app.include_router(test_router)
+
+"""
+Implement unit testing on extended base app
+"""
+
+
+class TestServerTestCase(conf.TestServerBaseClass):
+    app_name = 'tests.base.test_api:app'
+    input_data = czifile
+
+
+class TestApiFromAutomatedClient(TestServerTestCase):
+    def test_trivial_api_response(self):
+        resp = self._get('')
+        self.assertEqual(resp.status_code, 200)
+
+    def test_bounceback_parameters(self):
+        resp = self._put('testing/bounce_back', body={'par1': 'hello', 'par2': ['ab', 'cd']})
+        self.assertEqual(resp.status_code, 200, resp.content)
+        self.assertEqual(resp.json()['params']['par1'], 'hello', resp.json())
+        self.assertEqual(resp.json()['params']['par2'], ['ab', 'cd'], resp.json())
+
+    def test_default_session_paths(self):
+        import model_server.conf.defaults
+        resp = self._get('paths')
+        conf_root = model_server.conf.defaults.root
+        for p in ['inbound_images', 'outbound_images', 'logs']:
+            self.assertTrue(resp.json()[p].startswith(conf_root.__str__()))
+            suffix = Path(model_server.conf.defaults.subdirectories[p]).__str__()
+            self.assertTrue(resp.json()[p].endswith(suffix))
+
+    def test_list_empty_loaded_models(self):
+        resp = self._get('models')
+        self.assertEqual(resp.status_code, 200)
+        self.assertEqual(resp.content, b'{}')
+
+    def test_load_dummy_semantic_model(self):
+        resp_load = self._put(f'testing/models/dummy_semantic/load')
+        model_id = resp_load.json()['model_id']
+        self.assertEqual(resp_load.status_code, 200, resp_load.json())
+        resp_list = self._get('models')
+        self.assertEqual(resp_list.status_code, 200)
+        rj = resp_list.json()
+        self.assertEqual(rj[model_id]['class'], 'DummySemanticSegmentationModel')
+        return model_id
+
+    def test_load_dummy_instance_model(self):
+        resp_load = self._put(f'testing/models/dummy_instance/load')
+        model_id = resp_load.json()['model_id']
+        self.assertEqual(resp_load.status_code, 200, resp_load.json())
+        resp_list = self._get('models')
+        self.assertEqual(resp_list.status_code, 200)
+        rj = resp_list.json()
+        self.assertEqual(rj[model_id]['class'], 'DummyInstanceSegmentationModel')
+        return model_id
+
+    def test_respond_with_error_when_invalid_filepath_requested(self):
+        model_id = self.test_load_dummy_semantic_model()
+
+        resp = self._put(
+            f'infer/from_image_file',
+            query={'model_id': model_id, 'input_filename': 'not_a_real_file.name'}
+        )
+        self.assertEqual(resp.status_code, 404, resp.content.decode())
+
+    def test_pipeline_errors_when_ids_not_found(self):
+        fname = self.copy_input_file_to_server()
+        model_id = self._put(f'testing/models/dummy_semantic/load').json()['model_id']
+        in_acc_id = self._put(f'accessors/read_from_file/{fname}').json()
+
+        # respond with 409 for invalid accessor_id
+        self.assertEqual(
+            self._put(
+                f'pipelines/segment',
+                body={'model_id': model_id, 'accessor_id': 'fake'}
+            ).status_code,
+            409
+        )
+
+        # respond with 409 for invalid model_id
+        self.assertEqual(
+            self._put(
+                f'pipelines/segment',
+                body={'model_id': 'fake', 'accessor_id': in_acc_id}
+            ).status_code,
+            409
+        )
+
+
+    def test_i2i_dummy_inference_by_api(self):
+        fname = self.copy_input_file_to_server()
+        model_id = self._put(f'testing/models/dummy_semantic/load').json()['model_id']
+        in_acc_id = self._put(f'accessors/read_from_file/{fname}').json()
+
+        # run segmentation pipeline on preloaded accessor
+        resp_infer = self._put(
+            f'pipelines/segment',
+            body={
+                'accessor_id': in_acc_id,
+                'model_id': model_id,
+                'channel': 2,
+                'keep_interm': True,
+            },
+        )
+        self.assertEqual(resp_infer.status_code, 200, resp_infer.content.decode())
+        out_acc_id = resp_infer.json()['output_accessor_id']
+        self.assertTrue(self._get(f'accessors/{out_acc_id}').json()['loaded'])
+        acc_out = self.get_accessor(out_acc_id, 'dummy_semantic_output.tif')
+        self.assertEqual(acc_out.shape_dict['C'], 1)
+
+        # validate intermediate data
+        resp_list = self._get(f'accessors').json()
+        self.assertEqual(len([k for k in resp_list.keys() if '_step' in k]), 2)
+
+    def test_restarting_session_clears_loaded_models(self):
+        resp_load = self._put(f'testing/models/dummy_semantic/load',)
+        self.assertEqual(resp_load.status_code, 200, resp_load.json())
+        resp_list_0 = self._get('models')
+        self.assertEqual(resp_list_0.status_code, 200)
+        rj0 = resp_list_0.json()
+        self.assertEqual(len(rj0), 1, f'Unexpected models in response: {rj0}')
+        self._get('session/restart')
+        resp_list_1 = self._get('models')
+        rj1 = resp_list_1.json()
+        self.assertEqual(len(rj1), 0, f'Unexpected models in response: {rj1}')
+
+    def test_change_inbound_path(self):
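+        # point the outbound directory at the inbound directory and confirm the two paths now match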
+        resp_inpath = self._get('paths')
+        resp_change = self._put(
+            'paths/watch_output',
+            query={'path': resp_inpath.json()['inbound_images']}
+        )
+        self.assertEqual(resp_change.status_code, 200)
+        resp_check = self._get('paths')
+        self.assertEqual(resp_check.json()['inbound_images'], resp_check.json()['outbound_images'])
+
+    def test_exception_when_changing_inbound_path(self):
+        resp_inpath = self._get('paths')
+        fakepath = 'c:/fake/path/to/nowhere'
+        resp_change = self._put(
+            'paths/watch_output',
+            query={'path': fakepath}
+        )
+        self.assertEqual(resp_change.status_code, 404)
+        self.assertIn(fakepath, resp_change.json()['detail'])
+        resp_check = self._get('paths')
+        self.assertEqual(resp_inpath.json()['outbound_images'], resp_check.json()['outbound_images'])
+
+    def test_no_change_inbound_path(self):
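+        # re-assigning the outbound directory to its current value should succeed without changing anything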
+        resp_inpath = self._get('paths')
+        resp_change = self._put(
+            'paths/watch_output',
+            query={'path': resp_inpath.json()['outbound_images']}
+        )
+        self.assertEqual(resp_change.status_code, 200)
+        resp_check = self._get('paths')
+        self.assertEqual(resp_inpath.json()['outbound_images'], resp_check.json()['outbound_images'])
+
+    def test_get_logs(self):
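+        # the session writes an 'Initialized session' entry to its log on startup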
+        resp = self._get('session/logs')
+        self.assertEqual(resp.status_code, 200)
+        self.assertEqual(resp.json()[0]['message'], 'Initialized session')
+
+    def test_add_and_delete_accessor(self):
+        fname = self.copy_input_file_to_server()
+
+        # add accessor to session
+        resp_add_acc = self._put(
+            f'accessors/read_from_file/{fname}',
+        )
+        acc_id = resp_add_acc.json()
+        self.assertTrue(acc_id.startswith('auto_'))
+
+        # confirm that accessor is listed in session context
+        resp_list_acc = self._get(
+            'accessors',
+        )
+        self.assertEqual(len(resp_list_acc.json()), 1)
+        self.assertTrue(list(resp_list_acc.json().keys())[0].startswith('auto_'))
+        self.assertTrue(resp_list_acc.json()[acc_id]['loaded'])
+
+        # delete and check that its 'loaded' state changes
+        self.assertTrue(self._get(f'accessors/{acc_id}').json()['loaded'])
+        self.assertEqual(self._get(f'accessors/delete/{acc_id}').json(), acc_id)
+        self.assertFalse(self._get(f'accessors/{acc_id}').json()['loaded'])
+
+        # and try a non-existent accessor ID
+        resp_wrong_acc = self._get('accessors/auto_123456')
+        self.assertEqual(resp_wrong_acc.status_code, 404)
+
+        # load another... then remove all
+        self._put(f'accessors/read_from_file/{fname}')
+        self.assertEqual(sum(v['loaded'] for v in self._get('accessors').json().values()), 1)
+        self.assertEqual(len(self._get('accessors/delete/*').json()), 1)
+        self.assertEqual(sum(v['loaded'] for v in self._get('accessors').json().values()), 0)
+
+
+    def test_empty_accessor_list(self):
+        resp_list_acc = self._get(
+            'accessors',
+        )
+        self.assertEqual(len(resp_list_acc.json()), 0)
+
+    def test_write_accessor(self):
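+        # load a dummy in-memory accessor, confirm it has no backing file, then fetch it back and compare shape dictionaries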
+        acc_id = self._put('testing/accessors/dummy_accessor/load').json()
+        self.assertTrue(self._get(f'accessors/{acc_id}').json()['loaded'])
+        sd = self._get(f'accessors/{acc_id}').json()['shape_dict']
+        self.assertEqual(self._get(f'accessors/{acc_id}').json()['filepath'], '')
+        acc_out = self.get_accessor(accessor_id=acc_id, filename='test_output.tif')
+        self.assertEqual(sd, acc_out.shape_dict)
\ No newline at end of file
diff --git a/tests/test_model.py b/tests/base/test_model.py
similarity index 77%
rename from tests/test_model.py
rename to tests/base/test_model.py
index 91043f24e7ec354759dafa2c61b833b1dbd78688..d975f7cd8725e0215391b4a526feab7cd69eeb31 100644
--- a/tests/test_model.py
+++ b/tests/base/test_model.py
@@ -1,7 +1,12 @@
 import unittest
-from model_server.conf.testing import czifile
+
+import model_server.conf.testing as conf
+from model_server.conf.testing import DummySemanticSegmentationModel, DummyInstanceSegmentationModel
 from model_server.base.accessors import CziImageFileAccessor
-from model_server.base.models import DummySemanticSegmentationModel, DummyInstanceSegmentationModel, CouldNotLoadModelError
+from model_server.base.models import CouldNotLoadModelError, BinaryThresholdSegmentationModel
+
+czifile = conf.meta['image_files']['czifile']
+
 
 class TestCziImageFileAccess(unittest.TestCase):
     def setUp(self) -> None:
@@ -50,6 +55,12 @@ class TestCziImageFileAccess(unittest.TestCase):
         )
         return img, mask
 
+    def test_binary_segmentation(self):
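+        # threshold one channel at a fixed value and expect a binary mask back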
+        model = BinaryThresholdSegmentationModel(tr=3e4)
+        img = self.cf.get_mono(0)
+        res = model.label_pixel_class(img)
+        self.assertTrue(res.is_mask())
+
     def test_dummy_instance_segmentation(self):
         img, mask = self.test_dummy_pixel_segmentation()
         model = DummyInstanceSegmentationModel()
diff --git a/tests/base/test_pipelines.py b/tests/base/test_pipelines.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f1b0303cbacf9fe9fe9969f7beadfe1ce942711
--- /dev/null
+++ b/tests/base/test_pipelines.py
@@ -0,0 +1,66 @@
+import unittest
+
+from model_server.base.accessors import generate_file_accessor, write_accessor_data_to_file
+from model_server.base.pipelines import router, segment, segment_zproj
+
+import model_server.conf.testing as conf
+from model_server.conf.testing import DummySemanticSegmentationModel
+
+czifile = conf.meta['image_files']['czifile']
+zstack = conf.meta['image_files']['tifffile']
+output_path = conf.meta['output_path']
+
+
+class TestSegmentationPipelines(unittest.TestCase):
+    def setUp(self) -> None:
+        self.model = DummySemanticSegmentationModel()
+
+    def test_call_segment_pipeline(self):
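+        # run the dummy segmentation pipeline on the CZI file and write the resulting binary mask to disk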
+        acc = generate_file_accessor(czifile['path'])
+        trace = segment.segment_pipeline({'accessor': acc}, {'model': self.model}, channel=2, smooth=3)
+        outfp = output_path / 'pipelines' / 'segment_binary_mask.tif'
+        write_accessor_data_to_file(outfp, trace.last)
+
+        import tifffile
+        img = tifffile.imread(outfp)
+        w = czifile['w']
+        h = czifile['h']
+
+        self.assertEqual(
+            img.shape,
+            (h, w),
+            'Inferred image is not the expected shape'
+        )
+
+        self.assertEqual(
+            img[int(h/2), int(w/2)],
+            255,
+            'Middle pixel is not white as expected'
+        )
+
+        self.assertEqual(
+            img[0, 0],
+            0,
+            'First pixel is not black as expected'
+        )
+
+        interm_fps = trace.write_interm(
+            output_path / 'pipelines' / 'segment_interm',
+            prefix=czifile['name']
+        )
+        self.assertEqual([ofp.stem.split('_')[-1] for ofp in interm_fps], ['mono', 'inference', 'smooth'])
+
+    def test_call_segment_zproj_pipeline(self):
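+        # regardless of how the z-slice is chosen, the projected output should have a single channel and a single z-slice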
+        acc = generate_file_accessor(zstack['path'])
+
+        trace1 = segment_zproj.segment_zproj_pipeline({'accessor': acc}, {'model': self.model}, channel=0, smooth=3, zi=4)
+        self.assertEqual(trace1.last.chroma, 1)
+        self.assertEqual(trace1.last.nz, 1)
+
+        trace2 = segment_zproj.segment_zproj_pipeline({'accessor': acc}, {'model': self.model}, channel=0, smooth=3)
+        self.assertEqual(trace2.last.chroma, 1)
+        self.assertEqual(trace2.last.nz, 1)
+
+        trace3 = segment_zproj.segment_zproj_pipeline({'accessor': acc}, {'model': self.model})
+        self.assertEqual(trace3.last.chroma, 1)  # still == 1: model returns a single channel regardless of input
+        self.assertEqual(trace3.last.nz, 1)
diff --git a/tests/test_process.py b/tests/base/test_process.py
similarity index 83%
rename from tests/test_process.py
rename to tests/base/test_process.py
index d2fb33b9cc9a8b6af04c8eacd99b6b87d7d64527..56838fb1f755f9543f3fd8dfffdc21a4de70edb5 100644
--- a/tests/test_process.py
+++ b/tests/base/test_process.py
@@ -1,10 +1,9 @@
 import unittest
 
 import numpy as np
-from skimage.measure import find_contours
 
 from model_server.base.annotators import draw_contours_on_patch
-from model_server.base.process import get_safe_contours, mask_largest_object, pad
+from model_server.base.process import get_safe_contours, mask_largest_object, pad, smooth
 
 class TestProcessingUtilityMethods(unittest.TestCase):
     def setUp(self) -> None:
@@ -53,7 +52,6 @@ class TestMaskLargestObject(unittest.TestCase):
         arr[0:3, 0:3] = 255
         arr[4, 2:5] = 255
         masked = mask_largest_object(arr)
-        print(np.unique(masked))
         self.assertTrue(np.all(np.unique(masked) == [0, 255]))
         self.assertTrue(np.all(masked[:, 3:5] == 0))
         self.assertTrue(np.all(masked[3:5, :] == 0))
@@ -79,4 +77,19 @@ class TestSafeContours(unittest.TestCase):
         self.assertEqual((patch == 0).sum(), 0)
         patch = draw_contours_on_patch(self.patch, con)
         self.assertEqual((patch == 0).sum(), 20)
-        self.assertEqual((patch[0, :] == 0).sum(), 20)
\ No newline at end of file
+        self.assertEqual((patch[0, :] == 0).sum(), 20)
+
+class TestSmooth(unittest.TestCase):
+
+    def test_smooth_uint8_binary_mask(self):
+        mask = np.zeros([4, 4], dtype='uint8')
+        mask[1:3, 1:3] = 255
+        mask[2, 2] = 0
+        res = smooth(mask, sig=3)
+
+        # assert type and range match
+        self.assertEqual(mask.dtype, res.dtype)
+        self.assertTrue(np.all(np.unique(mask) == np.unique(res)))
+
+        # trivial case with sig=0 just returns input array
+        self.assertTrue(np.all(mask == smooth(mask, sig=0)))
diff --git a/tests/test_roiset.py b/tests/base/test_roiset.py
similarity index 63%
rename from tests/test_roiset.py
rename to tests/base/test_roiset.py
index efc0779f067ebffba0acdb5c0f8ff1852eccef2b..785358c92961662273f97f8f8c74037e03fc3cf3 100644
--- a/tests/test_roiset.py
+++ b/tests/base/test_roiset.py
@@ -1,4 +1,3 @@
-import os
 import re
 import unittest
 
@@ -7,26 +6,32 @@ from pathlib import Path
 
 import pandas as pd
 
-from model_server.conf.testing import output_path, roiset_test_data
-
-from model_server.base.roiset import RoiSetExportParams, RoiSetMetaParams
+from model_server.base.process import smooth
+from model_server.base.roiset import filter_df_overlap_bbox, filter_df_overlap_seg, RoiSetExportParams, RoiSetMetaParams
 from model_server.base.roiset import RoiSet
 from model_server.base.accessors import generate_file_accessor, InMemoryDataAccessor, write_accessor_data_to_file, PatchStack
-from model_server.base.models import DummyInstanceSegmentationModel
+import model_server.conf.testing as conf
+from model_server.conf.testing import DummyInstanceSegmentationModel
+
+data = conf.meta['image_files']
+output_path = conf.meta['output_path']
+params = conf.meta['roiset']
+
+
 
 class BaseTestRoiSetMonoProducts(object):
 
     def setUp(self) -> None:
         # set up test raw data and segmentation from file
-        self.stack = generate_file_accessor(roiset_test_data['multichannel_zstack']['path'])
-        self.stack_ch_pa = self.stack.get_mono(roiset_test_data['pipeline_params']['patches_channel'])
-        self.seg_mask = generate_file_accessor(roiset_test_data['multichannel_zstack']['mask_path'])
+        self.stack = generate_file_accessor(data['multichannel_zstack_raw']['path'])
+        self.stack_ch_pa = self.stack.get_mono(params['patches_channel'])
+        self.seg_mask = generate_file_accessor(data['multichannel_zstack_mask2d']['path'])
 
 
 class TestRoiSetMonoProducts(BaseTestRoiSetMonoProducts, unittest.TestCase):
 
     def _make_roi_set(self, mask_type='boxes', **kwargs):
-        roiset = RoiSet.from_segmentation(
+        roiset = RoiSet.from_binary_mask(
             self.stack_ch_pa,
             self.seg_mask,
             params=RoiSetMetaParams(
@@ -67,7 +72,7 @@ class TestRoiSetMonoProducts(BaseTestRoiSetMonoProducts, unittest.TestCase):
         acc_zstack_slice = InMemoryDataAccessor(self.stack_ch_pa.data[:, :, :, 0])
         self.assertEqual(acc_zstack_slice.nz, 1)
 
-        roiset = RoiSet.from_segmentation(acc_zstack_slice, self.seg_mask, params=RoiSetMetaParams(mask_type='boxes'))
+        roiset = RoiSet.from_binary_mask(acc_zstack_slice, self.seg_mask, params=RoiSetMetaParams(mask_type='boxes'))
         zmask = roiset.get_zmask()
 
         zmask_acc = InMemoryDataAccessor(zmask)
@@ -75,8 +80,10 @@ class TestRoiSetMonoProducts(BaseTestRoiSetMonoProducts, unittest.TestCase):
 
     def test_create_roiset_with_no_objects(self):
         zero_obmap = InMemoryDataAccessor(np.zeros(self.seg_mask.shape, self.seg_mask.dtype))
-        roiset = RoiSet(self.stack_ch_pa, zero_obmap)
+        roiset = RoiSet.from_object_ids(self.stack_ch_pa, zero_obmap)
         self.assertEqual(roiset.count, 0)
+        roiset.classify_by('dummy_class', [0], DummyInstanceSegmentationModel())
+        self.assertTrue('classify_by_dummy_class' in roiset.get_df().columns)
 
     def test_slices_are_valid(self):
         roiset = self._make_roi_set()
@@ -160,26 +167,6 @@ class TestRoiSetMonoProducts(BaseTestRoiSetMonoProducts, unittest.TestCase):
         result = generate_file_accessor(where / file)
         self.assertEqual(result.shape, roiset.acc_raw.shape)
 
-    def test_flatten_image(self):
-        roiset = RoiSet.from_segmentation(self.stack_ch_pa, self.seg_mask, params=RoiSetMetaParams(mask_type='boxes'))
-        df = roiset.get_df()
-
-        from model_server.base.roiset import project_stack_from_focal_points
-
-        img = project_stack_from_focal_points(
-            df['centroid-0'].to_numpy(),
-            df['centroid-1'].to_numpy(),
-            df['zi'].to_numpy(),
-            self.stack,
-            degree=4,
-        )
-
-        self.assertEqual(img.shape[0:2], self.stack.shape[0:2])
-
-        write_accessor_data_to_file(
-            output_path / 'flattened.tif',
-            InMemoryDataAccessor(img)
-        )
 
     def test_make_binary_masks(self):
         roiset = self._make_roi_set()
@@ -198,50 +185,39 @@ class TestRoiSetMonoProducts(BaseTestRoiSetMonoProducts, unittest.TestCase):
         roiset = self._make_roi_set()
         roiset.classify_by('dummy_class', [0], DummyInstanceSegmentationModel())
         self.assertTrue(all(roiset.get_df()['classify_by_dummy_class'].unique() == [1]))
-        self.assertTrue(all(np.unique(roiset.object_class_maps['dummy_class'].data) == [0, 1]))
+        self.assertTrue(all(np.unique(roiset.get_object_class_map('dummy_class').data) == [0, 1]))
         return roiset
 
     def test_classify_by_multiple_channels(self):
-        roiset = RoiSet.from_segmentation(self.stack, self.seg_mask)
+        roiset = RoiSet.from_binary_mask(self.stack, self.seg_mask, params=RoiSetMetaParams(deproject_channel=0))
         roiset.classify_by('dummy_class', [0, 1], DummyInstanceSegmentationModel())
         self.assertTrue(all(roiset.get_df()['classify_by_dummy_class'].unique() == [1]))
-        self.assertTrue(all(np.unique(roiset.object_class_maps['dummy_class'].data) == [0, 1]))
+        self.assertTrue(all(np.unique(roiset.get_object_class_map('dummy_class').data) == [0, 1]))
         return roiset
 
-    def test_classify_by_with_derived_channel(self):
-        class ModelWithDerivedInputs(DummyInstanceSegmentationModel):
-            def infer(self, img, mask):
-                return PatchStack(super().infer(img, mask).data * img.chroma)
-
-        roiset = RoiSet.from_segmentation(
-            self.stack,
-            self.seg_mask,
-            params=RoiSetMetaParams(
-                filters={'area': {'min': 1e3, 'max': 1e4}},
-            )
-        )
-        roiset.classify_by(
-            'multiple_input_model',
-            [0, 1],
-            ModelWithDerivedInputs(),
-            derived_channel_functions=[
-                lambda acc: PatchStack(2 * acc.get_channels([0]).data),
-                lambda acc: PatchStack((0.5 * acc.get_channels([1]).data).astype('uint8'))
-            ]
+    def test_transfer_classification(self):
+        roiset1 = RoiSet.from_binary_mask(self.stack, self.seg_mask, params=RoiSetMetaParams(deproject_channel=0))
+
+        # prepare alternative mask and compare
+        smoothed_mask = self.seg_mask.apply(lambda x: smooth(x, sig=1.5))
+        roiset2 = RoiSet.from_binary_mask(self.stack, smoothed_mask, params=RoiSetMetaParams(deproject_channel=0))
+        dmask = (self.seg_mask.data / 255) + (smoothed_mask.data / 255)
+        self.assertTrue(np.all(np.unique(dmask) == [0, 1, 2]))
+        total_iou = (dmask == 2).sum() / ((dmask == 1).sum() + (dmask == 2).sum())
+        self.assertGreater(total_iou, 0.6)
+
+        # classify first RoiSet
+        roiset1.classify_by('dummy_class', [0, 1], DummyInstanceSegmentationModel())
+
+        self.assertTrue('dummy_class' in roiset1.classification_columns)
+        self.assertFalse('dummy_class' in roiset2.classification_columns)
+        res = roiset2.get_instance_classification(roiset1)
+        self.assertTrue('dummy_class' in roiset2.classification_columns)
+        self.assertLess(
+            roiset2.get_df().classify_by_dummy_class.count(),
+            roiset1.get_df().classify_by_dummy_class.count(),
         )
-        self.assertTrue(all(roiset.get_df()['classify_by_multiple_input_model'].unique() == [4]))
-        self.assertTrue(all(np.unique(roiset.object_class_maps['multiple_input_model'].data) == [0, 4]))
-
-        self.assertEqual(len(roiset.accs_derived), 2)
-        for di in roiset.accs_derived:
-            self.assertEqual(roiset.get_patches_acc().hw, di.hw)
-            self.assertEqual(roiset.get_patches_acc().nz, di.nz)
-            self.assertEqual(roiset.get_patches_acc().count, di.count)
-
-        dpas = roiset.run_exports(output_path / 'derived_channels', 0, 'der', RoiSetExportParams(derived_channels=True))
-        for fp in dpas['derived_channels']:
-            assert Path(fp).exists()
-        return roiset
+
 
     def test_export_object_classes(self):
         record = self.test_classify_by().run_exports(
@@ -279,13 +255,14 @@ class TestRoiSetMultichannelProducts(BaseTestRoiSetMonoProducts, unittest.TestCa
 
     def setUp(self) -> None:
         super().setUp()
-        self.roiset = RoiSet.from_segmentation(
+        self.roiset = RoiSet.from_binary_mask(
             self.stack,
             self.seg_mask,
             params=RoiSetMetaParams(
                 expand_box_by=(128, 2),
                 mask_type='boxes',
                 filters={'area': {'min': 1e3, 'max': 1e4}},
+                deproject_channel=0,
             )
         )
 
@@ -396,6 +373,7 @@ class TestRoiSetMultichannelProducts(BaseTestRoiSetMonoProducts, unittest.TestCa
             'test_multichannel_annotated_zstack',
             expanded=True,
             pad_to=256,
+            draw_label=True,
         )
         result = generate_file_accessor(where / file)
         self.assertEqual(result.chroma, self.stack.chroma)
@@ -408,6 +386,7 @@ class TestRoiSetMultichannelProducts(BaseTestRoiSetMonoProducts, unittest.TestCa
             channel=3,
             expanded=True,
             pad_to=256,
+            draw_label=True,
         )
         result = generate_file_accessor(where / file)
         self.assertEqual(result.hw, self.roiset.acc_raw.hw)
@@ -460,6 +439,46 @@ class TestRoiSetMultichannelProducts(BaseTestRoiSetMonoProducts, unittest.TestCa
             for f in test_df[c]:
                 self.assertTrue((where / f).exists(), where / f)
 
+    def test_get_interm_prods(self):
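+        # build export products as in-memory accessors and check their dimensions against the ROI table and source stack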
+        p = RoiSetExportParams(**{
+            'patches_3d': None,
+            'annotated_patches_2d': {
+                'draw_bounding_box': True,
+                'rgb_overlay_channels': [3, None, None],
+                'rgb_overlay_weights': [0.2, 1.0, 1.0],
+                'pad_to': 512,
+            },
+            'patches_2d': {
+                'draw_bounding_box': False,
+                'draw_mask': False,
+            },
+            'annotated_zstacks': {},
+            'object_classes': True,
+        })
+        self.roiset.classify_by('dummy_class', [0], DummyInstanceSegmentationModel())
+        interm = self.roiset.get_export_product_accessors(
+            channel=3,
+            params=p
+        )
+        self.assertNotIn('patches_3d', interm.keys())
+        self.assertEqual(
+            interm['annotated_patches_2d'].hw,
+            (self.roiset.get_df().h.max(), self.roiset.get_df().w.max())
+        )
+        self.assertEqual(
+            interm['patches_2d'].hw,
+            (self.roiset.get_df().h.max(), self.roiset.get_df().w.max())
+        )
+        self.assertEqual(
+            interm['annotated_zstacks'].hw,
+            self.stack.hw
+        )
+        self.assertEqual(
+            interm['object_classes_dummy_class'].hw,
+            self.stack.hw
+        )
+        self.assertTrue(np.all(interm['object_classes_dummy_class'].unique()[0] == [0, 1]))
+
     def test_run_export_expanded_2d_patch(self):
         p = RoiSetExportParams(**{
             'patches_2d': {
@@ -514,17 +533,17 @@ class TestRoiSetMultichannelProducts(BaseTestRoiSetMonoProducts, unittest.TestCa
             self.assertTrue(pa.exists())
             pacc = generate_file_accessor(pa)
             self.assertEqual(pacc.chroma, 1)
-        print('res')
 
 
-from model_server.base.roiset import _get_label_ids
+from model_server.base.roiset import get_label_ids
 class TestRoiSetSerialization(unittest.TestCase):
 
     def setUp(self) -> None:
         # set up test raw data and segmentation from file
-        self.stack = generate_file_accessor(roiset_test_data['multichannel_zstack']['path'])
-        self.stack_ch_pa = self.stack.get_mono(roiset_test_data['pipeline_params']['segmentation_channel'])
-        self.seg_mask_3d = generate_file_accessor(roiset_test_data['multichannel_zstack']['mask_path_3d'])
+        self.stack = generate_file_accessor(data['multichannel_zstack_raw']['path'])
+        self.stack_ch_pa = self.stack.get_mono(params['segmentation_channel'])
+        self.seg_mask_3d = generate_file_accessor(data['multichannel_zstack_mask3d']['path'])
+        self.seg_mask_2d = generate_file_accessor(data['multichannel_zstack_mask2d']['path'])
 
     @staticmethod
     def _label_is_2d(id_map, la):  # single label's zmask has same counts as its MIP
@@ -533,22 +552,22 @@ class TestRoiSetSerialization(unittest.TestCase):
         return mask_3d.sum() == mask_mip.sum()
 
     def test_id_map_connects_z(self):
-        id_map = _get_label_ids(self.seg_mask_3d, allow_3d=True, connect_3d=True)
+        id_map = get_label_ids(self.seg_mask_3d, allow_3d=True, connect_3d=True)
         labels = np.unique(id_map.data)[1:]
         is_2d = all([self._label_is_2d(id_map.data, la) for la in labels])
         self.assertFalse(is_2d)
 
     def test_id_map_disconnects_z(self):
-        id_map = _get_label_ids(self.seg_mask_3d, allow_3d=True, connect_3d=False)
+        id_map = get_label_ids(self.seg_mask_3d, allow_3d=True, connect_3d=False)
         labels = np.unique(id_map.data)[1:]
         is_2d = all([self._label_is_2d(id_map.data, la) for la in labels])
         self.assertTrue(is_2d)
 
     def test_create_roiset_from_3d_obj_ids(self):
-        id_map = _get_label_ids(self.seg_mask_3d, allow_3d=True, connect_3d=False)
+        id_map = get_label_ids(self.seg_mask_3d, allow_3d=True, connect_3d=False)
         self.assertEqual(self.stack_ch_pa.shape, id_map.shape)
 
-        roiset = RoiSet(
+        roiset = RoiSet.from_object_ids(
             self.stack_ch_pa,
             id_map,
             params=RoiSetMetaParams(mask_type='contours')
@@ -557,11 +576,11 @@ class TestRoiSetSerialization(unittest.TestCase):
         self.assertGreater(len(roiset.get_df()['zi'].unique()), 1)
 
     def test_create_roiset_from_2d_obj_ids(self):
-        id_map = _get_label_ids(self.seg_mask_3d, allow_3d=False)
+        id_map = get_label_ids(self.seg_mask_3d, allow_3d=False)
         self.assertEqual(self.stack_ch_pa.shape[0:3], id_map.shape[0:3])
         self.assertEqual(id_map.nz, 1)
 
-        roiset = RoiSet(
+        roiset = RoiSet.from_object_ids(
             self.stack_ch_pa,
             id_map,
             params=RoiSetMetaParams(mask_type='contours')
@@ -590,6 +609,7 @@ class TestRoiSetSerialization(unittest.TestCase):
             m_acc = generate_file_accessor(pmf)
             self.assertEqual((roi.h, roi.w), m_acc.hw)
             patch_filenames.append(pmf.name)
+            self.assertEqual(m_acc.nz, 1)
 
         # make another RoiSet from just the data table, raw images, and (tight) patch masks
         test_roiset = RoiSet.deserialize(self.stack_ch_pa, where_ser, prefix='ref')
@@ -611,3 +631,191 @@ class TestRoiSetSerialization(unittest.TestCase):
             t_acc = generate_file_accessor(pt)
             self.assertTrue(np.all(r_acc.data == t_acc.data))
 
+
+class TestRoiSetObjectDetection(unittest.TestCase):
+
+    def setUp(self) -> None:
+        # set up test raw data and segmentation from file
+        self.stack = generate_file_accessor(data['multichannel_zstack_raw']['path'])
+        self.stack_ch_pa = self.stack.get_mono(params['segmentation_channel'])
+        self.seg_mask_3d = generate_file_accessor(data['multichannel_zstack_mask3d']['path'])
+
+    def test_create_roiset_from_bounding_boxes(self):
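+        # derive bounding boxes from a labeled 3D mask via skimage regionprops, then build a RoiSet from them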
+        from skimage.measure import label, regionprops, regionprops_table
+
+        mask = self.seg_mask_3d
+        labels = label(mask.data_xyz, connectivity=3)
+        table = pd.DataFrame(
+            regionprops_table(labels)
+        ).rename(
+            columns={'bbox-0': 'y', 'bbox-1': 'x', 'bbox-2': 'zi', 'bbox-3': 'y1', 'bbox-4': 'x1'}
+        ).drop(
+            columns=['bbox-5']
+        )
+        table['w'] = table['x1'] - table['x']
+        table['h'] = table['y1'] - table['y']
+        bboxes = table[['y', 'x', 'h', 'w']].to_dict(orient='records')
+
+        roiset_bbox = RoiSet.from_bounding_boxes(self.stack_ch_pa, bboxes)
+        self.assertTrue('label' in roiset_bbox.get_df().columns)
+        patches_bbox = roiset_bbox.get_patches_acc()
+        self.assertEqual(len(table), patches_bbox.count)
+
+
+        # roiset w/ seg for comparison
+        roiset_seg = RoiSet.from_binary_mask(self.stack_ch_pa, mask, allow_3d=True)
+        patches_seg = roiset_seg.get_patches_acc()
+
+        # test bounding box dimensions match those from RoiSet generated directly from segmentation
+        self.assertEqual(roiset_seg.count, roiset_bbox.count)
+        for i in range(0, roiset_seg.count):
+            self.assertEqual(patches_seg.iat(i, crop=True).shape, patches_bbox.iat(i, crop=True).shape)
+
+        # test that serialization does not write patch masks
+        roiset_ser_path = output_path / 'roiset_from_bbox'
+        dd = roiset_bbox.serialize(roiset_ser_path)
+        self.assertTrue('tight_patch_masks' not in dd.keys())
+        self.assertFalse((roiset_ser_path / 'tight_patch_masks').exists())
+
+        # test that deserialized RoiSet matches the original
+        roiset_des = RoiSet.deserialize(self.stack_ch_pa, roiset_ser_path)
+        self.assertEqual(roiset_des.count, roiset_bbox.count)
+        for i in range(0, roiset_des.count):
+            self.assertEqual(patches_seg.iat(i, crop=True).shape, patches_bbox.iat(i, crop=True).shape)
+        self.assertTrue((roiset_bbox.get_zmask() == roiset_des.get_zmask()).all())
+
+
+class TestRoiSetPolygons(BaseTestRoiSetMonoProducts, unittest.TestCase):
+
+    def test_compute_polygons(self):
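+        # compute polygons from a segmentation-derived RoiSet, rebuild a RoiSet from them, and compare masks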
+        roiset_ref = RoiSet.from_binary_mask(
+            self.stack_ch_pa,
+            self.seg_mask,
+            params=RoiSetMetaParams(
+                mask_type='contours',
+                filters={'area': {'min': 1e1, 'max': 1e6}}
+            )
+        )
+
+        poly = roiset_ref.get_polygons()
+        roiset_test = RoiSet.from_polygons_2d(self.stack_ch_pa, poly)
+        binary_poly = (roiset_test.acc_obj_ids.get_mono(0, mip=True).data > 0)
+        self.assertEqual(self.seg_mask.shape, binary_poly.shape)
+
+
+        # most mask pixels are within the fitted polygon
+        test_mask = np.logical_and(
+            np.logical_not(binary_poly),
+            (self.seg_mask.data == 255)
+        )
+        self.assertLess(test_mask.sum() / test_mask.size, 0.001)
+
+        # output results
+        od = output_path / 'polygons'
+        write_accessor_data_to_file(od / 'from_polygons.tif', InMemoryDataAccessor(binary_poly))
+        write_accessor_data_to_file(od / 'ref_mask.tif', self.seg_mask)
+        write_accessor_data_to_file(od / 'diff.tif', InMemoryDataAccessor(test_mask))
+
+
+    def test_overlap_bbox(self):
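+        # boxes only count as overlapping when they share a z-plane; contact along an edge alone does not count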
+        df = pd.DataFrame({
+            'x0': [0, 1, 2, 1, 1],
+            'x1': [2, 3, 4, 3, 3],
+            'y0': [0, 0, 0, 2, 0],
+            'y1': [2, 2, 2, 3, 2],
+            'zi': [0, 0, 0, 0, 1],
+        })
+
+        res = filter_df_overlap_bbox(df)
+        self.assertEqual(len(res), 4)
+        self.assertTrue((res.loc[0, 'overlaps_with'] == [1]).all())
+        self.assertTrue((res.loc[1, 'overlaps_with'] == [0, 2]).all())
+        self.assertTrue((res.bbox_intersec == 2).all())
+        return res
+
+
+    def test_overlap_bbox_multiple(self):
+        df1 = pd.DataFrame({
+            'x0': [0, 1],
+            'x1': [2, 3],
+            'y0': [0, 0],
+            'y1': [2, 2],
+            'zi': [0, 0],
+        })
+        df2 = pd.DataFrame({
+            'x0': [2],
+            'x1': [4],
+            'y0': [0],
+            'y1': [2],
+            'zi': [0],
+        })
+        res = filter_df_overlap_bbox(df1, df2)
+        self.assertTrue((res.loc[1, 'overlaps_with'] == [0]).all())
+        self.assertEqual(len(res), 1)
+        self.assertTrue((res.bbox_intersec == 2).all())
+
+
+    def test_overlap_seg(self):
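+        # masks 1 and 2 share two pixels out of a five-pixel union, giving an IoU of 0.4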
+        df = pd.DataFrame({
+            'x0': [0, 1, 2],
+            'x1': [2, 3, 4],
+            'y0': [0, 0, 0],
+            'y1': [2, 2, 2],
+            'zi': [0, 0, 0],
+            'binary_mask': [
+                [
+                    [1, 1],
+                    [1, 0]
+                ],
+                [
+                    [0, 1],
+                    [1, 1]
+                ],
+                [
+                    [1, 1],
+                    [1, 1]
+                ],
+            ]
+        })
+
+        res = filter_df_overlap_seg(df)
+        self.assertTrue((res.loc[res.seg_overlaps, :].index == [1, 2]).all())
+        self.assertTrue((res.loc[res.seg_overlaps, 'seg_iou'] == 0.4).all())
+
+    def test_overlap_seg_multiple(self):
+        df1 = pd.DataFrame({
+            'x0': [0, 1],
+            'x1': [2, 3],
+            'y0': [0, 0],
+            'y1': [2, 2],
+            'zi': [0, 0],
+            'binary_mask': [
+                [
+                    [1, 1],
+                    [1, 0]
+                ],
+                [
+                    [0, 1],
+                    [1, 1]
+                ],
+            ]
+        })
+        df2 = pd.DataFrame({
+            'x0': [2],
+            'x1': [4],
+            'y0': [0],
+            'y1': [2],
+            'zi': [0],
+            'binary_mask': [
+                [
+                    [1, 1],
+                    [1, 1]
+                ],
+            ]
+        })
+        res = filter_df_overlap_seg(df1, df2)
+        self.assertTrue((res.loc[1, 'overlaps_with'] == [0]).all())
+        self.assertEqual(len(res), 1)
+        self.assertTrue((res.bbox_intersec == 2).all())
+        self.assertTrue((res.loc[res.seg_overlaps, :].index == [1]).all())
+        self.assertTrue((res.loc[res.seg_overlaps, 'seg_iou'] == 0.4).all())
diff --git a/tests/base/test_roiset_derived.py b/tests/base/test_roiset_derived.py
new file mode 100644
index 0000000000000000000000000000000000000000..156ef9fe42c472dc829085ed3b8c26bb46ac003a
--- /dev/null
+++ b/tests/base/test_roiset_derived.py
@@ -0,0 +1,60 @@
+from pathlib import Path
+import unittest
+
+import numpy as np
+
+from model_server.base.roiset import RoiSetWithDerivedChannelsExportParams, RoiSetMetaParams
+from model_server.base.roiset import RoiSetWithDerivedChannels
+from model_server.base.accessors import generate_file_accessor, PatchStack
+import model_server.conf.testing as conf
+from model_server.conf.testing import DummyInstanceSegmentationModel
+
+data = conf.meta['image_files']
+params = conf.meta['roiset']
+output_path = conf.meta['output_path']
+
+class TestDerivedChannels(unittest.TestCase):
+    def setUp(self) -> None:
+        self.stack = generate_file_accessor(data['multichannel_zstack_raw']['path'])
+        self.stack_ch_pa = self.stack.get_mono(params['patches_channel'])
+        self.seg_mask = generate_file_accessor(data['multichannel_zstack_mask2d']['path'])
+
+    def test_classify_by_with_derived_channel(self):
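+        # the dummy model scales its output by the input chroma (2 raw + 2 derived channels), so the expected class is 4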
+        class ModelWithDerivedInputs(DummyInstanceSegmentationModel):
+            def infer(self, img, mask):
+                return PatchStack(super().infer(img, mask).data * img.chroma)
+
+        roiset = RoiSetWithDerivedChannels.from_binary_mask(
+            self.stack,
+            self.seg_mask,
+            params=RoiSetMetaParams(
+                filters={'area': {'min': 1e3, 'max': 1e4}},
+                deproject_channel=0,
+            )
+        )
+        self.assertIsInstance(roiset, RoiSetWithDerivedChannels)
+        roiset.classify_by(
+            'multiple_input_model',
+            [0, 1],
+            ModelWithDerivedInputs(),
+            derived_channel_functions=[
+                lambda acc: PatchStack(2 * acc.get_channels([0]).data),
+                lambda acc: PatchStack((0.5 * acc.get_channels([1]).data).astype('uint8'))
+            ]
+        )
+        self.assertTrue(all(roiset.get_df()['classify_by_multiple_input_model'].unique() == [4]))
+        self.assertTrue(all(np.unique(roiset.get_object_class_map('multiple_input_model').data) == [0, 4]))
+
+        self.assertEqual(len(roiset.accs_derived), 2)
+        for di in roiset.accs_derived:
+            self.assertEqual(roiset.get_patches_acc().hw, di.hw)
+            self.assertEqual(roiset.get_patches_acc().nz, di.nz)
+            self.assertEqual(roiset.get_patches_acc().count, di.count)
+
+        dpas = roiset.run_exports(
+            output_path / 'derived_channels', 0, 'der',
+            RoiSetWithDerivedChannelsExportParams(derived_channels=True)
+        )
+        for fp in dpas['derived_channels']:
+            assert Path(fp).exists()
+        return roiset
\ No newline at end of file
diff --git a/tests/test_session.py b/tests/base/test_session.py
similarity index 50%
rename from tests/test_session.py
rename to tests/base/test_session.py
index 31d6290392537922f87aa1e09e2f61317e9e9537..6843f36a011851638ce3b4739ce9ff6caf15b301 100644
--- a/tests/test_session.py
+++ b/tests/base/test_session.py
@@ -1,35 +1,21 @@
-import json
 from os.path import exists
 import pathlib
-from pydantic import BaseModel
 import unittest
 
-from model_server.base.models import DummySemanticSegmentationModel
-from model_server.base.session import Session
-from model_server.base.workflows import WorkflowRunRecord
+import numpy as np
+from model_server.base.accessors import InMemoryDataAccessor
+from model_server.base.session import session
 
 class TestGetSessionObject(unittest.TestCase):
     def setUp(self) -> None:
-        self.sesh = Session()
-
-    def tearDown(self) -> None:
-        print('Tearing down...')
-        Session._instances = {}
-
-    def test_session_is_singleton(self):
-        Session._instances = {}
-        self.assertEqual(len(Session._instances), 0)
-        s = Session()
-        self.assertEqual(len(Session._instances), 1)
-        self.assertIs(s, Session())
-        self.assertEqual(len(Session._instances), 1)
+        session.restart()
+        self.sesh = session
 
     def test_session_logfile_is_valid(self):
         self.assertTrue(exists(self.sesh.logfile), 'Session did not create a log file in the correct place')
 
     def test_changing_session_root_creates_new_directory(self):
         from model_server.conf.defaults import root
-        from shutil import rmtree
 
         old_paths = self.sesh.get_paths()
         newroot = root / 'subdir'
@@ -38,15 +24,8 @@ class TestGetSessionObject(unittest.TestCase):
         for k in old_paths.keys():
             self.assertTrue(new_paths[k].__str__().startswith(newroot.__str__()))
 
-        # this is necessary because logger itself is a singleton class
-        self.tearDown()
-        self.setUp()
-        rmtree(newroot)
-        self.assertFalse(newroot.exists(), 'Could not clean up temporary test subdirectory')
-
     def test_change_session_subdirectory(self):
         old_paths = self.sesh.get_paths()
-        print(old_paths)
         self.sesh.set_data_directory('outbound_images', old_paths['inbound_images'])
         self.assertEqual(self.sesh.paths['outbound_images'], self.sesh.paths['inbound_images'])
 
@@ -58,6 +37,17 @@ class TestGetSessionObject(unittest.TestCase):
         self.assertTrue(logfile2.exists())
         self.assertNotEqual(logfile1, logfile2, 'Restarting session does not generate new logfile')
 
+    def test_reimporting_session_uses_same_logfile(self):
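+        # the module-level session object is shared state, so re-importing it must reuse the existing logfile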
+        session1 = self.sesh
+        logfile1 = session1.logfile
+        self.assertTrue(logfile1.exists())
+
+        from model_server.base.session import session as session2
+        self.assertEqual(session1, session2)
+        logfile2 = session2.logfile
+        self.assertTrue(logfile2.exists())
+        self.assertEqual(logfile1, logfile2, 'Reimporting session incorrectly creates new logfile')
+
     def test_log_warning(self):
         msg = 'A test warning'
         self.sesh.log_info(msg)
@@ -74,56 +64,6 @@ class TestGetSessionObject(unittest.TestCase):
         self.assertEqual(logs[1]['level'], 'WARNING')
         self.assertEqual(logs[-1]['message'], 'Initialized session')
 
-    def test_session_loads_model(self):
-        MC = DummySemanticSegmentationModel
-        success = self.sesh.load_model(MC)
-        self.assertTrue(success)
-        loaded_models = self.sesh.describe_loaded_models()
-        self.assertTrue(
-            (MC.__name__ + '_00') in loaded_models.keys()
-        )
-        self.assertEqual(
-            loaded_models[MC.__name__ + '_00']['class'],
-            MC.__name__
-        )
-
-    def test_session_loads_second_instance_of_same_model(self):
-        MC = DummySemanticSegmentationModel
-        self.sesh.load_model(MC)
-        self.sesh.load_model(MC)
-        self.assertIn(MC.__name__ + '_00', self.sesh.models.keys())
-        self.assertIn(MC.__name__ + '_01', self.sesh.models.keys())
-
-    def test_session_loads_model_with_params(self):
-        MC = DummySemanticSegmentationModel
-        class _PM(BaseModel):
-            p: str
-        p1 = _PM(p='abc')
-        success = self.sesh.load_model(MC, params=p1)
-        self.assertTrue(success)
-        loaded_models = self.sesh.describe_loaded_models()
-        mid = MC.__name__ + '_00'
-        self.assertEqual(loaded_models[mid]['params'], p1)
-
-        # load a second model and confirm that the first is locatable by its param entry
-        p2 = _PM(p='def')
-        self.sesh.load_model(MC, params=p2)
-        find_mid = self.sesh.find_param_in_loaded_models('p', 'abc')
-        self.assertEqual(mid, find_mid)
-        self.assertEqual(self.sesh.describe_loaded_models()[mid]['params'], p1)
-
-    def test_session_finds_existing_model_with_different_path_formats(self):
-        MC = DummySemanticSegmentationModel
-        class _PM(BaseModel):
-            path: str
-
-        p1 = _PM(path='c:\\windows\\dummy.pa')
-        p2 = _PM(path='c:/windows/dummy.pa')
-        mid = self.sesh.load_model(MC, params=p1)
-        assert pathlib.Path(p1.path) == pathlib.Path(p2.path)
-        find_mid = self.sesh.find_param_in_loaded_models('path', p2.path, is_path=True)
-        self.assertEqual(mid, find_mid)
-
     def test_change_output_path(self):
         pa = self.sesh.get_paths()['inbound_images']
         self.assertIsInstance(pa, pathlib.Path)
@@ -146,3 +86,53 @@ class TestGetSessionObject(unittest.TestCase):
         self.assertEqual(len(dfv), len(data))
         self.assertEqual(dfv.columns[0], 'X')
         self.assertEqual(dfv.columns[1], 'Y')
+
+
+    def test_add_and_remove_accessor(self):
+        acc = InMemoryDataAccessor(
+            np.random.randint(
+                0,
+                2 ** 8,
+                size=(512, 256, 3, 7),
+                dtype='uint8'
+            )
+        )
+        shd = acc.shape_dict
+
+        # add accessor to session registry
+        acc_id = session.add_accessor(acc)
+        self.assertEqual(session.get_accessor_info(acc_id)['shape_dict'], shd)
+        self.assertTrue(session.get_accessor_info(acc_id)['loaded'])
+
+        # remove accessor from session registry
+        session.del_accessor(acc_id)
+        self.assertEqual(session.get_accessor_info(acc_id)['shape_dict'], shd)
+        self.assertFalse(session.get_accessor_info(acc_id)['loaded'])
+
+    def test_add_and_use_accessor(self):
+        acc = InMemoryDataAccessor(
+            np.random.randint(
+                0,
+                2 ** 8,
+                size=(512, 256, 3, 7),
+                dtype='uint8'
+            )
+        )
+        shd = acc.shape_dict
+
+        # add accessor to session registry
+        acc_id = session.add_accessor(acc)
+        self.assertEqual(session.get_accessor_info(acc_id)['shape_dict'], shd)
+        self.assertTrue(session.get_accessor_info(acc_id)['loaded'])
+
+        # get accessor from session registry without popping
+        acc_get = session.get_accessor(acc_id, pop=False)
+        self.assertIsInstance(acc_get, InMemoryDataAccessor)
+        self.assertEqual(acc_get.shape_dict, shd)
+        self.assertTrue(session.get_accessor_info(acc_id)['loaded'])
+
+        # get accessor from session registry with popping
+        acc_get = session.get_accessor(acc_id)
+        self.assertIsInstance(acc_get, InMemoryDataAccessor)
+        self.assertEqual(acc_get.shape_dict, shd)
+        self.assertFalse(session.get_accessor_info(acc_id)['loaded'])
diff --git a/tests/test_api.py b/tests/test_api.py
deleted file mode 100644
index 3ac5c8f14ce6d74640ae54d71cd8619b68813a53..0000000000000000000000000000000000000000
--- a/tests/test_api.py
+++ /dev/null
@@ -1,185 +0,0 @@
-import json
-
-from multiprocessing import Process
-from pathlib import Path
-import requests
-import unittest
-from urllib3 import Retry
-
-from model_server.conf.testing import czifile
-
-
-class TestServerBaseClass(unittest.TestCase):
-    app_name = 'model_server.base.api:app'
-
-    def setUp(self) -> None:
-        import uvicorn
-        host = '127.0.0.1'
-        port = 5000
-
-        self.server_process = Process(
-            target=uvicorn.run,
-            args=(self.app_name, ),
-            kwargs={'host': host, 'port': port, 'log_level': 'debug'},
-            daemon=True
-        )
-        self.uri = f'http://{host}:{port}/'
-        self.server_process.start()
-
-    def _get_sesh(self):
-        sesh = requests.Session()
-        retries = Retry(
-            total=5,
-            backoff_factor=0.1,
-        )
-        sesh.mount('http://', requests.adapters.HTTPAdapter(max_retries=retries))
-        return sesh
-
-    def _get(self, endpoint):
-        return self._get_sesh().get(self.uri + endpoint)
-
-    def _put(self, endpoint, query=None, body=None):
-        return self._get_sesh().put(
-            self.uri + endpoint,
-            params=query,
-            data=json.dumps(body)
-        )
-
-    def copy_input_file_to_server(self):
-        from shutil import copyfile
-
-        resp = self._get('paths')
-        pa = resp.json()['inbound_images']
-        outpath = Path(pa) / czifile['filename']
-        copyfile(
-            czifile['path'],
-            outpath
-        )
-
-    def tearDown(self) -> None:
-        self.server_process.terminate()
-
-class TestApiFromAutomatedClient(TestServerBaseClass):
-    def test_trivial_api_response(self):
-        resp = self._get('')
-        self.assertEqual(resp.status_code, 200)
-
-    def test_bounceback_parameters(self):
-        resp = self._put('bounce_back', body={'par1': 'hello', 'par2': ['ab', 'cd']})
-        self.assertEqual(resp.status_code, 200, resp.json())
-        self.assertEqual(resp.json()['params']['par1'], 'hello', resp.json())
-        self.assertEqual(resp.json()['params']['par2'], ['ab', 'cd'], resp.json())
-
-    def test_default_session_paths(self):
-        import model_server.conf.defaults
-        resp = self._get('paths')
-        conf_root = model_server.conf.defaults.root
-        for p in ['inbound_images', 'outbound_images', 'logs']:
-            self.assertTrue(resp.json()[p].startswith(conf_root.__str__()))
-            suffix = Path(model_server.conf.defaults.subdirectories[p]).__str__()
-            self.assertTrue(resp.json()[p].endswith(suffix))
-
-    def test_list_empty_loaded_models(self):
-        resp = self._get('models')
-        self.assertEqual(resp.status_code, 200)
-        self.assertEqual(resp.content, b'{}')
-
-    def test_load_dummy_semantic_model(self):
-        resp_load = self._put(f'models/dummy_semantic/load')
-        model_id = resp_load.json()['model_id']
-        self.assertEqual(resp_load.status_code, 200, resp_load.json())
-        resp_list = self._get('models')
-        self.assertEqual(resp_list.status_code, 200)
-        rj = resp_list.json()
-        self.assertEqual(rj[model_id]['class'], 'DummySemanticSegmentationModel')
-        return model_id
-
-    def test_load_dummy_instance_model(self):
-        resp_load = self._put(f'models/dummy_instance/load')
-        model_id = resp_load.json()['model_id']
-        self.assertEqual(resp_load.status_code, 200, resp_load.json())
-        resp_list = self._get('models')
-        self.assertEqual(resp_list.status_code, 200)
-        rj = resp_list.json()
-        self.assertEqual(rj[model_id]['class'], 'DummyInstanceSegmentationModel')
-        return model_id
-
-    def test_respond_with_error_when_invalid_filepath_requested(self):
-        model_id = self.test_load_dummy_semantic_model()
-
-        resp = self._put(
-            f'infer/from_image_file',
-            query={'model_id': model_id, 'input_filename': 'not_a_real_file.name'}
-        )
-        self.assertEqual(resp.status_code, 404, resp.content.decode())
-
-
-    def test_i2i_inference_errors_when_model_not_found(self):
-        model_id = 'not_a_real_model'
-        resp = self._put(
-            f'workflows/segment',
-           query={'model_id': model_id, 'input_filename': 'not_a_real_file.name'}
-        )
-        self.assertEqual(resp.status_code, 409, resp.content.decode())
-
-    def test_i2i_dummy_inference_by_api(self):
-        model_id = self.test_load_dummy_semantic_model()
-        self.copy_input_file_to_server()
-        resp_infer = self._put(
-            f'workflows/segment',
-            query={
-                'model_id': model_id,
-                'input_filename': czifile['filename'],
-                'channel': 2,
-            },
-        )
-        self.assertEqual(resp_infer.status_code, 200, resp_infer.content.decode())
-
-    def test_restarting_session_clears_loaded_models(self):
-        resp_load = self._put(f'models/dummy_semantic/load',)
-        self.assertEqual(resp_load.status_code, 200, resp_load.json())
-        resp_list_0 = self._get('models')
-        self.assertEqual(resp_list_0.status_code, 200)
-        rj0 = resp_list_0.json()
-        self.assertEqual(len(rj0), 1, f'Unexpected models in response: {rj0}')
-        resp_restart = self._get('session/restart')
-        resp_list_1 = self._get('models')
-        rj1 = resp_list_1.json()
-        self.assertEqual(len(rj1), 0, f'Unexpected models in response: {rj1}')
-
-    def test_change_inbound_path(self):
-        resp_inpath = self._get('paths')
-        resp_change = self._put(
-            f'paths/watch_output',
-            query={'path': resp_inpath.json()['inbound_images']}
-        )
-        self.assertEqual(resp_change.status_code, 200)
-        resp_check = self._get('paths')
-        self.assertEqual(resp_check.json()['inbound_images'], resp_check.json()['outbound_images'])
-
-    def test_exception_when_changing_inbound_path(self):
-        resp_inpath = self._get('paths')
-        fakepath = 'c:/fake/path/to/nowhere'
-        resp_change = self._put(
-            f'paths/watch_output',
-            query={'path': fakepath}
-        )
-        self.assertEqual(resp_change.status_code, 404)
-        self.assertIn(fakepath, resp_change.json()['detail'])
-        resp_check = self._get('paths')
-        self.assertEqual(resp_inpath.json()['outbound_images'], resp_check.json()['outbound_images'])
-
-    def test_no_change_inbound_path(self):
-        resp_inpath = self._get('paths')
-        resp_change = self._put(
-            f'paths/watch_output',
-            query={'path': resp_inpath.json()['outbound_images']}
-        )
-        self.assertEqual(resp_change.status_code, 200)
-        resp_check = self._get('paths')
-        self.assertEqual(resp_inpath.json()['outbound_images'], resp_check.json()['outbound_images'])
-
-    def test_get_logs(self):
-        resp = self._get('session/logs')
-        self.assertEqual(resp.status_code, 200)
-        self.assertEqual(resp.json()[0]['message'], 'Initialized session')
\ No newline at end of file
diff --git a/model_server/extensions/ilastik/tests/__init__.py b/tests/test_ilastik/__init__.py
similarity index 100%
rename from model_server/extensions/ilastik/tests/__init__.py
rename to tests/test_ilastik/__init__.py
diff --git a/model_server/extensions/ilastik/tests/test_ilastik.py b/tests/test_ilastik/test_ilastik.py
similarity index 69%
rename from model_server/extensions/ilastik/tests/test_ilastik.py
rename to tests/test_ilastik/test_ilastik.py
index c9358796c7be6d022d4dced264819f9eb695baa4..e7744acb0c53e02353654602e7916b69de61679e 100644
--- a/model_server/extensions/ilastik/tests/test_ilastik.py
+++ b/tests/test_ilastik/test_ilastik.py
@@ -1,16 +1,25 @@
-import pathlib
+from pathlib import Path
+from shutil import copyfile
 import unittest
 
 import numpy as np
 
-from model_server.conf.testing import czifile, ilastik_classifiers, output_path, roiset_test_data
 from model_server.base.accessors import CziImageFileAccessor, generate_file_accessor, InMemoryDataAccessor, PatchStack, write_accessor_data_to_file
+from model_server.base.api import app
 from model_server.extensions.ilastik import models as ilm
-from model_server.extensions.ilastik.workflows import infer_px_then_ob_model
-from model_server.base.models import InvalidObjectLabelsError
-from model_server.base.roiset import _get_label_ids, RoiSet, RoiSetMetaParams
-from model_server.base.workflows import classify_pixels
-from tests.test_api import TestServerBaseClass
+from model_server.extensions.ilastik.pipelines import px_then_ob
+from model_server.extensions.ilastik.router import router
+from model_server.base.roiset import RoiSet, RoiSetMetaParams
+from model_server.base.pipelines import segment
+import model_server.conf.testing as conf
+
+data = conf.meta['image_files']
+output_path = conf.meta['output_path']
+params = conf.meta['roiset']
+czifile = conf.meta['image_files']['czifile']
+ilastik_classifiers = conf.meta['ilastik_classifiers']
+
+app.include_router(router)
 
 def _random_int(*args):
     return np.random.randint(0, 2 ** 8, size=args, dtype='uint8')
@@ -20,23 +29,14 @@ class TestIlastikPixelClassification(unittest.TestCase):
         self.cf = CziImageFileAccessor(czifile['path'])
         self.channel = 0
         self.model = ilm.IlastikPixelClassifierModel(
-            params=ilm.IlastikPixelClassifierParams(project_file=ilastik_classifiers['px'].__str__())
+            params=ilm.IlastikPixelClassifierParams(project_file=ilastik_classifiers['px']['path'].__str__())
         )
         self.mono_image = self.cf.get_mono(self.channel)
 
 
-    def test_faulthandler(self): # recreate error that is messing up ilastik
-        import io
-        import sys
-        import faulthandler
-
-        with self.assertRaises(io.UnsupportedOperation):
-            faulthandler.enable(file=sys.stdout)
-
-
     def test_raise_error_if_autoload_disabled(self):
         model = ilm.IlastikPixelClassifierModel(
-            params=ilm.IlastikPixelClassifierParams(project_file=ilastik_classifiers['px'].__str__()),
+            params=ilm.IlastikPixelClassifierParams(project_file=ilastik_classifiers['px']['path'].__str__()),
             autoload=False
         )
         w = 512
@@ -81,7 +81,7 @@ class TestIlastikPixelClassification(unittest.TestCase):
         def _run_seg(tr, sig):
             mod = ilm.IlastikPixelClassifierModel(
                 params=ilm.IlastikPixelClassifierParams(
-                    project_file=ilastik_classifiers['px'].__str__(),
+                    project_file=ilastik_classifiers['px']['path'].__str__(),
                     px_prob_threshold=tr,
                     px_smoothing=sig,
                 ),
@@ -154,7 +154,7 @@ class TestIlastikPixelClassification(unittest.TestCase):
         self.test_run_pixel_classifier()
         fp = czifile['path']
         model = ilm.IlastikObjectClassifierFromPixelPredictionsModel(
-            params=ilm.IlastikParams(project_file=ilastik_classifiers['pxmap_to_obj'].__str__())
+            params=ilm.IlastikParams(project_file=ilastik_classifiers['pxmap_to_obj']['path'].__str__())
         )
         mask = self.model.label_pixel_class(self.mono_image)
         objmap, _ = model.infer(self.mono_image, mask)
@@ -172,7 +172,7 @@ class TestIlastikPixelClassification(unittest.TestCase):
         self.test_run_pixel_classifier()
         fp = czifile['path']
         model = ilm.IlastikObjectClassifierFromSegmentationModel(
-            params=ilm.IlastikParams(project_file=ilastik_classifiers['seg_to_obj'].__str__())
+            params=ilm.IlastikParams(project_file=ilastik_classifiers['seg_to_obj']['path'].__str__())
         )
         mask = self.model.label_pixel_class(self.mono_image)
         objmap = model.label_instance_class(self.mono_image, mask)
@@ -186,19 +186,28 @@ class TestIlastikPixelClassification(unittest.TestCase):
         self.assertEqual(objmap.data.max(), 2)
 
     def test_ilastik_pixel_classification_as_workflow(self):
-        result = classify_pixels(
-            czifile['path'],
-            ilm.IlastikPixelClassifierModel(
-                params=ilm.IlastikPixelClassifierParams(project_file=ilastik_classifiers['px'].__str__()),
-            ),
-            output_path,
+        res = segment.segment_pipeline(
+            accessors={
+                'accessor': generate_file_accessor(czifile['path'])
+            },
+            models={
+                'model': ilm.IlastikPixelClassifierModel(
+                    params=ilm.IlastikPixelClassifierParams(
+                        project_file=ilastik_classifiers['px']['path'].__str__()
+                    ),
+                ),
+            },
             channel=0,
         )
-        self.assertTrue(result.success)
-        self.assertGreater(result.timer_results['inference'], 1.0)
+        self.assertGreater(res.times['inference'], 0.1)
+
+
+class TestServerTestCase(conf.TestServerBaseClass):
+    app_name = 'tests.test_ilastik.test_ilastik:app'
+    input_data = czifile
 
-class TestIlastikOverApi(TestServerBaseClass):
 
+class TestIlastikOverApi(TestServerTestCase):
     def test_httpexception_if_incorrect_project_file_loaded(self):
         resp_load = self._put(
             'ilastik/seg/load/',
@@ -210,7 +219,7 @@ class TestIlastikOverApi(TestServerBaseClass):
     def test_load_ilastik_pixel_model(self):
         resp_load = self._put(
             'ilastik/seg/load/',
-            body={'project_file': str(ilastik_classifiers['px'])},
+            body={'project_file': str(ilastik_classifiers['px']['path'])},
         )
         self.assertEqual(resp_load.status_code, 200, resp_load.json())
         model_id = resp_load.json()['model_id']
@@ -226,20 +235,20 @@ class TestIlastikOverApi(TestServerBaseClass):
         self.assertEqual(len(resp_list_1st), 1, resp_list_1st)
         resp_load_2nd = self._put(
             'ilastik/seg/load/',
-            body={'project_file': str(ilastik_classifiers['px']), 'duplicate': True},
+            body={'project_file': str(ilastik_classifiers['px']['path']), 'duplicate': True},
         )
         resp_list_2nd = self._get('models').json()
         self.assertEqual(len(resp_list_2nd), 2, resp_list_2nd)
         resp_load_3rd = self._put(
             'ilastik/seg/load/',
-            body={'project_file': str(ilastik_classifiers['px']), 'duplicate': False},
+            body={'project_file': str(ilastik_classifiers['px']['path']), 'duplicate': False},
         )
         resp_list_3rd = self._get('models').json()
         self.assertEqual(len(resp_list_3rd), 2, resp_list_3rd)
 
     def test_load_ilastik_pixel_model_with_params(self):
         params = {
-            'project_file': str(ilastik_classifiers['px']),
+            'project_file': str(ilastik_classifiers['px']['path']),
             'px_class': 0,
             'px_prob_threshold': 0.5
         }
@@ -253,42 +262,11 @@ class TestIlastikOverApi(TestServerBaseClass):
         self.assertEqual(len(mods), 1)
         self.assertEqual(mods[model_id]['params']['px_prob_threshold'], 0.5)
 
-    def test_no_duplicate_model_with_different_path_formats(self):
-        self._get('session/restart')
-        resp_list_1 = self._get('models').json()
-        self.assertEqual(len(resp_list_1), 0)
-        ilp = ilastik_classifiers['px']
-
-        # create and validate two copies of the same pathname with different string formats
-        ilp_win = str(pathlib.PureWindowsPath(ilp))
-        self.assertGreater(ilp_win.count('\\'), 0) # i.e. contains backslashes
-        self.assertEqual(ilp_win.count('/'), 0)
-        ilp_posx = ilastik_classifiers['px'].as_posix()
-        self.assertGreater(ilp_posx.count('/'), 0)
-        self.assertEqual(ilp_posx.count('\\'), 0)
-        self.assertEqual(pathlib.Path(ilp_win), pathlib.Path(ilp_posx))
-
-        # load models with these paths
-        resp1 = self._put(
-            'ilastik/seg/load/',
-            body={'project_file': ilp_win, 'duplicate': False},
-        )
-        resp2 = self._put(
-            'ilastik/seg/load/',
-            body={'project_file': ilp_posx, 'duplicate': False},
-        )
-        self.assertEqual(resp1.json(), resp2.json())
-
-        # assert that only one copy of the model is loaded
-        resp_list_2 = self._get('models').json()
-        print(resp_list_2)
-        self.assertEqual(len(resp_list_2), 1)
-
 
     def test_load_ilastik_pxmap_to_obj_model(self):
         resp_load = self._put(
             'ilastik/pxmap_to_obj/load/',
-            body={'project_file': str(ilastik_classifiers['pxmap_to_obj'])},
+            body={'project_file': str(ilastik_classifiers['pxmap_to_obj']['path'])},
         )
         model_id = resp_load.json()['model_id']
 
@@ -299,10 +277,22 @@ class TestIlastikOverApi(TestServerBaseClass):
         self.assertEqual(rj[model_id]['class'], 'IlastikObjectClassifierFromPixelPredictionsModel')
         return model_id
 
+    def test_load_ilastik_model_with_model_id(self):
+        mid = 'new_model_id'
+        resp_load = self._put(
+            'ilastik/pxmap_to_obj/load/',
+            body={
+                'project_file': str(ilastik_classifiers['pxmap_to_obj']['path']),
+                'model_id': mid,
+            },
+        )
+        res_mid = resp_load.json()['model_id']
+        self.assertEqual(res_mid, mid)
+
     def test_load_ilastik_seg_to_obj_model(self):
         resp_load = self._put(
             'ilastik/seg_to_obj/load/',
-            body={'project_file': str(ilastik_classifiers['seg_to_obj'])},
+            body={'project_file': str(ilastik_classifiers['seg_to_obj']['path'])},
         )
         model_id = resp_load.json()['model_id']
 
@@ -314,53 +304,45 @@ class TestIlastikOverApi(TestServerBaseClass):
         return model_id
 
     def test_ilastik_infer_pixel_probability(self):
-        self.copy_input_file_to_server()
+        fname = self.copy_input_file_to_server()
         model_id = self.test_load_ilastik_pixel_model()
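+        # register the copied input file as an accessor so the pipeline can reference it by id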
+        in_acc_id = self._put(f'accessors/read_from_file/{fname}').json()
 
         resp_infer = self._put(
-            f'workflows/segment',
-            query={'model_id': model_id, 'input_filename': czifile['filename'], 'channel': 0},
+            'pipelines/segment',
+            body={'model_id': model_id, 'accessor_id': in_acc_id, 'channel': 0},
         )
         self.assertEqual(resp_infer.status_code, 200, resp_infer.content.decode())
 
 
     def test_ilastik_infer_px_then_ob(self):
-        self.copy_input_file_to_server()
+        fname = self.copy_input_file_to_server()
         px_model_id = self.test_load_ilastik_pixel_model()
         ob_model_id = self.test_load_ilastik_pxmap_to_obj_model()
 
+        in_acc_id = self._put(f'accessors/read_from_file/{fname}').json()
+
         resp_infer = self._put(
-            'ilastik/pixel_then_object_classification/infer/',
-            query={
+            'ilastik/pipelines/pixel_then_object_classification/infer/',
+            body={
                 'px_model_id': px_model_id,
                 'ob_model_id': ob_model_id,
-                'input_filename': czifile['filename'],
+                'accessor_id': in_acc_id,
                 'channel': 0,
             }
         )
         self.assertEqual(resp_infer.status_code, 200, resp_infer.content.decode())
 
 
-class TestIlastikOnMultichannelInputs(TestServerBaseClass):
+class TestIlastikOnMultichannelInputs(TestServerTestCase):
     def setUp(self) -> None:
         super(TestIlastikOnMultichannelInputs, self).setUp()
-        self.pa_px_classifier = ilastik_classifiers['px_color_zstack']
-        self.pa_ob_pxmap_classifier = ilastik_classifiers['ob_pxmap_color_zstack']
-        self.pa_ob_seg_classifier = ilastik_classifiers['ob_seg_color_zstack']
-        self.pa_input_image = roiset_test_data['multichannel_zstack']['path']
-        self.pa_mask = roiset_test_data['multichannel_zstack']['mask_path_3d']
-
-    def _copy_input_file_to_server(self):
-        from shutil import copyfile
-
-        pa_data = roiset_test_data['multichannel_zstack']['path']
-        resp = self._get('paths')
-        pa = resp.json()['inbound_images']
-        outpath = pathlib.Path(pa) / pa_data.name
-        copyfile(
-            czifile['path'],
-            outpath
-        )
+        self.pa_px_classifier = ilastik_classifiers['px_color_zstack']['path']
+        self.pa_ob_pxmap_classifier = ilastik_classifiers['ob_pxmap_color_zstack']['path']
+        self.pa_ob_seg_classifier = ilastik_classifiers['ob_seg_color_zstack']['path']
+        self.pa_input_image = data['multichannel_zstack_raw']['path']
+        self.pa_mask = data['multichannel_zstack_mask3d']['path']
+
 
     def test_classify_pixels(self):
         img = generate_file_accessor(self.pa_input_image)
@@ -381,67 +363,89 @@ class TestIlastikOnMultichannelInputs(TestServerBaseClass):
         self.assertEqual(obmap.hw, img.hw)
         self.assertEqual(obmap.nz, img.nz)
 
-    def _call_workflow(self, channel):
-        return infer_px_then_ob_model(
-            self.pa_input_image,
-            ilm.IlastikPixelClassifierModel(
-                ilm.IlastikParams(project_file=self.pa_px_classifier.__str__()),
-            ),
-            ilm.IlastikObjectClassifierFromPixelPredictionsModel(
-                ilm.IlastikParams(project_file=self.pa_ob_pxmap_classifier.__str__()),
-            ),
-            output_path,
-            channel=channel,
-        )
-
     def test_workflow(self):
+        """
+        Test calling the pixel-then-object classification pipeline function directly
+        """
+        def _call_workflow(channel):
+            return px_then_ob.pixel_then_object_classification_pipeline(
+                accessors={
+                    'accessor': generate_file_accessor(self.pa_input_image)
+                },
+                models={
+                    'px_model': ilm.IlastikPixelClassifierModel(
+                        ilm.IlastikParams(project_file=self.pa_px_classifier.__str__()),
+                    ),
+                    'ob_model': ilm.IlastikObjectClassifierFromPixelPredictionsModel(
+                        ilm.IlastikParams(project_file=self.pa_ob_pxmap_classifier.__str__()),
+                    )
+                },
+                channel=channel,
+            )
+
         with self.assertRaises(ilm.IlastikInputShapeError):
-            self._call_workflow(channel=0)
-        res = self._call_workflow(channel=None)
+            _call_workflow(channel=0)
+        res = _call_workflow(channel=None)
         acc_input = generate_file_accessor(self.pa_input_image)
-        acc_obmap = generate_file_accessor(res.object_map_filepath)
+        acc_obmap = res['ob_map']
         self.assertEqual(acc_obmap.hw, acc_input.hw)
-        self.assertEqual(len(acc_obmap._unique()[1]), 3)
+        self.assertEqual(len(acc_obmap.unique()[1]), 3)
 
 
     def test_api(self):
-        resp_load = self._put(
+        """
+        Test calling the pixel-then-object classification pipeline over the API
+        """
+        copyfile(
+            self.pa_input_image,
+            Path(self._get('paths').json()['inbound_images']) / self.pa_input_image.name
+        )
+
+        in_acc_id = self._put(f'accessors/read_from_file/{self.pa_input_image.name}').json()
+
+        resp_load_px = self._put(
             'ilastik/seg/load/',
             body={'project_file': str(self.pa_px_classifier)},
         )
-        self.assertEqual(resp_load.status_code, 200, resp_load.json())
-        px_model_id = resp_load.json()['model_id']
+        self.assertEqual(resp_load_px.status_code, 200, resp_load_px.json())
+        px_model_id = resp_load_px.json()['model_id']
 
-        resp_load = self._put(
+        resp_load_ob = self._put(
             'ilastik/pxmap_to_obj/load/',
             body={'project_file': str(self.pa_ob_pxmap_classifier)},
         )
-        self.assertEqual(resp_load.status_code, 200, resp_load.json())
-        ob_model_id = resp_load.json()['model_id']
+        self.assertEqual(resp_load_ob.status_code, 200, resp_load_ob.json())
+        ob_model_id = resp_load_ob.json()['model_id']
 
+        # run the pipeline
         resp_infer = self._put(
-            'ilastik/pixel_then_object_classification/infer/',
-            query={
+            'ilastik/pipelines/pixel_then_object_classification/infer/',
+            body={
+                'accessor_id': in_acc_id,
                 'px_model_id': px_model_id,
                 'ob_model_id': ob_model_id,
-                'input_filename': self.pa_input_image.__str__(),
             }
         )
         self.assertEqual(resp_infer.status_code, 200, resp_infer.content.decode())
-        acc_input = generate_file_accessor(self.pa_input_image)
-        acc_obmap = generate_file_accessor(resp_infer.json()['object_map_filepath'])
-        self.assertEqual(acc_obmap.hw, acc_input.hw)
+
+        # retrieve the output object map accessor and check that it is single-channel
+        obmap_id = resp_infer.json()['output_accessor_id']
+        obmap_acc = self.get_accessor(obmap_id)
+        self.assertEqual(obmap_acc.shape_dict['C'], 1)
+
+        # compare dimensions to input image
+        self.assertEqual(obmap_acc.hw, generate_file_accessor(self.pa_input_image).hw)
 
 
 class TestIlastikObjectClassification(unittest.TestCase):
     def setUp(self):
-        stack = generate_file_accessor(roiset_test_data['multichannel_zstack']['path'])
-        stack_ch_pa = stack.get_mono(roiset_test_data['pipeline_params']['patches_channel'])
-        seg_mask = generate_file_accessor(roiset_test_data['multichannel_zstack']['mask_path'])
+        stack = generate_file_accessor(data['multichannel_zstack_raw']['path'])
+        stack_ch_pa = stack.get_mono(conf.meta['roiset']['patches_channel'])
+        seg_mask = generate_file_accessor(data['multichannel_zstack_mask2d']['path'])
 
-        self.roiset = RoiSet(
+        self.roiset = RoiSet.from_binary_mask(
             stack_ch_pa,
-            _get_label_ids(seg_mask),
+            seg_mask,
             params=RoiSetMetaParams(
                 mask_type='boxes',
                 filters={'area': {'min': 1e3, 'max': 1e4}},
@@ -450,7 +454,7 @@ class TestIlastikObjectClassification(unittest.TestCase):
         )
 
         self.classifier = ilm.IlastikObjectClassifierFromSegmentationModel(
-            params=ilm.IlastikParams(project_file=ilastik_classifiers['seg_to_obj'].__str__()),
+            params=ilm.IlastikParams(project_file=ilastik_classifiers['seg_to_obj']['path'].__str__()),
         )
         self.raw = self.roiset.get_patches_acc()
         self.masks = self.roiset.get_patch_masks_acc()
diff --git a/tests/test_ilastik/test_roiset_workflow.py b/tests/test_ilastik/test_roiset_workflow.py
new file mode 100644
index 0000000000000000000000000000000000000000..cec05dbdbdb1be3c97c1ed53cccf3d57984fa34c
--- /dev/null
+++ b/tests/test_ilastik/test_roiset_workflow.py
@@ -0,0 +1,191 @@
+from pathlib import Path
+import unittest
+
+import numpy as np
+
+
+from model_server.base.accessors import generate_file_accessor
+from model_server.base.api import app
+import model_server.conf.testing as conf
+from model_server.base.pipelines.roiset_obmap import RoiSetObjectMapParams, roiset_object_map_pipeline
+import model_server.extensions.ilastik.models as ilm
+from model_server.extensions.ilastik.router import router
+
+app.include_router(router)
+
+data = conf.meta['image_files']
+output_path = conf.meta['output_path']
+test_params = conf.meta['roiset']
+classifiers = conf.meta['ilastik_classifiers']
+
+
+class BaseTestRoiSetMonoProducts(object):
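+    """
+    Shared fixtures for the RoiSet object-map workflow tests: the multichannel
+    z-stack input, its 2D segmentation mask, and the ROI/export/model parameters.
+    """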
+
+    @property
+    def fpi(self):
+        return data['multichannel_zstack_raw']['path'].__str__()
+
+    @property
+    def stack(self):
+        return generate_file_accessor(self.fpi)
+
+    @property
+    def stack_ch_pa(self):
+        return self.stack.get_mono(test_params['patches_channel'])
+
+    @property
+    def seg_mask(self):
+        return generate_file_accessor(data['multichannel_zstack_mask2d']['path'])
+
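+    # per-product export options, passed to the pipeline as 'export_params'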
+    def _get_export_params(self):
+        return {
+            'patches_3d': None,
+            'annotated_patches_2d': {
+                'draw_bounding_box': True,
+                'rgb_overlay_channels': [3, None, None],
+                'rgb_overlay_weights': [0.2, 1.0, 1.0],
+                'pad_to': 512,
+            },
+            'patches_2d': {
+                'draw_bounding_box': False,
+                'draw_mask': False,
+            },
+            'annotated_zstacks': None,
+            'object_classes': True,
+        }
+
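+    # RoiSet construction parameters, passed to the pipeline as 'roi_params'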
+    def _get_roi_params(self):
+        return {
+            'mask_type': 'boxes',
+            'filters': {
+                'area': {'min': 1e0, 'max': 1e8}
+            },
+            'expand_box_by': [128, 2],
+            'deproject_channel': 0,
+        }
+
+    def _get_models(self):  # tests can use these model objects directly, or load them via the API from the project file string
+        fp_px = classifiers['px']['path'].__str__()
+        fp_ob = classifiers['seg_to_obj']['path'].__str__()
+        return {
+            'pixel_classifier_segmentation': {
+                'name': 'ilastik_px_mod',
+                'project_file': fp_px,
+                'model': ilm.IlastikPixelClassifierModel(
+                    ilm.IlastikPixelClassifierParams(
+                        project_file=fp_px,
+                    )
+                )
+            },
+            'object_classifier': {
+                'name': 'ilastik_ob_mod',
+                'project_file': fp_ob,
+                'model': ilm.IlastikObjectClassifierFromSegmentationModel(
+                    ilm.IlastikParams(
+                        project_file=fp_ob
+                    )
+                )
+            },
+        }
+
+
+class TestRoiSetWorkflow(BaseTestRoiSetMonoProducts, unittest.TestCase):
+
+    def _pipeline_params(self):
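+        # parameters for RoiSetObjectMapParams when calling the pipeline function directly (not over the API)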
+        return {
+            'api': False,
+            'accessor_id': 'acc_id',
+            'pixel_classifier_segmentation_model_id': 'px_id',
+            'object_classifier_model_id': 'ob_id',
+            'segmentation': {
+                'channel': test_params['segmentation_channel'],
+            },
+            'patches_channel': test_params['patches_channel'],
+            'roi_params': self._get_roi_params(),
+            'export_params': self._get_export_params(),
+        }
+
+    def test_object_map_workflow(self):
+        acc_in = generate_file_accessor(self.fpi)
+        params = RoiSetObjectMapParams(
+            **self._pipeline_params(),
+        )
+        trace, rois = roiset_object_map_pipeline(
+            {'accessor': acc_in},
+            {f'{k}_model': v['model'] for k, v in self._get_models().items()},
+            **params.dict()
+        )
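+        # the expected counts below come from the multichannel z-stack test fixture (13 ROIs)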
+        self.assertEqual(trace.pop('annotated_patches_2d').count, 13)
+        self.assertEqual(trace.pop('patches_2d').count, 13)
+        trace.write_interm(Path(output_path) / 'trace', 'roiset_workflow_trace', skip_first=False, skip_last=False)
+        self.assertTrue('ob_id' in trace.keys())
+        self.assertEqual(len(trace['labeled'].unique()[0]), 14)
+        self.assertEqual(rois.count, 13)
+        self.assertEqual(len(trace['ob_id'].unique()[0]), 2)
+
+
+class TestRoiSetWorkflowOverApi(conf.TestServerBaseClass, BaseTestRoiSetMonoProducts):
+
+    app_name = 'tests.test_ilastik.test_roiset_workflow:app'
+    input_data = data['multichannel_zstack_raw']
+
+
+    def setUp(self) -> None:
+        self.where_out = output_path / 'roiset'
+        self.where_out.mkdir(parents=True, exist_ok=True)
+        return conf.TestServerBaseClass.setUp(self)
+
+    def test_trivial_api_response(self):
+        resp = self._get('')
+        self.assertEqual(resp.status_code, 200)
+
+    def test_load_input_accessor(self):
+        fname = self.copy_input_file_to_server()
+        return self._put(f'accessors/read_from_file/{fname}').json()
+
+    def test_load_pixel_classifier(self):
+        resp = self._put(
+            'ilastik/seg/load/',
+            body={'project_file': self._get_models()['pixel_classifier_segmentation']['project_file']},
+        )
+        model_id = resp.json()['model_id']
+        self.assertTrue(model_id.startswith('IlastikPixelClassifierModel'))
+        return model_id
+
+    def test_load_object_classifier(self):
+        resp = self._put(
+            'ilastik/seg_to_obj/load/',
+            body={'project_file': self._get_models()['object_classifier']['project_file']},
+        )
+        model_id = resp.json()['model_id']
+        self.assertTrue(model_id.startswith('IlastikObjectClassifierFromSegmentationModel'))
+        return model_id
+
+    def _object_map_workflow(self, ob_classifier_id):
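+        # end-to-end API round trip: register the input accessor and pixel classifier, run the
+        # roiset_to_obmap pipeline with the given object classifier id, write the output accessor
+        # to disk, then read it back as a file accessor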
+        resp = self._put(
+            'pipelines/roiset_to_obmap/infer',
+            body={
+                'accessor_id': self.test_load_input_accessor(),
+                'pixel_classifier_segmentation_model_id': self.test_load_pixel_classifier(),
+                'object_classifier_model_id': ob_classifier_id,
+                'segmentation': {'channel': 0},
+                'patches_channel': 1,
+                'roi_params': self._get_roi_params(),
+                'export_params': self._get_export_params(),
+            },
+        )
+        self.assertEqual(resp.status_code, 200, resp.json())
+        oid = resp.json()['output_accessor_id']
+        obmap_fn = self._put(f'/accessors/write_to_file/{oid}').json()
+        where_out = self._get('paths').json()['outbound_images']
+        obmap_fp = Path(where_out) / obmap_fn
+        self.assertTrue(obmap_fp.exists())
+        return generate_file_accessor(obmap_fp)
+
+    def test_workflow_with_object_classifier(self):
+        acc = self._object_map_workflow(self.test_load_object_classifier())
+        self.assertTrue(np.all(acc.unique()[0] == [0, 1, 2]))
+
+    def test_workflow_without_object_classifier(self):
+        acc = self._object_map_workflow(None)
+        self.assertTrue(np.all(acc.unique()[0] == [0, 1]))
diff --git a/tests/test_workflow.py b/tests/test_workflow.py
deleted file mode 100644
index 6e9603ea7418ba367b482a00b8dd8c9aafa8820d..0000000000000000000000000000000000000000
--- a/tests/test_workflow.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import unittest
-
-from model_server.conf.testing import czifile, output_path
-from model_server.base.models import DummySemanticSegmentationModel
-from model_server.base.workflows import classify_pixels
-
-
-class TestGetSessionObject(unittest.TestCase):
-    def setUp(self) -> None:
-        self.model = DummySemanticSegmentationModel()
-
-    def test_single_session_instance(self):
-        result = classify_pixels(czifile['path'], self.model, output_path, channel=2)
-        self.assertTrue(result.success)
-
-        import tifffile
-        img = tifffile.imread(result.output_filepath)
-        w = czifile['w']
-        h = czifile['h']
-
-        self.assertEqual(
-            img.shape,
-            (h, w),
-            'Inferred image is not the expected shape'
-        )
-
-        self.assertEqual(
-            img[int(w/2), int(h/2)],
-            255,
-            'Middle pixel is not white as expected'
-        )
-
-        self.assertEqual(
-            img[0, 0],
-            0,
-            'First pixel is not black as expected'
-        )
\ No newline at end of file
diff --git a/update_source.bat b/update_source.bat
deleted file mode 100644
index 30a16ae54d000c488a0bce9865b18a3c17167051..0000000000000000000000000000000000000000
--- a/update_source.bat
+++ /dev/null
@@ -1,18 +0,0 @@
-@echo off
-
-set srcs=(model_server)
-
-for %%s in %srcs% do (
-	echo:
-	echo Updating %%s ...
-	if not exist %userprofile%\%%s (
-	echo ERROR!
-	echo Could not find file %userprofile%\%%s 
-	) else (
- 	cd %userprofile%\%%s
-	call git checkout master
-	call git pull origin
-	)
-)
-echo:
-pause
\ No newline at end of file