Skip to content
Snippets Groups Projects
Commit a372c464 authored by Christopher Randolph Rhodes's avatar Christopher Randolph Rhodes
Browse files

Test and example notebook that covers object classification from binary segmentation

parent b17d32c6
No related branches found
No related tags found
No related merge requests found
...@@ -56,7 +56,7 @@ monozstackmask = { ...@@ -56,7 +56,7 @@ monozstackmask = {
ilastik_classifiers = { ilastik_classifiers = {
'px': root / 'ilastik' / 'demo_px.ilp', 'px': root / 'ilastik' / 'demo_px.ilp',
'pxmap_to_obj': root / 'ilastik' / 'demo_obj.ilp', 'pxmap_to_obj': root / 'ilastik' / 'demo_obj.ilp',
'seg_to_obj': root / 'ilastik' / 'new_auto_obj.ilp', 'seg_to_obj': root / 'ilastik' / 'demo_obj_seg.ilp',
} }
output_path = root / 'testing_output' output_path = root / 'testing_output'
......
...@@ -92,9 +92,16 @@ class IlastikObjectClassifierFromSegmentationModel(IlastikModel, InstanceSegment ...@@ -92,9 +92,16 @@ class IlastikObjectClassifierFromSegmentationModel(IlastikModel, InstanceSegment
return ObjectClassificationWorkflowBinary return ObjectClassificationWorkflowBinary
def infer(self, input_img: GenericImageDataAccessor, segmentation_img: GenericImageDataAccessor) -> (np.ndarray, dict): def infer(self, input_img: GenericImageDataAccessor, segmentation_img: GenericImageDataAccessor) -> (np.ndarray, dict):
assert segmentation_img.is_mask()
tagged_input_data = vigra.taggedView(input_img.data, 'yxcz') tagged_input_data = vigra.taggedView(input_img.data, 'yxcz')
tagged_seg_data = vigra.taggedView(segmentation_img.data, 'yxcz') assert segmentation_img.is_mask()
if segmentation_img.dtype == 'bool':
seg = 255 * segmentation_img.data.astype('uint8')
tagged_seg_data = vigra.taggedView(
255 * segmentation_img.data.astype('uint8'),
'yxcz'
)
else:
tagged_seg_data = vigra.taggedView(segmentation_img.data, 'yxcz')
dsi = [ dsi = [
{ {
...@@ -105,7 +112,7 @@ class IlastikObjectClassifierFromSegmentationModel(IlastikModel, InstanceSegment ...@@ -105,7 +112,7 @@ class IlastikObjectClassifierFromSegmentationModel(IlastikModel, InstanceSegment
obmaps = self.shell.workflow.batchProcessingApplet.run_export(dsi, export_to_array=True) # [z x h x w x n] obmaps = self.shell.workflow.batchProcessingApplet.run_export(dsi, export_to_array=True) # [z x h x w x n]
assert (len(obmaps) == 1, 'ilastik generated more than one object map') assert len(obmaps) == 1, 'ilastik generated more than one object map'
yxcz = np.moveaxis( yxcz = np.moveaxis(
obmaps[0], obmaps[0],
...@@ -141,7 +148,7 @@ class IlastikObjectClassifierFromPixelPredictionsModel(IlastikModel, ImageToImag ...@@ -141,7 +148,7 @@ class IlastikObjectClassifierFromPixelPredictionsModel(IlastikModel, ImageToImag
obmaps = self.shell.workflow.batchProcessingApplet.run_export(dsi, export_to_array=True) # [z x h x w x n] obmaps = self.shell.workflow.batchProcessingApplet.run_export(dsi, export_to_array=True) # [z x h x w x n]
assert (len(obmaps) == 1, 'ilastik generated more than one object map') assert len(obmaps) == 1, 'ilastik generated more than one object map'
yxcz = np.moveaxis( yxcz = np.moveaxis(
obmaps[0], obmaps[0],
......
...@@ -82,7 +82,7 @@ class TestIlastikPixelClassification(unittest.TestCase): ...@@ -82,7 +82,7 @@ class TestIlastikPixelClassification(unittest.TestCase):
self.mono_image = mono_image self.mono_image = mono_image
self.mask = mask self.mask = mask
def test_run_object_classifier(self): def test_run_object_classifier_from_pixel_predictions(self):
self.test_run_pixel_classifier() self.test_run_pixel_classifier()
fp = czifile['path'] fp = czifile['path']
model = ilm.IlastikObjectClassifierFromPixelPredictionsModel( model = ilm.IlastikObjectClassifierFromPixelPredictionsModel(
...@@ -98,6 +98,22 @@ class TestIlastikPixelClassification(unittest.TestCase): ...@@ -98,6 +98,22 @@ class TestIlastikPixelClassification(unittest.TestCase):
) )
self.assertEqual(objmap.data.max(), 3) self.assertEqual(objmap.data.max(), 3)
def test_run_object_classifier_from_segmentation(self):
    """Run object classification driven by a binary segmentation mask.

    Reuses the pixel-classifier test to populate ``self.mono_image`` and
    ``self.mask``, classifies object instances with the seg-to-obj ilastik
    project, writes the resulting object map to the test output folder,
    and checks the map contains the expected maximum class label (3).
    """
    # Populate self.mono_image / self.mask via the pixel-classification test.
    self.test_run_pixel_classifier()

    source_file = czifile['path']
    classifier = ilm.IlastikObjectClassifierFromSegmentationModel(
        {'project_file': ilastik_classifiers['seg_to_obj']}
    )
    objmap = classifier.label_instance_class(self.mono_image, self.mask)

    # Persist the object map; write_accessor_data_to_file returns truthy on success.
    wrote_ok = write_accessor_data_to_file(
        output_path / f'obmap_from_seg_{source_file.stem}.tif',
        objmap,
    )
    self.assertTrue(wrote_ok)

    # NOTE(review): 3 is presumably the number of object classes in the
    # demo project — confirm against demo_obj_seg.ilp if it changes.
    self.assertEqual(objmap.data.max(), 3)
def test_ilastik_pixel_classification_as_workflow(self): def test_ilastik_pixel_classification_as_workflow(self):
result = infer_image_to_image( result = infer_image_to_image(
czifile['path'], czifile['path'],
......
...@@ -43,7 +43,7 @@ def infer_image_to_image(fpi: Path, model: Model, where_output: Path, **kwargs) ...@@ -43,7 +43,7 @@ def infer_image_to_image(fpi: Path, model: Model, where_output: Path, **kwargs)
img = generate_file_accessor(fpi).get_one_channel_data(ch) img = generate_file_accessor(fpi).get_one_channel_data(ch)
ti.click('file_input') ti.click('file_input')
outdata, _ = model.infer(img) outdata = model.label_pixel_class(img)
ti.click('inference') ti.click('inference')
outpath = where_output / (model.model_id + '_' + fpi.stem + '.tif') outpath = where_output / (model.model_id + '_' + fpi.stem + '.tif')
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment