Commit 062cc38d authored by Constantin Pape

Start to implement validation code

parent 0e3c656a
1 merge request: !8 Segmentation validation and correction
from .eval_cells import eval_cells
from .eval_nuclei import eval_nuclei
import numpy as np
import pandas as pd
import vigra
from elf.io import open_file, is_dataset
from .evaluate_annotations import evaluate_annotations, merge_evaluations
def eval_slice(ds_seg, ds_ann, ignore_seg_ids, min_radius):
    ds_seg.n_threads = 8
    ds_ann.n_threads = 8

    # get the bounding box of the annotated slice from the annotation attributes
    attrs = ds_ann.attrs
    start, stop = attrs['starts'], attrs['stops']
    bb = tuple(slice(sta, sto) for sta, sto in zip(start, stop))

    annotations = ds_ann[:]
    seg = ds_seg[bb].squeeze()

    # relabel the segmentation slice via connected components
    seg_eval = vigra.analysis.labelImageWithBackground(seg)

    # map the ignore ids (given w.r.t. the original segmentation) to the relabeled ids
    if ignore_seg_ids is None:
        this_ignore_ids = None
    else:
        ignore_mask = np.isin(seg, ignore_seg_ids)
        this_ignore_ids = np.unique(seg_eval[ignore_mask])

    # annotation values 1 and 2 mark foreground, 3 marks background
    fg_annotations = np.isin(annotations, [1, 2]).astype('uint32')
    bg_annotations = annotations == 3

    return evaluate_annotations(seg_eval, fg_annotations, bg_annotations,
                                this_ignore_ids, min_radius=min_radius)
def get_ignore_seg_ids(table_path, ignore_names=['cuticle', 'neuropil', 'yolk']):
    table = pd.read_csv(table_path)
    ignore_seg_ids = []
    for name in ignore_names:
        col = table[name].values.astype('uint8')
        ignore_seg_ids.extend(np.where(col == 1)[0].tolist())
    ignore_seg_ids = np.unique(ignore_seg_ids)
    return ignore_seg_ids
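The ignore table is assumed to contain one binary column per region name, with the row index corresponding to the segment id; a minimal sketch with a made-up table (the real table contains additional columns, only the one-hot region columns are used here):

import pandas as pd
# hypothetical table: rows are segment ids, the region columns are 0/1 flags
pd.DataFrame({'cuticle':  [0, 1, 0, 0],
              'neuropil': [0, 0, 1, 0],
              'yolk':     [0, 0, 0, 1]}).to_csv('example_table.csv', index=False)
print(get_ignore_seg_ids('example_table.csv'))  # -> [1 2 3]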
def eval_cells(seg_path, seg_key,
               annotation_path, annotation_key,
               ignore_seg_ids=None, min_radius=16):
    """ Evaluate the cell segmentation.
    """
    eval_res = {}
    with open_file(seg_path, 'r') as f_seg, open_file(annotation_path) as f_ann:
        ds_seg = f_seg[seg_key]
        g = f_ann[annotation_key]

        # visit all annotated slices and accumulate the evaluation results
        def visit_annotation(name, node):
            nonlocal eval_res
            if is_dataset(node):
                res = eval_slice(ds_seg, node, ignore_seg_ids, min_radius)
                eval_res = merge_evaluations(res, eval_res)

        g.visititems(visit_annotation)

    return eval_res
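A minimal usage sketch for eval_cells; the paths and dataset keys below are placeholders, assuming segmentation and annotations are stored in containers that elf.io.open_file can read:

# hypothetical paths and keys, only meant to illustrate the call
ignore_ids = get_ignore_seg_ids('tables/default.csv')
metrics = eval_cells('segmentation.h5', 'volumes/cells',
                     'annotations.h5', 'annotations/cells',
                     ignore_seg_ids=ignore_ids, min_radius=16)
print(metrics)  # counts accumulated over all annotated slices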
from .evaluate_annotations import evaluate_annotations


# TODO
def eval_nuclei():
    pass
import numpy as np
import vigra
from tqdm import tqdm
def merge_evaluations(trgt, src):
    for name, val in trgt.items():
        if name in src:
            src[name] += val
        else:
            src[name] = val
    return src
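merge_evaluations adds the values of trgt into src key-wise, which is how the per-slice results are accumulated in eval_cells; for example:

acc = {}
acc = merge_evaluations({'n_annotations': 10, 'n_splits': 2}, acc)
acc = merge_evaluations({'n_annotations': 8, 'n_splits': 1, 'n_unmatched': 3}, acc)
# acc == {'n_annotations': 18, 'n_splits': 3, 'n_unmatched': 3}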
def get_radii(seg):
    # TODO I am not sure if this is the best measure;
    # maybe use an estimate based on the convex hull instead?
    # compute the region radii and keep the smallest radius per region
    radii = vigra.analysis.extractRegionFeatures(seg.astype('float32'), seg,
                                                 features=['RegionRadii'])['RegionRadii']
    radii = radii.min(axis=1)
    return radii
def evaluate_annotations(seg, fg_annotations, bg_annotations,
                         ignore_seg_ids=None, min_radius=16,
                         return_masks=False, return_ids=False):
    """ Evaluate a segmentation based on manual annotations.
    """
    # apply connected components to the foreground annotations
    # NOTE we don't apply connected components to the segmentation,
    # because there are cases where this is not appropriate
    labels = vigra.analysis.labelImageWithBackground(fg_annotations)

    # get the seg ids and label ids (skipping the background id 0)
    seg_ids = np.unique(seg)[1:]
    label_ids = np.unique(labels)[1:]
    radii = get_radii(seg)

    # categories for the segmentation objects:
    # unmatched:   segments with no corresponding annotation
    # matched:     segments matched to exactly one annotation
    # overmatched: segments matched to multiple annotations
    unmatched_ids = []
    matched_ids = {}
    overmatched_ids = {}

    # iterate over all seg-ids and map them to annotations
    n_segments = 0
    for seg_id in tqdm(seg_ids):
        mask = seg == seg_id
        # skip ids that are in the ignore list
        if ignore_seg_ids is not None and seg_id in ignore_seg_ids:
            continue
        has_bg_label = bg_annotations[mask].sum() > 0

        # find the overlapping label ids
        this_labels = np.unique(labels[mask])
        if 0 in this_labels:
            this_labels = this_labels[1:]

        # no labels -> this seg-id is unmatched and part of a false split,
        # unless it overlaps with a background annotation
        # or its region radius is below the minimal radius
        if this_labels.size == 0:
            if not has_bg_label and radii[seg_id] > min_radius:
                unmatched_ids.append(seg_id)

        # one label -> this seg-id seems to be well matched;
        # note that it could still be part of a false split, which we check later
        elif this_labels.size == 1:
            matched_ids[seg_id] = this_labels[0]

        # multiple labels -> this seg-id is over-matched and part of a false merge
        else:
            overmatched_ids[seg_id] = this_labels.tolist()

        # increase the segment count
        n_segments += 1
    # false splits = unmatched seg-ids plus seg-ids whose annotation
    # was matched by more than one segment
    matched_labels = np.array(list(matched_ids.values()), dtype='uint32')
    matched_ids = np.array(list(matched_ids.keys()), dtype='uint32')
    unique_labels, label_counts = np.unique(matched_labels, return_counts=True)
    multi_matched_labels = unique_labels[label_counts > 1]
    false_split_ids = np.concatenate([np.array(unmatched_ids, dtype='uint32'),
                                      matched_ids[np.isin(matched_labels, multi_matched_labels)]])

    # false merges = overmatched seg-ids; the annotations they cover count as merged
    false_merge_ids = list(overmatched_ids.keys())
    false_merge_labels = np.array([lab for overmatched in overmatched_ids.values()
                                   for lab in overmatched], dtype='uint32')

    # find annotation (label) ids that were not matched by any segment
    all_matched = np.concatenate([matched_labels, false_merge_labels])
    all_matched = np.unique(all_matched)
    unmatched_labels = np.setdiff1d(label_ids, all_matched)

    # print("Number of false splits:", len(false_split_ids), '/', n_segments)
    # print("Number of false merges:", len(false_merge_ids), '/', n_segments)
    # print("Number of unmatched labels:", len(unmatched_labels), '/', len(label_ids))
    metrics = {'n_annotations': len(label_ids), 'n_segments': n_segments,
               'n_splits': len(false_split_ids),
               'n_merged_annotations': len(false_merge_labels),
               'n_merged_ids': len(false_merge_ids),
               'n_unmatched': len(unmatched_labels)}
    ret = (metrics,)

    # optionally return masks of the false split / false merge (and ignore) segments
    if return_masks:
        fs_mask = np.isin(seg, false_split_ids).astype('uint32')
        fm_mask = np.isin(seg, false_merge_ids).astype('uint32')
        masks = {'splits': fs_mask, 'merges': fm_mask}
        if ignore_seg_ids is not None:
            masks['ignore'] = np.isin(seg, ignore_seg_ids).astype('uint32')
        ret = ret + (masks,)

    # optionally return the ids of false split / false merge segments
    if return_ids:
        id_dict = {'splits': false_split_ids, 'merges': false_merge_ids}
        ret = ret + (id_dict,)

    return ret
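A toy example on synthetic 2D arrays to illustrate the inputs and outputs (all values are made up): two segments, one covered by an annotation, the other unannotated and large enough, so it is counted as a false split.

import numpy as np
seg = np.zeros((64, 64), dtype='uint32')
seg[:, 2:32] = 1   # segment 1, overlaps the annotation below
seg[:, 34:] = 2    # segment 2, no annotation -> false split
fg = np.zeros((64, 64), dtype='uint32')
fg[16:24, 8:16] = 1
bg = np.zeros((64, 64), dtype=bool)
metrics, ids = evaluate_annotations(seg, fg, bg, min_radius=4, return_ids=True)
# metrics['n_splits'] == 1 and ids['splits'] contains segment 2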