diff --git a/.gitignore b/.gitignore
index 4a75b01b56e3c6976fde08d004792175aa8a1f83..8c24881875c30da8714f75ce0e52204ba59eccaa 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,3 +2,4 @@
 __pycache__/
 tmp*
 *.DS_Store
+data/dev-*
diff --git a/make_dev_folder.py b/make_dev_folder.py
index 03acfc5de8aaf252ddb1a54c3da7f468b00e937e..315a20fd39159ee7ac074399818f20265d59991a 100644
--- a/make_dev_folder.py
+++ b/make_dev_folder.py
@@ -3,7 +3,7 @@
 import argparse
 import os
 from subprocess import check_output
-from scripts.files import copy_release_folder
+from scripts.files import copy_release_folder, make_folder_structure
 
 
 def make_dev_folder(dev_name, version=''):
@@ -19,6 +19,7 @@ def make_dev_folder(dev_name, version=''):
     if not os.path.exists(folder):
         raise RuntimeError("Source folder %s does not exist" % folder)
 
+    make_folder_structure(new_folder)
     copy_release_folder(folder, new_folder)
 
 
diff --git a/scripts/export/export_segmentation.py b/scripts/export/export_segmentation.py
index 410f9fc0000c1d03620a54f5f0597c7a5f0bfaef..e9f64fc55212db9677755cdcc726d2872108b5a7 100644
--- a/scripts/export/export_segmentation.py
+++ b/scripts/export/export_segmentation.py
@@ -82,4 +82,6 @@ def export_segmentation(paintera_path, paintera_key, folder, new_folder, name, r
     to_bdv(tmp_path, tmp_key, out_path, resolution, tmp_bdv, target)
 
     # compute mapping to old segmentation
-    map_segmentation_ids(folder, new_folder, name, tmp_folder, max_jobs, target)
+    # this can be skipped for new segmentations by setting folder to None
+    if folder is not None:
+        map_segmentation_ids(folder, new_folder, name, tmp_folder, max_jobs, target)
diff --git a/scripts/files/bdv_server.py b/scripts/files/bdv_server.py
index 9b82da1779469b54d75cc2372c66b8ab1bfb1c15..ec355c8f56b86a71c33875c49f46f365aa5df285 100644
--- a/scripts/files/bdv_server.py
+++ b/scripts/files/bdv_server.py
@@ -1,37 +1,44 @@
 import os
 from .xml_utils import get_h5_path_from_xml
+from .sources import get_privates, get_image_names, get_segmentation_names
+
+
+def add_to_bdv_config(name, path, bdv_config, relative_paths, ref_dir):
+
+    # make sure that the h5path linked in the xml exists
+    h5path = get_h5_path_from_xml(path, return_absolute_path=True)
+    if not os.path.exists(h5path):
+        msg = 'Path to h5-file in xml does not exist - %s, %s' % (path, h5path)
+        raise RuntimeError(msg)
+
+    if relative_paths:
+        path = os.path.relpath(path, ref_dir)
+    bdv_config[name] = path
 
 
-# TODO enable filtering for files by some pattern
-# e.g. if we don't want to expose the fib dataset to the public yet
+# data marked as private (e.g. the fib dataset) is filtered out and not exposed to the public
-def make_bdv_server_file(folders, out_path, relative_paths=True):
-    """ Make the bigserver config file from all xmls in folders.
+def make_bdv_server_file(folder, out_path, relative_paths=True):
+    """ Make the bigserver config file for a given release.
     """
-    file_list = {}
+    privates = get_privates()
+    image_names = get_image_names()
+    seg_names = get_segmentation_names()
     ref_dir = os.path.split(out_path)[0]
-    for folder in folders:
-        files = os.listdir(folder)
-        for ff in files:
-            path = os.path.join(folder, ff)
-
-            # only add xmls
-            ext = os.path.splitext(path)[1]
-            if ext != '.xml':
-                continue
-
-            # make sure that the h5path linked in the xml exists
-            h5path = get_h5_path_from_xml(path, return_absolute_path=True)
-            if not os.path.exists(h5path):
-                msg = 'Path to h5-file in xml does not exist - %s, %s' % (path,
-                                                                          h5path)
-                return RuntimeError(msg)
-
-            name = os.path.splitext(ff)[0]
-            if relative_paths:
-                path = os.path.relpath(path, ref_dir)
-            file_list[name] = path
+
+    bdv_config = {}
+    for name in image_names:
+        if name in privates:
+            continue
+        path = os.path.join(folder, 'images', '%s.xml' % name)
+        add_to_bdv_config(name, path, bdv_config, relative_paths, ref_dir)
+
+    for name in seg_names:
+        if name in privates:
+            continue
+        path = os.path.join(folder, 'segmentations', '%s.xml' % name)
+        add_to_bdv_config(name, path, bdv_config, relative_paths, ref_dir)
 
     with open(out_path, 'w') as f:
-        for name, path in file_list.items():
+        for name, path in bdv_config.items():
             line = '%s\t%s\n' % (name, path)
             f.write(line)
diff --git a/scripts/files/copy_helper.py b/scripts/files/copy_helper.py
index 9fac746972cde1c8631401aedf7526cf3e6b67c0..dcbd79306c0d890d6c9e9277e0ec6d3cc4aceb0a 100644
--- a/scripts/files/copy_helper.py
+++ b/scripts/files/copy_helper.py
@@ -1,7 +1,7 @@
 import os
 import shutil
 from .xml_utils import copy_xml_with_newpath, get_h5_path_from_xml
-from .sources import get_image_names, RAW_FOLDER
+from .sources import get_image_names, get_segmentation_names, get_segmentations
 
 
 def copy_file(xml_in, xml_out):
@@ -56,15 +56,8 @@ def copy_image_data(src_folder, dst_folder):
         name += '.xml'
         in_path = os.path.join(src_folder, name)
         out_path = os.path.join(dst_folder, name)
-        # we might have just added he image name, so it's not
-        # in the old version folder yet. It must be in the raw folder
-        # in that case
         if not os.path.exists(in_path):
-            in_path = os.path.join(RAW_FOLDER, name)
-        if not os.path.exists(in_path):
-            raise RuntimeError("Could not find %s in either the src folder %s or raw folder %s" % (name,
-                                                                                                   src_folder,
-                                                                                                   RAW_FOLDER))
+            raise RuntimeError("Could not find %s in the src folder %s or raw folder %s" % (name, src_folder))
         # copy the xml
         copy_file(in_path, out_path)
 
@@ -81,3 +74,36 @@ def copy_misc_data(src_folder, dst_folder):
     if os.path.exists(bkmrk_in):
         shutil.copyfile(bkmrk_in,
                         os.path.join(dst_folder, 'bookmarks.json'))
+
+
+def copy_segmentations(src_folder, dst_folder):
+    names = get_segmentation_names()
+    for name in names:
+        name += '.xml'
+        in_path = os.path.join(src_folder, name)
+        out_path = os.path.join(dst_folder, name)
+        if not os.path.exists(in_path):
+            raise RuntimeError("Could not find %s in the src folder %s or raw folder %s" % (name, src_folder))
+        # copy the xml
+        copy_file(in_path, out_path)
+
+
+def copy_all_tables(src_folder, dst_folder):
+    segmentations = get_segmentations()
+    for name, seg in segmentations.items():
+        has_table = seg.get('has_tables', False) or 'table_update_function' in seg
+        if not has_table:
+            continue
+        copy_tables(src_folder, dst_folder, name)
+
+
+def copy_release_folder(src_folder, dst_folder):
+    # copy static image and misc data
+    copy_image_data(os.path.join(src_folder, 'images'),
+                    os.path.join(dst_folder, 'images'))
+    copy_misc_data(os.path.join(src_folder, 'misc'),
+                   os.path.join(dst_folder, 'misc'))
+    copy_segmentations(os.path.join(src_folder, 'segmentations'),
+                       os.path.join(dst_folder, 'segmentations'))
+    copy_all_tables(os.path.join(src_folder, 'tables'),
+                    os.path.join(dst_folder, 'tables'))
diff --git a/scripts/files/sources.py b/scripts/files/sources.py
index 2a2018cde65a15c5ca11fd49a141d2c3e0385c9b..d67072306bce525aade3330ea60a98b54ce352ac 100644
--- a/scripts/files/sources.py
+++ b/scripts/files/sources.py
@@ -9,6 +9,7 @@ RAW_FOLDER = 'data/rawdata'
 SOURCE_FILE = 'data/sources.json'
 SEGMENTATION_FILE = 'data/segmentations.json'
 IMAGE_FILE = 'data/images.json'
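+# names listed in the private file are not exposed publicly,
+# e.g. they are skipped when building the bdv server config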
+PRIVATE_FILE = 'data/privates.json'
 
 # TODO we need additional functionality:
 # - remove images and segmentations
@@ -89,7 +90,21 @@ def add_source(modality, stage, id=1, region='whole'):
         json.dump(sources, f)
 
 
-def add_image(source_name, name, input_path, copy_data=True):
+def get_privates():
+    if not os.path.exists(PRIVATE_FILE):
+        return []
+    with open(PRIVATE_FILE) as f:
+        return json.load(f)
+
+
+def add_to_privates(name):
+    privates = get_privates()
+    privates.append(name)
+    with open(PRIVATE_FILE, 'w') as f:
+        json.dump(privates, f)
+
+
+def add_image(source_name, name, input_path, copy_data=True, is_private=False):
     """ Add image volume to the platy browser data.
 
     Parameter:
@@ -98,7 +113,8 @@ def add_image(source_name, name, input_path, copy_data=True):
         input_path [str] - path to the data that should be added.
             Data needs to be in bdv-hdf5 format and the path needs to point to the xml.
         copy_data [bool] - whether to copy the data. This should be set to True,
-            unless adding an image volume that is already in the rawdata folder. (default: True)
+            unless adding an image volume that is already in the rawdata folder (default: True).
+        is_private [bool] - whether this data is private (default: False).
     """
     # validate the inputs
     source_names = get_source_names()
@@ -130,10 +146,15 @@ def add_image(source_name, name, input_path, copy_data=True):
     with open(IMAGE_FILE, 'w') as f:
         json.dump(names, f)
 
+    # add the name to the private list if requested
+    if is_private:
+        add_to_privates(output_name)
+
 
 def add_segmentation(source_name, name, segmentation_path=None,
                      table_path_dict=None, paintera_project=None,
-                     resolution=None, table_update_function=None, copy_data=True):
+                     resolution=None, table_update_function=None,
+                     copy_data=True, is_private=False):
     """ Add segmentation volume to the platy browser data.
 
     We distinguish between static and dynamic segmentations. A dynamic segmentation is generated from
@@ -176,6 +197,7 @@ def add_segmentation(source_name, name, segmentation_path=None,
             the segmentation is updated from paintera corrections (default: None).
         copy_data [bool] - whether to copy the data. This should be set to True,
             unless adding a segmentation that is already in the rawdata folder. (default: True)
+        is_private [bool] - whether this data is private (default: False).
     """
     # validate the inputs
 
@@ -245,3 +267,6 @@ def add_segmentation(source_name, name, segmentation_path=None,
     segmentations[output_name] = segmentation
     with open(SEGMENTATION_FILE, 'w') as f:
         json.dump(segmentations, f)
+    # add the name to the private list if requested
+    if is_private:
+        add_to_privates(output_name)
diff --git a/scripts/release_helper.py b/scripts/release_helper.py
new file mode 100644
index 0000000000000000000000000000000000000000..1dc550f266eef0bb11b3483cce638568f5ef1fa5
--- /dev/null
+++ b/scripts/release_helper.py
@@ -0,0 +1,104 @@
+import os
+from . import attributes
+from .export import export_segmentation
+from .files import add_image, add_segmentation, copy_tables
+from .files.copy_helper import copy_file
+
+
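+# the classifiers below decide how an input dict is treated:
+# an image needs 'name' and 'input_path',
+# a static segmentation needs 'name' and 'segmentation_path',
+# a dynamic segmentation needs 'name', a two-element 'paintera_project' and a three-element 'resolution';
+# a 'source' entry is required when check_source is True, 'is_private' is optional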
+def is_image(data, check_source):
+    if check_source and 'source' not in data:
+        return False
+    if 'name' in data and 'input_path' in data:
+        return True
+    return False
+
+
+def is_static_segmentation(data, check_source):
+    if check_source and 'source' not in data:
+        return False
+    if 'name' in data and 'segmentation_path' in data:
+        return True
+    return False
+
+
+def is_dynamic_segmentation(data, check_source):
+    if check_source and 'source' not in data:
+        return False
+    if 'name' in data and 'paintera_project' in data and 'resolution' in data:
+        if len(data['paintera_project']) != 2 or len(data['resolution']) != 3:
+            return False
+        return True
+    return False
+
+
+def check_inputs(new_data, check_source=True):
+    if not all(isinstance(data, dict) for data in new_data):
+        raise ValueError("Expect list of dicts as input")
+
+    for data in new_data:
+        if not any([is_image(data, check_source),
+                    is_static_segmentation(data, check_source),
+                    is_dynamic_segmentation(data, check_source)]):
+            raise ValueError("Could not parse input element %s" % str(data))
+
+
+def add_data(data, folder, target, max_jobs, source=None):
+    source = data['source'] if source is None else source
+    name = data['name']
+    full_name = '%s-%s' % (source, name)
+    file_name = '%s.xml' % full_name
+
+    is_private = data.get('is_private', False)
+    check_source = source is None
+
+    if is_image(data, check_source):
+        # register the image data
+        add_image(source, name, data['input_path'],
+                  is_private=is_private)
+
+        # copy image data from the raw folder to new release folder
+        xml_raw = os.path.join('data/rawdata', file_name)
+        xml_out = os.path.join(folder, 'images', file_name)
+        copy_file(xml_raw, xml_out)
+
+    elif is_static_segmentation(data, check_source):
+        # register the static segmentation
+        table_path_dict = data.get('table_path_dict', None)
+        add_segmentation(source, name,
+                         segmentation_path=data['segmentation_path'],
+                         table_path_dict=table_path_dict,
+                         is_private=is_private)
+
+        # copy segmentation data from the raw folder to new release folder
+        xml_raw = os.path.join('data/rawdata', file_name)
+        xml_out = os.path.join(folder, 'segmentations', file_name)
+        copy_file(xml_raw, xml_out)
+
+        # if we have tables, copy them as well
+        if table_path_dict is not None:
+            copy_tables('data/rawdata', os.path.join(folder, 'tables'), full_name)
+
+    elif is_dynamic_segmentation(data, check_source):
+        # register the dynamic segmentation
+        paintera_project = data['paintera_project']
+        resolution = data['resolution']
+        table_update_function = data.get('table_update_function', None)
+        add_segmentation(source, name,
+                         paintera_project=paintera_project,
+                         resolution=resolution,
+                         table_update_function=table_update_function,
+                         is_private=is_private)
+
+        # export segmentation data to new release folder
+        paintera_root, paintera_key = paintera_project
+        tmp_folder = 'tmp_export_%s' % full_name
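+        # the previous folder is passed as None to skip the mapping to an
+        # older segmentation, because this segmentation is exported for the first time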
+        export_segmentation(paintera_root, paintera_key,
+                            None, folder, full_name,
+                            resolution=resolution,
+                            tmp_folder=tmp_folder,
+                            target=target, max_jobs=max_jobs)
+
+        # if we have a table update function, call it
+        if table_update_function is not None:
+            tmp_folder = 'tmp_tables_%s' % name
+            update_function = getattr(attributes, table_update_function)
+            update_function(folder, name, tmp_folder, resolution,
+                            target=target, max_jobs=max_jobs)
diff --git a/update_major.py b/update_major.py
index 1cacce320abca53bc40e1db3e71d0c86311fdb35..f2e0c1c6ea1e3bab8b9ba20b35a2aa45f688406c 100755
--- a/update_major.py
+++ b/update_major.py
@@ -1 +1,63 @@
 #! /g/arendt/pape/miniconda3/envs/platybrowser/bin/python
+
+import os
+import json
+import argparse
+from subprocess import check_output
+
+from scripts.files import add_source, copy_release_folder, make_folder_structure
+from scripts.release_helper import add_data, check_inputs
+
+
+def get_tags():
+    tag = check_output(['git', 'describe', '--abbrev=0']).decode('utf-8').rstrip('\n')
+    new_tag = tag.split('.')
+    new_tag[-1] = '0'  # reset patch
+    new_tag[1] = '0'  # reset minor
+    # update major
+    new_tag[0] = str(int(new_tag[0]) + 1)
+    new_tag = '.'.join(new_tag)
+    return tag, new_tag
+
+
+def update_major(new_data_dict, target='slurm', max_jobs=250):
+    """ Update major version of platy browser.
+
+    TODO explain elements of input list.
+    """
+
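+    # new_data_dict maps each new source name to a list of data dicts;
+    # see check_inputs in scripts/release_helper.py for the accepted layouts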
+    for source, new_data in new_data_dict.items():
+        add_source(source)
+        check_inputs(new_data, check_source=False)
+
+    # increase the major (first digit) release tag
+    tag, new_tag = get_tags()
+    print("Updating platy browser from", tag, "to", new_tag)
+
+    # make new folder structure
+    folder = os.path.join('data', tag)
+    new_folder = os.path.join('data', new_tag)
+    make_folder_structure(new_folder)
+
+    # copy the release folder
+    copy_release_folder(folder, new_folder)
+
+    # add the new sources and new data
+    for source, new_data in new_data_dict.items():
+        for data in new_data:
+            add_data(data, new_folder, target, max_jobs,
+                     source=source)
+
+    # TODO auto-release
+    # TODO clean up
+
+
+if __name__ == '__main__':
+    help_str = "Path to a json containing list of the data to add. See docstring of 'update_major' for details."
+    parser = argparse.ArgumentParser(description='Update major version of platy-browser-data.')
+    parser.add_argument('input_path', type=str, help=help_str)
+    input_path = parser.parse_args().input_path
+    with open(input_path) as f:
+        new_data_dict = json.load(f)
+    update_major(new_data_dict)
diff --git a/update_minor.py b/update_minor.py
index f85b63d4961e7884151271bfb6915a801026eea2..42627e7600cc76f162ff7b495653c9c33179abd3 100755
--- a/update_minor.py
+++ b/update_minor.py
@@ -5,9 +5,9 @@ import json
 import argparse
 from subprocess import check_output
 
-from scripts.files import (add_image, add_segmentation,
-                           copy_release_folder, make_folder_structure)
-from scripts.export import export_segmentation
+from scripts.files import copy_release_folder, make_folder_structure
+from scripts.release_helper import (add_data, check_inputs,
+                                    is_image, is_static_segmentation, is_dynamic_segmentation)
 
 
 def get_tags():
@@ -19,26 +19,6 @@ def get_tags():
     return tag, new_tag
 
 
-def is_image(data):
-    if 'source' in data and 'name' in data and 'input_path' in data:
-        return True
-    return False
-
-
-def is_static_segmentation(data):
-    if 'source' in data and 'name' in data and 'segmentation_path' in data:
-        return True
-    return False
-
-
-def is_dynamic_segmentation(data):
-    if 'source' in data and 'name' in data and 'paintera_project' in data and 'resolution' in data:
-        if len(data['paintera_project']) != 2 or len(data['resolution']) != 3:
-            return False
-        return True
-    return False
-
-
 def check_inputs(new_data):
     if not all(isinstance(data, dict) for data in new_data):
         raise ValueError("Expect list of dicts as input")
@@ -48,36 +28,6 @@ def check_inputs(new_data):
             raise ValueError("Could not parse input element %s" % str(data))
 
 
-def add_data(data, folder, target, max_jobs):
-    if is_image(data):
-        # register the image data
-        add_image(data['source'], data['name'], data['input_path'])
-
-        # copy image data to new release folder
-
-    elif is_static_segmentation(data):
-        # register the static segmentation
-        add_segmentation(data['source'], data['name'],
-                         segmentation_path=data['segmentation_path'],
-                         table_path_dict=data.get('table_path_dict', None))
-
-        # copy segmentation data to new release folder
-
-        # if we have tables, copy them as well
-
-    elif is_dynamic_segmentation(data):
-        # register the dynamic segmentation
-        add_segmentation(data['source'], data['name'],
-                         paintera_project=data['paintera_project'],
-                         resolution=data['resolution'],
-                         table_update_function=data.get('table_update_function', None))
-
-        # export segmentation data to new release folder
-        export_segmentation()
-
-        # if we have a table update function, call it
-
-
 def update_minor(new_data, target='slurm', max_jobs=250):
     """ Update minor version of platy browser.
 
diff --git a/update_patch.py b/update_patch.py
index b4929b2dee7f0ddd86dadd321e4ee8e8d8c58168..7e33789084afd4fd0725a27d5e3e93d3bba0533d 100755
--- a/update_patch.py
+++ b/update_patch.py
@@ -154,9 +154,7 @@ def update_patch(update_seg_names, update_table_names,
     update_tables(folder, new_folder, table_updates,
                   target=target, max_jobs=max_jobs)
 
-    make_bdv_server_file([os.path.join(new_folder, 'images'),
-                          os.path.join(new_folder, 'segmentations')],
-                         os.path.join(new_folder, 'misc', 'bdv_server.txt'),
+    make_bdv_server_file(new_folder, os.path.join(new_folder, 'misc', 'bdv_server.txt'),
                          relative_paths=True)
     # TODO add some quality control that checks that all files are there