Commit 7e7bf87b authored by Constantin Pape

Update use_gpu variable in stardist scripts

parent af9495db
@@ -50,6 +50,11 @@ Note that you don't need the `labels` folder for the prediction script.
The `CUDA_VISIBLE_DEVICES=0` part determines which GPU is used. If you have a machine with multiple GPUs and don't want to
use the first one, change the `0` to the id of the GPU you want to use.
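For example, to run the 2D training on the second GPU (id 1), with the same placeholder paths as in the slurm example below:
```
CUDA_VISIBLE_DEVICES=1 train_stardist_2d /path/to/data /path/to/model
```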
To see the optional parameters of a script, check its help, for example by running
```
train_stardist_2d --help
```
In order to run these scripts on the EMBL cluster via slurm, you can use the `submit_slurm` script from `ai4ia.utils`, e.g.
```
submit_slurm train_stardist_2d /path/to/data /path/to/model
```
@@ -121,25 +121,27 @@ def train_model(x_train, y_train, x_val, y_val, save_path,
                 n_channels, patch_size, n_rays=32):
     # make the model config
-    # copied from the stardist training notebook, this is a very weird line ...
-    use_gpu = False and gputools_available()
+    # Stardist supports optional data processing on the gpu to speed up
+    # the training process. This can only be used if the additional module
+    # 'gputools' is available.
+    use_gpu_for_dataprocessing = gputools_available()
     # predict on subsampled image for increased efficiency
     grid = (2, 2)
     config = Config2D(
         n_rays=n_rays,
         grid=grid,
-        use_gpu=use_gpu,
+        use_gpu=use_gpu_for_dataprocessing,
         n_channel_in=n_channels,
         train_patch_size=patch_size
     )
-    if use_gpu:
-        print("Using a GPU for training")
-        # limit gpu memory
+    if use_gpu_for_dataprocessing:
+        # limit gpu memory if we use the gpu for data preprocessing
+        # this is necessary because by default tensorflow allocates all gpu ram
         from csbdeep.utils.tf import limit_gpu_memory
         limit_gpu_memory(0.8)
-    else:
-        print("GPU not found, using the CPU for training")
     save_root, save_name = os.path.split(save_path)
     os.makedirs(save_root, exist_ok=True)
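Stripped of the diff context, the new pattern in both scripts reduces to this minimal sketch (`limit_gpu_memory` is imported as in the hunk above; the `gputools_available` import is assumed to be the usual stardist top-level one, which is not shown in the hunk):
```python
from stardist import gputools_available  # assumed import; not visible in the hunk

# use the gpu for data processing only if the optional 'gputools' module is installed
use_gpu_for_dataprocessing = gputools_available()

if use_gpu_for_dataprocessing:
    # cap tensorflow's gpu memory fraction, since tensorflow otherwise
    # allocates all gpu ram up front
    from csbdeep.utils.tf import limit_gpu_memory
    limit_gpu_memory(0.8)
```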
@@ -110,26 +110,28 @@ def train_model(x_train, y_train,
     rays = Rays_GoldenSpiral(n_rays, anisotropy=anisotropy)
     # make the model config
-    # copied from the stardist training notebook, this is a very weird line ...
-    use_gpu = False and gputools_available()
+    # Stardist supports optional data processing on the gpu to speed up
+    # the training process. This can only be used if the additional module
+    # 'gputools' is available.
+    use_gpu_for_dataprocessing = gputools_available()
     # predict on subsampled image for increased efficiency
     grid = tuple(1 if a > 1.5 else 2 for a in anisotropy)
     config = Config3D(
         rays=rays,
         grid=grid,
-        use_gpu=use_gpu,
+        use_gpu=use_gpu_for_dataprocessing,
         n_channel_in=1,
         train_patch_size=patch_size,
         anisotropy=anisotropy
     )
-    if use_gpu:
-        print("Using a GPU for training")
-        # limit gpu memory
+    if use_gpu_for_dataprocessing:
+        # limit gpu memory if we use the gpu for data preprocessing
+        # this is necessary because by default tensorflow allocates all gpu ram
         from csbdeep.utils.tf import limit_gpu_memory
         limit_gpu_memory(0.8)
-    else:
-        print("GPU not found, using the CPU for training")
     save_root, save_name = os.path.split(save_path)
     os.makedirs(save_root, exist_ok=True)
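For context, the 3D pieces above fit together roughly like this (a sketch assuming the public stardist API; the anisotropy, ray count, patch size, and model name/basedir are illustrative, not taken from the scripts):
```python
from stardist import Rays_GoldenSpiral, gputools_available
from stardist.models import Config3D, StarDist3D

anisotropy = (8.0, 1.0, 1.0)  # illustrative (z, y, x) anisotropy
rays = Rays_GoldenSpiral(96, anisotropy=anisotropy)
# subsample only along the well-resolved axes, as in the diff above
grid = tuple(1 if a > 1.5 else 2 for a in anisotropy)
config = Config3D(
    rays=rays,
    grid=grid,
    use_gpu=gputools_available(),
    n_channel_in=1,
    train_patch_size=(48, 128, 128),
    anisotropy=anisotropy,
)
model = StarDist3D(config, name="stardist_3d", basedir="models")  # hypothetical name/basedir
```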
@@ -188,8 +190,7 @@ def main():
                         help="Size of the image patches used to train the network, default: 128, 128, 128")
     aniso_help = """Anisotropy factor, needs to be passed as json encoded list, e.g. \"[.05,0.5,0.5]\".
     If not given, will be computed from the dimensions of the input data, default: None"""
-    parser.add_argument('--anisotropy', type=str, default=None,
-                        help=aniso_help)
+    parser.add_argument('--anisotropy', type=str, default=None, help=aniso_help)
     args = parser.parse_args()
     anisotropy = args.anisotropy
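The json-encoded `--anisotropy` string would then be decoded along these lines (a sketch; the decoding step itself lies outside the visible hunk):
```python
import json

anisotropy = "[8.0,1.0,1.0]"  # e.g. args.anisotropy as passed on the command line
if anisotropy is not None:
    anisotropy = json.loads(anisotropy)  # -> [8.0, 1.0, 1.0]
```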