diff --git a/src/wrapper/runSnakemakeWrapper.sh b/src/wrapper/runSnakemakeWrapper.sh
index 9e7d207c94ed67350cc22a8f38b217fb6d961eb5..a09cc6de87f605b47fc48e8ebf87fc29622c9d4d 100644
--- a/src/wrapper/runSnakemakeWrapper.sh
+++ b/src/wrapper/runSnakemakeWrapper.sh
@@ -1,6 +1,45 @@
+#############
+# FUNCTIONS #
+#############
+# Convert a dotted version string into a zero-padded integer for numeric comparison.
+
+requiredVersionStr="6.0.0"
+
+version() {
+  echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }';
+}
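+# e.g. version "6.4.1" prints 6004001000 and version "6.0.0" prints 6000000000,
+# so dotted versions can be compared with a plain integer test such as [ ... -lt ... ].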
+
+installedVersion=$(snakemake --version)
+snakemake_exec=$(which snakemake)
+
+echo "Installed snakemake version: $installedVersion ($snakemake_exec)"
+
+installedVersion=$(version $installedVersion)
+requiredVersion=$(version $requiredVersionStr)
+
+
+if [ "$installedVersion" -lt "$requiredVersion" ]; then
+    echo "Version of snakemake is too old. Please update snakemake to version $requiredVersionStr or higher"
+    exit 1
+fi
+
+
 
 # parameters that are currently fixed
+customJobStatusScript=true
+# Deactivated for now until it is clear why it fails
+customJobStatusScript=false
+show_failed_logs=true
+
+
+clusterStatusDirective=""
+if [ "$customJobStatusScript" = true ] ; then
+
+  customStatusScript="/g/scb/zaugg/zaugg_shared/scripts/Christian/src/Snakemake/Profiles/slurm_test1/slurm-status.py"
+  #clusterStatusDirective="--cluster-status /g/scb/zaugg/zaugg_shared/scripts/Christian/src/Snakemake/SLURM_jobStatus.py"
+  clusterStatusDirective="--cluster-status $customStatusScript"
+fi
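+# Note: snakemake calls the --cluster-status script with the cluster job ID as its
+# only argument and expects it to print "success", "running" or "failed" on stdout.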
+
 
 #######################
 # AUTOMATIC FROM HERE #
@@ -10,7 +49,7 @@ if [ ! -n "$configFile" ]; then
   outputDir="output"
 else
   # grep output directory automatically
-  outputDir=`grep -P "\\"outdir\\"\s*:(.+)" $configFile | cut -d":" -f2 | sed 's/[\"]//g' | sed  -e 's/^[ \t]*//' | sed  -e 's/[,]*//g'`
+  outputDir=`grep -P "\\"?outdir\\"?\s*:(.+)" $configFile | cut -d":" -f2 | sed 's/[\"]//g' | sed  -e 's/^[ \t]*//' | sed  -e 's/[,]*//g'`
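+  # e.g. a JSON config line   "outdir": "output/myRun",   yields outputDir=output/myRun;
+  # the now-optional quotes also match a YAML-style   outdir: output/myRun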
 
   if [ ! -n "$outputDir" ]; then
     echo "Error: Could not find \"outdir\" parameter in the config file $configFile to automatically set the output directory correctly."
@@ -52,6 +91,9 @@ if [ ! -n  "$useSLURM" ] ; then
   useSLURM=false
 fi
 
+if [ ! -n  "$useEnvModules" ] ; then
+  useEnvModules=true
+fi
 
 if [ ! -n  "$condaDir" ] ; then
   condaDir="/g/scb2/zaugg/zaugg_shared/Programs/Snakemake/conda"
@@ -134,12 +176,13 @@ echo "  runSpecificRule              = $runSpecificRule"       | tee -a $logPara
 echo "  runAlsoDownstreamRules       = $runAlsoDownstreamRules"| tee -a $logParameters
 echo "  abortAfterFirstError         = $abortAfterFirstError"  | tee -a $logParameters
 
-echo " CONDA AND SINGULARITY OPTIONS"                          | tee -a $logParameters
+echo " CONDA, SINGULARITY AND MODULES OPTIONS"                 | tee -a $logParameters
 echo "  useConda                     = $useConda"              | tee -a $logParameters
 echo "  conda-prefix                 = $condaDir"              | tee -a $logParameters
 echo "  useSingularity               = $useSingularity"        | tee -a $logParameters
 echo "  singularityPrefix            = $singularityPrefix"     | tee -a $logParameters
 echo "  singularityArgs              = $singularityArgs"       | tee -a $logParameters
+echo "  useEnvModules                = $useEnvModules"         | tee -a $logParameters
 
 echo " DEVELOPMENT OPTIONS"                                    | tee -a $logParameters
 echo "  useVerbose                   = $useVerbose"            | tee -a $logParameters
@@ -155,7 +198,10 @@ echo "  clusterConfig                = $clusterConfig"         | tee -a $logPara
 echo "  maxJobsCluster               = $maxJobsCluster"        | tee -a $logParameters
 echo "  maxNoRestartJobsUponFailure  = $maxRestartsPerJob"     | tee -a $logParameters
 echo "  mailType                     = $mailType"              | tee -a $logParameters
-echo "  customJobStatusScript        = $customJobStatusScript" | tee -a $logParameters
+
+echo " CURRENTLY HARD-CODED OPTIONS"
+echo "  customJobStatusScript        = $customJobStatusScript ($customStatusScript)" | tee -a $logParameters
+echo "  show-failed-logs             = $show_failed_logs"      | tee -a $logParameters
 
 
 
@@ -167,7 +213,9 @@ fi
 cp $snakefile $inputDir
 
 if [ -n "$clusterConfig" ]; then
-  cp $clusterConfig $inputDir
+  if [ "$submitToCluster" = true ] ; then
+    cp $clusterConfig $inputDir
+  fi
 fi
 
 configfileDirective=""
@@ -189,7 +237,8 @@ printShellCommands=false
 otherOptionsDirective=""
 shadowDirDirective=""
 singularityDirective=""
-
+useEnvModulesDirective=""
+show_failed_logsDirective=""
 
 if [ "$ignoreZeroSizedFiles" = false ] ; then
 
@@ -225,15 +274,46 @@ fi
 
 if [ "$touchOutputFiles" = true ] ; then
    touchDirective="--touch"
+elif [ "$touchOutputFiles" = false ] ; then
+  :
+else
+  echo "Error: Parameter touchOutputFiles must be true or false, not \"$touchOutputFiles\""
+  exit 1
+fi
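+# The same validation pattern is used for all boolean parameters below: ":" is the
+# shell no-op, so "false" is accepted silently and any other value aborts the script.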
+
+if [ "$useEnvModules" = true ] ; then
+   useEnvModulesDirective="--use-envmodules"
 fi
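+# Note: --use-envmodules only affects rules that declare an "envmodules:" section in
+# the Snakefile; all other rules run as before.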
 
-if [ -n "$otherOptions" ] ; then
-   otherOptionsDirective="$otherOptions"
+if [ "$show_failed_logs" = true ] ; then
+   show_failed_logsDirective="--show-failed-logs"
 fi
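+# --show-failed-logs makes snakemake print the log files of failed jobs directly in
+# its own output instead of only referencing them.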
 
+
+
 if [ "$useSingularity" = true ] ; then
    singularityDirective="--use-singularity "
 
+   echo "Singularity is to be used, forcing unloading of all modules now to avoid issues..."
+
+   if [ "$(module --redirect -t list | grep -c snakemake)" -ge 1 ]; then
+     #echo "The Snakemake module is loaded. Due to a known problem with Singularity for the current version, the snakemake module shall not be used. Please install your local Snakemake version (e.g., via conda) first and unload all modules (module purge)"
+      echo "Purging all modules and reloading the snakemake module"
+      echo "NOTE: IF YOU REQUIRED PARTICULAR MODULES TO BE LOADED FOR YOUR PIPELINE TO WORK, CONTACT CHRISTIAN. THEY ARE NOT LOADED ANYMORE"
+     #exit 1
+     module purge
+     # Load the SciPy-bundle module, which contains pandas etc.
+     module load snakemake
+     module load SciPy-bundle
+
+   else
+     module purge
+     # Temporary test for Neha
+     #echo "Temporary fix for Neha: Loading module git"
+     #module load git
+   fi
+
+
    if [ -n "$singularityPrefix" ]; then
       singularityDirective="$singularityDirective --singularity-prefix \"$singularityPrefix\""
    fi
@@ -242,6 +322,11 @@ if [ "$useSingularity" = true ] ; then
         singularityDirective="$singularityDirective --singularity-args \"$singularityArgs\""
    fi
 
+elif [ "$useSingularity" = false ] ; then
+ :
+else
+ echo "Error: Parameter useSingularity must be true or false, not \"$useSingularity\""
+ exit 1
 fi
 
 
@@ -257,22 +342,41 @@ fi
 
 if [ "$nolock" = true ] ; then
    nolockDirective="--nolock"
+elif [ "$nolock" = false ] ; then
+  :
+else
+  echo "Error: Parameter nolock must be true or false, not \"$nolock\""
+  exit 1
 fi
 
 if [ "$useConda" = true ] ; then
    condaDirective="--use-conda --conda-prefix $condaDir"
+elif [ "$useConda" = false ] ; then
+  :
+else
+  echo "Error: Parameter useConda must be true or false, not \"$useConda\""
+  exit 1
 fi
 
 if [ "$abortAfterFirstError" = true ] ; then
    keepGoingDirective=""
+elif [ "$abortAfterFirstError" = false ] ; then
+  :
+else
+  echo "Error: Parameter abortAfterFirstError must be true or false, not \"$abortAfterFirstError\""
+  exit 1
 fi
 
 
 
-
 if [ "$useVerbose" = true ] ; then
-   verboseDirective="--verbose --reason "
+   verboseDirective="--verbose "
    printShellCommands=true
+elif [ "$useVerbose" = false ] ; then
+  :
+else
+  echo "Error: Parameter useVerbose must be true or false, not \"$useVerbose\""
+  exit 1
 fi
 
 if [ "$dryRun" = true ] ; then
@@ -283,7 +387,11 @@ if [ "$dryRun" = true ] ; then
    else
      dryRunDirective="--dryrun --quiet"
    fi
-
+elif [ "$dryRun" = false ] ; then
+  :
+else
+  echo "Error: Parameter dryRun must be true or false, not \"$dryRun\""
+  exit 1
 fi
 
 
@@ -308,10 +416,20 @@ fi
 
 if [ "$rerunIncomplete" = true ] ; then
    rerunIncompleteDirective="--rerun-incomplete"
-fi
+ elif [ "$rerunIncomplete" = false ] ; then
+   :
+ else
+   echo "Error: Parameter rerunIncomplete must be true or false, not \"$rerunIncomplete\""
+   exit 1
+ fi
 
 if [ "$printShellCommands" = true ] ; then
    printShellDirective="--printshellcmds"
+elif [ "$printShellCommands" = false ] ; then
+   :
+else
+   echo "Error: Parameter printShellCommands must be true or false, not \"$printShellCommands\""
+   exit 1
 fi
 
 if [ "$submitToCluster" = false ] ; then
@@ -319,6 +437,11 @@ if [ "$submitToCluster" = false ] ; then
     echo "$nCores CPUs will be used for local computation, make sure this value is ok."
 
   fi
+elif [ "$submitToCluster" = true ] ; then
+   :
+else
+   echo "Error: Parameter submitToCluster must be true or false, not \"$submitToCluster\""
+   exit 1
 fi
 
 if [ $maxRestartsPerJob  -gt 0 ] ; then
@@ -335,6 +458,8 @@ if [ "$submitToCluster" = true ] ; then
     exit 1
   fi
 
+
+
   # if [ $maxJobsCluster  -gt 500 ] ; then
   #   echo "Warning: maxJobsCluster=$maxJobsCluster too high! Adjust maxJobsCluster to 500..."
   #   maxJobsCluster=500
@@ -385,41 +510,47 @@ if [ "$submitToCluster" = true ] ; then
   errorDirective=""
   qosDirective=""
   excludeDirective=""
+  resourcesDirective=""
 
   generalMessage="Check the validity of the cluster file on the cluster system (SLURM or LSF) you want to run it on."
 
 
-  if [ "$hostname" = "login.cluster.embl.de" ] || [ "$hostname" = "spinoza.embl.de" ] ; then
+  if [ "$hostname" = "login.cluster.embl.de" ] || [ "$hostname" = "login-gui01.cluster.embl.de" ] || [ "$hostname" = "login-gui02.cluster.embl.de" ] || [ "$hostname" = "login01.cluster.embl.de" ] || [ "$hostname" = "login02.cluster.embl.de" ] || [ "$hostname" = "spinoza.embl.de" ] || [ "$hostname" = "seneca.embl.de" ] || [ "$hostname" = "schroedinger.embl.de" ] ; then
 
-    echo "Use SLURM cluster because hostname is \"login.cluster.embl.de\""
+    echo "Use SLURM cluster because hostname is one of: \"login*.cluster.embl.de\", \"spinoza.embl.de\", \"seneca.embl.de\", \"schroedinger.embl.de\"."
 
 
-    nHits=$(grep -c queueSLURM $clusterConfig)
+    nHits=$(grep -c \"queueSLURM\" $clusterConfig)
     if [ "$nHits" -gt "0" ]; then
         queueDirective="-p {cluster.queueSLURM}"
     fi
 
-    nHits=$(grep -c nodes $clusterConfig)
+    nHits=$(grep -c \"nodes\" $clusterConfig)
     if [ "$nHits" -gt "0" ]; then
         nodesDirective="-C {cluster.nodes}"
     fi
 
-    nHits=$(grep -c name $clusterConfig)
+    nHits=$(grep -c \"name\" $clusterConfig)
     if [ "$nHits" -gt "0" ]; then
         nameDirective="-J {cluster.name}"
     fi
 
-    nHits=$(grep -c group $clusterConfig)
+    nHits=$(grep -c \"group\" $clusterConfig)
     if [ "$nHits" -gt "0" ]; then
         groupDirective="-A {cluster.group}"
     fi
 
-    nHits=$(grep -c nCPUs $clusterConfig)
+    nHits=$(grep -c \"nCPUs\" $clusterConfig)
     if [ "$nHits" -gt "0" ]; then
         nCPUsDirective="--cpus-per-task {cluster.nCPUs}"
     fi
 
 
+    nHits=$(grep -c \"resources\" $clusterConfig)
+    if [ "$nHits" -gt "0" ]; then
+        resourcesDirective="{cluster.resources}"
+    fi
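+    # NOTE (assumption): "resources" in the cluster config is expected to hold raw sbatch
+    # flags (e.g. "--gres=gpu:1"), since the value is inserted verbatim into the sbatch call.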
+
     nHits=$(grep -c threads $clusterConfig)
     if [ "$nHits" -eq "0" ]; then
      echo "Could not find \"threads\" in file \"$clusterConfig\". Use nCPUS: \"\{threads\}\". $generalMessage";
@@ -427,48 +558,48 @@ if [ "$submitToCluster" = true ] ; then
     fi
 
 
-    nHits=$(grep -c memory $clusterConfig)
+    nHits=$(grep -c \"memory\" $clusterConfig)
     if [ "$nHits" -gt "0" ]; then
         memoryDirective="--mem {cluster.memory}"
     fi
 
-    nHits=$(grep -c maxTime $clusterConfig)
+    nHits=$(grep -c \"maxTime\" $clusterConfig)
     if [ "$nHits" -gt "0" ]; then
         maxTimeDirective="--time {cluster.maxTime}"
     fi
 
-    nHits=$(grep -c output $clusterConfig)
+    nHits=$(grep -c \"output\" $clusterConfig)
     if [ "$nHits" -gt "0" ]; then
         outputDirective="-o \"{cluster.output}\""
     fi
 
 
-    nHits=$(grep -c exclude $clusterConfig)
+    nHits=$(grep -c \"exclude\" $clusterConfig)
     if [ "$nHits" -gt "0" ]; then
         excludeDirective="--exclude {cluster.exclude}"
     fi
 
 
-    nHits=$(grep -c error $clusterConfig)
+    nHits=$(grep -c \"error\" $clusterConfig)
     if [ "$nHits" -gt "0" ]; then
         errorDirective="-e \"{cluster.error}\""
     fi
 
 
-    nHits=$(grep -c qos $clusterConfig)
+    nHits=$(grep -c \"qos\" $clusterConfig)
     if [ "$nHits" -gt "0" ]; then
         qosDirective="--qos={cluster.qos}"
     fi
 
-    clusterSpecifics="--cluster \" sbatch $queueDirective $nameDirective $groupDirective $nodesDirective $nCPUsDirective $memoryDirective $maxTimeDirective $outputDirective $errorDirective $qosDirective $excludeDirective --mail-type=$mailType --parsable \""
+    clusterSpecifics="--cluster \" sbatch $queueDirective $nameDirective $groupDirective $nodesDirective $nCPUsDirective $memoryDirective $maxTimeDirective $outputDirective $errorDirective $qosDirective $excludeDirective $resourcesDirective --mail-type=$mailType --parsable \""
 
   #clusterSpecifics=" --drmaa \" -p {cluster.queueSLURM} -J {cluster.name} -A {cluster.group} -N {cluster.nNodes} -n {cluster.nCores} --mem {cluster.memory} -o \"{cluster.output}\" -e \"{cluster.error}\" --mail-type NONE  \""
 
 
   else
 
-    echo "Use bsub because hostname is not \"login.cluster.embl.de\""
-    echo "Exiting, LSF/bsub is not supported anymore"
+    echo "Error: Hostname must be one of the following for cluster support using SLURM: \"login.cluster.embl.de\", \"spinoza.embl.de\", \"seneca.embl.de\", \"schroedinger.embl.de\""
+    echo "Cannot continue, exiting..."
     exit 1
 
     clusterSpecifics="--cluster \" bsub  -q {cluster.queue} -J {cluster.name} -n {cluster.nCPUs} -R \"{cluster.resources}\" -M {cluster.memory} -o \"{cluster.output}\" -e \"{cluster.error}\" \""
@@ -480,7 +611,7 @@ if [ "$submitToCluster" = true ] ; then
     fi
 
 
-    nHits=$(grep -c resources $clusterConfig)
+    nHits=$(grep -c \"resources\" $clusterConfig)
     if [ "$nHits" -eq "0" ]; then
      echo "Could not find \"resources\" parameter in file \"$clusterConfig\". $generalMessage";
      exit 1;
@@ -488,10 +619,6 @@ if [ "$submitToCluster" = true ] ; then
 
   fi
 
-  clusterStatusDirective=""
-  if [ "$customJobStatusScript" = true ] ; then
-    clusterStatusDirective="--cluster-status /g/scb/zaugg/zaugg_shared/scripts/Christian/src/Snakemake/SLURM_jobStatus.py"
-  fi
 
   clusterDirective="--jobs $maxJobsCluster --cluster-config $clusterConfig $clusterSpecifics --local-cores 1 $clusterStatusDirective"
 
@@ -516,10 +643,10 @@ if [ -n  "$runCustomCommand" ] ; then
 else
 
   # Run 1: Detailed summary about what files will be generated
-  command1="snakemake -s $snakefile $configfileDirective $forceRerunDirective $runSpecificRuleDirective $tempDirective --detailed-summary  >$stats2"
+  command1="snakemake -s $snakefile --cores $nCores $configfileDirective $forceRerunDirective $runSpecificRuleDirective $tempDirective --detailed-summary  >$stats2"
 
   #Run 2: Produce a workflow graph
-  command2="snakemake -s $snakefile --configfile $configFile --forceall --dag > $fileDAG"
+  command2="snakemake -s $snakefile --cores $nCores --configfile $configFile --forceall --dag > $fileDAG"
 
   if [ "$skipPDFWorkflow" = true ] ; then
      command2a="echo \"skipping creation of PDF workflow graph\""
@@ -531,7 +658,7 @@ else
   command2b="dot $fileDAG -Tsvg > $workflowGraphSVG"
 
   # Run 3: Main run: Execute the pipeline
-  command3="snakemake -s $snakefile $condaDirective $nolockDirective $configfileDirective --latency-wait 30 $dryRunDirective $shadowDirDirective $touchDirective $allowedRulesDirective $tempDirective $runSpecificRuleDirective $verboseDirective  $printShellDirective $forceRerunDirective $rerunIncompleteDirective --cores $nCores $maxRestartDirective $keepGoingDirective --stats $stats $otherOptionsDirective $singularityDirective $clusterDirective"
+  command3="snakemake -s $snakefile $condaDirective $nolockDirective $configfileDirective --reason --latency-wait 120 $dryRunDirective $shadowDirDirective $touchDirective $allowedRulesDirective $tempDirective $runSpecificRuleDirective $verboseDirective $useEnvModulesDirective $show_failed_logsDirective $printShellDirective $forceRerunDirective $rerunIncompleteDirective --cores $nCores $maxRestartDirective $keepGoingDirective --stats $stats $otherOptionsDirective $singularityDirective $clusterDirective"
 
 
   #commandFull="$command2 && $command2a && $command2b"