Compare revisions
Showing 240 additions and 4 deletions
File added
File moved
#!/bin/bash
#SBATCH -J bwa
#SBATCH --time=00-00:06:00
#SBATCH --mem=4000M
#SBATCH --nodes=1
#SBATCH --cpus-per-task=4   ## request CPUs here so that $SLURM_CPUS_PER_TASK is defined for bwa below
#SBATCH --tmp=1G
#SBATCH --gres=tmp:1G
#SBATCH --output=bwa.out
#SBATCH --open-mode=append
## load required modules
module load SAMtools BWA
## copy data to /tmp and change directory to /tmp
cp /g/its/home/pecar/benchmarks/msmith_bwa/Ecoli_genome.fa.gz $TMPDIR
cp /g/its/home/pecar/benchmarks/msmith_bwa/reads_*.fq.gz $TMPDIR
cd $TMPDIR
## create an index
bwa index -p ecoli Ecoli_genome.fa.gz
## perform alignment
bwa mem -t $SLURM_CPUS_PER_TASK ecoli reads_1.fq.gz reads_2.fq.gz > aligned.sam
## create a compressed BAM file
samtools view -b aligned.sam > aligned.bam
## copy results back to where job was submitted from
cp aligned.bam $SLURM_SUBMIT_DIR/
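Assuming the batch script above is saved under a name such as `bwa_job.sh` (the filename is not shown in this diff), it would typically be submitted and checked like this:
```
sbatch bwa_job.sh   ## submit the batch script; bwa_job.sh is an assumed filename
squeue -u $USER     ## watch the job while it runs
cat bwa.out         ## the job's output, as requested with --output=bwa.out
```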
File moved
File moved
@@ -14,7 +14,7 @@ cat $HOME/.ssh/id_rsa.pub >> $HOME/.ssh/authorized_keys
```
sudo apt-get update
sudo apt-get install nfs-kernel-server
-sudo bash -c "echo '/home 10.0.0.0/24(rw,sync,no_root_squash,no_subtree_check)' >> /etc/exports"
+sudo bash -c "echo '/home 10.0.0.0/24(rw,sync,no_root_squash,no_subtree_check)' >> /etc/exports" # you might need to replace this with the IP address of your master node
sudo service nfs-kernel-server start
```
@@ -25,7 +25,7 @@ sudo apt-get update
sudo apt-get install nfs-common
## add a line to automatically mount the shared home directory
-sudo bash -c "echo '10.0.0.8:/home /home nfs auto,noatime,nolock,bg,nfsvers=4,intr,tcp,actimeo=1800 0 0' >> /etc/fstab"
+sudo bash -c "echo '10.0.0.8:/home /home nfs auto,noatime,nolock,bg,nfsvers=4,intr,tcp,actimeo=1800 0 0' >> /etc/fstab" # make sure you use the correct IP for the master
## restart the machine
sudo shutdown -r now
@@ -41,7 +41,7 @@ sudo apt-get install slurm-wlm
## enable use of cgroups for process tracking and resource management
sudo bash -c 'echo CgroupAutomount=yes >> /etc/slurm-llnl/cgroup.conf'
-sudo chown slurm:slurm /etc/slurm-llnl/cgroup.conf
+sudo chown slurm:slurm /etc/slurm-llnl/cgroup.conf # you might need to create a slurm user before you can do this
sudo sed -i 's/GRUB_CMDLINE_LINUX=""/GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"/g' /etc/default/grub
sudo update-grub
@@ -49,7 +49,7 @@ sudo update-grub
sudo cp /etc/munge/munge.key $HOME/
## download slurm.conf file (may require some editing of IP addresses etc)
-sudo wget https://raw.githubusercontent.com/grimbough/embl_swc_hpc/oct2017/cluster_setup/slurm.conf -O /etc/slurm-llnl/slurm.conf -o /dev/null
+sudo wget https://git.embl.de/grp-bio-it/embl_hpc/raw/master/swc/cluster_setup/slurm.conf -O /etc/slurm-llnl/slurm.conf -o /dev/null
sudo chown slurm:slurm /etc/slurm-llnl/slurm.conf
```
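As the comment above notes, the downloaded slurm.conf usually needs some editing. A minimal sketch of the lines that typically have to be adjusted is shown below; the hostnames, node names and partition name are placeholders, not values taken from the workshop's actual file:
```
ControlMachine=master            # hostname of the node running slurmctld
NodeName=node[1-2] CPUs=1 State=UNKNOWN
PartitionName=debug Nodes=node[1-2] Default=YES MaxTime=INFINITE State=UP
```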
File moved
#!/bin/bash
srun ./hpc_example.py -t 30 -l 5000
## Identifying our computer
```
hostname
```
## Our first SLURM job
```
srun hostname
```
## Exploring our example program
```
./hpc_example.py -t 10 -l 100
```
## Running the example program on the cluster
```
srun ./hpc_example.py -t 10 -l 100
```
## Submitting a job to the background
```
sbatch ./hpc_example.py -t 60 -l 100
```
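With no `--output` option, sbatch writes the job's output to a file named `slurm-<jobid>.out` in the directory the job was submitted from:
```
cat slurm-1234.out   ## replace 1234 with the job ID that sbatch reported
```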
## Viewing running jobs
```
squeue
squeue --Format="JobID:6,UserName:15,State:10,Reason:10,TimeUsed:6,NodeList"
```
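On a busy cluster the full `squeue` listing can be long; restricting it to your own jobs is often more useful:
```
squeue -u $USER
```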
## Redirecting output
```
sbatch --output=output.txt ./hpc_example.py -t 20 -l 100
```
## Creating a larger list
```
sbatch --output=output.txt ./hpc_example.py -t 30 -l 50000000
```
## Examining default limits
```
scontrol show partition
```
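A complementary view of the partitions and the state of their nodes is available with:
```
sinfo
```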
## Requesting Additional Resources
```
sbatch --mem=500 --output=output.txt ./hpc_example.py -t 30 -l 50000000
```
## Reserving a LARGE amount of memory
```
sbatch --mem=8000 --output=output.txt ./hpc_example.py -t 30 -l 50000000
```
## Interactive jobs
```
srun --pty bash
```
## Limits apply to interactive jobs too
```
srun --mem=250 --pty bash
./hpc_example.py -t 30 -l 50000000
```
## Job dependencies
```
jid=$(sbatch --parsable batch_job.sh)
sbatch --dependency=afterok:$jid batch_job.sh
```
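The same pattern extends to longer pipelines by capturing each job ID and making the next submission depend on it; the script names below are placeholders:
```
jid1=$(sbatch --parsable step1.sh)
jid2=$(sbatch --parsable --dependency=afterok:$jid1 step2.sh)
sbatch --dependency=afterok:$jid2 step3.sh
```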
File added
// C++ example program: parses its command line arguments, then waits
// for a given time while occupying a given amount of memory
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <limits.h>
#include <time.h>
#include <iostream> // std::cout, std::endl
#include <thread>   // std::this_thread::sleep_for
#include <chrono>   // std::chrono::seconds
#include <vector>
#include <sstream>

int main(int argc, char* argv[])
{
    std::vector <std::string> sources;
    int *a = NULL;
    size_t mem = 0;
    int waittime = 10;
    char hostname[HOST_NAME_MAX];
    time_t t1, t2;

    // parse the command line arguments: -t <seconds> and -m <megabytes>
    for (int i = 1; i < argc; ++i) {
        if (std::string(argv[i]) == "-t") {
            if (i + 1 < argc) { // Make sure we aren't at the end of argv!
                std::stringstream ss1(argv[++i]);
                if (!(ss1 >> waittime))
                    std::cerr << "Invalid number " << argv[i] << '\n';
            } else { // Uh-oh, there was no argument to the -t option.
                std::cerr << "-t option requires one argument." << std::endl;
                return 1;
            }
        } else if (std::string(argv[i]) == "-m") {
            if (i + 1 < argc) { // Make sure we aren't at the end of argv!
                std::stringstream ss2(argv[++i]);
                if (!(ss2 >> mem))
                    std::cerr << "Invalid number " << argv[i] << '\n';
            } else { // Uh-oh, there was no argument to the -m option.
                std::cerr << "-m option requires one argument." << std::endl;
                return 1;
            }
        } else {
            sources.push_back(argv[i]);
        }
    }

    time(&t1);
    gethostname(hostname, HOST_NAME_MAX);
    std::string hostname2 = hostname;
    std::cout << "Current host is: " + hostname2 << std::endl;
    std::cout << "Wait time is: " << waittime << " seconds" << std::endl;
    std::cout << "Memory usage is: " << mem << "MB" << std::endl;

    // allocate the requested amount of memory and touch every byte with memset,
    // so that the pages are actually claimed from the operating system
    a = (int*)calloc(mem, 1024 * 1024);
    if (a != NULL)
        memset(a, 0, mem * 1024 * 1024);

    std::this_thread::sleep_for(std::chrono::seconds(waittime));
    free(a);
    time(&t2);
    std::cout << "Actual running time: " << difftime(t2, t1) << " seconds" << std::endl;
    return 0;
}
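To try the C++ version of the example program outside the cluster, it can be built with any C++11 compiler; the source filename used here is an assumption:
```
g++ -std=c++11 -pthread hpc_example.cpp -o hpc_example
./hpc_example -t 10 -m 100   ## wait 10 seconds while holding roughly 100 MB of memory
```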
#!/usr/bin/env python3
import resource
import platform
from argparse import ArgumentParser
from time import sleep


def occupy_memory(length):
    '''Create a list of ones of length <length>, to occupy memory in the system.'''
    try:
        size = int(length)
    except (TypeError, ValueError):
        raise ValueError("Expected an integer value for size parameter, got '{}'".format(length))
    print(f'List length is: {size}')
    list_of_ones = []
    for i in range(size):
        list_of_ones.append(1)


def wait_for(seconds):
    '''Suspend execution for <seconds> seconds, to control the length of time
    that the script takes to run.'''
    try:
        seconds = int(seconds)
    except (TypeError, ValueError):
        raise ValueError("Expected an integer value for time parameter, got '{}'".format(seconds))
    print(f'Wait time is: {seconds} seconds')
    sleep(seconds)


parser = ArgumentParser()  # Use argparse.ArgumentParser to create a simple user interface
parser.add_argument("--time", "-t", default=10, type=int,
                    help="Time (in whole seconds) that this script should take to run.")
parser.add_argument("--length", "-l", default=0, type=int,
                    help="Length of list to generate. This approximates the amount of memory "
                         "that the script will take up when it runs. Must be an integer.")

# parse arguments from the command line
args = parser.parse_args()
secs = args.time
list_length = args.length

print(f'Current host is: {platform.node()}')
occupy_memory(list_length)
wait_for(secs)

# fetch the memory being used by the script and print it out
# (ru_maxrss is reported in kilobytes on Linux)
mem_used = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000.0
print(f'Memory usage: {mem_used}MB')
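The sbatch options used earlier in the lesson can equally be written as #SBATCH directives in a small wrapper script; a minimal sketch, with the resource values copied from the `--mem=500` example above and an assumed wrapper filename of `wrapper.sh`:
```
#!/bin/bash
#SBATCH --mem=500
#SBATCH --output=output.txt
./hpc_example.py -t 30 -l 50000000
```
Submitting it with `sbatch wrapper.sh` is equivalent to passing the same options on the command line.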
No preview for this file type