From f018f83407309e769e0a18296f36ac3ac9be5260 Mon Sep 17 00:00:00 2001
From: Mike Smith <grimbough@gmail.com>
Date: Wed, 11 Oct 2017 10:40:12 +0200
Subject: [PATCH] changed user names

---
 cluster_setup/README.md              |  4 ++--
 cluster_setup/cloud_cluster_setup.md | 24 ++++++++++++++++++++++++
 cluster_setup/user_delete.sh         |  2 +-
 cluster_setup/user_setup.sh          | 20 ++++++++++----------
 4 files changed, 37 insertions(+), 13 deletions(-)

diff --git a/cluster_setup/README.md b/cluster_setup/README.md
index b2199ff..df7bc24 100644
--- a/cluster_setup/README.md
+++ b/cluster_setup/README.md
@@ -4,6 +4,6 @@ This folder contains instructions and files for setting up the example cluster u
 
 # Cluster infrastructure
 
-The cluster we're using is running on the Heidelberg installation of the de.NBI cloud. The current design is to create a 4 node cluster (1 controller, 3 compute nodes), with varying hardware specifications for each node so we can demonstrate resource managment.
+The cluster we're using is running on the Heidelberg installation of the [de.NBI cloud](https://www.denbi.de/cloud-overview/cloud-hd). The current design is to create a 4-node cluster (1 controller, 3 compute nodes), with varying hardware specifications for each node so we can demonstrate resource management.
 
-Job scheduling is doing using [https://slurm.schedmd.com/](SLURM) since it is (a) free and (b) mirrors the infrastructure we're currently using at EMBL.
+Job scheduling is done using [SLURM](https://slurm.schedmd.com/) since it is (a) free and (b) mirrors the infrastructure we're currently using at EMBL.
diff --git a/cluster_setup/cloud_cluster_setup.md b/cluster_setup/cloud_cluster_setup.md
index 78e845a..22c6de8 100644
--- a/cluster_setup/cloud_cluster_setup.md
+++ b/cluster_setup/cloud_cluster_setup.md
@@ -1,5 +1,7 @@
 ## Generate _ubuntu_ user SSH keys
 
+We only need to do this on the master node, since the home directory will be shared with the compute nodes.
+
 ```
 ssh-keygen -t rsa -N "" -f $HOME/.ssh/id_rsa
 cat $HOME/.ssh/id_rsa.pub >> $HOME/.ssh/authorized_keys
@@ -21,7 +23,12 @@ sudo service nfs-kernel-server start
 ```
 
 ```
 sudo apt-get update
 sudo apt-get install nfs-common
+
+## add a line to automatically mount the shared home directory
 sudo cat '10.0.0.8:/home    /home   nfs     auto,noatime,nolock,bg,nfsvers=4,intr,tcp,actimeo=1800 0 0' >> /etc/fstab
+
+## restart the machine
+sudo shutdown -r now
 ```
@@ -31,17 +38,34 @@
 ```
 sudo apt-get install slurm-wlm
+
+## enable use of cgroups for process tracking and resource management
 sudo bash -c 'echo CgroupAutomount=yes >> /etc/slurm-llnl/cgroup.conf'
 sudo chown slurm:slurm /etc/slurm-llnl/cgroup.conf
+sudo sed -i 's/GRUB_CMDLINE_LINUX=""/GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"/g' /etc/default/grub
+sudo update-grub
+
+## put munge key in home directory so we can share it with the nodes
 sudo cp /etc/munge/munge.key $HOME/
+
+## download slurm.conf file (may require some editing of IP addresses etc.)
+sudo wget https://raw.githubusercontent.com/grimbough/embl_swc_hpc/oct2017/cluster_setup/slurm.conf -O /etc/slurm-llnl/slurm.conf -o /dev/null
+sudo chown slurm:slurm /etc/slurm-llnl/slurm.conf
 ```
 
 ### Node
 
 ```
+## install the SLURM worker daemon
 sudo apt-get install slurmd
+
+## enable use of cgroups for process tracking and resource management
 sudo bash -c 'echo CgroupAutomount=yes >> /etc/slurm-llnl/cgroup.conf'
 sudo chown slurm:slurm /etc/slurm-llnl/cgroup.conf
+sudo sed -i 's/GRUB_CMDLINE_LINUX=""/GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"/g' /etc/default/grub
+sudo update-grub
+
+## copy the shared munge key and restart the service to start using it
 sudo cp /home/ubuntu/munge.key /etc/munge/munge.key
 sudo service munge restart
 ```
 
diff --git a/cluster_setup/user_delete.sh b/cluster_setup/user_delete.sh
index d30afb5..822795d 100644
--- a/cluster_setup/user_delete.sh
+++ b/cluster_setup/user_delete.sh
@@ -5,5 +5,5 @@ n=40
 for i in `seq -w 1 ${n}`
 do
     echo $i;
-    userdel -rf test${i}
+    userdel -rf user${i}
 done;
diff --git a/cluster_setup/user_setup.sh b/cluster_setup/user_setup.sh
index bc2e6ef..25a02d5 100644
--- a/cluster_setup/user_setup.sh
+++ b/cluster_setup/user_setup.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-## script to create 40 users called testXX with a default password
+## script to create 40 users called userXX with a default password
 ## and setup up ssh logins without asking for passwords & host checking
 
 n=40
@@ -7,20 +7,20 @@ for i in `seq -w 1 ${n}`
 do
     echo $i;
 
-    ## create n new user called testXX and create default password
-    adduser --gecos "" --disabled-password test${i}
-    echo test${i}:SoftwareC | chpasswd
+    ## create a new user called userXX and set a default password
+    adduser --gecos "" --disabled-password user${i}
+    echo user${i}:SoftwareC | chpasswd
 
     ## create somewhere to store ssh configuration
-    mkdir -p /home/test${i}/.ssh
-    echo 'Host *\n    StrictHostKeyChecking no\n    ForwardX11 yes' > /home/test${i}/.ssh/config
+    mkdir -p /home/user${i}/.ssh
+    printf "Host *\n    StrictHostKeyChecking no\n    ForwardX11 yes\n" > /home/user${i}/.ssh/config
 
     ## generate a ssh key & copy to the list of authorized keys
-    ssh-keygen -f /home/test${i}/.ssh/id_rsa -t rsa -N ''
-    cp /home/test${i}/.ssh/id_rsa.pub /home/test${i}/.ssh/authorized_keys
+    ssh-keygen -f /home/user${i}/.ssh/id_rsa -t rsa -N ''
+    cp /home/user${i}/.ssh/id_rsa.pub /home/user${i}/.ssh/authorized_keys
 
     ## set new user as owner
-    chown -R test${i}:test${i} /home/test${i}/.ssh
-    chmod 600 /home/test${i}/.ssh/config
+    chown -R user${i}:user${i} /home/user${i}/.ssh
+    chmod 600 /home/user${i}/.ssh/config
 
 done
--
GitLab
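Patches in this mbox format can be applied to a local clone of the repository with `git am`. A minimal sketch; the filename below is hypothetical and stands for wherever you saved the patch:

```
## apply the mbox-formatted patch above to a local clone;
## the filename is hypothetical -- use whatever you saved it as
git am 0001-changed-user-names.patch
```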
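Once the patch is applied and user_setup.sh has been run on the master, a short smoke test can confirm that the renamed accounts, the shared keys, and SLURM all agree. This is a sketch, not part of the patch: it assumes `user01` was created by the script and that munge, slurmctld, and slurmd are already running.

```
#!/bin/bash
## post-setup smoke test (sketch; assumes user01 exists and SLURM is up)

## the generated keys should give passwordless SSH for the new users
sudo -iu user01 ssh -o BatchMode=yes localhost hostname

## a trivial job checks that slurmctld, slurmd, and munge agree
sudo -iu user01 srun -N1 hostname

## all compute nodes should report as idle
sinfo --Node --long
```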