You can load R with one of the modules:

* statistical/R/4.0.0/gcc.8.3.1
* statistical/RStudio/1.2.5033/gcc-8.3.1

#### Running R interactively

```
[username@res-hpc-lo01 ~]$ salloc -N1 -n1
salloc: Pending job allocation 386499
salloc: job 386499 queued and waiting for resources
salloc: job 386499 has been allocated resources
salloc: Granted job allocation 386499
salloc: Waiting for resource configuration
salloc: Nodes res-hpc-exe017 are ready for job

[username@res-hpc-exe017 ~]$ module add statistical/R/4.0.0/gcc.8.3.1
[username@res-hpc-exe017 ~]$ R

R version 4.0.0 (2020-04-24) -- "Arbor Day"
...
Type 'q()' to quit R.

> q()
Save workspace image? [y/n/c]: n

[username@res-hpc-exe017 ~]$ exit
exit
salloc: Relinquishing job allocation 386499
salloc: Job allocation 386499 has been revoked.
```

#### Running an R script in batch mode

HelloWorld.R

```
print("Hello world!")
```

myscript.sh

```
#!/bin/bash

#SBATCH --job-name=HelloWorld        # Job name
#SBATCH --output=slurm.out           # Output file name
#SBATCH --error=slurm.err            # Error file name
#SBATCH --partition=short            # Partition
#SBATCH --time=00:05:00              # Time limit
#SBATCH --nodes=1                    # Number of nodes
#SBATCH --ntasks-per-node=1          # MPI processes per node

module purge
module add statistical/R/4.0.0/gcc.8.3.1

Rscript --vanilla HelloWorld.R
```

```
[username@res-hpc-lo01 R]$ sbatch myscript.sh
Submitted batch job 386860

[username@res-hpc-lo01 R]$ cat slurm.out
[1] "Hello world!"
```
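
If the script needs input values at submission time, they can be passed on the Rscript command line (for example `Rscript --vanilla HelloWorld.R Alice`) and read with base R's `commandArgs()`. The snippet below is only a minimal sketch; the argument value is made up for illustration and is not part of the example above.

```
# Hypothetical extension of HelloWorld.R: read the arguments passed after the
# script name, e.g.  Rscript --vanilla HelloWorld.R Alice
args <- commandArgs(trailingOnly = TRUE)

# Fall back to a default greeting when no argument is supplied.
name <- if (length(args) >= 1) args[1] else "world"
print(paste0("Hello ", name, "!"))
```
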
driver.R

```
x <- rnorm(50)
cat("My sample from N(0,1) is:\n")
print(x)
```
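
Note that `rnorm()` draws a fresh sample on every run, so each submission will print different numbers than the example output further down. If reproducible output is needed, the random seed can be fixed; a minimal variant of driver.R (the seed value 42 is arbitrary):

```
# Reproducible variant of driver.R: fixing the seed makes rnorm() return the
# same sample on every run; the chosen seed value is arbitrary.
set.seed(42)
x <- rnorm(50)
cat("My sample from N(0,1) is:\n")
print(x)
```
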

run.slurm

```
#!/bin/bash

#SBATCH --job-name=serialR           # Job name
#SBATCH --output=slurm.out           # Output file name
#SBATCH --error=slurm.err            # Error file name
#SBATCH --partition=short            # Partition
#SBATCH --time=00:05:00              # Time limit
#SBATCH --nodes=1                    # Number of nodes
#SBATCH --ntasks-per-node=1          # MPI processes per node

module purge
module add statistical/R/4.0.0/gcc.8.3.1

Rscript driver.R
```

```
[username@res-hpc-lo01 R]$ sbatch run.slurm
Submitted batch job 386568

[username@res-hpc-lo01 R]$ ls -l
total 78
-rw-r--r-- 1 username Domain Users  59 Jun  5 11:42 driver.R
-rw-r--r-- 1 username Domain Users 483 Jun  5 11:42 run.slurm
-rw-r--r-- 1 username Domain Users   0 Jun  5 11:43 slurm.err
-rw-r--r-- 1 username Domain Users 671 Jun  5 11:43 slurm.out

[username@res-hpc-lo01 R]$ cat slurm.out
My sample from N(0,1) is:
[1] 0.32241013 -0.78250675 -0.28872991 0.12559634 -0.29176358 0.57962942
[7] -0.38277807 -0.21266343 0.86537064 1.06636737 0.96487417 0.31699518
[13] 0.38003556 0.78275327 -0.85745177 -1.47682958 -0.16192662 0.09207091
[19] -0.64508782 1.01504976 -0.07736039 -1.08819811 1.17762738 -0.22819258
[25] 0.79564029 1.36863520 -0.63137494 -0.58452239 -0.96832479 -1.56506037
[31] 1.68344229 1.03967058 -0.20854621 1.39479829 -0.95509839 0.80826154
[37] -0.89781029 0.99954821 -1.25047597 -1.11034908 -1.10759254 1.32150663
[43] -0.04589279 -0.62886137 0.63947415 0.18295622 0.63929410 0.16774740
[49] 0.92311091 -0.13370228
```

Details of the job (resources, times, exit code) can be inspected with scontrol:

```
[username@res-hpc-lo01 R]$ scontrol show job 386568
JobId=386568 JobName=serialR
UserId=username(225812) GroupId=Domain Users(513) MCS_label=N/A
Priority=449759 Nice=0 Account=dnst-ict QOS=normal
JobState=COMPLETED Reason=None Dependency=(null)
Requeue=1 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:0
RunTime=00:00:02 TimeLimit=00:05:00 TimeMin=N/A
SubmitTime=2020-06-05T11:43:02 EligibleTime=2020-06-05T11:43:02
AccrueTime=2020-06-05T11:43:02
StartTime=2020-06-05T11:43:02 EndTime=2020-06-05T11:43:04 Deadline=N/A
SuspendTime=None SecsPreSuspend=0 LastSchedEval=2020-06-05T11:43:02
Partition=short AllocNode:Sid=res-hpc-ma01:27472
ReqNodeList=(null) ExcNodeList=(null)
NodeList=res-hpc-gpu01
BatchHost=res-hpc-gpu01
NumNodes=1 NumCPUs=1 NumTasks=1 CPUs/Task=1 ReqB:S:C:T=0:0:*:*
TRES=cpu=1,mem=2G,node=1,billing=1
Socks/Node=* NtasksPerN:B:S:C=1:0:*:* CoreSpec=*
MinCPUsNode=1 MinMemoryCPU=2G MinTmpDiskNode=0
Features=(null) DelayBoot=00:00:00
OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
Command=/home/username/R/run.slurm
WorkDir=/home/username/R
StdErr=/home/username/R/slurm.err
StdIn=/dev/null
StdOut=/home/username/R/slurm.out
Power=
MailUser=(null) MailType=NONE
```

#### Running R in parallel

Unfortunately, R is not very efficient on an HPC cluster out of the box: each R instance runs on only one core.
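
One way to use more than one core from a single R job is the `parallel` package that ships with base R. The sketch below is only a minimal illustration, not a cluster-specific recipe: the worker count and the toy function are placeholders, and on Slurm you would normally request a matching number of CPUs (e.g. with `--cpus-per-task`).

```
# Minimal sketch using the base 'parallel' package.
# Assumption: the job requested several CPUs on one node; SLURM_CPUS_PER_TASK
# is set by Slurm when --cpus-per-task is used, otherwise fall back to 4.
library(parallel)

n_workers <- as.integer(Sys.getenv("SLURM_CPUS_PER_TASK", unset = "4"))

# Fork-based parallel map: apply the (toy) function to each element,
# spreading the work over n_workers cores. mclapply falls back to serial
# execution on platforms without fork (e.g. Windows).
results <- mclapply(1:100, function(i) sqrt(i), mc.cores = n_workers)

str(head(results))
```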