managers for CECI clusters

gmatteo 2014-11-05 21:47:52 +01:00
parent d6a1402ce6
commit c07e89e742
7 changed files with 211 additions and 38 deletions

.gitignore

@@ -42,6 +42,7 @@ abipy/extensions/*.html
# vim files
*.swp
*.swo
*.swn
# Unit test / coverage reports
.coverage


@@ -0,0 +1,71 @@
# hmem hardware: http://www.ceci-hpc.be/clusters.html#hmem
# See also http://www.cism.ucl.ac.be/faq/index.php#hmem_specifics
high: &high
  num_nodes: 2
  sockets_per_node: 4
  cores_per_socket: 12
  mem_per_node: 512Gb

middle: &middle
  num_nodes: 7
  sockets_per_node: 4
  cores_per_socket: 12
  mem_per_node: 256Gb

low: &low
  num_nodes: 7
  sockets_per_node: 4
  cores_per_socket: 12
  mem_per_node: 128Gb

job: &job
  mpi_runner: mpirun
  shell_env:
    PATH: $HOME/local/bin:$PATH
  modules:
    - python/2.7
  # pre_run is a string in verbatim mode (note |)
  pre_run: |
    ulimit unlimited

policy:
  autoparal: 1
  automemory: 0
  max_ncpus: 256

# queues
qadapters:
  - priority: 1
    queue:
      qname: high
      qtype: slurm
    limits:
      timelimit: 10-0:0:0
      min_cores: 1
      max_cores: 48
    hardware: *high
    job: *job

  - priority: 2
    queue:
      qname: middle
      qtype: slurm
    limits:
      timelimit: 5-0:0:0
      min_cores: 1
      max_cores: 48
    hardware: *middle
    job: *job

  - priority: 3
    queue:
      qname: low
      qtype: slurm
    limits:
      timelimit: 5-0:0:0
      min_cores: 1
      max_cores: 48
    hardware: *low
    job: *job
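All of these manager files lean on YAML anchors and aliases (&high, &middle, &low, &job) so that the hardware and job sections are written once and reused by every qadapter. A minimal sketch, assuming only PyYAML and a placeholder filename, of how to check that the aliases expand as intended:

# Minimal sketch: parse a manager file such as the hmem one above and check
# that the aliases (*high, *middle, *low, *job) expand into full mappings.
# "hmem_manager.yml" is a placeholder filename, not a path from the repo.
import yaml  # PyYAML

with open("hmem_manager.yml") as fh:
    conf = yaml.safe_load(fh)

for qad in conf["qadapters"]:
    hw = qad["hardware"]  # the *high/*middle/*low alias, expanded by PyYAML
    cores_per_node = hw["sockets_per_node"] * hw["cores_per_socket"]
    print("{}: {} nodes x {} cores, {} per node, walltime {}".format(
        qad["queue"]["qname"], hw["num_nodes"], cores_per_node,
        hw["mem_per_node"], qad["limits"]["timelimit"]))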


@@ -0,0 +1,34 @@
# lemaitre2 hardware: http://www.ceci-hpc.be/clusters.html#lemaitre2
hardware: &hardware
  num_nodes: 112
  sockets_per_node: 2
  cores_per_socket: 6
  mem_per_node: 48Gb

job: &job
  mpi_runner: mpirun
  shell_env:
    PATH: $HOME/local/bin:$PATH
  modules:
    - python/2.7
  # pre_run is a string in verbatim mode (note |)
  pre_run: |
    ulimit unlimited

policy:
  autoparal: 1
  automemory: 0
  max_ncpus: 256

# queues
qadapters:
  - priority: 1
    queue:
      qname: defq
      qtype: slurm
    limits:
      timelimit: 3-0:0:0
      min_cores: 1
      max_cores: 120
    hardware: *hardware
    job: *job


@@ -0,0 +1,34 @@
# nic4 hardware. see http://www.ceci-hpc.be/clusters.html#nic4
hardware: &hardware
  num_nodes: 120
  sockets_per_node: 2
  cores_per_socket: 8
  mem_per_node: 64Gb

job: &job
  mpi_runner: mpirun
  shell_env:
    PATH: $HOME/local/bin:$PATH
  modules:
    - python/2.7
  # pre_run is a string in verbatim mode (note |)
  pre_run: |
    ulimit unlimited

policy:
  autoparal: 1
  automemory: 0
  max_ncpus: 256

# queues
qadapters:
  - priority: 1
    queue:
      qname: main
      qtype: slurm
    limits:
      timelimit: 2-0:0:0
      min_cores: 1
      max_cores: 256
    hardware: *hardware
    job: *job


@@ -0,0 +1,34 @@
# vega hardware: http://www.ceci-hpc.be/clusters.html#vega
hardware: &hardware
  num_nodes: 44
  sockets_per_node: 4
  cores_per_socket: 16
  mem_per_node: 256Gb

job: &job
  mpi_runner: mpirun
  shell_env:
    PATH: $HOME/local/bin:$PATH
  modules:
    - python/2.7
  # pre_run is a string in verbatim mode (note |)
  pre_run: |
    ulimit unlimited

policy:
  autoparal: 1
  automemory: 0
  max_ncpus: 256

# queues
qadapters:
  - priority: 1
    queue:
      qname: defq
      qtype: slurm
    limits:
      timelimit: 7-0:0:0
      min_cores: 1
      max_cores: 1024
    hardware: *hardware
    job: *job
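The four new files follow the same single-queue layout: one hardware block, one job block, a policy section, and a qadapters list per cluster. Assuming the TaskManager API exposed through abipy.abilab in this period (from_file and from_user_config; the file path below is a placeholder), loading one of them would look roughly like this:

# Hedged sketch: hand one of the manager files above to AbiPy. The exact
# classmethods (TaskManager.from_file / from_user_config) and the default
# lookup locations are assumptions about the API of this period;
# "vega_manager.yml" is a placeholder path.
from abipy import abilab

# Load an explicit manager file ...
manager = abilab.TaskManager.from_file("vega_manager.yml")

# ... or fall back to the default lookup (a manager.yml in the working
# directory or in ~/.abinit/abipy/).
# manager = abilab.TaskManager.from_user_config()

print(manager)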


@@ -41,12 +41,12 @@ db_connector:
 # List of qdapters.
 qadapters:
   # Westmere default.
-  - priority: 1
+  - priority: 99
     queue:
       qname: main
       qtype: pbspro
       qparams:
-        group_list: naps
+        group_list: naps
     #qverbatim: |
     #    #PBS -r y
     limits:
@@ -57,7 +57,7 @@ qadapters:
     hardware: *westmere

   # Ivybridge large.
-  - priority: 99
+  - priority: 1
     queue:
       qname: large
       qtype: pbspro

@@ -1,44 +1,43 @@
 ---
-#host: gmac
-qtype: shell
-mpi_runner: mpirun
-pre_run: "source ~/env.sh"
-shell_env:
-    PATH: "~/Coding/Abinit/bzr_archives/773/gmatteo-private/gcc/src/98_main/:$PATH"
-    DYLD_LIBRARY_PATH: /opt/intel/composerxe-2011.0.085/mkl/lib/:/opt/intel/composerxe-2011.0.085/compiler/lib/:$DYLD_LIBRARY_PATH
-##omp_env:
-#   OMP_NUM_THREADS: 4
+#qtype: shell
+#mpi_runner: mpirun
+#pre_run: "source ~/env.sh"
+#shell_env:
+#    PATH: "~/Coding/Abinit/bzr_archives/773/gmatteo-private/gcc/src/98_main/:$PATH"
+#    DYLD_LIBRARY_PATH: /opt/intel/composerxe-2011.0.085/mkl/lib/:/opt/intel/composerxe-2011.0.085/compiler/lib/:$DYLD_LIBRARY_PATH
+###omp_env:
+##   OMP_NUM_THREADS: 4
 policy:
     autoparal: 1
     max_ncpus: 2
-    #automemory: 0
-    #mode: aggressive
-    #condition: {omp_ncpus: {$eq: 2}}}
-    #condition: {mem_per_cpu: {$le: 10}}}
-    #condition: {efficiency: {$gt: 0.99}}}
-    #condition: {$and: [ {efficiency: {$gt: 0.99}}, {tot_ncpus: {$divisible: 2}} ]}
+#    #automemory: 0
+#    #mode: aggressive
+#    #condition: {omp_ncpus: {$eq: 2}}}
+#    #condition: {mem_per_cpu: {$le: 10}}}
+#    #condition: {efficiency: {$gt: 0.99}}}
+#    #condition: {$and: [ {efficiency: {$gt: 0.99}}, {tot_ncpus: {$divisible: 2}} ]}

-#qtype: shell
-#qadapters:
-#  - priority: 1
-#    queue:
-#      qname: gmac
-#      qtype: shell
-#    job:
-#      mpi_runner: mpirun
-#      pre_run:
-#        - source ~/env.sh
-#        - ulimit
-#    limits:
-#      min_cores: 1
-#      max_cores: 2
-#      timelimit: 1:00:00
-#    hardware:
-#      num_nodes: 1
-#      sockets_per_node: 1
-#      cores_per_socket: 2
-#      mem_per_node: 4 Gb
-#    # Optional
-#    #condition: {"$eq": {omp_threads: 2}}
+qadapters:
+  - priority: 1
+    queue:
+      qname: gmac
+      qtype: shell
+    job:
+      mpi_runner: mpirun
+      pre_run:
+        - source ~/env.sh
+    limits:
+      min_cores: 1
+      max_cores: 2
+      timelimit: 1:00:00
+    hardware:
+      num_nodes: 1
+      sockets_per_node: 1
+      cores_per_socket: 2
+      mem_per_node: 4 Gb
+    # Optional
+    #condition: {"$eq": {omp_threads: 2}}
 #db_connector:
 #    enabled: no
 #    database: abinit
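This last hunk retires the old flat shell configuration in favour of the qadapters layout used by the cluster files above. A hedged sketch of the same migration done programmatically; the old_manager.yml path and the fallback values are hypothetical, and only the key names come from the file itself:

# Sketch of the flat-format -> qadapters migration performed by hand in the
# hunk above. "old_manager.yml" is a hypothetical file in the old flat
# layout; the defaults mirror the gmac entry shown above.
import yaml

with open("old_manager.yml") as fh:
    old = yaml.safe_load(fh)

new = {
    "qadapters": [{
        "priority": 1,
        "queue": {"qname": "gmac", "qtype": old.get("qtype", "shell")},
        "job": {
            "mpi_runner": old.get("mpi_runner", "mpirun"),
            "pre_run": [old["pre_run"]] if old.get("pre_run") else [],
            "shell_env": old.get("shell_env", {}),
        },
        "limits": {"min_cores": 1, "max_cores": 2, "timelimit": "1:00:00"},
        "hardware": {"num_nodes": 1, "sockets_per_node": 1,
                     "cores_per_socket": 2, "mem_per_node": "4 Gb"},
    }],
    "policy": old.get("policy", {}),
}

print(yaml.safe_dump(new, default_flow_style=False))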