Commit a0acea2f authored by Yann.Sagon

Merge branch 'master' of gitlab.unige.ch:hpc/softs

Parents: 26a78cf6 b9973e98
#!/bin/sh
#
# <http://baobabmaster.unige.ch/enduser/src/enduser/applications.html#nvida-cuda>
#SBATCH --partition=shared-gpu-EL7
#SBATCH --time=00:15:00
#SBATCH --gres=gpu:1
echo "I: full hostname: $(hostname -f)"
module load CUDA/9.1.85
# see here for more samples:
# /opt/cudasample/NVIDIA_CUDA-8.0_Samples/bin/x86_64/linux/release/
# if you need to know the allocated CUDA device, you can obtain it here:
echo "I: CUDA_VISIBLE_DEVICES: ${CUDA_VISIBLE_DEVICES}"
echo "====="
srun /opt/ebsofts/Compiler/GCC/6.4.0-2.28/CUDA/9.1.85/extras/demo_suite/deviceQuery
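The script above is a complete Slurm batch job. A minimal way to run it is sketched below, assuming it is saved as deviceQuery.sh (the file name is an assumption, not part of this commit):

sbatch deviceQuery.sh        # submit the job (assumed file name)
squeue -u $USER              # watch it in the queue
cat slurm-<jobid>.out        # read the deviceQuery report once the job has finished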
#!/bin/sh
#
# <http://baobabmaster.unige.ch/enduser/src/enduser/applications.html#nvida-cuda>
#SBATCH --partition=shared-gpu-EL7
#SBATCH --time=00:15:00
#SBATCH --gres=gpu:1
echo "I: full hostname: $(hostname -f)"
VERSION_COMPILER='7.3.0-2.30'
module load GCC/${VERSION_COMPILER}
VERSION_CUDA='9.2.88'
module load CUDA/${VERSION_CUDA}
# see here for more samples:
# /opt/cudasample/NVIDIA_CUDA-8.0_Samples/bin/x86_64/linux/release/
# if you need to know the allocated CUDA device, you can obtain it here:
echo "I: CUDA_VISIBLE_DEVICES: ${CUDA_VISIBLE_DEVICES}"
echo "====="
srun /opt/ebsofts/Compiler/GCC/${VERSION_COMPILER}/CUDA/${VERSION_CUDA}/extras/demo_suite/deviceQuery
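This variant pins the toolchain through the VERSION_COMPILER and VERSION_CUDA variables, so GCC and CUDA can be bumped in one place. Before changing them, the versions actually provided by the cluster can be listed with the module system; a hedged sketch, depending on whether Environment Modules or Lmod is installed:

module avail GCC             # list available GCC builds
module avail CUDA            # list available CUDA builds
module spider CUDA           # Lmod alternative, shows hidden/dependent builds too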
#!/usr/bin/env python
#
# <https://pytorch.org/docs/stable/cuda.html>
# NOTE: print changed from a statement to a function between Python 2.x and 3.x,
# hence the __future__ import below.
from __future__ import print_function
import torch
device_count = torch.cuda.device_count()
print("torch.cuda.device_count:", device_count)
#!/usr/bin/env python
#
# Create a 10x10 matrix filled with zeros on the CUDA device
# <https://hpc-community.unige.ch/t/gpu010-cuda-singularity-cuda-runtime-error/306>
import torch
cuda = torch.device('cuda')
a = torch.zeros(10, 10, device=cuda)  # 10x10 matrix, as described above
print(a)
#!/bin/sh
#
# <https://hpc-community.unige.ch/t/gpu010-cuda-singularity-cuda-runtime-error/306>
#SBATCH --partition=shared-gpu-EL7
#SBATCH --time=00:15:00
#SBATCH --gres=gpu:1
echo "I: full hostname: $(hostname -f)"
VERSION_PYTHON='3.6.4'
module load GCC/6.4.0-2.28 OpenMPI/2.1.2
module load Python/${VERSION_PYTHON}
VERSION_CUDA='9.1.85'
module load CUDA/${VERSION_CUDA}
module load PyTorch/0.3.0-Python-${VERSION_PYTHON}
# if you need to know the allocated CUDA device, you can obtain it here:
echo "I: CUDA_VISIBLE_DEVICES: ${CUDA_VISIBLE_DEVICES}"
echo "====="
srun ./cuda_-_device_count.py
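After submitting, it is worth confirming that Slurm actually granted a GPU to the job. A minimal sketch using standard Slurm commands; the batch-file name and job id are placeholders, not part of this commit:

sbatch cuda_-_device_count.sh              # assumed file name for the script above
scontrol show job <jobid> | grep -i gres   # while the job is queued or running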
#!/bin/sh
#
# <https://hpc-community.unige.ch/t/gpu010-cuda-singularity-cuda-runtime-error/306>
#SBATCH --partition=shared-gpu-EL7
#SBATCH --time=00:15:00
#SBATCH --gres=gpu:1
echo "I: full hostname: $(hostname -f)"
module load GCC/5.4.0-2.26
module load Singularity/2.4.5
VERSION_CUDA='9.2.148.1'
module load CUDA/${VERSION_CUDA}
# if you need to know the allocated CUDA device, you can obtain it here:
echo "I: CUDA_VISIBLE_DEVICES: ${CUDA_VISIBLE_DEVICES}"
echo "====="
srun singularity \
exec \
--bind $(readlink ${HOME}/scratch) \
--nv \
${PWD}/pytorch.simg \
python \
${PWD}/cuda_-_matrix_zeros.py
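The --nv flag exposes the host's NVIDIA driver and GPU devices inside the container, and the job expects a pytorch.simg image next to the batch script. One way such an image might be produced with Singularity 2.x is sketched below; the Docker Hub source is an assumption, not part of this commit:

singularity pull --name pytorch.simg docker://pytorch/pytorch   # assumed image source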