Commit a3d3223e authored by Yann.Sagon

example openmp

parent d63b62a1
File added
/*
 * Calculate $\pi = 4 \int_0^1 \sqrt{1 - x^2} \, dx$
 * using a rectangle rule $\sum_{i=1}^N f(x_0 + i h - h/2) h$
 *
 * Compile as $> gcc -o hello pi.c -fopenmp -lm
 * Run as $> OMP_NUM_THREADS=2 ./hello
 */
#include <stdio.h>
#include <math.h>
#include <omp.h>
#include <stdint.h>

int main()
{
    const int64_t N = 100000000000;  /* number of rectangles */
    const double L = 1.0;            /* length of the integration interval */
    const double h = L / N;          /* width of one rectangle */
    const double x_0 = 0.0;          /* left end of the interval */
    double pi;
    double t_1, t_2;
    int64_t i;
    double sum = 0.0;

    t_1 = omp_get_wtime();
    /* each thread accumulates a private partial sum, combined by the reduction */
    #pragma omp parallel for reduction(+: sum) schedule(static)
    for (i = 0; i < N; ++i)
    {
        double x = x_0 + i * h + h/2;  /* midpoint of rectangle i */
        sum += sqrt(1 - x*x);
    }
    t_2 = omp_get_wtime();

    pi = sum * h * 4.0;
    printf("omp_get_max_threads(): %d\n", omp_get_max_threads());
    printf("time: %f\n", t_2 - t_1);
    printf("pi ~ %f\n", pi);
    return 0;
}
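
For reference, the loop accumulates the midpoint sum written with the code's own variables ($h = L/N$, $x_0 = 0$); it is the same rule as in the header comment after shifting the index by one:

$\pi \approx 4 h \sum_{i=0}^{N-1} \sqrt{1 - x_i^2}$, where $x_i = x_0 + (i + 1/2)\, h = x_0 + (i+1) h - h/2$.
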
Requested cpu_bind option outside of job step allocation for task[0]
omp_get_max_threads(): 16
time: 60.947409
pi ~ 3.141593
#!/bin/sh
#SBATCH --job-name=test
#SBATCH --partition=shared
#SBATCH --output=slurm-%J.out
#SBATCH --nodes 1
#SBATCH --exclusive
# optional
#export OMP_NUM_THREADS=$SLURM_JOB_CPUS_PER_NODE
# mandatory if the number of CPUs per task is not specified
srun --cpu_bind=mask_cpu:0xffffffff hello
# one can also do it this way (max 1 node, not great for hybrid jobs)
#srun -c $SLURM_JOB_CPUS_PER_NODE hello
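
For comparison, here is a minimal sketch of the same job requesting the CPUs explicitly with --cpus-per-task instead of binding a CPU mask. The value 16 is an assumption taken from the omp_get_max_threads() output above; the partition and binary names are reused from the original script.

#!/bin/sh
#SBATCH --job-name=test
#SBATCH --partition=shared
#SBATCH --output=slurm-%J.out
#SBATCH --nodes 1
#SBATCH --cpus-per-task=16   # assumed core count, matching the 16 threads reported above
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
srun -c $SLURM_CPUS_PER_TASK hello

This keeps the OpenMP thread count tied to the allocation without a hand-written CPU mask.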