Mirror of https://github.com/Steffo99/unimore-hpc-assignments.git

Make some optimizations toggleable, so results can be compared easily

Steffo committed 2022-11-17 02:59:31 +01:00
Parent 60a061991b, commit 7cd8707bb9
Signed by: steffo (GPG key ID: 6965406171929D01)
3 changed files with 52 additions and 10 deletions
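
Concretely, each OpenMP pragma in the ATAX source is wrapped in a preprocessor guard (TOGGLE_INIT_ARRAY_1/2 for the two initialization loops, TOGGLE_KERNEL_ATAX_1/2 for the two kernel loops), and the benchmark script rebuilds and times the program for every combination of those defines. A minimal sketch of the guard pattern; the macro name and loop body here are placeholders, not code from the repository:

    #ifdef TOGGLE_EXAMPLE   /* placeholder toggle name */
    #pragma omp parallel for num_threads(THREAD_COUNT) schedule(static)
    #endif
    for (i = 0; i < n; i++) /* stays serial unless -DTOGGLE_EXAMPLE is passed */
        work(i);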


@@ -1,14 +1,46 @@
 #!/bin/bash
-runs=9
-totalt=0.0
-for i in $(seq $runs)
-do
-    exet=$(./atax_acc)
-    totalt=$(awk "BEGIN{print $totalt+$exet}")
-    echo "Run #$i: " $(awk "BEGIN{printf(\"%.3g\", $exet)}") "seconds"
-done
-avgt=$(awk "BEGIN{print $totalt/$runs}")
-echo "Average: " $(awk "BEGIN{printf(\"%.3g\", $avgt)}") "seconds"
+run_benchmarks() {
+    runs=25
+    totalt=0.0
+
+    for i in $(seq $runs)
+    do
+        exet=$(./atax_acc)
+        totalt=$(awk "BEGIN{print $totalt+$exet}")
+        # echo "Run #$i: " $(awk "BEGIN{printf(\"%.3g\", $exet)}") "seconds"
+    done
+
+    avgt=$(awk "BEGIN{print $totalt/$runs}")
+    echo " Average of $runs runs: " $(awk "BEGIN{printf(\"%.3g\", $avgt)}") "seconds"
+}
+
+for c in $(seq 0 15)
+do
+    cflags=""
+    if (( $c & 1 ))
+    then
+        cflags="$cflags -DTOGGLE_INIT_ARRAY_1"
+    fi
+    if (( $c & 2 ))
+    then
+        cflags="$cflags -DTOGGLE_INIT_ARRAY_2"
+    fi
+    if (( $c & 4 ))
+    then
+        cflags="$cflags -DTOGGLE_KERNEL_ATAX_1"
+    fi
+    if (( $c & 8 ))
+    then
+        cflags="$cflags -DTOGGLE_KERNEL_ATAX_2"
+    fi
+
+    echo "Flags: $cflags"
+    make "EXTRA_CFLAGS=$cflags" clean all
+    run_benchmarks
+done
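
The loop over c treats the counter as a four-bit mask, so the sixteen iterations cover every on/off combination of the four toggles (bit 0: TOGGLE_INIT_ARRAY_1, bit 1: TOGGLE_INIT_ARRAY_2, bit 2: TOGGLE_KERNEL_ATAX_1, bit 3: TOGGLE_KERNEL_ATAX_2). For example, iteration c = 5 (binary 0101) sets bits 0 and 2, so that pass effectively runs:

    make "EXTRA_CFLAGS= -DTOGGLE_INIT_ARRAY_1 -DTOGGLE_KERNEL_ATAX_1" clean all
    run_benchmarks

c = 0 is the all-serial baseline, and c = 15 enables every pragma.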


@@ -15,12 +15,14 @@ CFLAGS+= -O3
 CFLAGS+= -g3
 # -DTHREAD_COUNT allows us to alter the number of threads used in the whole file
 CFLAGS+= -DTHREAD_COUNT=4
+# Extend CFLAGS with command line parameters
+CFLAGS+= ${EXTRA_CFLAGS}

 # Disable make output
 MAKEFLAGS+= --silent

 .PHONY: bench
 bench:
-	make clean all
 	./.bench.sh
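
With ${EXTRA_CFLAGS} appended to CFLAGS, defines can now be injected from the command line instead of by editing the Makefile, which is exactly what the benchmark script does. The `make clean all` step is dropped from the bench target because the script now rebuilds the program itself for each flag combination. A usage sketch, assuming the targets keep their current names:

    make "EXTRA_CFLAGS=-DTOGGLE_KERNEL_ATAX_2" clean all   # build one specific toggle set
    make bench                                             # full sweep via ./.bench.sh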


@@ -25,13 +25,17 @@ static void init_array(int nx, int ny,
 	int i, j;

 	/// Initialize the `x` array with PI and its multiples.
+#ifdef TOGGLE_INIT_ARRAY_1
 	#pragma omp parallel for num_threads(THREAD_COUNT) schedule(static)
+#endif
 	for (i = 0; i < ny; i++) {
 		x[i] = i * M_PI;
 	}

 	/// Initialize the `A` matrix with [something?]
+#ifdef TOGGLE_INIT_ARRAY_2
 	#pragma omp parallel for num_threads(THREAD_COUNT) schedule(static)
+#endif
 	for (i = 0; i < nx; i++) {
 		for (j = 0; j < ny; j++) {
 			A[i][j] = ((DATA_TYPE)i * (j + 1)) / nx;
@@ -63,14 +67,18 @@ static void kernel_atax(int nx, int ny,
 {
 	int i, j;

+#ifdef TOGGLE_KERNEL_ATAX_1
 	#pragma omp parallel for num_threads(THREAD_COUNT) schedule(static)
+#endif
 	for (i = 0; i < _PB_NY; i++)
 		y[i] = 0;

 	/// This computes... something? I guess whatever ATAX is?
 	// Now this gives a nice speedup, especially with a lot more threads than the count!
 	// THREAD_COUNT * 4 seems to be the best on my local computer. What's the best for the Jetson Nano?
+#ifdef TOGGLE_KERNEL_ATAX_2
 	#pragma omp parallel for num_threads(THREAD_COUNT) schedule(static)
+#endif
 	for (i = 0; i < _PB_NX; i++)
 	{
 		/// Every iteration has its own tmp variable
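
For context on the `/// This computes... something?` comment: ATAX is the PolyBench matrix-transpose-times-vector kernel, y = A^T * (A * x). A plain serial reference of the computation, written here as a sketch with double in place of the repository's DATA_TYPE and explicit nx/ny parameters in place of the _PB_NX/_PB_NY macros:

    /* Serial ATAX reference: y = A^T * (A * x), with A of size nx-by-ny. */
    static void atax_reference(int nx, int ny, double A[nx][ny],
                               double x[ny], double y[ny])
    {
        for (int i = 0; i < ny; i++)
            y[i] = 0.0;
        for (int i = 0; i < nx; i++) {
            double tmp = 0.0;                  /* tmp = (A * x)[i] */
            for (int j = 0; j < ny; j++)
                tmp += A[i][j] * x[j];
            for (int j = 0; j < ny; j++)
                y[j] += A[i][j] * tmp;         /* add tmp times row i of A, i.e. column i of A^T */
        }
    }

The per-iteration tmp scalar matches the "Every iteration has its own tmp variable" comment in the parallel version above.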