mirror of
https://github.com/Steffo99/unimore-hpc-assignments.git
synced 2024-11-22 16:14:24 +00:00
HPC CUDA Lab 2
This commit is contained in:
parent
0d0b0f04cf
commit
809d881f2f
17 changed files with 2623 additions and 0 deletions
|
@ -16,3 +16,4 @@ The exercises related to OpenMP programming model can be found in the folder `op
|
||||||
|
|
||||||
### CUDA Exercises
|
### CUDA Exercises
|
||||||
- `cuda/lab1`: CUDA Basics.
|
- `cuda/lab1`: CUDA Basics.
|
||||||
|
- `cuda/lab2`: CUDA Memory Model.
|
||||||
|
|
208
cuda/lab2/.solutions/constant.cu
Normal file
208
cuda/lab2/.solutions/constant.cu
Normal file
|
@ -0,0 +1,208 @@
|
||||||
|
/*
|
||||||
|
* BSD 2-Clause License
|
||||||
|
*
|
||||||
|
* Copyright (c) 2020, Alessandro Capotondi
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright notice, this
|
||||||
|
* list of conditions and the following disclaimer.
|
||||||
|
*
|
||||||
|
* * Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
* this list of conditions and the following disclaimer in the documentation
|
||||||
|
* and/or other materials provided with the distribution.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||||
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||||
|
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||||
|
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||||
|
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @file constant.cu
|
||||||
|
* @author Alessandro Capotondi
|
||||||
|
* @date 27 Mar 2020
|
||||||
|
* @brief Exercise 2
|
||||||
|
*
|
||||||
|
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <assert.h>
|
||||||
|
#include <time.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <string.h>
|
||||||
|
#include <math.h>
|
||||||
|
#include <cuda_runtime.h>
|
||||||
|
|
||||||
|
// Wraps a CUDA runtime call and aborts with a diagnostic on failure.
// The do { } while (0) form makes the macro expand to a single statement,
// so it is safe inside unbraced if/else bodies (unlike a bare { } block,
// whose trailing ';' breaks `if (c) gpuErrchk(x); else ...`).
#define gpuErrchk(ans)                        \
    do                                        \
    {                                         \
        gpuAssert((ans), __FILE__, __LINE__); \
    } while (0)

// Reports a CUDA error (code + source location) on stderr and, when
// 'abort' is true (the default), terminates the process with the error code.
static inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort)
            exit(code);
    }
}
|
||||||
|
|
||||||
|
extern "C"
|
||||||
|
{
|
||||||
|
#include "utils.h"
|
||||||
|
}
|
||||||
|
|
||||||
|
#define TWO02 (1 << 2)
|
||||||
|
#define TWO04 (1 << 4)
|
||||||
|
#define TWO08 (1 << 8)
|
||||||
|
#ifndef N
|
||||||
|
#define N (1 << 27)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifndef BLOCK_SIZE
|
||||||
|
#define BLOCK_SIZE (128)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// Host-side coefficient table; filled in main() and then mirrored to the
// device __constant__ buffer below via cudaMemcpyToSymbol.
float K[4098];
// Device constant-memory copy of K: read-only on the device, cached, and
// broadcast efficiently when all threads of a warp read the same index.
__constant__ float cK[4098];
|
||||||
|
|
||||||
|
/*
 * Filtering (host golden model): subtracts the coefficient table K from y,
 * cycling through K's 4098 entries.
 */
void filter(float * __restrict__ y, int n)
{
#pragma omp parallel for simd schedule(simd: static)
    for (int idx = 0; idx < n; idx++)
        y[idx] -= K[idx % 4098];
}
|
||||||
|
|
||||||
|
//TODO GPU Filter implementation
|
||||||
|
/**
 * GPU filter using the __constant__ coefficient table cK.
 * Launched with a 1-D grid of ceil(n / blockDim.x) blocks, so the last
 * block can contain threads whose index is past the end of y.
 */
__global__ void filter_v1(float * __restrict__ y, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;

    // Bounds guard: the ceil-div launch overshoots whenever n is not a
    // multiple of the block size; without this check the tail threads
    // read/write past the end of y (undefined behavior).
    if (i < n)
        y[i] = y[i] - cK[i % 4098];
}
|
||||||
|
|
||||||
|
//TODO GPU Filter implementation without constant mem
|
||||||
|
/**
 * GPU filter reading the coefficient table from plain global memory (k),
 * for comparison against the constant-memory version filter_v1.
 * Launched with a 1-D grid of ceil(n / blockDim.x) blocks.
 */
__global__ void filter_v2(float * __restrict__ y, float * __restrict__ k, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;

    // Bounds guard against the ceil-div launch overshoot (see filter_v1).
    if (i < n)
        y[i] = y[i] - k[i % 4098];
}
|
||||||
|
|
||||||
|
/**
 * Entry point: builds the input data, runs the host filter as the golden
 * model, then runs the constant-memory (v1) and global-memory (v2) GPU
 * versions, checking each bit-exactly against the host result.
 *
 * argv[1] (optional): number of elements (defaults to N).
 */
int main(int argc, const char **argv)
{
    int iret = 0;
    int n = N;
    float *h_y, *d_y;
    float *h_x, *d_x, *d_k;
    float *h_z;

    if (argc > 1)
        n = atoi(argv[1]);

    if (NULL == (h_x = (float *)malloc(sizeof(float) * n)))
    {
        printf("error: memory allocation for 'x'\n");
        iret = -1;
    }
    if (NULL == (h_y = (float *)malloc(sizeof(float) * n)))
    {
        printf("error: memory allocation for 'y'\n");
        iret = -1;
    }
    if (NULL == (h_z = (float *)malloc(sizeof(float) * n)))
    {
        printf("error: memory allocation for 'z'\n");
        iret = -1;
    }
    if (0 != iret)
    {
        // free(NULL) is a no-op, so all three can be released
        // unconditionally (h_x was previously leaked on this path).
        free(h_x);
        free(h_y);
        free(h_z);
        exit(EXIT_FAILURE);
    }

    // Init data: constant coefficient table and identical x/y/z payloads
    float b = rand() % TWO04;
    float c = rand() % TWO08;

    for (int i = 0; i < 4098; i++)
    {
        K[i] = b;
    }
    for (int i = 0; i < n; i++)
    {
        h_x[i] = h_y[i] = h_z[i] = c / (float)TWO04;
    }

    // Host golden model
    start_timer();
    filter(h_z, n);
    stop_timer();
    printf("Filter (Host): %9.3f sec %9.1f MFLOPS\n", elapsed_ns() / 1.0e9, n / ((1.0e6 / 1e9) * elapsed_ns()));

    // CUDA buffer allocation
    gpuErrchk(cudaMalloc((void **)&d_y, sizeof(float) * n));
    // Load the device constant buffer (cK) from the host table K
    gpuErrchk(cudaMemcpyToSymbol(cK, K, sizeof(float) * 4098));

    // v1: constant-memory kernel (transfers included in the timing)
    start_timer();
    gpuErrchk(cudaMemcpy(d_y, h_y, sizeof(float) * n, cudaMemcpyHostToDevice));
    filter_v1<<<((n + BLOCK_SIZE - 1) / BLOCK_SIZE), BLOCK_SIZE>>>(d_y, n);
    gpuErrchk(cudaPeekAtLastError());
    // Blocking D2H copy also synchronizes with the kernel before stop_timer()
    gpuErrchk(cudaMemcpy(h_y, d_y, sizeof(float) * n, cudaMemcpyDeviceToHost));
    stop_timer();
    printf("Filter-v1 (GPU): %9.3f sec %9.1f MFLOPS\n", elapsed_ns() / 1.0e9, n / ((1.0e6 / 1e9) * elapsed_ns()));

    // Check mathematical consistency (bit-exact compare vs. host result)
    for (int i = 0; i < n; ++i)
    {
        iret = *(int *)(h_y + i) ^ *(int *)(h_z + i);
        assert(iret == 0);
    }

    //-- No-constant version --
    gpuErrchk(cudaMalloc((void **)&d_x, sizeof(float) * n));
    gpuErrchk(cudaMalloc((void **)&d_k, sizeof(float) * 4098));

    start_timer();
    gpuErrchk(cudaMemcpy(d_x, h_x, sizeof(float) * n, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_k, K, sizeof(float) * 4098, cudaMemcpyHostToDevice));
    filter_v2<<<((n + BLOCK_SIZE - 1) / BLOCK_SIZE), BLOCK_SIZE>>>(d_x, d_k, n);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaMemcpy(h_x, d_x, sizeof(float) * n, cudaMemcpyDeviceToHost));
    stop_timer();
    printf("Filter-v2 (GPU): %9.3f sec %9.1f MFLOPS\n", elapsed_ns() / 1.0e9, n / ((1.0e6 / 1e9) * elapsed_ns()));

    // Check mathematical consistency (bit-exact compare vs. v1 result)
    for (int i = 0; i < n; ++i)
    {
        iret = *(int *)(h_y + i) ^ *(int *)(h_x + i);
        assert(iret == 0);
    }

    // Cleanup: host and device buffers (d_k was previously leaked)
    free(h_x);
    gpuErrchk(cudaFree(d_x));
    gpuErrchk(cudaFree(d_k));
    free(h_y);
    gpuErrchk(cudaFree(d_y));
    free(h_z);
    return 0;
}
|
355
cuda/lab2/.solutions/exercise2.cu
Normal file
355
cuda/lab2/.solutions/exercise2.cu
Normal file
|
@ -0,0 +1,355 @@
|
||||||
|
/*
|
||||||
|
* BSD 2-Clause License
|
||||||
|
*
|
||||||
|
* Copyright (c) 2020, Alessandro Capotondi
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright notice, this
|
||||||
|
* list of conditions and the following disclaimer.
|
||||||
|
*
|
||||||
|
* * Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
* this list of conditions and the following disclaimer in the documentation
|
||||||
|
* and/or other materials provided with the distribution.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||||
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||||
|
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||||
|
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||||
|
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @file exercise2.cu
|
||||||
|
* @author Alessandro Capotondi
|
||||||
|
* @date 5 May 2020
|
||||||
|
* @brief Exercise 3 - CUDA MATMUL Optimized
|
||||||
|
*
|
||||||
|
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <assert.h>
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <time.h>
|
||||||
|
|
||||||
|
// Wraps a CUDA runtime call and aborts with a diagnostic on failure.
// do { } while (0) makes the macro a single statement, safe inside
// unbraced if/else bodies (a bare { } block with a trailing ';' is not).
#define gpuErrchk(ans)                        \
    do                                        \
    {                                         \
        gpuAssert((ans), __FILE__, __LINE__); \
    } while (0)

// Reports a CUDA error (code + source location) on stderr and, when
// 'abort' is true (the default), terminates the process with the error code.
static inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort)
            exit(code);
    }
}
|
||||||
|
|
||||||
|
extern "C"
|
||||||
|
{
|
||||||
|
#include "utils.h"
|
||||||
|
}
|
||||||
|
|
||||||
|
#define TWO02 (1 << 2)
|
||||||
|
#define TWO04 (1 << 4)
|
||||||
|
#define TWO08 (1 << 8)
|
||||||
|
|
||||||
|
#ifndef N
|
||||||
|
#define N (1 << 10)
|
||||||
|
#endif
|
||||||
|
#ifndef TILE_W
|
||||||
|
#define TILE_W 128
|
||||||
|
#endif
|
||||||
|
#ifndef BLOCK_SIZE
|
||||||
|
#define BLOCK_SIZE 32
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/**
 * Host reference GEMM: c = a * b for square n x n row-major matrices.
 * Parallelized over the output elements with OpenMP; serves as the
 * golden model for the GPU kernels.
 */
void gemm(float *__restrict__ a, float *__restrict__ b, float *__restrict__ c, int n)
{
#pragma omp parallel for collapse(2)
    for (int row = 0; row < n; ++row)
    {
        for (int col = 0; col < n; ++col)
        {
            float acc = 0.0;
            for (int k = 0; k < n; ++k)
                acc += a[row * n + k] * b[k * n + col];
            c[row * n + col] = acc;
        }
    }
}
|
||||||
|
|
||||||
|
/**
 * Naive GEMM kernel: one thread computes one element of c = a * b.
 * Expects a 2-D launch of BLOCK_SIZE x BLOCK_SIZE thread blocks sized by
 * ceiling division over n.
 */
__global__ void gemm_v1(float * __restrict__ a, float * __restrict__ b, float * __restrict__ c, int n)
{
    int row = threadIdx.x + blockIdx.x * blockDim.x;
    int col = threadIdx.y + blockIdx.y * blockDim.y;

    // Bounds guard: the ceil-div grid overshoots when n % BLOCK_SIZE != 0;
    // without it the tail threads would access a/b/c out of bounds.
    if (row < n && col < n)
    {
        float sum = 0.0f;
        for (int k = 0; k < n; ++k)
        {
            sum += a[row * n + k] * b[k * n + col];
        }
        c[row * n + col] = sum;
    }
}
|
||||||
|
|
||||||
|
// Returns the element offset of the BLOCK_SIZE x BLOCK_SIZE tile at
// tile-coordinates (idx_i, idx_j) within a row-major n x n matrix.
__device__ int get_offset(int idx_i, int idx_j, int n)
{
    return (idx_i * n + idx_j) * BLOCK_SIZE;
}
|
||||||
|
|
||||||
|
/**
 * Tiled GEMM kernel: each thread block computes one BLOCK_SIZE x BLOCK_SIZE
 * sub-matrix Csub of c by staging tiles of a and b through shared memory.
 * Assumes a BLOCK_SIZE x BLOCK_SIZE thread block and that n is a multiple
 * of BLOCK_SIZE (the tile loop does not handle a partial last tile).
 */
__global__ void gemm_v2(float *__restrict__ a, float *__restrict__ b, float *__restrict__ c, int n)
{
    // Shared-memory staging buffers for the current tiles of A and B
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];

    // Tile (block) row and column of the output sub-matrix Csub
    int ib = blockIdx.y;
    int jb = blockIdx.x;

    // Thread row and column within Csub
    int it = threadIdx.y;
    int jt = threadIdx.x;

    int a_offset, b_offset, c_offset;

    // Each thread accumulates one element of Csub in a register
    float Cvalue = 0.0f;

    // Walk the tiles of A (along the row) and B (along the column) that
    // contribute to Csub, multiplying tile pairs and accumulating.
    for (int kb = 0; kb < (n / BLOCK_SIZE); ++kb)
    {
        // Starting offset of Asub: tile (ib, kb) of A
        a_offset = get_offset(ib, kb, n);
        // Starting offset of Bsub: tile (kb, jb) of B
        b_offset = get_offset(kb, jb, n);

        // Each thread loads one element of each tile into shared memory
        As[it][jt] = a[a_offset + it * n + jt];
        Bs[it][jt] = b[b_offset + it * n + jt];

        // Barrier: all loads must complete before any thread reads the tiles
        __syncthreads();

        // Multiply the staged tiles
        for (int k = 0; k < BLOCK_SIZE; ++k)
        {
            Cvalue += As[it][k] * Bs[k][jt];
        }
        // Barrier: finish computing before the tiles are overwritten
        // in the next iteration
        __syncthreads();
    }

    c_offset = get_offset(ib, jb, n);
    // Write the accumulated element of Csub back to global memory
    c[c_offset + it * n + jt] = Cvalue;
}
|
||||||
|
|
||||||
|
/**
 * Tiled GEMM kernel (variant of gemm_v2): each thread block computes one
 * BLOCK_SIZE x BLOCK_SIZE sub-matrix Csub of c via shared-memory tiles.
 * Assumes a BLOCK_SIZE x BLOCK_SIZE thread block and n % BLOCK_SIZE == 0.
 */
__global__ void gemm_v3(float *__restrict__ a, float *__restrict__ b, float *__restrict__ c, int n)
{
    // Shared-memory staging buffers for the current tiles of A and B
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];

    // Tile (block) row and column of the output sub-matrix Csub
    int ib = blockIdx.y;
    int jb = blockIdx.x;

    // Thread row and column within Csub
    int it = threadIdx.y;
    int jt = threadIdx.x;

    int a_offset, b_offset, c_offset;

    // Each thread accumulates one element of Csub in a register
    float Cvalue = 0.0f;

    // Walk the tiles of A and B that contribute to Csub
    for (int kb = 0; kb < (n / BLOCK_SIZE); ++kb)
    {
        // Starting offset of Asub: tile (ib, kb) of A
        a_offset = get_offset(ib, kb, n);
        // Starting offset of Bsub: tile (kb, jb) of B.
        // BUGFIX: this previously used get_offset(ib, kb, n), loading the
        // same tile as A instead of B's (kb, jb) tile, which produced
        // wrong results (compare with the correct gemm_v2).
        b_offset = get_offset(kb, jb, n);

        // Each thread loads one element of each tile into shared memory
        As[it][jt] = a[a_offset + it * n + jt];
        Bs[it][jt] = b[b_offset + it * n + jt];

        // Barrier: tiles must be fully loaded before use
        __syncthreads();

        // Multiply the staged tiles
        for (int k = 0; k < BLOCK_SIZE; ++k)
        {
            Cvalue += As[it][k] * Bs[k][jt];
        }
        // Barrier: finish computing before the tiles are overwritten
        __syncthreads();
    }

    c_offset = get_offset(ib, jb, n);
    // Write the accumulated element of Csub back to global memory
    c[c_offset + it * n + jt] = Cvalue;
}
|
||||||
|
|
||||||
|
/**
 * Entry point: runs the host GEMM as the golden model, then three GPU
 * variants (naive v1, shared-memory tiled v2 and v3), timing each run
 * (transfers included) and checking results bit-exactly against the host.
 *
 * argv[1] (optional): matrix dimension n (defaults to N). The tiled
 * kernels assume n is a multiple of BLOCK_SIZE.
 */
int main(int argc, char *argv[])
{
    int n = N, iret = 0;
    float *a, *b, *c, *g;
    struct timespec rt[2];
    double wt; // walltime

    if (argc > 1)
        n = atoi(argv[1]);

    // Allocation error messages now name the actual variables
    // (previously 'x','y','z','z' — 'g' even duplicated 'z').
    if (NULL == (a = (float *)malloc(sizeof(*a) * n * n)))
    {
        printf("error: memory allocation for 'a'\n");
        iret = -1;
    }
    if (NULL == (b = (float *)malloc(sizeof(*b) * n * n)))
    {
        printf("error: memory allocation for 'b'\n");
        iret = -1;
    }
    if (NULL == (c = (float *)malloc(sizeof(*c) * n * n)))
    {
        printf("error: memory allocation for 'c'\n");
        iret = -1;
    }
    if (NULL == (g = (float *)malloc(sizeof(*g) * n * n)))
    {
        printf("error: memory allocation for 'g'\n");
        iret = -1;
    }

    if (0 != iret)
    {
        // free(NULL) is a no-op, so unconditional release is safe
        free(a);
        free(b);
        free(c);
        free(g);
        exit(EXIT_FAILURE);
    }

    // Init data: pseudo-random scalars broadcast over the matrices
    int _b = rand() % TWO04;
    int _c = rand() % TWO08;
#pragma omp parallel for
    for (int i = 0; i < n * n; i++)
    {
        a[i] = _b / (float)TWO02;
        b[i] = _c / (float)TWO04;
        c[i] = g[i] = 0.0;
    }

    // Host golden model
    clock_gettime(CLOCK_REALTIME, rt + 0);
    gemm(a, b, g, n);
    clock_gettime(CLOCK_REALTIME, rt + 1);
    wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
    printf("GEMM (Host) : %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt));

    // CUDA buffer allocation
    float *d_a, *d_b, *d_c;
    gpuErrchk(cudaMalloc((void **)&d_a, sizeof(float) * n * n));
    gpuErrchk(cudaMalloc((void **)&d_b, sizeof(float) * n * n));
    gpuErrchk(cudaMalloc((void **)&d_c, sizeof(float) * n * n));

    // v1: naive kernel (timed including H2D/D2H transfers; the blocking
    // D2H copy also synchronizes with the kernel before the stop stamp)
    clock_gettime(CLOCK_REALTIME, rt + 0);
    gpuErrchk(cudaMemcpy(d_a, a, sizeof(float) * n * n, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_b, b, sizeof(float) * n * n, cudaMemcpyHostToDevice));
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((n + (BLOCK_SIZE)-1) / (BLOCK_SIZE), (n + (BLOCK_SIZE)-1) / (BLOCK_SIZE));
    gemm_v1<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, n);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaMemcpy(c, d_c, sizeof(float) * n * n, cudaMemcpyDeviceToHost));
    clock_gettime(CLOCK_REALTIME, rt + 1);
    wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
    printf("GEMM-v1 (GPU): %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt));

    // Bit-exact check against the host result
    for (int i = 0; i < n * n; i++)
    {
        iret = *(int *)(g + i) ^ *(int *)(c + i);
        assert(iret == 0);
    }

    // v2: shared-memory tiled kernel
    clock_gettime(CLOCK_REALTIME, rt + 0);
    gpuErrchk(cudaMemcpy(d_a, a, sizeof(float) * n * n, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_b, b, sizeof(float) * n * n, cudaMemcpyHostToDevice));
    gemm_v2<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, n);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaMemcpy(c, d_c, sizeof(float) * n * n, cudaMemcpyDeviceToHost));
    clock_gettime(CLOCK_REALTIME, rt + 1);
    wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
    printf("GEMM-v2 (GPU): %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt));

    for (int i = 0; i < n * n; i++)
    {
        iret = *(int *)(g + i) ^ *(int *)(c + i);
        assert(iret == 0);
    }

    // v3: tiled kernel variant
    clock_gettime(CLOCK_REALTIME, rt + 0);
    gpuErrchk(cudaMemcpy(d_a, a, sizeof(float) * n * n, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_b, b, sizeof(float) * n * n, cudaMemcpyHostToDevice));
    gemm_v3<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, n);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaMemcpy(c, d_c, sizeof(float) * n * n, cudaMemcpyDeviceToHost));
    clock_gettime(CLOCK_REALTIME, rt + 1);
    wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
    printf("GEMM-v3 (GPU): %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt));

    for (int i = 0; i < n * n; i++)
    {
        iret = *(int *)(g + i) ^ *(int *)(c + i);
        assert(iret == 0);
    }

    // Cleanup: host and device buffers
    free(a);
    free(b);
    free(c);
    free(g);
    gpuErrchk(cudaFree(d_a));
    gpuErrchk(cudaFree(d_b));
    gpuErrchk(cudaFree(d_c));

    return 0;
}
|
202
cuda/lab2/.solutions/exercise3.cu
Normal file
202
cuda/lab2/.solutions/exercise3.cu
Normal file
|
@ -0,0 +1,202 @@
|
||||||
|
/*
|
||||||
|
* BSD 2-Clause License
|
||||||
|
*
|
||||||
|
* Copyright (c) 2020, Alessandro Capotondi
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright notice, this
|
||||||
|
* list of conditions and the following disclaimer.
|
||||||
|
*
|
||||||
|
* * Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
* this list of conditions and the following disclaimer in the documentation
|
||||||
|
* and/or other materials provided with the distribution.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||||
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||||
|
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||||
|
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||||
|
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @file exercise3.cu
|
||||||
|
* @author Alessandro Capotondi
|
||||||
|
* @date 5 May 2020
|
||||||
|
* @brief Exercise 3 - Image Luminance Histogram
|
||||||
|
*
|
||||||
|
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <time.h>
|
||||||
|
#include <sys/time.h>
|
||||||
|
#include <opencv2/opencv.hpp>
|
||||||
|
#include <opencv2/imgcodecs/imgcodecs.hpp>
|
||||||
|
#include <opencv2/objdetect/objdetect.hpp>
|
||||||
|
#include <opencv2/highgui/highgui.hpp>
|
||||||
|
|
||||||
|
using namespace cv;
|
||||||
|
using namespace std;
|
||||||
|
|
||||||
|
#ifndef BLOCK_SIZE
|
||||||
|
#define BLOCK_SIZE 32
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// Wraps a CUDA runtime call and aborts with a diagnostic on failure.
// do { } while (0) makes the macro a single statement, safe inside
// unbraced if/else bodies (a bare { } block with a trailing ';' is not).
#define gpuErrchk(ans)                        \
    do                                        \
    {                                         \
        gpuAssert((ans), __FILE__, __LINE__); \
    } while (0)

// Reports a CUDA error (code + source location) on stderr and, when
// 'abort' is true (the default), terminates the process with the error code.
static inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort)
            exit(code);
    }
}
|
||||||
|
|
||||||
|
extern "C"
|
||||||
|
{
|
||||||
|
#include "utils.h"
|
||||||
|
}
|
||||||
|
|
||||||
|
#define NBINS 256
|
||||||
|
|
||||||
|
/**
 * Host reference: 256-bin luminance histogram of an 8-bit grayscale image.
 * Adds to (does not reset) the contents of hist; callers zero it first.
 */
void hist(unsigned char *__restrict__ im, int *__restrict__ hist, int width, int height)
{
#pragma omp parallel for
    for (int px = 0; px < width * height; px++)
    {
        int bin = im[px];
        // Atomic increment: multiple OpenMP threads may hit the same bin
#pragma omp atomic
        hist[bin]++;
    }
}
|
||||||
|
|
||||||
|
/**
 * GPU histogram, v1: one thread per pixel, each performing an atomicAdd
 * directly on the global histogram. Expects a 2-D launch covering at
 * least width x height threads (overshoot is guarded).
 */
__global__ void hist_v1(unsigned char *__restrict__ im, int *__restrict__ hist, int width, int height)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;

    if (x < width && y < height)
    {
        int pixel = im[(y * width) + x];
        // Plain hist[pixel]++ would race between threads
        atomicAdd(&(hist[pixel]), 1);
    }
}
|
||||||
|
|
||||||
|
/**
 * GPU histogram, v2: privatizes the histogram in shared memory per block,
 * then merges each block's partial histogram into the global one with a
 * single atomicAdd per bin. Requires blockDim.x * blockDim.y >= NBINS so
 * every bin gets initialized and merged.
 */
__global__ void hist_v2(unsigned char *__restrict__ im, int *__restrict__ hist, int width, int height)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    // Linear thread index within the block. BUGFIX: the row stride is
    // blockDim.x (row length), not blockDim.y — the two were only
    // interchangeable because the launch happens to use square blocks.
    int blockIndex = (threadIdx.y * blockDim.x) + threadIdx.x;
    __shared__ int tmpHist[NBINS];

    // Shared memory is uninitialized: first NBINS threads zero the bins
    if (blockIndex < NBINS)
    {
        tmpHist[blockIndex] = 0;
    }
    __syncthreads();

    if (i < width && j < height)
    {
        int value;
        value = im[(j * width) + i];
        // Contention stays within the block's shared memory
        atomicAdd(&(tmpHist[value]), 1);
    }
    // All block-local counting must finish before the merge
    __syncthreads();

    if (blockIndex < NBINS)
        atomicAdd(&(hist[blockIndex]), tmpHist[blockIndex]);
}
|
||||||
|
|
||||||
|
/**
 * Entry point: loads a grayscale image (argv[1] or data/buzz.jpg),
 * computes its 256-bin histogram on the host (golden model) and with the
 * two GPU kernels, timing each and checking results bit-exactly.
 */
int main(int argc, char *argv[])
{
    int iret = 0;
    struct timespec rt[2];
    double wt; // walltime
    int hist_host[NBINS], hist_gpu[NBINS];

    string filename("data/buzz.jpg");

    if (argc > 1)
        filename = argv[1];

    // Load image
    Mat image = imread(filename, IMREAD_GRAYSCALE);
    if (!image.data)
    {
        cout << "Could not open or find the image" << std::endl;
        return -1;
    }

    int width = image.size().width;
    int height = image.size().height;

    memset(hist_host, 0, NBINS * sizeof(int));
    memset(hist_gpu, 0, NBINS * sizeof(int));

    // Compute CPU version - golden model
    clock_gettime(CLOCK_REALTIME, rt + 0);
    hist(image.ptr(), hist_host, width, height);
    clock_gettime(CLOCK_REALTIME, rt + 1);
    wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
    printf("Hist (Host) : %9.6f sec\n", wt);

    // CUDA buffer allocation
    int *d_hist_gpu;
    unsigned char *d_image;
    gpuErrchk(cudaMalloc((void **)&d_hist_gpu, sizeof(int) * NBINS));
    gpuErrchk(cudaMalloc((void **)&d_image, sizeof(unsigned char) * width * height));
    // BUGFIX: cudaMalloc does not zero memory, and hist_v1 accumulates
    // with atomicAdd — the bins must start at zero (previously the reset
    // happened only before hist_v2, so v1 counted on top of garbage).
    gpuErrchk(cudaMemset(d_hist_gpu, 0, NBINS * sizeof(int)));

    clock_gettime(CLOCK_REALTIME, rt + 0);
    gpuErrchk(cudaMemcpy(d_image, image.ptr(), sizeof(unsigned char) * width * height, cudaMemcpyHostToDevice));
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((width + BLOCK_SIZE - 1) / BLOCK_SIZE, (height + BLOCK_SIZE - 1) / BLOCK_SIZE);
    hist_v1<<<dimGrid, dimBlock>>>(d_image, d_hist_gpu, width, height);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaMemcpy(hist_gpu, d_hist_gpu, sizeof(int) * NBINS, cudaMemcpyDeviceToHost));
    clock_gettime(CLOCK_REALTIME, rt + 1);
    wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
    printf("Hist (GPU) : %9.6f sec\n", wt);

    // Bit-exact check against the host histogram
    for (int i = 0; i < NBINS; i++)
    {
        iret = *(int *)(hist_host + i) ^ *(int *)(hist_gpu + i);
        assert(iret == 0);
    }
    // Reset output before the shared-memory version
    gpuErrchk(cudaMemset(d_hist_gpu, 0, NBINS * sizeof(int)));

    clock_gettime(CLOCK_REALTIME, rt + 0);
    gpuErrchk(cudaMemcpy(d_image, image.ptr(), sizeof(unsigned char) * width * height, cudaMemcpyHostToDevice));
    hist_v2<<<dimGrid, dimBlock>>>(d_image, d_hist_gpu, width, height);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaMemcpy(hist_gpu, d_hist_gpu, sizeof(int) * NBINS, cudaMemcpyDeviceToHost));
    clock_gettime(CLOCK_REALTIME, rt + 1);
    wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
    printf("Hist-2 (GPU) : %9.6f sec\n", wt);

    for (int i = 0; i < NBINS; i++)
    {
        iret = *(int *)(hist_host + i) ^ *(int *)(hist_gpu + i);
        assert(iret == 0);
    }

    gpuErrchk(cudaFree(d_hist_gpu));
    gpuErrchk(cudaFree(d_image));

    return iret;
}
|
366
cuda/lab2/.solutions/exercise4.cu
Normal file
366
cuda/lab2/.solutions/exercise4.cu
Normal file
|
@ -0,0 +1,366 @@
|
||||||
|
/*
|
||||||
|
* BSD 2-Clause License
|
||||||
|
*
|
||||||
|
* Copyright (c) 2020, Alessandro Capotondi
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright notice, this
|
||||||
|
* list of conditions and the following disclaimer.
|
||||||
|
*
|
||||||
|
* * Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
* this list of conditions and the following disclaimer in the documentation
|
||||||
|
* and/or other materials provided with the distribution.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||||
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||||
|
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||||
|
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||||
|
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @file exercise4.cu
|
||||||
|
* @author Alessandro Capotondi
|
||||||
|
* @date 5 May 2020
|
||||||
|
* @brief Exercise 4 - Stencil 2d - Sobel
|
||||||
|
*
|
||||||
|
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <time.h>
|
||||||
|
#include <sys/time.h>
|
||||||
|
#include <opencv2/opencv.hpp>
|
||||||
|
#include <opencv2/imgcodecs/imgcodecs.hpp>
|
||||||
|
#include <opencv2/objdetect/objdetect.hpp>
|
||||||
|
#include <opencv2/highgui/highgui.hpp>
|
||||||
|
|
||||||
|
using namespace cv;
|
||||||
|
using namespace std;
|
||||||
|
|
||||||
|
#ifndef BLOCK_SIZE
|
||||||
|
#define BLOCK_SIZE 32
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define gpuErrchk(ans) \
|
||||||
|
{ \
|
||||||
|
gpuAssert((ans), __FILE__, __LINE__); \
|
||||||
|
}
|
||||||
|
// Print a readable diagnostic when a CUDA API call fails and, unless the
// caller opts out, terminate the process with the error code.
static inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
|
||||||
|
|
||||||
|
extern "C"
|
||||||
|
{
|
||||||
|
#include "utils.h"
|
||||||
|
}
|
||||||
|
|
||||||
|
// CPU reference implementation of the 3x3 Sobel edge detector.
// Writes the gradient magnitude of each interior pixel into out; border
// pixels (first/last row and column) are left untouched.
void sobel_host(unsigned char *__restrict__ orig, unsigned char *__restrict__ out, int width, int height)
{
#pragma omp parallel for simd collapse(2)
    for (int y = 1; y < height - 1; y++)
    {
        for (int x = 1; x < width - 1; x++)
        {
            // Horizontal gradient (Sobel Gx kernel).
            int gx = -orig[(y - 1) * width + (x - 1)] - 2 * orig[y * width + (x - 1)] - orig[(y + 1) * width + (x - 1)]
                     + orig[(y - 1) * width + (x + 1)] + 2 * orig[y * width + (x + 1)] + orig[(y + 1) * width + (x + 1)];
            // Vertical gradient (Sobel Gy kernel).
            int gy = orig[(y - 1) * width + (x - 1)] + 2 * orig[(y - 1) * width + x] + orig[(y - 1) * width + (x + 1)]
                     - orig[(y + 1) * width + (x - 1)] - 2 * orig[(y + 1) * width + x] - orig[(y + 1) * width + (x + 1)];
            // Gradient magnitude, truncated on store to unsigned char.
            out[y * width + x] = sqrt((float)(gx * gx + gy * gy));
        }
    }
}
|
||||||
|
|
||||||
|
// Naive GPU Sobel: one thread per output pixel, all reads from global memory.
// Expects a 2D launch covering the image; border pixels are skipped.
__global__ void sobel_v1(unsigned char *__restrict__ orig, unsigned char *__restrict__ out, int width, int height)
{
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    const int col = threadIdx.x + blockIdx.x * blockDim.x;

    // Interior pixels only: the 3x3 stencil would fall off the image otherwise.
    if (row <= 0 || col <= 0 || row >= height - 1 || col >= width - 1)
        return;

    int gx = -orig[(row - 1) * width + (col - 1)] - 2 * orig[row * width + (col - 1)] - orig[(row + 1) * width + (col - 1)]
             + orig[(row - 1) * width + (col + 1)] + 2 * orig[row * width + (col + 1)] + orig[(row + 1) * width + (col + 1)];
    int gy = orig[(row - 1) * width + (col - 1)] + 2 * orig[(row - 1) * width + col] + orig[(row - 1) * width + (col + 1)]
             - orig[(row + 1) * width + (col - 1)] - 2 * orig[(row + 1) * width + col] - orig[(row + 1) * width + (col + 1)];
    out[row * width + col] = sqrt((float)(gx * gx + gy * gy));
}
|
||||||
|
|
||||||
|
/**
 * GPU Sobel using a BLOCK_SIZE x BLOCK_SIZE shared-memory tile with no halo.
 * Tile-interior threads read neighbors from shared memory; tile-border
 * threads fall back to global-memory reads.
 *
 * Fix: the shared-tile load is now bounds-checked. The original read
 * orig[i * width + j] unconditionally, which is out-of-bounds global memory
 * for threads of edge blocks when width/height are not multiples of
 * BLOCK_SIZE.
 */
__global__ void sobel_v2(unsigned char *__restrict__ orig, unsigned char *__restrict__ out, int width, int height)
{
    // Global output coordinates.
    int i = threadIdx.y + blockIdx.y * blockDim.y;
    int j = threadIdx.x + blockIdx.x * blockDim.x;

    // Coordinates inside the shared tile.
    int it = threadIdx.y;
    int jt = threadIdx.x;

    // One input patch staged in shared memory.
    __shared__ unsigned char s_in[BLOCK_SIZE][BLOCK_SIZE];

    // Each in-range thread loads one pixel; out-of-range threads load
    // nothing (their slot is never read by the interior branch below).
    if (i < height && j < width)
        s_in[it][jt] = orig[i * width + j];

    // Barrier is outside any divergent early-exit: every thread reaches it.
    __syncthreads();

    if (jt > 0 && it > 0 && jt < BLOCK_SIZE - 1 && it < BLOCK_SIZE - 1 && j > 0 && i > 0 && j < width - 1 && i < height - 1)
    {
        // Tile interior: the whole 3x3 neighborhood is in shared memory.
        int dx = (-1 * s_in[it - 1][jt - 1]) + (-2 * s_in[it][jt - 1]) + (-1 * s_in[it + 1][jt - 1]) +
                 (s_in[it - 1][jt + 1]) + (2 * s_in[it][jt + 1]) + (s_in[it + 1][jt + 1]);
        int dy = (s_in[it - 1][jt - 1]) + (2 * s_in[it - 1][jt]) + (s_in[it - 1][jt + 1]) +
                 (-1 * s_in[it + 1][jt - 1]) + (-2 * s_in[it + 1][jt]) + (-1 * s_in[it + 1][jt + 1]);
        out[i * width + j] = sqrt((float)((dx * dx) + (dy * dy)));
    }
    else if (j > 0 && i > 0 && j < width - 1 && i < height - 1)
    {
        // Tile border: neighbors may live in another block's tile, so read
        // them from global memory instead.
        int dx = (-1 * orig[(i - 1) * width + (j - 1)]) + (-2 * orig[i * width + (j - 1)]) + (-1 * orig[(i + 1) * width + (j - 1)]) +
                 (orig[(i - 1) * width + (j + 1)]) + (2 * orig[i * width + (j + 1)]) + (orig[(i + 1) * width + (j + 1)]);
        int dy = (orig[(i - 1) * width + (j - 1)]) + (2 * orig[(i - 1) * width + j]) + (orig[(i - 1) * width + (j + 1)]) +
                 (-1 * orig[(i + 1) * width + (j - 1)]) + (-2 * orig[(i + 1) * width + j]) + (-1 * orig[(i + 1) * width + (j + 1)]);
        out[i * width + j] = sqrt((float)((dx * dx) + (dy * dy)));
    }
}
|
||||||
|
|
||||||
|
/**
 * GPU Sobel with overlapping shared tiles: blocks advance by BLOCK_SIZE-2 so
 * each tile carries its own 1-pixel halo and no global fallback reads are
 * needed.
 *
 * Fixes vs. the original:
 *  - The out-of-range check compared i (the row) against width, j (the
 *    column) against height, and joined them with && instead of ||.
 *  - It then returned *before* __syncthreads(), so threads of partially
 *    out-of-range blocks skipped a block-wide barrier (undefined behavior).
 *    The load is now guarded instead, and every thread reaches the barrier.
 */
__global__ void sobel_v3(unsigned char *__restrict__ orig, unsigned char *__restrict__ out, int width, int height)
{
    // Global coordinates; successive blocks overlap by 2 pixels (the halo).
    int i = threadIdx.y + blockIdx.y * (BLOCK_SIZE - 2);
    int j = threadIdx.x + blockIdx.x * (BLOCK_SIZE - 2);

    // Thread coordinates inside the shared tile.
    int it = threadIdx.y;
    int jt = threadIdx.x;

    __shared__ unsigned char s_in[BLOCK_SIZE][BLOCK_SIZE];

    // Guarded load: out-of-range threads stage nothing but still take part
    // in the barrier below.
    if (i < height && j < width)
        s_in[it][jt] = orig[i * width + j];

    __syncthreads();

    // Only tile-interior threads that also map to image-interior pixels write.
    if (jt > 0 && it > 0 && jt < BLOCK_SIZE - 1 && it < BLOCK_SIZE - 1 && j > 0 && i > 0 && j < width - 1 && i < height - 1)
    {
        int dx = (-1 * s_in[it - 1][jt - 1]) + (-2 * s_in[it][jt - 1]) + (-1 * s_in[it + 1][jt - 1]) +
                 (s_in[it - 1][jt + 1]) + (2 * s_in[it][jt + 1]) + (s_in[it + 1][jt + 1]);
        int dy = (s_in[it - 1][jt - 1]) + (2 * s_in[it - 1][jt]) + (s_in[it - 1][jt + 1]) +
                 (-1 * s_in[it + 1][jt - 1]) + (-2 * s_in[it + 1][jt]) + (-1 * s_in[it + 1][jt + 1]);
        out[i * width + j] = sqrt((float)((dx * dx) + (dy * dy)));
    }
}
|
||||||
|
|
||||||
|
/**
 * GPU Sobel with a (BLOCK_SIZE+2)^2 shared tile: blocks keep their natural
 * BLOCK_SIZE stride and threads cooperatively load a 2-pixel bottom/right
 * halo, so each thread produces one output pixel (shifted by +1 in both
 * axes relative to its tile slot).
 *
 * Fixes vs. the original:
 *  - The tile was declared [BLOCK_SIZE + 32][BLOCK_SIZE + 32] although only
 *    a 2-pixel halo is ever indexed; it is now BLOCK_SIZE + 2 as intended.
 *  - The halo-load guards compared row indices against width and column
 *    indices against height (swapped); rows are now checked against height
 *    and columns against width.
 *  - All tile loads are bounds-checked, so edge blocks no longer read
 *    out-of-bounds global memory.
 */
__global__ void sobel_v4(unsigned char *__restrict__ orig, unsigned char *__restrict__ out, int width, int height)
{
    // Global coordinates of this thread's tile slot (top-left of its stencil).
    int i = threadIdx.y + blockIdx.y * blockDim.y;
    int j = threadIdx.x + blockIdx.x * blockDim.x;

    int it = threadIdx.y;
    int jt = threadIdx.x;

    // Input tile plus a 2-pixel halo on the bottom and right edges.
    __shared__ unsigned char s_in[BLOCK_SIZE + 2][BLOCK_SIZE + 2];

    // Main body of the tile.
    if (i < height && j < width)
        s_in[it][jt] = orig[i * width + j];

    // Bottom halo: the first two thread rows fetch rows i + BLOCK_SIZE.
    if (it < 2 && (i + BLOCK_SIZE) < height && j < width)
        s_in[it + BLOCK_SIZE][jt] = orig[(i + BLOCK_SIZE) * width + j];

    // Right halo: the first two thread columns fetch columns j + BLOCK_SIZE.
    if (jt < 2 && i < height && (j + BLOCK_SIZE) < width)
        s_in[it][jt + BLOCK_SIZE] = orig[i * width + j + BLOCK_SIZE];

    // Bottom-right corner of the halo.
    if (it < 2 && jt < 2 && (i + BLOCK_SIZE) < height && (j + BLOCK_SIZE) < width)
        s_in[it + BLOCK_SIZE][jt + BLOCK_SIZE] = orig[(i + BLOCK_SIZE) * width + j + BLOCK_SIZE];

    // No early returns above: every thread reaches the barrier.
    __syncthreads();

    // Each thread writes output pixel (i+1, j+1); its 3x3 neighborhood is
    // s_in[it .. it+2][jt .. jt+2].
    if (j < (width - 2) && i < (height - 2))
    {
        int dx = (-1 * s_in[it][jt]) + (-2 * s_in[it + 1][jt]) + (-1 * s_in[it + 2][jt]) +
                 (s_in[it][jt + 2]) + (2 * s_in[it + 1][jt + 2]) + (s_in[it + 2][jt + 2]);
        int dy = (s_in[it][jt]) + (2 * s_in[it][jt + 1]) + (s_in[it][jt + 2]) +
                 (-1 * s_in[it + 2][jt]) + (-2 * s_in[it + 2][jt + 1]) + (-1 * s_in[it + 2][jt + 2]);
        out[(i + 1) * width + j + 1] = sqrt((float)((dx * dx) + (dy * dy)));
    }
}
|
||||||
|
|
||||||
|
/**
 * Driver: runs the CPU golden model, then sobel_v1..v4 on the GPU, timing
 * each and checking the output pixel-for-pixel against the CPU result.
 *
 * Fix: the original computed the v1 divergence (absdiff + countNonZero) but
 * never reported or asserted on it; v1 is now checked the same way as
 * v2/v3/v4.
 */
int main(int argc, char *argv[])
{
    int iret = 0;
    struct timespec rt[2];
    double wt; // walltime
    string filename("data/buzz.jpg");

    if (argc > 1)
        filename = argv[1];

    // Load the input image as 8-bit grayscale.
    Mat image = imread(filename, IMREAD_GRAYSCALE);
    if (!image.data)
    {
        cout << "Could not open or find the image" << std::endl;
        return -1;
    }
    int width = image.size().width;
    int height = image.size().height;

    // out1 = CPU golden model, out2 = GPU result, result = per-pixel diff.
    Mat out1 = image.clone();
    Mat out2 = image.clone();
    Mat result = image.clone();
    memset(out1.ptr(), 0, sizeof(unsigned char) * width * height);
    memset(out2.ptr(), 0, sizeof(unsigned char) * width * height);
    memset(result.ptr(), 0, sizeof(unsigned char) * width * height);

    // Compute CPU version - golden model.
    clock_gettime(CLOCK_REALTIME, rt + 0);
    sobel_host(image.ptr(), out1.ptr(), width, height);
    clock_gettime(CLOCK_REALTIME, rt + 1);
    wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
    printf("Sobel (Host) : %9.6f sec\n", wt);

    // CUDA buffer allocation.
    unsigned char *d_image_in;
    unsigned char *d_image_out;
    gpuErrchk(cudaMalloc((void **)&d_image_in, sizeof(unsigned char) * width * height));
    gpuErrchk(cudaMalloc((void **)&d_image_out, sizeof(unsigned char) * width * height));
    gpuErrchk(cudaMemset(d_image_out, 0, sizeof(unsigned char) * width * height));

    // --- sobel_v1: naive global-memory kernel ---
    clock_gettime(CLOCK_REALTIME, rt + 0);
    gpuErrchk(cudaMemcpy(d_image_in, image.ptr(), sizeof(unsigned char) * width * height, cudaMemcpyHostToDevice));
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((width + BLOCK_SIZE - 1) / BLOCK_SIZE, (height + BLOCK_SIZE - 1) / BLOCK_SIZE);
    sobel_v1<<<dimGrid, dimBlock>>>(d_image_in, d_image_out, width, height);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaMemcpy(out2.ptr(), d_image_out, sizeof(unsigned char) * width * height, cudaMemcpyDeviceToHost));
    clock_gettime(CLOCK_REALTIME, rt + 1);
    wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
    printf("Sobel-v1 (GPU) : %9.6f sec\n", wt);

    // Check results (fix: previously computed but never reported/asserted).
    absdiff(out1, out2, result);
    int percentage = countNonZero(result);
    if (percentage)
    {
        printf("Divergence %d\n", percentage);
        imshow("Output GPU", out2);
        imshow("error diff", result);
        waitKey(0);
    }
    assert(percentage == 0);

    // --- sobel_v2: shared tile, global fallback on tile borders ---
    memset(out2.ptr(), 0, sizeof(unsigned char) * width * height);
    gpuErrchk(cudaMemset(d_image_out, 0, sizeof(unsigned char) * width * height));

    clock_gettime(CLOCK_REALTIME, rt + 0);
    gpuErrchk(cudaMemcpy(d_image_in, image.ptr(), sizeof(unsigned char) * width * height, cudaMemcpyHostToDevice));
    sobel_v2<<<dimGrid, dimBlock>>>(d_image_in, d_image_out, width, height);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaMemcpy(out2.ptr(), d_image_out, sizeof(unsigned char) * width * height, cudaMemcpyDeviceToHost));
    clock_gettime(CLOCK_REALTIME, rt + 1);
    wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
    printf("Sobel-v2 (GPU) : %9.6f sec\n", wt);

    absdiff(out1, out2, result);
    percentage = countNonZero(result);
    if (percentage)
    {
        printf("Divergence %d\n", percentage);
        imshow("Output GPU", out2);
        imshow("error diff", result);
        waitKey(0);
    }
    assert(percentage == 0);

    // --- sobel_v3: overlapping tiles with built-in halo ---
    memset(out2.ptr(), 0, sizeof(unsigned char) * width * height);
    gpuErrchk(cudaMemset(d_image_out, 0, sizeof(unsigned char) * width * height));

    clock_gettime(CLOCK_REALTIME, rt + 0);
    gpuErrchk(cudaMemcpy(d_image_in, image.ptr(), sizeof(unsigned char) * width * height, cudaMemcpyHostToDevice));
    // Blocks overlap by 2 pixels, so the grid is sized on BLOCK_SIZE - 2.
    dim3 dimBlock_v3(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid_v3((width + (BLOCK_SIZE - 2) - 1) / (BLOCK_SIZE - 2), (height + (BLOCK_SIZE - 2) - 1) / (BLOCK_SIZE - 2));
    sobel_v3<<<dimGrid_v3, dimBlock_v3>>>(d_image_in, d_image_out, width, height);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaMemcpy(out2.ptr(), d_image_out, sizeof(unsigned char) * width * height, cudaMemcpyDeviceToHost));
    clock_gettime(CLOCK_REALTIME, rt + 1);
    wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
    printf("Sobel-v3 (GPU) : %9.6f sec\n", wt);

    absdiff(out1, out2, result);
    percentage = countNonZero(result);
    if (percentage)
    {
        printf("Divergence %d\n", percentage);
        imshow("Output GPU", out2);
        imshow("error diff", result);
        waitKey(0);
    }
    assert(percentage == 0);

    // --- sobel_v4: natural-stride tiles with cooperatively loaded halo ---
    memset(out2.ptr(), 0, sizeof(unsigned char) * width * height);
    gpuErrchk(cudaMemset(d_image_out, 0, sizeof(unsigned char) * width * height));

    clock_gettime(CLOCK_REALTIME, rt + 0);
    gpuErrchk(cudaMemcpy(d_image_in, image.ptr(), sizeof(unsigned char) * width * height, cudaMemcpyHostToDevice));
    sobel_v4<<<dimGrid, dimBlock>>>(d_image_in, d_image_out, width, height);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaMemcpy(out2.ptr(), d_image_out, sizeof(unsigned char) * width * height, cudaMemcpyDeviceToHost));
    clock_gettime(CLOCK_REALTIME, rt + 1);
    wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
    printf("Sobel-v4 (GPU) : %9.6f sec\n", wt);

    absdiff(out1, out2, result);
    percentage = countNonZero(result);
    if (percentage)
    {
        printf("Divergence %d\n", percentage);
        imshow("Output GPU", out2);
        imshow("error diff", result);
        waitKey(0);
    }
    assert(percentage == 0);

    gpuErrchk(cudaFree(d_image_out));
    gpuErrchk(cudaFree(d_image_in));

    return iret;
}
|
55
cuda/lab2/Makefile
Normal file
55
cuda/lab2/Makefile
Normal file
|
@ -0,0 +1,55 @@
|
||||||
|
ifndef CUDA_HOME
|
||||||
|
CUDA_HOME:=/usr/local/cuda
|
||||||
|
endif
|
||||||
|
|
||||||
|
ifndef EXERCISE
|
||||||
|
EXERCISE=exercise1.cu
|
||||||
|
endif
|
||||||
|
|
||||||
|
BUILD_DIR ?= ./build
|
||||||
|
|
||||||
|
NVCC=$(CUDA_HOME)/bin/nvcc
|
||||||
|
CXX=g++
|
||||||
|
|
||||||
|
OPT:=-O2 -g
|
||||||
|
NVOPT:=-Xcompiler -fopenmp -lineinfo -arch=sm_53 --ptxas-options=-v --use_fast_math `pkg-config --cflags --libs opencv4`
|
||||||
|
|
||||||
|
CXXFLAGS:=$(OPT) -I. $(EXT_CXXFLAGS)
|
||||||
|
LDFLAGS:=-lm -lcudart $(EXT_LDFLAGS)
|
||||||
|
|
||||||
|
NVCFLAGS:=$(CXXFLAGS) $(NVOPT)
|
||||||
|
NVLDFLAGS:=$(LDFLAGS) -lgomp
|
||||||
|
|
||||||
|
SRCS:= utils.c
|
||||||
|
OBJS := $(SRCS:%=$(BUILD_DIR)/%.o) $(EXERCISE:%=$(BUILD_DIR)/%.o)
|
||||||
|
EXE=$(EXERCISE:.cu=.exe)
|
||||||
|
|
||||||
|
$(EXE): $(OBJS)
|
||||||
|
$(MKDIR_P) $(dir $@)
|
||||||
|
$(NVCC) $(NVCFLAGS) $(OBJS) -o $@ $(NVLDFLAGS)
|
||||||
|
|
||||||
|
$(BUILD_DIR)/%.cu.o: %.cu
|
||||||
|
$(MKDIR_P) $(dir $@)
|
||||||
|
$(NVCC) $(NVCFLAGS) -c $< -o $@
|
||||||
|
|
||||||
|
$(BUILD_DIR)/%.cpp.o: %.cpp
|
||||||
|
$(MKDIR_P) $(dir $@)
|
||||||
|
$(CXX) $(CXXFLAGS) -c $< -o $@
|
||||||
|
|
||||||
|
$(BUILD_DIR)/%.c.o: %.c
|
||||||
|
$(MKDIR_P) $(dir $@)
|
||||||
|
$(CXX) $(CXXFLAGS) -c $< -o $@
|
||||||
|
|
||||||
|
all: $(EXE)
|
||||||
|
|
||||||
|
.PHONY: run profile clean
|
||||||
|
run: $(EXE)
|
||||||
|
./$(EXE)
|
||||||
|
|
||||||
|
profile: $(EXE)
|
||||||
|
sudo LD_LIBRARY_PATH=$(CUDA_HOME)/lib:/usr/ext/lib:${LD_LIBRARY_PATH} LIBRARY_PATH=/usr/ext/lib:${LIBRARY_PATH} nvprof ./$(EXE)
|
||||||
|
|
||||||
|
clean:
|
||||||
|
-rm -fr $(BUILD_DIR) *.exe *.out *~
|
||||||
|
|
||||||
|
MKDIR_P ?= mkdir -p
|
198
cuda/lab2/constant.cu
Normal file
198
cuda/lab2/constant.cu
Normal file
|
@ -0,0 +1,198 @@
|
||||||
|
/*
|
||||||
|
* BSD 2-Clause License
|
||||||
|
*
|
||||||
|
* Copyright (c) 2020, Alessandro Capotondi
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright notice, this
|
||||||
|
* list of conditions and the following disclaimer.
|
||||||
|
*
|
||||||
|
* * Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
* this list of conditions and the following disclaimer in the documentation
|
||||||
|
* and/or other materials provided with the distribution.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||||
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||||
|
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||||
|
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||||
|
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @file constant.cu
|
||||||
|
* @author Alessandro Capotondi
|
||||||
|
* @date 27 Mar 2020
|
||||||
|
* @brief Exercise 2
|
||||||
|
*
|
||||||
|
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <assert.h>
|
||||||
|
#include <time.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <string.h>
|
||||||
|
#include <math.h>
|
||||||
|
#include <cuda_runtime.h>
|
||||||
|
|
||||||
|
#define gpuErrchk(ans) \
|
||||||
|
{ \
|
||||||
|
gpuAssert((ans), __FILE__, __LINE__); \
|
||||||
|
}
|
||||||
|
// Report a failed CUDA API call with file/line context; terminates the
// process with the error code unless abort is false.
static inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
|
||||||
|
|
||||||
|
extern "C"
|
||||||
|
{
|
||||||
|
#include "utils.h"
|
||||||
|
}
|
||||||
|
|
||||||
|
#define TWO02 (1 << 2)
|
||||||
|
#define TWO04 (1 << 4)
|
||||||
|
#define TWO08 (1 << 8)
|
||||||
|
#ifndef N
|
||||||
|
#define N (1 << 27)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifndef BLOCK_SIZE
|
||||||
|
#define BLOCK_SIZE (128)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// Filter coefficient table (host copy). 4098 entries, filled once in main.
float K[4098];
// Device mirror of K in constant memory, loaded via cudaMemcpyToSymbol.
__constant__ float cK[4098];

/*
 * Filtering: y[i] -= K[i mod 4098] (host reference implementation).
 */
void filter(float * __restrict__ y, int n)
{
#pragma omp parallel for simd schedule(simd: static)
    for (int i = 0; i < n; i++)
    {
        y[i] = y[i] - K[i % 4098];
    }
}

/**
 * GPU filter using the constant-memory coefficient table cK.
 * One thread per element; launch with a 1D grid covering n.
 *
 * Fix: the original body was an empty TODO stub, so the kernel silently
 * did nothing and the result check against the host model could not pass.
 */
__global__ void filter_v1(float * __restrict__ y, int n)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < n)
        y[i] = y[i] - cK[i % 4098];
}
|
||||||
|
|
||||||
|
/**
 * GPU filter reading the coefficient table from global memory instead of
 * constant memory (baseline for comparing against filter_v1).
 *
 * Fix: the original body was an empty TODO stub.
 *
 * @param y data vector, updated in place
 * @param k coefficient table of 4098 floats in global memory
 * @param n number of elements in y
 */
__global__ void filter_v2(float * __restrict__ y, float * __restrict__ k, int n)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < n)
        y[i] = y[i] - k[i % 4098];
}
|
||||||
|
|
||||||
|
/**
 * Driver: runs the host filter as golden model, then filter_v1 (constant
 * memory) and filter_v2 (global memory) on the GPU, bit-comparing each
 * result against the host output.
 *
 * Fixes vs. the original:
 *  - Neither kernel was ever launched and no input was uploaded to the
 *    device, so the comparisons ran against uninitialized device memory.
 *  - h_x leaked on the allocation-failure path (only h_y/h_z were freed).
 *  - d_k was never freed.
 */
int main(int argc, const char **argv)
{
    int iret = 0;
    int n = N;
    float *h_y, *d_y;
    float *h_x, *d_x, *d_k;
    float *h_z;

    if (argc > 1)
        n = atoi(argv[1]);

    if (NULL == (h_x = (float *)malloc(sizeof(float) * n)))
    {
        printf("error: memory allocation for 'x'\n");
        iret = -1;
    }
    if (NULL == (h_y = (float *)malloc(sizeof(float) * n)))
    {
        printf("error: memory allocation for 'y'\n");
        iret = -1;
    }
    if (NULL == (h_z = (float *)malloc(sizeof(float) * n)))
    {
        printf("error: memory allocation for 'z'\n");
        iret = -1;
    }
    if (0 != iret)
    {
        // Fix: also release h_x (free(NULL) is a no-op, so this is safe
        // whichever allocation failed).
        free(h_x);
        free(h_y);
        free(h_z);
        exit(EXIT_FAILURE);
    }

    // Init data: uniform coefficient table, identical input vectors.
    float b = rand() % TWO04;
    float c = rand() % TWO08;
    for (int i = 0; i < 4098; i++)
    {
        K[i] = b;
    }
    for (int i = 0; i < n; i++)
    {
        h_x[i] = h_y[i] = h_z[i] = c / (float)TWO04;
    }

    // Host golden model (filters h_z in place).
    start_timer();
    filter(h_z, n);
    stop_timer();
    printf("Filter (Host): %9.3f sec %9.1f MFLOPS\n", elapsed_ns() / 1.0e9, n / ((1.0e6 / 1e9) * elapsed_ns()));

    // --- Constant-memory version ---
    gpuErrchk(cudaMalloc((void **)&d_y, sizeof(float) * n));
    // Load the device constant table once.
    gpuErrchk(cudaMemcpyToSymbol(cK, K, sizeof(float) * 4098));

    start_timer();
    // Fix: upload the input and actually launch the kernel.
    gpuErrchk(cudaMemcpy(d_y, h_y, sizeof(float) * n, cudaMemcpyHostToDevice));
    filter_v1<<<(n + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(d_y, n);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaMemcpy(h_y, d_y, sizeof(float) * n, cudaMemcpyDeviceToHost));
    stop_timer();
    printf("Filter-v1 (GPU): %9.3f sec %9.1f MFLOPS\n", elapsed_ns() / 1.0e9, n / ((1.0e6 / 1e9) * elapsed_ns()));

    // Check mathematical consistency (bitwise compare vs. host result).
    for (int i = 0; i < n; ++i)
    {
        iret = *(int *)(h_y + i) ^ *(int *)(h_z + i);
        assert(iret == 0);
    }

    // --- Global-memory (no-constant) version ---
    gpuErrchk(cudaMalloc((void **)&d_x, sizeof(float) * n));
    gpuErrchk(cudaMalloc((void **)&d_k, sizeof(float) * 4098));

    start_timer();
    // Fix: upload data and coefficients, then launch filter_v2.
    gpuErrchk(cudaMemcpy(d_x, h_x, sizeof(float) * n, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_k, K, sizeof(float) * 4098, cudaMemcpyHostToDevice));
    filter_v2<<<(n + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(d_x, d_k, n);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaMemcpy(h_x, d_x, sizeof(float) * n, cudaMemcpyDeviceToHost));
    stop_timer();
    printf("Filter-v2 (GPU): %9.3f sec %9.1f MFLOPS\n", elapsed_ns() / 1.0e9, n / ((1.0e6 / 1e9) * elapsed_ns()));

    // Check mathematical consistency (bitwise compare vs. v1 result).
    for (int i = 0; i < n; ++i)
    {
        iret = *(int *)(h_y + i) ^ *(int *)(h_x + i);
        assert(iret == 0);
    }

    // Cleanup (fix: d_k was previously leaked).
    free(h_x);
    gpuErrchk(cudaFree(d_x));
    gpuErrchk(cudaFree(d_k));
    free(h_y);
    gpuErrchk(cudaFree(d_y));
    free(h_z);
    return 0;
}
|
BIN
cuda/lab2/data/buzz.jpg
Normal file
BIN
cuda/lab2/data/buzz.jpg
Normal file
Binary file not shown.
After Width: | Height: | Size: 2.4 MiB |
BIN
cuda/lab2/data/daisy.jpg
Normal file
BIN
cuda/lab2/data/daisy.jpg
Normal file
Binary file not shown.
After Width: | Height: | Size: 66 KiB |
BIN
cuda/lab2/data/earth_rise.jpg
Normal file
BIN
cuda/lab2/data/earth_rise.jpg
Normal file
Binary file not shown.
After Width: | Height: | Size: 2.8 MiB |
BIN
cuda/lab2/data/fiore.jpg
Normal file
BIN
cuda/lab2/data/fiore.jpg
Normal file
Binary file not shown.
After Width: | Height: | Size: 261 KiB |
137
cuda/lab2/exercise1.cu
Normal file
137
cuda/lab2/exercise1.cu
Normal file
|
@ -0,0 +1,137 @@
|
||||||
|
/*
|
||||||
|
* BSD 2-Clause License
|
||||||
|
*
|
||||||
|
* Copyright (c) 2020, Alessandro Capotondi
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright notice, this
|
||||||
|
* list of conditions and the following disclaimer.
|
||||||
|
*
|
||||||
|
* * Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
* this list of conditions and the following disclaimer in the documentation
|
||||||
|
* and/or other materials provided with the distribution.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||||
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||||
|
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||||
|
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||||
|
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @file exercise1.cu
|
||||||
|
* @author Alessandro Capotondi
|
||||||
|
* @date 27 Mar 2020
|
||||||
|
* @brief Exercise 1
|
||||||
|
*
|
||||||
|
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <string.h>
|
||||||
|
#include <math.h>
|
||||||
|
|
||||||
|
#include <cuda_runtime.h>
|
||||||
|
#define gpuErrchk(ans) \
|
||||||
|
{ \
|
||||||
|
gpuAssert((ans), __FILE__, __LINE__); \
|
||||||
|
}
|
||||||
|
// Emit a diagnostic for a failing CUDA call; exits with the error code
// unless the caller passes abort = false.
static inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
|
||||||
|
|
||||||
|
extern "C"
|
||||||
|
{
|
||||||
|
#include "utils.h"
|
||||||
|
}
|
||||||
|
|
||||||
|
#define TWO02 (1 << 2)
|
||||||
|
#define TWO04 (1 << 4)
|
||||||
|
#define TWO08 (1 << 8)
|
||||||
|
#ifndef N
|
||||||
|
#define N (1LL << 28)
|
||||||
|
#endif
|
||||||
|
#ifndef BLOCK_SIZE
|
||||||
|
#define BLOCK_SIZE (1024)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief EX 1 - Offset and Strided Accesses
|
||||||
|
*
|
||||||
|
* a) Measure the bandwidth accessing the memory using an offset = {1,2,4,8,16,32} (mem_update v1)
|
||||||
|
* b) Measure the bandwidth accessing the memory using a stride = {1,2,4,8,16,32} (mem_update v2)
|
||||||
|
*
|
||||||
|
* @return void
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef STRIDE
|
||||||
|
#define STRIDE 0
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// mem_update v1 - offset accesses: thread t writes element (t + STRIDE) mod N,
// measuring how shifting a warp's access window affects bandwidth.
// (Identifier "mem_udpate" kept as-is: callers use this exact name.)
__global__ void mem_udpate(float * __restrict__ y, float a)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    // Modulo keeps the store inside the buffer for any STRIDE.
    y[(tid + STRIDE) % N] = a;
}
|
||||||
|
|
||||||
|
// mem_update v2 - Strided Accesses
|
||||||
|
// __global__ void mem_udpate(float * __restrict__ y, float a)
|
||||||
|
// {
|
||||||
|
// int i = threadIdx.x + blockIdx.x * blockDim.x;
|
||||||
|
// y[(i*STRIDE)%N] = a;
|
||||||
|
// }
|
||||||
|
|
||||||
|
/**
 * Driver: allocates a 1 GiB float buffer, runs the mem_udpate bandwidth
 * kernel once, and reports the achieved store bandwidth.
 *
 * Fixes vs. the original:
 *  - h_y was uploaded uninitialized (harmless for a bandwidth test, but
 *    flagged by compute-sanitizer initcheck); it is now zeroed first.
 *  - cudaDeviceSynchronize() was unchecked, so asynchronous kernel faults
 *    were silently dropped.
 */
int main(int argc, const char **argv)
{
    int iret = 0;
    float *h_y, *d_y;
    float a = 101.0f / TWO02;

    if (NULL == (h_y = (float *)malloc(sizeof(float) * N)))
    {
        printf("error: memory allocation for 'y'\n");
        iret = -1;
    }
    if (0 != iret)
    {
        free(h_y);
        exit(EXIT_FAILURE);
    }

    // Fix: give the buffer defined contents before the upload.
    memset(h_y, 0, sizeof(float) * N);

    // CUDA buffer allocation.
    gpuErrchk(cudaMalloc((void **)&d_y, sizeof(float) * N));
    gpuErrchk(cudaMemcpy(d_y, h_y, sizeof(float) * N, cudaMemcpyHostToDevice));

    start_timer();
    mem_udpate<<<128 * BLOCK_SIZE, BLOCK_SIZE>>>(d_y, a);
    gpuErrchk(cudaPeekAtLastError());
    // Fix: execution errors surface at synchronization, so check it.
    gpuErrchk(cudaDeviceSynchronize());
    stop_timer();

    gpuErrchk(cudaMemcpy(h_y, d_y, sizeof(float) * N, cudaMemcpyDeviceToHost));
    printf("mem_udpate (GPU): %9.3f sec %9.1f MB/s\n", elapsed_ns() / 1.0e9, (4 * 128*BLOCK_SIZE*BLOCK_SIZE) / ((1.0e6 / 1e9) * elapsed_ns()));

    free(h_y);
    gpuErrchk(cudaFree(d_y));

    // CUDA exit -- needed to flush printf write buffer.
    cudaDeviceReset();
    return 0;
}
|
310
cuda/lab2/exercise2.cu
Normal file
310
cuda/lab2/exercise2.cu
Normal file
|
@ -0,0 +1,310 @@
|
||||||
|
/*
|
||||||
|
* BSD 2-Clause License
|
||||||
|
*
|
||||||
|
* Copyright (c) 2020, Alessandro Capotondi
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright notice, this
|
||||||
|
* list of conditions and the following disclaimer.
|
||||||
|
*
|
||||||
|
* * Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
* this list of conditions and the following disclaimer in the documentation
|
||||||
|
* and/or other materials provided with the distribution.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||||
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||||
|
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||||
|
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||||
|
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @file exercise2.cu
|
||||||
|
* @author Alessandro Capotondi
|
||||||
|
* @date 5 May 2020
|
||||||
|
* @brief Exercise 2 - CUDA MATMUL Optimized
|
||||||
|
*
|
||||||
|
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <assert.h>
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <time.h>
|
||||||
|
|
||||||
|
// Error-checking wrapper for CUDA runtime calls: evaluates `ans` and
// forwards its cudaError_t to gpuAssert with the call site's file/line.
#define gpuErrchk(ans)                        \
    {                                         \
        gpuAssert((ans), __FILE__, __LINE__); \
    }
// Reports a CUDA error (string form, file, line) on stderr and, when
// `abort` is true (the default), terminates the process with the error code.
static inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort)
            exit(code);
    }
}
|
||||||
|
|
||||||
|
extern "C"
|
||||||
|
{
|
||||||
|
#include "utils.h"
|
||||||
|
}
|
||||||
|
|
||||||
|
#define TWO02 (1 << 2)
|
||||||
|
#define TWO04 (1 << 4)
|
||||||
|
#define TWO08 (1 << 8)
|
||||||
|
|
||||||
|
#ifndef N
|
||||||
|
#define N (1 << 10)
|
||||||
|
#endif
|
||||||
|
#ifndef TILE_W
|
||||||
|
#define TILE_W 128
|
||||||
|
#endif
|
||||||
|
#ifndef BLOCK_SIZE
|
||||||
|
#define BLOCK_SIZE 32
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/**
 * Reference host GEMM (golden model): c = a * b for row-major n x n
 * single-precision matrices. O(n^3); parallelized over the output
 * elements with OpenMP when enabled.
 */
void gemm(float *__restrict__ a, float *__restrict__ b, float *__restrict__ c, int n)
{
#pragma omp parallel for collapse(2)
    for (int row = 0; row < n; ++row)
    {
        for (int col = 0; col < n; ++col)
        {
            // Dot product of row `row` of a with column `col` of b.
            float acc = 0.0f;
            for (int k = 0; k < n; ++k)
                acc += a[row * n + k] * b[k * n + col];
            c[row * n + col] = acc;
        }
    }
}
|
||||||
|
|
||||||
|
/**
 * Naive GEMM kernel: one thread computes one element of c = a * b
 * (row-major n x n). Expects a 2D launch; the host uses
 * dimBlock(BLOCK_SIZE, BLOCK_SIZE) with a ceil-divided square grid.
 *
 * Fixes vs. the original:
 *  - Bounds guard: the grid is rounded up, so when n is not a multiple of
 *    the block size the original read and wrote out of bounds.
 *  - threadIdx.x now indexes the column, so consecutive threads of a warp
 *    touch consecutive addresses of b and c (coalesced accesses); the set
 *    of computed elements is unchanged since block and grid are square.
 */
__global__ void gemm_v1(float *__restrict__ a, float *__restrict__ b, float *__restrict__ c, int n)
{
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;

    // Guard against the ragged edge of the ceil-divided grid.
    if (row >= n || col >= n)
        return;

    float sum = 0.0f;
    for (int k = 0; k < n; ++k)
    {
        sum += a[row * n + k] * b[k * n + col];
    }
    c[row * n + col] = sum;
}
|
||||||
|
|
||||||
|
// Exercise skeleton: tiled GEMM using shared memory. The TODOs are to be
// filled in by the student; as written the kernel only runs an empty loop
// n/BLOCK_SIZE times and leaves c untouched.
// NOTE(review): the loop bound assumes n is a multiple of BLOCK_SIZE —
// confirm when implementing.
__global__ void gemm_v2(float *__restrict__ a, float *__restrict__ b, float *__restrict__ c, int n)
{
    //TODO Shared memory used to store Asub and Bsub respectively

    //TODO Block row and column

    //TODO Thread row and column within Csub

    //TODO Each thread computes one element of Csub
    // by accumulating results into Cvalue

    //TODO Loop over all the sub-matrices of A and B that are
    // required to compute Csub.
    // Multiply each pair of sub-matrices together
    // and accumulate the results.
    for (int kb = 0; kb < (n / BLOCK_SIZE); ++kb)
    {
        //TODO Get the starting address (a_offset) of Asub
        // (sub-matrix of A of dimension BLOCK_SIZE x BLOCK_SIZE)
        // Asub is located i_block sub-matrices to the right and
        // k_block sub-matrices down from the upper-left corner of A
        //TODO Get the starting address (b_offset) of Bsub

        //TODO Load Asub and Bsub from device memory to shared memory
        // Each thread loads one element of each sub-matrix

        //TODO Synchronize to make sure the sub-matrices are loaded
        // before starting the computation

        //TODO Multiply As and Bs together

        //TODO Synchronize to make sure that the preceding
        // computation is done before loading two new
        // sub-matrices of A and B in the next iteration
    }

    //TODO Each thread block computes one sub-matrix Csub of C
}
|
||||||
|
|
||||||
|
// Exercise skeleton: tiled GEMM variant whose point (per the TODO below) is
// to make the Bsub load coalesced. Same structure as gemm_v2; currently the
// loop body is empty and c is left untouched.
__global__ void gemm_v3(float *__restrict__ a, float *__restrict__ b, float *__restrict__ c, int n)
{
    //TODO Shared memory used to store Asub and Bsub respectively

    //TODO Block row and column

    //TODO Thread row and column within Csub

    //TODO Each thread computes one element of Csub
    // by accumulating results into Cvalue

    //TODO Loop over all the sub-matrices of A and B that are
    // required to compute Csub.
    // Multiply each pair of sub-matrices together
    // and accumulate the results.
    for (int kb = 0; kb < (n / BLOCK_SIZE); ++kb)
    {
        //TODO Get the starting address (a_offset) of Asub
        // (sub-matrix of A of dimension BLOCK_SIZE x BLOCK_SIZE)
        // Asub is located i_block sub-matrices to the right and
        // k_block sub-matrices down from the upper-left corner of A
        //TODO Get the starting address (b_offset) of Bsub (Coalesced Access)

        //TODO Load Asub and Bsub from device memory to shared memory
        // Each thread loads one element of each sub-matrix

        //TODO Synchronize to make sure the sub-matrices are loaded
        // before starting the computation

        //TODO Multiply As and Bs together

        //TODO Synchronize to make sure that the preceding
        // computation is done before loading two new
        // sub-matrices of A and B in the next iteration
    }

    //TODO Each thread block computes one sub-matrix Csub of C
}
|
||||||
|
|
||||||
|
/**
 * Benchmark harness: runs the host GEMM as golden model, then gemm_v1/v2/v3
 * on the GPU, timing each (including H2D/D2H transfers) and validating the
 * result bit-for-bit against the golden model.
 * Usage: ./exercise2 [n]  (matrix dimension, default N)
 */
int main(int argc, char *argv[])
{
    int n = N, iret = 0;
    float *a, *b, *c, *g; // inputs a,b; GPU result c; golden (host) result g
    struct timespec rt[2];
    double wt; // walltime

    if (argc > 1)
        n = atoi(argv[1]);

    // Allocate host buffers; remember failure and bail out after trying all.
    if (NULL == (a = (float *)malloc(sizeof(*a) * n * n)))
    {
        printf("error: memory allocation for 'x'\n");
        iret = -1;
    }
    if (NULL == (b = (float *)malloc(sizeof(*b) * n * n)))
    {
        printf("error: memory allocation for 'y'\n");
        iret = -1;
    }
    if (NULL == (c = (float *)malloc(sizeof(*c) * n * n)))
    {
        printf("error: memory allocation for 'z'\n");
        iret = -1;
    }
    if (NULL == (g = (float *)malloc(sizeof(*g) * n * n)))
    {
        printf("error: memory allocation for 'z'\n");
        iret = -1;
    }

    if (0 != iret)
    {
        // free(NULL) is a no-op, so freeing all four is safe here.
        free(a);
        free(b);
        free(c);
        free(g);
        exit(EXIT_FAILURE);
    }

    //Init Data: every element of a (resp. b) gets the same random-derived
    //value, so CPU and GPU see identical inputs.
    int _b = rand() % TWO04;
    int _c = rand() % TWO08;
#pragma omp parallel for
    for (int i = 0; i < n * n; i++)
    {
        a[i] = _b / (float)TWO02;
        b[i] = _c / (float)TWO04;
        c[i] = g[i] = 0.0;
    }

    // Host (golden) run
    clock_gettime(CLOCK_REALTIME, rt + 0);
    gemm(a, b, g, n);
    clock_gettime(CLOCK_REALTIME, rt + 1);
    wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
    printf("GEMM (Host) : %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt));

    //CUDA Buffer Allocation
    float *d_a, *d_b, *d_c;
    gpuErrchk(cudaMalloc((void **)&d_a, sizeof(float) * n * n));
    gpuErrchk(cudaMalloc((void **)&d_b, sizeof(float) * n * n));
    gpuErrchk(cudaMalloc((void **)&d_c, sizeof(float) * n * n));

    // --- gemm_v1: timed section includes transfers; the blocking D2H copy
    // also acts as the synchronization point before stopping the clock.
    clock_gettime(CLOCK_REALTIME, rt + 0);
    gpuErrchk(cudaMemcpy(d_a, a, sizeof(float) * n * n, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_b, b, sizeof(float) * n * n, cudaMemcpyHostToDevice));
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    // Ceil-divided square grid so the whole matrix is covered.
    dim3 dimGrid((n + (BLOCK_SIZE)-1) / (BLOCK_SIZE), (n + (BLOCK_SIZE)-1) / (BLOCK_SIZE));
    gemm_v1<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, n);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaMemcpy(c, d_c, sizeof(float) * n * n, cudaMemcpyDeviceToHost));
    clock_gettime(CLOCK_REALTIME, rt + 1);
    wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
    printf("GEMM-v1 (GPU): %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt));

    // Bit-exact comparison via integer XOR of the float representations —
    // assumes the GPU result matches the host result to the last bit.
    // NOTE: vanishes under NDEBUG since the check lives in assert().
    for (int i = 0; i < n * n; i++)
    {
        iret = *(int *)(g + i) ^ *(int *)(c + i);
        assert(iret == 0);
    }

    // --- gemm_v2 (same launch geometry as v1)
    clock_gettime(CLOCK_REALTIME, rt + 0);
    gpuErrchk(cudaMemcpy(d_a, a, sizeof(float) * n * n, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_b, b, sizeof(float) * n * n, cudaMemcpyHostToDevice));
    //dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    //dim3 dimGrid((n + (BLOCK_SIZE)-1) / (BLOCK_SIZE), (n + (BLOCK_SIZE)-1) / (BLOCK_SIZE));
    gemm_v2<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, n);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaMemcpy(c, d_c, sizeof(float) * n * n, cudaMemcpyDeviceToHost));
    clock_gettime(CLOCK_REALTIME, rt + 1);
    wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
    printf("GEMM-v2 (GPU): %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt));

    for (int i = 0; i < n * n; i++)
    {
        iret = *(int *)(g + i) ^ *(int *)(c + i);
        assert(iret == 0);
    }

    // --- gemm_v3 (same launch geometry as v1)
    clock_gettime(CLOCK_REALTIME, rt + 0);
    gpuErrchk(cudaMemcpy(d_a, a, sizeof(float) * n * n, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_b, b, sizeof(float) * n * n, cudaMemcpyHostToDevice));
    //dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    //dim3 dimGrid((n + (BLOCK_SIZE)-1) / (BLOCK_SIZE), (n + (BLOCK_SIZE)-1) / (BLOCK_SIZE));
    gemm_v3<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, n);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaMemcpy(c, d_c, sizeof(float) * n * n, cudaMemcpyDeviceToHost));
    clock_gettime(CLOCK_REALTIME, rt + 1);
    wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
    printf("GEMM-v3 (GPU): %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt));

    for (int i = 0; i < n * n; i++)
    {
        iret = *(int *)(g + i) ^ *(int *)(c + i);
        assert(iret == 0);
    }
    // Release host and device buffers
    free(a);
    free(b);
    free(c);
    free(g);
    gpuErrchk(cudaFree(d_a));
    gpuErrchk(cudaFree(d_b));
    gpuErrchk(cudaFree(d_c));

    return 0;
}
|
179
cuda/lab2/exercise3.cu
Normal file
179
cuda/lab2/exercise3.cu
Normal file
|
@ -0,0 +1,179 @@
|
||||||
|
/*
|
||||||
|
* BSD 2-Clause License
|
||||||
|
*
|
||||||
|
* Copyright (c) 2020, Alessandro Capotondi
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright notice, this
|
||||||
|
* list of conditions and the following disclaimer.
|
||||||
|
*
|
||||||
|
* * Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
* this list of conditions and the following disclaimer in the documentation
|
||||||
|
* and/or other materials provided with the distribution.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||||
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||||
|
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||||
|
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||||
|
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @file exercise3.cu
|
||||||
|
* @author Alessandro Capotondi
|
||||||
|
* @date 5 May 2020
|
||||||
|
* @brief Exercise 3 - Image Luminance Histogram
|
||||||
|
*
|
||||||
|
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <time.h>
|
||||||
|
#include <sys/time.h>
|
||||||
|
#include <opencv2/opencv.hpp>
|
||||||
|
#include <opencv2/imgcodecs/imgcodecs.hpp>
|
||||||
|
#include <opencv2/objdetect/objdetect.hpp>
|
||||||
|
#include <opencv2/highgui/highgui.hpp>
|
||||||
|
|
||||||
|
using namespace cv;
|
||||||
|
using namespace std;
|
||||||
|
|
||||||
|
#ifndef BLOCK_SIZE
|
||||||
|
#define BLOCK_SIZE 32
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// Error-checking wrapper for CUDA runtime calls: evaluates `ans` and
// forwards its cudaError_t to gpuAssert with the call site's file/line.
#define gpuErrchk(ans)                        \
    {                                         \
        gpuAssert((ans), __FILE__, __LINE__); \
    }
// Reports a CUDA error (string form, file, line) on stderr and, when
// `abort` is true (the default), terminates the process with the error code.
static inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort)
            exit(code);
    }
}
|
||||||
|
|
||||||
|
extern "C"
|
||||||
|
{
|
||||||
|
#include "utils.h"
|
||||||
|
}
|
||||||
|
|
||||||
|
#define NBINS 256
|
||||||
|
|
||||||
|
/**
 * Host (golden) luminance histogram: counts, for each of the 256 possible
 * 8-bit pixel values, how many pixels of `im` (width*height bytes) take it.
 * Bins are accumulated into `hist`, which the caller must zero beforehand.
 */
void hist(unsigned char *__restrict__ im, int *__restrict__ hist, int width, int height)
{
    const int npixels = width * height;
#pragma omp parallel for
    for (int idx = 0; idx < npixels; idx++)
    {
        const int bin = im[idx];
// Bins are shared across threads, so the increment must be atomic.
#pragma omp atomic
        hist[bin]++;
    }
}
|
||||||
|
|
||||||
|
//TODO Ex3-a) Implement Histogram Calculation. Using Global Accesses
// Exercise skeleton: intentionally empty. The student fills in a kernel that
// accumulates pixel counts directly into the global `hist` array.
__global__ void hist_v1(unsigned char *__restrict__ im, int *__restrict__ hist, int width, int height)
{
}
|
||||||
|
|
||||||
|
//TODO Ex3-b) Implement Histogram Calculation. Exploiting Shared Memory
// Exercise skeleton: intentionally empty. The student fills in a kernel that
// builds per-block partial histograms in shared memory before merging them.
__global__ void hist_v2(unsigned char *__restrict__ im, int *__restrict__ hist, int width, int height)
{
}
|
||||||
|
|
||||||
|
/**
 * Harness for the histogram exercise: loads a grayscale image (OpenCV),
 * computes the host histogram as golden model, then times hist_v1 and
 * hist_v2 on the GPU and compares their results bit-for-bit.
 * NOTE(review): until the TODO transfers/launches are filled in, hist_gpu
 * stays zero and the validation asserts will fire on any non-trivial image —
 * this is the expected state of the skeleton.
 * Usage: ./exercise3 [image-path]  (default data/buzz.jpg)
 */
int main(int argc, char *argv[])
{
    int iret = 0;
    struct timespec rt[2];
    double wt; // walltime
    int hist_host[NBINS], hist_gpu[NBINS];
    string filename("data/buzz.jpg");

    if (argc > 1)
        filename = argv[1];

    // Load Image
    Mat image = imread(filename, IMREAD_GRAYSCALE);
    if (!image.data)
    {
        cout << "Could not open or find the image" << std::endl;
        return -1;
    }

    int width = image.size().width;
    int height = image.size().height;

    // Set Output Memory (both histograms must start zeroed)
    memset(hist_host, 0, NBINS * sizeof(int));
    memset(hist_gpu, 0, NBINS * sizeof(int));

    // Compute CPU Version - Golden Model
    clock_gettime(CLOCK_REALTIME, rt + 0);
    hist(image.ptr(), hist_host, width, height);
    clock_gettime(CLOCK_REALTIME, rt + 1);
    wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
    printf("Hist (Host) : %9.6f sec\n", wt);

    //CUDA Buffer Allocation
    int *d_hist_gpu;
    unsigned char *d_image;
    gpuErrchk(cudaMalloc((void **)&d_hist_gpu, sizeof(int) * NBINS));
    gpuErrchk(cudaMalloc((void **)&d_image, sizeof(unsigned char) * width * height));

    clock_gettime(CLOCK_REALTIME, rt + 0);
    //TODO Copy Image to the device

    //TODO Define Grid and Block

    //TODO Launch Kernel hist_v1

    gpuErrchk(cudaPeekAtLastError());
    //TODO Copy histogram from the device

    clock_gettime(CLOCK_REALTIME, rt + 1);
    wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
    printf("Hist (GPU) : %9.6f sec\n", wt);

    // Bit-exact validation against the golden model (disappears under NDEBUG).
    for (int i = 0; i < NBINS; i++)
    {
        iret = *(int *)(hist_host + i) ^ *(int *)(hist_gpu + i);
        assert(iret == 0);
    }
    // Reset Output (sizeof(unsigned int) == sizeof(int) for the bin array)
    gpuErrchk(cudaMemset(d_hist_gpu, 0, NBINS * sizeof(unsigned int)));

    clock_gettime(CLOCK_REALTIME, rt + 0);
    //TODO Copy Image to the device

    //Use the same dimBlock, dimGrid of previous version
    //TODO Launch Kernel hist_v2

    gpuErrchk(cudaPeekAtLastError());
    //TODO Copy histogram from the device

    clock_gettime(CLOCK_REALTIME, rt + 1);
    wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
    printf("Hist-2 (GPU) : %9.6f sec\n", wt);

    for (int i = 0; i < NBINS; i++)
    {
        iret = *(int *)(hist_host + i) ^ *(int *)(hist_gpu + i);
        assert(iret == 0);
    }

    gpuErrchk(cudaFree(d_hist_gpu));
    gpuErrchk(cudaFree(d_image));

    return iret;
}
|
332
cuda/lab2/exercise4.cu
Normal file
332
cuda/lab2/exercise4.cu
Normal file
|
@ -0,0 +1,332 @@
|
||||||
|
/*
|
||||||
|
* BSD 2-Clause License
|
||||||
|
*
|
||||||
|
* Copyright (c) 2020, Alessandro Capotondi
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright notice, this
|
||||||
|
* list of conditions and the following disclaimer.
|
||||||
|
*
|
||||||
|
* * Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
* this list of conditions and the following disclaimer in the documentation
|
||||||
|
* and/or other materials provided with the distribution.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||||
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||||
|
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||||
|
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||||
|
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @file exercise4.cu
|
||||||
|
* @author Alessandro Capotondi
|
||||||
|
* @date 5 May 2020
|
||||||
|
* @brief Exercise 4 - Stencil 2d - Sobel
|
||||||
|
*
|
||||||
|
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <time.h>
|
||||||
|
#include <sys/time.h>
|
||||||
|
#include <opencv2/opencv.hpp>
|
||||||
|
#include <opencv2/imgcodecs/imgcodecs.hpp>
|
||||||
|
#include <opencv2/objdetect/objdetect.hpp>
|
||||||
|
#include <opencv2/highgui/highgui.hpp>
|
||||||
|
|
||||||
|
using namespace cv;
|
||||||
|
using namespace std;
|
||||||
|
|
||||||
|
#ifndef BLOCK_SIZE
|
||||||
|
#define BLOCK_SIZE 32
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// Error-checking wrapper for CUDA runtime calls: evaluates `ans` and
// forwards its cudaError_t to gpuAssert with the call site's file/line.
#define gpuErrchk(ans)                        \
    {                                         \
        gpuAssert((ans), __FILE__, __LINE__); \
    }
// Reports a CUDA error (string form, file, line) on stderr and, when
// `abort` is true (the default), terminates the process with the error code.
static inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort)
            exit(code);
    }
}
|
||||||
|
|
||||||
|
extern "C"
|
||||||
|
{
|
||||||
|
#include "utils.h"
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Host (golden) 3x3 Sobel edge detector on an 8-bit grayscale image laid
 * out row-major as width*height bytes. Writes the gradient magnitude
 * sqrt(gx^2 + gy^2) for every interior pixel; the one-pixel border of
 * `out` is left untouched.
 */
void sobel_host(unsigned char *__restrict__ orig, unsigned char *__restrict__ out, int width, int height)
{
#pragma omp parallel for simd collapse(2)
    for (int y = 1; y < height - 1; y++)
    {
        for (int x = 1; x < width - 1; x++)
        {
            // 3x3 neighbourhood of (x, y), named by compass direction.
            const int nw = orig[(y - 1) * width + (x - 1)];
            const int nn = orig[(y - 1) * width + x];
            const int ne = orig[(y - 1) * width + (x + 1)];
            const int ww = orig[y * width + (x - 1)];
            const int ee = orig[y * width + (x + 1)];
            const int sw = orig[(y + 1) * width + (x - 1)];
            const int ss = orig[(y + 1) * width + x];
            const int se = orig[(y + 1) * width + (x + 1)];
            // Horizontal and vertical Sobel responses.
            const int gx = (ne + 2 * ee + se) - (nw + 2 * ww + sw);
            const int gy = (nw + 2 * nn + ne) - (sw + 2 * ss + se);
            out[y * width + x] = sqrt((float)(gx * gx + gy * gy));
        }
    }
}
|
||||||
|
|
||||||
|
//TODO Each thread compute one pixel out reading from global memory
// Exercise skeleton: intentionally empty until the student implements the
// per-pixel Sobel computation with direct global-memory reads.
__global__ void sobel_v1(unsigned char *__restrict__ orig, unsigned char *__restrict__ out, int width, int height)
{
}
|
||||||
|
|
||||||
|
#ifdef V2
// Exercise skeleton, compiled only with -DV2: the guard exists because the
// body references i, j, it, jt (and a shared patch) that the student must
// declare — it does not compile until the TODOs are filled in.
//TODO Each thread compute one pixel out reading from shared memory (corner case read from global memory)
__global__ void sobel_v2(unsigned char *__restrict__ orig, unsigned char *__restrict__ out, int width, int height)
{
    //TODO Declare i and j: global output indexes

    //TODO Declare it and jt: Thread row and column of output matrix

    //TODO Declare shared input patch

    //TODO Load input patch
    // Each thread loads one element of the patch

    //TODO Synchronize to make sure the sub-matrices are loaded
    // before starting the computation

    //TODO if block boundary do
    if (jt > 0 && it > 0 && jt < BLOCK_SIZE - 1 && it < BLOCK_SIZE - 1 && j > 0 && i > 0 && j < width - 1 && i < height - 1)
    {

    }
    else if (j > 0 && i > 0 && j < width - 1 && i < height - 1)
    {
        //TODO if not-block boundary do (tip check global boundaries)
    }
}
#endif
|
||||||
|
|
||||||
|
#ifdef V3
// Exercise skeleton, compiled only with -DV3: i, j, it, jt and the shared
// patch must be declared by the student before this compiles.
//TODO Each thread compute one pixel out reading from shared memory.
__global__ void sobel_v3(unsigned char *__restrict__ orig, unsigned char *__restrict__ out, int width, int height)
{
    //TODO Declare i and j: global output indexes (tip: use BLOCK_SIZE-2)

    //TODO Declare it and jt: Thread row and column of output matrix

    //TODO Check if i and j are out of memory
    // NOTE(review): `&&` returns only when BOTH indexes are out of range;
    // an out-of-bounds guard usually wants `||` — confirm when implementing.
    if (i >= width && j >= height)
        return;

    //TODO Declare shared input patch

    //TODO Load input patch
    // Each thread loads one element of the patch

    //TODO Synchronize to make sure the sub-matrices are loaded
    // before starting the computation

    //TODO Update block and bound checks
    if (jt > 0 && it > 0 && jt < BLOCK_SIZE - 1 && it < BLOCK_SIZE - 1 && j > 0 && i > 0 && j < width - 1 && i < height - 1)
    {
    }
}
#endif
|
||||||
|
|
||||||
|
#ifdef V4
// Exercise skeleton, compiled only with -DV4: i, j, it, jt and the shared
// patch s_in[BLOCK_SIZE+2][BLOCK_SIZE+2] must be declared by the student.
// The pre-written conditionals load the right/bottom halo of the patch.
//TODO Each thread compute one pixel out reading from shared memory. Avoid thread under-usage
__global__ void sobel_v4(unsigned char *__restrict__ orig, unsigned char *__restrict__ out, int width, int height)
{
    //TODO Declare i and j: global output indexes (tip: use BLOCK_SIZE)

    //TODO Declare it and jt: Thread row and column of output matrix

    //TODO Declare shared input patch (tip: use BLOCK_SIZE+2)

    //TODO Load input patch
    // Each thread loads one element of the patch

    //TODO Check condition and load remaining elements
    if ((it + BLOCK_SIZE) < BLOCK_SIZE + 2 && (jt) < BLOCK_SIZE + 2 && (i + BLOCK_SIZE) < width && (j) < height)
        s_in[it + BLOCK_SIZE][jt] = orig[(i + BLOCK_SIZE) * width + j];

    if ((it) < BLOCK_SIZE + 2 && (jt + BLOCK_SIZE) < BLOCK_SIZE + 2 && (i) < width && (j + BLOCK_SIZE) < height)
        s_in[it][jt + BLOCK_SIZE] = orig[i * width + j + BLOCK_SIZE];

    if ((it + BLOCK_SIZE) < BLOCK_SIZE + 2 && (jt + BLOCK_SIZE) < BLOCK_SIZE + 2 && (i + BLOCK_SIZE) < width && (j + BLOCK_SIZE) < height)
        s_in[it + BLOCK_SIZE][jt + BLOCK_SIZE] = orig[(i + BLOCK_SIZE) * width + j + BLOCK_SIZE];

    //TODO Synchronize to make sure the sub-matrices are loaded
    // before starting the computation

    //TODO Update all idx adding y +1 and x +1
    if (jt < BLOCK_SIZE && it < BLOCK_SIZE && j < (width - 2) && i < (height - 2))
    {
    }
}
#endif
|
||||||
|
|
||||||
|
int main(int argc, char *argv[])
|
||||||
|
{
|
||||||
|
int iret = 0;
|
||||||
|
struct timespec rt[2];
|
||||||
|
double wt; // walltime
|
||||||
|
string filename("data/buzz.jpg");
|
||||||
|
|
||||||
|
if (argc > 1)
|
||||||
|
filename = argv[1];
|
||||||
|
|
||||||
|
// Load Image
|
||||||
|
Mat image = imread(filename, IMREAD_GRAYSCALE);
|
||||||
|
if (!image.data)
|
||||||
|
{
|
||||||
|
cout << "Could not open or find the image" << std::endl;
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
int width = image.size().width;
|
||||||
|
int height = image.size().height;
|
||||||
|
|
||||||
|
// Create Output Images
|
||||||
|
Mat out1 = image.clone();
|
||||||
|
Mat out2 = image.clone();
|
||||||
|
Mat result = image.clone();
|
||||||
|
memset(out1.ptr(), 0, sizeof(unsigned char) * width * height);
|
||||||
|
memset(out2.ptr(), 0, sizeof(unsigned char) * width * height);
|
||||||
|
memset(result.ptr(), 0, sizeof(unsigned char) * width * height);
|
||||||
|
|
||||||
|
// Compute CPU Version - Golden Model
|
||||||
|
clock_gettime(CLOCK_REALTIME, rt + 0);
|
||||||
|
sobel_host(image.ptr(), out1.ptr(), width, height);
|
||||||
|
clock_gettime(CLOCK_REALTIME, rt + 1);
|
||||||
|
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
|
||||||
|
printf("Sobel (Host) : %9.6f sec\n", wt);
|
||||||
|
|
||||||
|
//CUDA Buffer Allocation
|
||||||
|
unsigned char *d_image_in;
|
||||||
|
unsigned char *d_image_out;
|
||||||
|
gpuErrchk(cudaMalloc((void **)&d_image_in, sizeof(unsigned char) * width * height));
|
||||||
|
gpuErrchk(cudaMalloc((void **)&d_image_out, sizeof(unsigned char) * width * height));
|
||||||
|
gpuErrchk(cudaMemset(d_image_out, 0, sizeof(unsigned char) * width * height));
|
||||||
|
|
||||||
|
clock_gettime(CLOCK_REALTIME, rt + 0);
|
||||||
|
//TODO Copy Image to the device
|
||||||
|
|
||||||
|
//TODO Define Grid and Block
|
||||||
|
|
||||||
|
//TODO Launch Kernel sobel_v1
|
||||||
|
|
||||||
|
gpuErrchk(cudaPeekAtLastError());
|
||||||
|
gpuErrchk(cudaMemcpy(out2.ptr(), d_image_out, sizeof(unsigned char) * width * height, cudaMemcpyDeviceToHost));
|
||||||
|
clock_gettime(CLOCK_REALTIME, rt + 1);
|
||||||
|
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
|
||||||
|
printf("Sobel-v1 (GPU) : %9.6f sec\n", wt);
|
||||||
|
|
||||||
|
//Check results
|
||||||
|
absdiff(out1, out2, result);
|
||||||
|
int percentage = countNonZero(result);
|
||||||
|
|
||||||
|
#ifdef V2
|
||||||
|
//Reset Output image
|
||||||
|
memset(out2.ptr(), 0, sizeof(unsigned char) * width * height);
|
||||||
|
gpuErrchk(cudaMemset(d_image_out, 0, sizeof(unsigned char) * width * height));
|
||||||
|
|
||||||
|
clock_gettime(CLOCK_REALTIME, rt + 0);
|
||||||
|
//TODO Copy Image to the device
|
||||||
|
|
||||||
|
//TODO Define Grid and Block
|
||||||
|
|
||||||
|
//TODO Launch Kernel sobel_v2
|
||||||
|
clock_gettime(CLOCK_REALTIME, rt + 1);
|
||||||
|
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
|
||||||
|
printf("Sobel-v2 (GPU) : %9.6f sec\n", wt);
|
||||||
|
|
||||||
|
//Check results
|
||||||
|
absdiff(out1, out2, result);
|
||||||
|
percentage = countNonZero(result);
|
||||||
|
if (percentage)
|
||||||
|
{
|
||||||
|
printf("Divergence %d\n", percentage);
|
||||||
|
imshow("Output GPU", out2);
|
||||||
|
imshow("error diff", result);
|
||||||
|
waitKey(0);
|
||||||
|
}
|
||||||
|
assert(percentage == 0);
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef V3
|
||||||
|
//Reset Output image
|
||||||
|
memset(out2.ptr(), 0, sizeof(unsigned char) * width * height);
|
||||||
|
gpuErrchk(cudaMemset(d_image_out, 0, sizeof(unsigned char) * width * height));
|
||||||
|
|
||||||
|
clock_gettime(CLOCK_REALTIME, rt + 0);
|
||||||
|
gpuErrchk(cudaMemcpy(d_image_in, image.ptr(), sizeof(unsigned char) * width * height, cudaMemcpyHostToDevice));
|
||||||
|
//TODO Copy Image to the device
|
||||||
|
|
||||||
|
//TODO Define Grid and Block
|
||||||
|
|
||||||
|
//TODO Launch Kernel sobel_v3
|
||||||
|
gpuErrchk(cudaPeekAtLastError());
|
||||||
|
gpuErrchk(cudaMemcpy(out2.ptr(), d_image_out, sizeof(unsigned char) * width * height, cudaMemcpyDeviceToHost));
|
||||||
|
clock_gettime(CLOCK_REALTIME, rt + 1);
|
||||||
|
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
|
||||||
|
printf("Sobel-v3 (GPU) : %9.6f sec\n", wt);
|
||||||
|
|
||||||
|
//Check results
|
||||||
|
absdiff(out1, out2, result);
|
||||||
|
percentage = countNonZero(result);
|
||||||
|
if (percentage)
|
||||||
|
{
|
||||||
|
printf("Divergence %d\n", percentage);
|
||||||
|
imshow("Output GPU", out2);
|
||||||
|
imshow("error diff", result);
|
||||||
|
waitKey(0);
|
||||||
|
}
|
||||||
|
assert(percentage == 0);
|
||||||
|
#endif
|
||||||
|
#ifdef V4
|
||||||
|
//Reset Output image
|
||||||
|
memset(out2.ptr(), 0, sizeof(unsigned char) * width * height);
|
||||||
|
gpuErrchk(cudaMemset(d_image_out, 0, sizeof(unsigned char) * width * height));
|
||||||
|
|
||||||
|
clock_gettime(CLOCK_REALTIME, rt + 0);
|
||||||
|
//TODO Copy Image to the device
|
||||||
|
|
||||||
|
//TODO Define Grid and Block
|
||||||
|
|
||||||
|
//TODO Launch Kernel sobel_v4
|
||||||
|
gpuErrchk(cudaPeekAtLastError());
|
||||||
|
gpuErrchk(cudaMemcpy(out2.ptr(), d_image_out, sizeof(unsigned char) * width * height, cudaMemcpyDeviceToHost));
|
||||||
|
clock_gettime(CLOCK_REALTIME, rt + 1);
|
||||||
|
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
|
||||||
|
printf("Sobel-v4 (GPU) : %9.6f sec\n", wt);
|
||||||
|
|
||||||
|
//Check results
|
||||||
|
absdiff(out1, out2, result);
|
||||||
|
percentage = countNonZero(result);
|
||||||
|
if (percentage)
|
||||||
|
{
|
||||||
|
printf("Divergence %d\n", percentage);
|
||||||
|
imshow("Output GPU", out2);
|
||||||
|
imshow("error diff", result);
|
||||||
|
waitKey(0);
|
||||||
|
}
|
||||||
|
assert(percentage == 0);
|
||||||
|
#endif
|
||||||
|
gpuErrchk(cudaFree(d_image_out));
|
||||||
|
gpuErrchk(cudaFree(d_image_in));
|
||||||
|
|
||||||
|
return iret;
|
||||||
|
}
|
138
cuda/lab2/utils.c
Normal file
138
cuda/lab2/utils.c
Normal file
|
@ -0,0 +1,138 @@
|
||||||
|
/*
|
||||||
|
* BSD 2-Clause License
|
||||||
|
*
|
||||||
|
* Copyright (c) 2020, Alessandro Capotondi
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright notice, this
|
||||||
|
* list of conditions and the following disclaimer.
|
||||||
|
*
|
||||||
|
* * Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
* this list of conditions and the following disclaimer in the documentation
|
||||||
|
* and/or other materials provided with the distribution.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||||
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||||
|
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||||
|
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||||
|
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*/
|
||||||
|
/**
|
||||||
|
* @file utils.c
|
||||||
|
* @author Alessandro Capotondi
|
||||||
|
* @date 27 Mar 2020
|
||||||
|
* @brief File containing utilities functions for HPC Unimore Class
|
||||||
|
*
|
||||||
|
 * Utilities (timing and statistics helpers) for the CUDA labs.
|
||||||
|
*
|
||||||
|
* @see http://algo.ing.unimo.it/people/andrea/Didattica/HPC/index.html
|
||||||
|
*/
|
||||||
|
|
||||||
|
#define _POSIX_C_SOURCE 199309L
|
||||||
|
#include <time.h>
|
||||||
|
#include <limits.h>
|
||||||
|
#include <math.h>
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <assert.h>
|
||||||
|
|
||||||
|
extern "C" {
|
||||||
|
|
||||||
|
#include "utils.h"
|
||||||
|
|
||||||
|
#define MAX_ITERATIONS 100
|
||||||
|
static struct timespec timestampA, timestampB;
|
||||||
|
static unsigned long long statistics[MAX_ITERATIONS];
|
||||||
|
static int iterations = 0;
|
||||||
|
|
||||||
|
/* Elapsed nanoseconds between two timestamps (end - start).
 * Assumes end was sampled at or after start with the same clock. */
static unsigned long long __diff_ns(struct timespec start, struct timespec end)
{
    long long sec = (long long)(end.tv_sec - start.tv_sec);
    long long nsec = (long long)(end.tv_nsec - start.tv_nsec);

    /* Borrow one second when the nanosecond field underflows. */
    if (nsec < 0)
    {
        sec -= 1;
        nsec += 1000000000LL;
    }

    return (unsigned long long)(sec * 1000000000LL + nsec);
}
|
||||||
|
|
||||||
|
void start_timer()
|
||||||
|
{
|
||||||
|
asm volatile("" ::
|
||||||
|
: "memory");
|
||||||
|
clock_gettime(CLOCK_MONOTONIC_RAW, ×tampA);
|
||||||
|
asm volatile("" ::
|
||||||
|
: "memory");
|
||||||
|
}
|
||||||
|
|
||||||
|
void stop_timer()
|
||||||
|
{
|
||||||
|
unsigned long long elapsed = 0ULL;
|
||||||
|
asm volatile("" ::
|
||||||
|
: "memory");
|
||||||
|
clock_gettime(CLOCK_MONOTONIC_RAW, ×tampB);
|
||||||
|
asm volatile("" ::
|
||||||
|
: "memory");
|
||||||
|
}
|
||||||
|
|
||||||
|
unsigned long long elapsed_ns()
|
||||||
|
{
|
||||||
|
return __diff_ns(timestampA, timestampB);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Open a statistics measurement window; thin alias for start_timer(). */
void start_stats()
{
    start_timer();
}
|
||||||
|
|
||||||
|
void collect_stats()
|
||||||
|
{
|
||||||
|
assert(iterations < MAX_ITERATIONS);
|
||||||
|
stop_timer();
|
||||||
|
statistics[iterations++] = elapsed_ns();
|
||||||
|
}
|
||||||
|
|
||||||
|
void print_stats()
|
||||||
|
{
|
||||||
|
unsigned long long min = ULLONG_MAX;
|
||||||
|
unsigned long long max = 0LL;
|
||||||
|
double average = 0.0;
|
||||||
|
double std_deviation = 0.0;
|
||||||
|
double sum = 0.0;
|
||||||
|
|
||||||
|
/* Compute the sum of all elements */
|
||||||
|
for (int i = 0; i < iterations; i++)
|
||||||
|
{
|
||||||
|
if (statistics[i] > max)
|
||||||
|
max = statistics[i];
|
||||||
|
if (statistics[i] < min)
|
||||||
|
min = statistics[i];
|
||||||
|
sum = sum + statistics[i] / 1E6;
|
||||||
|
}
|
||||||
|
average = sum / (double)iterations;
|
||||||
|
|
||||||
|
/* Compute variance and standard deviation */
|
||||||
|
for (int i = 0; i < iterations; i++)
|
||||||
|
{
|
||||||
|
sum = sum + pow((statistics[i] / 1E6 - average), 2);
|
||||||
|
}
|
||||||
|
std_deviation = sqrt(sum / (double)iterations);
|
||||||
|
|
||||||
|
printf("AvgTime\tMinTime\tMaxTime\tStdDev\n");
|
||||||
|
printf("%.4f ms\t%.4f ms\t%.4f ms\t%.4f\n", (double)average, (double)min / 1E6, (double)max / 1E6, (double)std_deviation);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
142
cuda/lab2/utils.h
Normal file
142
cuda/lab2/utils.h
Normal file
|
@ -0,0 +1,142 @@
|
||||||
|
/*
|
||||||
|
* BSD 2-Clause License
|
||||||
|
*
|
||||||
|
* Copyright (c) 2020, Alessandro Capotondi
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright notice, this
|
||||||
|
* list of conditions and the following disclaimer.
|
||||||
|
*
|
||||||
|
* * Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
* this list of conditions and the following disclaimer in the documentation
|
||||||
|
* and/or other materials provided with the distribution.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||||
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||||
|
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||||
|
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||||
|
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @file utils.h
|
||||||
|
* @author Alessandro Capotondi
|
||||||
|
* @date 27 Mar 2020
|
||||||
|
* @brief File containing utilities functions for HPC Unimore Class
|
||||||
|
*
|
||||||
|
* The header define time functions and dummy workload used on the example tests.
|
||||||
|
*
|
||||||
|
* @see http://algo.ing.unimo.it/people/andrea/Didattica/HPC/index.html
|
||||||
|
*/
|
||||||
|
#ifndef __UTILS_H__
|
||||||
|
#define __UTILS_H__
|
||||||
|
|
||||||
|
#include <stdarg.h>
|
||||||
|
|
||||||
|
#if defined(VERBOSE)
|
||||||
|
#define DEBUG_PRINT(x, ...) printf((x), ##__VA_ARGS__)
|
||||||
|
#else
|
||||||
|
#define DEBUG_PRINT(x, ...)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if !defined(NTHREADS)
|
||||||
|
#define NTHREADS (4)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
extern "C"
|
||||||
|
{
|
||||||
|
|
||||||
|
/**
|
||||||
|
 * @brief The function sets the timestampA
|
||||||
|
*
|
||||||
|
* The function is used to measure elapsed time between two execution points.
|
||||||
|
* The function start_timer() sets the starting point timestamp, while the function
|
||||||
|
* stop_timer() sets the termination timestamp. The elapsed time, expressed in nanoseconds,
|
||||||
|
* between the two points can be retrieved using the function elapsed_ns().
|
||||||
|
*
|
||||||
|
* Example usage:
|
||||||
|
* @code
|
||||||
|
* start_timer(); // Point A
|
||||||
|
* //SOME CODE HERE
|
||||||
|
* stop_timer(); // Point B
|
||||||
|
* printf("Elapsed time = %llu ns\n", elapsed_ns())); //Elapsed time between A and B
|
||||||
|
* //SOME OTHER CODE HERE
|
||||||
|
* stop_timer(); // Point C
|
||||||
|
* printf("Elapsed time = %llu ns\n", elapsed_ns())); //Elapsed time between A and C
|
||||||
|
* @endcode
|
||||||
|
*
|
||||||
|
* @return void
|
||||||
|
* @see start_timer()
|
||||||
|
* @see stop_timer()
|
||||||
|
* @see elapsed_ns()
|
||||||
|
*/
|
||||||
|
void start_timer();
|
||||||
|
|
||||||
|
/**
|
||||||
|
 * @brief The function sets the second timestamp
|
||||||
|
*
|
||||||
|
* The function is used to measure elapsed time between two execution points.
|
||||||
|
 * The function start_timer() sets the starting point timestamp, while the function
 * stop_timer() sets the termination timestamp. The elapsed time, expressed in
 * nanoseconds, between the two points can be retrieved using elapsed_ns().
|
||||||
|
*
|
||||||
|
* Example usage:
|
||||||
|
* @code
|
||||||
|
* start_timer(); // Point A
|
||||||
|
* //SOME CODE HERE
|
||||||
|
* stop_timer(); // Point B
|
||||||
|
* printf("Elapsed time = %llu ns\n", elapsed_ns())); //Elapsed time between A and B
|
||||||
|
* //SOME OTHER CODE HERE
|
||||||
|
* stop_timer(); // Point C
|
||||||
|
* printf("Elapsed time = %llu ns\n", elapsed_ns())); //Elapsed time between A and C
|
||||||
|
* @endcode
|
||||||
|
*
|
||||||
|
* @return void
|
||||||
|
* @see start_timer()
|
||||||
|
* @see stop_timer()
|
||||||
|
* @see elapsed_ns()
|
||||||
|
*/
|
||||||
|
void stop_timer();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Elapsed nano seconds between start_timer() and stop_timer().
|
||||||
|
*
|
||||||
|
* @return Elapsed nano seconds
|
||||||
|
* @see start_timer()
|
||||||
|
* @see stop_timer()
|
||||||
|
*/
|
||||||
|
unsigned long long elapsed_ns();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief The function init the starting point of stat measurement.
|
||||||
|
*
|
||||||
|
* The function is similar to start_timer().
|
||||||
|
*
|
||||||
|
* @return void
|
||||||
|
* @see start_timer
|
||||||
|
*/
|
||||||
|
void start_stats();
|
||||||
|
|
||||||
|
/**
|
||||||
|
 * @brief The function collects the elapsed time between the current execution point and the
|
||||||
|
* last call of start_stats().
|
||||||
|
*
|
||||||
|
* @return void
|
||||||
|
*/
|
||||||
|
void collect_stats();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief The function display the collected statistics.
|
||||||
|
* @return void
|
||||||
|
*/
|
||||||
|
void print_stats();
|
||||||
|
}
|
||||||
|
#endif /*__UTILS_H__*/
|
Loading…
Reference in a new issue