Name – Harsh Parmar
Reg No. – 21BCE11052
Date – 09-02-2024
Winter Semester 2023-24
CSE3009 - Parallel and Distributed Computing
OpenMP LAB PROGRAMS (LAB-1)

1. OpenMP – Basic programs such as Vector Addition, Dot Product

A) Vector Addition

#include <iostream>
#include <vector>
#include <omp.h>

// Element-wise vector addition; the iterations are independent, so the
// loop is shared among the threads with a parallel for.
void vectorAddition(const std::vector<int>& a, const std::vector<int>& b, std::vector<int>& result) {
    const int n = static_cast<int>(a.size());
    #pragma omp parallel for
    for (int i = 0; i < n; ++i) {
        result[i] = a[i] + b[i];
    }
}

int main() {
    const int size = 20; // Small vectors so the whole result can be printed
    std::vector<int> a(size);
    std::vector<int> b(size);
    std::vector<int> result(size);

    for (int i = 0; i < size; ++i) {
        a[i] = i;
        b[i] = size - i;
    }

    vectorAddition(a, b, result);

    std::cout << "Result of vector addition:" << std::endl;
    for (int i = 0; i < size; ++i) {
        std::cout << result[i] << " ";
    }
    std::cout << std::endl;

    return 0;
}

B) Dot Product

#include <iostream>
#include <vector>
#include <omp.h>

// Parallel dot product; the reduction clause gives every thread a private
// partial sum and combines the partial sums when the loop finishes.
int dotProduct(const std::vector<int>& a, const std::vector<int>& b) {
    const int n = static_cast<int>(a.size());
    int result = 0;
    #pragma omp parallel for reduction(+:result)
    for (int i = 0; i < n; ++i) {
        result += a[i] * b[i];
    }
    return result;
}

int main() {
    const int size = 500;
    std::vector<int> a(size);
    std::vector<int> b(size);

    for (int i = 0; i < size; ++i) {
        a[i] = i;
        b[i] = size - i;
    }

    int result = dotProduct(a, b);
    std::cout << "Dot product: " << result << std::endl;

    return 0;
}

2. OpenMP – Loop work-sharing and sections work-sharing

#include <iostream>
#include <omp.h>

int main() {
    const int N = 100;
    int array[N];

    // Loop work-sharing: the iterations of the for loop are divided among the threads.
    #pragma omp parallel for
    for (int i = 0; i < N; ++i) {
        array[i] = omp_get_thread_num(); // Record which thread handled each element
    }

    std::cout << "Loop Work-sharing:" << std::endl;
    for (int i = 0; i < N; ++i) {
        std::cout << "Element " << i << ": Thread " << array[i] << std::endl;
    }
    std::cout << std::endl;

    // Sections work-sharing: each section is executed exactly once, by one thread of the team.
    #pragma omp parallel sections
    {
        #pragma omp section
        {
            std::cout << "Section 1 executed by Thread " << omp_get_thread_num() << std::endl;
        }

        #pragma omp section
        {
            std::cout << "Section 2 executed by Thread " << omp_get_thread_num() << std::endl;
        }
    }

    return 0;
}

3. OpenMP – Combined parallel loop reduction and Orphaned parallel loop reduction

In the combined form, the parallel region, the work-sharing loop and the reduction clause appear in a single directive. In the orphaned form, the work-sharing loop with its reduction clause is placed in a function that is called from inside a parallel region, so the directive binds to the parallel region of its caller.

#include <iostream>
#include <omp.h>

const int N = 500;
int sum2 = 0; // Shared (file-scope) reduction variable for the orphaned version

// Orphaned directive: this work-sharing loop appears outside the lexical
// extent of the parallel region in main() that invokes it.
void orphanedReduction() {
    #pragma omp for reduction(+:sum2)
    for (int i = 0; i < N; ++i) {
        sum2 += i;
    }
}

int main() {
    // Combined parallel loop reduction
    int sum = 0;
    #pragma omp parallel for reduction(+:sum)
    for (int i = 0; i < N; ++i) {
        sum += i;
    }
    std::cout << "Combined parallel loop reduction: " << sum << std::endl;

    // Orphaned parallel loop reduction: the parallel region is created here,
    // but the reduction loop lives in orphanedReduction().
    #pragma omp parallel
    orphanedReduction();

    std::cout << "Orphaned parallel loop reduction: " << sum2 << std::endl;

    return 0;
}
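As a quick check on program 3 (not part of the assignment), the reduction result can be compared against the closed form 0 + 1 + ... + (N-1) = N(N-1)/2 and the loop can be timed with the standard runtime routines omp_get_wtime() and omp_get_max_threads(); the sketch below assumes the same N = 500 and an ordinary -fopenmp build.

#include <iostream>
#include <omp.h>

int main() {
    const int N = 500;
    const int expected = N * (N - 1) / 2; // Closed-form value both reductions should reproduce

    int sum = 0;
    double start = omp_get_wtime();
    #pragma omp parallel for reduction(+:sum)
    for (int i = 0; i < N; ++i) {
        sum += i;
    }
    double elapsed = omp_get_wtime() - start;

    std::cout << "Threads available: " << omp_get_max_threads() << std::endl;
    std::cout << "Reduction result: " << sum
              << (sum == expected ? " (matches " : " (does NOT match ")
              << expected << ")" << std::endl;
    std::cout << "Loop time: " << elapsed << " s" << std::endl;

    return 0;
}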
4. OpenMP – Matrix multiply (run on a GPU card, large-scale data; the complexity of the problem needs to be specified)

Complexity: the classical triple-loop algorithm performs O(n^3) multiply-add operations. For n = 1000 this is 10^9 operations over three 1000 x 1000 matrices of int (about 12 MB of data), which is why the computation is offloaded to the GPU with the OpenMP target construct. The matrices are stored as flat 1D vectors so that the element data can be mapped to the device with explicit map clauses (nested std::vector objects only hold host pointers and are not deep-copied to the device).

#include <iostream>
#include <vector>
#include <omp.h>

// O(n^3) matrix multiplication offloaded to the GPU. collapse(2) merges the
// two outer loops into one large iteration space that is distributed over
// the GPU teams and threads.
void matrixMultiply(const std::vector<int>& A, const std::vector<int>& B,
                    std::vector<int>& C, int n) {
    const int* a = A.data();
    const int* b = B.data();
    int* c = C.data();

    #pragma omp target teams distribute parallel for collapse(2) \
        map(to: a[0:n*n], b[0:n*n]) map(from: c[0:n*n])
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
            int temp = 0;
            for (int k = 0; k < n; ++k) {
                temp += a[i * n + k] * b[k * n + j];
            }
            c[i * n + j] = temp;
        }
    }
}

int main() {
    const int size = 1000; // Size of the matrices (large-scale data)
    std::vector<int> A(size * size);
    std::vector<int> B(size * size);
    std::vector<int> result(size * size);

    for (int i = 0; i < size; ++i) {
        for (int j = 0; j < size; ++j) {
            A[i * size + j] = i + j;
            B[i * size + j] = i - j;
        }
    }

    matrixMultiply(A, B, result, size);

    std::cout << "Result of matrix multiplication (top-left 3x3 block):" << std::endl;
    for (int i = 0; i < 3; ++i) {
        for (int j = 0; j < 3; ++j) {
            std::cout << result[i * size + j] << " ";
        }
        std::cout << std::endl;
    }

    return 0;
}
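How the GPU run is carried out depends on the toolchain; the commands below are a sketch for an NVIDIA card (the file name matmul.cpp is illustrative) and may need different flags on other systems:

    clang++ -O3 -fopenmp -fopenmp-targets=nvptx64-nvidia-cuda matmul.cpp -o matmul    (LLVM with offloading support)
    nvc++ -O3 -mp=gpu matmul.cpp -o matmul                                            (NVIDIA HPC SDK)
    g++ -O3 -fopenmp -foffload=nvptx-none matmul.cpp -o matmul                        (GCC built with nvptx offloading)

Setting the environment variable OMP_TARGET_OFFLOAD=MANDATORY makes the run fail loudly instead of silently falling back to the host when no usable device is found. The small check below (not part of the assignment) uses the standard runtime routine omp_get_num_devices() to confirm that the OpenMP runtime actually sees a GPU:

#include <iostream>
#include <omp.h>

int main() {
    // Number of offload devices visible to the OpenMP runtime;
    // 0 means the target region in program 4 will execute on the host instead.
    std::cout << "Offload devices visible: " << omp_get_num_devices() << std::endl;
    std::cout << "Default device id: " << omp_get_default_device() << std::endl;
    return 0;
}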