
Intel Multi Core Programming


Basic multi-core parallel programs, developed with the Intel C++ compiler.


VIT UNIVERSITY VELLORE, KATPADI, TAMIL NADU PIN-632014

Multi Core Programming Lab
LAB PROGRAMS

Sujesh P Lal, 08MCS054, 4/4/2009



1. Write a parallel program to print "Hello World" using OpenMP.

Code:

#include <stdio.h>
#include <omp.h>

int main()
{
    /* every thread in the team prints one line */
    #pragma omp parallel
    {
        printf("\n\t hello world");
    }
    printf("\n");
    return 0;
}

Result:


2. Write a program to print the following details using OpenMP:

a. Number of threads currently running, queried from the main thread
b. Number of processors
c. Maximum number of threads
d. Thread identification number

Code:

#include <stdio.h>
#include <omp.h>

int main()
{
    int a, b, c, d;

    #pragma omp parallel
    {
        a = omp_get_num_threads();   /* threads in the current team          */
        b = omp_get_max_threads();   /* maximum threads the runtime may use  */
        c = omp_get_num_procs();     /* processors available to the program  */
        d = omp_get_thread_num();    /* id of the thread executing this line */
    }
    printf("\n\t number of threads currently running        = %d", a);
    printf("\n\t maximum number of threads                  = %d", b);
    printf("\n\t number of processors in this system        = %d", c);
    printf("\n\t thread identification number (last writer) = %d", d);
    printf("\n");
    return 0;
}

Result:


3. Write a code to find the biggest and the smallest number from three given numbers. Make sure that the biggest number is computed by one core and the smallest number by another core, in parallel.

Code:

#include <stdio.h>
#include <omp.h>

int main()
{
    int a, b, c;
    printf("\n\t enter values of a, b and c: ");
    scanf("%d %d %d", &a, &b, &c);

    #pragma omp parallel num_threads(2)  /* two cores: one finds the biggest, one the smallest */
    {
        if (omp_get_thread_num())        /* the non-zero thread finds the biggest */
        {
            if (a > b && a > c)
                printf("\n\t %d is greatest", a);
            else if (b > a && b > c)
                printf("\n\t %d is greatest", b);
            else
                printf("\n\t %d is greatest", c);
        }
        else                             /* thread 0 finds the smallest */
        {
            if (a < b && a < c)
                printf("\n\t %d is smallest", a);
            else if (b < a && b < c)
                printf("\n\t %d is smallest", b);
            else
                printf("\n\t %d is smallest", c);
        }
    }
    printf("\n");
    return 0;
}


Result:
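An alternative sketch (not part of the original record) is to give each computation its own section, so one task finds the biggest value and another the smallest, whichever threads the runtime assigns:

#include <stdio.h>
#include <omp.h>

int main()
{
    int a, b, c;
    printf("\n\t enter values of a, b and c: ");
    scanf("%d %d %d", &a, &b, &c);

    #pragma omp parallel sections num_threads(2)
    {
        #pragma omp section              /* one task computes the biggest */
        {
            int big = a;
            if (b > big) big = b;
            if (c > big) big = c;
            printf("\n\t %d is greatest (thread %d)", big, omp_get_thread_num());
        }
        #pragma omp section              /* the other task computes the smallest */
        {
            int small = a;
            if (b < small) small = b;
            if (c < small) small = c;
            printf("\n\t %d is smallest (thread %d)", small, omp_get_thread_num());
        }
    }
    printf("\n");
    return 0;
}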

4. Write code to evaluate the arithmetic expression a = a + (b*c) + d for given values of b, c and d with two cores:
- one with an overriding value of a (the two cores get different results)
- one without an overriding value of a (the two cores get the same result)

Code:

#include <stdio.h>
#include <omp.h>

int main()
{
    int a, b, c, d, core;
    printf("\n\r Enter b, c and d values: ");
    scanf("%d%d%d", &b, &c, &d);

    /* a and core are private, so each core evaluates the expression on its own copy of a */
    #pragma omp parallel private(a, core)
    {
        core = omp_get_thread_num();
        if (core == 0)
        {
            a = 0;                       /* core 0 starts from a = 0 */
            printf("\n\t Processor %d\n", core);
            a = a + (b * c) + d;
            printf("\n\t Answer is: %d", a);
        }
        else if (core == 1)
        {
            a = 1;                       /* core 1 overrides a with 1 */
            printf("\n\t Processor %d\n", core);
            a = a + (b * c) + d;
            printf("\n\t Answer is: %d", a);
        }
    }
    printf("\n");
    return 0;
}

Result:

Code:

#include <stdio.h>
#include <omp.h>

int main()
{
    int a, b, c, d, core;

    a = 1; b = 1; c = 2; d = 3;
    a = a + (b * c) + d;                 /* serial result for comparison */
    printf("\n\t Answer is: %d\n", a);

    #pragma omp parallel private(a, core)
    {
        core = omp_get_thread_num();
        if (core == 0)
        {
            a = 1; b = 1; c = 2; d = 3;  /* same starting value of a */
            printf("\n\t Processor %d\n", core);
            a = a + (b * c) + d;
            printf("\n\t Answer is: %d", a);
        }
        else
        {
            a = 2; b = 1; c = 2; d = 3;  /* overriding value of a */
            printf("\n\t Processor %d\n", core);
            a = a + (b * c) + d;
            printf("\n\t Answer is: %d", a);
        }
    }
    printf("\n");
    return 0;
}

Result:
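For the case where both cores should get the same result, a shorter variant (a sketch using firstprivate, which is an assumption rather than the lab's prescribed method) initialises every private copy of a from the serial value:

#include <stdio.h>
#include <omp.h>

int main()
{
    int a = 1, b = 1, c = 2, d = 3;

    /* firstprivate gives each thread its own copy of a, initialised from
       the value outside the region, so both cores evaluate a + (b*c) + d
       from the same starting point and print the same answer */
    #pragma omp parallel firstprivate(a) num_threads(2)
    {
        a = a + (b * c) + d;
        printf("\n\t processor %d: answer is %d", omp_get_thread_num(), a);
    }
    printf("\n");
    return 0;
}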

5. Write a program to generate the prime numbers within a range using OpenMP.

Code:

#include <stdio.h>
#include <omp.h>

int main()
{
    int i, j, n, count;
    printf("\n\t Enter the range of prime numbers: ");
    scanf("%d", &n);

    /* the candidates are divided statically among the threads;
       i, j and count are private so each thread tests its own candidate */
    #pragma omp parallel for schedule(static) private(i, j, count)
    for (i = 2; i < n; i++)
    {
        count = 0;
        for (j = 2; j < i; j++)
        {
            if (i % j == 0)
            {
                count++;
                break;
            }
        }
        if (count == 0)
        {
            printf("\n\t the number %d is prime, computed by thread %d\n",
                   i, omp_get_thread_num());
        }
    }
    scanf("\n");
    return 0;
}

Result:
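Because larger candidates need more trial divisions, the iterations of this loop are uneven; a sketch of the same idea with schedule(dynamic) (an assumption, not part of the original submission) lets idle cores pick up the remaining candidates:

#include <stdio.h>
#include <omp.h>

int main()
{
    int n;
    printf("\n\t Enter the range of prime numbers: ");
    scanf("%d", &n);

    /* dynamic scheduling hands out iterations as threads become free,
       which balances the uneven cost of testing each candidate */
    #pragma omp parallel for schedule(dynamic)
    for (int i = 2; i < n; i++)
    {
        int is_prime = 1;
        for (int j = 2; j * j <= i; j++)
            if (i % j == 0) { is_prime = 0; break; }
        if (is_prime)
            printf("\n\t %d is prime (thread %d)", i, omp_get_thread_num());
    }
    printf("\n");
    return 0;
}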

6. Write a program to share a single loop between two cores so that alternate iterations are computed by alternate cores.

Code:

#include <stdio.h>
#include <omp.h>

int main()
{
    int chunk = 1;                       /* one iteration per chunk, so the threads alternate */

    #pragma omp parallel
    {
        #pragma omp for schedule(static, chunk)
        for (int i = 0; i < 10; i++)
        {
            printf("\n\t thread %d executing iteration %d",
                   omp_get_thread_num(), i);
        }
    }
    scanf("\n");
    return 0;
}


Result:

7. Write a program to share a single loop between two cores so that alternating blocks of ten iterations are computed by the two cores.

Code:

#include <stdio.h>
#include <omp.h>

int main()
{
    int chunk = 10, n;                   /* blocks of ten iterations alternate between the threads */
    printf("\n\t enter limit: ");
    scanf("%d", &n);

    #pragma omp parallel
    {
        #pragma omp for schedule(static, chunk)
        for (int i = 0; i < n; i++)
        {
            printf("\n\t thread %d executing iteration %d",
                   omp_get_thread_num(), i);
        }
    }
    scanf("\n");
    return 0;
}


Result:

9. Vector multiplication: component-wise product of two three-element vectors.

Code:

#include <stdio.h>
#include <omp.h>

int main()
{
    int vect1[3], vect2[3], vect3[3], i;

    printf("\n\t enter i, j, k values of first vector: ");
    for (i = 0; i < 3; i++)
        scanf("%d", &vect1[i]);

    printf("\n\t enter i, j, k values of second vector: ");
    for (i = 0; i < 3; i++)
        scanf("%d", &vect2[i]);

    printf("\n\t vector1: %di+%dj+%dk", vect1[0], vect1[1], vect1[2]);
    printf("\n\t vector2: %di+%dj+%dk", vect2[0], vect2[1], vect2[2]);

    /* each component of the product is independent, so the loop is shared */
    #pragma omp parallel for
    for (i = 0; i < 3; i++)
        vect3[i] = vect1[i] * vect2[i];

    printf("\n\t resultant vector is: %di+%dj+%dk", vect3[0], vect3[1], vect3[2]);
    scanf("\n");
    return 0;
}

Result

10. Vector multiplication to find the vector sum (dot product).

Code

#include <stdio.h>
#include <omp.h>

int main()
{
    int vect1[3] = {0, 0, 0}, vect2[3] = {0, 0, 0};

    printf("\n\t enter values of vector1:\n");
    for (int i = 0; i < 3; i++)
    {
        printf("a[%d]=", i);
        scanf("%d", &vect1[i]);
    }
    printf("\n\t enter values of vector2:\n");
    for (int i = 0; i < 3; i++)
    {
        printf("b[%d]=", i);
        scanf("%d", &vect2[i]);
    }

    int x = 0;
    printf("\n\t vector representation a = %di %dj %dk", vect1[0], vect1[1], vect1[2]);
    printf("\n\t vector representation b = %di %dj %dk", vect2[0], vect2[1], vect2[2]);

    /* the reduction avoids a race on x while the threads accumulate in parallel */
    #pragma omp parallel for reduction(+:x)
    for (int i = 0; i < 3; i++)
        x += vect1[i] * vect2[i];

    printf("\n\t vector sum = %d\n", x);
    scanf("\n");
    return 0;
}

Result

11. Write a program to print a statement using the master thread.

Code

#include <stdio.h>
#include <omp.h>

int main()
{
    /* the master construct only takes effect inside a parallel region:
       of the whole team, only the master thread executes the block */
    #pragma omp parallel
    {
        #pragma omp master
        {
            printf("\n\t here we are displaying information using the master thread"
                   "\n\t directive for using the master thread is"
                   "\n\t #pragma omp master\n");
        }
    }
    scanf("\n");
    return 0;
}


Result

12. Write a program to generate ten thousand random numbers using two loops and find the biggest among them in parallel with two cores.

Code

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int main()
{
    int large = 0, n;
    printf("\n\t enter range: ");
    scanf("%d", &n);

    #pragma omp parallel for
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            int temp = rand();
            /* large is shared, so the update is protected against races */
            #pragma omp critical
            {
                if (large < temp)
                    large = temp;
            }
        }
        int tid = omp_get_thread_num();
        printf("\n\t thread id is: %d\n\t and the biggest number so far is: %d", tid, large);
    }
    scanf("\n");
    return 0;
}


Result
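Since every thread updates large, the listing above needs the critical section; an alternative sketch using a max reduction (available from OpenMP 3.1 onward, which is assumed here) removes the locking entirely:

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int main()
{
    int n, large = 0;
    printf("\n\t enter range: ");
    scanf("%d", &n);

    /* each thread keeps a private maximum; the reduction combines
       them into one value at the end of the loop */
    #pragma omp parallel for reduction(max:large)
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            int temp = rand();           /* note: rand() itself may not be thread-safe */
            if (temp > large)
                large = temp;
        }
    }
    printf("\n\t the biggest random number is: %d\n", large);
    return 0;
}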

13. Write a program to find the sum of the squares of the first hundred natural numbers. See that half of the computation is done by one core and the other half by another core; finally, the results of the two computations are added.

Code:

#include <stdio.h>
#include <omp.h>

int main()
{
    int sum0 = 0, sum1 = 0, ch = 50, n;
    printf("\n\t enter limit: ");
    scanf("%d", &n);

    /* chunks of 50 iterations on two threads: the first half of the
       numbers goes to thread 0, the second half to thread 1 */
    #pragma omp parallel for schedule(static, ch) num_threads(2)
    for (int i = 1; i <= n; i++)
    {
        int tid = omp_get_thread_num();
        if (tid == 0)
        {
            sum0 = sum0 + (i * i);
            printf("\n\t iteration %d done by thread %d", i, tid);
        }
        else
        {
            sum1 = sum1 + (i * i);
            printf("\n\t iteration %d done by thread %d", i, tid);
        }
    }
    /* the partial results of the two cores are added at the end */
    printf("\n\t total sum of squares = %d\n", sum0 + sum1);
    scanf("\n");
    return 0;
}
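The listing above interleaves the work in chunks of fifty iterations; a sketch that follows the statement more literally (one core squares the first half, the other the second half, and the two partial sums are added at the end; the variable names are illustrative, not from the original record):

#include <stdio.h>
#include <omp.h>

int main()
{
    int n = 100, sum0 = 0, sum1 = 0;

    #pragma omp parallel num_threads(2)
    {
        int tid = omp_get_thread_num();
        /* thread 0 squares 1..n/2, thread 1 squares n/2+1..n */
        if (tid == 0)
            for (int i = 1; i <= n / 2; i++)
                sum0 += i * i;
        else
            for (int i = n / 2 + 1; i <= n; i++)
                sum1 += i * i;
    }
    /* the two partial results are added after the parallel region */
    printf("\n\t sum of squares of first %d natural numbers = %d\n", n, sum0 + sum1);
    return 0;
}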

14. Write a program to set the number of threads and show the current date and time and the running time of the program.

Code:

#include <stdio.h>
#include <time.h>
#include <omp.h>

int main()
{
    double sttime, endtime;
    char datestr[9];
    char timestr[9];

    sttime = omp_get_wtime();
    omp_set_num_threads(5);              /* request five threads for the next parallel region */

    _strdate(datestr);                   /* Windows CRT helpers for the date and time strings */
    printf("\n\t current date: %s\n", datestr);
    _strtime(timestr);
    printf("\n\t current time: %s\n", timestr);

    #pragma omp parallel
    {
        int tid = omp_get_thread_num();
        printf("\n\t thread no: %d\n", tid);
    }

    endtime = omp_get_wtime();
    printf("\n\t start time: %f\n", sttime);
    printf("\n\t end time: %f\n", endtime);
    printf("\t time taken to execute: %f\n", endtime - sttime);
    return 0;
}

15. Write a program to distinguish between shared data and private data.

Code:

#include <stdio.h>
#include <omp.h>

int main()
{
    int shared, pr;
    printf("\n\t enter values: ");
    scanf("%d %d", &shared, &pr);

    /* firstprivate gives every thread its own copy of pr, initialised
       with the value read above; shared stays common to all threads */
    #pragma omp parallel firstprivate(pr)
    {
        int thread = omp_get_thread_num();
        if (thread == 0)
        {
            shared = shared + 1;         /* visible to every thread      */
            pr = pr + 1;                 /* only thread 0's copy changes */
        }
        #pragma omp barrier
        printf("\n\t shared value for thread %d is %d", thread, shared);
        printf("\n\t private value for thread %d is %d", thread, pr);
    }
    printf("\n");
    scanf("\n");
    return 0;
}

16. Write a program to find the dot product of two vectors using two cores; the reduced result is finally displayed by one core.

Code:

#include <stdio.h>
#include <omp.h>

int main()
{
    int vect1[3], vect2[3], value = 0, i;

    printf("\n\t enter i, j, k values of first vector: ");
    for (i = 0; i < 3; i++)
        scanf("%d", &vect1[i]);

    printf("\n\t enter i, j, k values of second vector: ");
    for (i = 0; i < 3; i++)
        scanf("%d", &vect2[i]);

    printf("\n\t vector1: %di+%dj+%dk", vect1[0], vect1[1], vect1[2]);
    printf("\n\t vector2: %di+%dj+%dk", vect2[0], vect2[1], vect2[2]);

    #pragma omp parallel
    {
        /* the partial products are accumulated and reduced into value */
        #pragma omp for reduction(+:value)
        for (i = 0; i < 3; i++)
            value += vect1[i] * vect2[i];

        /* only one thread prints the reduced result */
        #pragma omp single
        printf("\n\t resultant dot product is: %d\n", value);
    }
    scanf("\n");
    return 0;
}


17. Write a program to get environment information about the system (whether the code is running in parallel, whether nested parallelism is enabled, and so on).

Code:

#include <stdio.h>
#include <omp.h>

int main()
{
    omp_set_nested(1);                   /* enable nested parallelism */

    #pragma omp parallel
    {
        if (!omp_in_parallel())
            printf("\n\t this is not a parallel region");
        else
            printf("\n\t parallel region");

        if (!omp_get_nested())
            printf("\n\t nested parallelism is disabled");
        else
            printf("\n\t nested parallelism is enabled");
    }
    printf("\n");
    scanf("\n");
    return 0;
}
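If more environment details are wanted, the query routines below (chosen here as an illustration, not prescribed by the exercise) can be printed in the same way:

#include <stdio.h>
#include <omp.h>

int main()
{
    /* these routines can also be called from the serial part of the program */
    printf("\n\t number of processors       : %d", omp_get_num_procs());
    printf("\n\t maximum threads available  : %d", omp_get_max_threads());
    printf("\n\t dynamic adjustment enabled : %d", omp_get_dynamic());
    printf("\n\t nested parallelism enabled : %d", omp_get_nested());
    printf("\n\t inside a parallel region?  : %d", omp_in_parallel());
    printf("\n");
    return 0;
}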


18. Write a program to execute any simple code by using four threads.

Code:

#include <stdio.h>
#include <omp.h>

int main()
{
    int n;
    float a, b;
    printf("\n\t enter a and b values: ");
    scanf("%f %f", &a, &b);
    printf("\n\t enter no of threads: ");
    scanf("%d", &n);

    /* each thread performs a different arithmetic operation on a and b */
    #pragma omp parallel num_threads(n)
    {
        int tid = omp_get_thread_num();
        if (tid == 0)
            printf("\n\t the add value %f is computed in thread %d\n", a + b, tid);
        else if (tid == 1)
            printf("\n\t the sub value %2.3f is computed in thread %d\n", a - b, tid);
        else if (tid == 2)
            printf("\n\t the mul value %2.3f is computed in thread %d\n", a * b, tid);
        else if (tid == 3)
            printf("\n\t the div value %2.3f is computed in thread %d\n", a / b, tid);
    }
    scanf("\n");
    return 0;
}

20. Write a program to find the time taken to execute a loop.

Code:

#include <stdio.h>
#include <omp.h>

int main()
{
    int a[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 0};
    double ti_st, ti_end, ti_total;

    ti_st = omp_get_wtime();
    printf("\n\t start time is: %f", ti_st);

    #pragma omp parallel for schedule(static)
    for (int i = 0; i < 10; i++)
        printf("\n\t %d\t", a[i]);

    ti_end = omp_get_wtime();
    printf("\n\t end time is: %f", ti_end);
    ti_total = ti_end - ti_st;
    printf("\n\t time taken to execute the loop in parallel by two cores is: %f", ti_total);
    scanf("\n");
    return 0;
}


21. Demonstration of shared and private variables, and of scheduling in a for loop.

Code:

#include <stdio.h>
#include <omp.h>

int main()
{
    int i, j, n;
    printf("\n\t enter i, j values: ");
    scanf("%d %d", &i, &j);
    printf("\n\t enter range: ");
    scanf("%d", &n);

    /* i is shared by all threads; j (the loop variable) is private to each thread */
    #pragma omp parallel for schedule(static, 1) shared(i) private(j)
    for (j = 1; j < n; j++)
    {
        int t = omp_get_thread_num();
        if (t == 0)
        {
            #pragma omp atomic           /* atomic update of the shared variable */
            i += j;
        }
        else
        {
            #pragma omp atomic
            i -= j;
        }
    }
    printf("\n\t shared variable is: i=%d\n", i);
    printf("\n\t private variable is: j=%d (the copy outside the loop is unchanged)\n", j);
    scanf("\n");
    return 0;
}


22. Sum of natural numbers by reducing the result.

Code:

#include <stdio.h>
#include <omp.h>

int main()
{
    int sum = 0, n;
    printf("\n\t enter range: ");
    scanf("%d", &n);

    #pragma omp parallel for reduction(+:sum)
    for (int i = 1; i < n; i++)
    {
        sum += i;
        printf("\n\t partial sum = %d", sum);   /* each thread prints its own partial sum */
    }
    printf("\n\t reduced sum of natural numbers: %d\n", sum);
    scanf("\n");
    return 0;
}


23. Sum of the prime numbers from 2 to n. This involves two nested loops: one generates the numbers, and the other checks whether each generated number is prime.

Code:

#include <stdio.h>
#include <omp.h>

int main()
{
    int n, sum = 0, i, j, count;
    printf("\n\t enter range of prime numbers: ");
    scanf("%d", &n);
    if (n == 1)
    {
        printf("\n\t %d is neither prime nor composite\n", n);
        return 0;
    }
    printf("\n\t prime numbers:\n");

    /* j and count are private to each thread; sum is combined with a reduction */
    #pragma omp parallel for private(j, count) reduction(+:sum)
    for (i = 2; i <= n; i++)
    {
        count = 0;
        for (j = 1; j <= i; j++)         /* count the divisors of i */
        {
            if (i % j == 0)
                count++;
        }
        if (count == 2)                  /* exactly two divisors: i is prime */
        {
            printf("\t %d --> executed in thread %d\n", i, omp_get_thread_num());
            sum = sum + i;
        }
    }
    printf("\n\t sum of prime numbers = %d\n", sum);
    scanf("\n");
    return 0;
}

24. Demonstrate a sum reduction with a combined parallel loop construct.

Code:

#include <stdio.h>
#include <omp.h>

int main()
{
    long red_sum = 0;
    long int i;
    int n;
    printf("\n\t enter range: ");
    scanf("%d", &n);

    /* without a reduction clause the shared accumulator can race */
    #pragma omp parallel for
    for (i = 1; i < n; i++)
    {
        red_sum += i;
        printf("\n\t sum of the numbers without reduction = %ld", red_sum);
    }

    red_sum = 0;                         /* reset before the reduction version */

    #pragma omp parallel
    {
        #pragma omp for reduction(+:red_sum)
        for (i = 1; i < n; i++)
            red_sum += i;
    }
    printf("\n\t sum of numbers after reduction = %ld", red_sum);
    printf("\n");
    scanf("\n");
    return 0;
}