/*
 * $Id: mpi.c 5713 2016-05-27 01:06:37Z skylar $
 */

/* Standard headers used directly below; harmless if matmul.h already
 * includes some of them */
#include <stdio.h>	/* fprintf, puts */
#include <stdlib.h>	/* atoi, exit, free */
#include <stdbool.h>	/* bool */
#include <unistd.h>	/* getopt, optarg */

#include "matmul.h"
#include "mpi.h"

#define FIRST_RANK 0

int main(int argc,char **argv) {
	int rank,size,c;
	unsigned int start_row,stop_row,stride;
	bool print = false;	// only print matrices when -p is given
	struct matrix m1,m2,local_dst_m,dst_m;

	MPI_Init(&argc,&argv);
	MPI_Comm_size(MPI_COMM_WORLD,&size);
	MPI_Comm_rank(MPI_COMM_WORLD,&rank);

#ifdef DEBUG
	fprintf(stderr,"Rank %d/%d - Hello World\n",rank,size);
#endif

	m1.rows = m1.cols = m2.rows = m2.cols = 0;
	// Only the first rank allocates the full product; give the struct
	// defined values everywhere so the root-only MPI_Gather arguments
	// are never read uninitialized on the other ranks
	dst_m.rows = dst_m.cols = 0;
	dst_m.matrix = NULL;

	while((c = getopt(argc,argv, "x:y:a:b:p")) != -1) {
		switch(c) {
			case 'x':
				m1.cols = atoi(optarg);
				break;
			case 'y':
				m1.rows = atoi(optarg);
				break;
			case 'a':
				m2.cols = atoi(optarg);
				break;
			case 'b':
				m2.rows = atoi(optarg);
				break;
			case 'p':
				print = true;
				break;
			case '?':
				usage();
				exit(EXIT_FAILURE);
		}
	}

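	/*
	 * Hypothetical invocation (binary name assumed), multiplying an
	 * 8x4 matrix by a 4x6 matrix across four ranks and printing the
	 * result:
	 *
	 *   mpirun -np 4 ./mpi -y 8 -x 4 -b 4 -a 6 -p
	 */
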
	if(m1.rows == 0 || m1.cols == 0 || m2.rows == 0 || m2.cols == 0) {
		fprintf(stderr,"Supply row and column counts!\n");
		usage();
		exit(EXIT_FAILURE);
	}

	if(m1.cols != m2.rows) {
		fprintf(stderr,"Invalid matrix dimensions: m1.cols must equal m2.rows!\n");
		exit(EXIT_FAILURE);
	}

	m1.matrix = safe_malloc_int(
		m1.rows*m1.cols,
		"Allocating first matrix"
	);
	m2.matrix = safe_malloc_int(
		m2.rows*m2.cols,
		"Allocating second matrix"
	);

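	// Every rank allocates m1 and m2 in full; the broadcasts below
	// fill them in on the non-root ranks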
	if(rank == FIRST_RANK) {
		// Each thread will get a separate random seed
		unsigned int *random_seeds = init_random_seeds();
		init_matrix(&m1,random_seeds);
		init_matrix(&m2,random_seeds);

		// Allocate full destination matrix on first rank
		// Will be populated via MPI_Gather
		dst_m.rows = m1.rows;
		dst_m.cols = m2.cols;
		dst_m.matrix = safe_malloc_int(
			dst_m.rows*dst_m.cols,
			"Allocating destination matrix"
		);

		if(print) {
			// puts() appends its own newline, so no "\n" in the literal
			puts("Matrix 1");
			print_matrix(&m1);
			puts("");
			puts("Matrix 2");
			print_matrix(&m2);
			puts("");
		}
	}

	// Broadcast each element of the structs, to avoid the complexity
	// of creating custom data types
	MPI_Bcast(&m1.rows,1,MPI_INT,FIRST_RANK,MPI_COMM_WORLD);
	MPI_Bcast(&m1.cols,1,MPI_INT,FIRST_RANK,MPI_COMM_WORLD);
	MPI_Bcast(&m2.rows,1,MPI_INT,FIRST_RANK,MPI_COMM_WORLD);
	MPI_Bcast(&m2.cols,1,MPI_INT,FIRST_RANK,MPI_COMM_WORLD);
	MPI_Bcast(m1.matrix,m1.rows*m1.cols,MPI_INT,FIRST_RANK,MPI_COMM_WORLD);
	MPI_Bcast(m2.matrix,m2.rows*m2.cols,MPI_INT,FIRST_RANK,MPI_COMM_WORLD);
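	/*
	 * Sketch of an alternative (not used here): pack the four dimension
	 * fields into one buffer and broadcast once, trading four small
	 * collectives for a single one. Assumes the fields fit in int:
	 *
	 *   int dims[4] = { m1.rows, m1.cols, m2.rows, m2.cols };
	 *   MPI_Bcast(dims, 4, MPI_INT, FIRST_RANK, MPI_COMM_WORLD);
	 *   m1.rows = dims[0]; m1.cols = dims[1];
	 *   m2.rows = dims[2]; m2.cols = dims[3];
	 */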

	// Calculate row offset in product matrix to start and stop calculation
	stride = m1.rows/size;
	start_row = rank*stride;

	// Assign an equal number of rows to each rank, except for the last
	// rank, which gets the remainder
	if(rank<(size-1)) {
		stop_row = stride*(rank+1);
	}
	else {
		stop_row = m1.rows;
	}
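	/*
	 * For example, with m1.rows == 10 and size == 4: stride is 2, so
	 * ranks 0-2 compute rows [0,2), [2,4), and [4,6), while rank 3
	 * picks up the remainder and computes rows [6,10).
	 */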

	// Only need to allocate enough for the local computation
	local_dst_m.cols = m2.cols;
	local_dst_m.rows = (stop_row-start_row);
	local_dst_m.matrix = safe_malloc_int(
		local_dst_m.rows*local_dst_m.cols,
		"Allocating local destination matrix"
	);

#ifdef DEBUG
	fprintf(stderr,"Rank %d - Stride %u, start_row %u, stop_row %u\n",
		rank,stride,start_row,stop_row);
#endif
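	// Compute this rank's slice of the product: row 0 of local_dst_m
	// corresponds to global row start_row of the final matrix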
	matmul(&m1,&m2,&local_dst_m,start_row,stop_row);

#ifdef DEBUG
	fprintf(stderr,"Rank %d local destination matrix:\n",rank);
	print_matrix(&local_dst_m);
#endif

	// Gather the local slices on the first rank. The send and receive
	// buffers are the matrix pointers themselves (not their addresses),
	// and recvcount is the element count received from each rank, not
	// the total. MPI_Gather assumes every rank contributes the same
	// count, which only holds when size divides m1.rows evenly.
	MPI_Gather(
		local_dst_m.matrix,
		(local_dst_m.cols*local_dst_m.rows),
		MPI_INT,
		dst_m.matrix,
		(local_dst_m.cols*local_dst_m.rows),
		MPI_INT,
		FIRST_RANK,
		MPI_COMM_WORLD
	);
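	/*
	 * Sketch of the general case (hypothetical, untested): when size
	 * does not divide m1.rows evenly, MPI_Gatherv takes per-rank counts
	 * and displacements, mirroring the partitioning above. counts and
	 * displs are only significant on the root rank.
	 *
	 *   int counts[size], displs[size];
	 *   for(int r = 0; r < size; r++) {
	 *       unsigned int lo = r*stride;
	 *       unsigned int hi = (r < size-1) ? stride*(r+1) : m1.rows;
	 *       counts[r] = (hi-lo)*m2.cols;
	 *       displs[r] = lo*m2.cols;
	 *   }
	 *   MPI_Gatherv(local_dst_m.matrix,
	 *       local_dst_m.rows*local_dst_m.cols, MPI_INT,
	 *       dst_m.matrix, counts, displs, MPI_INT,
	 *       FIRST_RANK, MPI_COMM_WORLD);
	 */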

	MPI_Finalize();

	free(m1.matrix);
	free(m2.matrix);
	free(local_dst_m.matrix);
	free(dst_m.matrix);	// NULL on every rank but the first

	exit(EXIT_SUCCESS);
}