Hitmap 1.3
refMPICannon.c
1 /*
2 * refMPICannon.c
3 * MPI reference code, manually developed and optimized, implementing
4 * Cannon's algorithm for Matrix Multiplication.
5 *
6 * Simplified version where the matrix is always divisible in square pieces
7 * for the given number of processors.
8 *
9 * Asynchronous sends, synchronous receives
10 *
11 * v1.0
12 * (c) 2007-2009, Arturo Gonzalez-Escribano
13 */
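/*
 * Algorithm outline (as implemented below, on a sqrt(P) x sqrt(P) process grid):
 *   1. Every process owns one block of A, one of B and one of C.
 *   2. Initial skew: the A block is shifted left by the process row coordinate
 *      and the B block is shifted up by the process column coordinate
 *      (MPI_Cart_shift with displacements -myCoords[0] and -myCoords[1]).
 *   3. sqrt(P) compute stages: multiply the local blocks and accumulate into C,
 *      then shift A one position left and B one position up. The last stage
 *      needs no shift, so the main loop runs sqrt(P)-1 times and the final
 *      local product is computed after it.
 *   4. Rank 0 gathers all C blocks (and, under WRITE_RESULT, writes them to a file).
 */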
14 
15 /*
16  * <license>
17  *
18  * Hitmap v1.2
19  *
20  * This software is provided to enhance knowledge and encourage progress in the scientific
21  * community. It should be used only for research and educational purposes. Any reproduction
22  * or use for commercial purpose, public redistribution, in source or binary forms, with or
23  * without modifications, is NOT ALLOWED without the previous authorization of the copyright
24  * holder. The origin of this software must not be misrepresented; you must not claim that you
25  * wrote the original software. If you use this software for any purpose (e.g. publication),
26  * a reference to the software package and the authors must be included.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS IS" AND ANY
29  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
30  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
31  * THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
32  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
35  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37  *
38  * Copyright (c) 2007-2015, Trasgo Group, Universidad de Valladolid.
39  * All rights reserved.
40  *
41  * More information on http://trasgo.infor.uva.es/
42  *
43  * </license>
44 */
45 
46 #include<stdio.h>
47 #include<math.h>
48 #include<stdlib.h>
49 #include<mpi.h>
50 
51 #define MATRIX_ROWS 10
52 #define MATRIX_COLUMNS 20
53 
54 #define mpiTestError(ok,cad) \
55  if ( ok != MPI_SUCCESS ) { \
56  fprintf(stderr,"RunTime-Error, Rank %d: %s - %d\n", mpiRank, cad, ok);\
57  exit(-1); \
58  }
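/*
 * Note: mpiTestError expands to a bare if-statement. An equivalent, more
 * defensive formulation (a sketch only; not the form used above) wraps the
 * body in do { ... } while(0) so the macro always behaves as a single
 * statement, even directly between an if and an else:
 *
 *   #define mpiTestError(ok,cad) \
 *       do { \
 *           if ( (ok) != MPI_SUCCESS ) { \
 *               fprintf(stderr,"RunTime-Error, Rank %d: %s - %d\n", mpiRank, cad, ok); \
 *               exit(-1); \
 *           } \
 *       } while (0)
 */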
59 
60 
61 #define elemA(i,j) A[(i)*MATRIX_COLUMNS+(j)]
62 #define elemCopyA(i,j) copyA[(i)*MATRIX_COLUMNS+(j)]
63 #define elemB(i,j) B[(i)*MATRIX_ROWS+(j)]
64 #define elemCopyB(i,j) copyB[(i)*MATRIX_ROWS+(j)]
65 #define elemC(i,j) C[(i)*MATRIX_ROWS+(j)]
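/*
 * Indexing note: A is MATRIX_ROWS x MATRIX_COLUMNS and C is MATRIX_ROWS x MATRIX_ROWS,
 * both stored row-major, so elemA/elemC use the column count of each matrix as the
 * row stride. B is MATRIX_COLUMNS x MATRIX_ROWS (so that C = A*B is square), hence
 * elemB/elemCopyB use MATRIX_ROWS as the stride: elemB(3,2) is B[3*MATRIX_ROWS + 2].
 */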
66 
67 
68 int mpiRank;
69 int mpiNProc;
70 double mainClock, clockReduce;
71 
72 
73 int main(int argc, char *argv[]) {
74  int ok;
75  int i,j,k;
76 #ifdef DEBUG
77  setbuf(stderr, NULL);
78  setbuf(stdout, NULL);
79 #endif
80 
81  /* 1. INITIALIZE MPI */
82  ok = MPI_Init( &argc, &argv);
83  mpiTestError( ok, "Initializing MPI" );
84 
85  /* 2. GET BASIC GROUP PARAMETERS */
86  MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
87  MPI_Comm_size(MPI_COMM_WORLD, &mpiNProc);
88 
89  /* 3. CHECK APPROPRIATE NUMBER OF PROCS/SIZES FOR THE SIMPLIFIED VERSION */
90  double sqProc = sqrt( mpiNProc );
91  if ( sqProc != floor(sqProc) ) {
92  if ( mpiRank == 0 ) {
93  fprintf(stderr,"Error: This code requires a number of processes that is a perfect square: %d\n", mpiNProc);
94  }
95  MPI_Finalize();
96  exit(-1);
97  }
98  if ( MATRIX_COLUMNS % (int)sqProc != 0 || MATRIX_ROWS % (int)sqProc != 0 ) {
99  if ( mpiRank == 0 ) {
100  fprintf(stderr,"Error: This code requires matrix sizes that are evenly divisible by the square root of the number of processes\n");
101  }
102  MPI_Finalize();
103  exit(-1);
104  }
105 
106  /* 4. DEFINE MAIN DATA STRUCTURES */
107  double *A = (double *)calloc( (size_t)(MATRIX_ROWS*MATRIX_COLUMNS), sizeof(double));
108  double *B = (double *)calloc( (size_t)(MATRIX_ROWS*MATRIX_COLUMNS), sizeof(double));
109  double *C = (double *)calloc( (size_t)(MATRIX_ROWS*MATRIX_ROWS), sizeof(double));
110  double *copyA = (double *)calloc( (size_t)(MATRIX_ROWS*MATRIX_COLUMNS), sizeof(double));
111  double *copyB = (double *)calloc( (size_t)(MATRIX_ROWS*MATRIX_COLUMNS), sizeof(double));
112  if ( A==NULL || B==NULL || C==NULL || copyA==NULL || copyB==NULL ) {
113  fprintf(stderr,"Error, Rank %d: Allocating data structures\n", mpiRank);
114  exit(3);
115  }
116 
117  /* 5. INITIAL MATRIX VALUES */
118  /* INIT A(i,j) = i*MATRIX_COLUMNS + j (consecutive values starting at 0) */
119  int cont=0;
120  for (i=0; i<MATRIX_ROWS; i++) {
121  for (j=0; j<MATRIX_COLUMNS; j++) {
122  elemA(i,j) = cont++;
123  }
124  }
125 #ifdef B_IDENTITY
126  /* INIT B = Identity */
127  for (i=0; i<MATRIX_COLUMNS; i++) {
128  for (j=0; j<MATRIX_ROWS; j++) {
129  elemB(i,j) = (i==j) ? 1.0 : 0.0;
130  }
131  }
132 #else
133  /* INIT B = Random with seed */
134  srand48( 364543 );
135  for (i=0; i<MATRIX_COLUMNS; i++) {
136  for (j=0; j<MATRIX_ROWS; j++) {
137  elemB(i,j) = drand48();
138  }
139  }
140 #endif
141  /* INIT result = 0 */
142  for (i=0; i<MATRIX_ROWS; i++) {
143  for (j=0; j<MATRIX_ROWS; j++) {
144  elemC(i,j) = 0.0;
145  }
146  }
147 
148  /* 6. START CLOCK */
149  MPI_Barrier( MPI_COMM_WORLD );
150  mainClock = MPI_Wtime();
151 
152  /* 7. CARTESIAN TOPOLOGY */
153  MPI_Comm cartesianComm;
154  int procs[2];
155  procs[0] = (int)sqrt( mpiNProc );
156  procs[1] = procs[0];
157  int periods[2] = { 1, 1 };
158  ok = MPI_Cart_create(MPI_COMM_WORLD, 2, procs, periods, 0, &cartesianComm);
159  mpiTestError( ok, "Creating cartesian topology" );
160 
161  int myCoords[2];
162  ok = MPI_Cart_coords(cartesianComm, mpiRank, 2, myCoords);
163  mpiTestError( ok, "Getting cartesian coordinates" );
164 
165  int rankUp=-1000, rankDown=-1000, rankLeft=-1000, rankRight=-1000;
166  int rankSendIniA=-1000, rankSendIniB=-1000;
167  int rankRecvIniA=-1000, rankRecvIniB=-1000;
168 
169  ok = MPI_Cart_shift(cartesianComm, 0, +1, &rankUp, &rankDown);
170  mpiTestError( ok, "Shifting cartesian coordinates vertical" );
171 
172  ok = MPI_Cart_shift(cartesianComm, 1, +1, &rankLeft, &rankRight);
173  mpiTestError( ok, "Shifting cartesian coordinates horizontal" );
174 
175  ok = MPI_Cart_shift(cartesianComm, 1, -myCoords[0], &rankRecvIniA, &rankSendIniA);
176  mpiTestError( ok, "Shifting cartesian coordinates ini A" );
177 
178  ok = MPI_Cart_shift(cartesianComm, 0, -myCoords[1], &rankRecvIniB, &rankSendIniB);
179  mpiTestError( ok, "Shifting cartesian coordinates ini B" );
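/*
 * MPI_Cart_shift with displacement +1 returns, for each dimension, the source of a
 * unit shift (rankUp / rankLeft, the previous process) and its destination
 * (rankDown / rankRight, the next one). The two calls with displacements
 * -myCoords[0] and -myCoords[1] implement Cannon's initial skew: rankSendIniA lies
 * myCoords[0] positions to the left (with periodic wrap-around) and rankSendIniB
 * lies myCoords[1] positions above, while the matching RecvIni ranks are the
 * symmetric senders towards this process.
 */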
180 
181 #ifdef DEBUG
182 fprintf(stderr,"CTRL %d RANKS: (%d,%d,%d,%d)\n", mpiRank, rankUp, rankDown, rankLeft, rankRight);
183 fprintf(stderr,"CTRL %d INI RANKS: A(send %d, recv %d) B(send %d, recv %d)\n", mpiRank, rankSendIniA, rankRecvIniA, rankSendIniB, rankRecvIniB);
184 #endif
185 
186 
187  /* 8. COMPUTE THE INDICES OF THE LOCAL PART OF THE MATRICES */
188  #define procRatioRows0 MATRIX_ROWS/procs[0]
189  #define procRatioRows1 MATRIX_ROWS/procs[1]
190  #define procRatioColumns0 MATRIX_COLUMNS/procs[0]
191  #define procRatioColumns1 MATRIX_COLUMNS/procs[1]
192 
193  int beginA[2];
194  int endA[2];
195  int beginB[2];
196  int endB[2];
197  int beginC[2];
198  int endC[2];
199 
200 #ifdef DEBUG
201 fprintf(stderr,"CTRL %d, myCoords %d,%d\n", mpiRank, myCoords[0], myCoords[1]);
202 #endif
203 
204  beginA[0] = (int)floor( (double)myCoords[0]*procRatioRows0 );
205  endA[0] = (int)floor( ((double)myCoords[0]+((MATRIX_ROWS > procs[0]) ? 1 : 0))*procRatioRows0 - ((MATRIX_ROWS > procs[0]) ? 1 : 0) );
206  beginA[1] = (int)floor( (double)myCoords[1]*procRatioColumns1 );
207  endA[1] = (int)floor( ((double)myCoords[1]+((MATRIX_COLUMNS > procs[1]) ? 1 : 0))*procRatioColumns1 - ((MATRIX_COLUMNS > procs[1]) ? 1 : 0) );
208 
209  beginB[0] = (int)floor( (double)myCoords[0]*procRatioColumns0 );
210  endB[0] = (int)floor( ((double)myCoords[0]+((MATRIX_COLUMNS > procs[0]) ? 1 : 0))*procRatioColumns0 - ((MATRIX_COLUMNS > procs[0]) ? 1 : 0) );
211  beginB[1] = (int)floor( (double)myCoords[1]*procRatioRows1 );
212  endB[1] = (int)floor( ((double)myCoords[1]+((MATRIX_ROWS > procs[1]) ? 1 : 0))*procRatioRows1 - ((MATRIX_ROWS > procs[1]) ? 1 : 0) );
213 
214  beginC[0] = (int)floor( (double)myCoords[0]*procRatioRows0 );
215  endC[0] = (int)floor( ((double)myCoords[0]+((MATRIX_ROWS > procs[0]) ? 1 : 0))*procRatioRows0 - ((MATRIX_ROWS > procs[0]) ? 1 : 0) );
216  beginC[1] = (int)floor( (double)myCoords[1]*procRatioRows1 );
217  endC[1] = (int)floor( ((double)myCoords[1]+((MATRIX_ROWS > procs[1]) ? 1 : 0))*procRatioRows1 - ((MATRIX_ROWS > procs[1]) ? 1 : 0) );
218 
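/*
 * Worked example of the block bounds, assuming 4 processes (a 2x2 grid) and the
 * default sizes MATRIX_ROWS=10, MATRIX_COLUMNS=20: the process at coordinates
 * (0,1) gets A[0:4][10:19], B[0:9][5:9], C[0:4][5:9]; the process at (1,0) gets
 * A[5:9][0:9], B[10:19][0:4], C[5:9][0:4].
 */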
219 #ifdef DEBUG
220 fprintf(stderr,"CTRL %d, A[%d:%d][%d:%d]\n", mpiRank, beginA[0], endA[0], beginA[1], endA[1]);
221 fprintf(stderr,"CTRL %d, B[%d:%d][%d:%d]\n", mpiRank, beginB[0], endB[0], beginB[1], endB[1]);
222 fprintf(stderr,"CTRL %d, C[%d:%d][%d:%d]\n", mpiRank, beginC[0], endC[0], beginC[1], endC[1]);
223 #endif
224 
225  /* 9. COMMIT MPI TYPES */
226  MPI_Datatype dataBlockA;
227  MPI_Datatype dataBlockB;
228  MPI_Datatype dataBlockC;
229 
230  ok = MPI_Type_hvector( endA[0]-beginA[0]+1, endA[1]-beginA[1]+1, sizeof(double)*(size_t)MATRIX_COLUMNS, MPI_DOUBLE, &dataBlockA );
231  mpiTestError( ok, "Creating the A block type" );
232  ok = MPI_Type_commit( &dataBlockA );
233  mpiTestError( ok, "Committing the A block type" );
234 
235  ok = MPI_Type_hvector( endB[0]-beginB[0]+1, endB[1]-beginB[1]+1, sizeof(double)*(size_t)MATRIX_ROWS, MPI_DOUBLE, &dataBlockB );
236  mpiTestError( ok, "Creating the B block type" );
237  ok = MPI_Type_commit( &dataBlockB );
238  mpiTestError( ok, "Committing the B block type" );
239 
240  ok = MPI_Type_hvector( endC[0]-beginC[0]+1, endC[1]-beginC[1]+1, sizeof(double)*(size_t)MATRIX_ROWS, MPI_DOUBLE, &dataBlockC );
241  mpiTestError( ok, "Creating the C block type" );
242  ok = MPI_Type_commit( &dataBlockC );
243  mpiTestError( ok, "Committing the C block type" );
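/*
 * Each block type describes one process-local sub-matrix inside the full
 * row-major array: count = block rows, blocklength = block columns, and a byte
 * stride of one full matrix row (MPI_Type_hvector takes its stride in bytes,
 * hence the sizeof(double) factor). A whole block can then be moved with a
 * single send or receive of count 1.
 */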
244 
245  MPI_Request request[2];
246  MPI_Status status;
247 
248 #ifdef DEBUG_V
249 {
250 int i,j;
251 for (i=beginA[0]; i<=endA[0]; i++) {
252  for (j=beginA[1]; j<=endA[1]; j++) {
253  fprintf(stderr,"CTRL %d Before shift - A[%d][%d]=%lf\n", mpiRank, i,j,elemA(i,j) );
254  }
255 }
256 for (i=beginB[0]; i<=endB[0]; i++) {
257  for (j=beginB[1]; j<=endB[1]; j++) {
258  fprintf(stderr,"CTRL %d Before shift - B[%d][%d]=%lf\n", mpiRank, i,j,elemB(i,j) );
259  }
260 }
261 }
262 #endif
263 
264  /* 10. INITIAL DATA DISTRIBUTION */
265  ok = MPI_Isend( &(elemA(beginA[0],beginA[1])), 1, dataBlockA, rankSendIniA, 0, MPI_COMM_WORLD, &request[0] );
266  mpiTestError( ok, "Send Ini A" );
267 
268  ok = MPI_Isend( &(elemB(beginB[0],beginB[1])), 1, dataBlockB, rankSendIniB, 1, MPI_COMM_WORLD, &request[1] );
269  mpiTestError( ok, "Send Ini B" );
270 
271  ok = MPI_Recv( &(elemCopyA(beginA[0],beginA[1])), 1, dataBlockA, rankRecvIniA, 0, MPI_COMM_WORLD, &status );
272  mpiTestError( ok, "Recv Ini A" );
273 
274  ok = MPI_Recv( &(elemCopyB(beginB[0],beginB[1])), 1, dataBlockB, rankRecvIniB, 1, MPI_COMM_WORLD, &status );
275  mpiTestError( ok, "Recv Ini B" );
276 
277  MPI_Wait( &request[0], &status );
278  MPI_Wait( &request[1], &status );
279 
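/*
 * The received blocks land in copyA/copyB rather than directly in A/B, because
 * the local MPI_Isend may still be reading A/B while the receive completes.
 * Only after both MPI_Wait calls is it safe to copy the new blocks back into
 * A/B at the same local indices (the "asynchronous sends, synchronous
 * receives" scheme mentioned in the file header).
 */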
280  for (i=beginA[0]; i<=endA[0]; i++) {
281  for (j=beginA[1]; j<=endA[1]; j++) {
282  elemA(i,j) = elemCopyA(i,j);
283  }
284  }
285  for (i=beginB[0]; i<=endB[0]; i++) {
286  for (j=beginB[1]; j<=endB[1]; j++) {
287  elemB(i,j) = elemCopyB(i,j);
288  }
289  }
290 
291 
292 #ifdef DEBUG_V
293 {
294 int i,j;
295 for (i=beginA[0]; i<=endA[0]; i++) {
296  for (j=beginA[1]; j<=endA[1]; j++) {
297  fprintf(stderr,"CTRL %d After shift - A[%d][%d]=%lf\n", mpiRank, i,j, elemA(i,j) );
298  }
299 }
300 for (i=beginB[0]; i<=endB[0]; i++) {
301  for (j=beginB[1]; j<=endB[1]; j++) {
302  fprintf(stderr,"CTRL %d After shift - B[%d][%d]=%lf\n", mpiRank, i,j,elemB(i,j) );
303  }
304 }
305 }
306 #endif
307 
308  /* 11. LOOP, ONE ITERATION LESS TO AVOID EXTRA COMMUNICATION
309  AFTER THE LAST ITERATION */
310  int loopIndex;
311  for (loopIndex = 0; loopIndex < procs[0]-1; loopIndex++) {
312 
313  /* 12. COMPUTE LOCAL PRODUCT */
314  for (i=beginC[0]; i<=endC[0]; i++) {
315  for (j=beginC[1]; j<=endC[1]; j++) {
316  for (k=0; k<=endA[1]-beginA[1]; k++) {
317 #ifdef DEBUG
318 fprintf(stderr,"CTRL %d, i,j,k:(%d,%d,%d) %lf x %lf Stage 1\n",mpiRank, i,j,k,elemA(i,k+beginA[1]), elemB(k+beginB[0],j));
319 #endif
320  elemC(i,j) = elemC(i,j) + elemA(i,k+beginA[1]) * elemB(k+beginB[0],j);
321  }
322  }
323  }
324 
325  /* 13. COMMUNICATE A,B BLOCKS */
326  ok = MPI_Isend( &elemA(beginA[0],beginA[1]), 1, dataBlockA, rankLeft, 0, MPI_COMM_WORLD, &request[0] );
327  mpiTestError( ok, "Send Block A" );
328 
329  ok = MPI_Isend( &elemB(beginB[0],beginB[1]), 1, dataBlockB, rankUp, 1, MPI_COMM_WORLD, &request[1] );
330  mpiTestError( ok, "Send Block B" );
331 
332  ok = MPI_Recv( &elemCopyA(beginA[0],beginA[1]), 1, dataBlockA, rankRight, 0, MPI_COMM_WORLD, &status );
333  mpiTestError( ok, "Recv Block A" );
334 
335  ok = MPI_Recv( &elemCopyB(beginB[0],beginB[1]), 1, dataBlockB, rankDown, 1, MPI_COMM_WORLD, &status );
336  mpiTestError( ok, "Recv Block B" );
337 
338 #ifdef DEBUG_V
339 for (i=beginA[0]; i<=endA[0]; i++) {
340  for (j=beginA[1]; j<=endA[1]; j++) {
341  fprintf(stderr,"CTRL %d After repeated comm. A[%d][%d] %lf\n", mpiRank, i,j, elemA(i,j));
342  }
343 }
344 #endif
345  /* 14. END Isends */
346  MPI_Wait( &request[0], &status );
347  MPI_Wait( &request[1], &status );
348 
349  for (i=beginA[0]; i<=endA[0]; i++) {
350  for (j=beginA[1]; j<=endA[1]; j++) {
351  elemA(i,j) = elemCopyA(i,j);
352  }
353  }
354  for (i=beginB[0]; i<=endB[0]; i++) {
355  for (j=beginB[1]; j<=endB[1]; j++) {
356 #ifdef DEBUG
357 fprintf(stderr,"CTRL %d B->%lf\n", mpiRank, elemCopyB(i,j));
358 #endif
359  elemB(i,j) = elemCopyB(i,j);
360  }
361  }
362 
363  } /* END LOOP */
364 
365  ok = MPI_Type_free( &dataBlockA );
366  mpiTestError( ok, "Freeing data block A type" );
367  ok = MPI_Type_free( &dataBlockB );
368  mpiTestError( ok, "Freeing data block B type" );
369 
370  /* 15. COMPUTE LOCAL PRODUCT, LAST ITERATION */
371  for (i=beginC[0]; i<=endC[0]; i++) {
372  for (j=beginC[1]; j<=endC[1]; j++) {
373  for (k=0; k<=endA[1]-beginA[1]; k++) {
374 #ifdef DEBUG
375 fprintf(stderr,"CTRL %d, i,j,k:(%d,%d,%d) %lf x %lf Last stage\n",mpiRank, i,j,k,elemA(i,k+beginA[1]), elemB(k+beginB[0],j));
376 #endif
377  elemC(i,j) = elemC(i,j) + elemA(i,k+beginA[1]) * elemB(k+beginB[0],j);
378  }
379  }
380  }
381 
382 #ifdef DEBUG
383 for (i=beginC[0]; i<=endC[0]; i++) {
384  for (j=beginC[1]; j<=endC[1]; j++) {
385  fprintf(stderr,"CTRL %d Final C[%d][%d] %lf\n", mpiRank, i,j, elemC(i,j));
386  }
387 }
388 #endif
389 
390  /* 16. COMMUNICATE ALL LOCAL PIECES */
391  if ( mpiRank == 0 ) {
392  /* 16.1. RANK 0 RECEIVES ALL */
393  int rank;
394  for ( rank=1; rank<mpiNProc; rank++ ) {
395  /* COMPUTE THE REMOTE LOCAL PART */
396  int remoteCoords[2];
397  ok = MPI_Cart_coords(cartesianComm, rank, 2, remoteCoords);
398  mpiTestError( ok, "Getting remote cartesian coordinates in end comm." );
399 
400  int remoteBeginC[2];
401  remoteBeginC[0] = (int)floor( remoteCoords[0]*procRatioRows0 );
402  remoteBeginC[1] = (int)floor( remoteCoords[1]*procRatioRows1 );
403 
404 #ifdef DEBUG
405  int remoteEndC[2];
406  remoteEndC[0] = (int)floor( (remoteCoords[0]+((MATRIX_ROWS > procs[0]) ? 1 : 0))*procRatioRows0 - ((MATRIX_ROWS > procs[0]) ? 1 : 0) );
407  remoteEndC[1] = (int)floor( (remoteCoords[1]+((MATRIX_ROWS > procs[1]) ? 1 : 0))*procRatioRows1 - ((MATRIX_ROWS > procs[1]) ? 1 : 0) );
408 
409 fprintf(stderr,"CTRL RemoteC %d, [%d:%d][%d:%d]\n", rank, remoteBeginC[0], remoteEndC[0], remoteBeginC[1], remoteEndC[1]);
410 #endif
411 
412  ok = MPI_Recv( &(elemC(remoteBeginC[0],remoteBeginC[1])), 1, dataBlockC, rank, rank, MPI_COMM_WORLD, &status );
413  mpiTestError( ok, "Receiving remote parts in end comm." );
414  }
415  }
416  else {
417  /* 16.2. OTHERS SEND THE LOCAL PART */
418 #ifdef DEBUG
419 fprintf(stderr,"CTRL LocalC %d, [%d:%d][%d:%d]\n", mpiRank, beginC[0], endC[0], beginC[1], endC[1]);
420 #endif
421  ok = MPI_Send( &(elemC(beginC[0],beginC[1])), 1, dataBlockC, 0, mpiRank, MPI_COMM_WORLD );
422  mpiTestError( ok, "Sending local part in end comm." );
423  }
424  /* FREE TYPE */
425  ok = MPI_Type_free( &dataBlockC );
426  mpiTestError( ok, "Freeing remote type in end comm." );
427 
428  /* 17. STOP AND PRINT CLOCK */
429  mainClock = MPI_Wtime() - mainClock;
430  MPI_Reduce( &mainClock, &clockReduce, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD );
431  if (mpiRank==0) printf("Clock_%s: %14.6lf\n", "mainClock", clockReduce);
432 
433  /* 18. WRITE RESULT IN FILE */
434 #ifdef WRITE_RESULT
435  if ( mpiRank == 0 ) {
436  FILE *f;
437  if ((f=fopen("C.dtxt","w")) == NULL) {
438  fprintf(stderr,"Error: Impossible to open output file\n");
439  exit(-1);
440  }
441  for (i=0; i<MATRIX_ROWS; i++) {
442  for (j=0; j<MATRIX_ROWS; j++) {
443  fprintf(f,"%014.4lf", elemC(i,j));
444  fprintf(f,"\n");
445  }
446  }
447  fclose(f);
448  }
449 #endif
450 
451  /* 19. END PROGRAM */
452  MPI_Finalize();
453 
454  /* 20. RETURN CORRECT */
455  return 0;
456 }