#define mpiTestError(ok,cad) \
    if ( ok != MPI_SUCCESS ) { \
        fprintf(stderr, "RunTime-Error, Rank %d: %s - %d\n", mpiRank, cad, ok); \
    }
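
/* factors2D(): derive a near-square 2D process grid from the number of MPI
 * processes. Starting at the integer square root of numProc, it searches for
 * a factor pair (low, high) with low*high == numProc; if low reaches 1
 * (e.g. a prime numProc) it falls back to a 1 x numProc grid. */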
    result = (int *)calloc(2, sizeof(int));
        fprintf(stderr, "RunTime-Error, Rank %d: Allocating factors2D result\n", mpiRank);
    low = high = (int)sqrt((double)numProc);
    while ( product != numProc ) {
        if (product < numProc) {
            product = product + low;

        if (low==1) { high=numProc; product=numProc; }
        else product = product - high;
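
/* main(): iterative 4-point stencil on a rows x columns matrix. The interior
 * is block-distributed over a 2D cartesian process topology; each iteration
 * exchanges halo rows/columns with the four neighbours, and at the end rank 0
 * gathers every block and writes the result to a file. */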
int main(int argc, char *argv[]) {
    setbuf(stderr, NULL);
    setbuf(stdout, NULL);
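
    /* Problem size and iteration count are taken from the command line. */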
    int rows, columns, numIter;

        fprintf(stderr, "\nUsage: %s <numRows> <numColumns> <numIterations>\n", argv[0]);

    rows = atoi( argv[1] );
    columns = atoi( argv[2] );
    numIter = atoi( argv[3] );
    ok = MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &mpiRank );
    MPI_Comm_size( MPI_COMM_WORLD, &mpiNProc );
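
    /* Each rank allocates the full rows x columns matrix (row-major, accessed
     * through the element() macro below); only its own block plus the received
     * halo rows/columns is actually updated. */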
    matrix = (double *)calloc( (size_t)(rows * columns), sizeof(double) );

    if ( matrix == NULL ) {
        fprintf(stderr, "RunTime-Error, Rank %d: Allocating data structures\n", mpiRank);
    }
#define element(mat, idx1, idx2) (mat[(idx1)*columns+(idx2)])  /* row-major 2D indexing into the flat array */
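
    /* Initialisation: the last column is fixed to 4.0 and the last row to 2.0. */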
    for (i=0; i<rows; i++) {
        for (j=0; j<columns; j++) {

    for (i=0; i<rows; i++) {
        element(matrix, i, columns-1) = 4.0;

    for (i=0; i<columns; i++) {
        element(matrix, rows-1, i) = 2.0;
    MPI_Barrier( MPI_COMM_WORLD );
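
    /* Build a 2D, non-periodic cartesian topology; procs[] holds the process
     * grid dimensions (presumably the factors2D() result). */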
    MPI_Comm cartesianComm;
    int periods[2] = { 0, 0 };
    ok = MPI_Cart_create(MPI_COMM_WORLD, 2, procs, periods, 0, &cartesianComm);
    ok = MPI_Cart_coords(cartesianComm, mpiRank, 2, myCoords);
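
    /* Neighbour ranks in the four directions via MPI_Cart_shift; ranks on the
     * domain border keep the -1000 sentinel for the missing neighbour. */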
    int rankUp=-1000, rankDown=-1000, rankLeft=-1000, rankRight=-1000;
    if ( myCoords[0] != 0 ) {
        ok = MPI_Cart_shift(cartesianComm, 0, -1, &foo, &rankUp);
        mpiTestError( ok, "Shifting cartesian coordinates up" );
    }
    if ( myCoords[0] != procs[0]-1 ) {
        ok = MPI_Cart_shift(cartesianComm, 0, +1, &foo, &rankDown);
        mpiTestError( ok, "Shifting cartesian coordinates down" );
    }
    if ( myCoords[1] != 0 ) {
        ok = MPI_Cart_shift(cartesianComm, 1, -1, &foo, &rankLeft);
        mpiTestError( ok, "Shifting cartesian coordinates left" );
    }
    if ( myCoords[1] != procs[1]-1 ) {
        ok = MPI_Cart_shift(cartesianComm, 1, +1, &foo, &rankRight);
        mpiTestError( ok, "Shifting cartesian coordinates right" );
    }
    if ( procs[0] > rows-2 || procs[1] > columns-2 ) {
        fprintf(stderr, "Error: This code does not support partitioning fewer rows/columns than processes along an axis\n");
    fprintf(stderr, "CTRL RANKS: %d (%d,%d,%d,%d)\n", mpiRank, rankUp, rankDown, rankLeft, rankRight);
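
    /* Block partition of the interior: begin[] / end[] are the inclusive row
     * and column ranges owned by this rank. */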
    procRatio[0] = (double)(rows-2)/procs[0];
    procRatio[1] = (double)(columns-2)/procs[1];
    fprintf(stderr, "CTRL %d, myCoords %d,%d\n", mpiRank, myCoords[0], myCoords[1]);
    begin[0] = 1 + (int)floor( myCoords[0]*procRatio[0] );
    end[0]   = 1 + (int)floor( (myCoords[0]+((rows > procs[0]) ? 1 : 0))*procRatio[0] - ((rows > procs[0]) ? 1 : 0) );
    begin[1] = 1 + (int)floor( myCoords[1]*procRatio[1] );
    end[1]   = 1 + (int)floor( (myCoords[1]+((columns > procs[1]) ? 1 : 0))*procRatio[1] - ((columns > procs[1]) ? 1 : 0) );
    fprintf(stderr, "CTRL %d, matrix[%d:%d][%d:%d]\n", mpiRank, begin[0], end[0], begin[1], end[1]);
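
    /* verticalBorderType describes one column of the local block: one double
     * per block row, with a byte stride of a whole matrix row. Note that
     * MPI_Type_hvector is the legacy MPI-1 call; MPI_Type_create_hvector is
     * the non-deprecated equivalent. */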
    MPI_Datatype verticalBorderType;
    ok = MPI_Type_hvector( end[0]-begin[0]+1, 1, (int)sizeof(double)*columns, MPI_DOUBLE, &verticalBorderType );
    ok = MPI_Type_commit( &verticalBorderType );
    MPI_Request request[4];
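
    /* Main iteration loop: receive the halo coming from the upper and left
     * neighbours, update the local block in place, post non-blocking sends of
     * the four own borders, receive the halo from the lower and right
     * neighbours, and finally wait for the sends to complete. */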
    for (loopIndex = 0; loopIndex < numIter; loopIndex++) {

        if ( myCoords[0] != 0 ) {
            ok = MPI_Recv( &element(matrix, begin[0]-1, begin[1]), end[1]-begin[1]+1, MPI_DOUBLE, rankUp, 1, MPI_COMM_WORLD, &status );
        }
        if ( myCoords[1] != 0 ) {
            ok = MPI_Recv( &element(matrix, begin[0], begin[1]-1), 1, verticalBorderType, rankLeft, 3, MPI_COMM_WORLD, &status );
        }
        /* In-place 4-neighbour average over the local block */
        for (i=begin[0]; i<=end[0]; i++) {
            for (j=begin[1]; j<=end[1]; j++) {
                element( matrix,i,j ) = ( element( matrix,i-1,j )
                                        + element( matrix,i+1,j )
                                        + element( matrix,i,j-1 )
                                        + element( matrix,i,j+1 ) ) / 4;
            }
        }
        if ( myCoords[0] != 0 ) {
            ok = MPI_Isend( &element(matrix, begin[0], begin[1]), end[1]-begin[1]+1, MPI_DOUBLE, rankUp, 0, MPI_COMM_WORLD, &request[0] );
        }
        if ( myCoords[0] != procs[0]-1 ) {
            ok = MPI_Isend( &element(matrix, end[0], begin[1]), end[1]-begin[1]+1, MPI_DOUBLE, rankDown, 1, MPI_COMM_WORLD, &request[1] );
        }
        if ( myCoords[1] != 0 ) {
            ok = MPI_Isend( &element(matrix, begin[0], begin[1]), 1, verticalBorderType, rankLeft, 2, MPI_COMM_WORLD, &request[2] );
        }
        if ( myCoords[1] != procs[1]-1 ) {
            ok = MPI_Isend( &element(matrix, begin[0], end[1]), 1, verticalBorderType, rankRight, 3, MPI_COMM_WORLD, &request[3] );
        }
        if ( myCoords[0] != procs[0]-1 ) {
            ok = MPI_Recv( &element(matrix, end[0]+1, begin[1]), end[1]-begin[1]+1, MPI_DOUBLE, rankDown, 0, MPI_COMM_WORLD, &status );
        }
        if ( myCoords[1] != procs[1]-1 ) {
            ok = MPI_Recv( &element(matrix, begin[0], end[1]+1), 1, verticalBorderType, rankRight, 2, MPI_COMM_WORLD, &status );
        }
        if ( myCoords[0] != 0 ) {
            MPI_Wait( &request[0], &status );
        }
        if ( myCoords[0] != procs[0]-1 ) {
            MPI_Wait( &request[1], &status );
        }
        if ( myCoords[1] != 0 ) {
            MPI_Wait( &request[2], &status );
        }
        if ( myCoords[1] != procs[1]-1 ) {
            MPI_Wait( &request[3], &status );
        }
    }
    ok = MPI_Type_free( &verticalBorderType );
    for (i=begin[0]; i<=end[0]; i++) {
        for (j=begin[1]; j<=end[1]; j++) {
            fprintf(stderr, "M[%d][%d] %lf\n", i, j, element(matrix,i,j));
        }
    }
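
    /* Rank 0 gathers the result: for every other rank it recomputes that
     * rank's block extent, builds a matching derived datatype, and receives
     * the block directly into place (message tag = sender rank). */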
    for ( rank=1; rank<mpiNProc; rank++ ) {

        ok = MPI_Cart_coords(cartesianComm, rank, 2, remoteCoords);
        mpiTestError( ok, "Getting remote cartesian coordinates in end comm." );
        remoteBegin[0] = 1 + (int)floor( remoteCoords[0]*procRatio[0] );
        remoteEnd[0]   = 1 + (int)floor( (remoteCoords[0]+((rows > procs[0]) ? 1 : 0))*procRatio[0] - ((rows > procs[0]) ? 1 : 0) );
        remoteBegin[1] = 1 + (int)floor( remoteCoords[1]*procRatio[1] );
        remoteEnd[1]   = 1 + (int)floor( (remoteCoords[1]+((columns > procs[1]) ? 1 : 0))*procRatio[1] - ((columns > procs[1]) ? 1 : 0) );
        fprintf(stderr, "CTRL RemoteMatrix %d, [%d:%d][%d:%d]\n", rank, remoteBegin[0], remoteEnd[0], remoteBegin[1], remoteEnd[1]);
        MPI_Datatype remoteType;
        ok = MPI_Type_hvector( remoteEnd[0]-remoteBegin[0]+1, remoteEnd[1]-remoteBegin[1]+1, (int)sizeof(double)*columns, MPI_DOUBLE, &remoteType );
        ok = MPI_Type_commit( &remoteType );
        mpiTestError( ok, "Committing the remote type in end comm." );
        ok = MPI_Recv( &element( matrix, remoteBegin[0], remoteBegin[1] ), 1, remoteType, rank, rank, MPI_COMM_WORLD, &status );
        mpiTestError( ok, "Receiving remote parts in end comm." );

        ok = MPI_Type_free( &remoteType );
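
    /* Every other rank describes its own block with an equivalent derived
     * datatype and sends it to rank 0, using its own rank as the tag. */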
    fprintf(stderr, "CTRL LocalMatrix %d, [%d:%d][%d:%d]\n", mpiRank, begin[0], end[0], begin[1], end[1]);
    MPI_Datatype localType;
    ok = MPI_Type_hvector( end[0]-begin[0]+1, end[1]-begin[1]+1, (int)sizeof(double)*columns, MPI_DOUBLE, &localType );
    ok = MPI_Type_commit( &localType );
    mpiTestError( ok, "Committing the local type in end comm." );
    ok = MPI_Send( &element( matrix, begin[0], begin[1] ), 1, localType, 0, mpiRank, MPI_COMM_WORLD );

    ok = MPI_Type_free( &localType );
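
    /* Reduce the per-rank timing value (presumably the elapsed time measured
     * around the computation) to rank 0, keeping the maximum. */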
    MPI_Reduce( &clock, &clockReduce, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD );
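
    /* Write the gathered matrix to the output file, one fixed-width value per
     * element (presumably executed by rank 0 only). */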
    if ((f = fopen("Result.out.dtxt", "w")) == NULL) {
        fprintf(stderr, "Error: Impossible to open output file\n");
    }
    for (i=0; i<rows; i++) {
        for (j=0; j<columns; j++) {
            fprintf(f, "%014.4lf", element(matrix,i,j) );
/* Symbols defined in this file: mpiTestError(ok,cad), element(mat,idx1,idx2),
 * int *factors2D(int numProc), int main(int argc, char *argv[]). */