#define mpiTestError(ok,cad) \
	if ( ok != MPI_SUCCESS ) { \
		fprintf(stderr, "RunTime-Error, Rank %d: %s - %d\n", mpiRank, cad, ok); \
	result = (int *)calloc( 2, sizeof(int) );
	/* ... */
		fprintf(stderr, "RunTime-Error, Rank %d: Allocating factors2D result\n", mpiRank);
	low = high = (int)sqrt( (double)numProc );
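
	/* Starting from the integer square root, adjust the candidate factor pair
	 * until its product is exactly numProc; if low reaches 1, fall back to a
	 * 1 x numProc grid. */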
	while ( product != numProc ) {
		if ( product < numProc ) {
			product = product + low;
		/* ... */
		if ( low == 1 ) { high = numProc; product = numProc; }
		else product = product - high;
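
/* main(): parses <numRows> <numColumns> <numIterations>, initializes MPI and a
 * 2D Cartesian decomposition, iterates the stencil, gathers the result on
 * rank 0, and writes the assembled matrix to Result.out.dtxt. */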
int main( int argc, char *argv[] ) {

	setbuf(stderr, NULL);
	setbuf(stdout, NULL);

	int rows, columns, numIter;

	fprintf(stderr, "\nUsage: %s <numRows> <numColumns> <numIterations>\n", argv[0]);
	/* ... */

	rows = atoi( argv[1] );
	columns = atoi( argv[2] );
	numIter = atoi( argv[3] );
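
	/* MPI start-up: initialize the library and query this process' rank and the
	 * total number of processes. */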
	ok = MPI_Init( &argc, &argv );

	MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
	MPI_Comm_size(MPI_COMM_WORLD, &mpiNProc);
	matrix = (double *)calloc( (size_t)(rows * columns), sizeof(double) );

	if ( matrix==NULL ) {
		fprintf(stderr, "Error, Rank %d: Allocating data structures\n", mpiRank);
		/* ... */
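
	/* Row-major indexing into the flat matrix buffer: element(mat,i,j) is row i,
	 * column j, with 'columns' values per row. */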
#define element(mat, idx1, idx2) (mat[(idx1)*columns+(idx2)])
	for (i=0; i<rows; i++) {
		for (j=0; j<columns; j++) {
			/* ... */
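
	/* Fixed boundary values: the last column is set to 4.0 and the last row to 2.0. */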
	for (i=0; i<rows; i++) {
		element(matrix, i, columns-1) = 4.0;
	}
	for (i=0; i<columns; i++) {
		element(matrix, rows-1, i) = 2.0;
	}
	MPI_Barrier( MPI_COMM_WORLD );
	MPI_Comm cartesianComm;

	int periods[2] = { 0, 0 };
	ok = MPI_Cart_create(MPI_COMM_WORLD, 2, procs, periods, 0, &cartesianComm);
	ok = MPI_Cart_coords(cartesianComm, mpiRank, 2, myCoords);
	int rankUp=-1000, rankDown=-1000, rankLeft=-1000, rankRight=-1000;
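
	/* Neighbor discovery: MPI_Cart_shift returns the rank of the neighbor in each
	 * direction; a rank keeps the -1000 sentinel when this process sits on that
	 * edge of the grid. */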
	if ( myCoords[0] != 0 ) {
		ok = MPI_Cart_shift(cartesianComm, 0, -1, &foo, &rankUp);
		mpiTestError( ok, "Shifting cartesian coordinates up" );
	}
	if ( myCoords[0] != procs[0]-1 ) {
		ok = MPI_Cart_shift(cartesianComm, 0, +1, &foo, &rankDown);
		mpiTestError( ok, "Shifting cartesian coordinates down" );
	}
	if ( myCoords[1] != 0 ) {
		ok = MPI_Cart_shift(cartesianComm, 1, -1, &foo, &rankLeft);
		mpiTestError( ok, "Shifting cartesian coordinates left" );
	}
	if ( myCoords[1] != procs[1]-1 ) {
		ok = MPI_Cart_shift(cartesianComm, 1, +1, &foo, &rankRight);
		mpiTestError( ok, "Shifting cartesian coordinates right" );
	}
	if ( procs[0] > rows-2 || procs[1] > columns-2 ) {
		fprintf(stderr, "Error: This code does not support partitioning fewer rows/columns than processors along an axis\n");
		/* ... */
	}
	fprintf(stderr, "CTRL RANKS: %d (%d,%d,%d,%d)\n", mpiRank, rankUp, rankDown, rankLeft, rankRight);
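
	/* Block partition of the interior (the outermost rows/columns are fixed
	 * boundaries): each axis of the (rows-2) x (columns-2) interior is split as
	 * evenly as possible among the procs[0] x procs[1] processes. */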
	procRatio[0] = (double)(rows-2)/procs[0];
	procRatio[1] = (double)(columns-2)/procs[1];
	fprintf(stderr, "CTRL %d, myCoords %d,%d\n", mpiRank, myCoords[0], myCoords[1]);
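
	/* Inclusive index range [begin[d], end[d]] of this process' block in each
	 * dimension; the 1 offset skips the fixed boundary row/column. */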
	begin[0] = 1 + (int)floor( myCoords[0]*procRatio[0] );
	end[0] = 1 + (int)floor( (myCoords[0]+((rows > procs[0]) ? 1 : 0))*procRatio[0] - ((rows > procs[0]) ? 1 : 0) );
	begin[1] = 1 + (int)floor( myCoords[1]*procRatio[1] );
	end[1] = 1 + (int)floor( (myCoords[1]+((columns > procs[1]) ? 1 : 0))*procRatio[1] - ((columns > procs[1]) ? 1 : 0) );
	fprintf(stderr, "CTRL %d, matrix[%d:%d][%d:%d]\n", mpiRank, begin[0], end[0], begin[1], end[1]);
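
	/* Derived datatype for a vertical border: one double per local row, with a
	 * stride of a full matrix row (columns*sizeof(double) bytes), i.e. one column
	 * of the local block. */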
	MPI_Datatype verticalBorderType;
	ok = MPI_Type_hvector( end[0]-begin[0]+1, 1, (int)sizeof(double)*columns, MPI_DOUBLE, &verticalBorderType );
	ok = MPI_Type_commit( &verticalBorderType );
	MPI_Request request[4];
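
	/* Main iteration loop. Per iteration: receive the halo row/column from the up
	 * and left neighbors, update the local block, send the updated borders to all
	 * four neighbors with non-blocking sends, receive the halos from the down and
	 * right neighbors, and finally wait for the sends to complete. */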
	for (loopIndex = 0; loopIndex < numIter; loopIndex++) {
		if ( myCoords[0] != 0 ) {
			ok = MPI_Recv( &element(matrix, begin[0]-1, begin[1]), end[1]-begin[1]+1, MPI_DOUBLE, rankUp, 1, MPI_COMM_WORLD, &status );
		}
		if ( myCoords[1] != 0 ) {
			ok = MPI_Recv( &element(matrix, begin[0], begin[1]-1), 1, verticalBorderType, rankLeft, 3, MPI_COMM_WORLD, &status );
		}
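
		/* Update the local block: each cell becomes the average of its four
		 * neighbors. The in-place sweep uses already-updated values for the
		 * up and left neighbors. */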
		for (i=begin[0]; i<=end[0]; i++) {
			for (j=begin[1]; j<=end[1]; j++) {
				element( matrix,i,j ) = ( element( matrix,i-1,j ) + element( matrix,i+1,j ) +
					element( matrix,i,j-1 ) + element( matrix,i,j+1 ) ) / 4;
			}
		}
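
		/* Propagate the updated borders: the first row/column of the block goes to
		 * the up/left neighbor, the last row/column to the down/right neighbor. */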
		if ( myCoords[0] != 0 ) {
			ok = MPI_Isend( &element(matrix, begin[0], begin[1]), end[1]-begin[1]+1, MPI_DOUBLE, rankUp, 0, MPI_COMM_WORLD, &request[0] );
		}
		if ( myCoords[0] != procs[0]-1 ) {
			ok = MPI_Isend( &element(matrix, end[0], begin[1]), end[1]-begin[1]+1, MPI_DOUBLE, rankDown, 1, MPI_COMM_WORLD, &request[1] );
		}
		if ( myCoords[1] != 0 ) {
			ok = MPI_Isend( &element(matrix,begin[0], begin[1]), 1, verticalBorderType, rankLeft, 2, MPI_COMM_WORLD, &request[2] );
		}
		if ( myCoords[1] != procs[1]-1 ) {
			ok = MPI_Isend( &element(matrix,begin[0],end[1]), 1, verticalBorderType, rankRight, 3, MPI_COMM_WORLD, &request[3] );
		}
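
		/* Receive the halos coming from the down and right neighbors. */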
		if ( myCoords[0] != procs[0]-1 ) {
			ok = MPI_Recv( &element(matrix, end[0]+1, begin[1]), end[1]-begin[1]+1, MPI_DOUBLE, rankDown, 0, MPI_COMM_WORLD, &status );
		}
		if ( myCoords[1] != procs[1]-1 ) {
			ok = MPI_Recv( &element(matrix, begin[0], end[1]+1), 1, verticalBorderType, rankRight, 2, MPI_COMM_WORLD, &status );
		}
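
		/* Complete the outstanding non-blocking sends before the next iteration
		 * reuses the border values. */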
		if ( myCoords[0] != 0 ) {
			MPI_Wait( &request[0], &status );
		}
		if ( myCoords[0] != procs[0]-1 ) {
			MPI_Wait( &request[1], &status );
		}
		if ( myCoords[1] != 0 ) {
			MPI_Wait( &request[2], &status );
		}
		if ( myCoords[1] != procs[1]-1 ) {
			MPI_Wait( &request[3], &status );
		}
	}
	ok = MPI_Type_free( &verticalBorderType );
	for (i=begin[0]; i<=end[0]; i++) {
		for (j=begin[1]; j<=end[1]; j++) {
			fprintf(stderr, "M[%d][%d] %lf\n", i, j, element(matrix,i,j));
		}
	}
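
	/* Result collection: rank 0 receives every other rank's block directly into
	 * the corresponding position of its own full matrix, using a derived hvector
	 * type per remote block. */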
	for ( rank=1; rank<mpiNProc; rank++ ) {
		ok = MPI_Cart_coords(cartesianComm, rank, 2, remoteCoords);
		mpiTestError( ok, "Getting remote cartesian coordinates in end comm." );
		remoteBegin[0] = 1 + (int)floor( remoteCoords[0]*procRatio[0] );
		remoteEnd[0] = 1 + (int)floor( (remoteCoords[0]+((rows > procs[0]) ? 1 : 0))*procRatio[0] - ((rows > procs[0]) ? 1 : 0) );
		remoteBegin[1] = 1 + (int)floor( remoteCoords[1]*procRatio[1] );
		remoteEnd[1] = 1 + (int)floor( (remoteCoords[1]+((columns > procs[1]) ? 1 : 0))*procRatio[1] - ((columns > procs[1]) ? 1 : 0) );

		fprintf(stderr, "CTRL RemoteMatrix %d, [%d:%d][%d:%d]\n", rank, remoteBegin[0], remoteEnd[0], remoteBegin[1], remoteEnd[1]);
		MPI_Datatype remoteType;
		ok = MPI_Type_hvector( remoteEnd[0]-remoteBegin[0]+1, remoteEnd[1]-remoteBegin[1]+1, (int)sizeof(double)*columns, MPI_DOUBLE, &remoteType );
		ok = MPI_Type_commit( &remoteType );
		mpiTestError( ok, "Committing the remote type in end comm." );

		ok = MPI_Recv( &element( matrix,remoteBegin[0],remoteBegin[1] ), 1, remoteType, rank, rank, MPI_COMM_WORLD, &status );
		mpiTestError( ok, "Receiving remote parts in end comm." );

		ok = MPI_Type_free( &remoteType );
	}
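
	/* On every other rank: describe the local block with an equivalent hvector
	 * type and send it to rank 0, tagged with the sender's rank. */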
	fprintf(stderr, "CTRL LocalMatrix %d, [%d:%d][%d:%d]\n", mpiRank, begin[0], end[0], begin[1], end[1]);
	MPI_Datatype localType;
	ok = MPI_Type_hvector( end[0]-begin[0]+1, end[1]-begin[1]+1, (int)sizeof(double)*columns, MPI_DOUBLE, &localType );
	ok = MPI_Type_commit( &localType );
	mpiTestError( ok, "Committing the local type in end comm." );

	ok = MPI_Send( &element( matrix,begin[0],begin[1] ), 1, localType, 0, mpiRank, MPI_COMM_WORLD );

	ok = MPI_Type_free( &localType );
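
	/* Reduce the per-rank clock value with MPI_MAX so rank 0 obtains the time of
	 * the slowest process. */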
	MPI_Reduce( &clock, &clockReduce, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD );
	if ((f=fopen("Result.out.dtxt", "w")) == NULL) {
		fprintf(stderr, "Error: Impossible to open output file\n");
		/* ... */
	}

	for (i=0; i<rows; i++) {
		for (j=0; j<columns; j++) {
			fprintf(f, "%014.4lf", element(matrix,i,j) );