Hitmap 1.3
hit_pattern.c
1 
15 /*
16  * <license>
17  *
18  * Hitmap v1.2
19  *
20  * This software is provided to enhance knowledge and encourage progress in the scientific
21  * community. It should be used only for research and educational purposes. Any reproduction
22  * or use for commercial purpose, public redistribution, in source or binary forms, with or
23  * without modifications, is NOT ALLOWED without the previous authorization of the copyright
24  * holder. The origin of this software must not be misrepresented; you must not claim that you
25  * wrote the original software. If you use this software for any purpose (e.g. publication),
26  * a reference to the software package and the authors must be included.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS IS" AND ANY
29  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
30  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
31  * THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
32  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
35  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37  *
38  * Copyright (c) 2007-2015, Trasgo Group, Universidad de Valladolid.
39  * All rights reserved.
40  *
41  * More information on http://trasgo.infor.uva.es/
42  *
43  * </license>
44 */
45 
46 #include <stdio.h>
47 #include <stdlib.h>
48 #include <hit_pattern.h>
49 #include <hit_com.h>
50 #include <hit_sshape.h>
51 #include <hit_cshape.h>
52 #include <hit_bshape.h>
53 
54 
55 /* Hit COMMUNICATION PATTERNS */
56 /* Hit INITIALIZE NULL PATTERN */
57 HitPattern HIT_PATTERN_NULL = HIT_PATTERN_NULL_STATIC;
58 
59 
60 /* Hit ADD COMM TO PATTERN */
61 void hit_patternAdd( HitPattern *pattern, HitCom comm ) {
62  HitPatS *tmp;
63  // @arturo Ago 2015: New allocP interface
64  // hit_malloc(tmp,sizeof(HitPatS),HitPatS*);
65  hit_malloc(tmp, HitPatS, 1);
66 
67  /* COPY COMM SPEC IN THE PATTERN */
68  tmp->commSpec = comm;
69 
70  /* ADVANCE THE NUMBER OF COMM IN THE PATTERN */
71  pattern->numComm++;
72 
73  /* LINK THE FIFO */
74  tmp->next = NULL;
75  if ( pattern->first == NULL ) {
76  pattern->first = tmp;
77  pattern->last = tmp;
78  }
79  else {
80  pattern->last->next = tmp;
81  pattern->last = tmp;
82  }
83 }
84 
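/* Hit COMPOSE TWO PATTERNS: splices the list of pattern2 directly into pattern
 * without copying the nodes. After the call both patterns share the same
 * HitPatS elements, so pattern2 should not be freed separately with
 * hit_patternFree() (that would release the shared nodes twice). */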
85 void hit_patternCompose( HitPattern *pattern, HitPattern *pattern2 ) {
86 
87  pattern->last->next = pattern2->first;
88  pattern->last = pattern2->last;
89  pattern->numComm += pattern2->numComm;
90 }
91 
92 
93 /* Hit FREE PATTERN */
94 void hit_patternFree( HitPattern *pattern ) {
95  HitPatS *elem, *tmp;
96  for (elem = pattern->first; elem != NULL; elem=tmp ) {
97  tmp = elem->next;
98  hit_comFree(elem->commSpec);
99  free(elem);
100  }
101  pattern->numComm = 0;
102  pattern->first = NULL;
103  pattern->last = NULL;
104 }
105 
106 /* Hit EXECUTE PATTERN: ORDERED MODE */
107 void hit_patternDoOrdered( HitPattern pattern ) {
108  HitPatS *elem;
109  /* 1. EXECUTE COMM SPECIFICATIONS IN ORDER */
110  for (elem = pattern.first; elem != NULL; elem = elem->next) {
111  hit_comDo( &(elem->commSpec) );
112  }
113 }
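
/* Usage sketch (illustrative): minimal life cycle of a pattern, assuming a
 * HitCom object built elsewhere (e.g. with the hit_comSendSelectTag() /
 * hit_comRecvSelectTag() constructors declared in hit_com.h). */
static void example_patternLifeCycle( HitCom neighborComm ) {
    /* Create an empty pattern in unordered mode */
    HitPattern example = hit_pattern( HIT_PAT_UNORDERED );
    /* Enqueue the communication (FIFO order is preserved) */
    hit_patternAdd( &example, neighborComm );
    /* Execute it; a pattern may be executed many times before being freed */
    hit_patternDoUnordered( example );
    /* Release the internal list and its HitCom objects */
    hit_patternFree( &example );
}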
114 
115 
116 /* Hit EXECUTE PATTERN: UNORDERED MODE */
117 void hit_patternDoUnordered( HitPattern pattern ) {
118  HitPatS *elem;
119  MPI_Request allRequest[ pattern.numComm * 2 ];
120  int cont;
121 
122  // @arturo: TEST WITH THE WAIT HANDLERS IN SPLIT ARRAYS
123  //MPI_Request sendRequest[ pattern.numComm ];
124  //MPI_Request recvRequest[ pattern.numComm ];
125 
126  /* 1. START ISENDs FOR ALL SPECIFICATIONS */
127  cont = 0;
128  for (elem = pattern.first; elem != NULL; elem=elem->next ) {
129  if(elem->commSpec.commType == HIT_SENDRECV){
130  hit_comStartSend( &(elem->commSpec) );
131  allRequest[ cont*2 ] = elem->commSpec.requestSend;
132  // sendRequest[ cont ] = elem->commSpec.requestSend;
133  hit_comStartRecv( &(elem->commSpec) );
134  allRequest[ cont*2+1 ] = elem->commSpec.requestRecv;
135  // recvRequest[ cont ] = elem->commSpec.requestRecv;
136  cont++;
137  }
138  }
139 
140  /* 2. CALL THE PROPER COUPLED/COLLECTIVE FUNCTIONS TO DO THE PATTERN */
141  for (elem = pattern.first; elem != NULL; elem=elem->next ) {
142  switch (elem->commSpec.commType) {
143  case HIT_COMTYPE_NULL:
144  /* SKIP */
145  break;
146  case HIT_SENDRECV:
147  /* NOTHING */
148  break;
149  case HIT_SENDRECV_REPLACE:
150  hit_comDoSendRecvReplace(&(elem->commSpec));
151  break;
152  case HIT_REDUCE:
153  hit_comDoReduce(&(elem->commSpec));
154  break;
155  case HIT_ALLTOALL:
156  hit_comDoAlltoall(&(elem->commSpec));
157  break;
158  case HIT_BROADCAST:
159  hit_comDoBroadcast(&(elem->commSpec));
160  break;
161  case HIT_ALLTOALLV:
162  hit_comDoAlltoallv(&(elem->commSpec));
163  break;
164  default:
165  hit_errInternal("patternDoUnordered", "Unknown type of communication object", "", __FILE__, __LINE__);
166  break;
167  }
168  }
169 
170  /* 3. WAIT FOR COUPLED ISEND/IRECV TO COMPLETE */
171 #ifdef DEBUG
172  printf("CTRL %d - Waiting sends and recvs\n", hit_Rank);
173 #endif
174  int ok = MPI_Waitall( cont*2, allRequest, MPI_STATUSES_IGNORE );
175  hit_mpiTestError(ok,"Failed wait all");
176 #ifdef DEBUG
177  printf("CTRL %d - End waiting sends and recvs\n", hit_Rank);
178 #endif
179  //int ok = MPI_Waitall( cont, recvRequest, MPI_STATUSES_IGNORE );
181  //ok = MPI_Waitall( cont, sendRequest, MPI_STATUSES_IGNORE );
182  //hit_mpiTestError(ok,"Failed wait all");
183 }
184 
185 
186 /* Hit EXECUTE PATTERN: START ASYNC */
187 void hit_patternStartAsync( HitPattern pattern ) {
188  HitPatS *elem;
189 
190  /* 1. START ISEND/IRECV FOR ALL SPECIFICATIONS */
191  for (elem = pattern.first; elem != NULL; elem=elem->next ) {
192  if(elem->commSpec.commType == HIT_SENDRECV){
193  hit_comStartSend( &(elem->commSpec) );
194  hit_comStartRecv( &(elem->commSpec) );
195  }
196  }
197 }
198 
199 /* Hit EXECUTE PATTERN: END ASYNC */
200 void hit_patternEndAsync( HitPattern pattern ) {
201  HitPatS *elem;
202  MPI_Request allRequest[ pattern.numComm * 2 ];
203  int cont;
204 
205  /* 1. COLLECT REQUEST OBJECTS IN AN ARRAY */
206  cont = 0;
207  for (elem = pattern.first; elem != NULL; elem=elem->next ) {
208  if(elem->commSpec.commType == HIT_SENDRECV){
209  allRequest[ cont*2 ] = elem->commSpec.requestSend;
210  allRequest[ cont*2+1 ] = elem->commSpec.requestRecv;
211  cont++;
212  }
213  }
214 
215  /* 2. CALL THE PROPER COUPLED/COLLECTIVE FUNCTIONS TO DO THE PATTERN */
216  for (elem = pattern.first; elem != NULL; elem=elem->next ) {
217  switch (elem->commSpec.commType) {
218  case HIT_COMTYPE_NULL:
219  /* SKIP */
220  break;
221  case HIT_SENDRECV:
222  /* NOTHING */
223  break;
224  case HIT_SENDRECV_REPLACE:
225  hit_comDoSendRecvReplace(&(elem->commSpec));
226  break;
227  case HIT_REDUCE:
228  hit_comDoReduce(&(elem->commSpec));
229  break;
230  case HIT_ALLTOALL:
231  hit_comDoAlltoall(&(elem->commSpec));
232  break;
233  case HIT_BROADCAST:
234  hit_comDoBroadcast(&(elem->commSpec));
235  break;
236  case HIT_ALLTOALLV:
237  hit_comDoAlltoallv(&(elem->commSpec));
238  break;
239  default:
240  hit_errInternal("patternEndAsync", "Unknown type of communication object", "", __FILE__, __LINE__);
241  break;
242  }
243  }
244 
245  /* 3. WAIT FOR COUPLED ISEND/IRECV TO COMPLETE */
246 #ifdef DEBUG
247  printf("CTRL %d - Waiting sends and recvs\n", hit_Rank);
248 #endif
249  int ok = MPI_Waitall( cont*2, allRequest, MPI_STATUSES_IGNORE );
250  hit_mpiTestError(ok,"Failed wait all");
251 }
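
/* Usage sketch (illustrative): overlapping local computation with the
 * point-to-point part of a pattern. Only HIT_SENDRECV entries are started
 * asynchronously; collective entries are issued, and all pending requests
 * waited for, inside hit_patternEndAsync(). */
static void example_overlapComputation( HitPattern p ) {
    hit_patternStartAsync( p );   /* post ISend/IRecv for the HIT_SENDRECV entries */
    /* ... compute here on data not involved in the pattern ... */
    hit_patternEndAsync( p );     /* issue collectives and wait for the requests */
}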
252 
253 
254 
255 /* Hit PATTERN: SPECIFIC PATTERNS FOR SPECIAL COMPLEX COMMUNICATIONS */
256 
257 /*
258  * @arturo Jan 2015
259  * hit_patternLayRedistribute2: Redistribute a tile allocated with a given layout,
260  * as indicated by another layout, WHEN topologies of the layouts are different.
261  * For layouts using the same topology see hit_patternLayRedistribute
262  * Use hit_patternLayRedistribute() as a front-end for any case.
263  *
264  * Restrictions:
265  * 1) The tile should have been allocated using the local shape from the first layout
266  */
267 #define HIT_PAT_REDISTRIBUTE2_TAG 15005
268 HitPattern hit_patternLayRedistribute2( HitLayout lay1, HitLayout lay2, void *tileP1, void *tileP2, HitType baseType ) {
269  int i;
270 
271  // @arturo 2015/01/03: Eliminate the requirement for topologies associated to both layouts
272  // being the same
273  /* 1. CHECK THAT THE TOPOLOGY IS THE SAME IN BOTH LAYOUTS */
274  /*
275  if ( ! hit_ptopoCmp( lay1.topo.pTopology, lay2.topo.pTopology ) )
276  hit_errInternal( __FUNCTION__, "Layouts with different topologies", "", __FILE__, __LINE__ );
277  */
278 
279  /* 2. SKIP IF MY PROC IS NOT ACTIVE IN ANY LAYOUT */
280  if ( ! hit_layImActive( lay1 ) && ! hit_layImActive( lay2 ) ) return HIT_PATTERN_NULL;
281 
282  /* 3. DECLARE THE NEW COMM. PATTERN */
283  HitPattern allToAll = hit_pattern( HIT_PAT_UNORDERED );
284  // @arturo 2015/01/03 Two possible numbers of processors when the topologies are not the same
285  //int myRank = lay1.topo.linearRank;
286  //int numProcs = hit_topCard( lay1.topo );
287  // @arturo 2015/01/22
288  //int myRank1 = lay1.topo.linearRank;
289  //int myRank2 = lay1.topo.linearRank;
290  int myRank1 = hit_topSelfRankInternal( lay1.topo );
291  int myRank2 = hit_topSelfRankInternal( lay2.topo );
292  int numProcs1 = hit_topCard( lay1.topo );
293  int numProcs2 = hit_topCard( lay2.topo );
294 
295  // @arturo 2015/01/03: Dummy layout used to hold the global topology communicator
296  MPI_Group gLay1;
297  MPI_Group gLay2;
298  MPI_Group gGlobal;
299  MPI_Comm_group( lay1.topo.pTopology->global->comm, &gGlobal );
300  // @arturo 2015/01/07 Bug corrected: Get info for the active group on each layout
301  if ( lay1.topo.active ) MPI_Comm_group( lay1.topo.pTopology->comm, &gLay1 );
302  else gLay1 = lay1.topo.pTopology->antiCommGroup;
303  if ( lay2.topo.active ) MPI_Comm_group( lay2.topo.pTopology->comm, &gLay2 );
304  else gLay2 = lay2.topo.pTopology->antiCommGroup;
305 
306  int myRankGlobal = lay1.topo.pTopology->global->selfRank;
307 
308  HitLayout fooGlobal = HIT_LAYOUT_NULL;
309  fooGlobal.active = 1;
310  fooGlobal.topo.pTopology = lay1.topo.pTopology->global;
311  fooGlobal.topo.numDims = 1;
312  fooGlobal.topo.card[0] = fooGlobal.topo.pTopology->numProcs;
313  fooGlobal.topo.active = 1;
314  //fooGlobal.topo.linearRank = myRankGlobal;
315 
316 #ifdef DEBUG
317 printf("Ranks: Lay1: %d, Lay2: %d, Global: %d\n", myRank1, myRank2, myRankGlobal );
318 #endif
319 
320  /* 4. ONLY COMPUTE SENDS IF I'M ACTIVE IN THE FIRST LAYOUT (I HAVE DATA) */
321  if ( hit_layImActive( lay1 ) ) {
322  HitShape localShp = hit_layShape( lay1 );
323 
324 #ifdef DEBUG
325 printf("Ranks: Lay1: %d, Lay2: %d, Global: %d ACTIVE in Lay1\n", myRank1, myRank2, myRankGlobal );
326 #endif
327  /* FOR ALL PROCESSORS IN THE TOPOLOGY */
328  /* (FUNCTIONS INTERNALLY CHECK ACTIVE STATUS IN TOPOLOGY AND LAYOUT) */
329  /* REORDER THE ORDER OF EXPLORATION TO CREATE A SKEWED PATTERN AND
330  * AVOID POTENTIAL COMM. BOTTLENECKS */
331  // @arturo 2015/01/03 Two possible numbers of processors when the topologies are not the same
332  //for ( i=0; i<numProcs; i++ ) {
333  // int foreignId = ( myRank + i ) % numProcs;
334  for ( i=0; i<numProcs2; i++ ) {
335  // This processor may not be active in lay2. For the skewing it does not matter
336  // which real processor we start at; it only matters that the starting points
337  // are skewed. Thus, myRank1 is used anyway, so each processor in lay1 starts
338  // its communications at a different processor of lay2 (when possible: lay2
339  // may contain fewer processors than lay1)
340  int foreignId = ( myRank1 + i ) % numProcs2;
341  int foreignIdGlobal;
342  MPI_Group_translate_ranks( gLay2, 1, &foreignId, gGlobal, &foreignIdGlobal );
343 
344  HitRanks foreignProc1 = hit_topRanksInternal( lay1.topo, foreignId );
345  HitRanks foreignProc2 = hit_topRanksInternal( lay2.topo, foreignId );
346  HitShape foreignShp1 = hit_layShapeOther( lay1, foreignProc1 );
347  HitShape foreignShp2 = hit_layShapeOther( lay2, foreignProc2 );
348 #ifdef DEBUG
349 printf("[%d] Topology: NumDims: %d, Cards:(%d,%d)\n", hit_Rank,
350  lay1.topo.numDims, lay1.topo.card[0],
351  lay1.topo.card[1] );
352 printf("[%d] Send checking foreign: %d (%d) Ranks1:(%d,%d), Ranks2:(%d,%d)\n", hit_Rank, foreignId, foreignIdGlobal,
353  foreignProc1.rank[0],
354  foreignProc1.rank[1],
355  foreignProc2.rank[0],
356  foreignProc2.rank[1]
357  );
358 #endif
359 
360  /* INTERSECTION: NON-EMPTY IMPLIES COMMUNICATION */
361  /* FOR COPY SIG. LAYOUTS: SKIP COMM. IF THE FOREIGN PROC. ALREADY HAS THIS SHAPE */
362  HitShape overlapShp2 = hit_shapeIntersect( localShp, foreignShp2 );
363  HitShape alreadyThere = hit_shapeIntersect( overlapShp2, foreignShp1 );
364 
365 #ifdef DEBUG
366 printf("[%d] Send shapes: f1 [%d:%d][%d:%d], f2:[%d:%d][%d:%d], over2:[%d:%d][%d:%d], alreadyThere:[%d:%d][%d:%d]\n", hit_Rank,
367  hit_shapeSig(foreignShp1,0).begin,
368  hit_shapeSig(foreignShp1,0).end,
369  hit_shapeSig(foreignShp1,1).begin,
370  hit_shapeSig(foreignShp1,1).end,
371  hit_shapeSig(foreignShp2,0).begin,
372  hit_shapeSig(foreignShp2,0).end,
373  hit_shapeSig(foreignShp2,1).begin,
374  hit_shapeSig(foreignShp2,1).end,
375  hit_shapeSig(overlapShp2,0).begin,
376  hit_shapeSig(overlapShp2,0).end,
377  hit_shapeSig(overlapShp2,1).begin,
378  hit_shapeSig(overlapShp2,1).end,
379  hit_shapeSig(alreadyThere,0).begin,
380  hit_shapeSig(alreadyThere,0).end,
381  hit_shapeSig(alreadyThere,1).begin,
382  hit_shapeSig(alreadyThere,1).end
383  );
384 #endif
385  // In this comparison, the i==0 test is replaced by checking that the global
386  // ranks of the local and the foreign proc. are the same, since the local proc.
387  // can have different ranks in the different topologies.
388  //if ( ! hit_shapeCmp( overlapShp2, HIT_SHAPE_NULL )
389  // && ( i == 0 || hit_shapeCmp( alreadyThere, HIT_SHAPE_NULL ) ) ) {
390  if ( ! hit_shapeCmp( overlapShp2, HIT_SHAPE_NULL )
391  && ( myRankGlobal == foreignIdGlobal
392  || hit_shapeCmp( alreadyThere, HIT_SHAPE_NULL ) ) ) {
393 
394 #ifdef DEBUG
395 fprintf(stderr, "%s Adding send from %d to %d with shape %d [%d:%d:%d][%d:%d:%d]\n", __FUNCTION__, myRank1, foreignId,
396  hit_shapeDims( overlapShp2 ),
397  hit_shapeSig( overlapShp2, 0 ).begin,
398  hit_shapeSig( overlapShp2, 0 ).end,
399  hit_shapeSig( overlapShp2, 0 ).stride,
400  hit_shapeSig( overlapShp2, 1 ).begin,
401  hit_shapeSig( overlapShp2, 1 ).end,
402  hit_shapeSig( overlapShp2, 1 ).stride
403  );
404 
405 printf("Ranks: Lay1: %d, Lay2: %d, Global: %d COM to RLay2: %d, RGlobal: %d\n", myRank1, myRank2, myRankGlobal, foreignId, foreignIdGlobal );
406 #endif
407 
408  // @arturo 2015/01/03 Translate to global ranks for the foo Layout
409  HitRanks foreignProcGlobal = HIT_RANKS_NULL;
410  foreignProcGlobal.rank[0] = foreignIdGlobal;
411 
412  // @arturo 2015/01/03 Use the foo layout in order to use the global communicator
413  /*
414  hit_patternAdd( &allToAll,
415  hit_comSendSelectTag(
416  lay2, foreignProc, tileP1, overlapShp2, HIT_COM_ARRAYCOORDS, baseType,
417  HIT_PAT_REDISTRIBUTE_TAG
418  )
419  );
420  */
421  hit_patternAdd( &allToAll,
422  hit_comSendSelectTag(
423  fooGlobal, foreignProcGlobal, tileP1, overlapShp2, HIT_COM_ARRAYCOORDS,
424  baseType, HIT_PAT_REDISTRIBUTE2_TAG
425  )
426  );
427  }
428  }
429  }
430 
431  /* 6. ONLY COMPUTE RECEIVES IF I'M ACTIVE IN THE SECOND LAYOUT (I WILL HAVE DATA) */
432  if ( hit_layImActive( lay2 ) ) {
433  HitShape localShp1 = hit_layShape( lay1 );
434  HitShape localShp2 = hit_layShape( lay2 );
435 
436 #ifdef DEBUG
437 printf("Ranks: Lay1: %d, Lay2: %d, Global: %d ACTIVE in Lay2\n", myRank1, myRank2, myRankGlobal );
438 #endif
439 
440  /* FOR ALL PROCESSORS IN THE TOPOLOGY */
441  /* (FUNCTIONS INTERNALLY CHECK ACTIVE STATUS IN TOPOLOGY AND LAYOUT) */
442  /* REORDER THE ORDER OF EXPLORATION TO CREATE A SKEWED PATTERN AND
443  * AVOID POTENTIAL COMM. BOTTLENECKS */
444  //for ( i=0; i<numProcs; i++ ) {
445  // int foreignId = ( myRank + i ) % numProcs;
446  for ( i=0; i<numProcs1; i++ ) {
447  int foreignId = ( myRank2 + i ) % numProcs1;
448  int foreignIdGlobal;
449  MPI_Group_translate_ranks( gLay1, 1, &foreignId, gGlobal, &foreignIdGlobal );
450 #ifdef DEBUG
451 printf("[%d] Recv checking foreign: %d (%d)\n", hit_Rank, foreignId, foreignIdGlobal );
452 #endif
453 
454  HitRanks foreignProc = hit_topRanksInternal( lay1.topo, foreignId );
455 #ifdef DEBUG
456 printf("[%d] Foreign Ranks: %d,%d,%d,%d\n", hit_Rank,
457  foreignProc.rank[0],
458  foreignProc.rank[1],
459  foreignProc.rank[2],
460  foreignProc.rank[3]
461  );
462 #endif
463  HitShape foreignShp1 = hit_layShapeOther( lay1, foreignProc );
464 
465  /* INTERSECTION: NON-EMPTY IMPLIES COMMUNICATION */
466  /* FOR COPY SIG. LAYOUTS: SKIP COMM. IF THE FOREIGN PROC. ALREADY HAS THIS SHAPE */
467  HitShape overlapShp = hit_shapeIntersect( localShp2, foreignShp1 );
468  HitShape alreadyHere = hit_shapeIntersect( overlapShp, localShp1 );
469 
470 #ifdef DEBUG
471 printf("[%d] After shapes: %d (%d)\n", hit_Rank, foreignId, foreignIdGlobal );
472 #endif
473  //if ( ! hit_shapeCmp( overlapShp, HIT_SHAPE_NULL )
474  // && ( i == 0 || hit_shapeCmp( alreadyHere, HIT_SHAPE_NULL ) ) ) {
475  if ( ! hit_shapeCmp( overlapShp, HIT_SHAPE_NULL )
476  && ( myRankGlobal == foreignIdGlobal
477  || hit_shapeCmp( alreadyHere, HIT_SHAPE_NULL ) ) ) {
478 
479 #ifdef DEBUG
480 fprintf(stderr, "%s Adding recv from %d to %d with shape %d [%d:%d:%d][%d:%d:%d]\n", __FUNCTION__, foreignId, myRank2,
481  hit_shapeDims( overlapShp ),
482  hit_shapeSig( overlapShp, 0 ).begin,
483  hit_shapeSig( overlapShp, 0 ).end,
484  hit_shapeSig( overlapShp, 0 ).stride,
485  hit_shapeSig( overlapShp, 1 ).begin,
486  hit_shapeSig( overlapShp, 1 ).end,
487  hit_shapeSig( overlapShp, 1 ).stride
488  );
489 #endif
490  // @arturo 2015/01/03 Translate to global ranks for the foo Layout
491  HitRanks foreignProcGlobal = HIT_RANKS_NULL;
492  foreignProcGlobal.rank[0] = foreignIdGlobal;
493 
494 #ifdef DEBUG
495 printf("Ranks: Lay1: %d, Lay2: %d, Global: %d COM to RLay2: %d, RGlobal: %d\n", myRank1, myRank2, myRankGlobal, foreignId, foreignIdGlobal );
496 #endif
497  /*
498  hit_patternAdd( &allToAll,
499  hit_comRecvSelectTag(
500  lay1, foreignProc, tileP2, overlapShp, HIT_COM_ARRAYCOORDS, baseType,
501  HIT_PAT_REDISTRIBUTE_TAG
502  )
503  );
504  */
505  hit_patternAdd( &allToAll,
506  hit_comRecvSelectTag(
507  fooGlobal, foreignProcGlobal, tileP2, overlapShp, HIT_COM_ARRAYCOORDS,
508  baseType, HIT_PAT_REDISTRIBUTE2_TAG
509  )
510  );
511  }
512 #ifdef DEBUG
513  else
514  printf("[%d] Fail condition: %d (%d)\n", hit_Rank, foreignId, foreignIdGlobal );
515 #endif
516  }
517  }
518 
519  /* 7. RETURN */
520  return allToAll;
521 }
522 
523 /*
524  * @arturo Feb 2013
525  * hit_patternLayRedistribute: Redistribute a tile allocated with a given layout,
526  * as indicated by another layout.
527  * Restrictions:
528  * 1) The two layouts should have been built using the same topology
529  * (Eliminated: it now calls hit_patternLayRedistribute2(), which handles that case)
530  * 2) The tile should have been allocated using the local shape from the first layout
531  */
532 #define HIT_PAT_REDISTRIBUTE_TAG 15001
533 HitPattern hit_patternLayRedistribute( HitLayout lay1, HitLayout lay2, void *tileP1, void *tileP2, HitType baseType ) {
534  int i;
535 
536  /* 1. CHECK THAT THE TOPOLOGY IS THE SAME IN BOTH LAYOUTS */
537  // @arturo 2015/01/05 In case topologies are different, call the Redistribute2 function
538  /*
539  if ( ! hit_ptopoCmp( lay1.topo.pTopology, lay2.topo.pTopology ) )
540  hit_errInternal( __FUNCTION__, "Layouts with different topologies", "", __FILE__, __LINE__ );
541  */
542  if ( ! hit_ptopoCmp( lay1.topo.pTopology, lay2.topo.pTopology ) )
543  return hit_patternLayRedistribute2( lay1, lay2, tileP1, tileP2, baseType );
544 
545  /* 2. SKIP IF MY PROC IS NOT ACTIVE IN ANY LAYOUT */
546  if ( ! hit_layImActive( lay1 ) && ! hit_layImActive( lay2 ) ) return HIT_PATTERN_NULL;
547 
548  /* 3. DECLARE THE NEW COMM. PATTERN */
549  HitPattern allToAll = hit_pattern( HIT_PAT_UNORDERED );
550  // @arturo 2015/01/22
551  //int myRank = lay1.topo.linearRank;
552  int myRank = hit_topSelfRankInternal( lay1.topo );
553  int numProcs = hit_topCard( lay1.topo );
554 
555  /* 4. ONLY COMPUTE SENDS IF I'M ACTIVE IN THE FIRST LAYOUT (I HAVE DATA) */
556  if ( hit_layImActive( lay1 ) ) {
557  HitShape localShp = hit_layShape( lay1 );
558 
559  /* FOR ALL PROCESSORS IN THE TOPOLOGY */
560  /* (FUNCTIONS INTERNALLY CHECK ACTIVE STATUS IN TOPOLOGY AND LAYOUT) */
561  /* REORDER THE ORDER OF EXPLORATION TO CREATE A SKEWED PATTERN AND
562  * AVOID POTENTIAL COMM. BOTTLENECKS */
563  for ( i=0; i<numProcs; i++ ) {
564  int foreignId = ( myRank + i ) % numProcs;
565 
566  HitRanks foreignProc = hit_topRanksInternal( lay2.topo, foreignId );
567  HitShape foreignShp1 = hit_layShapeOther( lay1, foreignProc );
568  HitShape foreignShp2 = hit_layShapeOther( lay2, foreignProc );
569 
570  /* INTERSECTION: NON-EMPTY IMPLIES COMMUNICATION */
571  /* FOR COPY SIG. LAYOUTS: SKIP COMM. IF THE FOREIGN PROC. ALREADY HAS THIS SHAPE */
572  HitShape overlapShp2 = hit_shapeIntersect( localShp, foreignShp2 );
573  HitShape alreadyThere = hit_shapeIntersect( overlapShp2, foreignShp1 );
574 
575  if ( ! hit_shapeCmp( overlapShp2, HIT_SHAPE_NULL )
576  && ( i == 0 || hit_shapeCmp( alreadyThere, HIT_SHAPE_NULL ) ) ) {
577 
578 #ifdef DEBUG
579 fprintf(stderr, "%s Adding send from %d to %d with shape %d [%d:%d:%d][%d:%d:%d]\n", __FUNCTION__, myRank, foreignId,
580  hit_shapeDims( overlapShp2 ),
581  hit_shapeSig( overlapShp2, 0 ).begin,
582  hit_shapeSig( overlapShp2, 0 ).end,
583  hit_shapeSig( overlapShp2, 0 ).stride,
584  hit_shapeSig( overlapShp2, 1 ).begin,
585  hit_shapeSig( overlapShp2, 1 ).end,
586  hit_shapeSig( overlapShp2, 1 ).stride
587  );
588 #endif
589  hit_patternAdd( &allToAll,
590  hit_comSendSelectTag(
591  lay2, foreignProc, tileP1, overlapShp2, HIT_COM_ARRAYCOORDS, baseType,
592  HIT_PAT_REDISTRIBUTE_TAG
593  )
594  );
595  }
596  }
597  }
598 
599  /* 6. ONLY COMPUTE RECEIVES IF I'M ACTIVE IN THE SECOND LAYOUT (I WILL HAVE DATA) */
600  if ( hit_layImActive( lay2 ) ) {
601  HitShape localShp1 = hit_layShape( lay1 );
602  HitShape localShp2 = hit_layShape( lay2 );
603 
604  /* FOR ALL PROCESSORS IN THE TOPOLOGY */
605  /* (FUNCTIONS INTERNALLY CHECK ACTIVE STATUS IN TOPOLOGY AND LAYOUT) */
606  /* REORDER THE ORDER OF EXPLORATION TO CREATE A SKEWED PATTERN AND
607  * AVOID POTENTIAL COMM. BOTTLENECKS */
608  for ( i=0; i<numProcs; i++ ) {
609  int foreignId = ( myRank + i ) % numProcs;
610 
611  HitRanks foreignProc = hit_topRanksInternal( lay1.topo, foreignId );
612  HitShape foreignShp1 = hit_layShapeOther( lay1, foreignProc );
613 
614  /* INTERSECTION: NON-EMPTY IMPLIES COMMUNICATION */
615  /* FOR COPY SIG. LAYOUTS: SKIP COMM. IF THE FOREIGN PROC. ALREADY HAS THIS SHAPE */
616  HitShape overlapShp = hit_shapeIntersect( localShp2, foreignShp1 );
617  HitShape alreadyHere = hit_shapeIntersect( overlapShp, localShp1 );
618 
619  if ( ! hit_shapeCmp( overlapShp, HIT_SHAPE_NULL )
620  && ( i == 0 || hit_shapeCmp( alreadyHere, HIT_SHAPE_NULL ) ) ) {
621 
622 #ifdef DEBUG
623 fprintf(stderr, "%s Adding recv from %d to %d with shape %d [%d:%d:%d][%d:%d:%d]\n", __FUNCTION__, foreignId, myRank,
624  hit_shapeDims( overlapShp ),
625  hit_shapeSig( overlapShp, 0 ).begin,
626  hit_shapeSig( overlapShp, 0 ).end,
627  hit_shapeSig( overlapShp, 0 ).stride,
628  hit_shapeSig( overlapShp, 1 ).begin,
629  hit_shapeSig( overlapShp, 1 ).end,
630  hit_shapeSig( overlapShp, 1 ).stride
631  );
632 #endif
633  hit_patternAdd( &allToAll,
634  hit_comRecvSelectTag(
635  lay1, foreignProc, tileP2, overlapShp, HIT_COM_ARRAYCOORDS, baseType,
636  HIT_PAT_REDISTRIBUTE_TAG
637  )
638  );
639  }
640  }
641  }
642 
643  /* 7. RETURN */
644  return allToAll;
645 }
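
/* Usage sketch (illustrative): redistributing a tile from the partition of
 * lay1 to the partition of lay2. The layouts and tiles are assumed to be
 * created elsewhere, with srcTile allocated on the local shape of lay1 and
 * dstTile on the local shape of lay2, both holding double elements (HitType
 * is an MPI_Datatype, so MPI_DOUBLE can be passed directly). */
static void example_redistributeTile( HitLayout lay1, HitLayout lay2,
                                      HitTile *srcTile, HitTile *dstTile ) {
    HitPattern redist = hit_patternLayRedistribute( lay1, lay2,
                                                    srcTile, dstTile, MPI_DOUBLE );
    hit_patternDoUnordered( redist );  /* the pattern is built in HIT_PAT_UNORDERED mode */
    hit_patternFree( &redist );
}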
646 
647 
648 /*
649  * @arturo Feb 2013
650  * hit_patternRedistribute: Redistribute data of a distributed tile into another distributed tile
651  */
652 #define HIT_PAT_REDISTRIBUTE_TAG2 15002
653 HitPattern hit_patternRedistribute( HitLayout lay, void *tileP1, void *tileP2, HitType baseType, int flagCompact ) {
654  int i;
655  HitTile tile1 = *(HitTile *)tileP1;
656  HitTile tile2 = *(HitTile *)tileP2;
657 
658 
659  /* 0. SKIP IF MY PROC IS NOT ACTIVE IN THE LAYOUT */
660  if ( ! hit_layImActive( lay ) ) return HIT_PATTERN_NULL;
661 
662  /* 1. COMMUNICATE THE SHAPES OF ALL THE PROCESSES WITH ALLGATHER */
663  //int myRank = lay.topo.linearRank;
664  //int numProcs = hit_topCard( lay.topo );
665  int myRank = lay.pTopology[0]->selfRank;
666  int numProcs = lay.pTopology[0]->numProcs;
667  HitSigShape tileSigShapes[ numProcs ][2];
668  HitShape tileShapes[ numProcs ][2];
669  // @javfres Initialize this to avoid warnings
670  HitShape tileShapesOrig[2] = {HIT_SHAPE_NULL_STATIC,HIT_SHAPE_NULL_STATIC};
671  int prefixSums[2][ HIT_MAXDIMS ];
672 
673  /*
674  //if ( ! hit_topImActive( hit_layTopology( lay ) ) ) {
675  if ( ! hit_layImActive( lay ) ) {
676  tileSigShapes[ myRank ][0] = hit_sShapeAccess( HIT_SHAPE_NULL );
677  tileSigShapes[ myRank ][1] = hit_sShapeAccess( HIT_SHAPE_NULL );
678  } else {
679  */
680  tileSigShapes[ myRank ][0] = hit_sShapeAccess( hit_tileShape( tile1 ) );
681  tileSigShapes[ myRank ][1] = hit_sShapeAccess( hit_tileShape( tile2 ) );
682  /*
683  }
684  */
685 
686 #ifdef DEBUG
687 printf("[%d](%d) CTRL Redistribute: Before Allgather Local Send[%d:%d:%d][%d:%d:%d] Recv[%d:%d:%d][%d:%d:%d]\n",
688  hit_Rank, numProcs,
689  tileSigShapes[ myRank ][ 0 ].sig[ 0 ].begin,
690  tileSigShapes[ myRank ][ 0 ].sig[ 0 ].end,
691  tileSigShapes[ myRank ][ 0 ].sig[ 0 ].stride,
692  tileSigShapes[ myRank ][ 0 ].sig[ 1 ].begin,
693  tileSigShapes[ myRank ][ 0 ].sig[ 1 ].end,
694  tileSigShapes[ myRank ][ 0 ].sig[ 1 ].stride,
695  tileSigShapes[ myRank ][ 1 ].sig[ 0 ].begin,
696  tileSigShapes[ myRank ][ 1 ].sig[ 0 ].end,
697  tileSigShapes[ myRank ][ 1 ].sig[ 0 ].stride,
698  tileSigShapes[ myRank ][ 1 ].sig[ 1 ].begin,
699  tileSigShapes[ myRank ][ 1 ].sig[ 1 ].end,
700  tileSigShapes[ myRank ][ 1 ].sig[ 1 ].stride
701  );
702 #endif
703 
704  int ok = MPI_Allgather( MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, tileSigShapes, 2, HIT_SHAPE_SIG,
705  //*( (MPI_Comm *)(lay.topo.pTopology.lowLevel)) );
706  //*( (MPI_Comm *)(lay.pTopology[0].lowLevel)) );
707  lay.pTopology[0]->comm );
708  hit_mpiTestError( ok, "Communication shapes with Allgather");
709 
710  /* 2. SKIP IF MY PROC IS NOT ACTIVE IN THE TOPOLOGY */
711  // if ( ! hit_topImActive( hit_layTopology( lay ) ) ) return HIT_PATTERN_NULL;
712 
713  /* 3. BUILD SHAPES */
714  int acumCard[ HIT_MAXDIMS ][2];
715  for( i=0; i<HIT_MAXDIMS; i++) {
716  acumCard[i][0] = 0;
717  acumCard[i][1] = 0;
718  }
719  for( i=0; i<numProcs; i++ ) {
720  /* 3.1. BUILD SHAPE OBJECT FROM SIGNATURE */
721  tileShapes[i][0] = hit_shapeFromSigShape( tileSigShapes[i][0] );
722  tileShapes[i][1] = hit_shapeFromSigShape( tileSigShapes[i][1] );
723 
724 #ifdef DEBUG
725 printf("[%d] CTRL 2 After Allgather(A) %d Send[%d:%d:%d][%d:%d:%d] Recv[%d:%d:%d][%d:%d:%d]\n",
726  hit_Rank,
727  i,
728  tileSigShapes[ i ][ 0 ].sig[ 0 ].begin,
729  tileSigShapes[ i ][ 0 ].sig[ 0 ].end,
730  tileSigShapes[ i ][ 0 ].sig[ 0 ].stride,
731  tileSigShapes[ i ][ 0 ].sig[ 1 ].begin,
732  tileSigShapes[ i ][ 0 ].sig[ 1 ].end,
733  tileSigShapes[ i ][ 0 ].sig[ 1 ].stride,
734  tileSigShapes[ i ][ 1 ].sig[ 0 ].begin,
735  tileSigShapes[ i ][ 1 ].sig[ 0 ].end,
736  tileSigShapes[ i ][ 1 ].sig[ 0 ].stride,
737  tileSigShapes[ i ][ 1 ].sig[ 1 ].begin,
738  tileSigShapes[ i ][ 1 ].sig[ 1 ].end,
739  tileSigShapes[ i ][ 1 ].sig[ 1 ].stride
740  );
741 #endif
742  /* 3.2. TRANSLATION OF SHAPES FOR COMPACTED COORDINATES */
743  if ( flagCompact ) {
744  int j;
745  for ( j = 0; j < hit_shapeDims( tileShapes[i][0] ); j++ ) {
746  if ( i == myRank ) prefixSums[0][j] = acumCard[j][0];
747  if ( ! hit_sigCmp( tileSigShapes[i][0].sig[j], HIT_SIG_NULL ) ) {
748  int card0 = hit_sigCard( tileSigShapes[i][0].sig[j] );
749  tileSigShapes[i][0].sig[j].begin = acumCard[j][0];
750  tileSigShapes[i][0].sig[j].end = acumCard[j][0] + card0 - 1;
751  tileSigShapes[i][0].sig[j].stride = 1;
752  acumCard[j][0] = acumCard[j][0] + card0;
753  }
754  }
755  for ( j = 0; j < hit_shapeDims( tileShapes[i][1] ); j++ ) {
756  if ( i == myRank ) prefixSums[1][j] = acumCard[j][1];
757  if ( ! hit_sigCmp( tileSigShapes[i][1].sig[j], HIT_SIG_NULL ) ) {
758  int card1 = hit_sigCard( tileSigShapes[i][1].sig[j] );
759  tileSigShapes[i][1].sig[j].begin = acumCard[j][1];
760  tileSigShapes[i][1].sig[j].end = acumCard[j][1] + card1 - 1;
761  tileSigShapes[i][1].sig[j].stride = 1;
762  acumCard[j][1] = acumCard[j][1] + card1;
763  }
764  }
765 
766  if ( i == myRank ) {
767  /* STORE ORIGINAL SHAPES TO TRANSLATE BACK */
768  tileShapesOrig[0] = tileShapes[i][0];
769  tileShapesOrig[1] = tileShapes[i][1];
770  }
771  /* SAVE SHAPES WITH NEW COMPACTED COORDINATES */
772  tileShapes[i][0] = hit_shapeFromSigShape( tileSigShapes[i][0] );
773  tileShapes[i][1] = hit_shapeFromSigShape( tileSigShapes[i][1] );
774  }
775 #ifdef DEBUG
776 printf("[%d] CTRL 2 After Allgather(C) %d Send[%d:%d:%d][%d:%d:%d] Recv[%d:%d:%d][%d:%d:%d]\n",
777  hit_Rank,
778  i,
779  /*
780  hit_shapeSig( tileShapes[ i ][ 0 ], 0 ).begin,
781  hit_shapeSig( tileShapes[ i ][ 0 ], 0 ).end,
782  hit_shapeSig( tileShapes[ i ][ 0 ], 0 ).stride,
783  hit_shapeSig( tileShapes[ i ][ 0 ], 1 ).begin,
784  hit_shapeSig( tileShapes[ i ][ 0 ], 1 ).end,
785  hit_shapeSig( tileShapes[ i ][ 0 ], 1 ).stride,
786  hit_shapeSig( tileShapes[ i ][ 1 ], 0 ).begin,
787  hit_shapeSig( tileShapes[ i ][ 1 ], 0 ).end,
788  hit_shapeSig( tileShapes[ i ][ 1 ], 0 ).stride,
789  hit_shapeSig( tileShapes[ i ][ 1 ], 1 ).begin,
790  hit_shapeSig( tileShapes[ i ][ 1 ], 1 ).end,
791  hit_shapeSig( tileShapes[ i ][ 1 ], 1 ).stride
792  */
793  tileSigShapes[ i ][ 0 ].sig[ 0 ].begin,
794  tileSigShapes[ i ][ 0 ].sig[ 0 ].end,
795  tileSigShapes[ i ][ 0 ].sig[ 0 ].stride,
796  tileSigShapes[ i ][ 0 ].sig[ 1 ].begin,
797  tileSigShapes[ i ][ 0 ].sig[ 1 ].end,
798  tileSigShapes[ i ][ 0 ].sig[ 1 ].stride,
799  tileSigShapes[ i ][ 1 ].sig[ 0 ].begin,
800  tileSigShapes[ i ][ 1 ].sig[ 0 ].end,
801  tileSigShapes[ i ][ 1 ].sig[ 0 ].stride,
802  tileSigShapes[ i ][ 1 ].sig[ 1 ].begin,
803  tileSigShapes[ i ][ 1 ].sig[ 1 ].end,
804  tileSigShapes[ i ][ 1 ].sig[ 1 ].stride
805  );
806 #endif
807  }
808 
809  /* 4. DECLARE THE NEW COMM. PATTERN */
810  HitPattern allToAll = hit_pattern( HIT_PAT_UNORDERED );
811 
812  /* 5. COMPUTE SENDS: SKIP IF MY TILE HAS NULL SHAPE */
813  HitShape localShp = tileShapes[ myRank ][0];
814  if ( ! hit_shapeCmp( localShp, HIT_SHAPE_NULL ) ) {
815  /* FOR ALL PROCESSORS IN THE TOPOLOGY */
816  /* REORDER THE ORDER OF EXPLORATION TO CREATE A SKEWED PATTERN AND
817  * AVOID POTENTIAL COMM. BOTTLENECKS */
818  for ( i=0; i<numProcs; i++ ) {
819  int foreignId = ( myRank + i ) % numProcs;
820 
821  //HitRanks foreignProc = hit_topRanksInternal( lay.topo, foreignId );
822  HitRanks foreignProc = hit_layToTopoRanks( lay, hit_ranks( 1, foreignId ) );
823  HitShape foreignShp1 = tileShapes[ foreignId ][0];
824  HitShape foreignShp2 = tileShapes[ foreignId ][1];
825 
826  /* INTERSECTION: NON-EMPTY IMPLIES COMMUNICATION */
827  /* FOR COPY SIG. LAYOUTS: SKIP COMM. IF THE FOREIGN PROC. ALREADY HAS THIS SHAPE */
828  HitShape overlapShp2 = hit_shapeIntersect( localShp, foreignShp2 );
829  HitShape alreadyThere = hit_shapeIntersect( overlapShp2, foreignShp1 );
830 
831  if ( ! hit_shapeCmp( overlapShp2, HIT_SHAPE_NULL )
832  && ( i == 0 || hit_shapeCmp( alreadyThere, HIT_SHAPE_NULL ) ) ) {
833 
834  /* TRANSLATE COMPACT COORDINATES BACK TO ARRAY COORDINATES */
835  if ( flagCompact ) {
836 #ifdef DEBUG
837 fprintf(stderr, "[%d] Init translation send from %d to %d with shape %d [%d:%d:%d][%d:%d:%d]\n",
838  hit_Rank,
839  myRank, foreignId,
840  hit_shapeDims( overlapShp2 ),
841  hit_shapeSig( overlapShp2, 0 ).begin,
842  hit_shapeSig( overlapShp2, 0 ).end,
843  hit_shapeSig( overlapShp2, 0 ).stride,
844  hit_shapeSig( overlapShp2, 1 ).begin,
845  hit_shapeSig( overlapShp2, 1 ).end,
846  hit_shapeSig( overlapShp2, 1 ).stride
847  );
848 #endif
849  //overlapShp2 = hit_shapeTileToArray( tileShapesOrig[0], overlapShp2 );
850  int j;
851  for (j=0; j<hit_shapeDims( overlapShp2 ); j++) {
852 /*
853 fprintf(stderr, "[%d] In translation %d, through %d, %d, %d\n",
854  hit_Rank,
855  hit_shapeSig( overlapShp2, j ).begin,
856  prefixSums[0][j],
857  hit_shapeSig( tileShapesOrig[0], j ).stride,
858  hit_shapeSig( tileShapesOrig[0], j ).begin
859  );
860 */
861 
862  hit_shapeSig( overlapShp2, j ).begin =
863  ( hit_shapeSig( overlapShp2, j ).begin - prefixSums[0][j] )
864  *
865  hit_shapeSig( tileShapesOrig[0], j ).stride
866  +
867  hit_shapeSig( tileShapesOrig[0], j ).begin;
868 /*
869 fprintf(stderr, "[%d] In translation result %d\n",
870  hit_Rank,
871  hit_shapeSig( overlapShp2, j ).begin
872  );
873 fprintf(stderr, "[%d] In translation end %d, through %d, %d, %d\n",
874  hit_Rank,
875  hit_shapeSig( overlapShp2, j ).end,
876  prefixSums[0][j],
877  hit_shapeSig( tileShapesOrig[0], j ).stride,
878  hit_shapeSig( tileShapesOrig[0], j ).begin
879  );
880 */
881  hit_shapeSig( overlapShp2, j ).end =
882  ( hit_shapeSig( overlapShp2, j ).end - prefixSums[0][j] )
883  *
884  hit_shapeSig( tileShapesOrig[0], j ).stride
885  +
886  hit_shapeSig( tileShapesOrig[0], j ).begin;
887  hit_shapeSig( overlapShp2, j ).stride =
888  hit_shapeSig( tileShapesOrig[0], j ).stride;
889  }
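 /* Worked instance of the translation above (hypothetical values): if this
  * process originally owned the signature [10:20:2] in dimension j and its
  * prefix sum there is 5, the compacted coordinate 7 maps back to the
  * array coordinate (7-5)*2+10 = 14. */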
890  }
891 
892 #ifdef DEBUG
893 fprintf(stderr, "%s Adding send from %d to %d with shape %d [%d:%d:%d][%d:%d:%d]\n", __FUNCTION__, myRank, foreignId,
894  hit_shapeDims( overlapShp2 ),
895  hit_shapeSig( overlapShp2, 0 ).begin,
896  hit_shapeSig( overlapShp2, 0 ).end,
897  hit_shapeSig( overlapShp2, 0 ).stride,
898  hit_shapeSig( overlapShp2, 1 ).begin,
899  hit_shapeSig( overlapShp2, 1 ).end,
900  hit_shapeSig( overlapShp2, 1 ).stride
901  );
902 #endif
903  /* ADD COMM. TO PATTERN */
904  hit_patternAdd( &allToAll,
905  hit_comSendSelectTag(
906  lay, foreignProc, tileP1, overlapShp2, HIT_COM_ARRAYCOORDS, baseType,
907  HIT_PAT_REDISTRIBUTE_TAG2
908  )
909  );
910  }
911  }
912  }
913 
914  //MPI_Barrier( MPI_COMM_WORLD );
915 
916  /* 6. COMPUTE RECEIVES: SKIP IF MY RECEIVING TILE HAS NULL SHAPE */
917  HitShape localShp1 = tileShapes[ myRank ][0];
918  HitShape localShp2 = tileShapes[ myRank ][1];
919  if ( ! hit_shapeCmp( localShp2, HIT_SHAPE_NULL ) ) {
920 
921  /* FOR ALL PROCESSORS IN THE TOPOLOGY */
922  /* REORDER THE ORDER OF EXPLORATION TO CREATE A SKEWED PATTERN AND
923  * AVOID POTENTIAL COMM. BOTTLENECKS */
924  for ( i=0; i<numProcs; i++ ) {
925  int foreignId = ( myRank + i ) % numProcs;
926 
927  //HitRanks foreignProc = hit_topRanksInternal( lay.topo, foreignId );
928  HitRanks foreignProc = hit_layToTopoRanks( lay, hit_ranks( 1, foreignId ) );
929  HitShape foreignShp1 = tileShapes[ foreignId ][0];
930 
931  /* INTERSECTION: NON-EMPTY IMPLIES COMMUNICATION */
932  /* FOR COPY SIG. LAYOUTS: SKIP COMM. IF THE FOREIGN PROC. ALREADY HAS THIS SHAPE */
933  HitShape overlapShp = hit_shapeIntersect( localShp2, foreignShp1 );
934  HitShape alreadyHere = hit_shapeIntersect( overlapShp, localShp1 );
935 
936  if ( ! hit_shapeCmp( overlapShp, HIT_SHAPE_NULL )
937  && ( i == 0 || hit_shapeCmp( alreadyHere, HIT_SHAPE_NULL ) ) ) {
938 
939  /* TRANSLATE COMPACT COORDINATES BACK TO ARRAY COORDINATES */
940  if ( flagCompact ) {
941 #ifdef DEBUG
942 fprintf(stderr, "[%d] Init translation recv from %d to %d with shape %d [%d:%d:%d][%d:%d:%d]\n",
943  hit_Rank,
944  foreignId,
945  myRank,
946  hit_shapeDims( overlapShp ),
947  hit_shapeSig( overlapShp, 0 ).begin,
948  hit_shapeSig( overlapShp, 0 ).end,
949  hit_shapeSig( overlapShp, 0 ).stride,
950  hit_shapeSig( overlapShp, 1 ).begin,
951  hit_shapeSig( overlapShp, 1 ).end,
952  hit_shapeSig( overlapShp, 1 ).stride
953  );
954 #endif
955  // overlapShp = hit_shapeTileToArray( tileShapesOrig[1], overlapShp );
956  int j;
957  for (j=0; j<hit_shapeDims( overlapShp ); j++) {
958  hit_shapeSig( overlapShp, j ).begin =
959  ( hit_shapeSig( overlapShp, j ).begin - prefixSums[1][j] )
960  *
961  hit_shapeSig( tileShapesOrig[1], j ).stride
962  +
963  hit_shapeSig( tileShapesOrig[1], j ).begin;
964  hit_shapeSig( overlapShp, j ).end =
965  ( hit_shapeSig( overlapShp, j ).end - prefixSums[1][j] )
966  *
967  hit_shapeSig( tileShapesOrig[1], j ).stride
968  +
969  hit_shapeSig( tileShapesOrig[1], j ).begin;
970  hit_shapeSig( overlapShp, j ).stride =
971  hit_shapeSig( tileShapesOrig[1], j ).stride;
972  }
973  }
974 #ifdef DEBUG
975 fprintf(stderr, "%s Adding recv from %d to %d with shape %d [%d:%d:%d][%d:%d:%d]\n", __FUNCTION__, foreignId, myRank,
976  hit_shapeDims( overlapShp ),
977  hit_shapeSig( overlapShp, 0 ).begin,
978  hit_shapeSig( overlapShp, 0 ).end,
979  hit_shapeSig( overlapShp, 0 ).stride,
980  hit_shapeSig( overlapShp, 1 ).begin,
981  hit_shapeSig( overlapShp, 1 ).end,
982  hit_shapeSig( overlapShp, 1 ).stride
983  );
984 #endif
985 
986  /* ADD COMM. TO PATTERN */
987  hit_patternAdd( &allToAll,
988  hit_comRecvSelectTag(
989  lay, foreignProc, tileP2, overlapShp, HIT_COM_ARRAYCOORDS, baseType,
990  HIT_PAT_REDISTRIBUTE_TAG2
991  )
992  );
993  }
994  }
995  }
996 
997  /* 7. RETURN */
998  return allToAll;
999 }
1000 
1001 
1002 // @note The recv works because the rows are contiguously distributed.
1003 void hit_patMatMultInternal(HitPattern *pattern, HitLayout lay, HitShape origin_matrix, HitShape matrix, const void * tilePSend, const void * tilePRecv, HitType baseType, const char * file, int line){
1004 
1005  HIT_NOT_USED(file);
1006  HIT_NOT_USED(line);
1007  HIT_NOT_USED(origin_matrix);
1008 
1009  /* Get the communicator of the topology */
1010  //MPI_Comm comm = *((MPI_Comm *)lay.pTopology[0].lowLevel);
1011  MPI_Comm comm = lay.pTopology[0]->comm;
1012 
1013 
1014  /* Number of processors */
1015  int numProcs = 1;
1016  int i;
1017  for(i=0;i<lay.topo.numDims;i++){
1018  numProcs *= lay.numActives[i];
1019  }
1020 
1021 
1022  // Compute the receive counts and displacements
1023  int * recvcnts;
1024  int * rdispls;
1025 
1026  hit_calloc(recvcnts, int, numProcs );
1027  hit_calloc(rdispls, int, numProcs );
1028  // @arturo Ago 2015: New allocP interface
1029  /*
1030  hit_calloc(recvcnts, sizeof(int), (size_t) numProcs,int*);
1031  hit_calloc(rdispls,sizeof(int), (size_t) numProcs,int*);
1032  */
1033 
1034  int nrecv = hit_cShapeCard(matrix,1);
1035 
1036  //printf("[%d] Recibo %d, other %d\n",lay.topo.linearRank,nrecv,hit_cShapeNameList(matrix,1).nNames);
1037  //printf("numElementsTotal1 %d\n",lay.info.layoutList.numElementsTotal);
1038 
1039  for(i=0; i<nrecv; i++){
1040  int name = hit_cShapeNameList(matrix,1).names[i];
1041  int owner = hit_lgr_elementGroup(lay,name);
1042  recvcnts[owner] ++;
1043  }
1044 
1045  rdispls[0] = 0;
1046  for(i=0; i<numProcs-1; i++){
1047  rdispls[i+1] = rdispls[i] + recvcnts[i];
1048  }
1049 
1050 
1051  int * sendcnts;
1052  int * sdispls;
1053  hit_calloc(sendcnts, int, numProcs );
1054  hit_calloc(sdispls, int, numProcs );
1055  // @arturo Ago 2015: New allocP interface
1056  /*
1057  hit_calloc(sendcnts, sizeof(int), (size_t) numProcs,int*);
1058  hit_calloc(sdispls,sizeof(int), (size_t) numProcs,int*);
1059  */
1060 
1061  MPI_Alltoall(recvcnts, 1, MPI_INT,sendcnts, 1, MPI_INT, MPI_COMM_WORLD);
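 /* recvcnts[i] holds how many element indices this process will request from
  * owner i; the all-to-all transposes those counts, so afterwards sendcnts[i]
  * is the number of indices that process i will request from this process in
  * the MPI_Alltoallv below. */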
1062 
1063  sdispls[0] = 0;
1064  for(i=0; i<numProcs-1; i++){
1065  sdispls[i+1] = sdispls[i] + sendcnts[i];
1066 
1067  }
1068 
1069  int nsend = sdispls[numProcs-1] + sendcnts[numProcs-1];
1070 
1071 
1072  int * sendlist;
1073  // @arturo Ago 2015: New allocP interface
1074  // hit_malloc(sendlist,(size_t) nsend * sizeof(int),int*);
1075  hit_malloc(sendlist, int, nsend );
1076 
1077  MPI_Alltoallv(hit_cShapeNameList(matrix,1).names, recvcnts, rdispls, MPI_INT,
1078  sendlist, sendcnts, sdispls, MPI_INT, MPI_COMM_WORLD);
1079 
1080 
1081  const HitTile *tileSend = (const HitTile *)tilePSend;
1082  const HitTile *tileRecv = (const HitTile *)tilePRecv;
1083 
1084  for(i=0; i<nsend; i++){
1085  sendlist[i] -= hit_shapeSig(tileSend->shape,0).begin;
1086  }
1087 
1088 
1089  // @arturo 2015/01/22
1090  //int myself = lay.topo.linearRank;
1091  int myself = hit_topSelfRankInternal( lay.topo );
1092 
1093 
1094  // Create the comms
1095  for(i=0; i<numProcs; i++){
1096 
1097  //if(i == myself) continue;
1098 
1099  HitCom csend = HIT_COM_NULL;
1100  HitCom crecv = HIT_COM_NULL;
1101 
1102  MPI_Datatype tsend;
1103  MPI_Datatype trecv;
1104 
1105  int ok;
1106 
1107  // Send type
1108 /*
1109  {
1110 
1111  int j;
1112  printf("[%d] displ=%d List: ",myself,sdispls[i]);
1113  int * list = &(sendlist[sdispls[i]]);
1114  for(j=0; j<sendcnts[i]; j++){
1115  printf("%d, ",list[j]);
1116  }
1117  printf("\n");
1118 
1119  }
1120 */
1121 
1122  ok = MPI_Type_create_indexed_block(sendcnts[i],1,&(sendlist[sdispls[i]]),baseType,&tsend);
1123  hit_mpiTestError(ok,"Error type indexed block");
1124  ok = MPI_Type_commit(&tsend);
1125  hit_mpiTestError(ok,"Error type commit");
1126 
1127 
1128  // Send
1129  csend.myself = myself;
1130  csend.sendTo = i;
1131  //csend.recvFrom = HIT_RANK_NULL;
1132  csend.typeSend = tsend;
1133  csend.dataSend = tileSend->data;
1134  csend.comm = comm;
1135  csend.tag = i;
1136  csend.commType = HIT_SENDRECV;
1137 
1138 
1139 
1140  // Recv type
1141  ok = MPI_Type_contiguous(recvcnts[i],baseType,&trecv);
1142  hit_mpiTestError(ok,"Error type contiguous");
1143  ok = MPI_Type_commit(&trecv);
1144  hit_mpiTestError(ok,"Error type commit");
1145 
1146  // Recv
1147  crecv.myself = myself;
1148  //crecv.sendTo = HIT_RANK_NULL;
1149  crecv.recvFrom = i;
1150  crecv.typeRecv = trecv;
1151  crecv.dataRecv = (char*) tileRecv->data + (size_t) rdispls[i] * tileRecv->baseExtent;
1152  crecv.comm = comm;
1153  crecv.tag = myself;
1154  crecv.commType = HIT_SENDRECV;
1155 
1156  //{
1157  // int j;
1158  // printf("[%d] displ recv =%d List: ",myself,rdispls[i]);
1159  // printf("\n");
1160  //}
1161 
1162 
1163 
1164  if(sendcnts[i] != 0 )
1165  hit_patternAdd(pattern,csend);
1166  if(recvcnts[i] != 0 )
1167  hit_patternAdd(pattern,crecv);
1168 
1169 
1170  }
1171 
1172 }
1173 
1174 
1175 
1176 
1177 
1178 
1179 
1180 
1181 // @note The recv works because the rows are contiguously distributed.
1182 void hit_patMatMultBitmapInternal(HitPattern *pattern, HitLayout lay, HitShape origin_matrix, HitShape matrix, const void * tilePSend, const void * tilePRecv, HitType baseType, const char * file, int line){
1183 
1184  HIT_NOT_USED(file);
1185  HIT_NOT_USED(line);
1186  HIT_NOT_USED(origin_matrix);
1187 
1188 
1189  /* Get the communicator of the topology */
1190  //MPI_Comm comm = *((MPI_Comm *)lay.pTopology[0].lowLevel);
1191  MPI_Comm comm = lay.pTopology[0]->comm;
1192 
1193 
1194  /* Number of processors */
1195  int numProcs = 1;
1196  int i;
1197  for(i=0;i<lay.topo.numDims;i++){
1198  numProcs *= lay.numActives[i];
1199  }
1200 
1201  //printf("Procs: %d\n",numProcs);
1202 
1203  // Compute the receive counts and displacements
1204  int * recvcnts;
1205  int * rdispls;
1206 
1207  hit_calloc(recvcnts, int, numProcs );
1208  hit_calloc(rdispls, int, numProcs );
1209  // @arturo Ago 2015: New allocP interface
1210  /*
1211  hit_calloc(recvcnts, sizeof(int), (size_t) numProcs,int*);
1212  hit_calloc(rdispls,sizeof(int), (size_t) numProcs,int*);
1213  */
1214 
1215  int nrecv = hit_bShapeCard(matrix,1);
1216 
1217  //printf("[%d] Recibo %d, other %d\n",lay.topo.linearRank,nrecv,hit_bShapeNameList(matrix,1).nNames);
1218  //printf("numElementsTotal1 %d\n",lay.info.layoutList.numElementsTotal);
1219 
1220  for(i=0; i<nrecv; i++){
1221  int name = hit_bShapeNameList(matrix,1).names[i];
1222  int owner = lay.info.layoutList.assignedGroups[name];
1223  recvcnts[owner] ++;
1224  //printf("**[%d] Recivo %d from %d\n",lay.topo.linearRank,name,owner);
1225  }
1226 
1227 
1228 
1229  rdispls[0] = 0;
1230  for(i=0; i<numProcs-1; i++){
1231  rdispls[i+1] = rdispls[i] + recvcnts[i];
1232  }
1233 
1234  /*
1235  sleep(lay.topo.linearRank);
1236  fflush(stdout);
1237 
1238 
1239  {
1240  int i,j;
1241  for(i=0; i<numProcs; i++){
1242  printf("Proc %d\n",i);
1243  for(j=rdispls[i];j<rdispls[i]+recvcnts[i];j++){
1244  printf("**[%d] Recivo %d from %d\n",lay.topo.linearRank,hit_bShapeNameList(matrix,1).names[j],i);
1245 
1246  }
1247 
1248  }
1249  }
1250  fflush(stdout);
1251 */
1252 
1253 
1254  int * sendcnts;
1255  int * sdispls;
1256  hit_calloc(sendcnts, int, numProcs );
1257  hit_calloc(sdispls, int, numProcs );
1258  // @arturo Ago 2015: New allocP interface
1259  /*
1260  hit_calloc(sendcnts, sizeof(int), (size_t) numProcs,int*);
1261  hit_calloc(sdispls,sizeof(int), (size_t) numProcs,int*);
1262  */
1263 
1264 
1265  MPI_Alltoall(recvcnts, 1, MPI_INT,sendcnts, 1, MPI_INT, MPI_COMM_WORLD);
1266 
1267 
1268  sdispls[0] = 0;
1269  for(i=0; i<numProcs-1; i++){
1270  sdispls[i+1] = sdispls[i] + sendcnts[i];
1271 
1272  }
1273 
1274  int nsend = sdispls[numProcs-1] + sendcnts[numProcs-1];
1275 
1276 
1277  int * sendlist;
1278  // @arturo Ago 2015: New allocP interface
1279  // hit_malloc(sendlist,(size_t) nsend * sizeof(int),int*);
1280  hit_malloc(sendlist, int, nsend );
1281 
1282  MPI_Alltoallv(hit_bShapeNameList(matrix,1).names, recvcnts, rdispls, MPI_INT,
1283  sendlist, sendcnts, sdispls, MPI_INT, MPI_COMM_WORLD);
1284 
1285 
1286  const HitTile *tileSend = (const HitTile *)tilePSend;
1287  const HitTile *tileRecv = (const HitTile *)tilePRecv;
1288 
1289 
1290  /*
1291  {
1292  int i,j;
1293  for(i=0; i<numProcs; i++){
1294 
1295  for(j=sdispls[i];j<sdispls[i]+sendcnts[i];j++){
1296  printf("**[%d] Envio %d to %d\n",lay.topo.linearRank,sendlist[j],i);
1297 
1298  }
1299 
1300  }
1301  }
1302  */
1303 
1304 
1305  for(i=0; i<nsend; i++){
1306  sendlist[i] -= hit_shapeSig(tileSend->shape,0).begin;
1307  }
1308 
1309  // @arturo 2015/01/22
1310  //int myself = lay.topo.linearRank;
1311  int myself = hit_topSelfRankInternal( lay.topo );
1312 
1313 
1314  // Create the comms
1315  for(i=0; i<numProcs; i++){
1316 
1317  //if(i == myself) continue;
1318 
1319  HitCom csend = HIT_COM_NULL;
1320  HitCom crecv = HIT_COM_NULL;
1321 
1322  MPI_Datatype tsend;
1323  MPI_Datatype trecv;
1324 
1325  int ok;
1326 
1327  // Send type
1328 
1329 // fflush(stdout);
1330 
1331  // {
1332 //
1333  // int j;
1334  // printf("[%d] displ=%d List: ",myself,sdispls[i]);
1335  // int * list = &(sendlist[sdispls[i]]);
1336  // for(j=0; j<sendcnts[i]; j++){
1337  // printf("%d, ",list[j]);
1338  // }
1339  // printf("\n");
1340 
1341  // }
1342 
1343  // fflush(stdout);
1344 
1345 
1346  ok = MPI_Type_create_indexed_block(sendcnts[i],1,&(sendlist[sdispls[i]]),baseType,&tsend);
1347  hit_mpiTestError(ok,"Error type indexed block");
1348  ok = MPI_Type_commit(&tsend);
1349  hit_mpiTestError(ok,"Error type commit");
1350 
1351 
1352  // Send
1353  csend.myself = myself;
1354  csend.sendTo = i;
1355  //csend.recvFrom = HIT_RANK_NULL;
1356  csend.typeSend = tsend;
1357  csend.dataSend = tileSend->data;
1358  csend.comm = comm;
1359  csend.tag = i;
1360  csend.commType = HIT_SENDRECV;
1361 
1362 
1363 
1364  // Recv type
1365  ok = MPI_Type_contiguous(recvcnts[i],baseType,&trecv);
1366  hit_mpiTestError(ok,"Error type contiguous");
1367  ok = MPI_Type_commit(&trecv);
1368  hit_mpiTestError(ok,"Error type commit");
1369 
1370  // Recv
1371  crecv.myself = myself;
1372  //crecv.sendTo = HIT_RANK_NULL;
1373  crecv.recvFrom = i;
1374  crecv.typeRecv = trecv;
1375  crecv.dataRecv = (char*) tileRecv->data + (size_t) rdispls[i] * tileRecv->baseExtent;
1376  crecv.comm = comm;
1377  crecv.tag = myself;
1378  crecv.commType = HIT_SENDRECV;
1379 
1380  //{
1381  // int j;
1382  // printf("[%d] displ recv =%d List: ",myself,rdispls[i]);
1383  // printf("\n");
1384  //}
1385 
1386 
1387 
1388  if(sendcnts[i] != 0 )
1389  hit_patternAdd(pattern,csend);
1390  if(recvcnts[i] != 0 )
1391  hit_patternAdd(pattern,crecv);
1392 
1393 
1394  }
1395 
1396 
1397 
1398 }
1399 
1400 