Actual source code: mpi.c

  1: /*
  2:     This provides a few of the MPIUni functions that cannot be implemented
  3:     with C macros
  4: */
  5: #include <petscsys.h>
  6: #if !defined(MPIUNI_H)
  7: #error "Wrong mpi.h included! require mpi.h from MPIUNI"
  8: #endif

 10: #include <petsc/private/petscimpl.h>

 12: #if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_HIP)
 13: #include <petscdevice.h>
 14: #endif

 16: #define MPI_SUCCESS 0
 17: #define MPI_FAILURE 1

 19: void *MPIUNI_TMP = NULL;

 21: /*
 22:        With MPIUni a communicator is a small integer handle: MPI_COMM_NULL is 0, MPI_COMM_SELF is 1, MPI_COMM_WORLD is 2,
 23:     and communicators obtained from MPI_Comm_dup() or MPI_Comm_create() receive the handles 3 and up (at most MAX_COMM in all).

 25:     MPI_COMM_SELF and MPI_COMM_WORLD are freed with MPI_Comm_free() in MPI_Finalize(), but in general with PETSc
 26:      the other communicators are freed once the last PETSc object is freed (before MPI_Finalize()).

 28: */
 29: #define MAX_ATTR 256
 30: #define MAX_COMM 128
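
As a minimal sketch of this handle scheme (an editorial example, not part of mpi.c; it assumes a PETSc build configured without a real MPI, so that mpi.h resolves to MPIUNI's header and MPI_Comm is an int):

    #include <mpi.h>    /* MPIUNI's mpi.h */
    #include <stdio.h>

    int main(int argc,char **argv)
    {
      MPI_Comm a,b;
      MPI_Init(&argc,&argv);
      MPI_Comm_dup(MPI_COMM_WORLD,&a);   /* first unused slot: handle 3 */
      MPI_Comm_dup(a,&b);                /* next unused slot: handle 4 */
      printf("a=%d b=%d\n",(int)a,(int)b);
      MPI_Comm_free(&b);                 /* slot 4 can now be reused */
      MPI_Comm_free(&a);
      MPI_Finalize();
      return 0;
    }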

 32: typedef struct {
 33:   void *attribute_val;
 34:   int  active;
 35: } MPI_Attr;

 37: typedef struct {
 38:   void                *extra_state;
 39:   MPI_Delete_function *del;
 40:   int                 active;  /* Is this keyval in use by some comm? */
 41: } MPI_Attr_keyval;

 43: static MPI_Attr_keyval attr_keyval[MAX_ATTR];
 44: static MPI_Attr        attr[MAX_COMM][MAX_ATTR];
 45: static int             comm_active[MAX_COMM];  /* Boolean array indicating which comms are in use */
 46: static int             mpi_tag_ub = 100000000;
 47: static int             num_attr = 1; /* Maximal number of keyvals/attributes ever created, including the predefined MPI_TAG_UB attribute. */
 48: static int             MaxComm  = 2; /* Maximal number of communicators ever created, including comm_self(1), comm_world(2), but not comm_null(0) */
 49: static void*           MPIUNIF_mpi_in_place = 0;

 51: #define CommIdx(comm)  ((comm)-1)  /* the communicator's internal index used in attr[idx][] and comm_active[idx]. comm_null does not occupy slots in attr[][] */

 53: #if defined(__cplusplus)
 54: extern "C" {
 55: #endif

 57: /*
 58:    To avoid problems with the prototype of the system memcpy(), it is duplicated here
 59: */
 60: int MPIUNI_Memcpy(void *dst,const void *src,int n)
 61: {
 62:   if (dst == MPI_IN_PLACE || dst == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
 63:   if (src == MPI_IN_PLACE || src == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
 64:   if (!n) return MPI_SUCCESS;

 66:   /* GPU-aware MPIUNI. Use synchronous copy per MPI semantics */
 67: #if defined(PETSC_HAVE_CUDA)
 68:   if (PetscDeviceInitialized(PETSC_DEVICE_CUDA)) {cudaError_t cerr = cudaMemcpy(dst,src,n,cudaMemcpyDefault);if (cerr != cudaSuccess) return MPI_FAILURE;} else
 69: #elif defined(PETSC_HAVE_HIP)
 70:   if (PetscDeviceInitialized(PETSC_DEVICE_HIP))  {hipError_t  cerr = hipMemcpy(dst,src,n,hipMemcpyDefault);  if (cerr != hipSuccess)  return MPI_FAILURE;} else
 71: #endif
 72:   {memcpy(dst,src,n);}
 73:   return MPI_SUCCESS;
 74: }
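
Under MPIUNI the collective operations in mpi.h reduce to calls of this routine, copying sendbuf to recvbuf on the single rank, with MPI_IN_PLACE short-circuited above. A minimal sketch of what that means for a caller (editorial example, again assuming the MPIUNI mpi.h):

    #include <mpi.h>
    #include <assert.h>

    int main(int argc,char **argv)
    {
      int val = 42,sum = 0;
      MPI_Init(&argc,&argv);
      /* one rank, so the "reduction" is a plain copy of one int */
      MPI_Allreduce(&val,&sum,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
      assert(sum == 42);
      /* MPI_IN_PLACE: MPIUNI_Memcpy() returns immediately, sum is untouched */
      MPI_Allreduce(MPI_IN_PLACE,&sum,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
      assert(sum == 42);
      MPI_Finalize();
      return 0;
    }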

 76: static int classcnt = 0;
 77: static int codecnt = 0;

 79: int MPI_Add_error_class(int *cl)
 80: {
 81:   *cl = classcnt++;
 82:   return MPI_SUCCESS;
 83: }

 85: int MPI_Add_error_code(int cl,int *co)
 86: {
 87:   if (cl >= classcnt) return MPI_FAILURE;
 88:   *co = codecnt++;
 89:   return MPI_SUCCESS;
 90: }

 92: int MPI_Type_get_envelope(MPI_Datatype datatype,int *num_integers,int *num_addresses,int *num_datatypes,int *combiner)
 93: {
 94:   int comb = datatype >> 28;
 95:   switch (comb) {
 96:   case MPI_COMBINER_NAMED:
 97:     *num_integers = 0;
 98:     *num_addresses = 0;
 99:     *num_datatypes = 0;
100:     *combiner = comb;
101:     break;
102:   case MPI_COMBINER_DUP:
103:     *num_integers = 0;
104:     *num_addresses = 0;
105:     *num_datatypes = 1;
106:     *combiner = comb;
107:     break;
108:   case MPI_COMBINER_CONTIGUOUS:
109:     *num_integers = 1;
110:     *num_addresses = 0;
111:     *num_datatypes = 1;
112:     *combiner = comb;
113:     break;
114:   default:
115:     return MPIUni_Abort(MPI_COMM_SELF,1);
116:   }
117:   return MPI_SUCCESS;
118: }

120: int MPI_Type_get_contents(MPI_Datatype datatype,int max_integers,int max_addresses,int max_datatypes,int *array_of_integers,MPI_Aint *array_of_addresses,MPI_Datatype *array_of_datatypes)
121: {
122:   int comb = datatype >> 28;
123:   switch (comb) {
124:   case MPI_COMBINER_NAMED:
125:     return MPIUni_Abort(MPI_COMM_SELF,1);
126:   case MPI_COMBINER_DUP:
127:     if (max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF,1);
128:     array_of_datatypes[0] = datatype & 0x0fffffff;
129:     break;
130:   case MPI_COMBINER_CONTIGUOUS:
131:     if (max_integers < 1 || max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF,1);
132:     array_of_integers[0] = (datatype >> 8) & 0xfff; /* count */
133:     array_of_datatypes[0] = (datatype & 0x0ff000ff) | 0x100;  /* basic named type (count=1) from which the contiguous type is derived */
134:     break;
135:   default:
136:     return MPIUni_Abort(MPI_COMM_SELF,1);
137:   }
138:   return MPI_SUCCESS;
139: }
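
The two routines above decode a datatype handle whose bits carry the type description: the top four bits hold the combiner, and for MPI_COMBINER_CONTIGUOUS bits 8 through 19 hold the element count while the remaining bits identify the named base type. A worked decode as an editorial sketch (the handle value and the combiner number are hypothetical, chosen only to exercise the masks used above):

    #include <stdio.h>

    int main(void)
    {
      /* hypothetical handle: combiner 3 in the top 4 bits, count 5 in bits 8..19 */
      unsigned int handle   = (3u << 28) | (5u << 8);
      unsigned int combiner = handle >> 28;                     /* -> 3 */
      unsigned int count    = (handle >> 8) & 0xfff;            /* -> 5 */
      unsigned int base     = (handle & 0x0ff000ffu) | 0x100u;  /* named base type with count 1 */
      printf("combiner=%u count=%u base=0x%x\n",combiner,count,base);
      return 0;
    }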

141: /*
142:    Used to set the built-in MPI_TAG_UB attribute
143: */
144: static int Keyval_setup(void)
145: {
146:   attr[CommIdx(MPI_COMM_WORLD)][0].active        = 1;
147:   attr[CommIdx(MPI_COMM_WORLD)][0].attribute_val = &mpi_tag_ub;
 148:   attr[CommIdx(MPI_COMM_SELF)][0].active         = 1;
 149:   attr[CommIdx(MPI_COMM_SELF)][0].attribute_val  = &mpi_tag_ub;
150:   attr_keyval[0].active                          = 1;
151:   return MPI_SUCCESS;
152: }

154: int MPI_Comm_create_keyval(MPI_Copy_function *copy_fn,MPI_Delete_function *delete_fn,int *keyval,void *extra_state)
155: {
156:   int i,keyid;
157:   for (i=1; i<num_attr; i++) { /* the first attribute is always in use */
158:     if (!attr_keyval[i].active) {
159:       keyid = i;
160:       goto found;
161:     }
162:   }
163:   if (num_attr >= MAX_ATTR) return MPIUni_Abort(MPI_COMM_WORLD,1);
164:   keyid = num_attr++;

166: found:
167:   attr_keyval[keyid].extra_state = extra_state;
168:   attr_keyval[keyid].del         = delete_fn;
169:   attr_keyval[keyid].active      = 1;
170:   *keyval                        = keyid;
171:   return MPI_SUCCESS;
172: }

174: int MPI_Comm_free_keyval(int *keyval)
175: {
176:   attr_keyval[*keyval].extra_state = 0;
177:   attr_keyval[*keyval].del         = 0;
178:   attr_keyval[*keyval].active      = 0;
179:   *keyval = 0;
180:   return MPI_SUCCESS;
181: }

183: int MPI_Comm_set_attr(MPI_Comm comm,int keyval,void *attribute_val)
184: {
185:   int idx = CommIdx(comm);
186:   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
187:   attr[idx][keyval].active        = 1;
188:   attr[idx][keyval].attribute_val = attribute_val;
189:   return MPI_SUCCESS;
190: }

192: int MPI_Comm_delete_attr(MPI_Comm comm,int keyval)
193: {
194:   int idx = CommIdx(comm);
195:   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
196:   if (attr[idx][keyval].active && attr_keyval[keyval].del) {
197:     void *save_attribute_val        = attr[idx][keyval].attribute_val;
198:     attr[idx][keyval].active        = 0;
199:     attr[idx][keyval].attribute_val = 0;
200:     (*(attr_keyval[keyval].del))(comm,keyval,save_attribute_val,attr_keyval[keyval].extra_state);
201:   }
202:   return MPI_SUCCESS;
203: }

205: int MPI_Comm_get_attr(MPI_Comm comm,int keyval,void *attribute_val,int *flag)
206: {
207:   int idx = CommIdx(comm);
208:   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
209:   if (!keyval) Keyval_setup();
210:   *flag                  = attr[idx][keyval].active;
211:   *(void**)attribute_val = attr[idx][keyval].attribute_val;
212:   return MPI_SUCCESS;
213: }
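
A minimal sketch of the attribute machinery from the caller's side: looking up the predefined MPI_TAG_UB attribute, i.e. keyval 0, which Keyval_setup() above wires up lazily. This is an editorial example that assumes MPIUNI's mpi.h defines MPI_TAG_UB as that keyval:

    #include <mpi.h>
    #include <stdio.h>

    int main(int argc,char **argv)
    {
      int *tag_ub,flag;
      MPI_Init(&argc,&argv);
      MPI_Comm_get_attr(MPI_COMM_WORLD,MPI_TAG_UB,&tag_ub,&flag);
      if (flag) printf("MPI_TAG_UB = %d\n",*tag_ub);   /* 100000000 with MPIUNI */
      MPI_Finalize();
      return 0;
    }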

215: static char all_comm_names[MAX_COMM][MPI_MAX_OBJECT_NAME] = {
216:   "MPI_COMM_SELF",
217:   "MPI_COMM_WORLD"
218: };

220: int MPI_Comm_get_name(MPI_Comm comm,char *comm_name,int *resultlen)
221: {
222:   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
223:   if (!comm_name || !resultlen) return MPI_FAILURE;
224:   strncpy(comm_name,all_comm_names[CommIdx(comm)],MPI_MAX_OBJECT_NAME-1);
225:   *resultlen = (int)strlen(comm_name);
226:   return MPI_SUCCESS;
227: }

229: int MPI_Comm_set_name(MPI_Comm comm,const char *comm_name)
230: {
231:   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
232:   if (!comm_name) return MPI_FAILURE;
233:   if (strlen(comm_name) > MPI_MAX_OBJECT_NAME-1) return MPI_FAILURE;
234:   strncpy(all_comm_names[CommIdx(comm)],comm_name,MPI_MAX_OBJECT_NAME-1);
235:   return MPI_SUCCESS;
236: }

238: int MPI_Comm_create(MPI_Comm comm,MPI_Group group,MPI_Comm *newcomm)
239: {
240:   int j;
241:   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
242:   for (j=3; j<=MaxComm; j++) {
243:     if (!comm_active[CommIdx(j)]) {
244:       comm_active[CommIdx(j)] = 1;
245:       *newcomm = j;
246:       return MPI_SUCCESS;
247:     }
248:   }
249:   if (MaxComm >= MAX_COMM) return MPI_FAILURE;
250:   *newcomm = ++MaxComm;
251:   comm_active[CommIdx(*newcomm)] = 1;
252:   return MPI_SUCCESS;
253: }

255: int MPI_Comm_dup(MPI_Comm comm,MPI_Comm *out)
256: {
257:   int j;
258:   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
259:   for (j=3; j<=MaxComm; j++) {
260:     if (!comm_active[CommIdx(j)]) {
261:       comm_active[CommIdx(j)] = 1;
262:       *out = j;
263:       return MPI_SUCCESS;
264:     }
265:   }
266:   if (MaxComm >= MAX_COMM) return MPI_FAILURE;
267:   *out = ++MaxComm;
268:   comm_active[CommIdx(*out)] = 1;
269:   return MPI_SUCCESS;
270: }

272: int MPI_Comm_free(MPI_Comm *comm)
273: {
274:   int i;
275:   int idx = CommIdx(*comm);

277:   if (*comm < 1 || *comm > MaxComm) return MPI_FAILURE;
278:   for (i=0; i<num_attr; i++) {
279:     if (attr[idx][i].active && attr_keyval[i].del) (*attr_keyval[i].del)(*comm,i,attr[idx][i].attribute_val,attr_keyval[i].extra_state);
280:     attr[idx][i].active        = 0;
281:     attr[idx][i].attribute_val = 0;
282:   }
283:   if (*comm >= 3) comm_active[idx] = 0;
284:   *comm = 0;
285:   return MPI_SUCCESS;
286: }
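
MPI_Comm_free() runs the registered delete callbacks before recycling the slot. A minimal sketch of that lifecycle (editorial example; del_fn and the cached malloc'ed value are hypothetical, and the copy-function argument is passed as 0 since MPI_Comm_create_keyval() above ignores it):

    #include <mpi.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* hypothetical delete callback: releases memory cached on the communicator */
    static int del_fn(MPI_Comm comm,int keyval,void *attr_val,void *extra_state)
    {
      free(attr_val);
      printf("deleted attribute %d on comm %d\n",keyval,(int)comm);
      return MPI_SUCCESS;
    }

    int main(int argc,char **argv)
    {
      MPI_Comm dup;
      int      keyval;
      MPI_Init(&argc,&argv);
      MPI_Comm_dup(MPI_COMM_WORLD,&dup);
      MPI_Comm_create_keyval(0,del_fn,&keyval,NULL);
      MPI_Comm_set_attr(dup,keyval,malloc(16));
      MPI_Comm_free(&dup);          /* invokes del_fn() on the cached attribute */
      MPI_Comm_free_keyval(&keyval);
      MPI_Finalize();
      return 0;
    }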

288: int MPI_Comm_size(MPI_Comm comm, int *size)
289: {
290:   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
291:   *size=1;
292:   return MPI_SUCCESS;
293: }

295: int MPI_Comm_rank(MPI_Comm comm, int *rank)
296: {
297:   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
298:   *rank=0;
299:   return MPI_SUCCESS;
300: }

302: int MPIUni_Abort(MPI_Comm comm,int errorcode)
303: {
304:   printf("MPI operation not supported by PETSc's sequential MPI wrappers\n");
305:   return MPI_ERR_NOSUPPORT;
306: }

308: int MPI_Abort(MPI_Comm comm,int errorcode)
309: {
310:   abort();
311:   return MPI_SUCCESS;
312: }

314: /* --------------------------------------------------------------------------*/

316: static int MPI_was_initialized = 0;
317: static int MPI_was_finalized   = 0;

319: int MPI_Init(int *argc, char ***argv)
320: {
321:   if (MPI_was_initialized) return MPI_FAILURE;
322:   /* The MPI standard says "once MPI_Finalize returns, no MPI routine (not even MPI_Init) may be called", so a strictly
323:      compliant MPIUNI would keep the check 'if (MPI_was_finalized) return MPI_FAILURE;'. We relax it here to make life
324:      easier for users of MPIUNI, so that they can run multiple PetscInitialize()/PetscFinalize() cycles.
325:   */
326:   /* if (MPI_was_finalized) return MPI_FAILURE; */
327:   MPI_was_initialized = 1;
328:   MPI_was_finalized   = 0;
329:   return MPI_SUCCESS;
330: }

332: int MPI_Init_thread(int *argc, char ***argv, int required, int* provided)
333: {
334:   MPI_Query_thread(provided);
335:   return MPI_Init(argc,argv);
336: }

338: int MPI_Query_thread(int* provided)
339: {
340:   *provided = MPI_THREAD_FUNNELED;
341:   return MPI_SUCCESS;
342: }

344: int MPI_Finalize(void)
345: {
346:   MPI_Comm comm;
347:   if (MPI_was_finalized) return MPI_FAILURE;
348:   if (!MPI_was_initialized) return MPI_FAILURE;
349:   comm = MPI_COMM_WORLD;
350:   MPI_Comm_free(&comm);
351:   comm = MPI_COMM_SELF;
352:   MPI_Comm_free(&comm);
353: #if defined(PETSC_USE_DEBUG)
354:   {
355:     int i;
356:     for (i=3; i<=MaxComm; i++) {
357:       if (comm_active[CommIdx(i)]) printf("MPIUni warning: MPI communicator %d is not freed before MPI_Finalize()\n", i);
358:     }
359:   }
360: #endif
361:   /* reset counters */
362:   MaxComm  = 2;
363:   num_attr = 1;
364:   MPI_was_finalized   = 1;
365:   MPI_was_initialized = 0;
366:   PETSC_COMM_WORLD    = MPI_COMM_NULL;
367:   return MPI_SUCCESS;
368: }

370: int MPI_Initialized(int *flag)
371: {
372:   *flag = MPI_was_initialized;
373:   return MPI_SUCCESS;
374: }

376: int MPI_Finalized(int *flag)
377: {
378:   *flag = MPI_was_finalized;
379:   return MPI_SUCCESS;
380: }
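
Because of the relaxation noted in MPI_Init() above, an MPIUNI program may legally run more than one init/finalize cycle, which is what repeated PetscInitialize()/PetscFinalize() relies on. A minimal sketch (editorial example):

    #include <mpi.h>
    #include <assert.h>

    int main(int argc,char **argv)
    {
      int i,flag;
      for (i=0; i<2; i++) {   /* a second cycle is allowed by MPIUNI, not by the MPI standard */
        MPI_Init(&argc,&argv);
        MPI_Initialized(&flag);
        assert(flag);
        MPI_Finalize();
        MPI_Finalized(&flag);
        assert(flag);
      }
      return 0;
    }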

382: /* -------------------     Fortran versions of several routines ------------------ */

384: #if defined(PETSC_HAVE_FORTRAN_CAPS)
385: #define mpiunisetmoduleblock_          MPIUNISETMODULEBLOCK
386: #define mpiunisetfortranbasepointers_  MPIUNISETFORTRANBASEPOINTERS
387: #define petsc_mpi_init_                PETSC_MPI_INIT
388: #define petsc_mpi_finalize_            PETSC_MPI_FINALIZE
389: #define petsc_mpi_comm_size_           PETSC_MPI_COMM_SIZE
390: #define petsc_mpi_comm_rank_           PETSC_MPI_COMM_RANK
391: #define petsc_mpi_abort_               PETSC_MPI_ABORT
392: #define petsc_mpi_reduce_              PETSC_MPI_REDUCE
393: #define petsc_mpi_allreduce_           PETSC_MPI_ALLREDUCE
394: #define petsc_mpi_barrier_             PETSC_MPI_BARRIER
395: #define petsc_mpi_bcast_               PETSC_MPI_BCAST
396: #define petsc_mpi_gather_              PETSC_MPI_GATHER
397: #define petsc_mpi_allgather_           PETSC_MPI_ALLGATHER
398: #define petsc_mpi_comm_split_          PETSC_MPI_COMM_SPLIT
399: #define petsc_mpi_scan_                PETSC_MPI_SCAN
400: #define petsc_mpi_send_                PETSC_MPI_SEND
401: #define petsc_mpi_recv_                PETSC_MPI_RECV
402: #define petsc_mpi_reduce_scatter_      PETSC_MPI_REDUCE_SCATTER
403: #define petsc_mpi_irecv_               PETSC_MPI_IRECV
404: #define petsc_mpi_isend_               PETSC_MPI_ISEND
405: #define petsc_mpi_sendrecv_            PETSC_MPI_SENDRECV
406: #define petsc_mpi_test_                PETSC_MPI_TEST
407: #define petsc_mpi_waitall_             PETSC_MPI_WAITALL
408: #define petsc_mpi_waitany_             PETSC_MPI_WAITANY
409: #define petsc_mpi_allgatherv_          PETSC_MPI_ALLGATHERV
410: #define petsc_mpi_alltoallv_           PETSC_MPI_ALLTOALLV
411: #define petsc_mpi_comm_create_         PETSC_MPI_COMM_CREATE
412: #define petsc_mpi_address_             PETSC_MPI_ADDRESS
413: #define petsc_mpi_pack_                PETSC_MPI_PACK
414: #define petsc_mpi_unpack_              PETSC_MPI_UNPACK
415: #define petsc_mpi_pack_size_           PETSC_MPI_PACK_SIZE
416: #define petsc_mpi_type_struct_         PETSC_MPI_TYPE_STRUCT
417: #define petsc_mpi_type_commit_         PETSC_MPI_TYPE_COMMIT
418: #define petsc_mpi_wtime_               PETSC_MPI_WTIME
419: #define petsc_mpi_cancel_              PETSC_MPI_CANCEL
420: #define petsc_mpi_comm_dup_            PETSC_MPI_COMM_DUP
421: #define petsc_mpi_comm_free_           PETSC_MPI_COMM_FREE
422: #define petsc_mpi_get_count_           PETSC_MPI_GET_COUNT
423: #define petsc_mpi_get_processor_name_  PETSC_MPI_GET_PROCESSOR_NAME
424: #define petsc_mpi_initialized_         PETSC_MPI_INITIALIZED
425: #define petsc_mpi_iprobe_              PETSC_MPI_IPROBE
426: #define petsc_mpi_probe_               PETSC_MPI_PROBE
427: #define petsc_mpi_request_free_        PETSC_MPI_REQUEST_FREE
428: #define petsc_mpi_ssend_               PETSC_MPI_SSEND
429: #define petsc_mpi_wait_                PETSC_MPI_WAIT
430: #define petsc_mpi_comm_group_          PETSC_MPI_COMM_GROUP
431: #define petsc_mpi_exscan_              PETSC_MPI_EXSCAN
432: #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
433: #define mpiunisetmoduleblock_          mpiunisetmoduleblock
434: #define mpiunisetfortranbasepointers_  mpiunisetfortranbasepointers
435: #define petsc_mpi_init_                petsc_mpi_init
436: #define petsc_mpi_finalize_            petsc_mpi_finalize
437: #define petsc_mpi_comm_size_           petsc_mpi_comm_size
438: #define petsc_mpi_comm_rank_           petsc_mpi_comm_rank
439: #define petsc_mpi_abort_               petsc_mpi_abort
440: #define petsc_mpi_reduce_              petsc_mpi_reduce
441: #define petsc_mpi_allreduce_           petsc_mpi_allreduce
442: #define petsc_mpi_barrier_             petsc_mpi_barrier
443: #define petsc_mpi_bcast_               petsc_mpi_bcast
444: #define petsc_mpi_gather_              petsc_mpi_gather
445: #define petsc_mpi_allgather_           petsc_mpi_allgather
446: #define petsc_mpi_comm_split_          petsc_mpi_comm_split
447: #define petsc_mpi_scan_                petsc_mpi_scan
448: #define petsc_mpi_send_                petsc_mpi_send
449: #define petsc_mpi_recv_                petsc_mpi_recv
450: #define petsc_mpi_reduce_scatter_      petsc_mpi_reduce_scatter
451: #define petsc_mpi_irecv_               petsc_mpi_irecv
452: #define petsc_mpi_isend_               petsc_mpi_isend
453: #define petsc_mpi_sendrecv_            petsc_mpi_sendrecv
454: #define petsc_mpi_test_                petsc_mpi_test
455: #define petsc_mpi_waitall_             petsc_mpi_waitall
456: #define petsc_mpi_waitany_             petsc_mpi_waitany
457: #define petsc_mpi_allgatherv_          petsc_mpi_allgatherv
458: #define petsc_mpi_alltoallv_           petsc_mpi_alltoallv
459: #define petsc_mpi_comm_create_         petsc_mpi_comm_create
460: #define petsc_mpi_address_             petsc_mpi_address
461: #define petsc_mpi_pack_                petsc_mpi_pack
462: #define petsc_mpi_unpack_              petsc_mpi_unpack
463: #define petsc_mpi_pack_size_           petsc_mpi_pack_size
464: #define petsc_mpi_type_struct_         petsc_mpi_type_struct
465: #define petsc_mpi_type_commit_         petsc_mpi_type_commit
466: #define petsc_mpi_wtime_               petsc_mpi_wtime
467: #define petsc_mpi_cancel_              petsc_mpi_cancel
468: #define petsc_mpi_comm_dup_            petsc_mpi_comm_dup
469: #define petsc_mpi_comm_free_           petsc_mpi_comm_free
470: #define petsc_mpi_get_count_           petsc_mpi_get_count
471: #define petsc_mpi_get_processor_name_  petsc_mpi_get_processor_name
472: #define petsc_mpi_initialized_         petsc_mpi_initialized
473: #define petsc_mpi_iprobe_              petsc_mpi_iprobe
474: #define petsc_mpi_probe_               petsc_mpi_probe
475: #define petsc_mpi_request_free_        petsc_mpi_request_free
476: #define petsc_mpi_ssend_               petsc_mpi_ssend
477: #define petsc_mpi_wait_                petsc_mpi_wait
478: #define petsc_mpi_comm_group_          petsc_mpi_comm_group
479: #define petsc_mpi_exscan_              petsc_mpi_exscan
480: #endif

482: #if defined(PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE)
483: #define petsc_mpi_init_                petsc_mpi_init__
484: #define petsc_mpi_finalize_            petsc_mpi_finalize__
485: #define petsc_mpi_comm_size_           petsc_mpi_comm_size__
486: #define petsc_mpi_comm_rank_           petsc_mpi_comm_rank__
487: #define petsc_mpi_abort_               petsc_mpi_abort__
488: #define petsc_mpi_reduce_              petsc_mpi_reduce__
489: #define petsc_mpi_allreduce_           petsc_mpi_allreduce__
490: #define petsc_mpi_barrier_             petsc_mpi_barrier__
491: #define petsc_mpi_bcast_               petsc_mpi_bcast__
492: #define petsc_mpi_gather_              petsc_mpi_gather__
493: #define petsc_mpi_allgather_           petsc_mpi_allgather__
494: #define petsc_mpi_comm_split_          petsc_mpi_comm_split__
495: #define petsc_mpi_scan_                petsc_mpi_scan__
496: #define petsc_mpi_send_                petsc_mpi_send__
497: #define petsc_mpi_recv_                petsc_mpi_recv__
498: #define petsc_mpi_reduce_scatter_      petsc_mpi_reduce_scatter__
499: #define petsc_mpi_irecv_               petsc_mpi_irecv__
500: #define petsc_mpi_isend_               petsc_mpi_isend__
501: #define petsc_mpi_sendrecv_            petsc_mpi_sendrecv__
502: #define petsc_mpi_test_                petsc_mpi_test__
503: #define petsc_mpi_waitall_             petsc_mpi_waitall__
504: #define petsc_mpi_waitany_             petsc_mpi_waitany__
505: #define petsc_mpi_allgatherv_          petsc_mpi_allgatherv__
506: #define petsc_mpi_alltoallv_           petsc_mpi_alltoallv__
507: #define petsc_mpi_comm_create_         petsc_mpi_comm_create__
508: #define petsc_mpi_address_             petsc_mpi_address__
509: #define petsc_mpi_pack_                petsc_mpi_pack__
510: #define petsc_mpi_unpack_              petsc_mpi_unpack__
511: #define petsc_mpi_pack_size_           petsc_mpi_pack_size__
512: #define petsc_mpi_type_struct_         petsc_mpi_type_struct__
513: #define petsc_mpi_type_commit_         petsc_mpi_type_commit__
514: #define petsc_mpi_wtime_               petsc_mpi_wtime__
515: #define petsc_mpi_cancel_              petsc_mpi_cancel__
516: #define petsc_mpi_comm_dup_            petsc_mpi_comm_dup__
517: #define petsc_mpi_comm_free_           petsc_mpi_comm_free__
518: #define petsc_mpi_get_count_           petsc_mpi_get_count__
519: #define petsc_mpi_get_processor_name_  petsc_mpi_get_processor_name__
520: #define petsc_mpi_initialized_         petsc_mpi_initialized__
521: #define petsc_mpi_iprobe_              petsc_mpi_iprobe__
522: #define petsc_mpi_probe_               petsc_mpi_probe__
523: #define petsc_mpi_request_free_        petsc_mpi_request_free__
524: #define petsc_mpi_ssend_               petsc_mpi_ssend__
525: #define petsc_mpi_wait_                petsc_mpi_wait__
526: #define petsc_mpi_comm_group_          petsc_mpi_comm_group__
527: #define petsc_mpi_exscan_              petsc_mpi_exscan__
528: #endif
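
These blocks exist only to match the Fortran compiler's symbol-mangling convention: each wrapper is written once with a single trailing underscore, and the macros retarget that name to all caps or to no trailing underscore as needed. An editorial sketch of the same pattern on a hypothetical symbol demo_hello_:

    #include <stdio.h>

    #if defined(PETSC_HAVE_FORTRAN_CAPS)
    #define demo_hello_ DEMO_HELLO      /* compilers expecting upper-case symbols */
    #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
    #define demo_hello_ demo_hello      /* compilers expecting no trailing underscore */
    #endif                              /* otherwise: keep the single trailing underscore */

    /* callable as "call demo_hello()" from Fortran under any of the conventions */
    void demo_hello_(void)
    {
      printf("hello from Fortran-callable C\n");
    }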

530: /* Do not build the Fortran interface if MPI namespace collision is to be avoided */
531: #if defined(PETSC_HAVE_FORTRAN)

533: PETSC_EXTERN void mpiunisetmoduleblock_(void);

535: PETSC_EXTERN void mpiunisetfortranbasepointers_(void *f_mpi_in_place)
536: {
537:   MPIUNIF_mpi_in_place   = f_mpi_in_place;
538: }

540: PETSC_EXTERN void petsc_mpi_init_(int *ierr)
541: {
542:   mpiunisetmoduleblock_();
543:   *ierr = MPI_Init((int*)0, (char***)0);
544: }

546: PETSC_EXTERN void petsc_mpi_finalize_(int *ierr)
547: {
548:   *ierr = MPI_Finalize();
549: }

551: PETSC_EXTERN void petsc_mpi_comm_size_(MPI_Comm *comm,int *size,int *ierr)
552: {
553:   *size = 1;
554:   *ierr = 0;
555: }

557: PETSC_EXTERN void petsc_mpi_comm_rank_(MPI_Comm *comm,int *rank,int *ierr)
558: {
559:   *rank = 0;
560:   *ierr = MPI_SUCCESS;
561: }

563: PETSC_EXTERN void petsc_mpi_comm_split_(MPI_Comm *comm,int *color,int *key, MPI_Comm *newcomm, int *ierr)
564: {
565:   *newcomm = *comm;
566:   *ierr = MPI_SUCCESS;
567: }

569: PETSC_EXTERN void petsc_mpi_abort_(MPI_Comm *comm,int *errorcode,int *ierr)
570: {
571:   abort();
572:   *ierr = MPI_SUCCESS;
573: }

575: PETSC_EXTERN void petsc_mpi_reduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *root,int *comm,int *ierr)
576: {
577:   *ierr = MPI_Reduce(sendbuf,recvbuf,*count,*datatype,*op,*root,*comm);
578: }

580: PETSC_EXTERN void petsc_mpi_allreduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
581: {
582:   *ierr = MPI_Allreduce(sendbuf,recvbuf,*count,*datatype,*op,*comm);
583: }

585: PETSC_EXTERN void petsc_mpi_barrier_(MPI_Comm *comm,int *ierr)
586: {
587:   *ierr = MPI_SUCCESS;
588: }

590: PETSC_EXTERN void petsc_mpi_bcast_(void *buf,int *count,int *datatype,int *root,int *comm,int *ierr)
591: {
592:   *ierr = MPI_SUCCESS;
593: }

595: PETSC_EXTERN void petsc_mpi_gather_(void *sendbuf,int *scount,int *sdatatype, void *recvbuf, int *rcount, int *rdatatype, int *root,int *comm,int *ierr)
596: {
597:   *ierr = MPI_Gather(sendbuf,*scount,*sdatatype,recvbuf,rcount,rdatatype,*root,*comm);
598: }

600: PETSC_EXTERN void petsc_mpi_allgather_(void *sendbuf,int *scount,int *sdatatype, void *recvbuf, int *rcount, int *rdatatype,int *comm,int *ierr)
601: {
602:   *ierr = MPI_Allgather(sendbuf,*scount,*sdatatype,recvbuf,rcount,rdatatype,*comm);
603: }

605: PETSC_EXTERN void petsc_mpi_scan_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
606: {
607:   *ierr = MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPI_sizeof(*datatype));
608: }

610: PETSC_EXTERN void petsc_mpi_send_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr)
611: {
612:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
613: }

615: PETSC_EXTERN void petsc_mpi_recv_(void *buf,int *count,int *datatype,int *source,int *tag,int *comm,int *status,int *ierr)
616: {
617:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
618: }

620: PETSC_EXTERN void petsc_mpi_reduce_scatter_(void *sendbuf,void *recvbuf,int *recvcounts,int *datatype,int *op,int *comm,int *ierr)
621: {
622:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
623: }

625: PETSC_EXTERN void petsc_mpi_irecv_(void *buf,int *count, int *datatype, int *source, int *tag, int *comm, int *request, int *ierr)
626: {
627:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
628: }

630: PETSC_EXTERN void petsc_mpi_isend_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *request, int *ierr)
631: {
632:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
633: }

635: PETSC_EXTERN void petsc_mpi_sendrecv_(void *sendbuf,int *sendcount,int *sendtype,int *dest,int *sendtag,void *recvbuf,int *recvcount,int *recvtype,int *source,int *recvtag,int *comm,int *status,int *ierr)
636: {
637:   *ierr = MPIUNI_Memcpy(recvbuf,sendbuf,(*sendcount)*MPI_sizeof(*sendtype));
638: }

640: PETSC_EXTERN void petsc_mpi_test_(int *request,int *flag,int *status,int *ierr)
641: {
642:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
643: }

645: PETSC_EXTERN void petsc_mpi_waitall_(int *count,int *array_of_requests,int *array_of_statuses,int *ierr)
646: {
647:   *ierr = MPI_SUCCESS;
648: }

650: PETSC_EXTERN void petsc_mpi_waitany_(int *count,int *array_of_requests,int * index, int *status,int *ierr)
651: {
652:   *ierr = MPI_SUCCESS;
653: }

655: PETSC_EXTERN void petsc_mpi_allgatherv_(void *sendbuf,int *sendcount,int *sendtype,void *recvbuf,int *recvcounts,int *displs,int *recvtype,int *comm,int *ierr)
656: {
657:   *ierr = MPI_Allgatherv(sendbuf,*sendcount,*sendtype,recvbuf,recvcounts,displs,*recvtype,*comm);
658: }

660: PETSC_EXTERN void petsc_mpi_alltoallv_(void *sendbuf,int *sendcounts,int *sdispls,int *sendtype,void *recvbuf,int *recvcounts,int *rdispls,int *recvtype,int *comm,int *ierr)
661: {
662:   *ierr = MPI_Alltoallv(sendbuf,sendcounts,sdispls,*sendtype,recvbuf,recvcounts,rdispls,*recvtype,*comm);
663: }

665: PETSC_EXTERN void petsc_mpi_comm_create_(int *comm,int *group,int *newcomm,int *ierr)
666: {
667:   *newcomm = *comm;
668:   *ierr = MPI_SUCCESS;
669: }

671: PETSC_EXTERN void petsc_mpi_address_(void *location,MPI_Aint *address,int *ierr)
672: {
673:   *address = (MPI_Aint)((char *)location);
674:   *ierr = MPI_SUCCESS;
675: }

677: PETSC_EXTERN void petsc_mpi_pack_(void *inbuf,int *incount,int *datatype,void *outbuf,int *outsize,int *position,int *comm,int *ierr)
678: {
679:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
680: }

682: PETSC_EXTERN void petsc_mpi_unpack_(void *inbuf,int *insize,int *position,void *outbuf,int *outcount,int *datatype,int *comm,int *ierr)
683: {
684:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
685: }

687: PETSC_EXTERN void petsc_mpi_pack_size_(int *incount,int *datatype,int *comm,int *size,int *ierr)
688: {
689:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
690: }

692: PETSC_EXTERN void petsc_mpi_type_struct_(int *count,int *array_of_blocklengths,int *array_of_displacements,int *array_of_types,int *newtype,int *ierr)
693: {
694:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
695: }

697: PETSC_EXTERN void petsc_mpi_type_commit_(int *datatype,int *ierr)
698: {
699:   *ierr = MPI_SUCCESS;
700: }

702: double petsc_mpi_wtime_(void)
703: {
704:   return 0.0;
705: }

707: PETSC_EXTERN void petsc_mpi_cancel_(int *request,int *ierr)
708: {
709:   *ierr = MPI_SUCCESS;
710: }

712: PETSC_EXTERN void petsc_mpi_comm_dup_(int *comm,int *out,int *ierr)
713: {
714:   *out = *comm;
715:   *ierr = MPI_SUCCESS;
716: }

718: PETSC_EXTERN void petsc_mpi_comm_free_(int *comm,int *ierr)
719: {
720:   *ierr = MPI_SUCCESS;
721: }

723: PETSC_EXTERN void petsc_mpi_get_count_(int *status,int *datatype,int *count,int *ierr)
724: {
725:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
726: }

728: PETSC_EXTERN void petsc_mpi_get_processor_name_(char *name,int *result_len,int *ierr,PETSC_FORTRAN_CHARLEN_T len)
729: {
730:   MPIUNI_Memcpy(name,"localhost",9*sizeof(char));
731:   *result_len = 9;
732:   *ierr = MPI_SUCCESS;
733: }

735: PETSC_EXTERN void petsc_mpi_initialized_(int *flag,int *ierr)
736: {
737:   *flag = MPI_was_initialized;
738:   *ierr = MPI_SUCCESS;
739: }

741: PETSC_EXTERN void petsc_mpi_iprobe_(int *source,int *tag,int *comm,int *flag,int *status,int *ierr)
742: {
743:   *ierr = MPI_SUCCESS;
744: }

746: PETSC_EXTERN void petsc_mpi_probe_(int *source,int *tag,int *comm,int *flag,int *status,int *ierr)
747: {
748:   *ierr = MPI_SUCCESS;
749: }

751: PETSC_EXTERN void petsc_mpi_request_free_(int *request,int *ierr)
752: {
753:   *ierr = MPI_SUCCESS;
754: }

756: PETSC_EXTERN void petsc_mpi_ssend_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr)
757: {
758:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
759: }

761: PETSC_EXTERN void petsc_mpi_wait_(int *request,int *status,int *ierr)
762: {
763:   *ierr = MPI_SUCCESS;
764: }

766: PETSC_EXTERN void petsc_mpi_comm_group_(int *comm,int *group,int *ierr)
767: {
768:   *ierr = MPI_SUCCESS;
769: }

771: PETSC_EXTERN void petsc_mpi_exscan_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
772: {
773:   *ierr = MPI_SUCCESS;
774: }

776: #endif /* PETSC_HAVE_FORTRAN */

778: #if defined(__cplusplus)
779: }
780: #endif