33#ifndef GKO_PUBLIC_CORE_BASE_MPI_HPP_
34#define GKO_PUBLIC_CORE_BASE_MPI_HPP_
42#include <ginkgo/config.hpp>
43#include <ginkgo/core/base/exception.hpp>
44#include <ginkgo/core/base/exception_helpers.hpp>
45#include <ginkgo/core/base/executor.hpp>
46#include <ginkgo/core/base/types.hpp>
47#include <ginkgo/core/base/utils_helper.hpp>
57namespace experimental {
72#if GINKGO_HAVE_GPU_AWARE_MPI
90#define GKO_REGISTER_MPI_TYPE(input_type, mpi_type) \
92 struct type_impl<input_type> { \
93 static MPI_Datatype get_type() { return mpi_type; } \
166 *
*this = std::move(other);
178 if (this != &other) {
229 static bool is_finalized()
236 static bool is_initialized()
261 this->required_thread_support_ = static_cast<int>(thread_t);
262 GKO_ASSERT_NO_MPI_ERRORS(
264 &(this->provided_thread_support_)));
278 int required_thread_support_;
279 int provided_thread_support_;
293 void operator()(pointer comm) const
331 template <typename T>
415 std::vector<status> stat;
416 for (std::size_t i = 0; i < req.size(); ++i) {
450 : comm_(), force_host_buffer_(force_host_buffer)
481 GKO_ASSERT_NO_MPI_ERRORS(
493 bool force_host_buffer() const { return force_host_buffer_; }
500 int size() const { return get_num_ranks(); }
507 int rank() const { return get_my_rank(); };
523 return compare(rhs.get());
555 template <typename SendType>
560 auto guard = exec->get_scoped_device_id_guard();
561 GKO_ASSERT_NO_MPI_ERRORS(
582 template <typename SendType>
587 auto guard = exec->get_scoped_device_id_guard();
589 GKO_ASSERT_NO_MPI_ERRORS(
610 template <typename RecvType>
615 auto guard = exec->get_scoped_device_id_guard();
617 GKO_ASSERT_NO_MPI_ERRORS(
638 template <typename RecvType>
643 auto guard = exec->get_scoped_device_id_guard();
645 GKO_ASSERT_NO_MPI_ERRORS(
663 template <typename BroadcastType>
667 auto guard = exec->get_scoped_device_id_guard();
688 template <typename BroadcastType>
692 auto guard = exec->get_scoped_device_id_guard();
694 GKO_ASSERT_NO_MPI_ERRORS(
714 template <typename ReduceType>
715 void reduce(std::shared_ptr<const Executor> exec,
719 auto guard = exec->get_scoped_device_id_guard();
741 template <typename ReduceType>
746 auto guard = exec->get_scoped_device_id_guard();
767 template <typename ReduceType>
771 auto guard = exec->get_scoped_device_id_guard();
774 operation, this->get()));
792 template <typename ReduceType>
797 auto guard = exec->get_scoped_device_id_guard();
801 operation, this->get(), req.get()));
819 template <typename ReduceType>
822 int count, MPI_Op operation) const
824 auto guard = exec->get_scoped_device_id_guard();
827 operation, this->get()));
846 template <typename ReduceType>
849 int count, MPI_Op operation) const
851 auto guard = exec->get_scoped_device_id_guard();
855 operation, this->get(), req.get()));
875 template <typename SendType, typename RecvType>
876 void gather(std::shared_ptr<const Executor> exec,
881 auto guard = exec->get_scoped_device_id_guard();
882 GKO_ASSERT_NO_MPI_ERRORS(
907 template <typename SendType, typename RecvType>
913 auto guard = exec->get_scoped_device_id_guard();
940 template <typename SendType, typename RecvType>
941 void gather_v(std::shared_ptr<const Executor> exec,
946 auto guard = exec->get_scoped_device_id_guard();
973 template <typename SendType, typename RecvType>
979 auto guard = exec->get_scoped_device_id_guard();
1004 template <typename SendType, typename RecvType>
1009 auto guard = exec->get_scoped_device_id_guard();
1034 template <typename SendType, typename RecvType>
1039 auto guard = exec->get_scoped_device_id_guard();
1044 this->get(), req.get()));
1063 template <typename SendType, typename RecvType>
1064 void scatter(std::shared_ptr<const Executor> exec,
1069 auto guard = exec->get_scoped_device_id_guard();
1094 template <typename SendType, typename RecvType>
1100 auto guard = exec->get_scoped_device_id_guard();
1105 this->get(), req.get()));
1127 template <typename SendType, typename RecvType>
1133 auto guard = exec->get_scoped_device_id_guard();
1160 template <typename SendType, typename RecvType>
1166 auto guard = exec->get_scoped_device_id_guard();
1168 GKO_ASSERT_NO_MPI_ERRORS(
1192 template <typename RecvType>
1196 auto guard = exec->get_scoped_device_id_guard();
1221 template <typename RecvType>
1225 auto guard = exec->get_scoped_device_id_guard();
1230 this->get(), req.get()));
1250 template <typename SendType, typename RecvType>
1255 auto guard = exec->get_scoped_device_id_guard();
1280 template <typename SendType, typename RecvType>
1285 auto guard = exec->get_scoped_device_id_guard();
1290 this->get(), req.get()));
1313 template <typename SendType, typename RecvType>
1346 auto guard = exec->get_scoped_device_id_guard();
1378 auto guard = exec->get_scoped_device_id_guard();
1406 template <typename SendType, typename RecvType>
1433 template <typename ScanType>
1437 auto guard = exec->get_scoped_device_id_guard();
1440 operation, this->get()));
1459 template <typename ScanType>
1462 int count, MPI_Op operation) const
1464 auto guard = exec->get_scoped_device_id_guard();
1468 operation, this->get(), req.get()));
1473 std::shared_ptr<MPI_Comm> comm_;
1474 bool force_host_buffer_;
1476 int get_my_rank() const
1483 int get_node_local_rank() const
1494 int get_num_ranks() const
1534 template <typename ValueType>
1540 enum class create_type { allocate = 1, create = 2, dynamic_create = 3 };
1593 auto guard = exec->get_scoped_device_id_guard();
1594 unsigned size = num_elems * sizeof(ValueType);
1595 if (c_type == create_type::create) {
1598 } else if (c_type == create_type::dynamic_create) {
1599 GKO_ASSERT_NO_MPI_ERRORS(
1601 } else if (c_type == create_type::allocate) {
1605 GKO_NOT_IMPLEMENTED;
1638 if (lock_t == lock_type::shared) {
1639 GKO_ASSERT_NO_MPI_ERRORS(
1641 } else if (lock_t == lock_type::exclusive) {
1642 GKO_ASSERT_NO_MPI_ERRORS(
1645 GKO_NOT_IMPLEMENTED;
1688 GKO_ASSERT_NO_MPI_ERRORS(MPI_Win_flush(rank, this->window_));
1745 template <typename PutType>
1750 auto guard = exec->get_scoped_device_id_guard();
1751 GKO_ASSERT_NO_MPI_ERRORS(
1769 template <typename PutType>
1775 auto guard = exec->get_scoped_device_id_guard();
1795 template <typename PutType>
1801 auto guard = exec->get_scoped_device_id_guard();
1821 template <typename PutType>
1827 auto guard = exec->get_scoped_device_id_guard();
1847 template <typename GetType>
1852 auto guard = exec->get_scoped_device_id_guard();
1853 GKO_ASSERT_NO_MPI_ERRORS(
1871 template <typename GetType>
1876 auto guard = exec->get_scoped_device_id_guard();
1898 template <typename GetType>
1905 auto guard = exec->get_scoped_device_id_guard();
1928 template <typename GetType>
1936 auto guard = exec->get_scoped_device_id_guard();
1957 template <typename GetType>
1963 auto guard = exec->get_scoped_device_id_guard();
A thin wrapper of MPI_Comm that supports most MPI calls.
Definition mpi.hpp:437
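A minimal usage sketch of the communicator wrapper (assuming the classes live in the gko::experimental::mpi namespace suggested by the experimental namespace above, and that ginkgo/core/base/mpi.hpp is the public include path; the helper name is illustrative):

#include <ginkgo/core/base/mpi.hpp>

namespace mpi = gko::experimental::mpi;  // assumed namespace

void report_topology(MPI_Comm raw_comm)
{
    mpi::communicator comm(raw_comm);  // non-owning wrapper
    int my_rank = comm.rank();         // rank of the calling process
    int num_ranks = comm.size();       // number of ranks in the communicator
    (void)my_rank;
    (void)num_ranks;
}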
status recv(std::shared_ptr< const Executor > exec, RecvType *recv_buffer, const int recv_count, const int source_rank, const int recv_tag) const
Receive data from source rank.
Definition mpi.hpp:611
void scatter_v(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int *send_counts, const int *displacements, RecvType *recv_buffer, const int recv_count, int root_rank) const
Scatter data from root rank to all ranks in the communicator with offsets.
Definition mpi.hpp:1128
request i_broadcast(std::shared_ptr< const Executor > exec, BroadcastType *buffer, int count, int root_rank) const
(Non-blocking) Broadcast data from calling process to all ranks in the communicator.
Definition mpi.hpp:689
void gather(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int recv_count, int root_rank) const
Gather data onto the root rank from all ranks in the communicator.
Definition mpi.hpp:876
request i_recv(std::shared_ptr< const Executor > exec, RecvType *recv_buffer, const int recv_count, const int source_rank, const int recv_tag) const
(Non-blocking, immediate return) Receive data from the source rank.
Definition mpi.hpp:639
request i_scatter_v(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int *send_counts, const int *displacements, RecvType *recv_buffer, const int recv_count, int root_rank) const
(Non-blocking) Scatter data from root rank to all ranks in the communicator with offsets.
Definition mpi.hpp:1161
void all_to_all(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int recv_count) const
Communicate data from all ranks to all other ranks (MPI_Alltoall).
Definition mpi.hpp:1251
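A sketch of the blocking all-to-all overload (the helper name is illustrative; include path and namespace are assumptions as above): every rank sends one int to every other rank.

#include <memory>
#include <vector>
#include <ginkgo/core/base/mpi.hpp>

namespace mpi = gko::experimental::mpi;  // assumed namespace

void exchange_one_each(std::shared_ptr<const gko::Executor> exec,
                       const mpi::communicator& comm)
{
    std::vector<int> send(comm.size(), comm.rank());  // one entry per peer
    std::vector<int> recv(comm.size(), 0);
    // send_count/recv_count are per-rank counts, as in MPI_Alltoall.
    comm.all_to_all(exec, send.data(), 1, recv.data(), 1);
}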
request i_all_to_all(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int recv_count) const
(Non-blocking) Communicate data from all ranks to all other ranks (MPI_Ialltoall).
Definition mpi.hpp:1281
request i_all_to_all_v(std::shared_ptr< const Executor > exec, const void *send_buffer, const int *send_counts, const int *send_offsets, MPI_Datatype send_type, void *recv_buffer, const int *recv_counts, const int *recv_offsets, MPI_Datatype recv_type) const
(Non-blocking) Communicate data from all ranks to all other ranks with offsets (MPI_Ialltoallv).
Definition mpi.hpp:1371
bool operator!=(const communicator &rhs) const
Compare two communicator objects for non-equality.
Definition mpi.hpp:531
void scatter(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int recv_count, int root_rank) const
Scatter data from root rank to all ranks in the communicator.
Definition mpi.hpp:1064
void synchronize() const
This function is used to synchronize the ranks in the communicator.
Definition mpi.hpp:537
int rank() const
Return the rank of the calling process in the communicator.
Definition mpi.hpp:507
request i_reduce(std::shared_ptr< const Executor > exec, const ReduceType *send_buffer, ReduceType *recv_buffer, int count, MPI_Op operation, int root_rank) const
(Non-blocking) Reduce data into root from all calling processes on the same communicator.
Definition mpi.hpp:742
int size() const
Return the size of the communicator (number of ranks).
Definition mpi.hpp:500
void send(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, const int destination_rank, const int send_tag) const
(Blocking) Send data from the calling process to the destination rank.
Definition mpi.hpp:556
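A sketch of a blocking point-to-point pair (helper name and tag are illustrative): rank 0 sends a buffer to rank 1, which posts the matching recv.

#include <memory>
#include <vector>
#include <ginkgo/core/base/mpi.hpp>

namespace mpi = gko::experimental::mpi;  // assumed namespace

void ping(std::shared_ptr<const gko::Executor> exec,
          const mpi::communicator& comm)
{
    const int tag = 0;
    std::vector<double> buf(16, 1.0);
    if (comm.rank() == 0) {
        comm.send(exec, buf.data(), static_cast<int>(buf.size()), 1, tag);
    } else if (comm.rank() == 1) {
        comm.recv(exec, buf.data(), static_cast<int>(buf.size()), 0, tag);
    }
}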
request i_all_to_all_v(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int *send_counts, const int *send_offsets, RecvType *recv_buffer, const int *recv_counts, const int *recv_offsets) const
(Non-blocking) Communicate data from all ranks to all other ranks with offsets (MPI_Ialltoallv).
Definition mpi.hpp:1407
request i_gather(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int recv_count, int root_rank) const
(Non-blocking) Gather data onto the root rank from all ranks in the communicator.
Definition mpi.hpp:908
void all_to_all(std::shared_ptr< const Executor > exec, RecvType *recv_buffer, const int recv_count) const
(In-place) Communicate data from all ranks to all other ranks in place (MPI_Alltoall).
Definition mpi.hpp:1193
void all_to_all_v(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int *send_counts, const int *send_offsets, RecvType *recv_buffer, const int *recv_counts, const int *recv_offsets) const
Communicate data from all ranks to all other ranks with offsets (MPI_Alltoallv).
Definition mpi.hpp:1314
request i_all_reduce(std::shared_ptr< const Executor > exec, ReduceType *recv_buffer, int count, MPI_Op operation) const
(In-place, non-blocking) Reduce data from all calling processes on the same communicator.
Definition mpi.hpp:793
request i_all_to_all(std::shared_ptr< const Executor > exec, RecvType *recv_buffer, const int recv_count) const
(In-place, Non-blocking) Communicate data from all ranks to all other ranks in place (MPI_Ialltoall).
Definition mpi.hpp:1222
void all_to_all_v(std::shared_ptr< const Executor > exec, const void *send_buffer, const int *send_counts, const int *send_offsets, MPI_Datatype send_type, void *recv_buffer, const int *recv_counts, const int *recv_offsets, MPI_Datatype recv_type) const
Communicate data from all ranks to all other ranks with offsets (MPI_Alltoallv).
Definition mpi.hpp:1340
int node_local_rank() const
Return the node local rank of the calling process in the communicator.
Definition mpi.hpp:514
void broadcast(std::shared_ptr< const Executor > exec, BroadcastType *buffer, int count, int root_rank) const
Broadcast data from calling process to all ranks in the communicator.
Definition mpi.hpp:664
const MPI_Comm & get() const
Return the underlying MPI_Comm object.
Definition mpi.hpp:491
communicator(const MPI_Comm &comm, int color, int key)
Create a communicator object from an existing MPI_Comm object using color and key.
Definition mpi.hpp:463
void all_reduce(std::shared_ptr< const Executor > exec, ReduceType *recv_buffer, int count, MPI_Op operation) const
(In-place) Reduce data from all calling processes on the same communicator.
Definition mpi.hpp:768
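A sketch of the in-place overload (helper name is illustrative): each rank contributes one value and receives the global sum in the same location.

#include <memory>
#include <ginkgo/core/base/mpi.hpp>

namespace mpi = gko::experimental::mpi;  // assumed namespace

double global_sum(std::shared_ptr<const gko::Executor> exec,
                  const mpi::communicator& comm, double local_value)
{
    // In-place: local_value is both the send and the receive buffer.
    comm.all_reduce(exec, &local_value, 1, MPI_SUM);
    return local_value;
}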
void all_gather(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int recv_count) const
Gather data onto all ranks from all ranks in the communicator.
Definition mpi.hpp:1005
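A sketch (helper name is illustrative): gather one int from every rank so that every rank ends up with the full list.

#include <memory>
#include <vector>
#include <ginkgo/core/base/mpi.hpp>

namespace mpi = gko::experimental::mpi;  // assumed namespace

std::vector<int> gather_all_ranks(std::shared_ptr<const gko::Executor> exec,
                                  const mpi::communicator& comm)
{
    int my_rank = comm.rank();
    std::vector<int> all_ranks(comm.size());
    // Each rank contributes one element; every rank receives comm.size() of them.
    comm.all_gather(exec, &my_rank, 1, all_ranks.data(), 1);
    return all_ranks;
}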
request i_all_gather(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int recv_count) const
(Non-blocking) Gather data onto all ranks from all ranks in the communicator.
Definition mpi.hpp:1035
bool operator==(const communicator &rhs) const
Compare two communicator objects for equality.
Definition mpi.hpp:521
void all_reduce(std::shared_ptr< const Executor > exec, const ReduceType *send_buffer, ReduceType *recv_buffer, int count, MPI_Op operation) const
Reduce data from all calling processes on the same communicator.
Definition mpi.hpp:820
request i_gather_v(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int *recv_counts, const int *displacements, int root_rank) const
(Non-blocking) Gather data onto the root rank from all ranks in the communicator with offsets.
Definition mpi.hpp:974
request i_all_reduce(std::shared_ptr< const Executor > exec, const ReduceType *send_buffer, ReduceType *recv_buffer, int count, MPI_Op operation) const
(Non-blocking) Reduce data from all calling processes on the same communicator.
Definition mpi.hpp:847
communicator(const MPI_Comm &comm, bool force_host_buffer=false)
Non-owning constructor for an existing communicator of type MPI_Comm.
Definition mpi.hpp:449
request i_scan(std::shared_ptr< const Executor > exec, const ScanType *send_buffer, ScanType *recv_buffer, int count, MPI_Op operation) const
(Non-blocking) Performs a scan operation with the given operator.
Definition mpi.hpp:1460
void reduce(std::shared_ptr< const Executor > exec, const ReduceType *send_buffer, ReduceType *recv_buffer, int count, MPI_Op operation, int root_rank) const
Reduce data into root from all calling processes on the same communicator.
Definition mpi.hpp:715
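A sketch of the rooted reduction (helper name is illustrative): sum one value per rank into rank 0; the receive buffer is only meaningful on the root.

#include <memory>
#include <ginkgo/core/base/mpi.hpp>

namespace mpi = gko::experimental::mpi;  // assumed namespace

void reduce_to_root(std::shared_ptr<const gko::Executor> exec,
                    const mpi::communicator& comm, double local_value)
{
    double result = 0.0;
    comm.reduce(exec, &local_value, &result, 1, MPI_SUM, 0);
    if (comm.rank() == 0) {
        // result now holds the sum over all ranks
    }
}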
request i_scatter(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int recv_count, int root_rank) const
(Non-blocking) Scatter data from root rank to all ranks in the communicator.
Definition mpi.hpp:1095
void scan(std::shared_ptr< const Executor > exec, const ScanType *send_buffer, ScanType *recv_buffer, int count, MPI_Op operation) const
Performs a scan operation with the given operator.
Definition mpi.hpp:1434
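A sketch (helper name is illustrative): an inclusive prefix sum over ranks, a common way to turn per-rank local sizes into global offsets.

#include <memory>
#include <ginkgo/core/base/mpi.hpp>

namespace mpi = gko::experimental::mpi;  // assumed namespace

int inclusive_offset(std::shared_ptr<const gko::Executor> exec,
                     const mpi::communicator& comm, int local_size)
{
    int offset = 0;
    // After the call, offset == sum of local_size over ranks 0..rank.
    comm.scan(exec, &local_size, &offset, 1, MPI_SUM);
    return offset;
}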
void gather_v(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int *recv_counts, const int *displacements, int root_rank) const
Gather data onto the root rank from all ranks in the communicator with offsets.
Definition mpi.hpp:941
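A sketch (helper name is illustrative): gather variable-sized blocks onto rank 0; the counts and displacements only need to be meaningful on the root.

#include <memory>
#include <numeric>
#include <vector>
#include <ginkgo/core/base/mpi.hpp>

namespace mpi = gko::experimental::mpi;  // assumed namespace

void gather_variable(std::shared_ptr<const gko::Executor> exec,
                     const mpi::communicator& comm,
                     const std::vector<int>& local_data)
{
    const int root = 0;
    const int local_count = static_cast<int>(local_data.size());
    std::vector<int> recv_counts(comm.size(), 0);
    comm.gather(exec, &local_count, 1, recv_counts.data(), 1, root);
    std::vector<int> displacements(comm.size() + 1, 0);
    std::partial_sum(recv_counts.begin(), recv_counts.end(),
                     displacements.begin() + 1);
    // On non-root ranks the receive arguments are ignored.
    std::vector<int> gathered(displacements.back());
    comm.gather_v(exec, local_data.data(), local_count, gathered.data(),
                  recv_counts.data(), displacements.data(), root);
}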
request i_send(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, const int destination_rank, const int send_tag) const
(Non-blocking, immediate return) Send data from the calling process to the destination rank.
Definition mpi.hpp:583
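A sketch of overlapping non-blocking point-to-point communication in a ring (helper name is illustrative): post the receive and the send, do other work, then wait on both requests.

#include <memory>
#include <vector>
#include <ginkgo/core/base/mpi.hpp>

namespace mpi = gko::experimental::mpi;  // assumed namespace

void ring_exchange(std::shared_ptr<const gko::Executor> exec,
                   const mpi::communicator& comm)
{
    const int tag = 1;
    const int send_to = (comm.rank() + 1) % comm.size();
    const int recv_from = (comm.rank() - 1 + comm.size()) % comm.size();
    std::vector<double> send_buf(8, static_cast<double>(comm.rank()));
    std::vector<double> recv_buf(8, 0.0);
    auto recv_req = comm.i_recv(exec, recv_buf.data(), 8, recv_from, tag);
    auto send_req = comm.i_send(exec, send_buf.data(), 8, send_to, tag);
    // ... independent work can overlap with the communication here ...
    send_req.wait();
    auto st = recv_req.wait();  // status of the completed receive
    (void)st;
}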
communicator(const communicator &comm, int color, int key)
Create a communicator object by splitting an existing communicator using color and key.
Definition mpi.hpp:478
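A sketch (helper name is illustrative): split a communicator into two halves by color; the key controls rank ordering inside each new communicator.

#include <ginkgo/core/base/mpi.hpp>

namespace mpi = gko::experimental::mpi;  // assumed namespace

void split_in_halves(const mpi::communicator& world)
{
    const int color = world.rank() < world.size() / 2 ? 0 : 1;
    mpi::communicator half(world, color, world.rank());
    // half.rank() and half.size() now refer to the sub-communicator.
}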
A move-only wrapper for a contiguous MPI_Datatype.
Definition mpi.hpp:130
MPI_Datatype get() const
Access the underlying MPI_Datatype.
Definition mpi.hpp:199
contiguous_type(int count, MPI_Datatype old_type)
Constructs a wrapper for a contiguous MPI_Datatype.
Definition mpi.hpp:138
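A sketch (helper name is illustrative): describe a block of three doubles as a single contiguous datatype; the wrapper frees the committed type when it goes out of scope.

#include <ginkgo/core/base/mpi.hpp>

namespace mpi = gko::experimental::mpi;  // assumed namespace

void use_contiguous_type()
{
    mpi::contiguous_type block(3, MPI_DOUBLE);
    MPI_Datatype handle = block.get();  // usable in hand-written MPI calls
    (void)handle;
}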
contiguous_type()
Constructs an empty wrapper with MPI_DATATYPE_NULL.
Definition mpi.hpp:147
contiguous_type(const contiguous_type &)=delete
Disallow copying of wrapper type.
contiguous_type(contiguous_type &&other) noexcept
Move constructor, leaves other with MPI_DATATYPE_NULL.
Definition mpi.hpp:164
contiguous_type & operator=(contiguous_type &&other) noexcept
Move assignment, leaves other with MPI_DATATYPE_NULL.
Definition mpi.hpp:176
contiguous_type & operator=(const contiguous_type &)=delete
Disallow copying of wrapper type.
~contiguous_type()
Destructs object by freeing wrapped MPI_Datatype.
Definition mpi.hpp:187
Class that sets up and finalizes the MPI environment.
Definition mpi.hpp:227
~environment()
Call MPI_Finalize at the end of the scope of this class.
Definition mpi.hpp:270
int get_provided_thread_support() const
Return the provided thread support.
Definition mpi.hpp:248
environment(int &argc, char **&argv, const thread_type thread_t=thread_type::serialized)
Call MPI_Init_thread and initialize the MPI environment.
Definition mpi.hpp:258
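A sketch of tying the MPI lifetime to a scope in main() (include path and namespace are assumptions, as above):

#include <ginkgo/core/base/mpi.hpp>

int main(int argc, char* argv[])
{
    gko::experimental::mpi::environment env(argc, argv);  // MPI_Init_thread
    gko::experimental::mpi::communicator comm(MPI_COMM_WORLD);
    // ... distributed work ...
    return 0;
}  // env leaves scope here and calls MPI_Finalize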
The request class is a light, move-only wrapper around the MPI_Request handle.
Definition mpi.hpp:348
request()
The default constructor.
Definition mpi.hpp:354
MPI_Request * get()
Get a pointer to the underlying MPI_Request handle.
Definition mpi.hpp:385
status wait()
Allows a rank to wait on a particular request handle.
Definition mpi.hpp:393
This class wraps an MPI_Win window handle with RAII functionality.
Definition mpi.hpp:1535
void get(std::shared_ptr< const Executor > exec, GetType *origin_buffer, const int origin_count, const int target_rank, const unsigned int target_disp, const int target_count) const
Get data from the target window.
Definition mpi.hpp:1848
request r_put(std::shared_ptr< const Executor > exec, const PutType *origin_buffer, const int origin_count, const int target_rank, const unsigned int target_disp, const int target_count) const
(Non-blocking) Put data into the target window.
Definition mpi.hpp:1770
window()
The default constructor.
Definition mpi.hpp:1550
void get_accumulate(std::shared_ptr< const Executor > exec, GetType *origin_buffer, const int origin_count, GetType *result_buffer, const int result_count, const int target_rank, const unsigned int target_disp, const int target_count, MPI_Op operation) const
Get and accumulate data from the target window.
Definition mpi.hpp:1899
void put(std::shared_ptr< const Executor > exec, const PutType *origin_buffer, const int origin_count, const int target_rank, const unsigned int target_disp, const int target_count) const
Put data into the target window.
Definition mpi.hpp:1746
~window()
The destructor, which calls MPI_Win_free when the window goes out of scope.
Definition mpi.hpp:1728
lock_type
The lock type for passive target synchronization of the windows.
Definition mpi.hpp:1545
window & operator=(window &&other)
The move assignment operator.
Definition mpi.hpp:1571
request r_accumulate(std::shared_ptr< const Executor > exec, const PutType *origin_buffer, const int origin_count, const int target_rank, const unsigned int target_disp, const int target_count, MPI_Op operation) const
(Non-blocking) Accumulate data into the target window.
Definition mpi.hpp:1822
request r_get_accumulate(std::shared_ptr< const Executor > exec, GetType *origin_buffer, const int origin_count, GetType *result_buffer, const int result_count, const int target_rank, const unsigned int target_disp, const int target_count, MPI_Op operation) const
(Non-blocking) Get and accumulate data (with handle) from the target window.
Definition mpi.hpp:1929
void fetch_and_op(std::shared_ptr< const Executor > exec, GetType *origin_buffer, GetType *result_buffer, const int target_rank, const unsigned int target_disp, MPI_Op operation) const
Fetch and operate on data from the target window (an optimized version of get_accumulate).
Definition mpi.hpp:1958
void sync() const
Synchronize the public and private buffers for the window object.
Definition mpi.hpp:1723
void unlock(int rank) const
Close the epoch using MPI_Win_unlock for the window object.
Definition mpi.hpp:1655
void fence(int assert=0) const
Perform active target synchronization using MPI_Win_fence for the window object.
Definition mpi.hpp:1622
void flush(int rank) const
Flush the existing RDMA operations on the target rank for the calling process for the window object.
Definition mpi.hpp:1686
void unlock_all() const
Close the epoch on all ranks using MPI_Win_unlock_all for the window object.
Definition mpi.hpp:1675
create_type
The create type for the window object.
Definition mpi.hpp:1540
window(std::shared_ptr< const Executor > exec, ValueType *base, int num_elems, const communicator &comm, const int disp_unit=sizeof(ValueType), MPI_Info input_info=MPI_INFO_NULL, create_type c_type=create_type::create)
Create a window object with a given data pointer and type.
Definition mpi.hpp:1588
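A sketch of active target synchronization (helper name is illustrative): expose a local buffer through a window and write one value into the next rank's buffer between two fences.

#include <memory>
#include <vector>
#include <ginkgo/core/base/mpi.hpp>

namespace mpi = gko::experimental::mpi;  // assumed namespace

void ring_put(std::shared_ptr<const gko::Executor> exec,
              const mpi::communicator& comm)
{
    std::vector<double> local(4, 0.0);
    mpi::window<double> win(exec, local.data(),
                            static_cast<int>(local.size()), comm);
    const int target = (comm.rank() + 1) % comm.size();
    const double value = static_cast<double>(comm.rank());
    win.fence();                             // open the access epoch
    win.put(exec, &value, 1, target, 0, 1);  // write at displacement 0
    win.fence();                             // close the epoch; data is visible
}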
void accumulate(std::shared_ptr< const Executor > exec, const PutType *origin_buffer, const int origin_count, const int target_rank, const unsigned int target_disp, const int target_count, MPI_Op operation) const
Accumulate data into the target window.
Definition mpi.hpp:1796
void lock_all(int assert=0) const
Create the epoch on all ranks using MPI_Win_lock_all for the window object.
Definition mpi.hpp:1666
void lock(int rank, lock_type lock_t=lock_type::shared, int assert=0) const
Create an epoch using MPI_Win_lock for the window object.
Definition mpi.hpp:1635
void flush_all_local() const
Flush all the local existing RDMA operations on the calling rank for the window object.
Definition mpi.hpp:1715
window(window &&other)
The move constructor.
Definition mpi.hpp:1562
void flush_local(int rank) const
Flush the existing RDMA operations on the calling rank from the target rank for the window object.
Definition mpi.hpp:1697
MPI_Win get_window() const
Get the underlying window object of MPI_Win type.
Definition mpi.hpp:1614
request r_get(std::shared_ptr< const Executor > exec, GetType *origin_buffer, const int origin_count, const int target_rank, const unsigned int target_disp, const int target_count) const
(Non-blocking) Get data (with handle) from the target window.
Definition mpi.hpp:1872
void flush_all() const
Flush all the existing RDMA operations for the calling process for the window object.
Definition mpi.hpp:1706
int map_rank_to_device_id(MPI_Comm comm, int num_devices)
Maps each MPI rank to a single device id in a round-robin manner.
bool requires_host_buffer(const std::shared_ptr< const Executor > &exec, const communicator &comm)
Checks if the combination of Executor and communicator requires passing MPI buffers from the host memory.
double get_walltime()
Get the current wall time.
Definition mpi.hpp:1523
constexpr bool is_gpu_aware()
Return whether GPU-aware functionality is available.
Definition mpi.hpp:70
thread_type
This enum specifies the threading type to be used when creating an MPI environment.
Definition mpi.hpp:210
std::vector< status > wait_all(std::vector< request > &req)
Allows a rank to wait on multiple request handles.
Definition mpi.hpp:413
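A sketch (helper name is illustrative; matching sends are assumed to be posted elsewhere): collect several outstanding receive requests and complete them together.

#include <memory>
#include <vector>
#include <ginkgo/core/base/mpi.hpp>

namespace mpi = gko::experimental::mpi;  // assumed namespace

void receive_from_all(std::shared_ptr<const gko::Executor> exec,
                      const mpi::communicator& comm,
                      std::vector<double>& recv_buf)
{
    // recv_buf is assumed to hold at least comm.size() elements.
    std::vector<mpi::request> reqs;
    for (int source = 0; source < comm.size(); ++source) {
        if (source != comm.rank()) {
            reqs.push_back(
                comm.i_recv(exec, recv_buf.data() + source, 1, source, 0));
        }
    }
    auto statuses = mpi::wait_all(reqs);  // one status per request
    (void)statuses;
}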
The Ginkgo namespace.
Definition abstract_factory.hpp:48
constexpr T one()
Returns the multiplicative identity for T.
Definition math.hpp:803
The status struct is a light wrapper around the MPI_Status struct.
Definition mpi.hpp:308
int get_count(const T *data) const
Get the number of elements received by the communication call.
Definition mpi.hpp:332
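A sketch (helper name is illustrative): use the status returned by a blocking receive to query how many elements actually arrived.

#include <memory>
#include <vector>
#include <ginkgo/core/base/mpi.hpp>

namespace mpi = gko::experimental::mpi;  // assumed namespace

void count_received(std::shared_ptr<const gko::Executor> exec,
                    const mpi::communicator& comm, std::vector<int>& buffer)
{
    auto st = comm.recv(exec, buffer.data(),
                        static_cast<int>(buffer.size()), 0, 0);
    // The pointer is only used to deduce the MPI datatype (int here).
    int received = st.get_count(buffer.data());
    (void)received;
}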
status()
The default constructor.
Definition mpi.hpp:312
MPI_Status * get()
Get a pointer to the underlying MPI_Status object.
Definition mpi.hpp:319
A struct that is used to determine the MPI_Datatype of a specified type.
Definition mpi.hpp:105