Commit c3ae74a8 authored by Iulian Grindeanu

fix clang warnings on OSX

parent 44b9e0de
......@@ -359,7 +359,7 @@ ErrorCode Core::query_interface_type( const std::type_info& type, void*& ptr )
}
else if (type == typeid(WriteUtilIface)) {
if(!mMBWriteUtil)
mMBWriteUtil = new WriteUtil(this, mError);
mMBWriteUtil = new WriteUtil(this);
ptr = static_cast<WriteUtilIface*>(mMBWriteUtil);
}
else if (type == typeid(ReaderWriterSet)) {
......
......@@ -104,7 +104,10 @@ ErrorCode DenseTag::get_array(const SequenceManager* seqman,
const unsigned char* const& ptr,
size_t& count) const
{
return get_array(seqman, NULL, h, ptr, count);
// cast away the const-ness; do we really want to do this?
// probably we are not calling this anywhere;
// clang compiler found this
return get_array(seqman, NULL, h, const_cast<unsigned char *> ( ptr ), count);
}
ErrorCode DenseTag::get_array(const SequenceManager* seqman,
......@@ -140,7 +143,9 @@ ErrorCode DenseTag::get_array(const SequenceManager* seqman,
ErrorCode DenseTag::get_array(const EntitySequence* seq,
const unsigned char* const & ptr) const
{
return get_array(seq, ptr);
// cast away the constness; otherwise it would be infinite recursion
// probably we are not calling this anywhere
return get_array(seq, const_cast<unsigned char *> ( ptr ));
}
ErrorCode DenseTag::get_array(const EntitySequence* seq,
......
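Note on the two DenseTag hunks above: the const-pointer overload forwarded to itself because overload resolution kept selecting the same function, which is what clang reports as -Winfinite-recursion. A minimal sketch of the mechanism (plain free functions, not MOAB's actual signatures):

#include <iostream>

// Two overloads that differ only in pointer constness. The const version is
// meant to delegate to the non-const one, but an unqualified forwarding call
// binds back to itself; clang flags this as -Winfinite-recursion.
int fill(unsigned char* ptr)       { *ptr = 42; return 0; }
int fill(const unsigned char* ptr)
{
    // return fill(ptr);                          // recurses forever: the exact match is this overload
    return fill(const_cast<unsigned char*>(ptr)); // now the non-const overload is the best match
}                                                 // (only safe if the pointee is not actually const)

int main()
{
    unsigned char byte = 0;
    const unsigned char* view = &byte;
    fill(view);
    std::cout << (int)byte << "\n";               // prints 42
    return 0;
}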
......@@ -104,7 +104,7 @@ namespace moab
// find best initial guess to improve convergence
CartVect tmp_params[] = {CartVect(-1,-1,-1), CartVect(1,-1,-1), CartVect(-1,1,-1), CartVect(-1,-1,1)};
double resl = HUGE;
double resl = MAXFLOAT;
CartVect new_pos, tmp_pos;
ErrorCode rval;
for (unsigned int i = 0; i < 4; i++) {
......
......@@ -101,7 +101,7 @@ namespace moab
// find best initial guess to improve convergence
CartVect tmp_params[] = {CartVect(-1,-1,-1), CartVect(1,-1,-1), CartVect(-1,1,-1)};
double resl = HUGE;
double resl = MAXFLOAT;
CartVect new_pos, tmp_pos;
ErrorCode rval;
for (unsigned int i = 0; i < 3; i++) {
......
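Both hunks above swap the legacy HUGE macro for MAXFLOAT from <math.h> as the seed value of the best-initial-guess search. For reference only (not the form the commit uses), a fully portable C++ spelling of the same idea relies on std::numeric_limits; a small sketch with made-up residual values:

#include <cmath>
#include <iostream>
#include <limits>

int main()
{
    // Stand-ins for the residuals of the four trial parameter sets (made-up values).
    const double residual[4] = {3.5, 0.25, 1.0, 2.0};

    // Seed the "best so far" tracker with the largest finite double so the first
    // candidate always replaces it; avoids the non-standard HUGE/MAXFLOAT macros.
    double best_res = std::numeric_limits<double>::max();
    int best = -1;
    for (int i = 0; i < 4; ++i) {
        if (std::fabs(residual[i]) < best_res) {
            best_res = std::fabs(residual[i]);
            best = i;
        }
    }
    std::cout << "best initial guess: " << best << "\n";  // prints 1
    return 0;
}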
......@@ -37,12 +37,8 @@ using namespace moab;
const bool debug = false;
MetisPartitioner::MetisPartitioner( Interface *impl,
const bool use_coords,
int argc,
char **argv)
: PartitionerBase(impl,use_coords),
argcArg(argc),
argvArg(argv)
const bool use_coords)
: PartitionerBase(impl,use_coords)
{
}
......@@ -613,7 +609,7 @@ ErrorCode MetisPartitioner::write_partition(const idx_t nparts,
for (i = 0; i < nparts; i++) dum_ids[i] = i;
result = mbImpl->tag_set_data(part_set_tag, partSets, dum_ids);
delete dum_ids;
delete [] dum_ids;
// assign entities to the relevant sets
std::vector<EntityHandle> tmp_part_sets;
......
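Besides dropping the unused argc/argv constructor arguments, the MetisPartitioner hunk pairs the delete with the array form of the allocation: scalar delete on memory obtained with new[] is undefined behavior, and clang can diagnose it (e.g. -Wmismatched-new-delete) when both sides are visible. A minimal sketch of the pattern:

int main()
{
    const int nparts = 8;

    int* dum_ids = new int[nparts];              // array allocation
    for (int i = 0; i < nparts; ++i) dum_ids[i] = i;

    // delete dum_ids;                           // wrong: scalar delete for an array allocation
    delete [] dum_ids;                           // correct: matches new[]

    // (In new code a std::vector<int> would avoid the manual pairing entirely.)
    return 0;
}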
......@@ -717,7 +717,7 @@ ErrorCode ScdInterface::tag_shared_vertices(ParallelComm *pcomm, ScdBox *box)
send_reqs(procs.size(), MPI_REQUEST_NULL);
std::vector<EntityHandle> rhandles(4*procs.size()), shandles(4);
for (unsigned int i = 0; i < procs.size(); i++) {
int success = MPI_Irecv(&rhandles[4*i], 4*sizeof(EntityHandle),
int success = MPI_Irecv((void*)&rhandles[4*i], 4*sizeof(EntityHandle),
MPI_UNSIGNED_CHAR, procs[i],
1, pcomm->proc_config().proc_comm(),
&recv_reqs[i]);
......@@ -740,7 +740,7 @@ ErrorCode ScdInterface::tag_shared_vertices(ParallelComm *pcomm, ScdBox *box)
shandles[3] = box->start_element();
}
for (unsigned int i = 0; i < procs.size(); i++) {
int success = MPI_Isend(&shandles[0], 4*sizeof(EntityHandle), MPI_UNSIGNED_CHAR, procs[i],
int success = MPI_Isend((void*)&shandles[0], 4*sizeof(EntityHandle), MPI_UNSIGNED_CHAR, procs[i],
1, pcomm->proc_config().proc_comm(), &send_reqs[i]);
if (success != MPI_SUCCESS) return MB_FAILURE;
}
......
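The ScdInterface hunks (and the similar MPI call sites further down) add explicit (void*) casts on the buffers handed to MPI_Irecv/MPI_Isend; the handles are shipped as raw bytes via MPI_UNSIGNED_CHAR. The exact clang diagnostic these casts silence is not visible in the diff, so the sketch below only illustrates the byte-wise exchange pattern with the casts the commit adopts (hypothetical peer and tag values, error handling trimmed):

#include <mpi.h>
#include <cstdint>
#include <vector>

// Post a receive and a send for blocks of 64-bit handles transferred as raw
// bytes, mirroring the pattern in tag_shared_vertices. The caller sizes both
// vectors before the call.
int exchange_handles(MPI_Comm comm, int peer,
                     std::vector<uint64_t>& send_handles,
                     std::vector<uint64_t>& recv_handles)
{
    MPI_Request reqs[2];
    int err = MPI_Irecv((void*)&recv_handles[0],
                        (int)(recv_handles.size() * sizeof(uint64_t)),
                        MPI_UNSIGNED_CHAR, peer, 1, comm, &reqs[0]);
    if (err != MPI_SUCCESS) return err;
    err = MPI_Isend((void*)&send_handles[0],
                    (int)(send_handles.size() * sizeof(uint64_t)),
                    MPI_UNSIGNED_CHAR, peer, 1, comm, &reqs[1]);
    if (err != MPI_SUCCESS) return err;
    return MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);
}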
......@@ -42,8 +42,8 @@
namespace moab {
WriteUtil::WriteUtil(Core* mdb, Error* error_handler)
: WriteUtilIface(), mMB(mdb), mError(error_handler)
WriteUtil::WriteUtil(Core* mdb)
: WriteUtilIface(), mMB(mdb)
{
}
......
......@@ -25,18 +25,16 @@
namespace moab {
class Core;
class Error;
class WriteUtil : public WriteUtilIface
{
private:
//! Pointer to the Core
Core* mMB;
Error* mError;
public:
//! Constructor takes Core pointer
WriteUtil(Core* mdb, Error* error_handler);
WriteUtil(Core* mdb);
//! Destructor
~WriteUtil(){}
......
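The WriteUtil changes here and in Core.cpp above remove the Error* member together with its constructor argument; clang's -Wunused-private-field warns about private data members that are stored but never read. A minimal illustration of the warning and the trimmed-constructor fix (class and member names are hypothetical):

class Util
{
public:
    // Original shape: the handler was stored but never read afterwards,
    // which triggers clang's -Wunused-private-field.
    //   Util(Core* mdb, Error* error_handler) : mMB(mdb), mError(error_handler) {}
    //   Error* mError;

    explicit Util(int* db) : mDB(db) {}          // trimmed constructor, as in the commit
    int* database() const { return mDB; }

private:
    int* mDB;                                    // still read via database(), so no warning
};

int main()
{
    int dummy = 0;
    Util u(&dummy);
    return u.database() == &dummy ? 0 : 1;
}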
......@@ -966,9 +966,9 @@ ErrCode iMOAB_SetIntTagStorage(iMOAB_AppID pid, const iMOAB_String tag_storage_n
return 1;
// set it on a subset of entities, based on type and length
Range * ents_to_set;
if (* ent_type == 0)// vertices
if (*ent_type == 0)// vertices
ents_to_set = &data.all_verts;
else if (* ent_type == 1)
else // if (*ent_type == 1) // *ent_type can be 0 (vertices) or 1 (elements)
ents_to_set = &data.primary_elems;
int nents_to_be_set = *num_tag_storage_length /tagLength;
......@@ -1008,9 +1008,9 @@ ErrCode iMOAB_GetIntTagStorage(iMOAB_AppID pid, const iMOAB_String tag_storage_n
// set it on a subset of entities, based on type and length
Range * ents_to_get;
if (* ent_type == 0)// vertices
if (*ent_type == 0)// vertices
ents_to_get = &data.all_verts;
else if (* ent_type == 1)
else // if (*ent_type == 1)
ents_to_get = &data.primary_elems;
int nents_to_get = *num_tag_storage_length /tagLength;
......
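In the two iMOAB hunks, turning the trailing "else if (*ent_type == 1)" into a plain else guarantees that the Range pointer is assigned on every path; with the else-if and no final branch, clang can warn that the pointer may be used uninitialized (e.g. -Wsometimes-uninitialized). A small sketch of the before/after with simplified types:

#include <cstdio>

// ent_type is documented above to be 0 (vertices) or 1 (elements).
void set_storage(int ent_type, int* verts, int* elems)
{
    int* ents_to_set;            // no initializer

    if (ent_type == 0)           // vertices
        ents_to_set = verts;
    else // if (ent_type == 1)   // the original else-if left ents_to_set unset for other values,
        ents_to_set = elems;     // which clang reports when the pointer is used below

    std::printf("%d\n", *ents_to_set);
}

int main()
{
    int v = 10, e = 20;
    set_storage(0, &v, &e);      // prints 10
    set_storage(1, &v, &e);      // prints 20
    return 0;
}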
......@@ -169,8 +169,8 @@ void IODebugTrack::all_reduce()
std::vector<DRange> send(dataSet.size()), recv(total);
std::copy( dataSet.begin(), dataSet.end(), send.begin() );
MPI_Gatherv( &send[0], 3*send.size(), MPI_UNSIGNED_LONG,
&recv[0], &counts[0], &displs[0], MPI_UNSIGNED_LONG,
MPI_Gatherv( (void*)&send[0], 3*send.size(), MPI_UNSIGNED_LONG,
(void*)&recv[0], &counts[0], &displs[0], MPI_UNSIGNED_LONG,
0, MPI_COMM_WORLD );
if (0 == mpiRank) {
......
......@@ -2045,7 +2045,7 @@ ErrorCode ReadHDF5::read_all_set_meta()
if (bcast) {
#ifdef MOAB_HAVE_MPI
int ierr = MPI_Bcast(setMeta, num_sets*4, MPI_LONG, 0, comm);
int ierr = MPI_Bcast((void*)setMeta, num_sets*4, MPI_LONG, 0, comm);
if (MPI_SUCCESS != ierr)
MB_SET_ERR(MB_FAILURE, "ReadHDF5 Failure");
#else
......@@ -3337,7 +3337,7 @@ ErrorCode ReadHDF5::read_var_len_tag(Tag tag_handle,
{
ErrorCode rval1;
if (isHandle) {
assert(readSize == sizeof(EntityHandle));
assert( (void) (readSize == sizeof(EntityHandle)));
rval1 = readHDF5->convert_id_to_handle((EntityHandle*)data, count);MB_CHK_ERR(rval1);
}
int n = count;
......
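The ReadHDF5 hunks wrap assert conditions in (void)(...) alongside the MPI_Bcast cast. For comparison only (this is not the form the commit uses), a widely used way to keep an assert-only value from triggering unused-variable or unused-parameter warnings in NDEBUG builds is an explicit discard of the name itself:

#include <cassert>
#include <cstddef>
#include <cstring>

// read_size is only consumed by the assert, so NDEBUG builds would otherwise
// see it as unused; the (void) discard keeps the warning quiet either way.
void copy_one_handle(unsigned long* dst, const unsigned long* src, std::size_t read_size)
{
    (void)read_size;
    assert(read_size == sizeof(unsigned long));
    std::memcpy(dst, src, sizeof(unsigned long));
}

int main()
{
    unsigned long in = 7, out = 0;
    copy_one_handle(&out, &in, sizeof(unsigned long));
    return out == 7 ? 0 : 1;
}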
......@@ -62,7 +62,7 @@ const char geom_category[][CATEGORY_TAG_SIZE] =
// Constructor
ReadOBJ::ReadOBJ(Interface* impl)
: MBI(impl),geom_tag(0), id_tag(0), name_tag(0), category_tag(0),
faceting_tol_tag(0), geometry_resabs_tag(0), obj_name_tag(0), sense_tag(0)
faceting_tol_tag(0), geometry_resabs_tag(0), obj_name_tag(0)
{
assert(NULL != impl);
MBI->query_interface(readMeshIface);
......@@ -297,16 +297,16 @@ ErrorCode ReadOBJ::load_file(const char *filename,
*/
void ReadOBJ::tokenize( const std::string& str,
std::vector<std::string>& tokens,
const char* delimiters)
const char* delimiters2)
{
tokens.clear();
std::string::size_type next_token_end, next_token_start =
str.find_first_not_of( delimiters, 0);
str.find_first_not_of( delimiters2, 0);
while ( std::string::npos != next_token_start )
{
next_token_end = str.find_first_of( delimiters, next_token_start );
next_token_end = str.find_first_of( delimiters2, next_token_start );
if ( std::string::npos == next_token_end )
{
tokens.push_back(str.substr(next_token_start));
......@@ -316,7 +316,7 @@ void ReadOBJ::tokenize( const std::string& str,
{
tokens.push_back( str.substr( next_token_start, next_token_end -
next_token_start ) );
next_token_start = str.find_first_not_of( delimiters, next_token_end );
next_token_start = str.find_first_not_of( delimiters2, next_token_end );
}
}
}
......
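In ReadOBJ::tokenize the parameter is renamed from delimiters to delimiters2, which suggests it was shadowing another "delimiters" visible in that scope (the kind of thing clang reports with -Wshadow). A self-contained sketch of the renamed tokenizer; the file-scope delimiters constant here is illustrative, not ReadOBJ's actual declaration:

#include <iostream>
#include <string>
#include <vector>

static const char delimiters[] = " \t\n";         // enclosing-scope name the parameter would hide

void tokenize(const std::string& str,
              std::vector<std::string>& tokens,
              const char* delimiters2 = delimiters) // renamed so it no longer shadows 'delimiters'
{
    tokens.clear();
    std::string::size_type start = str.find_first_not_of(delimiters2, 0);
    while (start != std::string::npos) {
        std::string::size_type end = str.find_first_of(delimiters2, start);
        if (end == std::string::npos) {
            tokens.push_back(str.substr(start));
            break;
        }
        tokens.push_back(str.substr(start, end - start));
        start = str.find_first_not_of(delimiters2, end);
    }
}

int main()
{
    std::vector<std::string> tokens;
    tokenize("v 1.0 2.0 3.0", tokens);            // uses the default delimiters
    std::cout << tokens.size() << "\n";           // prints 4
    return 0;
}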
......@@ -148,8 +148,7 @@ private:
GeomTopoTool* myGeomTool;
Tag geom_tag,id_tag,name_tag,category_tag,faceting_tol_tag, geometry_resabs_tag, obj_name_tag,
sense_tag;
Tag geom_tag,id_tag,name_tag,category_tag,faceting_tol_tag, geometry_resabs_tag, obj_name_tag;
/* The keyword type function matches the first character extracted from each line to a type of line
*/
......
......@@ -4,7 +4,6 @@
#include "MBParallelConventions.h"
#include "moab/Interface.hpp"
#include "moab/Range.hpp"
#include "moab/WriteUtilIface.hpp"
#include "moab/FileOptions.hpp"
#include "GmshUtil.hpp"
......
......@@ -95,7 +95,7 @@ namespace moab {
template <typename T> inline
void VALGRIND_MAKE_VEC_UNDEFINED(std::vector<T>& v) {
(void)VALGRIND_MAKE_MEM_UNDEFINED(&v[0], v.size() * sizeof(T));
(void)VALGRIND_MAKE_MEM_UNDEFINED( (T*)&v[0], v.size() * sizeof(T));
}
#define WRITE_HDF5_BUFFER_SIZE (40 * 1024 * 1024)
......
......@@ -36,9 +36,7 @@ using namespace moab;
public:
MetisPartitioner( Interface *impl = NULL,
const bool use_coords = false,
int argc = 0,
char **argv = NULL);
const bool use_coords = false);
virtual ~MetisPartitioner();
......@@ -84,10 +82,6 @@ using namespace moab;
private:
int argcArg;
char **argvArg;
ErrorCode assemble_graph(const int dimension,
std::vector<double> &coords,
std::vector<idx_t> &moab_ids,
......
......@@ -5907,7 +5907,7 @@ ErrorCode ParallelComm::get_remote_handles(EntityHandle *local_vec, EntityHandle
this_incoming++;
PRINT_DEBUG_IRECV(procConfig.proc_rank(), to_proc, (unsigned char*)ack_buff,
sizeof(int), mesg_tag - 1, this_incoming);
success = MPI_Irecv(ack_buff, sizeof(int),
success = MPI_Irecv((void*)ack_buff, sizeof(int),
MPI_UNSIGNED_CHAR, to_proc,
mesg_tag - 1, procConfig.proc_comm(),
&ack_req);
......@@ -8301,7 +8301,7 @@ ErrorCode ParallelComm::get_remote_handles(EntityHandle *local_vec, EntityHandle
// Set up to receive data
for (int i = 0; i < num_proc; i++) {
result[i].resize(sizes_recv[i]);
ierr = MPI_Irecv(&result[i][0],
ierr = MPI_Irecv( (void *)( &(result[i][0]) ),
sizeof(SharedEntityData)*sizes_recv[i],
MPI_UNSIGNED_CHAR,
buffProcs[i], tag, cm, &recv_req[i]);
......@@ -8311,7 +8311,7 @@ ErrorCode ParallelComm::get_remote_handles(EntityHandle *local_vec, EntityHandle
// Send data
for (int i = 0; i < num_proc; i++) {
ierr = MPI_Isend(&send_data[i][0],
ierr = MPI_Isend((void *)( &(send_data[i][0]) ),
sizeof(SharedEntityData)*sizes_send[i],
MPI_UNSIGNED_CHAR,
buffProcs[i], tag, cm, &send_req[i]);
......
......@@ -149,7 +149,7 @@ namespace moab{
box.bMax *= -1;
/*Communicate to all processors*/
MPI_Allreduce(&box, gbox, 6, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
MPI_Allreduce( (void*)&box, gbox, 6, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
/*Assemble Global Bounding Box*/
//Flip the max back
......
......@@ -945,14 +945,14 @@ ErrorCode WriteHDF5Parallel::create_tag_tables()
// Copy values into local structs and if root then create tables
size_t idx = 0;
for (tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter, ++idx) {
assert(idx < counts.size());
assert( (void) (idx < counts.size()));
tag_iter->sparse_offset = offsets[idx];
tag_iter->max_num_ents = maxima[idx];
tag_iter->write_sparse = (0 != totals[idx]);
int s;
if (MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length(tag_iter->tag_id, s)) {
++idx;
assert(idx < counts.size());
assert((void) (idx < counts.size()));
tag_iter->var_data_offset = offsets[idx];
tag_iter->max_num_vals = maxima[idx];
}
......@@ -968,13 +968,13 @@ ErrorCode WriteHDF5Parallel::create_tag_tables()
if (0 == myPcomm->proc_config().proc_rank()) {
size_t iidx = 0;
for (tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter, ++iidx) {
assert(iidx < totals.size());
assert( (void) (iidx < totals.size()));
unsigned long num_ents = totals[iidx];
unsigned long num_val = 0;
int s;
if (MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length(tag_iter->tag_id, s)) {
++iidx;
assert(iidx < totals.size());
assert((void) (iidx < totals.size()));
num_val = totals[iidx];
}
dbgOut.printf(2, "Writing tag description for tag 0x%lx with %lu values\n",
......@@ -1011,7 +1011,7 @@ struct DatasetVals {
long max_count;
long total;
};
STATIC_ASSERT(sizeof(DatasetVals) == 3 * sizeof(long));
STATIC_ASSERT( (sizeof(DatasetVals) == 3 * sizeof(long)));
ErrorCode WriteHDF5Parallel::create_dataset(int num_datasets,
const long* num_owned,
......@@ -1060,7 +1060,7 @@ ErrorCode WriteHDF5Parallel::create_dataset(int num_datasets,
}
// Send id offset to every proc
result = MPI_Bcast(&cumulative[0], 3 * num_datasets, MPI_LONG, 0, comm);CHECK_MPI(result);
result = MPI_Bcast((void*)&cumulative[0], 3 * num_datasets, MPI_LONG, 0, comm);CHECK_MPI(result);
for (int index = 0; index < num_datasets; ++index) {
if (first_ids_out)
first_ids_out[index] = (wid_t)cumulative[index].start_id;
......@@ -1188,7 +1188,7 @@ ErrorCode WriteHDF5Parallel::negotiate_type_list()
typelist root_types(num_types0);
if (0 == myPcomm->proc_config().proc_rank())
root_types = my_types;
result = MPI_Bcast(&root_types[0], 2 * num_types0, MPI_INT, 0, comm);CHECK_MPI(result);
result = MPI_Bcast((void*)&root_types[0], 2 * num_types0, MPI_INT, 0, comm);CHECK_MPI(result);
// Build local list of any types that root did not know about
typelist non_root_types;
......@@ -1220,8 +1220,8 @@ ErrorCode WriteHDF5Parallel::negotiate_type_list()
typelist alltypes(total/2);
(void)VALGRIND_MAKE_VEC_UNDEFINED(alltypes);
(void)VALGRIND_CHECK_MEM_IS_DEFINED(&non_root_types[0], non_root_types.size()*sizeof(int));
result = MPI_Gatherv(&non_root_types[0], 2 * non_root_count, MPI_INT,
&alltypes[0], &counts[0], &displs[0], MPI_INT, 0, comm);CHECK_MPI(result);
result = MPI_Gatherv((void*)&non_root_types[0], 2 * non_root_count, MPI_INT,
(int*)&alltypes[0], &counts[0], &displs[0], MPI_INT, 0, comm);CHECK_MPI(result);
// Merge type lists.
// Prefer O(n) insertions with O(ln n) search time because
......@@ -1245,7 +1245,7 @@ ErrorCode WriteHDF5Parallel::negotiate_type_list()
// Send list of types to each processor
my_types.resize(total);
result = MPI_Bcast(&my_types[0], 2 * total, MPI_INT, 0, comm);CHECK_MPI(result);
result = MPI_Bcast((void*)&my_types[0], 2 * total, MPI_INT, 0, comm);CHECK_MPI(result);
}
else {
// Special case: if root had types but some subset of procs did not
......@@ -1463,7 +1463,7 @@ ErrorCode WriteHDF5Parallel::communicate_shared_set_ids(const Range& owned,
unsigned r;
EntityHandle h;
myPcomm->get_entityset_owner(*j, r, &h);
assert(r == procs[i]);
assert((void) (r == procs[i]));
remote_handles.insert(h);
}
dbgOut.print(6, remote_handles);
......@@ -1489,7 +1489,7 @@ ErrorCode WriteHDF5Parallel::communicate_shared_set_ids(const Range& owned,
if (set_procs[j] != myPcomm->proc_config().proc_rank())
send_sets[set_procs[j]].insert(*i);
}
assert(send_sets.find(myPcomm->proc_config().proc_rank()) == send_sets.end());
assert((void) (send_sets.find(myPcomm->proc_config().proc_rank()) == send_sets.end()));
// Now send the data
std::vector< std::vector<unsigned long> > send_buf(send_sets.size());
......@@ -1517,8 +1517,8 @@ ErrorCode WriteHDF5Parallel::communicate_shared_set_ids(const Range& owned,
while (recv_count--) {
mperr = MPI_Waitany(recv_req.size(), &recv_req[0], &idx, &status);CHECK_MPI(mperr);
assert((unsigned)status.MPI_SOURCE == procs[idx]);
assert(2*recv_buf[idx].front() + 1 == recv_buf[idx].size());
assert((void) ((unsigned)status.MPI_SOURCE == procs[idx]));
assert((void) (2*recv_buf[idx].front() + 1 == recv_buf[idx].size()));
const size_t n = std::min<size_t>(recv_buf[idx].front(), (recv_buf[idx].size() - 1) / 2);
dbgOut.printf(5, "Received buffer of size %lu from proc %d\n",
(unsigned long)(2*n + 1), (int)status.MPI_SOURCE);
......@@ -1526,15 +1526,15 @@ ErrorCode WriteHDF5Parallel::communicate_shared_set_ids(const Range& owned,
for (size_t i = 0; i < n; ++i) {
EntityHandle handle = 0;
rval = myPcomm->get_entityset_local_handle(procs[idx], recv_buf[idx][2*i + 1], handle);CHECK_MB(rval);
assert(handle != 0);
assert((void) (handle != 0));
if (!idMap.insert(handle, recv_buf[idx][2*i + 2], 1).second)
error(MB_FAILURE); // Conflicting IDs??????
}
recv_req[idx] = MPI_REQUEST_NULL;
}
assert(MPI_SUCCESS == MPI_Waitany(recv_req.size(), &recv_req[0], &idx, &status)
&& MPI_UNDEFINED == idx); // Check that we got them all
assert((void) (MPI_SUCCESS == MPI_Waitany(recv_req.size(), &recv_req[0], &idx, &status)
&& MPI_UNDEFINED == idx)); // Check that we got them all
// Wait for all sends to complete before we release send
// buffers (implicitly releases when we return from this function)
......@@ -1584,7 +1584,7 @@ ErrorCode WriteHDF5Parallel::pack_set(Range::const_iterator it,
size_t newlen;
// Buffer must always contain at least flags and desired sizes
assert(buffer_size >= 4);
assert((void) (buffer_size >= 4));
buffer_size -= 4;
Range::const_iterator nd = it; ++nd;
......@@ -1604,7 +1604,7 @@ ErrorCode WriteHDF5Parallel::pack_set(Range::const_iterator it,
if (len && !(flags & MESHSET_ORDERED)) {
tmp.clear();
bool blocked = false;
assert(0 == len % 2);
assert((void) (0 == len % 2));
rval = range_to_blocked_list(ptr, len / 2, tmp, blocked);CHECK_MB(rval);
if (blocked)
flags |= mhdf_SET_RANGE_BIT;
......@@ -1667,8 +1667,8 @@ static void merge_ranged_ids(const unsigned long* range_list,
std::vector<WriteHDF5::wid_t>& result)
{
typedef WriteHDF5::wid_t wid_t;
assert(0 == len%2);
assert(0 == result.size()%2);
assert( (void) (0 == len%2));
assert( (void) (0 == result.size()%2));
STATIC_ASSERT(sizeof(std::pair<wid_t, wid_t>) == 2 * sizeof(wid_t));
result.insert(result.end(), range_list, range_list + len);
......@@ -1701,8 +1701,8 @@ ErrorCode WriteHDF5Parallel::unpack_set(EntityHandle set,
size_t buffer_size)
{
// Use local variables for readability
assert(buffer_size >= 4);
assert(buffer[1] + buffer[2] + buffer[3] <= buffer_size);
assert( (void) (buffer_size >= 4));
assert( (void) (buffer[1] + buffer[2] + buffer[3] <= buffer_size));
const unsigned long flags = buffer[0];
unsigned long num_content = buffer[1];
const unsigned long num_child = buffer[2];
......@@ -1712,7 +1712,7 @@ ErrorCode WriteHDF5Parallel::unpack_set(EntityHandle set,
const unsigned long* parents = children + num_child;
SpecialSetData* data = find_set_data(set);
assert(NULL != data);
assert( (void) (NULL != data));
if (NULL == data)
return MB_FAILURE;
......@@ -1837,7 +1837,7 @@ ErrorCode WriteHDF5Parallel::communicate_shared_set_data(const Range& owned,
++idx;
}
}
assert((size_t)idx == numrecv);
assert( (void) ((size_t)idx == numrecv));
// Now send set data for all remote sets that I know about
std::vector<MPI_Request> send_req(remote.size());
......@@ -1851,7 +1851,7 @@ ErrorCode WriteHDF5Parallel::communicate_shared_set_data(const Range& owned,
rval = myPcomm->get_entityset_owner(*i, owner, &remote_handle);CHECK_MB(rval);
int tag = ID_FROM_HANDLE(remote_handle);
assert(remote_handle == CREATE_HANDLE(MBENTITYSET, tag));
assert( (void) (remote_handle == CREATE_HANDLE(MBENTITYSET, tag)));
dbgOut.printf(5, "Sending %lu values for set %d to proc %u\n",
send_buf[idx][1] + send_buf[idx][2] + send_buf[idx][3] + 4, tag, owner);
mperr = MPI_Isend(&send_buf[idx][0], init_buff_size, MPI_UNSIGNED_LONG,
......@@ -1862,7 +1862,7 @@ ErrorCode WriteHDF5Parallel::communicate_shared_set_data(const Range& owned,
//iFace->tag_get_handle(MATERIAL_SET_TAG_NAME, 1, MB_TYPE_INTEGER, mattag);
// Now initialize local data for managing contents of owned, shared sets
assert(specialSets.empty());
assert( (void) (specialSets.empty()));
specialSets.clear();
specialSets.reserve(owned.size());
for (Range::iterator i = owned.begin(); i != owned.end(); ++i) {
......@@ -1954,7 +1954,7 @@ ErrorCode WriteHDF5Parallel::communicate_shared_set_data(const Range& owned,
rval = myPcomm->get_entityset_owner(*i, owner, &remote_handle);CHECK_MB(rval);
int tag = ID_FROM_HANDLE(remote_handle);
assert(remote_handle == CREATE_HANDLE(MBENTITYSET, tag));
assert( (void) (remote_handle == CREATE_HANDLE(MBENTITYSET, tag)));
dbgOut.printf(5, "Sending %lu values for set %d to proc %u\n",
(unsigned long)size, tag, owner);
mperr = MPI_Isend(&buff[0], size, MPI_UNSIGNED_LONG,
......
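Among the WriteHDF5Parallel changes, the STATIC_ASSERT lines gain an extra set of parentheses around their condition; STATIC_ASSERT is MOAB's own compile-time check macro. With a C++11 compiler the standard equivalent is static_assert, sketched below on a hypothetical mirror of DatasetVals:

// Compile-time layout check equivalent to the STATIC_ASSERT above: the struct
// must be exactly three longs so it can be broadcast as 3 * num_datasets MPI_LONGs.
struct DatasetValsExample {
    long start_id;
    long max_count;
    long total;
};

static_assert(sizeof(DatasetValsExample) == 3 * sizeof(long),
              "DatasetValsExample must pack into exactly three longs");

int main() { return 0; }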
......@@ -255,13 +255,13 @@ void gs_data::nonlocal_info::nonlocal(realType *u, int op, MPI_Comm comm)
start = buf;
for (;c;--c)
*buf++ = u[*sh_ind++];
MPI_Isend(start,nshared[i]*sizeof(realType),
MPI_Isend((void*)start,nshared[i]*sizeof(realType),
MPI_UNSIGNED_CHAR, targ[i],id,comm,reqs++);
}
start = buf;
for(i=0; i<np; ++i)
{
MPI_Irecv(start,nshared[i]*sizeof(realType),MPI_UNSIGNED_CHAR,
MPI_Irecv((void*)start,nshared[i]*sizeof(realType),MPI_UNSIGNED_CHAR,
targ[i],targ[i],comm,reqs++);
start+=nshared[i];
}
......@@ -315,13 +315,13 @@ void gs_data::nonlocal_info::nonlocal_vec(realType *u, uint n,
memcpy(buf,u+n*(*sh_ind++),size);
buf+=n;
}
MPI_Isend(start,ns*size,MPI_UNSIGNED_CHAR,targ[i],id,comm,reqs++);
MPI_Isend((void*)start,ns*size,MPI_UNSIGNED_CHAR,targ[i],id,comm,reqs++);
}
start = buf;
for (i=0; i<np; ++i)
{
int nsn=n*nshared[i];
MPI_Irecv(start,nsn*size,MPI_UNSIGNED_CHAR,targ[i],targ[i],comm,reqs++);
MPI_Irecv((void*)start,nsn*size,MPI_UNSIGNED_CHAR,targ[i],targ[i],comm,reqs++);
start+=nsn;
}
for (reqs=this->_reqs,i=np*2;i;--i)
......@@ -379,13 +379,13 @@ void gs_data::nonlocal_info::nonlocal_many(realType **u, uint n, int op,
*buf++=uu[sh_ind[c]];
}
sh_ind+=ns;
MPI_Isend(start,n*ns*sizeof(realType),MPI_UNSIGNED_CHAR,targ[i],id,comm,reqs++);
MPI_Isend((void*)start,n*ns*sizeof(realType),MPI_UNSIGNED_CHAR,targ[i],id,comm,reqs++);
}
start = buf;
for (i=0; i<np; ++i)
{
int nsn = n*nshared[i];
MPI_Irecv(start,nsn*sizeof(realType),MPI_UNSIGNED_CHAR,
MPI_Irecv((void*)start,nsn*sizeof(realType),MPI_UNSIGNED_CHAR,
targ[i],targ[i],comm,reqs++);
start+=nsn;
}
......@@ -491,10 +491,10 @@ void gs_data::crystal_data::send_(uint target, int recvn)
int i;
(void)VALGRIND_CHECK_MEM_IS_DEFINED( &send->n, sizeof(uint) );
MPI_Isend(&send->n,sizeof(uint),MPI_UNSIGNED_CHAR,
MPI_Isend((void*)&send->n,sizeof(uint),MPI_UNSIGNED_CHAR,
target ,_id ,_comm,&req[ 0]);
for (i=0; i<recvn; ++i)
MPI_Irecv(&count[i] ,sizeof(uint),MPI_UNSIGNED_CHAR,
MPI_Irecv((void*)&count[i] ,sizeof(uint),MPI_UNSIGNED_CHAR,
target+i,target+i,_comm,&req[i+1]);
MPI_Waitall(recvn+1,req,status);
sum = keep->n;
......@@ -507,14 +507,14 @@ void gs_data::crystal_data::send_(uint target, int recvn)
keep->n=sum;
(void)VALGRIND_CHECK_MEM_IS_DEFINED( send->buf.ptr,send->n*sizeof(uint) );
MPI_Isend(send->buf.ptr,send->n*sizeof(uint),
MPI_Isend((void*)send->buf.ptr,send->n*sizeof(uint),
MPI_UNSIGNED_CHAR,target,_id,_comm,&req[0]);
if (recvn)
{
MPI_Irecv(recv[0],count[0]*sizeof(uint),MPI_UNSIGNED_CHAR,
MPI_Irecv((void*)recv[0],count[0]*sizeof(uint),MPI_UNSIGNED_CHAR,
target,target,_comm,&req[1]);
if (recvn==2)