Commit 32b6d77d authored by vijaysm

Merged in iulian07/clang_osx_warnings (pull request #262)

Several warning fixes for OSX/Clang
parents 7a0f27b4 81e4f023
......@@ -192,6 +192,9 @@ if (test "x$enable_debug" != "xno"); then # debug flags
# GNU
EXTRA_GNU_CXXFLAGS="-Wall -Wno-long-long -pipe -pedantic -Wshadow -Wunused-parameter -Wpointer-arith -Wformat -Wformat-security -Wextra -Wno-variadic-macros -Wno-unknown-pragmas"
EXTRA_GNU_FCFLAGS="-pipe -pedantic -ffree-line-length-0"
# CLANG
EXTRA_CLANG_CXXFLAGS="$EXTRA_GNU_CXXFLAGS"
EXTRA_CLANG_FCFLAGS="$EXTRA_GNU_FCFLAGS"
# Intel
EXTRA_INTEL_CXXFLAGS="-pipe -C"
EXTRA_INTEL_FCFLAGS="-C"
......@@ -205,8 +208,11 @@ fi
if (test "x$enable_cxx_optimize" != "xno"); then # optimization flags
#GNU
EXTRA_GNU_CXXFLAGS="$EXTRA_GNU_CXXFLAGS -finline-functions"
EXTRA_GNU_CXXFLAGS="$EXTRA_GNU_CXXFLAGS -fprefetch-loop-arrays -finline-functions -march=native"
EXTRA_GNU_FCFLAGS="$EXTRA_GNU_FCFLAGS -ffree-line-length-0 -finline-functions"
#CLANG
EXTRA_CLANG_CXXFLAGS="$EXTRA_CLANG_CXXFLAGS -march=native"
EXTRA_CLANG_FCFLAGS="$EXTRA_CLANG_FCFLAGS -ffree-line-length-0 -finline-functions"
# Intel
EXTRA_INTEL_CXXFLAGS="$EXTRA_INTEL_CXXFLAGS -xHost -ip -no-prec-div" # -fast
EXTRA_INTEL_FCFLAGS="$EXTRA_INTEL_FCFLAGS -xHost -ip -no-prec-div" # -fast
......@@ -755,7 +761,7 @@ case "$cxx_compiler:$host_cpu" in
FATHOM_CXX_SPECIAL="$EXTRA_PGI_CXXFLAGS"
;;
Clang:*)
FATHOM_CXX_SPECIAL="$EXTRA_GNU_CXXFLAGS"
FATHOM_CXX_SPECIAL="$EXTRA_CLANG_CXXFLAGS"
FATHOM_CXX_32BIT=-m32
FATHOM_CXX_64BIT=-m64
;;
......@@ -934,8 +940,8 @@ case "$cc_compiler:$host_cpu" in
FATHOM_F77_SPECIAL="$EXTRA_PGI_FCFLAGS"
;;
Clang:*)
FATHOM_CC_SPECIAL="$EXTRA_GNU_CXXFLAGS"
FATHOM_FC_SPECIAL="$EXTRA_GNU_FCFLAGS"
FATHOM_CC_SPECIAL="$EXTRA_CLANG_CXXFLAGS"
FATHOM_FC_SPECIAL="$EXTRA_CLANG_FCFLAGS"
FATHOM_F77_SPECIAL="$FATHOM_FC_SPECIAL"
FATHOM_CC_32BIT=-m32
FATHOM_CC_64BIT=-m64
......
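
The configure hunks above give Clang its own EXTRA_CLANG_CXXFLAGS/EXTRA_CLANG_FCFLAGS, seeded from the GNU warning set (-Wall -Wextra -Wshadow -Wunused-parameter, and so on) so the two compilers can diverge later, and add -march=native to the Clang optimization flags. As a reference point only (this file is not part of the commit), a tiny translation unit that those warnings flag:

// warn_demo.cpp -- illustrative only; compile with
//   clang++ -Wall -Wextra -Wshadow -Wunused-parameter -c warn_demo.cpp
int rescale(int value, int unused)          // -Wunused-parameter: 'unused' is never read
{
  for (int value = 0; value < 3; ++value)   // -Wshadow: loop variable shadows the parameter
  { }
  return value * 2;                         // refers to the parameter again
}
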
......@@ -2354,7 +2354,7 @@ int test_entity_copies( iMesh_Instance imesh, iMeshP_PartitionHandle prtn, const
// add all vertices to local_data
for (size_t j = 0; j < verts.size(); ++j) {
int tag;
int tag=0;
ierr = vertex_tag( imesh, verts[j], tag );
if (ierr)
break;
......
......@@ -2406,7 +2406,7 @@ extern "C" {
EntityHandle set1 = ENTITY_HANDLE(entity_set_1),
set2 = ENTITY_HANDLE(entity_set_2);
int isList1, isList2;
int isList1=0, isList2=0;
iMesh_isList(instance, entity_set_1, &isList1, err);
if (*err != iBase_SUCCESS) return;
iMesh_isList(instance, entity_set_2, &isList2, err);
......@@ -2455,7 +2455,7 @@ extern "C" {
EntityHandle set1 = ENTITY_HANDLE(entity_set_1),
set2 = ENTITY_HANDLE(entity_set_2);
int isList1, isList2;
int isList1=0, isList2=0;
iMesh_isList(instance, entity_set_1, &isList1, err);
if (*err != iBase_SUCCESS) return;
iMesh_isList(instance, entity_set_2, &isList2, err);
......@@ -2561,7 +2561,7 @@ extern "C" {
EntityHandle set1 = ENTITY_HANDLE(entity_set_1),
set2 = ENTITY_HANDLE(entity_set_2);
int isList1, isList2;
int isList1=0, isList2=0;
iMesh_isList(instance, entity_set_1, &isList1, err);
if (*err != iBase_SUCCESS) return;
iMesh_isList(instance, entity_set_2, &isList2, err);
......
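
Both sets of hunks above zero-initialize locals (tag, isList1, isList2) that are otherwise only written through an out-parameter of a call that can fail; initializing at the declaration is the conservative way to keep "may be used uninitialized" diagnostics quiet and to give the error path a defined value. A minimal sketch of the pattern, with a hypothetical query() standing in for calls such as vertex_tag() or iMesh_isList():

// Illustrative only.
static int query(int& out)
{
  static int calls = 0;
  if (++calls % 3 == 0)
    return 1;                     // simulated failure: 'out' is left untouched
  out = calls;
  return 0;
}

int sum_values(int n)
{
  int total = 0;
  for (int i = 0; i < n; ++i) {
    int value = 0;                // initialized at declaration, as in the hunks above
    if (query(value) != 0)
      break;                      // on failure 'value' still holds a defined 0
    total += value;
  }
  return total;
}
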
......@@ -34,7 +34,7 @@ typedef void* iRel_EntityHandle;
*/
void handle_error_code(const int result,
int *number_failed,
int *number_not_implemented,
int * /*number_not_implemented*/,
int *number_successful)
{
if (result) {
......
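
Commenting out the name of an argument that is deliberately ignored, as handle_error_code() now does, keeps the signature (and every caller) unchanged while silencing -Wunused-parameter. A minimal sketch of the idiom with a hypothetical function:

// Illustrative only.  Only the parameter's name is removed, not the parameter.
void tally_result(const int result,
                  int* number_failed,
                  int* /*number_not_implemented*/,   // intentionally unused
                  int* number_successful)
{
  if (result)
    ++(*number_failed);
  else
    ++(*number_successful);
}

In C++17 code, [[maybe_unused]] or a (void)param statement achieves the same effect without losing the parameter name.
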
......@@ -359,7 +359,7 @@ ErrorCode Core::query_interface_type( const std::type_info& type, void*& ptr )
}
else if (type == typeid(WriteUtilIface)) {
if(!mMBWriteUtil)
mMBWriteUtil = new WriteUtil(this, mError);
mMBWriteUtil = new WriteUtil(this);
ptr = static_cast<WriteUtilIface*>(mMBWriteUtil);
}
else if (type == typeid(ReaderWriterSet)) {
......
......@@ -104,7 +104,10 @@ ErrorCode DenseTag::get_array(const SequenceManager* seqman,
const unsigned char* const& ptr,
size_t& count) const
{
return get_array(seqman, NULL, h, ptr, count);
// cast away the const-ness; do we really want to do this?
// probably we are not calling this anywhere;
// clang compiler found this
return get_array(seqman, NULL, h, const_cast<unsigned char *> ( ptr ), count);
}
ErrorCode DenseTag::get_array(const SequenceManager* seqman,
......@@ -140,7 +143,9 @@ ErrorCode DenseTag::get_array(const SequenceManager* seqman,
ErrorCode DenseTag::get_array(const EntitySequence* seq,
const unsigned char* const & ptr) const
{
return get_array(seq, ptr);
// cast away the constness; otherwise it would be infinite recursion
// probably we are not calling this anywhere
return get_array(seq, const_cast<unsigned char *> ( ptr ));
}
ErrorCode DenseTag::get_array(const EntitySequence* seq,
......
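
The original one-line bodies here called an overload with the same argument types, so the only viable match was the function itself -- an unconditional self-call of the kind Clang reports with -Winfinite-recursion. The const_cast forces overload resolution onto the non-const-pointer variant instead; as the in-code comment admits, that is only safe if the underlying buffer is not genuinely const. A self-contained sketch of the trap with hypothetical functions (not MOAB's DenseTag API):

#include <cstddef>

// The overload taking a non-const pointer does the real work.
int read_block(unsigned char* ptr, std::size_t n)
{
  for (std::size_t i = 0; i < n; ++i)
    ptr[i] = 0;
  return 0;
}

// Written as "return read_block(ptr, n);" this overload can only match
// itself, because const unsigned char* never converts implicitly to
// unsigned char*, so the call would recurse forever.  The const_cast makes
// the overload above the match, mirroring the fix in the hunks.
int read_block(const unsigned char* ptr, std::size_t n)
{
  return read_block(const_cast<unsigned char*>(ptr), n);
}
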
......@@ -104,7 +104,7 @@ namespace moab
// find best initial guess to improve convergence
CartVect tmp_params[] = {CartVect(-1,-1,-1), CartVect(1,-1,-1), CartVect(-1,1,-1), CartVect(-1,-1,1)};
double resl = HUGE;
double resl = std::numeric_limits<double>::max();
CartVect new_pos, tmp_pos;
ErrorCode rval;
for (unsigned int i = 0; i < 4; i++) {
......
......@@ -101,7 +101,7 @@ namespace moab
// find best initial guess to improve convergence
CartVect tmp_params[] = {CartVect(-1,-1,-1), CartVect(1,-1,-1), CartVect(-1,1,-1)};
double resl = HUGE;
double resl = std::numeric_limits<double>::max();
CartVect new_pos, tmp_pos;
ErrorCode rval;
for (unsigned int i = 0; i < 3; i++) {
......
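
HUGE is a pre-ANSI <math.h> macro that is neither standard nor consistently defined across platforms, which is presumably why these initial-guess searches now start from std::numeric_limits<double>::max(). A minimal sketch of the same "smallest residual wins" pattern with hypothetical names:

#include <limits>

// Illustrative only.
int best_candidate(const double* residuals, int n)
{
  double best = std::numeric_limits<double>::max();   // instead of HUGE
  int best_index = -1;
  for (int i = 0; i < n; ++i) {
    if (residuals[i] < best) {
      best = residuals[i];
      best_index = i;
    }
  }
  return best_index;
}
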
......@@ -717,7 +717,7 @@ ErrorCode ScdInterface::tag_shared_vertices(ParallelComm *pcomm, ScdBox *box)
send_reqs(procs.size(), MPI_REQUEST_NULL);
std::vector<EntityHandle> rhandles(4*procs.size()), shandles(4);
for (unsigned int i = 0; i < procs.size(); i++) {
int success = MPI_Irecv(&rhandles[4*i], 4*sizeof(EntityHandle),
int success = MPI_Irecv((void*)&rhandles[4*i], 4*sizeof(EntityHandle),
MPI_UNSIGNED_CHAR, procs[i],
1, pcomm->proc_config().proc_comm(),
&recv_reqs[i]);
......@@ -740,7 +740,7 @@ ErrorCode ScdInterface::tag_shared_vertices(ParallelComm *pcomm, ScdBox *box)
shandles[3] = box->start_element();
}
for (unsigned int i = 0; i < procs.size(); i++) {
int success = MPI_Isend(&shandles[0], 4*sizeof(EntityHandle), MPI_UNSIGNED_CHAR, procs[i],
int success = MPI_Isend((void*)&shandles[0], 4*sizeof(EntityHandle), MPI_UNSIGNED_CHAR, procs[i],
1, pcomm->proc_config().proc_comm(), &send_reqs[i]);
if (success != MPI_SUCCESS) return MB_FAILURE;
}
......
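
This hunk and the later ones in IODebugTrack, ReadHDF5, ParallelComm, Coupler, and gs all make the same mechanical change: the buffer argument of MPI_Isend/MPI_Irecv/MPI_Bcast/MPI_Gatherv/MPI_Allreduce is cast explicitly to void* at the call site. A self-contained sketch of the call pattern (hypothetical exchange() helper; assumes an MPI build):

#include <mpi.h>
#include <vector>

// Illustrative only: a symmetric non-blocking exchange of handle-sized data,
// sent as raw bytes with the buffers passed as (void*) as in the hunks above.
int exchange(MPI_Comm comm, int peer,
             std::vector<unsigned long>& sendbuf,
             std::vector<unsigned long>& recvbuf)
{
  MPI_Request reqs[2];
  int rc = MPI_Irecv((void*)&recvbuf[0],
                     (int)(recvbuf.size() * sizeof(unsigned long)),
                     MPI_UNSIGNED_CHAR, peer, 1, comm, &reqs[0]);
  if (rc != MPI_SUCCESS) return rc;
  rc = MPI_Isend((void*)&sendbuf[0],
                 (int)(sendbuf.size() * sizeof(unsigned long)),
                 MPI_UNSIGNED_CHAR, peer, 1, comm, &reqs[1]);
  if (rc != MPI_SUCCESS) return rc;
  return MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);
}
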
......@@ -42,8 +42,8 @@
namespace moab {
WriteUtil::WriteUtil(Core* mdb, Error* error_handler)
: WriteUtilIface(), mMB(mdb), mError(error_handler)
WriteUtil::WriteUtil(Core* mdb)
: WriteUtilIface(), mMB(mdb)
{
}
......
......@@ -25,18 +25,16 @@
namespace moab {
class Core;
class Error;
class WriteUtil : public WriteUtilIface
{
private:
//! Pointer to the Core
Core* mMB;
Error* mError;
public:
//! Constructor takes Core pointer
WriteUtil(Core* mdb, Error* error_handler);
WriteUtil(Core* mdb);
//! Destructor
~WriteUtil(){}
......
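
The WriteUtil change removes an Error* that was passed in, stored in a private member, and never read again; Clang reports such members with -Wunused-private-field. Dropping the member means dropping the constructor parameter as well and updating every construction site -- which is what the Core::query_interface_type hunk earlier in this commit does. A minimal sketch with a hypothetical class:

// Illustrative only -- not MOAB's WriteUtil.
class Util {
public:
  // Before: Util(int* core, void* logger) also stored 'logger' in a private
  // member that no method ever read, which -Wunused-private-field flags.
  explicit Util(int* core) : core_(core) {}

  int* core() const { return core_; }

private:
  int* core_;          // still read, so no warning
  // void* logger_;    // removed: assigned in the constructor, never read
};
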
......@@ -208,7 +208,7 @@ ErrCode iMOAB_RegisterFortranApplication( const iMOAB_String app_name,
ErrCode iMOAB_DeregisterApplication( iMOAB_AppID pid )
{
// the file set , parallel comm are all in vectors indexed by *pid
// the file set , parallel comm are all in vectors indexed by *pid
// assume we did not delete anything yet
// *pid will not be reused if we register another application
......@@ -966,9 +966,9 @@ ErrCode iMOAB_SetIntTagStorage(iMOAB_AppID pid, const iMOAB_String tag_storage_n
return 1;
// set it on a subset of entities, based on type and length
Range * ents_to_set;
if (* ent_type == 0)// vertices
if (*ent_type == 0)// vertices
ents_to_set = &data.all_verts;
else if (* ent_type == 1)
else // if (*ent_type == 1) // *ent_type can be 0 (vertices) or 1 (elements)
ents_to_set = &data.primary_elems;
int nents_to_be_set = *num_tag_storage_length /tagLength;
......@@ -1008,9 +1008,9 @@ ErrCode iMOAB_GetIntTagStorage(iMOAB_AppID pid, const iMOAB_String tag_storage_n
// set it on a subset of entities, based on type and length
Range * ents_to_get;
if (* ent_type == 0)// vertices
if (*ent_type == 0)// vertices
ents_to_get = &data.all_verts;
else if (* ent_type == 1)
else // if (*ent_type == 1)
ents_to_get = &data.primary_elems;
int nents_to_get = *num_tag_storage_length /tagLength;
......@@ -1052,7 +1052,7 @@ ErrCode iMOAB_SetDoubleTagStorage(iMOAB_AppID pid, const iMOAB_String tag_storag
return 1;
// set it on a subset of entities, based on type and length
Range * ents_to_set;
Range * ents_to_set = NULL;
if (* ent_type == 0)// vertices
ents_to_set = &data.all_verts;
else if (* ent_type == 1)
......@@ -1097,7 +1097,7 @@ ErrCode iMOAB_GetDoubleTagStorage(iMOAB_AppID pid, const iMOAB_String tag_storag
return 1;
// set it on a subset of entities, based on type and length
Range * ents_to_get;
Range * ents_to_get = NULL;
if (* ent_type == 0)// vertices
ents_to_get = &data.all_verts;
else if (* ent_type == 1)
......
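
The iMOAB hunks apply two complementary fixes for the same "pointer may be used uninitialized" report: where only two values of *ent_type are possible, the trailing else if becomes a plain else (keeping the old condition as a comment), and where that is not an option the Range* is simply initialized to NULL. A minimal sketch with hypothetical names:

#include <cstddef>
#include <vector>

// Illustrative only: mirrors the ents_to_set / ents_to_get selection above.
std::vector<int>* pick(int ent_type,
                       std::vector<int>& verts,
                       std::vector<int>& elems)
{
  std::vector<int>* ents = NULL;   // fix 1: start from a defined value
  if (ent_type == 0)               // vertices
    ents = &verts;
  else                             // fix 2: make the last branch unconditional
    ents = &elems;                 // (was: else if (ent_type == 1))
  return ents;
}
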
......@@ -169,8 +169,8 @@ void IODebugTrack::all_reduce()
std::vector<DRange> send(dataSet.size()), recv(total);
std::copy( dataSet.begin(), dataSet.end(), send.begin() );
MPI_Gatherv( &send[0], 3*send.size(), MPI_UNSIGNED_LONG,
&recv[0], &counts[0], &displs[0], MPI_UNSIGNED_LONG,
MPI_Gatherv( (void*)&send[0], 3*send.size(), MPI_UNSIGNED_LONG,
(void*)&recv[0], &counts[0], &displs[0], MPI_UNSIGNED_LONG,
0, MPI_COMM_WORLD );
if (0 == mpiRank) {
......
......@@ -2045,7 +2045,7 @@ ErrorCode ReadHDF5::read_all_set_meta()
if (bcast) {
#ifdef MOAB_HAVE_MPI
int ierr = MPI_Bcast(setMeta, num_sets*4, MPI_LONG, 0, comm);
int ierr = MPI_Bcast((void*)setMeta, num_sets*4, MPI_LONG, 0, comm);
if (MPI_SUCCESS != ierr)
MB_SET_ERR(MB_FAILURE, "ReadHDF5 Failure");
#else
......@@ -3337,7 +3337,7 @@ ErrorCode ReadHDF5::read_var_len_tag(Tag tag_handle,
{
ErrorCode rval1;
if (isHandle) {
assert(readSize == sizeof(EntityHandle));
assert( readSize == sizeof(EntityHandle) );
rval1 = readHDF5->convert_id_to_handle((EntityHandle*)data, count);MB_CHK_ERR(rval1);
}
int n = count;
......
......@@ -62,7 +62,7 @@ const char geom_category[][CATEGORY_TAG_SIZE] =
// Constructor
ReadOBJ::ReadOBJ(Interface* impl)
: MBI(impl),geom_tag(0), id_tag(0), name_tag(0), category_tag(0),
faceting_tol_tag(0), geometry_resabs_tag(0), obj_name_tag(0), sense_tag(0)
faceting_tol_tag(0), geometry_resabs_tag(0), obj_name_tag(0)
{
assert(NULL != impl);
MBI->query_interface(readMeshIface);
......@@ -297,16 +297,16 @@ ErrorCode ReadOBJ::load_file(const char *filename,
*/
void ReadOBJ::tokenize( const std::string& str,
std::vector<std::string>& tokens,
const char* delimiters)
const char* delimiters2)
{
tokens.clear();
std::string::size_type next_token_end, next_token_start =
str.find_first_not_of( delimiters, 0);
str.find_first_not_of( delimiters2, 0);
while ( std::string::npos != next_token_start )
{
next_token_end = str.find_first_of( delimiters, next_token_start );
next_token_end = str.find_first_of( delimiters2, next_token_start );
if ( std::string::npos == next_token_end )
{
tokens.push_back(str.substr(next_token_start));
......@@ -316,7 +316,7 @@ void ReadOBJ::tokenize( const std::string& str,
{
tokens.push_back( str.substr( next_token_start, next_token_end -
next_token_start ) );
next_token_start = str.find_first_not_of( delimiters, next_token_end );
next_token_start = str.find_first_not_of( delimiters2, next_token_end );
}
}
}
......
......@@ -148,8 +148,7 @@ private:
GeomTopoTool* myGeomTool;
Tag geom_tag,id_tag,name_tag,category_tag,faceting_tol_tag, geometry_resabs_tag, obj_name_tag,
sense_tag;
Tag geom_tag,id_tag,name_tag,category_tag,faceting_tol_tag, geometry_resabs_tag, obj_name_tag;
/* The keyword type function matches the first character extracted from each line to a type of line
*/
......
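
Two independent cleanups meet in ReadOBJ: the unused sense_tag member is dropped from both the constructor initializer list and the class definition (the same unused-private-field pattern as in WriteUtil), and the tokenizer's parameter is renamed from delimiters to delimiters2, presumably to stop it shadowing another delimiters visible at that point -- the kind of collision -Wshadow reports. A sketch of the rename with hypothetical code (not ReadOBJ itself):

#include <string>

static const char* const delimiters = " \t";   // hypothetical file-scope name

// A parameter spelled the same way shadows the name above; renaming it
// (here to delimiters2, as in the hunk) is the least intrusive fix, as long
// as every use in the body is renamed with it.
std::string first_token(const std::string& str, const char* delimiters2)
{
  std::string::size_type start = str.find_first_not_of(delimiters2, 0);
  if (start == std::string::npos)
    return std::string();
  std::string::size_type end = str.find_first_of(delimiters2, start);
  return str.substr(start, end == std::string::npos ? std::string::npos
                                                    : end - start);
}

std::string first_token_default(const std::string& str)
{
  return first_token(str, delimiters);         // the file-scope name stays in use
}
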
......@@ -4,7 +4,6 @@
#include "MBParallelConventions.h"
#include "moab/Interface.hpp"
#include "moab/Range.hpp"
#include "moab/WriteUtilIface.hpp"
#include "moab/FileOptions.hpp"
#include "GmshUtil.hpp"
......
......@@ -55,28 +55,8 @@
#error Attempt to compile WriteHDF5 with HDF5 support disabled
#endif
/* Access HDF5 file handle for debugging
#include <H5Fpublic.h>
struct file { uint32_t magic; hid_t handle; };
*/
#undef DEBUG
#undef BLOCKED_COORD_IO
#ifdef DEBUG
/*
# include <H5Epublic.h>
extern "C" herr_t hdf_error_handler(void*)
{
H5Eprint(stderr);
assert(0);
}
*/
# define myassert(A) assert(A)
#else
# define myassert(A)
#endif
#ifdef MOAB_HAVE_VALGRIND
# include <valgrind/memcheck.h>
#else
......@@ -95,7 +75,7 @@ namespace moab {
template <typename T> inline
void VALGRIND_MAKE_VEC_UNDEFINED(std::vector<T>& v) {
(void)VALGRIND_MAKE_MEM_UNDEFINED(&v[0], v.size() * sizeof(T));
(void)VALGRIND_MAKE_MEM_UNDEFINED( (T*)&v[0], v.size() * sizeof(T));
}
#define WRITE_HDF5_BUFFER_SIZE (40 * 1024 * 1024)
......@@ -172,7 +152,7 @@ static herr_t handle_hdf5_error(void* data)
do { \
if (mhdf_isError(&(A))) { \
MB_SET_ERR_CONT(mhdf_message(&(A))); \
myassert(0); \
assert(0); \
return error(MB_FAILURE); \
} \
} while (false)
......@@ -181,7 +161,7 @@ do { \
do { \
if (mhdf_isError(&(A))) { \
MB_SET_ERR_CONT(mhdf_message(&(A))); \
myassert(0); \
assert(0); \
mhdf_closeData(filePtr, (B), &(A)); \
return error(MB_FAILURE); \
} \
......@@ -191,7 +171,7 @@ do { \
do { \
if (mhdf_isError(&(A))) { \
MB_SET_ERR_CONT(mhdf_message(&(A))); \
myassert(0); \
assert(0); \
mhdf_closeData(filePtr, (B)[0], &(A)); \
mhdf_closeData(filePtr, (B)[1], &(A)); \
return error(MB_FAILURE); \
......@@ -202,7 +182,7 @@ do { \
do { \
if (mhdf_isError(&(A))) { \
MB_SET_ERR_CONT(mhdf_message(&(A))); \
myassert(0); \
assert(0); \
mhdf_closeData(filePtr, (B)[0], &(A)); \
mhdf_closeData(filePtr, (B)[1], &(A)); \
mhdf_closeData(filePtr, (B)[2], &(A)); \
......@@ -214,7 +194,7 @@ do { \
do { \
if (mhdf_isError(&(A))) { \
MB_SET_ERR_CONT(mhdf_message(&(A))); \
myassert(0); \
assert(0); \
mhdf_closeData(filePtr, (B), &(A)); \
if (C) mhdf_closeData(filePtr, (D), &(A)); \
return error(MB_FAILURE); \
......@@ -234,7 +214,7 @@ do { \
if (MB_SUCCESS != (A)) { \
MB_CHK_ERR_CONT((A)); \
mhdf_closeData(filePtr, (B), &(C)); \
myassert(0); \
assert(0); \
return error(A); \
} \
} while (false)
......@@ -246,7 +226,7 @@ do { \
mhdf_closeData(filePtr, (B)[0], &(C)); \
mhdf_closeData(filePtr, (B)[1], &(C)); \
write_finished(); \
myassert(0); \
assert(0); \
return error(A); \
} \
} while (false)
......@@ -259,7 +239,7 @@ do { \
mhdf_closeData(filePtr, (B)[1], &(C)); \
mhdf_closeData(filePtr, (B)[2], &(C)); \
write_finished(); \
myassert(0); \
assert(0); \
return error(A); \
} \
} while (false)
......@@ -272,7 +252,7 @@ do { \
if (C) \
mhdf_closeData(filePtr, (D), &(E)); \
write_finished(); \
myassert(0); \
assert(0); \
return error(A); \
} \
} while (false)
......
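
With the DEBUG scaffolding gone, the HDF5 error-checking macros fall back on the standard assert from <cassert>, which is itself compiled out when NDEBUG is defined; the hand-rolled myassert() wrapper (a no-op unless the local DEBUG macro was set) was an extra layer over the same idea. A minimal sketch of the behaviour being relied on:

#include <cassert>

// Illustrative only.  assert() is active in debug builds and a no-op when
// built with -DNDEBUG, so a custom empty-in-release wrapper adds little.
int checked_divide(int a, int b)
{
  assert(b != 0);
  return b != 0 ? a / b : 0;
}
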
......@@ -36,9 +36,7 @@ using namespace moab;
public:
MetisPartitioner( Interface *impl = NULL,
const bool use_coords = false,
int argc = 0,
char **argv = NULL);
const bool use_coords = false);
virtual ~MetisPartitioner();
......@@ -84,10 +82,6 @@ using namespace moab;
private:
int argcArg;
char **argvArg;
ErrorCode assemble_graph(const int dimension,
std::vector<double> &coords,
std::vector<idx_t> &moab_ids,
......
......@@ -5907,7 +5907,7 @@ ErrorCode ParallelComm::get_remote_handles(EntityHandle *local_vec, EntityHandle
this_incoming++;
PRINT_DEBUG_IRECV(procConfig.proc_rank(), to_proc, (unsigned char*)ack_buff,
sizeof(int), mesg_tag - 1, this_incoming);
success = MPI_Irecv(ack_buff, sizeof(int),
success = MPI_Irecv((void*)ack_buff, sizeof(int),
MPI_UNSIGNED_CHAR, to_proc,
mesg_tag - 1, procConfig.proc_comm(),
&ack_req);
......@@ -8301,7 +8301,7 @@ ErrorCode ParallelComm::get_remote_handles(EntityHandle *local_vec, EntityHandle
// Set up to receive data
for (int i = 0; i < num_proc; i++) {
result[i].resize(sizes_recv[i]);
ierr = MPI_Irecv(&result[i][0],
ierr = MPI_Irecv( (void *)( &(result[i][0]) ),
sizeof(SharedEntityData)*sizes_recv[i],
MPI_UNSIGNED_CHAR,
buffProcs[i], tag, cm, &recv_req[i]);
......@@ -8311,7 +8311,7 @@ ErrorCode ParallelComm::get_remote_handles(EntityHandle *local_vec, EntityHandle
// Send data
for (int i = 0; i < num_proc; i++) {
ierr = MPI_Isend(&send_data[i][0],
ierr = MPI_Isend((void *)( &(send_data[i][0]) ),
sizeof(SharedEntityData)*sizes_send[i],
MPI_UNSIGNED_CHAR,
buffProcs[i], tag, cm, &send_req[i]);
......
......@@ -149,7 +149,7 @@ namespace moab{
box.bMax *= -1;
/*Communicate to all processors*/
MPI_Allreduce(&box, gbox, 6, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
MPI_Allreduce( (void*)&box, gbox, 6, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
/*Assemble Global Bounding Box*/
//Flip the max back
......
......@@ -255,13 +255,13 @@ void gs_data::nonlocal_info::nonlocal(realType *u, int op, MPI_Comm comm)
start = buf;
for (;c;--c)
*buf++ = u[*sh_ind++];
MPI_Isend(start,nshared[i]*sizeof(realType),
MPI_Isend((void*)start,nshared[i]*sizeof(realType),
MPI_UNSIGNED_CHAR, targ[i],id,comm,reqs++);
}
start = buf;
for(i=0; i<np; ++i)
{
MPI_Irecv(start,nshared[i]*sizeof(realType),MPI_UNSIGNED_CHAR,
MPI_Irecv((void*)start,nshared[i]*sizeof(realType),MPI_UNSIGNED_CHAR,
targ[i],targ[i],comm,reqs++);
start+=nshared[i];
}
......@@ -315,13 +315,13 @@ void gs_data::nonlocal_info::nonlocal_vec(realType *u, uint n,
memcpy(buf,u+n*(*sh_ind++),size);
buf+=n;
}
MPI_Isend(start,ns*size,MPI_UNSIGNED_CHAR,targ[i],id,comm,reqs++);
MPI_Isend((void*)start,ns*size,MPI_UNSIGNED_CHAR,targ[i],id,comm,reqs++);
}
start = buf;
for (i=0; i<np; ++i)
{
int nsn=n*nshared[i];
MPI_Irecv(start,nsn*size,MPI_UNSIGNED_CHAR,targ[i],targ[i],comm,reqs++);
MPI_Irecv((void*)start,nsn*size,MPI_UNSIGNED_CHAR,targ[i],targ[i],comm,reqs++);
start+=nsn;
}
for (reqs=this->_reqs,i=np*2;i;--i)
......@@ -379,13 +379,13 @@ void gs_data::nonlocal_info::nonlocal_many(realType **u, uint n, int op,
*buf++=uu[sh_ind[c]];
}
sh_ind+=ns;
MPI_Isend(start,n*ns*sizeof(realType),MPI_UNSIGNED_CHAR,targ[i],id,comm,reqs++);
MPI_Isend((void*)start,n*ns*sizeof(realType),MPI_UNSIGNED_CHAR,targ[i],id,comm,reqs++);
}
start = buf;
for (i=0; i<np; ++i)
{
int nsn = n*nshared[i];
MPI_Irecv(start,nsn*sizeof(realType),MPI_UNSIGNED_CHAR,
MPI_Irecv((void*)start,nsn*sizeof(realType),MPI_UNSIGNED_CHAR,
targ[i],targ[i],comm,reqs++);
start+=nsn;
}
......@@ -491,10 +491,10 @@ void gs_data::crystal_data::send_(uint target, int recvn)
int i;
(void)VALGRIND_CHECK_MEM_IS_DEFINED( &send->n, sizeof(uint) );
MPI_Isend(&send->n,sizeof(uint),MPI_UNSIGNED_CHAR,
MPI_Isend((void*)&send->n,sizeof(uint),MPI_UNSIGNED_CHAR,
target ,_id ,_comm,&req[ 0]);
for (i=0; i<recvn; ++i)
MPI_Irecv(&count[i] ,sizeof(uint),MPI_UNSIGNED_CHAR,
MPI_Irecv((void*)&count[i] ,sizeof(uint),MPI_UNSIGNED_CHAR,
target+i,target+i,_comm,&req[i+1]);
MPI_Waitall(recvn+1,req,status);
sum = keep->n;
......@@ -507,14 +507,14 @@ void gs_data::crystal_data::send_(uint target, int recvn)
keep->n=sum;
(void)VALGRIND_CHECK_MEM_IS_DEFINED( send->buf.ptr,send->n*sizeof(uint) );
MPI_Isend(send->buf.ptr,send->n*sizeof(uint),
MPI_Isend((void*)send->buf.ptr,send->n*sizeof(uint),
MPI_UNSIGNED_CHAR,target,_id,_comm,&req[0]);
if (recvn)
{
MPI_Irecv(recv[0],count[0]*sizeof(uint),MPI_UNSIGNED_CHAR,
MPI_Irecv((void*)recv[0],count[0]*sizeof(uint),MPI_UNSIGNED_CHAR,
target,target,_comm,&req[1]);
if (recvn==2)
MPI_Irecv(recv[1],count[1]*sizeof(uint),MPI_UNSIGNED_CHAR,
MPI_Irecv((void*)recv[1],count[1]*sizeof(uint),MPI_UNSIGNED_CHAR,
target+1,target+1,_comm,&req[2]);
}
MPI_Waitall(recvn+1,req,status);
......
......@@ -1218,8 +1218,8 @@ ErrorCode Coupler::get_matching_entities(EntityHandle
}
// Send all buffers to the master proc for consolidation
MPI_Gatherv(tuple_buf, tuple_buf_len, MPI_INT,
all_tuples_buf, recv_cnts, offsets, MPI_INT, MASTER_PROC,
MPI_Gatherv((void*)tuple_buf, tuple_buf_len, MPI_INT,
(void*)all_tuples_buf, recv_cnts, offsets, MPI_INT, MASTER_PROC,
myPc->proc_config().proc_comm());
ERRORMPI("Gathering tuple_lists failed.", err);
free(tuple_buf); // malloc'd in pack_tuples
......@@ -1264,7 +1264,7 @@ ErrorCode Coupler::get_matching_entities(EntityHandle
if (rank != MASTER_PROC)
ctl_buf = (uint*)malloc(ctl_buf_sz * sizeof(uint));
ierr = MPI_Bcast(ctl_buf, ctl_buf_sz, MPI_INT, MASTER_PROC, myPc->proc_config().proc_comm());
ierr = MPI_Bcast((void*)ctl_buf, ctl_buf_sz, MPI_INT, MASTER_PROC, myPc->proc_config().proc_comm());
ERRORMPI("Broadcasting tuple_list failed.", ierr);
if (rank != MASTER_PROC)
......
......@@ -227,7 +227,7 @@ int main(int argc, char* argv[])
#ifdef MOAB_HAVE_METIS
MetisPartitioner *metis_tool = NULL;
if (moab_use_metis && !metis_tool) {
metis_tool = new MetisPartitioner (&mb, false, argc, argv);
metis_tool = new MetisPartitioner (&mb, false);
}
if ((aggregating_tag.empty() && partition_tagged_sets) || (aggregating_tag.empty() && partition_tagged_ents))
......