Commit 298b8876 authored by Iulian Grindeanu, committed by vijaysm

fix clang warnings on OSX

parent 0e0273d0
......@@ -359,7 +359,7 @@ ErrorCode Core::query_interface_type( const std::type_info& type, void*& ptr )
}
else if (type == typeid(WriteUtilIface)) {
if(!mMBWriteUtil)
mMBWriteUtil = new WriteUtil(this, mError);
mMBWriteUtil = new WriteUtil(this);
ptr = static_cast<WriteUtilIface*>(mMBWriteUtil);
}
else if (type == typeid(ReaderWriterSet)) {
......
......@@ -104,7 +104,10 @@ ErrorCode DenseTag::get_array(const SequenceManager* seqman,
const unsigned char* const& ptr,
size_t& count) const
{
return get_array(seqman, NULL, h, ptr, count);
// Cast away the const-ness; do we really want to do this?
// This overload is probably not called anywhere;
// the clang compiler flagged it.
return get_array(seqman, NULL, h, const_cast<unsigned char *> ( ptr ), count);
}
ErrorCode DenseTag::get_array(const SequenceManager* seqman,
......@@ -140,7 +143,9 @@ ErrorCode DenseTag::get_array(const SequenceManager* seqman,
ErrorCode DenseTag::get_array(const EntitySequence* seq,
const unsigned char* const & ptr) const
{
return get_array(seq, ptr);
// Cast away the constness; otherwise this would be infinite recursion.
// This overload is probably not called anywhere.
return get_array(seq, const_cast<unsigned char *> ( ptr ));
}
ErrorCode DenseTag::get_array(const EntitySequence* seq,
......
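Note: a minimal sketch (stand-in types, not the DenseTag declarations) of the warning fixed in the hunk above. Inside the const-pointer overload, an unqualified call with the const pointer re-selects the same overload, which clang reports as `-Winfinite-recursion`; casting away constness makes the call bind to the other overload instead.

```cpp
struct Holder {
    unsigned char* data = nullptr;

    // overload that does the real work
    bool get(unsigned char* const& p) const { return p == data; }

    // const-pointer overload, mirroring DenseTag::get_array above
    bool get(const unsigned char* const& p) const {
        // without the const_cast, get(p) would call this same overload forever
        return get(const_cast<unsigned char*>(p));
    }
};
```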
......@@ -104,7 +104,7 @@ namespace moab
// find best initial guess to improve convergence
CartVect tmp_params[] = {CartVect(-1,-1,-1), CartVect(1,-1,-1), CartVect(-1,1,-1), CartVect(-1,-1,1)};
double resl = HUGE;
double resl = MAXFLOAT;
CartVect new_pos, tmp_pos;
ErrorCode rval;
for (unsigned int i = 0; i < 4; i++) {
......
......@@ -101,7 +101,7 @@ namespace moab
// find best initial guess to improve convergence
CartVect tmp_params[] = {CartVect(-1,-1,-1), CartVect(1,-1,-1), CartVect(-1,1,-1)};
double resl = HUGE;
double resl = MAXFLOAT;
CartVect new_pos, tmp_pos;
ErrorCode rval;
for (unsigned int i = 0; i < 3; i++) {
......
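Note: the two hunks above replace the legacy `HUGE` macro, which clang's headers on OSX do not reliably provide, with `MAXFLOAT` (the BSD spelling of `FLT_MAX`) as the initial "worst residual" sentinel. A hedged, self-contained sketch of the same best-initial-guess pattern; `evaluate_residual` and the candidate list are placeholders, not MOAB code.

```cpp
#include <cfloat>
#include <cmath>
#include <cstdio>

static double evaluate_residual(double x) { return std::fabs(x - 0.3); }

int main() {
    const double candidates[] = { -1.0, 1.0, 0.0, 0.5 };
    double resl = FLT_MAX;               // was HUGE before this commit
    double best = candidates[0];
    for (double x : candidates) {
        const double r = evaluate_residual(x);
        if (r < resl) { resl = r; best = x; }
    }
    std::printf("best initial guess %g (residual %g)\n", best, resl);
    return 0;
}
```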
......@@ -37,12 +37,8 @@ using namespace moab;
const bool debug = false;
MetisPartitioner::MetisPartitioner( Interface *impl,
const bool use_coords,
int argc,
char **argv)
const bool use_coords)
: PartitionerBase<idx_t>(impl,use_coords),
argcArg(argc),
argvArg(argv)
{
}
......@@ -613,7 +609,7 @@ ErrorCode MetisPartitioner::write_partition(const idx_t nparts,
for (i = 0; i < nparts; i++) dum_ids[i] = i;
result = mbImpl->tag_set_data(part_set_tag, partSets, dum_ids);
delete dum_ids;
delete [] dum_ids;
// assign entities to the relevant sets
std::vector<EntityHandle> tmp_part_sets;
......
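Note: besides dropping the unused `argc`/`argv` constructor arguments, the hunk above fixes a mismatched deallocation. Storage obtained with `new[]` must be released with `delete[]`; plain `delete` is undefined behavior and is what clang's `-Wmismatched-new-delete` points at. A sketch with placeholder names, not the MetisPartitioner code:

```cpp
#include <cstddef>

void make_part_ids(std::size_t nparts) {
    int* dum_ids = new int[nparts];
    for (std::size_t i = 0; i < nparts; ++i)
        dum_ids[i] = static_cast<int>(i);
    // ... pass dum_ids to tag_set_data(...) as in the hunk above ...
    delete [] dum_ids;   // previously "delete dum_ids;"
}
```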
......@@ -717,7 +717,7 @@ ErrorCode ScdInterface::tag_shared_vertices(ParallelComm *pcomm, ScdBox *box)
send_reqs(procs.size(), MPI_REQUEST_NULL);
std::vector<EntityHandle> rhandles(4*procs.size()), shandles(4);
for (unsigned int i = 0; i < procs.size(); i++) {
int success = MPI_Irecv(&rhandles[4*i], 4*sizeof(EntityHandle),
int success = MPI_Irecv((void*)&rhandles[4*i], 4*sizeof(EntityHandle),
MPI_UNSIGNED_CHAR, procs[i],
1, pcomm->proc_config().proc_comm(),
&recv_reqs[i]);
......@@ -740,7 +740,7 @@ ErrorCode ScdInterface::tag_shared_vertices(ParallelComm *pcomm, ScdBox *box)
shandles[3] = box->start_element();
}
for (unsigned int i = 0; i < procs.size(); i++) {
int success = MPI_Isend(&shandles[0], 4*sizeof(EntityHandle), MPI_UNSIGNED_CHAR, procs[i],
int success = MPI_Isend((void*)&shandles[0], 4*sizeof(EntityHandle), MPI_UNSIGNED_CHAR, procs[i],
1, pcomm->proc_config().proc_comm(), &send_reqs[i]);
if (success != MPI_SUCCESS) return MB_FAILURE;
}
......
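Note: here and in several later hunks the MPI buffer arguments gain explicit `(void*)` casts, presumably to quiet clang diagnostics about the pointer qualifiers handed to MPI prototypes that predate `const void*` buffers. A hedged sketch of the pattern; `exchange_handles`, the tag value, and the buffer types are assumptions for illustration, not the ScdInterface code.

```cpp
#include <mpi.h>

int exchange_handles(unsigned long* recv_buf, const unsigned long* send_buf,
                     int nhandles, int peer, MPI_Comm comm) {
    MPI_Request reqs[2];
    // cast to void* so the calls match older MPI prototypes and keep clang quiet
    int rc = MPI_Irecv((void*)recv_buf,
                       (int)(nhandles * sizeof(unsigned long)),
                       MPI_UNSIGNED_CHAR, peer, 1, comm, &reqs[0]);
    if (rc != MPI_SUCCESS) return rc;
    rc = MPI_Isend((void*)send_buf,
                   (int)(nhandles * sizeof(unsigned long)),
                   MPI_UNSIGNED_CHAR, peer, 1, comm, &reqs[1]);
    if (rc != MPI_SUCCESS) return rc;
    return MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);
}
```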
......@@ -42,8 +42,8 @@
namespace moab {
WriteUtil::WriteUtil(Core* mdb, Error* error_handler)
: WriteUtilIface(), mMB(mdb), mError(error_handler)
WriteUtil::WriteUtil(Core* mdb)
: WriteUtilIface(), mMB(mdb)
{
}
......
......@@ -25,18 +25,16 @@
namespace moab {
class Core;
class Error;
class WriteUtil : public WriteUtilIface
{
private:
//! Pointer to the Core
Core* mMB;
Error* mError;
public:
//! Constructor takes Core pointer
WriteUtil(Core* mdb, Error* error_handler);
WriteUtil(Core* mdb);
//! Destructor
~WriteUtil(){}
......
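Note: the WriteUtil changes above remove a private member that is set but never read, the kind of thing clang's `-Wunused-private-field` reports. A sketch with placeholder names, not the MOAB declarations:

```cpp
class WriterSketch {
    void* mMB;           // still needed
    // void* mError;     // written but never read -> dropped to silence the warning
public:
    explicit WriterSketch(void* mdb) : mMB(mdb) {}
    void* core() const { return mMB; }
};
```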
......@@ -966,9 +966,9 @@ ErrCode iMOAB_SetIntTagStorage(iMOAB_AppID pid, const iMOAB_String tag_storage_n
return 1;
// set it on a subset of entities, based on type and length
Range * ents_to_set;
if (* ent_type == 0)// vertices
if (*ent_type == 0)// vertices
ents_to_set = &data.all_verts;
else if (* ent_type == 1)
else // if (*ent_type == 1) // *ent_type can be 0 (vertices) or 1 (elements)
ents_to_set = &data.primary_elems;
int nents_to_be_set = *num_tag_storage_length /tagLength;
......@@ -1008,9 +1008,9 @@ ErrCode iMOAB_GetIntTagStorage(iMOAB_AppID pid, const iMOAB_String tag_storage_n
// set it on a subset of entities, based on type and length
Range * ents_to_get;
if (* ent_type == 0)// vertices
if (*ent_type == 0)// vertices
ents_to_get = &data.all_verts;
else if (* ent_type == 1)
else // if (*ent_type == 1)
ents_to_get = &data.primary_elems;
int nents_to_get = *num_tag_storage_length /tagLength;
......
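Note: in the two iMOAB hunks above, clang warns that the `Range*` may be used uninitialized when neither guarded branch is taken; turning the second branch into a plain `else` guarantees assignment (the comment records that `*ent_type` is 0 for vertices or 1 for elements). A sketch of the same control-flow fix, with `std::vector<int>` standing in for `Range` purely for illustration:

```cpp
#include <vector>

std::vector<int>* select_entities(int ent_type,
                                  std::vector<int>& all_verts,
                                  std::vector<int>& primary_elems) {
    std::vector<int>* ents_to_set;
    if (ent_type == 0)        // vertices
        ents_to_set = &all_verts;
    else                      // elements (ent_type == 1)
        ents_to_set = &primary_elems;
    return ents_to_set;       // initialized on every path
}
```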
......@@ -169,8 +169,8 @@ void IODebugTrack::all_reduce()
std::vector<DRange> send(dataSet.size()), recv(total);
std::copy( dataSet.begin(), dataSet.end(), send.begin() );
MPI_Gatherv( &send[0], 3*send.size(), MPI_UNSIGNED_LONG,
&recv[0], &counts[0], &displs[0], MPI_UNSIGNED_LONG,
MPI_Gatherv( (void*)&send[0], 3*send.size(), MPI_UNSIGNED_LONG,
(void*)&recv[0], &counts[0], &displs[0], MPI_UNSIGNED_LONG,
0, MPI_COMM_WORLD );
if (0 == mpiRank) {
......
......@@ -2045,7 +2045,7 @@ ErrorCode ReadHDF5::read_all_set_meta()
if (bcast) {
#ifdef MOAB_HAVE_MPI
int ierr = MPI_Bcast(setMeta, num_sets*4, MPI_LONG, 0, comm);
int ierr = MPI_Bcast((void*)setMeta, num_sets*4, MPI_LONG, 0, comm);
if (MPI_SUCCESS != ierr)
MB_SET_ERR(MB_FAILURE, "ReadHDF5 Failure");
#else
......@@ -3337,7 +3337,7 @@ ErrorCode ReadHDF5::read_var_len_tag(Tag tag_handle,
{
ErrorCode rval1;
if (isHandle) {
assert(readSize == sizeof(EntityHandle));
assert( (void) (readSize == sizeof(EntityHandle)));
rval1 = readHDF5->convert_id_to_handle((EntityHandle*)data, count);MB_CHK_ERR(rval1);
}
int n = count;
......
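Note: the rewritten assert above appears intended to keep the comparison (and `readSize`) referenced when asserts are compiled out. A hedged sketch of the conventional spelling of that idiom; the function and expected size are placeholders, not the ReadHDF5 code.

```cpp
#include <cassert>
#include <cstddef>

void check_handle_size(std::size_t readSize) {
    assert(readSize == sizeof(unsigned long));
    (void)readSize;   // still referenced when assert() expands to nothing
}
```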
......@@ -62,7 +62,7 @@ const char geom_category[][CATEGORY_TAG_SIZE] =
// Constructor
ReadOBJ::ReadOBJ(Interface* impl)
: MBI(impl),geom_tag(0), id_tag(0), name_tag(0), category_tag(0),
faceting_tol_tag(0), geometry_resabs_tag(0), obj_name_tag(0), sense_tag(0)
faceting_tol_tag(0), geometry_resabs_tag(0), obj_name_tag(0)
{
assert(NULL != impl);
MBI->query_interface(readMeshIface);
......@@ -297,16 +297,16 @@ ErrorCode ReadOBJ::load_file(const char *filename,
*/
void ReadOBJ::tokenize( const std::string& str,
std::vector<std::string>& tokens,
const char* delimiters)
const char* delimiters2)
{
tokens.clear();
std::string::size_type next_token_end, next_token_start =
str.find_first_not_of( delimiters, 0);
str.find_first_not_of( delimiters2, 0);
while ( std::string::npos != next_token_start )
{
next_token_end = str.find_first_of( delimiters, next_token_start );
next_token_end = str.find_first_of( delimiters2, next_token_start );
if ( std::string::npos == next_token_end )
{
tokens.push_back(str.substr(next_token_start));
......@@ -316,7 +316,7 @@ void ReadOBJ::tokenize( const std::string& str,
{
tokens.push_back( str.substr( next_token_start, next_token_end -
next_token_start ) );
next_token_start = str.find_first_not_of( delimiters, next_token_end );
next_token_start = str.find_first_not_of( delimiters2, next_token_end );
}
}
}
......
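Note: the tokenize hunk above renames the parameter `delimiters` to `delimiters2`, presumably because it hid another `delimiters` name in ReadOBJ and triggered a shadowing diagnostic (e.g. `-Wshadow`). A sketch of the situation; the file-scope constant below is an assumption for illustration, not the ReadOBJ declaration.

```cpp
#include <cstddef>
#include <string>

namespace demo {
const char delimiters[] = " \t";   // file-scope name the parameter used to hide

std::size_t first_token_start(const std::string& str,
                              const char* delimiters2 = delimiters)  // renamed parameter
{
    return str.find_first_not_of(delimiters2, 0);
}
}  // namespace demo
```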
......@@ -148,8 +148,7 @@ private:
GeomTopoTool* myGeomTool;
Tag geom_tag,id_tag,name_tag,category_tag,faceting_tol_tag, geometry_resabs_tag, obj_name_tag,
sense_tag;
Tag geom_tag,id_tag,name_tag,category_tag,faceting_tol_tag, geometry_resabs_tag, obj_name_tag;
/* The keyword type function matches the first character extracted from each line to a type of line
*/
......
......@@ -4,7 +4,6 @@
#include "MBParallelConventions.h"
#include "moab/Interface.hpp"
#include "moab/Range.hpp"
#include "moab/WriteUtilIface.hpp"
#include "moab/FileOptions.hpp"
#include "GmshUtil.hpp"
......
......@@ -95,7 +95,7 @@ namespace moab {
template <typename T> inline
void VALGRIND_MAKE_VEC_UNDEFINED(std::vector<T>& v) {
(void)VALGRIND_MAKE_MEM_UNDEFINED(&v[0], v.size() * sizeof(T));
(void)VALGRIND_MAKE_MEM_UNDEFINED( (T*)&v[0], v.size() * sizeof(T));
}
#define WRITE_HDF5_BUFFER_SIZE (40 * 1024 * 1024)
......
......@@ -36,9 +36,7 @@ using namespace moab;
public:
MetisPartitioner( Interface *impl = NULL,
const bool use_coords = false,
int argc = 0,
char **argv = NULL);
const bool use_coords = false);
virtual ~MetisPartitioner();
......@@ -84,10 +82,6 @@ using namespace moab;
private:
int argcArg;
char **argvArg;
ErrorCode assemble_graph(const int dimension,
std::vector<double> &coords,
std::vector<idx_t> &moab_ids,
......
......@@ -5907,7 +5907,7 @@ ErrorCode ParallelComm::get_remote_handles(EntityHandle *local_vec, EntityHandle
this_incoming++;
PRINT_DEBUG_IRECV(procConfig.proc_rank(), to_proc, (unsigned char*)ack_buff,
sizeof(int), mesg_tag - 1, this_incoming);
success = MPI_Irecv(ack_buff, sizeof(int),
success = MPI_Irecv((void*)ack_buff, sizeof(int),
MPI_UNSIGNED_CHAR, to_proc,
mesg_tag - 1, procConfig.proc_comm(),
&ack_req);
......@@ -8301,7 +8301,7 @@ ErrorCode ParallelComm::get_remote_handles(EntityHandle *local_vec, EntityHandle
// Set up to receive data
for (int i = 0; i < num_proc; i++) {
result[i].resize(sizes_recv[i]);
ierr = MPI_Irecv(&result[i][0],
ierr = MPI_Irecv( (void *)( &(result[i][0]) ),
sizeof(SharedEntityData)*sizes_recv[i],
MPI_UNSIGNED_CHAR,
buffProcs[i], tag, cm, &recv_req[i]);
......@@ -8311,7 +8311,7 @@ ErrorCode ParallelComm::get_remote_handles(EntityHandle *local_vec, EntityHandle
// Send data
for (int i = 0; i < num_proc; i++) {
ierr = MPI_Isend(&send_data[i][0],
ierr = MPI_Isend((void *)( &(send_data[i][0]) ),
sizeof(SharedEntityData)*sizes_send[i],
MPI_UNSIGNED_CHAR,
buffProcs[i], tag, cm, &send_req[i]);
......
......@@ -149,7 +149,7 @@ namespace moab{
box.bMax *= -1;
/*Communicate to all processors*/
MPI_Allreduce(&box, gbox, 6, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
MPI_Allreduce( (void*)&box, gbox, 6, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
/*Assemble Global Bounding Box*/
//Flip the max back
......
......@@ -255,13 +255,13 @@ void gs_data::nonlocal_info::nonlocal(realType *u, int op, MPI_Comm comm)
start = buf;
for (;c;--c)
*buf++ = u[*sh_ind++];
MPI_Isend(start,nshared[i]*sizeof(realType),
MPI_Isend((void*)start,nshared[i]*sizeof(realType),
MPI_UNSIGNED_CHAR, targ[i],id,comm,reqs++);
}
start = buf;
for(i=0; i<np; ++i)
{
MPI_Irecv(start,nshared[i]*sizeof(realType),MPI_UNSIGNED_CHAR,
MPI_Irecv((void*)start,nshared[i]*sizeof(realType),MPI_UNSIGNED_CHAR,
targ[i],targ[i],comm,reqs++);
start+=nshared[i];
}
......@@ -315,13 +315,13 @@ void gs_data::nonlocal_info::nonlocal_vec(realType *u, uint n,
memcpy(buf,u+n*(*sh_ind++),size);
buf+=n;
}
MPI_Isend(start,ns*size,MPI_UNSIGNED_CHAR,targ[i],id,comm,reqs++);
MPI_Isend((void*)start,ns*size,MPI_UNSIGNED_CHAR,targ[i],id,comm,reqs++);
}
start = buf;
for (i=0; i<np; ++i)
{
int nsn=n*nshared[i];
MPI_Irecv(start,nsn*size,MPI_UNSIGNED_CHAR,targ[i],targ[i],comm,reqs++);
MPI_Irecv((void*)start,nsn*size,MPI_UNSIGNED_CHAR,targ[i],targ[i],comm,reqs++);
start+=nsn;
}
for (reqs=this->_reqs,i=np*2;i;--i)
......@@ -379,13 +379,13 @@ void gs_data::nonlocal_info::nonlocal_many(realType **u, uint n, int op,
*buf++=uu[sh_ind[c]];
}
sh_ind+=ns;
MPI_Isend(start,n*ns*sizeof(realType),MPI_UNSIGNED_CHAR,targ[i],id,comm,reqs++);
MPI_Isend((void*)start,n*ns*sizeof(realType),MPI_UNSIGNED_CHAR,targ[i],id,comm,reqs++);
}
start = buf;
for (i=0; i<np; ++i)
{
int nsn = n*nshared[i];
MPI_Irecv(start,nsn*sizeof(realType),MPI_UNSIGNED_CHAR,
MPI_Irecv((void*)start,nsn*sizeof(realType),MPI_UNSIGNED_CHAR,
targ[i],targ[i],comm,reqs++);
start+=nsn;
}
......@@ -491,10 +491,10 @@ void gs_data::crystal_data::send_(uint target, int recvn)
int i;
(void)VALGRIND_CHECK_MEM_IS_DEFINED( &send->n, sizeof(uint) );
MPI_Isend(&send->n,sizeof(uint),MPI_UNSIGNED_CHAR,
MPI_Isend((void*)&send->n,sizeof(uint),MPI_UNSIGNED_CHAR,
target ,_id ,_comm,&req[ 0]);
for (i=0; i<recvn; ++i)
MPI_Irecv(&count[i] ,sizeof(uint),MPI_UNSIGNED_CHAR,
MPI_Irecv((void*)&count[i] ,sizeof(uint),MPI_UNSIGNED_CHAR,
target+i,target+i,_comm,&req[i+1]);
MPI_Waitall(recvn+1,req,status);
sum = keep->n;
......@@ -507,14 +507,14 @@ void gs_data::crystal_data::send_(uint target, int recvn)
keep->n=sum;
(void)VALGRIND_CHECK_MEM_IS_DEFINED( send->buf.ptr,send->n*sizeof(uint) );
MPI_Isend(send->buf.ptr,send->n*sizeof(uint),
MPI_Isend((void*)send->buf.ptr,send->n*sizeof(uint),
MPI_UNSIGNED_CHAR,target,_id,_comm,&req[0]);
if (recvn)
{
MPI_Irecv(recv[0],count[0]*sizeof(uint),MPI_UNSIGNED_CHAR,
MPI_Irecv((void*)recv[0],count[0]*sizeof(uint),MPI_UNSIGNED_CHAR,
target,target,_comm,&req[1]);
if (recvn==2)
MPI_Irecv(recv[1],count[1]*sizeof(uint),MPI_UNSIGNED_CHAR,
MPI_Irecv((void*)recv[1],count[1]*sizeof(uint),MPI_UNSIGNED_CHAR,
target+1,target+1,_comm,&req[2]);
}
MPI_Waitall(recvn+1,req,status);
......
......@@ -1218,8 +1218,8 @@ ErrorCode Coupler::get_matching_entities(EntityHandle
}
// Send all buffers to the master proc for consolidation
MPI_Gatherv(tuple_buf, tuple_buf_len, MPI_INT,
all_tuples_buf, recv_cnts, offsets, MPI_INT, MASTER_PROC,
MPI_Gatherv((void*)tuple_buf, tuple_buf_len, MPI_INT,
(void*)all_tuples_buf, recv_cnts, offsets, MPI_INT, MASTER_PROC,
myPc->proc_config().proc_comm());
ERRORMPI("Gathering tuple_lists failed.", err);
free(tuple_buf); // malloc'd in pack_tuples
......@@ -1264,7 +1264,7 @@ ErrorCode Coupler::get_matching_entities(EntityHandle
if (rank != MASTER_PROC)
ctl_buf = (uint*)malloc(ctl_buf_sz * sizeof(uint));
ierr = MPI_Bcast(ctl_buf, ctl_buf_sz, MPI_INT, MASTER_PROC, myPc->proc_config().proc_comm());
ierr = MPI_Bcast((void*)ctl_buf, ctl_buf_sz, MPI_INT, MASTER_PROC, myPc->proc_config().proc_comm());
ERRORMPI("Broadcasting tuple_list failed.", ierr);
if (rank != MASTER_PROC)
......
......@@ -227,7 +227,7 @@ int main(int argc, char* argv[])
#ifdef MOAB_HAVE_METIS
MetisPartitioner *metis_tool = NULL;
if (moab_use_metis && !metis_tool) {
metis_tool = new MetisPartitioner (&mb, false, argc, argv);
metis_tool = new MetisPartitioner (&mb, false);
}
if ((aggregating_tag.empty() && partition_tagged_sets) || (aggregating_tag.empty() && partition_tagged_ents))
......