diff --git a/config/compiler.m4 b/config/compiler.m4 index c2ad110d950821de3ab6aed18cabbaee4bead780..b80d463fefb665adff7562a13eaa333f053db539 100644 --- a/config/compiler.m4 +++ b/config/compiler.m4 @@ -192,6 +192,9 @@ if (test "x$enable_debug" != "xno"); then # debug flags # GNU EXTRA_GNU_CXXFLAGS="-Wall -Wno-long-long -pipe -pedantic -Wshadow -Wunused-parameter -Wpointer-arith -Wformat -Wformat-security -Wextra -Wno-variadic-macros -Wno-unknown-pragmas" EXTRA_GNU_FCFLAGS="-pipe -pedantic -ffree-line-length-0" +# CLANG +EXTRA_CLANG_CXXFLAGS="$EXTRA_GNU_CXXFLAGS" +EXTRA_CLANG_FCFLAGS="$EXTRA_GNU_FCFLAGS" # Intel EXTRA_INTEL_CXXFLAGS="-pipe -C" EXTRA_INTEL_FCFLAGS="-C" @@ -205,8 +208,11 @@ fi if (test "x$enable_cxx_optimize" != "xno"); then # optimization flags #GNU -EXTRA_GNU_CXXFLAGS="$EXTRA_GNU_CXXFLAGS -finline-functions" +EXTRA_GNU_CXXFLAGS="$EXTRA_GNU_CXXFLAGS -fprefetch-loop-arrays -finline-functions -march=native" EXTRA_GNU_FCFLAGS="$EXTRA_GNU_FCFLAGS -ffree-line-length-0 -finline-functions" +#CLANG +EXTRA_CLANG_CXXFLAGS="$EXTRA_CLANG_CXXFLAGS -march=native" +EXTRA_CLANG_FCFLAGS="$EXTRA_CLANG_FCFLAGS -ffree-line-length-0 -finline-functions" # Intel EXTRA_INTEL_CXXFLAGS="$EXTRA_INTEL_CXXFLAGS -xHost -ip -no-prec-div" # -fast EXTRA_INTEL_FCFLAGS="$EXTRA_INTEL_FCFLAGS -xHost -ip -no-prec-div" # -fast @@ -755,7 +761,7 @@ case "$cxx_compiler:$host_cpu" in FATHOM_CXX_SPECIAL="$EXTRA_PGI_CXXFLAGS" ;; Clang:*) - FATHOM_CXX_SPECIAL="$EXTRA_GNU_CXXFLAGS" + FATHOM_CXX_SPECIAL="$EXTRA_CLANG_CXXFLAGS" FATHOM_CXX_32BIT=-m32 FATHOM_CXX_64BIT=-m64 ;; @@ -934,8 +940,8 @@ case "$cc_compiler:$host_cpu" in FATHOM_F77_SPECIAL="$EXTRA_PGI_FCFLAGS" ;; Clang:*) - FATHOM_CC_SPECIAL="$EXTRA_GNU_CXXFLAGS" - FATHOM_FC_SPECIAL="$EXTRA_GNU_FCFLAGS" + FATHOM_CC_SPECIAL="$EXTRA_CLANG_CXXFLAGS" + FATHOM_FC_SPECIAL="$EXTRA_CLANG_FCFLAGS" FATHOM_F77_SPECIAL="$FATHOM_FC_SPECIAL" FATHOM_CC_32BIT=-m32 FATHOM_CC_64BIT=-m64 diff --git a/itaps/imesh/MOAB_iMeshP_unit_tests.cpp b/itaps/imesh/MOAB_iMeshP_unit_tests.cpp index 9f1bee5bc27f14a03025e41a7f421f68bf24cd92..11a0f2a6a284c7a6a71bafe36e9bbc33f3769153 100644 --- a/itaps/imesh/MOAB_iMeshP_unit_tests.cpp +++ b/itaps/imesh/MOAB_iMeshP_unit_tests.cpp @@ -2354,7 +2354,7 @@ int test_entity_copies( iMesh_Instance imesh, iMeshP_PartitionHandle prtn, const // add all vertices to local_data for (size_t j = 0; j < verts.size(); ++j) { - int tag; + int tag=0; ierr = vertex_tag( imesh, verts[j], tag ); if (ierr) break; diff --git a/itaps/imesh/iMesh_MOAB.cpp b/itaps/imesh/iMesh_MOAB.cpp index 2ae2a290b9c1817b1a5d6d0bf1ce5f3bc173ee55..a4c5a01ca73eaadfc454bcfd7689368d0e00c628 100644 --- a/itaps/imesh/iMesh_MOAB.cpp +++ b/itaps/imesh/iMesh_MOAB.cpp @@ -2406,7 +2406,7 @@ extern "C" { EntityHandle set1 = ENTITY_HANDLE(entity_set_1), set2 = ENTITY_HANDLE(entity_set_2); - int isList1, isList2; + int isList1=0, isList2=0; iMesh_isList(instance, entity_set_1, &isList1, err); if (*err != iBase_SUCCESS) return; iMesh_isList(instance, entity_set_2, &isList2, err); @@ -2455,7 +2455,7 @@ extern "C" { EntityHandle set1 = ENTITY_HANDLE(entity_set_1), set2 = ENTITY_HANDLE(entity_set_2); - int isList1, isList2; + int isList1=0, isList2=0; iMesh_isList(instance, entity_set_1, &isList1, err); if (*err != iBase_SUCCESS) return; iMesh_isList(instance, entity_set_2, &isList2, err); @@ -2561,7 +2561,7 @@ extern "C" { EntityHandle set1 = ENTITY_HANDLE(entity_set_1), set2 = ENTITY_HANDLE(entity_set_2); - int isList1, isList2; + int isList1=0, isList2=0; iMesh_isList(instance, entity_set_1, &isList1, err); if 
(*err != iBase_SUCCESS) return; iMesh_isList(instance, entity_set_2, &isList2, err); diff --git a/itaps/irel/test_entset.cpp b/itaps/irel/test_entset.cpp index 07890416be00f676085a6331b5e33278971678be..b321b63cab107f644a011d8d8512033641cc1f98 100644 --- a/itaps/irel/test_entset.cpp +++ b/itaps/irel/test_entset.cpp @@ -34,7 +34,7 @@ typedef void* iRel_EntityHandle; */ void handle_error_code(const int result, int *number_failed, - int *number_not_implemented, + int * /*number_not_implemented*/, int *number_successful) { if (result) { diff --git a/src/Core.cpp b/src/Core.cpp index 3f0b534a56e46ced28f5da7ed1310138bd0b992f..862341630ff168c326316687fb6c5d62de524e47 100644 --- a/src/Core.cpp +++ b/src/Core.cpp @@ -359,7 +359,7 @@ ErrorCode Core::query_interface_type( const std::type_info& type, void*& ptr ) } else if (type == typeid(WriteUtilIface)) { if(!mMBWriteUtil) - mMBWriteUtil = new WriteUtil(this, mError); + mMBWriteUtil = new WriteUtil(this); ptr = static_cast<WriteUtilIface*>(mMBWriteUtil); } else if (type == typeid(ReaderWriterSet)) { diff --git a/src/DenseTag.cpp b/src/DenseTag.cpp index 5cf3317be069c66a150ea1159f2acb43734c9644..23ee3f85f9ff508ce99e0f9477361676768bc9de 100644 --- a/src/DenseTag.cpp +++ b/src/DenseTag.cpp @@ -104,7 +104,10 @@ ErrorCode DenseTag::get_array(const SequenceManager* seqman, const unsigned char* const& ptr, size_t& count) const { - return get_array(seqman, NULL, h, ptr, count); + // cast away the const-ness; do we really want to do this? + // probably we are not calling this anywhere; + // clang compiler found this + return get_array(seqman, NULL, h, const_cast<const unsigned char*&> ( ptr ), count); } ErrorCode DenseTag::get_array(const SequenceManager* seqman, @@ -140,7 +143,9 @@ ErrorCode DenseTag::get_array(const SequenceManager* seqman, ErrorCode DenseTag::get_array(const EntitySequence* seq, const unsigned char* const & ptr) const { - return get_array(seq, ptr); + // cast away the constness; otherwise it would be infinite recursion + // probably we are not calling this anywhere + return get_array(seq, const_cast<const unsigned char*&> ( ptr )); } ErrorCode DenseTag::get_array(const EntitySequence* seq, diff --git a/src/LocalDiscretization/LinearTet.cpp b/src/LocalDiscretization/LinearTet.cpp index 741593197963c0ace12af237b4b15c3e38713c30..ac3eada477628320cd440da504b2a18d805b62c5 100644 --- a/src/LocalDiscretization/LinearTet.cpp +++ b/src/LocalDiscretization/LinearTet.cpp @@ -104,7 +104,7 @@ namespace moab // find best initial guess to improve convergence CartVect tmp_params[] = {CartVect(-1,-1,-1), CartVect(1,-1,-1), CartVect(-1,1,-1), CartVect(-1,-1,1)}; - double resl = HUGE; + double resl = std::numeric_limits<double>::max(); CartVect new_pos, tmp_pos; ErrorCode rval; for (unsigned int i = 0; i < 4; i++) { diff --git a/src/LocalDiscretization/LinearTri.cpp b/src/LocalDiscretization/LinearTri.cpp index ba17c029ebc11fe90ff9a983081efb8066930bc1..56be01ca4d510ae498713a2f69825b4d656930c4 100644 --- a/src/LocalDiscretization/LinearTri.cpp +++ b/src/LocalDiscretization/LinearTri.cpp @@ -101,7 +101,7 @@ namespace moab // find best initial guess to improve convergence CartVect tmp_params[] = {CartVect(-1,-1,-1), CartVect(1,-1,-1), CartVect(-1,1,-1)}; - double resl = HUGE; + double resl = std::numeric_limits<double>::max(); CartVect new_pos, tmp_pos; ErrorCode rval; for (unsigned int i = 0; i < 3; i++) { diff --git a/src/MetisPartitioner.cpp b/src/MetisPartitioner.cpp index f5453fa9ad42fbf6669cbff8bf1ae3dfb1f0859f..44c7b3a2795965941a3eb5d5bf2fc1ce7ba4a4a4 100644 --- a/src/MetisPartitioner.cpp +++ b/src/MetisPartitioner.cpp @@ -36,17
+36,14 @@ using namespace moab; const bool debug = false; -MetisPartitioner::MetisPartitioner( Interface *impl, - const bool use_coords, - int argc, - char **argv) - : PartitionerBase(impl,use_coords), - argcArg(argc), - argvArg(argv) +MetisPartitioner::MetisPartitioner( Interface *impl, + const bool use_coords) + : PartitionerBase(impl,use_coords) + { } -MetisPartitioner::~MetisPartitioner() +MetisPartitioner::~MetisPartitioner() { } @@ -63,7 +60,7 @@ ErrorCode MetisPartitioner::partition_mesh(const idx_t nparts, #ifdef MOAB_HAVE_MPI // should only be called in serial if (mbpc->proc_config().proc_size() != 1) { - std::cout << "MetisPartitioner::partition_mesh_and_geometry must be called in serial." + std::cout << "MetisPartitioner::partition_mesh_and_geometry must be called in serial." << std::endl; return MB_FAILURE; } @@ -76,7 +73,7 @@ ErrorCode MetisPartitioner::partition_mesh(const idx_t nparts, << std::endl; return MB_FAILURE; } - + std::vector pts; // x[0], y[0], z[0], ... from MOAB std::vector ids; // poidx_t ids from MOAB std::vector adjs, parts; @@ -85,17 +82,17 @@ ErrorCode MetisPartitioner::partition_mesh(const idx_t nparts, // Get a mesh from MOAB and diide it across processors. clock_t t = clock(); - + ErrorCode result; if (!partition_tagged_sets && !partition_tagged_ents) { result = assemble_graph(part_dim, pts, ids, adjs, length, elems);MB_CHK_ERR(result); } - else if (partition_tagged_sets) + else if (partition_tagged_sets) { result = assemble_taggedsets_graph(part_dim, pts, ids, adjs, length, elems, &(*aggregating_tag));MB_CHK_ERR(result); } - else if (partition_tagged_ents) + else if (partition_tagged_ents) { result = assemble_taggedents_graph(part_dim, pts, ids, adjs, length, elems, &(*aggregating_tag));MB_CHK_ERR(result); } @@ -109,7 +106,7 @@ ErrorCode MetisPartitioner::partition_mesh(const idx_t nparts, t = clock(); } - std::cout << "Computing partition using " << method + std::cout << "Computing partition using " << method <<" method for " << nparts << " processors..." << std::endl; idx_t nelems = length.size()-1; @@ -124,14 +121,14 @@ ErrorCode MetisPartitioner::partition_mesh(const idx_t nparts, { idx_t options[METIS_NOPTIONS]; METIS_SetDefaultOptions(options); - options[METIS_OPTION_CONTIG] = 1; + options[METIS_OPTION_CONTIG] = 1; metis_RESULT = METIS_PartGraphKway(&nelems, &nconstraidx_ts, &length[0], &adjs[0], NULL, NULL, NULL, &nOfPartitions, NULL, NULL, options, &edgeCut, assign_parts); } else if (strcmp(method, "ML_RB") == 0) { idx_t options[METIS_NOPTIONS]; METIS_SetDefaultOptions(options); - options[METIS_OPTION_OBJTYPE] = METIS_OBJTYPE_CUT; // CUT + options[METIS_OPTION_OBJTYPE] = METIS_OBJTYPE_CUT; // CUT options[METIS_OPTION_IPTYPE] = METIS_IPTYPE_GROW; // GROW or RANDOM options[METIS_OPTION_CTYPE] = METIS_CTYPE_RM; // RM or SHEM options[METIS_OPTION_RTYPE] = METIS_RTYPE_FM; // FM @@ -157,7 +154,7 @@ ErrorCode MetisPartitioner::partition_mesh(const idx_t nparts, if (metis_RESULT != METIS_OK) return MB_FAILURE; - + // take results & write onto MOAB partition sets std::cout << "Saving partition information to MOAB..." 
<< std::endl; { @@ -184,21 +181,21 @@ ErrorCode MetisPartitioner::partition_mesh(const idx_t nparts, ErrorCode MetisPartitioner::assemble_taggedents_graph(const int dimension, std::vector &coords, std::vector &moab_ids, - std::vector &adjacencies, + std::vector &adjacencies, std::vector &length, Range &elems, - const char *aggregating_tag) + const char *aggregating_tag) { Tag partSetTag; ErrorCode result = mbImpl->tag_get_handle(aggregating_tag, 1, MB_TYPE_INTEGER, partSetTag); if (MB_SUCCESS != result) return result; - + Range allSubElems; result = mbImpl->get_entities_by_dimension(0, dimension, allSubElems); if (MB_SUCCESS != result || allSubElems.empty()) return result; idx_t partSet; std::map aggloElems; - for (Range::iterator rit = allSubElems.begin(); rit != allSubElems.end(); rit++) + for (Range::iterator rit = allSubElems.begin(); rit != allSubElems.end(); rit++) { EntityHandle entity = *rit; result = mbImpl->tag_get_data(partSetTag,&entity,1,&partSet); @@ -212,26 +209,26 @@ ErrorCode MetisPartitioner::assemble_taggedents_graph(const int dimension, if (type == MB_TAG_DENSE) { // clear tag on ents and sets - result = mbImpl->tag_delete(partSetTag); + result = mbImpl->tag_delete(partSetTag); if (MB_SUCCESS != result) return result; } if (type == MB_TAG_SPARSE) { // clear tag on ents - result = mbImpl->tag_delete_data(partSetTag, allSubElems); + result = mbImpl->tag_delete_data(partSetTag, allSubElems); if (MB_SUCCESS != result) return result; // clear tag on sets result = mbImpl->get_entities_by_type_and_tag(0 , MBENTITYSET, &partSetTag, 0, 1, elems); if (MB_SUCCESS != result) return result; - result = mbImpl->tag_delete_data(partSetTag, elems); + result = mbImpl->tag_delete_data(partSetTag, elems); if (MB_SUCCESS != result) return result; elems.clear(); } result = mbImpl->tag_get_handle("PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, - partSetTag, MB_TAG_SPARSE|MB_TAG_CREAT); + partSetTag, MB_TAG_SPARSE|MB_TAG_CREAT); if (MB_SUCCESS != result) return result; - - for (std::map::iterator mit = aggloElems.begin(); mit != aggloElems.end(); mit++) + + for (std::map::iterator mit = aggloElems.begin(); mit != aggloElems.end(); mit++) { EntityHandle new_set; result = mbImpl->create_meshset(MESHSET_SET, new_set); @@ -249,16 +246,16 @@ ErrorCode MetisPartitioner::assemble_taggedents_graph(const int dimension, ErrorCode MetisPartitioner::assemble_taggedsets_graph(const int dimension, std::vector &coords, std::vector &moab_ids, - std::vector &adjacencies, + std::vector &adjacencies, std::vector &length, Range &elems, - const char *aggregating_tag) + const char *aggregating_tag) { length.push_back(0); // assemble a graph with vertices equal to elements of specified dimension, edges // signified by list of other elements to which an element is connected - // get the tagged elements + // get the tagged elements Tag partSetTag; ErrorCode result = mbImpl->tag_get_handle(aggregating_tag, 1, MB_TYPE_INTEGER, partSetTag);MB_CHK_ERR(result); //ErrorCode result = mbImpl->tag_get_handle("PARALLEL_PARTITION_SET", 1, MB_TYPE_INTEGER, partSetTag);MB_CHK_ERR(result); @@ -266,11 +263,11 @@ ErrorCode MetisPartitioner::assemble_taggedsets_graph(const int dimension, result = mbImpl->get_entities_by_type_and_tag(0 , MBENTITYSET, &partSetTag, 0, 1, elems); if (MB_SUCCESS != result || elems.empty()) return result; - //assign globla ids to elem sets based on aggregating_tag data + //assign globla ids to elem sets based on aggregating_tag data Tag gid_tag; idx_t zero1 = -1; result = mbImpl->tag_get_handle("GLOBAL_ID_AGGLO", 
1, MB_TYPE_INTEGER, gid_tag, MB_TAG_SPARSE|MB_TAG_CREAT, &zero1);MB_CHK_ERR(result); - for (Range::iterator rit = elems.begin(); rit != elems.end(); rit++) + for (Range::iterator rit = elems.begin(); rit != elems.end(); rit++) { idx_t partSet; result = mbImpl->tag_get_data(partSetTag,&(*rit),1,&partSet);MB_CHK_ERR(result); @@ -287,15 +284,15 @@ ErrorCode MetisPartitioner::assemble_taggedsets_graph(const int dimension, { result = mbImpl->tag_delete_data(partSetTag, elems);MB_CHK_ERR(result); } - + // assemble the graph, using Skinner to get d-1 dimensional neighbors and then idx_tersecting to get adjacencies std::vector skin_subFaces(elems.size()); unsigned int i = 0; - for (Range::iterator rit = elems.begin(); rit != elems.end(); rit++) + for (Range::iterator rit = elems.begin(); rit != elems.end(); rit++) { Range part_ents; result = mbImpl->get_entities_by_handle(*rit, part_ents, false); - if (mbImpl->dimension_from_handle(*part_ents.rbegin()) != mbImpl->dimension_from_handle(*part_ents.begin())) + if (mbImpl->dimension_from_handle(*part_ents.rbegin()) != mbImpl->dimension_from_handle(*part_ents.begin())) { Range::iterator lower = part_ents.lower_bound(CN::TypeDimensionMap[0].first), upper = part_ents.upper_bound(CN::TypeDimensionMap[dimension-1].second); @@ -320,20 +317,20 @@ ErrorCode MetisPartitioner::assemble_taggedsets_graph(const int dimension, { Range subFaces = intersect(skin_subFaces[k],skin_subFaces[t]); if (subFaces.size() > 0) - adjs.push_back(elems[t]); + adjs.push_back(elems[t]); } } - if (!adjs.empty()) + if (!adjs.empty()) { neighbors.resize(adjs.size()); - result = mbImpl->tag_get_data(gid_tag, &adjs[0], adjs.size(), &neighbors[0]);MB_CHK_ERR(result); + result = mbImpl->tag_get_data(gid_tag, &adjs[0], adjs.size(), &neighbors[0]);MB_CHK_ERR(result); } // copy those idx_to adjacencies vector length.push_back(length.back()+(idx_t)adjs.size()); std::copy(neighbors.begin(), neighbors.end(), std::back_inserter(adjacencies)); // get the graph vertex id for this element const EntityHandle& setk = elems[k]; - result = mbImpl->tag_get_data(gid_tag, &setk, 1, &moab_id); + result = mbImpl->tag_get_data(gid_tag, &setk, 1, &moab_id); moab_ids.push_back(moab_id); // get average position of vertices Range part_ents; @@ -371,9 +368,9 @@ ErrorCode MetisPartitioner::assemble_taggedsets_graph(const int dimension, ErrorCode MetisPartitioner::assemble_graph(const int dimension, std::vector &coords, std::vector &moab_ids, - std::vector &adjacencies, + std::vector &adjacencies, std::vector &length, - Range &elems) + Range &elems) { length.push_back(0); // assemble a graph with vertices equal to elements of specified dimension, edges @@ -382,10 +379,10 @@ ErrorCode MetisPartitioner::assemble_graph(const int dimension, // get the elements of that dimension ErrorCode result = mbImpl->get_entities_by_dimension(0, dimension, elems); if (MB_SUCCESS != result || elems.empty()) return result; - + #ifdef MOAB_HAVE_MPI // assign global ids - result = mbpc->assign_global_ids(0, dimension, 0); + result = mbpc->assign_global_ids(0, dimension, 0); #endif // now assemble the graph, calling MeshTopoUtil to get bridge adjacencies through d-1 dimensional @@ -398,19 +395,19 @@ ErrorCode MetisPartitioner::assemble_graph(const int dimension, double avg_position[3]; int moab_id; - + // get the global id tag hanlde Tag gid; result = mbImpl->tag_get_handle(GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER, gid, MB_TAG_DENSE|MB_TAG_CREAT);MB_CHK_ERR(result); - + for (Range::iterator rit = elems.begin(); rit != elems.end(); rit++) 
{ // get bridge adjacencies adjs.clear(); - result = mtu.get_bridge_adjacencies(*rit, (dimension > 0 ? dimension-1 : 3), + result = mtu.get_bridge_adjacencies(*rit, (dimension > 0 ? dimension-1 : 3), dimension, adjs);MB_CHK_ERR(result); - + // get the graph vertex ids of those if (!adjs.empty()) { assert(adjs.size() < 5*MAX_SUB_ENTITIES); @@ -424,7 +421,7 @@ ErrorCode MetisPartitioner::assemble_graph(const int dimension, // get average position of vertices result = mtu.get_average_position(*rit, avg_position);MB_CHK_ERR(result); - + // get the graph vertex id for this element result = mbImpl->tag_get_data(gid, &(*rit), 1, &moab_id);MB_CHK_ERR(result); @@ -452,7 +449,7 @@ ErrorCode MetisPartitioner::assemble_graph(const int dimension, } ErrorCode MetisPartitioner::write_aggregationtag_partition(const idx_t nparts, - Range &elems, + Range &elems, const idx_t *assignment, const bool write_as_sets, const bool write_as_tags) @@ -469,16 +466,16 @@ ErrorCode MetisPartitioner::write_aggregationtag_partition(const idx_t nparts, result = mbImpl->get_entities_by_type_and_tag(0, MBENTITYSET, &part_set_tag, NULL, 1, tagged_sets, Interface::UNION);MB_CHK_ERR(result); if (!tagged_sets.empty()) { - result = mbImpl->clear_meshset(tagged_sets); + result = mbImpl->clear_meshset(tagged_sets); if (!write_as_sets) { result = mbImpl->tag_delete_data(part_set_tag, tagged_sets);MB_CHK_ERR(result); } } - + if (write_as_sets) { // first, create partition sets and store in vector partSets.clear(); - + if (nparts > (idx_t) tagged_sets.size()) { // too few partition sets - create missing ones idx_t num_new = nparts - tagged_sets.size(); @@ -496,15 +493,15 @@ ErrorCode MetisPartitioner::write_aggregationtag_partition(const idx_t nparts, result = mbImpl->delete_entities(&old_set, 1);MB_CHK_ERR(result); } } - + // assign partition sets to vector partSets.swap(tagged_sets); - + // write a tag to those sets denoting they're partition sets, with a value of the // proc number idx_t *dum_ids = new idx_t[nparts]; for (idx_t i = 0; i < nparts; i++) dum_ids[i] = i; - + result = mbImpl->tag_set_data(part_set_tag, partSets, dum_ids);MB_CHK_ERR(result); // assign entities to the relevant sets @@ -534,18 +531,18 @@ ErrorCode MetisPartitioner::write_aggregationtag_partition(const idx_t nparts, if (write_as_tags) { Tag gid_tag; result = mbImpl->tag_get_handle("GLOBAL_ID_AGGLO", 1, MB_TYPE_INTEGER, gid_tag, MB_TAG_SPARSE);MB_CHK_ERR(result); - + // allocate idx_teger-size partitions unsigned int i = 0; idx_t gid; - for (Range::iterator rit = elems.begin(); rit != elems.end(); rit++) + for (Range::iterator rit = elems.begin(); rit != elems.end(); rit++) { result = mbImpl->tag_get_data(gid_tag, &(*rit), 1, &gid); Range part_ents; // std::cout<<"part ents "<get_entities_by_handle(*rit, part_ents, false);MB_CHK_ERR(result); - for (Range::iterator eit = part_ents.begin(); eit != part_ents.end(); eit++) + for (Range::iterator eit = part_ents.begin(); eit != part_ents.end(); eit++) { result = mbImpl->tag_set_data(part_set_tag, &(*eit), 1, &assignment[i]);MB_CHK_ERR(result); @@ -558,10 +555,10 @@ ErrorCode MetisPartitioner::write_aggregationtag_partition(const idx_t nparts, } ErrorCode MetisPartitioner::write_partition(const idx_t nparts, - Range &elems, + Range &elems, const idx_t *assignment, const bool write_as_sets, - const bool write_as_tags) + const bool write_as_tags) { ErrorCode result; @@ -570,13 +567,13 @@ ErrorCode MetisPartitioner::write_partition(const idx_t nparts, idx_t dum_id = -1, i; result = 
mbImpl->tag_get_handle("PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_set_tag, MB_TAG_SPARSE|MB_TAG_CREAT, &dum_id);MB_CHK_ERR(result); - + // get any sets already with this tag, and clear them Range tagged_sets; result = mbImpl->get_entities_by_type_and_tag(0, MBENTITYSET, &part_set_tag, NULL, 1, tagged_sets, Interface::UNION);MB_CHK_ERR(result); if (!tagged_sets.empty()) { - result = mbImpl->clear_meshset(tagged_sets); + result = mbImpl->clear_meshset(tagged_sets); if (!write_as_sets) { result = mbImpl->tag_delete_data(part_set_tag, tagged_sets);MB_CHK_ERR(result); } @@ -585,7 +582,7 @@ ErrorCode MetisPartitioner::write_partition(const idx_t nparts, if (write_as_sets) { // first, create partition sets and store in vector partSets.clear(); - + if (nparts > (int) tagged_sets.size()) { // too few partition sets - create missing ones idx_t num_new = nparts - tagged_sets.size(); @@ -603,17 +600,17 @@ ErrorCode MetisPartitioner::write_partition(const idx_t nparts, result = mbImpl->delete_entities(&old_set, 1);MB_CHK_ERR(result); } } - + // assign partition sets to vector partSets.swap(tagged_sets); - + // write a tag to those sets denoting they're partition sets, with a value of the // proc number int *dum_ids = new int[nparts]; // this remains integer for (i = 0; i < nparts; i++) dum_ids[i] = i; - - result = mbImpl->tag_set_data(part_set_tag, partSets, dum_ids); - delete dum_ids; + + result = mbImpl->tag_set_data(part_set_tag, partSets, dum_ids); + delete [] dum_ids; // assign entities to the relevant sets std::vector tmp_part_sets; @@ -637,7 +634,7 @@ ErrorCode MetisPartitioner::write_partition(const idx_t nparts, std::cout << std::endl; } } - + if (write_as_tags) { if (sizeof(int) != sizeof(idx_t)) { @@ -652,7 +649,7 @@ ErrorCode MetisPartitioner::write_partition(const idx_t nparts, else result = mbImpl->tag_set_data(part_set_tag, elems, assignment);MB_CHK_ERR(result); } - + return MB_SUCCESS; } diff --git a/src/ScdInterface.cpp b/src/ScdInterface.cpp index 5b25d4db7b28ff20a12a656587169284ae31cfad..9d15df25a0557eae9bbe426de1c8d21bc4f18196 100644 --- a/src/ScdInterface.cpp +++ b/src/ScdInterface.cpp @@ -717,7 +717,7 @@ ErrorCode ScdInterface::tag_shared_vertices(ParallelComm *pcomm, ScdBox *box) send_reqs(procs.size(), MPI_REQUEST_NULL); std::vector rhandles(4*procs.size()), shandles(4); for (unsigned int i = 0; i < procs.size(); i++) { - int success = MPI_Irecv(&rhandles[4*i], 4*sizeof(EntityHandle), + int success = MPI_Irecv((void*)&rhandles[4*i], 4*sizeof(EntityHandle), MPI_UNSIGNED_CHAR, procs[i], 1, pcomm->proc_config().proc_comm(), &recv_reqs[i]); @@ -740,7 +740,7 @@ ErrorCode ScdInterface::tag_shared_vertices(ParallelComm *pcomm, ScdBox *box) shandles[3] = box->start_element(); } for (unsigned int i = 0; i < procs.size(); i++) { - int success = MPI_Isend(&shandles[0], 4*sizeof(EntityHandle), MPI_UNSIGNED_CHAR, procs[i], + int success = MPI_Isend((void*)&shandles[0], 4*sizeof(EntityHandle), MPI_UNSIGNED_CHAR, procs[i], 1, pcomm->proc_config().proc_comm(), &send_reqs[i]); if (success != MPI_SUCCESS) return MB_FAILURE; } diff --git a/src/WriteUtil.cpp b/src/WriteUtil.cpp index d1980574f4456ee44fe82426acb5ebdc6a753ece..f506da46ef48cd5381e3fe1d8170ea4901c4a641 100644 --- a/src/WriteUtil.cpp +++ b/src/WriteUtil.cpp @@ -42,8 +42,8 @@ namespace moab { -WriteUtil::WriteUtil(Core* mdb, Error* error_handler) - : WriteUtilIface(), mMB(mdb), mError(error_handler) +WriteUtil::WriteUtil(Core* mdb) + : WriteUtilIface(), mMB(mdb) { } diff --git a/src/WriteUtil.hpp b/src/WriteUtil.hpp index 
8897e684839d389d05b367942a606cbed1f82bb7..d47ac4eaaac5dd62c20578fa712c483a8c52c35e 100644 --- a/src/WriteUtil.hpp +++ b/src/WriteUtil.hpp @@ -25,18 +25,16 @@ namespace moab { class Core; -class Error; class WriteUtil : public WriteUtilIface { private: //! Pointer to the Core Core* mMB; - Error* mError; public: //! Constructor takes Core pointer - WriteUtil(Core* mdb, Error* error_handler); + WriteUtil(Core* mdb); //! Destructor ~WriteUtil(){} diff --git a/src/iMOAB.cpp b/src/iMOAB.cpp index b73bfbfa65cece6ff5a1767dbb833675143d8a91..32a529f8bc43d7da74a996d138f220de653f4f74 100644 --- a/src/iMOAB.cpp +++ b/src/iMOAB.cpp @@ -208,7 +208,7 @@ ErrCode iMOAB_RegisterFortranApplication( const iMOAB_String app_name, ErrCode iMOAB_DeregisterApplication( iMOAB_AppID pid ) { - // the file set , parallel comm are all in vectors indexed by *pid + // the file set , parallel comm are all in vectors indexed by *pid // assume we did not delete anything yet // *pid will not be reused if we register another application @@ -966,9 +966,9 @@ ErrCode iMOAB_SetIntTagStorage(iMOAB_AppID pid, const iMOAB_String tag_storage_n return 1; // set it on a subset of entities, based on type and length Range * ents_to_set; - if (* ent_type == 0)// vertices + if (*ent_type == 0)// vertices ents_to_set = &data.all_verts; - else if (* ent_type == 1) + else // if (*ent_type == 1) // *ent_type can be 0 (vertices) or 1 (elements) ents_to_set = &data.primary_elems; int nents_to_be_set = *num_tag_storage_length /tagLength; @@ -1008,9 +1008,9 @@ ErrCode iMOAB_GetIntTagStorage(iMOAB_AppID pid, const iMOAB_String tag_storage_n // set it on a subset of entities, based on type and length Range * ents_to_get; - if (* ent_type == 0)// vertices + if (*ent_type == 0)// vertices ents_to_get = &data.all_verts; - else if (* ent_type == 1) + else // if (*ent_type == 1) ents_to_get = &data.primary_elems; int nents_to_get = *num_tag_storage_length /tagLength; @@ -1052,7 +1052,7 @@ ErrCode iMOAB_SetDoubleTagStorage(iMOAB_AppID pid, const iMOAB_String tag_storag return 1; // set it on a subset of entities, based on type and length - Range * ents_to_set; + Range * ents_to_set = NULL; if (* ent_type == 0)// vertices ents_to_set = &data.all_verts; else if (* ent_type == 1) @@ -1097,7 +1097,7 @@ ErrCode iMOAB_GetDoubleTagStorage(iMOAB_AppID pid, const iMOAB_String tag_storag return 1; // set it on a subset of entities, based on type and length - Range * ents_to_get; + Range * ents_to_get = NULL; if (* ent_type == 0)// vertices ents_to_get = &data.all_verts; else if (* ent_type == 1) diff --git a/src/io/IODebugTrack.cpp b/src/io/IODebugTrack.cpp index 1a8cd6c52e2dc45ec0af08a1ad289b0b2660b4ea..d6fdb4a6b627f98dbf7878227b92f320b8890a14 100644 --- a/src/io/IODebugTrack.cpp +++ b/src/io/IODebugTrack.cpp @@ -169,8 +169,8 @@ void IODebugTrack::all_reduce() std::vector send(dataSet.size()), recv(total); std::copy( dataSet.begin(), dataSet.end(), send.begin() ); - MPI_Gatherv( &send[0], 3*send.size(), MPI_UNSIGNED_LONG, - &recv[0], &counts[0], &displs[0], MPI_UNSIGNED_LONG, + MPI_Gatherv( (void*)&send[0], 3*send.size(), MPI_UNSIGNED_LONG, + (void*)&recv[0], &counts[0], &displs[0], MPI_UNSIGNED_LONG, 0, MPI_COMM_WORLD ); if (0 == mpiRank) { diff --git a/src/io/ReadHDF5.cpp b/src/io/ReadHDF5.cpp index 830541a9048a2123fa13aca00c99246e9164ace6..c4e2200d581259314601dc8fe7023ac676f7cf04 100644 --- a/src/io/ReadHDF5.cpp +++ b/src/io/ReadHDF5.cpp @@ -2045,7 +2045,7 @@ ErrorCode ReadHDF5::read_all_set_meta() if (bcast) { #ifdef MOAB_HAVE_MPI - int ierr = 
MPI_Bcast(setMeta, num_sets*4, MPI_LONG, 0, comm); + int ierr = MPI_Bcast((void*)setMeta, num_sets*4, MPI_LONG, 0, comm); if (MPI_SUCCESS != ierr) MB_SET_ERR(MB_FAILURE, "ReadHDF5 Failure"); #else @@ -3337,7 +3337,7 @@ ErrorCode ReadHDF5::read_var_len_tag(Tag tag_handle, { ErrorCode rval1; if (isHandle) { - assert(readSize == sizeof(EntityHandle)); + assert( readSize == sizeof(EntityHandle) ); rval1 = readHDF5->convert_id_to_handle((EntityHandle*)data, count);MB_CHK_ERR(rval1); } int n = count; diff --git a/src/io/ReadOBJ.cpp b/src/io/ReadOBJ.cpp index 0da7a842c554d3f52070fe8a045c4ddc6bc5c257..dfdde50d1426760d26b077228a095e5887ca2a2d 100644 --- a/src/io/ReadOBJ.cpp +++ b/src/io/ReadOBJ.cpp @@ -62,7 +62,7 @@ const char geom_category[][CATEGORY_TAG_SIZE] = // Constructor ReadOBJ::ReadOBJ(Interface* impl) : MBI(impl),geom_tag(0), id_tag(0), name_tag(0), category_tag(0), - faceting_tol_tag(0), geometry_resabs_tag(0), obj_name_tag(0), sense_tag(0) + faceting_tol_tag(0), geometry_resabs_tag(0), obj_name_tag(0) { assert(NULL != impl); MBI->query_interface(readMeshIface); @@ -297,16 +297,16 @@ ErrorCode ReadOBJ::load_file(const char *filename, */ void ReadOBJ::tokenize( const std::string& str, std::vector& tokens, - const char* delimiters) + const char* delimiters2) { tokens.clear(); std::string::size_type next_token_end, next_token_start = - str.find_first_not_of( delimiters, 0); + str.find_first_not_of( delimiters2, 0); while ( std::string::npos != next_token_start ) { - next_token_end = str.find_first_of( delimiters, next_token_start ); + next_token_end = str.find_first_of( delimiters2, next_token_start ); if ( std::string::npos == next_token_end ) { tokens.push_back(str.substr(next_token_start)); @@ -316,7 +316,7 @@ void ReadOBJ::tokenize( const std::string& str, { tokens.push_back( str.substr( next_token_start, next_token_end - next_token_start ) ); - next_token_start = str.find_first_not_of( delimiters, next_token_end ); + next_token_start = str.find_first_not_of( delimiters2, next_token_end ); } } } diff --git a/src/io/ReadOBJ.hpp b/src/io/ReadOBJ.hpp index 8f73108315f17ad1814337af6b20dd0d845983a0..ddd8f84179698293000a9bf490c92e4ce218ce85 100644 --- a/src/io/ReadOBJ.hpp +++ b/src/io/ReadOBJ.hpp @@ -148,8 +148,7 @@ private: GeomTopoTool* myGeomTool; - Tag geom_tag,id_tag,name_tag,category_tag,faceting_tol_tag, geometry_resabs_tag, obj_name_tag, - sense_tag; + Tag geom_tag,id_tag,name_tag,category_tag,faceting_tol_tag, geometry_resabs_tag, obj_name_tag; /* The keyword type function matches the first character extracted from each line to a type of line */ diff --git a/src/io/WriteCGNS.cpp b/src/io/WriteCGNS.cpp index 3bb0ef2768db845654b5041ce81e5f06658f6563..e9ac7ce51f6f28d1a2ee65d4cf103e1d21e2fdf1 100644 --- a/src/io/WriteCGNS.cpp +++ b/src/io/WriteCGNS.cpp @@ -4,7 +4,6 @@ #include "MBParallelConventions.h" #include "moab/Interface.hpp" #include "moab/Range.hpp" -#include "moab/WriteUtilIface.hpp" #include "moab/FileOptions.hpp" #include "GmshUtil.hpp" diff --git a/src/io/WriteHDF5.cpp b/src/io/WriteHDF5.cpp index 10ff54db0b2729015213630ffdb6e31c5e0b99cc..a647147753edb7359720de9dbcd43ae65333a5ff 100644 --- a/src/io/WriteHDF5.cpp +++ b/src/io/WriteHDF5.cpp @@ -55,28 +55,8 @@ #error Attempt to compile WriteHDF5 with HDF5 support disabled #endif -/* Access HDF5 file handle for debugging -#include -struct file { uint32_t magic; hid_t handle; }; -*/ -#undef DEBUG - #undef BLOCKED_COORD_IO -#ifdef DEBUG -/* -# include - extern "C" herr_t hdf_error_handler(void*) - { - H5Eprint(stderr); - assert(0); 
- } -*/ -# define myassert(A) assert(A) -#else -# define myassert(A) -#endif - #ifdef MOAB_HAVE_VALGRIND # include #else @@ -95,7 +75,7 @@ namespace moab { template inline void VALGRIND_MAKE_VEC_UNDEFINED(std::vector& v) { - (void)VALGRIND_MAKE_MEM_UNDEFINED(&v[0], v.size() * sizeof(T)); + (void)VALGRIND_MAKE_MEM_UNDEFINED( (T*)&v[0], v.size() * sizeof(T)); } #define WRITE_HDF5_BUFFER_SIZE (40 * 1024 * 1024) @@ -172,7 +152,7 @@ static herr_t handle_hdf5_error(void* data) do { \ if (mhdf_isError(&(A))) { \ MB_SET_ERR_CONT(mhdf_message(&(A))); \ - myassert(0); \ + assert(0); \ return error(MB_FAILURE); \ } \ } while (false) @@ -181,7 +161,7 @@ do { \ do { \ if (mhdf_isError(&(A))) { \ MB_SET_ERR_CONT(mhdf_message(&(A))); \ - myassert(0); \ + assert(0); \ mhdf_closeData(filePtr, (B), &(A)); \ return error(MB_FAILURE); \ } \ @@ -191,7 +171,7 @@ do { \ do { \ if (mhdf_isError(&(A))) { \ MB_SET_ERR_CONT(mhdf_message(&(A))); \ - myassert(0); \ + assert(0); \ mhdf_closeData(filePtr, (B)[0], &(A)); \ mhdf_closeData(filePtr, (B)[1], &(A)); \ return error(MB_FAILURE); \ @@ -202,7 +182,7 @@ do { \ do { \ if (mhdf_isError(&(A))) { \ MB_SET_ERR_CONT(mhdf_message(&(A))); \ - myassert(0); \ + assert(0); \ mhdf_closeData(filePtr, (B)[0], &(A)); \ mhdf_closeData(filePtr, (B)[1], &(A)); \ mhdf_closeData(filePtr, (B)[2], &(A)); \ @@ -214,7 +194,7 @@ do { \ do { \ if (mhdf_isError(&(A))) { \ MB_SET_ERR_CONT(mhdf_message(&(A))); \ - myassert(0); \ + assert(0); \ mhdf_closeData(filePtr, (B), &(A)); \ if (C) mhdf_closeData(filePtr, (D), &(A)); \ return error(MB_FAILURE); \ @@ -234,7 +214,7 @@ do { \ if (MB_SUCCESS != (A)) { \ MB_CHK_ERR_CONT((A)); \ mhdf_closeData(filePtr, (B), &(C)); \ - myassert(0); \ + assert(0); \ return error(A); \ } \ } while (false) @@ -246,7 +226,7 @@ do { \ mhdf_closeData(filePtr, (B)[0], &(C)); \ mhdf_closeData(filePtr, (B)[1], &(C)); \ write_finished(); \ - myassert(0); \ + assert(0); \ return error(A); \ } \ } while (false) @@ -259,7 +239,7 @@ do { \ mhdf_closeData(filePtr, (B)[1], &(C)); \ mhdf_closeData(filePtr, (B)[2], &(C)); \ write_finished(); \ - myassert(0); \ + assert(0); \ return error(A); \ } \ } while (false) @@ -272,7 +252,7 @@ do { \ if (C) \ mhdf_closeData(filePtr, (D), &(E)); \ write_finished(); \ - myassert(0); \ + assert(0); \ return error(A); \ } \ } while (false) diff --git a/src/moab/MetisPartitioner.hpp b/src/moab/MetisPartitioner.hpp index eb2ca1bba961e9c71a2484b20ee1a4e022d5af48..8870d96079f31047d830a99f4d5bfb3bcb18b6a5 100644 --- a/src/moab/MetisPartitioner.hpp +++ b/src/moab/MetisPartitioner.hpp @@ -36,9 +36,7 @@ using namespace moab; public: MetisPartitioner( Interface *impl = NULL, - const bool use_coords = false, - int argc = 0, - char **argv = NULL); + const bool use_coords = false); virtual ~MetisPartitioner(); @@ -84,10 +82,6 @@ using namespace moab; private: - int argcArg; - - char **argvArg; - ErrorCode assemble_graph(const int dimension, std::vector &coords, std::vector &moab_ids, diff --git a/src/parallel/ParallelComm.cpp b/src/parallel/ParallelComm.cpp index 4f64b56cb522bbb9fbe69772951d5f9ba4e5b42d..031703043f9d325578cceeb518209d39d1653022 100644 --- a/src/parallel/ParallelComm.cpp +++ b/src/parallel/ParallelComm.cpp @@ -5907,7 +5907,7 @@ ErrorCode ParallelComm::get_remote_handles(EntityHandle *local_vec, EntityHandle this_incoming++; PRINT_DEBUG_IRECV(procConfig.proc_rank(), to_proc, (unsigned char*)ack_buff, sizeof(int), mesg_tag - 1, this_incoming); - success = MPI_Irecv(ack_buff, sizeof(int), + success = MPI_Irecv((void*)ack_buff, sizeof(int), 
MPI_UNSIGNED_CHAR, to_proc, mesg_tag - 1, procConfig.proc_comm(), &ack_req); @@ -8301,7 +8301,7 @@ ErrorCode ParallelComm::get_remote_handles(EntityHandle *local_vec, EntityHandle // Set up to receive data for (int i = 0; i < num_proc; i++) { result[i].resize(sizes_recv[i]); - ierr = MPI_Irecv(&result[i][0], + ierr = MPI_Irecv( (void *)( &(result[i][0]) ), sizeof(SharedEntityData)*sizes_recv[i], MPI_UNSIGNED_CHAR, buffProcs[i], tag, cm, &recv_req[i]); @@ -8311,7 +8311,7 @@ ErrorCode ParallelComm::get_remote_handles(EntityHandle *local_vec, EntityHandle // Send data for (int i = 0; i < num_proc; i++) { - ierr = MPI_Isend(&send_data[i][0], + ierr = MPI_Isend((void *)( &(send_data[i][0]) ), sizeof(SharedEntityData)*sizes_send[i], MPI_UNSIGNED_CHAR, buffProcs[i], tag, cm, &send_req[i]); diff --git a/src/parallel/ParallelMergeMesh.cpp b/src/parallel/ParallelMergeMesh.cpp index 3da0133753cfdcfc8ed400e212a7865a82aef991..1c8515621ab4030d97d2d22803568baff892bb4e 100644 --- a/src/parallel/ParallelMergeMesh.cpp +++ b/src/parallel/ParallelMergeMesh.cpp @@ -149,7 +149,7 @@ namespace moab{ box.bMax *= -1; /*Communicate to all processors*/ - MPI_Allreduce(&box, gbox, 6, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD); + MPI_Allreduce( (void*)&box, gbox, 6, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD); /*Assemble Global Bounding Box*/ //Flip the max back diff --git a/src/parallel/WriteHDF5Parallel.cpp b/src/parallel/WriteHDF5Parallel.cpp index 22f66f3c93ef520b0873816ff543a3079ff4767d..f0fc4c9c1fc5aa758e481b25485d54dd762adffc 100644 --- a/src/parallel/WriteHDF5Parallel.cpp +++ b/src/parallel/WriteHDF5Parallel.cpp @@ -4,6 +4,8 @@ #include #include #include + +#include #include #include @@ -118,6 +120,13 @@ const char* mpi_err_str(int errorcode) { #ifdef VALGRIND #include + +template inline +void VALGRIND_MAKE_VEC_UNDEFINED(std::vector& v) { + if (v.size()) {} + (void)VALGRIND_MAKE_MEM_UNDEFINED(&v[0], v.size() * sizeof(T)); +} + #else #ifndef VALGRIND_CHECK_MEM_IS_DEFINED #define VALGRIND_CHECK_MEM_IS_DEFINED(a, b) ((void)0) @@ -128,14 +137,14 @@ const char* mpi_err_str(int errorcode) { #ifndef VALGRIND_MAKE_MEM_UNDEFINED #define VALGRIND_MAKE_MEM_UNDEFINED(a, b) ((void)0) #endif -#endif -template inline -void VALGRIND_MAKE_VEC_UNDEFINED(std::vector& v) { - if (v.size()) {} - (void)VALGRIND_MAKE_MEM_UNDEFINED(&v[0], v.size() * sizeof(T)); +template inline +void VALGRIND_MAKE_VEC_UNDEFINED(std::vector& ) { + /* Nothing to do */ } +#endif + #ifndef NDEBUG #define START_SERIAL \ for (unsigned _x = 0; _x < myPcomm->proc_config().proc_size(); ++_x) { \ @@ -249,7 +258,7 @@ WriteHDF5Parallel::WriteHDF5Parallel(Interface* iface) WriteHDF5Parallel::~WriteHDF5Parallel() { - if (pcommAllocated && myPcomm) + if (pcommAllocated && myPcomm) delete myPcomm; } @@ -634,7 +643,7 @@ ErrorCode WriteHDF5Parallel::check_serial_tag_data(const std::vectorsize == MB_VARIABLE_LENGTH) + if (ptr->size == MB_VARIABLE_LENGTH) rval = iFace->tag_get_handle(name.c_str(), ptr->def_val_len, ptr->type, newtag.tag_id, MB_TAG_VARLEN|MB_TAG_CREAT|ptr->storage, ptr->default_value()); else rval = iFace->tag_get_handle(name.c_str(), ptr->size, ptr->type, newtag.tag_id, MB_TAG_CREAT|ptr->storage, ptr->default_value()); @@ -681,7 +690,7 @@ ErrorCode WriteHDF5Parallel::check_serial_tag_data(const std::vectorpush_back(&*tag_iter); } @@ -924,14 +933,14 @@ ErrorCode WriteHDF5Parallel::create_tag_tables() // Copy values into local structs and if root then create tables size_t idx = 0; for (tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter, ++idx) { - 
assert(idx < counts.size()); + assert( idx < counts.size() ); tag_iter->sparse_offset = offsets[idx]; tag_iter->max_num_ents = maxima[idx]; tag_iter->write_sparse = (0 != totals[idx]); int s; if (MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length(tag_iter->tag_id, s)) { ++idx; - assert(idx < counts.size()); + assert( idx < counts.size() ); tag_iter->var_data_offset = offsets[idx]; tag_iter->max_num_vals = maxima[idx]; } @@ -947,13 +956,13 @@ ErrorCode WriteHDF5Parallel::create_tag_tables() if (0 == myPcomm->proc_config().proc_rank()) { size_t iidx = 0; for (tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter, ++iidx) { - assert(iidx < totals.size()); + assert( iidx < totals.size() ); unsigned long num_ents = totals[iidx]; unsigned long num_val = 0; int s; if (MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length(tag_iter->tag_id, s)) { ++iidx; - assert(iidx < totals.size()); + assert( iidx < totals.size() ); num_val = totals[iidx]; } dbgOut.printf(2, "Writing tag description for tag 0x%lx with %lu values\n", @@ -990,7 +999,7 @@ struct DatasetVals { long max_count; long total; }; -STATIC_ASSERT(sizeof(DatasetVals) == 3 * sizeof(long)); +STATIC_ASSERT( (sizeof(DatasetVals) == 3 * sizeof(long))); ErrorCode WriteHDF5Parallel::create_dataset(int num_datasets, const long* num_owned, @@ -1039,7 +1048,7 @@ ErrorCode WriteHDF5Parallel::create_dataset(int num_datasets, } // Send id offset to every proc - result = MPI_Bcast(&cumulative[0], 3 * num_datasets, MPI_LONG, 0, comm);CHECK_MPI(result); + result = MPI_Bcast((void*)&cumulative[0], 3 * num_datasets, MPI_LONG, 0, comm);CHECK_MPI(result); for (int index = 0; index < num_datasets; ++index) { if (first_ids_out) first_ids_out[index] = (wid_t)cumulative[index].start_id; @@ -1086,7 +1095,7 @@ ErrorCode WriteHDF5Parallel::create_node_table(int dimension) nodeSet.num_nodes = dimension; // Put it here so NodeSetCreator can access it struct NodeSetCreator : public DataSetCreator { ErrorCode operator()(WriteHDF5* file, long count, const ExportSet* group, long& start_id) const - { + { mhdf_Status status; hid_t handle = mhdf_createNodeCoords(file->file_ptr(), group->num_nodes, count, &start_id, &status);CHECK_HDFN(status); mhdf_closeData(file->file_ptr(), handle, &status);CHECK_HDFN(status); @@ -1149,7 +1158,7 @@ ErrorCode WriteHDF5Parallel::negotiate_type_list() for (std::list::iterator eiter = exportList.begin(); eiter != exportList.end(); ++eiter) { viter->first = eiter->type; - viter->second = eiter->num_nodes; + viter->second = eiter->num_nodes; ++viter; } @@ -1167,7 +1176,7 @@ ErrorCode WriteHDF5Parallel::negotiate_type_list() typelist root_types(num_types0); if (0 == myPcomm->proc_config().proc_rank()) root_types = my_types; - result = MPI_Bcast(&root_types[0], 2 * num_types0, MPI_INT, 0, comm);CHECK_MPI(result); + result = MPI_Bcast((void*)&root_types[0], 2 * num_types0, MPI_INT, 0, comm);CHECK_MPI(result); // Build local list of any types that root did not know about typelist non_root_types; @@ -1199,8 +1208,8 @@ ErrorCode WriteHDF5Parallel::negotiate_type_list() typelist alltypes(total/2); (void)VALGRIND_MAKE_VEC_UNDEFINED(alltypes); (void)VALGRIND_CHECK_MEM_IS_DEFINED(&non_root_types[0], non_root_types.size()*sizeof(int)); - result = MPI_Gatherv(&non_root_types[0], 2 * non_root_count, MPI_INT, - &alltypes[0], &counts[0], &displs[0], MPI_INT, 0, comm);CHECK_MPI(result); + result = MPI_Gatherv((void*)&non_root_types[0], 2 * non_root_count, MPI_INT, + (int*)&alltypes[0], &counts[0], &displs[0], MPI_INT, 0, comm);CHECK_MPI(result); // Merge 
type lists. // Prefer O(n) insertions with O(ln n) search time because @@ -1224,7 +1233,7 @@ ErrorCode WriteHDF5Parallel::negotiate_type_list() // Send list of types to each processor my_types.resize(total); - result = MPI_Bcast(&my_types[0], 2 * total, MPI_INT, 0, comm);CHECK_MPI(result); + result = MPI_Bcast((void*)&my_types[0], 2 * total, MPI_INT, 0, comm);CHECK_MPI(result); } else { // Special case: if root had types but some subset of procs did not @@ -1442,7 +1451,7 @@ ErrorCode WriteHDF5Parallel::communicate_shared_set_ids(const Range& owned, unsigned r; EntityHandle h; myPcomm->get_entityset_owner(*j, r, &h); - assert(r == procs[i]); + assert( r == procs[i] ); remote_handles.insert(h); } dbgOut.print(6, remote_handles); @@ -1468,7 +1477,7 @@ ErrorCode WriteHDF5Parallel::communicate_shared_set_ids(const Range& owned, if (set_procs[j] != myPcomm->proc_config().proc_rank()) send_sets[set_procs[j]].insert(*i); } - assert(send_sets.find(myPcomm->proc_config().proc_rank()) == send_sets.end()); + assert( send_sets.find(myPcomm->proc_config().proc_rank()) == send_sets.end() ); // Now send the data std::vector< std::vector > send_buf(send_sets.size()); @@ -1496,8 +1505,8 @@ ErrorCode WriteHDF5Parallel::communicate_shared_set_ids(const Range& owned, while (recv_count--) { mperr = MPI_Waitany(recv_req.size(), &recv_req[0], &idx, &status);CHECK_MPI(mperr); - assert((unsigned)status.MPI_SOURCE == procs[idx]); - assert(2*recv_buf[idx].front() + 1 == recv_buf[idx].size()); + assert( (unsigned)status.MPI_SOURCE == procs[idx] ); + assert( 2*recv_buf[idx].front() + 1 == recv_buf[idx].size() ); const size_t n = std::min(recv_buf[idx].front(), (recv_buf[idx].size() - 1) / 2); dbgOut.printf(5, "Received buffer of size %lu from proc %d\n", (unsigned long)(2*n + 1), (int)status.MPI_SOURCE); @@ -1505,15 +1514,15 @@ ErrorCode WriteHDF5Parallel::communicate_shared_set_ids(const Range& owned, for (size_t i = 0; i < n; ++i) { EntityHandle handle = 0; rval = myPcomm->get_entityset_local_handle(procs[idx], recv_buf[idx][2*i + 1], handle);CHECK_MB(rval); - assert(handle != 0); + assert( handle != 0 ); if (!idMap.insert(handle, recv_buf[idx][2*i + 2], 1).second) error(MB_FAILURE); // Conflicting IDs?????? 
} recv_req[idx] = MPI_REQUEST_NULL; } - assert(MPI_SUCCESS == MPI_Waitany(recv_req.size(), &recv_req[0], &idx, &status) - && MPI_UNDEFINED == idx); // Check that we got them all + assert( MPI_SUCCESS == MPI_Waitany(recv_req.size(), &recv_req[0], &idx, &status) + && MPI_UNDEFINED == idx ); // Check that we got them all // Wait for all sends to complete before we release send // buffers (implicitly releases when we return from this function) @@ -1524,7 +1533,7 @@ ErrorCode WriteHDF5Parallel::communicate_shared_set_ids(const Range& owned, if (dbgOut.get_verbosity() >= SSVB) print_shared_sets(); - return MB_SUCCESS; + return MB_SUCCESS; } //void get_global_ids(Interface* iFace, const unsigned long* ptr, @@ -1583,7 +1592,7 @@ ErrorCode WriteHDF5Parallel::pack_set(Range::const_iterator it, if (len && !(flags & MESHSET_ORDERED)) { tmp.clear(); bool blocked = false; - assert(0 == len % 2); + assert((0 == len % 2)); rval = range_to_blocked_list(ptr, len / 2, tmp, blocked);CHECK_MB(rval); if (blocked) flags |= mhdf_SET_RANGE_BIT; @@ -1646,8 +1655,8 @@ static void merge_ranged_ids(const unsigned long* range_list, std::vector& result) { typedef WriteHDF5::wid_t wid_t; - assert(0 == len%2); - assert(0 == result.size()%2); + assert( 0 == len%2 ); + assert( 0 == result.size()%2 ); STATIC_ASSERT(sizeof(std::pair) == 2 * sizeof(wid_t)); result.insert(result.end(), range_list, range_list + len); @@ -1680,8 +1689,8 @@ ErrorCode WriteHDF5Parallel::unpack_set(EntityHandle set, size_t buffer_size) { // Use local variables for readability - assert(buffer_size >= 4); - assert(buffer[1] + buffer[2] + buffer[3] <= buffer_size); + assert( buffer_size >= 4 ); + assert( buffer[1] + buffer[2] + buffer[3] <= buffer_size ); const unsigned long flags = buffer[0]; unsigned long num_content = buffer[1]; const unsigned long num_child = buffer[2]; @@ -1691,7 +1700,7 @@ ErrorCode WriteHDF5Parallel::unpack_set(EntityHandle set, const unsigned long* parents = children + num_child; SpecialSetData* data = find_set_data(set); - assert(NULL != data); + assert( NULL != data ); if (NULL == data) return MB_FAILURE; @@ -1760,7 +1769,7 @@ ErrorCode WriteHDF5Parallel::communicate_shared_set_data(const Range& owned, for (Range::iterator i = shared.begin(); i != shared.end(); ++i) { procs.clear(); rval = myPcomm->get_entityset_procs(*i, procs);CHECK_MB(rval); - nummess += procs.size(); + nummess += procs.size(); } // Choose a receive buffer size. 
We need 4*sizeof(long) minimum, @@ -1830,7 +1839,7 @@ ErrorCode WriteHDF5Parallel::communicate_shared_set_data(const Range& owned, rval = myPcomm->get_entityset_owner(*i, owner, &remote_handle);CHECK_MB(rval); int tag = ID_FROM_HANDLE(remote_handle); - assert(remote_handle == CREATE_HANDLE(MBENTITYSET, tag)); + assert( remote_handle == CREATE_HANDLE(MBENTITYSET, tag)); dbgOut.printf(5, "Sending %lu values for set %d to proc %u\n", send_buf[idx][1] + send_buf[idx][2] + send_buf[idx][3] + 4, tag, owner); mperr = MPI_Isend(&send_buf[idx][0], init_buff_size, MPI_UNSIGNED_LONG, @@ -1911,7 +1920,7 @@ ErrorCode WriteHDF5Parallel::communicate_shared_set_data(const Range& owned, mperr = MPI_Irecv(&buff[0], size, MPI_UNSIGNED_LONG, status.MPI_SOURCE, status.MPI_TAG, comm, &lrecv_req[idx]);CHECK_MPI(mperr); ++numrecv; - } + } recv_req[idx] = MPI_REQUEST_NULL; } @@ -1933,7 +1942,7 @@ ErrorCode WriteHDF5Parallel::communicate_shared_set_data(const Range& owned, rval = myPcomm->get_entityset_owner(*i, owner, &remote_handle);CHECK_MB(rval); int tag = ID_FROM_HANDLE(remote_handle); - assert(remote_handle == CREATE_HANDLE(MBENTITYSET, tag)); + assert( remote_handle == CREATE_HANDLE(MBENTITYSET, tag) ); dbgOut.printf(5, "Sending %lu values for set %d to proc %u\n", (unsigned long)size, tag, owner); mperr = MPI_Isend(&buff[0], size, MPI_UNSIGNED_LONG, diff --git a/src/parallel/gs.cpp b/src/parallel/gs.cpp index f5273ca14277a9b68f0fd88812517e14c9e5ef2d..a104078f011cdd325db7c15c63bea00f124a227c 100644 --- a/src/parallel/gs.cpp +++ b/src/parallel/gs.cpp @@ -255,13 +255,13 @@ void gs_data::nonlocal_info::nonlocal(realType *u, int op, MPI_Comm comm) start = buf; for (;c;--c) *buf++ = u[*sh_ind++]; - MPI_Isend(start,nshared[i]*sizeof(realType), + MPI_Isend((void*)start,nshared[i]*sizeof(realType), MPI_UNSIGNED_CHAR, targ[i],id,comm,reqs++); } start = buf; for(i=0; i_reqs,i=np*2;i;--i) @@ -379,13 +379,13 @@ void gs_data::nonlocal_info::nonlocal_many(realType **u, uint n, int op, *buf++=uu[sh_ind[c]]; } sh_ind+=ns; - MPI_Isend(start,n*ns*sizeof(realType),MPI_UNSIGNED_CHAR,targ[i],id,comm,reqs++); + MPI_Isend((void*)start,n*ns*sizeof(realType),MPI_UNSIGNED_CHAR,targ[i],id,comm,reqs++); } start = buf; for (i=0; in, sizeof(uint) ); - MPI_Isend(&send->n,sizeof(uint),MPI_UNSIGNED_CHAR, + MPI_Isend((void*)&send->n,sizeof(uint),MPI_UNSIGNED_CHAR, target ,_id ,_comm,&req[ 0]); for (i=0; in; @@ -507,14 +507,14 @@ void gs_data::crystal_data::send_(uint target, int recvn) keep->n=sum; (void)VALGRIND_CHECK_MEM_IS_DEFINED( send->buf.ptr,send->n*sizeof(uint) ); - MPI_Isend(send->buf.ptr,send->n*sizeof(uint), + MPI_Isend((void*)send->buf.ptr,send->n*sizeof(uint), MPI_UNSIGNED_CHAR,target,_id,_comm,&req[0]); if (recvn) { - MPI_Irecv(recv[0],count[0]*sizeof(uint),MPI_UNSIGNED_CHAR, + MPI_Irecv((void*)recv[0],count[0]*sizeof(uint),MPI_UNSIGNED_CHAR, target,target,_comm,&req[1]); if (recvn==2) - MPI_Irecv(recv[1],count[1]*sizeof(uint),MPI_UNSIGNED_CHAR, + MPI_Irecv((void*)recv[1],count[1]*sizeof(uint),MPI_UNSIGNED_CHAR, target+1,target+1,_comm,&req[2]); } MPI_Waitall(recvn+1,req,status); diff --git a/tools/mbcoupler/Coupler.cpp b/tools/mbcoupler/Coupler.cpp index e2037fae4f1adb50dcb411b759e4398449d295ca..518bce12032819e530ef805fe3dfa560058a4799 100644 --- a/tools/mbcoupler/Coupler.cpp +++ b/tools/mbcoupler/Coupler.cpp @@ -1218,8 +1218,8 @@ ErrorCode Coupler::get_matching_entities(EntityHandle } // Send all buffers to the master proc for consolidation - MPI_Gatherv(tuple_buf, tuple_buf_len, MPI_INT, - all_tuples_buf, recv_cnts, offsets, 
MPI_INT, MASTER_PROC, + MPI_Gatherv((void*)tuple_buf, tuple_buf_len, MPI_INT, + (void*)all_tuples_buf, recv_cnts, offsets, MPI_INT, MASTER_PROC, myPc->proc_config().proc_comm()); ERRORMPI("Gathering tuple_lists failed.", err); free(tuple_buf); // malloc'd in pack_tuples @@ -1264,7 +1264,7 @@ ErrorCode Coupler::get_matching_entities(EntityHandle if (rank != MASTER_PROC) ctl_buf = (uint*)malloc(ctl_buf_sz * sizeof(uint)); - ierr = MPI_Bcast(ctl_buf, ctl_buf_sz, MPI_INT, MASTER_PROC, myPc->proc_config().proc_comm()); + ierr = MPI_Bcast((void*)ctl_buf, ctl_buf_sz, MPI_INT, MASTER_PROC, myPc->proc_config().proc_comm()); ERRORMPI("Broadcasting tuple_list failed.", ierr); if (rank != MASTER_PROC) diff --git a/tools/mbpart.cpp b/tools/mbpart.cpp index b3756adb9f3ed997e4fc67fb1ed59f30d4629241..02b94ae1f6f45fc306d1c98b1e806f0aa2560133 100644 --- a/tools/mbpart.cpp +++ b/tools/mbpart.cpp @@ -227,7 +227,7 @@ int main(int argc, char* argv[]) #ifdef MOAB_HAVE_METIS MetisPartitioner *metis_tool = NULL; if (moab_use_metis && !metis_tool) { - metis_tool = new MetisPartitioner (&mb, false, argc, argv); + metis_tool = new MetisPartitioner (&mb, false); } if ((aggregating_tag.empty() && partition_tagged_sets) || (aggregating_tag.empty() && partition_tagged_ents))
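Note: most of the changes above are about keeping clang's stricter diagnostics quiet (the -Wall/-Wextra set enabled for debug builds in the compiler.m4 hunk) rather than changing behavior: locals that could be read before assignment are zero-initialized, the non-standard HUGE macro is replaced with std::numeric_limits<double>::max(), and unused parameter names are commented out. The snippet below is not part of the patch; it is a minimal, self-contained sketch of those three idioms, and the function name lookup_tag and its return values are hypothetical.

#include <iostream>
#include <limits>

// Unused parameter: keep the type, comment out the name so -Wunused-parameter stays quiet.
int lookup_tag(int handle, int* /*reserved*/)
{
  return (handle % 2) ? 1 : 0;  // pretend odd handles fail
}

int main()
{
  // Zero-initialize instead of leaving 'tag' indeterminate; if the lookup fails and we
  // only log, there is no "may be used uninitialized" path left for clang to flag.
  int tag = 0;
  if (lookup_tag(3, nullptr))
    std::cerr << "lookup failed, tag left at " << tag << "\n";

  // Portable "very large" sentinel: HUGE is not a standard macro, so seed the running
  // minimum from std::numeric_limits<double>::max() instead.
  double resl = std::numeric_limits<double>::max();
  const double residuals[] = {3.5, 0.25, 1.0};
  for (double r : residuals)
    if (r < resl)
      resl = r;
  std::cout << "best residual: " << resl << "\n";
  return 0;
}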