Commit bba39188 authored by vijaysm

Asserts should not be cast to void. Bad change in the first place.

parent ca375e35
......@@ -3337,7 +3337,7 @@ ErrorCode ReadHDF5::read_var_len_tag(Tag tag_handle,
{
ErrorCode rval1;
if (isHandle) {
assert( (void) (readSize == sizeof(EntityHandle)));
assert( readSize == sizeof(EntityHandle) );
rval1 = readHDF5->convert_id_to_handle((EntityHandle*)data, count);MB_CHK_ERR(rval1);
}
int n = count;
......
......@@ -924,14 +924,14 @@ ErrorCode WriteHDF5Parallel::create_tag_tables()
// Copy values into local structs and if root then create tables
size_t idx = 0;
for (tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter, ++idx) {
assert( (void) (idx < counts.size()));
assert( idx < counts.size() );
tag_iter->sparse_offset = offsets[idx];
tag_iter->max_num_ents = maxima[idx];
tag_iter->write_sparse = (0 != totals[idx]);
int s;
if (MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length(tag_iter->tag_id, s)) {
++idx;
assert((void) (idx < counts.size()));
assert( idx < counts.size() );
tag_iter->var_data_offset = offsets[idx];
tag_iter->max_num_vals = maxima[idx];
}
......@@ -947,13 +947,13 @@ ErrorCode WriteHDF5Parallel::create_tag_tables()
if (0 == myPcomm->proc_config().proc_rank()) {
size_t iidx = 0;
for (tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter, ++iidx) {
assert( (void) (iidx < totals.size()));
assert( iidx < totals.size() );
unsigned long num_ents = totals[iidx];
unsigned long num_val = 0;
int s;
if (MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length(tag_iter->tag_id, s)) {
++iidx;
assert((void) (iidx < totals.size()));
assert( iidx < totals.size() );
num_val = totals[iidx];
}
dbgOut.printf(2, "Writing tag description for tag 0x%lx with %lu values\n",
......@@ -1442,7 +1442,7 @@ ErrorCode WriteHDF5Parallel::communicate_shared_set_ids(const Range& owned,
unsigned r;
EntityHandle h;
myPcomm->get_entityset_owner(*j, r, &h);
assert((void) (r == procs[i]));
assert( r == procs[i] );
remote_handles.insert(h);
}
dbgOut.print(6, remote_handles);
......@@ -1468,7 +1468,7 @@ ErrorCode WriteHDF5Parallel::communicate_shared_set_ids(const Range& owned,
if (set_procs[j] != myPcomm->proc_config().proc_rank())
send_sets[set_procs[j]].insert(*i);
}
assert((void) (send_sets.find(myPcomm->proc_config().proc_rank()) == send_sets.end()));
assert( send_sets.find(myPcomm->proc_config().proc_rank()) == send_sets.end() );
// Now send the data
std::vector< std::vector<unsigned long> > send_buf(send_sets.size());
......@@ -1496,8 +1496,8 @@ ErrorCode WriteHDF5Parallel::communicate_shared_set_ids(const Range& owned,
while (recv_count--) {
mperr = MPI_Waitany(recv_req.size(), &recv_req[0], &idx, &status);CHECK_MPI(mperr);
assert((void) ((unsigned)status.MPI_SOURCE == procs[idx]));
assert((void) (2*recv_buf[idx].front() + 1 == recv_buf[idx].size()));
assert( (unsigned)status.MPI_SOURCE == procs[idx] );
assert( 2*recv_buf[idx].front() + 1 == recv_buf[idx].size() );
const size_t n = std::min<size_t>(recv_buf[idx].front(), (recv_buf[idx].size() - 1) / 2);
dbgOut.printf(5, "Received buffer of size %lu from proc %d\n",
(unsigned long)(2*n + 1), (int)status.MPI_SOURCE);
......@@ -1505,15 +1505,15 @@ ErrorCode WriteHDF5Parallel::communicate_shared_set_ids(const Range& owned,
for (size_t i = 0; i < n; ++i) {
EntityHandle handle = 0;
rval = myPcomm->get_entityset_local_handle(procs[idx], recv_buf[idx][2*i + 1], handle);CHECK_MB(rval);
assert((void) (handle != 0));
assert( handle != 0 );
if (!idMap.insert(handle, recv_buf[idx][2*i + 2], 1).second)
error(MB_FAILURE); // Conflicting IDs??????
}
recv_req[idx] = MPI_REQUEST_NULL;
}
assert((void) (MPI_SUCCESS == MPI_Waitany(recv_req.size(), &recv_req[0], &idx, &status)
&& MPI_UNDEFINED == idx)); // Check that we got them all
assert( MPI_SUCCESS == MPI_Waitany(recv_req.size(), &recv_req[0], &idx, &status)
&& MPI_UNDEFINED == idx ); // Check that we got them all
// Wait for all sends to complete before we release send
// buffers (implicitly releases when we return from this function)
......@@ -1563,7 +1563,7 @@ ErrorCode WriteHDF5Parallel::pack_set(Range::const_iterator it,
size_t newlen;
// Buffer must always contain at least flags and desired sizes
assert((void) (buffer_size >= 4));
assert(buffer_size >= 4);
buffer_size -= 4;
Range::const_iterator nd = it; ++nd;
......@@ -1583,7 +1583,7 @@ ErrorCode WriteHDF5Parallel::pack_set(Range::const_iterator it,
if (len && !(flags & MESHSET_ORDERED)) {
tmp.clear();
bool blocked = false;
assert((void) (0 == len % 2));
assert((0 == len % 2));
rval = range_to_blocked_list(ptr, len / 2, tmp, blocked);CHECK_MB(rval);
if (blocked)
flags |= mhdf_SET_RANGE_BIT;
......@@ -1646,8 +1646,8 @@ static void merge_ranged_ids(const unsigned long* range_list,
std::vector<WriteHDF5::wid_t>& result)
{
typedef WriteHDF5::wid_t wid_t;
assert( (void) (0 == len%2));
assert( (void) (0 == result.size()%2));
assert( 0 == len%2 );
assert( 0 == result.size()%2 );
STATIC_ASSERT(sizeof(std::pair<wid_t, wid_t>) == 2 * sizeof(wid_t));
result.insert(result.end(), range_list, range_list + len);
......@@ -1680,8 +1680,8 @@ ErrorCode WriteHDF5Parallel::unpack_set(EntityHandle set,
size_t buffer_size)
{
// Use local variables for readability
assert( (void) (buffer_size >= 4));
assert( (void) (buffer[1] + buffer[2] + buffer[3] <= buffer_size));
assert( buffer_size >= 4 );
assert( buffer[1] + buffer[2] + buffer[3] <= buffer_size );
const unsigned long flags = buffer[0];
unsigned long num_content = buffer[1];
const unsigned long num_child = buffer[2];
......@@ -1691,7 +1691,7 @@ ErrorCode WriteHDF5Parallel::unpack_set(EntityHandle set,
const unsigned long* parents = children + num_child;
SpecialSetData* data = find_set_data(set);
assert( (void) (NULL != data));
assert( NULL != data );
if (NULL == data)
return MB_FAILURE;
......@@ -1816,7 +1816,7 @@ ErrorCode WriteHDF5Parallel::communicate_shared_set_data(const Range& owned,
++idx;
}
}
assert( (void) ((size_t)idx == numrecv));
assert((size_t)idx == numrecv);
// Now send set data for all remote sets that I know about
std::vector<MPI_Request> send_req(remote.size());
......@@ -1830,7 +1830,7 @@ ErrorCode WriteHDF5Parallel::communicate_shared_set_data(const Range& owned,
rval = myPcomm->get_entityset_owner(*i, owner, &remote_handle);CHECK_MB(rval);
int tag = ID_FROM_HANDLE(remote_handle);
assert( (void) (remote_handle == CREATE_HANDLE(MBENTITYSET, tag)));
assert( remote_handle == CREATE_HANDLE(MBENTITYSET, tag));
dbgOut.printf(5, "Sending %lu values for set %d to proc %u\n",
send_buf[idx][1] + send_buf[idx][2] + send_buf[idx][3] + 4, tag, owner);
mperr = MPI_Isend(&send_buf[idx][0], init_buff_size, MPI_UNSIGNED_LONG,
......@@ -1841,7 +1841,7 @@ ErrorCode WriteHDF5Parallel::communicate_shared_set_data(const Range& owned,
//iFace->tag_get_handle(MATERIAL_SET_TAG_NAME, 1, MB_TYPE_INTEGER, mattag);
// Now initialize local data for managing contents of owned, shared sets
assert( (void) (specialSets.empty()));
assert(specialSets.empty());
specialSets.clear();
specialSets.reserve(owned.size());
for (Range::iterator i = owned.begin(); i != owned.end(); ++i) {
......@@ -1933,7 +1933,7 @@ ErrorCode WriteHDF5Parallel::communicate_shared_set_data(const Range& owned,
rval = myPcomm->get_entityset_owner(*i, owner, &remote_handle);CHECK_MB(rval);
int tag = ID_FROM_HANDLE(remote_handle);
assert( (void) (remote_handle == CREATE_HANDLE(MBENTITYSET, tag)));
assert( remote_handle == CREATE_HANDLE(MBENTITYSET, tag) );
dbgOut.printf(5, "Sending %lu values for set %d to proc %u\n",
(unsigned long)size, tag, owner);
mperr = MPI_Isend(&buff[0], size, MPI_UNSIGNED_LONG,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment