Commit dda62b58 authored by Kenneth Leiter

Update XdmfPartitioner to handle element sets more efficiently.

Modify XdmfExodusReader to add element blocks as element sets in the Xdmf file.
Merge with master.
parents 8e14630a 261e07a6
@@ -43,8 +43,13 @@ XdmfMap::New(const std::vector<shared_ptr<XdmfAttribute> > & globalNodeIds)
   std::map<node_id, std::map<task_id, node_id> > globalNodeIdMap;
 
   // fill globalNodeIdMap using globalNodeIds
+  std::vector<bool> releaseGlobalNodeIds(globalNodeIds.size(), false);
   for(unsigned int i=0; i<globalNodeIds.size(); ++i) {
     const shared_ptr<XdmfAttribute> currGlobalNodeIds = globalNodeIds[i];
+    if(!currGlobalNodeIds->isInitialized()) {
+      currGlobalNodeIds->read();
+      releaseGlobalNodeIds[i] = true;
+    }
     for(unsigned int j=0; j<currGlobalNodeIds->getSize(); ++j) {
       const node_id currGlobalNodeId = currGlobalNodeIds->getValue<node_id>(j);
       globalNodeIdMap[currGlobalNodeId][i] = j;
@@ -74,6 +79,9 @@ XdmfMap::New(const std::vector<shared_ptr<XdmfAttribute> > & globalNodeIds)
       }
     }
   }
+    if(releaseGlobalNodeIds[i]) {
+      currGlobalNodeIds->release();
+    }
   }
 
   return returnValue;
...
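The XdmfMap hunk above makes XdmfMap::New read each global-node-id array on demand and release it as soon as its ids are registered, so building a map across many partitions no longer requires every heavy array to be resident at once. A minimal sketch of the same read-if-needed/release pattern, assuming only the public XdmfAttribute API (`isInitialized()`, `read()`, `release()`); the function name is illustrative:

```cpp
#include "XdmfAttribute.hpp"

// Pull heavy data into memory only when it is absent, and release it
// afterwards only if this function was the one that read it, so callers
// that already held the data in memory are unaffected.
void visitValues(const shared_ptr<XdmfAttribute> & attribute)
{
  const bool mustRelease = !attribute->isInitialized();
  if(mustRelease) {
    attribute->read();
  }
  for(unsigned int i = 0; i < attribute->getSize(); ++i) {
    const long value = attribute->getValue<long>(i);
    (void)value; // process the value here
  }
  if(mustRelease) {
    attribute->release();
  }
}
```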
@@ -74,6 +74,7 @@ XdmfDSMCommMPI::XdmfDSMCommMPI()
 XdmfDSMCommMPI::~XdmfDSMCommMPI()
 {
+#ifndef OPEN_MPI
   if (InterComm != MPI_COMM_NULL) {
     int status = MPI_Comm_free(&InterComm);
     if (status != MPI_SUCCESS) {
@@ -96,6 +97,7 @@ XdmfDSMCommMPI::~XdmfDSMCommMPI()
       }
     }
   }
+#endif
 }
 
 void
@@ -170,7 +172,7 @@ XdmfDSMCommMPI::ClosePort()
 {
   if (Id == 0) {
     int status;
-    status = MPI_Open_port(MPI_INFO_NULL, DsmPortName);
+    status = MPI_Close_port(DsmPortName);
     if (status != MPI_SUCCESS) {
       try {
         std::string message = "Failed to close port ";
@@ -254,6 +256,7 @@ XdmfDSMCommMPI::Connect()
 void
 XdmfDSMCommMPI::Disconnect()
 {
+#ifndef OPEN_MPI
   if (InterComm != MPI_COMM_NULL) {
     int status = MPI_Comm_free(&InterComm);
     if (status != MPI_SUCCESS) {
@@ -265,6 +268,7 @@ XdmfDSMCommMPI::Disconnect()
       }
     }
   }
+#endif
   InterComm = MPI_COMM_NULL;
 }
@@ -273,6 +277,7 @@ XdmfDSMCommMPI::DupComm(MPI_Comm comm)
 {
   if (IntraComm != comm) {
     int status;
+#ifndef OPEN_MPI
     if (IntraComm != MPI_COMM_NULL) {
       status = MPI_Comm_free(&IntraComm);
       if (status != MPI_SUCCESS) {
@@ -284,6 +289,7 @@ XdmfDSMCommMPI::DupComm(MPI_Comm comm)
        }
      }
    }
+#endif
    if (comm != MPI_COMM_NULL) {
      status = MPI_Comm_dup(comm, &IntraComm);
      if (status != MPI_SUCCESS) {
@@ -307,6 +313,7 @@ XdmfDSMCommMPI::DupInterComm(MPI_Comm comm)
 {
   if (InterComm != comm) {
     int status;
+#ifndef OPEN_MPI
     if (InterComm != MPI_COMM_NULL) {
       status = MPI_Comm_free(&InterComm);
       if (status != MPI_SUCCESS) {
@@ -318,6 +325,7 @@ XdmfDSMCommMPI::DupInterComm(MPI_Comm comm)
       }
     }
   }
+#endif
   if (comm != MPI_COMM_NULL) {
     status = MPI_Comm_dup(comm, &InterComm);
     if (status != MPI_SUCCESS) {
...
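Every hunk in this file, plus the matching ones in XdmfHDF5ControllerDSM::setWorkerComm and XdmfHDF5WriterDSM::setWorkerComm below, wraps an `MPI_Comm_free` call in `#ifndef OPEN_MPI`. Open MPI's `mpi.h` defines `OPEN_MPI`, so the free is compiled out only under that implementation; the commit does not state the motivation, presumably a teardown problem when freeing these communicators under Open MPI. The guarded pattern condenses to a sketch like this (the helper name is illustrative):

```cpp
#include <mpi.h>

// Free a communicator unless built against Open MPI (whose <mpi.h>
// defines OPEN_MPI), then reset the handle either way.
static void freeCommGuarded(MPI_Comm & comm)
{
#ifndef OPEN_MPI
  if (comm != MPI_COMM_NULL) {
    if (MPI_Comm_free(&comm) != MPI_SUCCESS) {
      // error handling elided; the real code raises an Xdmf error
    }
  }
#endif
  comm = MPI_COMM_NULL;
}
```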
@@ -457,6 +457,7 @@ void XdmfHDF5ControllerDSM::setServerMode(bool newMode)
 void XdmfHDF5ControllerDSM::setWorkerComm(MPI_Comm comm)
 {
   int status;
+#ifndef OPEN_MPI
   if (mWorkerComm != MPI_COMM_NULL) {
     status = MPI_Comm_free(&mWorkerComm);
     if (status != MPI_SUCCESS) {
@@ -468,6 +469,7 @@ void XdmfHDF5ControllerDSM::setWorkerComm(MPI_Comm comm)
       }
     }
   }
+#endif
   if (comm != MPI_COMM_NULL) {
     status = MPI_Comm_dup(comm, &mWorkerComm);
     if (status != MPI_SUCCESS) {
...
@@ -406,6 +406,7 @@ void XdmfHDF5WriterDSM::setServerMode(bool newMode)
 void XdmfHDF5WriterDSM::setWorkerComm(MPI_Comm comm)
 {
   int status;
+#ifndef OPEN_MPI
   if (mWorkerComm != MPI_COMM_NULL) {
     status = MPI_Comm_free(&mWorkerComm);
     if (status != MPI_SUCCESS) {
@@ -417,6 +418,7 @@ void XdmfHDF5WriterDSM::setWorkerComm(MPI_Comm comm)
       }
     }
   }
+#endif
   if (comm != MPI_COMM_NULL) {
     status = MPI_Comm_dup(comm, &mWorkerComm);
     if (status != MPI_SUCCESS) {
...
@@ -90,7 +90,10 @@ int main(int argc, char *argv[])
   //#ClosePort begin
 
-  exampleWriter->getServerBuffer()->GetComm()->ClosePort();
+  if (id == 0)
+  {
+    exampleWriter->getServerBuffer()->GetComm()->ClosePort();
+  }
 
   //#ClosePort end
...
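Two related fixes meet in this example: XdmfDSMCommMPI::ClosePort() previously called `MPI_Open_port` where it meant `MPI_Close_port` (corrected in the XdmfDSMCommMPI hunk further up), and the example now calls ClosePort() only on rank 0, matching the `Id == 0` guard inside ClosePort itself; a port name is only meaningful on the rank that opened it. A minimal sketch of the intended pairing:

```cpp
#include <mpi.h>

// Open and later close a dynamic-connection port on the root rank only;
// other ranks never hold a valid port name.
void cyclePort(const int rank)
{
  char portName[MPI_MAX_PORT_NAME];
  if (rank == 0) {
    MPI_Open_port(MPI_INFO_NULL, portName);
    // ... publish the name and accept connections here ...
    MPI_Close_port(portName);
  }
}
```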
@@ -40,7 +40,7 @@ int main(int, char **)
   std::vector<shared_ptr<XdmfItem> > readItems2 =
     reader->read("TestXdmfReader1.xmf", "//Attribute");
-  assert(readItems2.size() == 6);
+  assert(readItems2.size() == 8);
   shared_ptr<XdmfAttribute> readAttribute =
     shared_dynamic_cast<XdmfAttribute>(readItems2[0]);
   assert(readAttribute->getName().compare("Nodal Attribute") == 0);
...
@@ -71,7 +71,6 @@ public:
     gridAttribute->setCenter(XdmfAttributeCenter::Grid());
     gridAttribute->insert(0, &gridValues[0], 2);
 
     // Add Node Set
     shared_ptr<XdmfSet> nodeSet = XdmfSet::New();
     int nodeIds[] = {0, 1, 2};
@@ -79,6 +78,15 @@ public:
     nodeSet->setType(XdmfSetType::Node());
     nodeSet->insert(0, &nodeIds[0], 3);
 
+    // Add Node Set Attribute
+    double nodeSetAttributeValues[] = {10, 11, 12};
+    shared_ptr<XdmfAttribute> nodeSetAttribute = XdmfAttribute::New();
+    nodeSetAttribute->setName("Node Set Attribute");
+    nodeSetAttribute->setType(XdmfAttributeType::Scalar());
+    nodeSetAttribute->setCenter(XdmfAttributeCenter::Node());
+    nodeSetAttribute->insert(0, &nodeSetAttributeValues[0], 3);
+    nodeSet->insert(nodeSetAttribute);
+
     // Add Time
     shared_ptr<XdmfTime> time = XdmfTime::New(100);
     grid->setTime(time);
...
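The fixture now hangs a node-centered attribute directly off the node set, which contributes to the raised `//Attribute` count asserted in TestXdmfReader above. Reading such a set attribute back might look like the following sketch; the accessors are assumed to follow the usual Xdmf child-item API, and the file name and set index are placeholders:

```cpp
#include "XdmfAttribute.hpp"
#include "XdmfReader.hpp"
#include "XdmfSet.hpp"
#include "XdmfUnstructuredGrid.hpp"

int main(int, char **)
{
  // Read the grid back and fetch the attribute stored on its first set.
  shared_ptr<XdmfReader> reader = XdmfReader::New();
  shared_ptr<XdmfUnstructuredGrid> grid =
    shared_dynamic_cast<XdmfUnstructuredGrid>(reader->read("output.xmf"));
  shared_ptr<XdmfSet> nodeSet = grid->getSet(0);
  shared_ptr<XdmfAttribute> setAttribute =
    nodeSet->getAttribute("Node Set Attribute");
  return setAttribute ? 0 : 1;
}
```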
@@ -644,10 +644,31 @@ XdmfExodusReader::read(const std::string & fileName,
   ex_close(exodusHandle);
 
+  // add block information as sets
+  unsigned int elementId = 0;
+  for(int i=0; i<num_elem_blk; ++i) {
+    const int numberElementsInBlock = numElemsInBlock[i];
+    shared_ptr<XdmfSet> set = XdmfSet::New();
+    std::stringstream setName;
+    setName << "Block " << i;
+    set->setName(setName.str());
+    set->setType(XdmfSetType::Cell());
+    set->initialize(XdmfArrayType::Int32(), numberElementsInBlock);
+    for(int j=0; j<numberElementsInBlock; ++j) {
+      set->insert(j, elementId++);
+    }
+    toReturn->insert(set);
+    if(heavyDataWriter) {
+      set->accept(heavyDataWriter);
+      set->release();
+    }
+  }
+
   delete [] blockIds;
   delete [] numElemsInBlock;
   delete [] numNodesPerElemInBlock;
   delete [] numElemAttrInBlock;
 
   if(heavyDataWriter) {
     heavyDataWriter->closeFile();
...
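With this hunk the reader emits one cell set per Exodus element block ("Block 0", "Block 1", ...), filled from a running global element index so ids are contiguous across blocks; each set is flushed to the heavy-data writer and released immediately, keeping peak memory near one block's worth. Consuming the sets might look like this sketch (the two-argument read call mirrors the signature in the hunk header, the other method names are assumed from the Xdmf API, and the file name is a placeholder):

```cpp
#include "XdmfExodusReader.hpp"
#include "XdmfHeavyDataWriter.hpp"
#include "XdmfSet.hpp"
#include "XdmfUnstructuredGrid.hpp"

int main(int, char **)
{
  shared_ptr<XdmfExodusReader> exodusReader = XdmfExodusReader::New();
  // Pass a null heavy-data writer to keep the sets in memory.
  shared_ptr<XdmfUnstructuredGrid> grid =
    exodusReader->read("mesh.exo", shared_ptr<XdmfHeavyDataWriter>());
  // One cell set per element block, named "Block <i>".
  for(unsigned int i = 0; i < grid->getNumberSets(); ++i) {
    const shared_ptr<XdmfSet> blockSet = grid->getSet(i);
    (void)blockSet; // holds global element ids, contiguous across blocks
  }
  return 0;
}
```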