Commit 7755f6b8 authored by Andrew J. Burns (Cont)

DSM example working for previous version of DSM, need to adjust for newer version

parent eee28dc6
@@ -11,6 +11,7 @@ int main(int argc, char *argv[])
{
int size, id, providedThreading, dsmSize;
dsmSize = 16;
MPI_Status status;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &providedThreading);
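The hunk above requests MPI_THREAD_MULTIPLE but never checks the level the MPI library actually granted. A minimal sketch, in plain MPI with no Xdmf calls, of how the provided level could be verified before any DSM setup; the abort-on-failure behavior is an illustrative assumption, not part of this commit:

#include <mpi.h>
#include <cstdlib>
#include <iostream>

int main(int argc, char *argv[])
{
  int providedThreading;
  MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &providedThreading);
  // The DSM service needs full thread support; stop early if the
  // library only granted a weaker level.
  if (providedThreading < MPI_THREAD_MULTIPLE)
  {
    std::cerr << "MPI_THREAD_MULTIPLE not provided, got level "
              << providedThreading << std::endl;
    MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
  }
  MPI_Finalize();
  return 0;
}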
@@ -52,17 +53,25 @@ int main(int argc, char *argv[])
<< (int)numServers << std::endl;
}
/* This write algorithm isn't supported yet; the writers all write to the same location instead of propagating an offset for each core (see the offset sketch after this block).
// Create Array
// Array should be distributed among processes
shared_ptr<XdmfArray> array = XdmfArray::New();
array->initialize<int>(0);
array->pushBack(id+id*0);
array->pushBack(id+id*1);
array->pushBack(id+id*2);
array->pushBack(id);
array->pushBack(id*2);
array->pushBack(id*3);
array->pushBack(id*4);
std::cout << "from core #" << id << " generating these values:" << id << " " << id*2 << " " << id*3 << " " << id*4 << std::endl;
std::cout << "setting up DSM writer" << std::endl;
// Create DSM Writer and write to DSM space.
shared_ptr<XdmfHDF5WriterDSM> writer = XdmfHDF5WriterDSM::New("dsm", dsmBuffer);
std::cout << "DSM writer set up" << std::endl;
MPI_Barrier(comm);
array->accept(writer);
std::cout << "DSM writer used" << std::endl;
//ensure all writing finishes
MPI_Barrier(comm);
@@ -70,13 +79,14 @@ int main(int argc, char *argv[])
if (id == 0)
{
std::cout << "getting output" << std::endl;
// Read data
// Read data (Working on getting this to produce meaningful results.)
std::vector<unsigned int> startVector;
startVector.push_back(0);
startVector.push_back(array->getSize() * id);
std::cout << "starting index" << startVector[0] << std::endl;
std::vector<unsigned int> strideVector;
strideVector.push_back(0);
strideVector.push_back(1);
std::vector<unsigned int> countVector;
countVector.push_back(array->getSize()*size);
countVector.push_back(array->getSize());
shared_ptr<XdmfArray> readArray = XdmfArray::New();
shared_ptr<XdmfHDF5ControllerDSM> readController = XdmfHDF5ControllerDSM::New(
array->getHeavyDataController()->getFilePath(),
@@ -92,12 +102,76 @@ int main(int argc, char *argv[])
std::cout << "printing output" << std::endl;
for(unsigned int i=0; i<readArray->getSize(); ++i)
{
std::cout << "readArray[" << i << "] = " << readArray->getValue<int>(i) << std::endl;
std::cout << "core #" << id <<" readArray[" << i << "] = " << readArray->getValue<int>(i) << std::endl;
}
}
*/
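The disabled block above has every writer landing on the same dataset region rather than advancing its offset per core. A minimal sketch, in plain MPI only, of the per-rank offset arithmetic such a layout would need; valuesPerRank and the printed ranges are illustrative assumptions, not Xdmf API from this commit:

#include <mpi.h>
#include <iostream>

int main(int argc, char *argv[])
{
  MPI_Init(&argc, &argv);
  int size, id;
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  MPI_Comm_rank(MPI_COMM_WORLD, &id);

  // Each rank owns a contiguous block; the start offset must advance
  // with the rank, or every writer lands on the same region.
  const unsigned int valuesPerRank = 4;            // matches the four pushBack calls above
  const unsigned int start = id * valuesPerRank;   // per-rank offset into the shared dataset
  const unsigned int count = valuesPerRank;

  std::cout << "core #" << id << " would write [" << start << ", "
            << start + count << ") of a dataset of length "
            << size * valuesPerRank << std::endl;

  MPI_Finalize();
  return 0;
}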
// Wait for everyone to have finished reading
//variables holding data for building the reading controllers
std::string readFilePath = "";
std::string readSetPath = "";
shared_ptr<const XdmfArrayType> readType = XdmfArrayType::Int32();
int totalValues = 0;
//Array is generated and written to DSM
shared_ptr<XdmfArray> array = XdmfArray::New();
array->initialize<int>(0);
for (int i = 0; i<(size*2); i++)
{
array->pushBack(i);
}
shared_ptr<XdmfHDF5WriterDSM> spreadWriter = XdmfHDF5WriterDSM::New("dsmspread" , dsmBuffer);
array->accept(spreadWriter);
readFilePath = array->getHeavyDataController()->getFilePath();
readSetPath = array->getHeavyDataController()->getDataSetPath();
readType = array->getHeavyDataController()->getType();
totalValues = array->getSize();
MPI_Barrier(comm);
//all cores read part of the written buffer
std::vector<unsigned int> startVector;
startVector.push_back((totalValues/size)*((size-1)-id));
std::vector<unsigned int> strideVector;
strideVector.push_back(1);
std::vector<unsigned int> countVector;
countVector.push_back((totalValues/size));
shared_ptr<XdmfArray> readArray = XdmfArray::New();
shared_ptr<XdmfHDF5ControllerDSM> readController = XdmfHDF5ControllerDSM::New(
readFilePath,
readSetPath,
readType,
startVector,
strideVector,
countVector,
dsmBuffer);
readArray->setHeavyDataController(readController);
readArray->read();
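The start offset (totalValues/size)*((size-1)-id) hands each rank the block at the opposite end of the rank order. A minimal sketch of that index arithmetic in plain MPI, assuming as above that the array holds size*2 values; it only prints the ranges and does not touch the DSM buffer:

#include <mpi.h>
#include <iostream>

int main(int argc, char *argv[])
{
  MPI_Init(&argc, &argv);
  int size, id;
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  MPI_Comm_rank(MPI_COMM_WORLD, &id);

  const int totalValues = size * 2;             // array filled with 0 .. size*2 - 1
  const int count = totalValues / size;         // 2 values per rank
  const int start = count * ((size - 1) - id);  // reversed block order
  // e.g. with size = 4: rank 0 reads indices 6-7, rank 1 reads 4-5,
  // rank 2 reads 2-3, rank 3 reads 0-1.
  std::cout << "core #" << id << " reads indices [" << start << ", "
            << start + count << ")" << std::endl;

  MPI_Finalize();
  return 0;
}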
for (int j = 0; j<size; j++)
{
MPI_Barrier(comm);
if (j==id)
{
std::cout << "core #" << id << " starting index" << startVector[0] << std::endl;
std::cout << "core #" << id << " printing output" << std::endl;
for(unsigned int i=0; i<readArray->getSize(); ++i)
{
std::cout << "core #" << id <<" readArray[" << i << "] = " << readArray->getValue<int>(i) << std::endl;
}
}
}
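The loop above lets one rank at a time print between barriers so output from the cores does not interleave. A standalone sketch of that rank-ordered printing idiom in plain MPI; note that ordered stdout is still best-effort, since the launcher may buffer each process's output:

#include <mpi.h>
#include <iostream>

int main(int argc, char *argv[])
{
  MPI_Init(&argc, &argv);
  int size, id;
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  MPI_Comm_rank(MPI_COMM_WORLD, &id);

  for (int turn = 0; turn < size; ++turn)
  {
    MPI_Barrier(MPI_COMM_WORLD);      // everyone waits at each turn
    if (turn == id)                   // only the current rank prints
    {
      std::cout << "core #" << id << " reporting" << std::endl;
    }
  }

  MPI_Finalize();
  return 0;
}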
// Wait for everyone to have finished reading
MPI_Barrier(comm);
std::cout << "Finished from Core #" << id << std::endl;
MPI_Barrier(comm);