diff --git a/examples/Cxx/XdmfConnectTest2.cpp b/examples/Cxx/XdmfConnectTest2.cpp
index 932e6ea9aa23ffd53b4c92aa610638360ac78d07..5111281bd65dfd612bdda52ad8d8d36c63591c52
--- a/examples/Cxx/XdmfConnectTest2.cpp
+++ b/examples/Cxx/XdmfConnectTest2.cpp
@@ -29,7 +29,7 @@ int main(int argc, char *argv[])
   // Initializing objects
   //since the start and end ids are larger than the size there are no buffers alloted
-  //thus, not blockage occurs
+  //thus, no blockage occurs
   XdmfDSMCommMPI * testComm = new XdmfDSMCommMPI();
   testComm->DupComm(comm);
   testComm->Init();
diff --git a/utils/CMakeLists.txt b/utils/CMakeLists.txt
index 291e6a0af5548d05994f16a2db3c86bda315a8ee..f650aadd8a3bccdfdd7f79e7db3bc5a9304d353d
--- a/utils/CMakeLists.txt
+++ b/utils/CMakeLists.txt
@@ -3,7 +3,7 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR})
 option(XDMF_BUILD_EXODUS_IO OFF)
 option(XDMF_BUILD_PARTITIONER OFF)
 option(XDMF_BUILD_FORTRAN OFF)
-
+option(XDMF_BUILD_DSM OFF)
 
 set(XdmfUtilsSources
   XdmfDiff
@@ -15,6 +15,10 @@ if(XDMF_BUILD_FORTRAN)
   set(XdmfUtilsSources ${XdmfUtilsSources} XdmfFortran)
 endif(XDMF_BUILD_FORTRAN)
 
+if(XDMF_BUILD_DSM)
+  add_definitions(-DXDMF_BUILD_DSM)
+endif(XDMF_BUILD_DSM)
+
 if(XDMF_BUILD_EXODUS_IO)
   set(XDMF_SWIG_FLAGS ${XDMF_SWIG_FLAGS} -DXDMF_BUILD_EXODUS_IO)
   find_package(Exodus REQUIRED)
diff --git a/utils/XdmfFortran.cpp b/utils/XdmfFortran.cpp
index adf75f0dd196b53390ae644674e65a6cfc6e128f..dcb96ad67f0698944a7de8ef2adc41b9efa9271d
--- a/utils/XdmfFortran.cpp
+++ b/utils/XdmfFortran.cpp
@@ -57,6 +57,11 @@
 #include
 #include
 
+#ifdef XDMF_BUILD_DSM
+  #include <mpi.h>
+  #include "XdmfHDF5WriterDSM.hpp"
+  #include "XdmfHDF5ControllerDSM.hpp"
+#endif
 
 template
 void
@@ -300,6 +305,7 @@ XdmfFortran::XdmfFortran() :
   mOrigin(shared_ptr()),
   mDimensions(shared_ptr()),
   mHeavyDataWriter(shared_ptr()),
+  mDSMWriter(shared_ptr<XdmfHDF5WriterDSM>()),
   mMaxFileSize(0),
   mAllowSetSplitting(false)
 {
@@ -7536,18 +7542,325 @@ XdmfFortran::initHDF5(const char * const xmlFilePath,
   shared_ptr writer = XdmfHDF5Writer::New(xmlFilePath);
   writer->setFileSizeLimit(mMaxFileSize);
   writer->setAllowSetSplitting(mAllowSetSplitting);
-  writer->setReleaseData( release );
+  writer->setReleaseData(release);
   mHeavyDataWriter = writer;
 }
 
-void
+#ifdef XDMF_BUILD_DSM
+
+void
+XdmfFortran::initDSMServer(const char * const filePath,
+                           MPI_Comm comm,
+                           int bufferSize,
+                           int startCoreIndex,
+                           int endCoreIndex)
+{
+  // Non-Threaded version
+  std::string writtenFile(filePath);
+  if (bufferSize > 0) {
+    mDSMWriter = XdmfHDF5WriterDSM::New(writtenFile, comm, (unsigned int) bufferSize, startCoreIndex, endCoreIndex);
+    mDSMWriter->setMode(XdmfHeavyDataWriter::Hyperslab);
+  }
+  else {
+    try {
+      XdmfError::message(XdmfError::FATAL,
+                         "Error: Non-positive DSM buffer size.");
+    }
+    catch (XdmfError e) {
+      throw e;
+    }
+  }
+}
+
+void
+XdmfFortran::acceptDSM(int numConnections)
+{
+  if (mDSMWriter) {
+    mDSMWriter->getServerBuffer()->GetComm()->OpenPort();
+    mDSMWriter->getServerBuffer()->SendAccept(numConnections);
+  }
+  else {
+    try {
+      XdmfError::message(XdmfError::FATAL,
+                         "Error: Attempting to accept connection when DSM is not set up.");
+    }
+    catch (XdmfError e) {
+      throw e;
+    }
+  }
+}
+
+void
+XdmfFortran::closeDSMPort()
+{
+  if (mDSMWriter) {
+    mDSMWriter->getServerBuffer()->GetComm()->ClosePort();
+  }
+  else {
+    try {
+      XdmfError::message(XdmfError::FATAL,
+                         "Error: Attempting to close a port when DSM is not set up.");
+    }
+    catch (XdmfError e) {
+      throw e;
+    }
+  }
+}
+
+void
+XdmfFortran::connectDSM(const char * const filePath,
+                        MPI_Comm comm)
+{
+  std::string writtenFile(filePath);
+  XdmfDSMCommMPI * dsmComm = new XdmfDSMCommMPI();
+  dsmComm->DupComm(comm);
+  dsmComm->Init();
+  XdmfDSMBuffer * dsmBuffer = new XdmfDSMBuffer();
+  dsmBuffer->SetIsServer(false);
+  dsmBuffer->SetComm(dsmComm);
+  dsmBuffer->SetIsConnected(true);
+
+  mDSMWriter = XdmfHDF5WriterDSM::New(writtenFile, dsmBuffer);
+
+  mDSMWriter->setMode(XdmfHeavyDataWriter::Hyperslab);
+
+  // Currently uses default config file name
+  mDSMWriter->getServerBuffer()->GetComm()->ReadDsmPortName();
+
+  mDSMWriter->getServerManager()->Connect();
+  // To check if the DSM writer is using server mode
+  // bool test = mDSMWriter->getServerMode();
+}
+
+MPI_Comm
+XdmfFortran::getDSMInterComm()
+{
+  // Sanity check
+  if (mDSMWriter) {
+    return mDSMWriter->getServerBuffer()->GetComm()->GetInterComm();
+  }
+  else {
+    return MPI_COMM_NULL;
+  }
+}
+
+MPI_Comm
+XdmfFortran::getDSMIntraComm()
+{
+  // Sanity check
+  if (mDSMWriter) {
+    return mDSMWriter->getServerBuffer()->GetComm()->GetIntraComm();
+  }
+  else {
+    return MPI_COMM_NULL;
+  }
+}
+
+// Call only on one core
+void
+XdmfFortran::stopDSM()
+{
+  if (mDSMWriter) {
+    mDSMWriter->stopDSM();
+  }
+  else {
+    try {
+      XdmfError::message(XdmfError::FATAL,
+                         "Error: Stop called when DSM not initialized.");
+    }
+    catch (XdmfError e) {
+      throw e;
+    }
+  }
+  mDSMWriter = shared_ptr<XdmfHDF5WriterDSM>();
+}
+
+void
+XdmfFortran::readFromDSM(const char * const dsmDataSetPath,
+                         const int arrayType,
+                         void * values,
+                         const int start,
+                         const int stride,
+                         const int dimensions,
+                         const int dataspace)
+{
+  if (mDSMWriter) {
+    shared_ptr<const XdmfArrayType> writtenArrayType = shared_ptr<const XdmfArrayType>();
+    switch(arrayType) {
+      case XDMF_ARRAY_TYPE_INT8:
+        writtenArrayType = XdmfArrayType::Int8();
+        break;
+      case XDMF_ARRAY_TYPE_INT16:
+        writtenArrayType = XdmfArrayType::Int16();
+        break;
+      case XDMF_ARRAY_TYPE_INT32:
+        writtenArrayType = XdmfArrayType::Int32();
+        break;
+      case XDMF_ARRAY_TYPE_INT64:
+        writtenArrayType = XdmfArrayType::Int64();
+        break;
+      case XDMF_ARRAY_TYPE_UINT8:
+        writtenArrayType = XdmfArrayType::UInt8();
+        break;
+      case XDMF_ARRAY_TYPE_UINT16:
+        writtenArrayType = XdmfArrayType::UInt16();
+        break;
+      case XDMF_ARRAY_TYPE_UINT32:
+        writtenArrayType = XdmfArrayType::UInt32();
+        break;
+      case XDMF_ARRAY_TYPE_FLOAT32:
+        writtenArrayType = XdmfArrayType::Float32();
+        break;
+      case XDMF_ARRAY_TYPE_FLOAT64:
+        writtenArrayType = XdmfArrayType::Float64();
+        break;
+      default:
+        try {
+          XdmfError::message(XdmfError::FATAL,
+                             "Invalid array number type");
+        }
+        catch (XdmfError e) {
+          throw e;
+        }
+    }
+    std::vector<unsigned int> startVector;
+    startVector.push_back(start);
+    std::vector<unsigned int> strideVector;
+    strideVector.push_back(stride);
+    std::vector<unsigned int> dimVector;
+    dimVector.push_back(dimensions);
+    std::vector<unsigned int> dataVector;
+    dataVector.push_back(dataspace);
+    std::string writtenPath(dsmDataSetPath);
+    shared_ptr<XdmfHDF5ControllerDSM> writerController =
+      XdmfHDF5ControllerDSM::New(mDSMWriter->getFilePath(),
+                                 writtenPath,
+                                 writtenArrayType,
+                                 startVector,
+                                 strideVector,
+                                 dimVector,
+                                 dataVector,
+                                 mDSMWriter->getServerBuffer());
+
+    shared_ptr<XdmfArray> readArray = XdmfArray::New();
+
+    readArray->insert(writerController);
+    readArray->read();
+
+    readFromArray(readArray,
+                  arrayType,
+                  values,
+                  dimensions,
+                  0,
+                  1,
+                  1);
+  }
+  else {
+    try {
+      XdmfError::message(XdmfError::FATAL,
+                         "Error: Attempting to read from DSM when DSM is not set up.");
+    }
+    catch (XdmfError e) {
+      throw e;
+    }
+  }
+}
+
+void
+XdmfFortran::writeToDSM(const char * const dsmDataSetPath,
+                        const int arrayType,
+                        void * values,
+                        const int start,
+                        const int stride,
+                        const int dimensions,
+                        const int dataspace)
+{
+  if (mDSMWriter) {
+    shared_ptr<const XdmfArrayType> writtenArrayType = shared_ptr<const XdmfArrayType>();
+    switch(arrayType) {
+      case XDMF_ARRAY_TYPE_INT8:
+        writtenArrayType = XdmfArrayType::Int8();
+        break;
+      case XDMF_ARRAY_TYPE_INT16:
+        writtenArrayType = XdmfArrayType::Int16();
+        break;
+      case XDMF_ARRAY_TYPE_INT32:
+        writtenArrayType = XdmfArrayType::Int32();
+        break;
+      case XDMF_ARRAY_TYPE_INT64:
+        writtenArrayType = XdmfArrayType::Int64();
+        break;
+      case XDMF_ARRAY_TYPE_UINT8:
+        writtenArrayType = XdmfArrayType::UInt8();
+        break;
+      case XDMF_ARRAY_TYPE_UINT16:
+        writtenArrayType = XdmfArrayType::UInt16();
+        break;
+      case XDMF_ARRAY_TYPE_UINT32:
+        writtenArrayType = XdmfArrayType::UInt32();
+        break;
+      case XDMF_ARRAY_TYPE_FLOAT32:
+        writtenArrayType = XdmfArrayType::Float32();
+        break;
+      case XDMF_ARRAY_TYPE_FLOAT64:
+        writtenArrayType = XdmfArrayType::Float64();
+        break;
+      default:
+        try {
+          XdmfError::message(XdmfError::FATAL,
+                             "Invalid array number type");
+        }
+        catch (XdmfError e) {
+          throw e;
+        }
+    }
+    std::vector<unsigned int> startVector;
+    startVector.push_back(start);
+    std::vector<unsigned int> strideVector;
+    strideVector.push_back(stride);
+    std::vector<unsigned int> dimVector;
+    dimVector.push_back(dimensions);
+    std::vector<unsigned int> dataVector;
+    dataVector.push_back(dataspace);
+    std::string writtenPath(dsmDataSetPath);
+    shared_ptr<XdmfHDF5ControllerDSM> writerController =
+      XdmfHDF5ControllerDSM::New(mDSMWriter->getFilePath(),
+                                 writtenPath,
+                                 writtenArrayType,
+                                 startVector,
+                                 strideVector,
+                                 dimVector,
+                                 dataVector,
+                                 mDSMWriter->getServerBuffer());
+
+    shared_ptr<XdmfArray> writtenArray = XdmfArray::New();
+    writeToArray(writtenArray,
+                 dimensions,
+                 arrayType,
+                 values);
+    writtenArray->insert(writerController);
+    writtenArray->accept(mDSMWriter);
+  }
+  else {
+    try {
+      XdmfError::message(XdmfError::FATAL,
+                         "Error: Attempting to write to DSM when DSM is not set up.");
+    }
+    catch (XdmfError e) {
+      throw e;
+    }
+  }
+}
+
+#endif
+
+void
 XdmfFortran::read(const char * const xmlFilePath)
 {
   shared_ptr reader = XdmfReader::New();
   mDomain = shared_dynamic_cast(reader->read( xmlFilePath ));
 }
 
-
 //temporary fix, hopefully
 int
 XdmfFortran::setTopologyPolyline(const unsigned int nodesPerElement,
@@ -9519,6 +9832,114 @@ extern "C"
     xdmfFortran->write(xmlFilePath, *datalimit, *release);
   }
 
+#ifdef XDMF_BUILD_DSM
+
+  void
+  XdmfInitDSMServer(long * pointer,
+                    char * filePath,
+                    MPI_Fint * comm,
+                    int * bufferSize,
+                    int * startCoreIndex,
+                    int * endCoreIndex)
+  {
+    XdmfFortran * xdmfFortran = reinterpret_cast<XdmfFortran *>(*pointer);
+    MPI_Comm tempComm = MPI_Comm_f2c(*comm);
+    xdmfFortran->initDSMServer(filePath,
+                               tempComm,
+                               *bufferSize,
+                               *startCoreIndex,
+                               *endCoreIndex);
+  }
+
+  void
+  XdmfAcceptDSM(long * pointer, int * numConnections)
+  {
+    XdmfFortran * xdmfFortran = reinterpret_cast<XdmfFortran *>(*pointer);
+    xdmfFortran->acceptDSM(*numConnections);
+  }
+
+  void
+  XdmfCloseDSMPort(long * pointer)
+  {
+    XdmfFortran * xdmfFortran = reinterpret_cast<XdmfFortran *>(*pointer);
+    xdmfFortran->closeDSMPort();
+  }
+
+  void
+  XdmfConnectDSM(long * pointer,
+                 char * filePath,
+                 MPI_Fint * comm)
+  {
+    XdmfFortran * xdmfFortran = reinterpret_cast<XdmfFortran *>(*pointer);
+    MPI_Comm tempComm = MPI_Comm_f2c(*comm);
+    xdmfFortran->connectDSM(filePath, tempComm);
+  }
+
+  void
+  XdmfGetDSMInterComm(long * pointer, MPI_Fint * returnComm)
+  {
+    XdmfFortran * xdmfFortran = reinterpret_cast<XdmfFortran *>(*pointer);
+    MPI_Comm tempComm = xdmfFortran->getDSMInterComm();
+    *returnComm = MPI_Comm_c2f(tempComm);
+  }
+
+  void
+  XdmfGetDSMIntraComm(long * pointer, MPI_Fint * returnComm)
+  {
+    XdmfFortran * xdmfFortran = reinterpret_cast<XdmfFortran *>(*pointer);
+    MPI_Comm tempComm = xdmfFortran->getDSMIntraComm();
+    *returnComm = MPI_Comm_c2f(tempComm);
+  }
+
+  void
+  XdmfStopDSM(long * pointer)
+  {
+    XdmfFortran * xdmfFortran = reinterpret_cast<XdmfFortran *>(*pointer);
+    xdmfFortran->stopDSM();
+  }
+
+  void
+  XdmfReadFromDSM(long * pointer,
+                  char * dsmDataSetPath,
+                  int * arrayType,
+                  void * values,
+                  int * start,
+                  int * stride,
+                  int * dimensions,
+                  int * dataspace)
+  {
+    XdmfFortran * xdmfFortran = reinterpret_cast<XdmfFortran *>(*pointer);
+    xdmfFortran->readFromDSM(dsmDataSetPath,
+                             *arrayType,
+                             values,
+                             *start,
+                             *stride,
+                             *dimensions,
+                             *dataspace);
+  }
+
+  void
+  XdmfWriteToDSM(long * pointer,
+                 char * dsmDataSetPath,
+                 int * arrayType,
+                 void * values,
+                 int * start,
+                 int * stride,
+                 int * dimensions,
+                 int * dataspace)
+  {
+    XdmfFortran * xdmfFortran = reinterpret_cast<XdmfFortran *>(*pointer);
+    xdmfFortran->writeToDSM(dsmDataSetPath,
+                            *arrayType,
+                            values,
+                            *start,
+                            *stride,
+                            *dimensions,
+                            *dataspace);
+  }
+
+#endif
+
   void
   XdmfWriteHDF5(long * pointer,
                 char * xmlFilePath,
diff --git a/utils/XdmfFortran.hpp b/utils/XdmfFortran.hpp
index ccc9ef33c33f361fba596d21dd15b357d7075828..fa6399ffe55d6889af3e6c96c86532d62ee90e33
--- a/utils/XdmfFortran.hpp
+++ b/utils/XdmfFortran.hpp
@@ -41,12 +41,16 @@ class XdmfRectilinearGrid;
 class XdmfRegularGrid;
 class XdmfUnstructuredGrid;
 class XdmfHeavyDataWriter;
+class XdmfHDF5WriterDSM;
 
 //Includes
 #include
 #include
 #include "XdmfUtils.hpp"
 #include "XdmfSharedPtr.hpp"
+#ifdef XDMF_BUILD_DSM
+  #include "mpi.h"
+#endif
 
 /**
  * Array Type
@@ -167,6 +171,15 @@
 #define XdmfSetMaxFileSize xdmfsetmaxfilesize_
 #define XdmfWrite xdmfwrite_
 #define XdmfRead xdmfread_
+#define XdmfInitDSMServer xdmfinitdsmserver_
+#define XdmfAcceptDSM xdmfacceptdsm_
+#define XdmfCloseDSMPort xdmfclosedsmport_
+#define XdmfConnectDSM xdmfconnectdsm_
+#define XdmfGetDSMInterComm xdmfgetdsmintercomm_
+#define XdmfGetDSMIntraComm xdmfgetdsmintracomm_
+#define XdmfStopDSM xdmfstopdsm_
+#define XdmfReadFromDSM xdmfreadfromdsm_
+#define XdmfWriteToDSM xdmfwritetodsm_
 #define XdmfWriteHDF5 xdmfwritehdf5_
 #define XdmfInitHDF5 xdmfinithdf5_
 #define XdmfSetTopologyPolyline xdmfsettopologypolyline_
@@ -2887,6 +2900,112 @@ public:
    */
   void writeHDF5(const char * const xmlFilePath, const bool release);
 
+#ifdef XDMF_BUILD_DSM
+
+  /**
+   * Starts up a dsm server on cores from the start index to the end index.
+   * The server cores are blocked and will not proceed past this point
+   * until the dsm is stopped.
+   *
+   * @param filePath       A string denoting the file path to the virtual file.
+   * @param comm           The communicator shared by all cores,
+   *                       worker and server.
+   * @param bufferSize     The size of the memory buffer allocated per core.
+   * @param startCoreIndex The core index on which the dsm section begins.
+   * @param endCoreIndex   The core index on which the dsm section ends.
+   */
+  void initDSMServer(const char * const filePath,
+                     MPI_Comm comm,
+                     int bufferSize,
+                     int startCoreIndex,
+                     int endCoreIndex);
+
+  /**
+   * Accepts new connections into an initialized dsm server.
+   *
+   * @param numConnections The number of incoming connections to accept.
+   */
+  void acceptDSM(int numConnections);
+
+  /**
+   * Closes the currently open DSM connection port.
+   */
+  void closeDSMPort();
+
+  /**
+   * Connects to an initialized dsm server.
+   *
+   * @param filePath       The file path that the DSMWriter will be writing to;
+   *                       should be the same as on the server side.
+   * @param comm           The local communicator to be connected to the
+   *                       server communicator.
+   */
+  void connectDSM(const char * const filePath, MPI_Comm comm);
+
+  /**
+   * Gets the communicator that spans the entire dsm and all cores connected to it.
+   *
+   * @return The overarching communicator.
+   */
+  MPI_Comm getDSMInterComm();
+
+  /**
+   * Gets the local communicator that the worker cores share.
+   *
+   * @return The communicator for the local worker group.
+   */
+  MPI_Comm getDSMIntraComm();
+
+  /**
+   * Stops the currently running dsm server.
+   */
+  void stopDSM();
+
+  /**
+   * Writes the provided values to the DSM at the location described by
+   * the provided start, stride, dimensions, and dataspace.
+   *
+   * @param dsmDataSetPath The path to the dataset being written to.
+   *                       The current dsm dataset is overwritten if this differs.
+   * @param arrayType      The data type of the data to be written to the set.
+   * @param values         A pointer to the values to be written.
+   * @param start          The starting index to write to.
+   * @param stride         The increment between written values.
+   * @param dimensions     The number of data items written.
+   * @param dataspace      The total number of data items in the dataset.
+   */
+  void writeToDSM(const char * const dsmDataSetPath,
+                  const int arrayType,
+                  void * values,
+                  const int start,
+                  const int stride,
+                  const int dimensions,
+                  const int dataspace);
+
+  /**
+   * Reads the data in the dsm at the location generated from the
+   * start, stride, dimensions, and dataspace and places it into the
+   * provided pointer.
+   *
+   * @param dsmDataSetPath The path to the dataset being read.
+   * @param arrayType      The data type of the data being read.
+   * @param values         A pointer to the location where the data
+   *                       will be stored.
+   * @param start          The starting index of the read.
+   * @param stride         The increment between read values.
+   * @param dimensions     The number of values to be read.
+   * @param dataspace      The total number of data items in the dataset.
+   */
+  void readFromDSM(const char * const dsmDataSetPath,
+                   const int arrayType,
+                   void * values,
+                   const int start,
+                   const int stride,
+                   const int dimensions,
+                   const int dataspace);
+
+#endif
+
   /**
    * Generate the persistant hdf5 writer so it doesn't need to be generated later
    *
@@ -2974,6 +3093,7 @@ private:
   shared_ptr mOrigin;
   shared_ptr mDimensions;
   shared_ptr mHeavyDataWriter;
+  shared_ptr<XdmfHDF5WriterDSM> mDSMWriter;
 
   std::vector > mAttributes;
   std::vector > mCoordinates;
diff --git a/utils/tests/Fortran/AcceptDSMFortan.f90 b/utils/tests/Fortran/AcceptDSMFortan.f90
new file mode 100644
index 0000000000000000000000000000000000000000..8136aac3c5670959881679901d81680febf8bcc6
--- /dev/null
+++ b/utils/tests/Fortran/AcceptDSMFortan.f90
@@ -0,0 +1,62 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!!
+!! AUTHOR: Andrew Burns (andrew.j.burns2@us.army.mil)
+!!
+!! A test of the XdmfDSM capabilities using the fortran interface.
+!! This part of the program sets up the server to be used by the DSM.
+!!
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
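+
+!! This program starts the DSM server and, on core 0, accepts two worker
+!! connections; the ConnectDSMFortran test programs are intended to connect to it.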
+
+
+PROGRAM XdmfFortranExample
+
+  Implicit none
+  INCLUDE 'Xdmf.f'
+  INCLUDE 'mpif.h'
+
+
+
+  INTEGER*8 obj
+  character*256 filename
+  INTEGER id, commsize, buffersize
+  INTEGER intraID, intraSize
+  INTEGER ierr
+
+  INTEGER interComm
+
+  filename = 'nested_output.xmf'//CHAR(0)
+
+  CALL MPI_INIT (ierr)
+
+  CALL XDMFINIT(obj, filename)
+
+  CALL MPI_COMM_RANK(MPI_COMM_WORLD, id, ierr)
+  CALL MPI_COMM_SIZE(MPI_COMM_WORLD, commsize, ierr)
+
+  buffersize = 16/commsize
+
+  if (buffersize < 1) then
+    buffersize = 1
+  endif
+
+!! Cores 1 to size-1 are blocked by the initialization of the server.
+!! When the server finishes they are released and allowed to progress.
+  CALL XDMFINITDSMSERVER(obj, "dsm"//CHAR(0), MPI_COMM_WORLD, bufferSize, 1, commsize-1)
+
+  if (id == 0) then
+    CALL XDMFACCEPTDSM(obj, 2)
+  endif
+
+  CALL XDMFGETDSMINTERCOMM(obj, interComm)
+
+!! Wait for other processes to finish before closing
+
+  CALL MPI_BARRIER(interComm, ierr)
+
+  CALL XDMFCLOSEDSMPORT(obj)
+
+  CALL XDMFCLOSE(obj)
+
+  CALL MPI_FINALIZE(ierr)
+
+END PROGRAM XdmfFortranExample
diff --git a/utils/tests/Fortran/ConnectDSMFortran.f90 b/utils/tests/Fortran/ConnectDSMFortran.f90
new file mode 100644
index 0000000000000000000000000000000000000000..90b7bc9b54558c9bb11b845cc5bb83f26e27718e
--- /dev/null
+++ b/utils/tests/Fortran/ConnectDSMFortran.f90
@@ -0,0 +1,129 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!!
+!! AUTHOR: Andrew Burns (andrew.j.burns2@us.army.mil)
+!!
+!! A test of the XdmfDSM capabilities using the fortran interface.
+!! This part of the program connects to the DSM server, then writes and reads data on it.
+!!
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+
+PROGRAM XdmfFortranExample
+
+  Implicit none
+  INCLUDE 'Xdmf.f'
+  INCLUDE 'mpif.h'
+
+
+
+  INTEGER*8 obj
+  character*256 filename
+  INTEGER i
+  INTEGER id, commsize, buffersize, statusval(MPI_STATUS_SIZE)
+  INTEGER intraID, intraSize
+  INTEGER interID, interSize
+  INTEGER ierr
+  INTEGER writeloopcontrol
+
+  INTEGER interComm, intraComm
+
+  REAL*8 dataArray(4)
+
+  filename = 'nested_output.xmf'//CHAR(0)
+
+  writeloopcontrol = 0
+
+  CALL MPI_INIT (ierr)
+
+  CALL XDMFINIT(obj, filename)
+
+  CALL MPI_COMM_RANK(MPI_COMM_WORLD, id, ierr)
+  CALL MPI_COMM_SIZE(MPI_COMM_WORLD, commsize, ierr)
+
+  buffersize = 16/commsize
+
+  if (buffersize < 1) then
+    buffersize = 1
+  endif
+
+  i = 1
+
+  do while (i <= 4)
+    dataArray(i) = (id + 1) * i
+    i = i + 1
+  enddo
+
+  CALL XDMFCONNECTDSM(obj, "dsm"//CHAR(0), MPI_COMM_WORLD)
+
+  CALL XDMFGETDSMINTRACOMM(obj, intraComm)
+
+  CALL MPI_COMM_RANK(intraComm, intraID, ierr)
+  CALL MPI_COMM_SIZE(intraComm, intraSize, ierr)
+
+  CALL XDMFGETDSMINTERCOMM(obj, interComm)
+
+  CALL MPI_COMM_RANK(interComm, interID, ierr)
+  CALL MPI_COMM_SIZE(interComm, interSize, ierr)
+
+  i = 0
+  do while (i < commsize)
+    if (i == id) then
+      print *, "core " , id, "array = ", dataArray
+    endif
+    i = i + 1
+    CALL MPI_BARRIER(intraComm, ierr)
+  enddo
+
+  if (id == 0) then
+    print *, ""
+  endif
+
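+!! Each worker writes its 4 values into the shared "Data" set at offset id*4
+!! and later reads back the block written at the opposite end of the set
+!! (offset (commsize - id - 1)*4).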
+!! Work section goes here
+
+  do while (writeloopcontrol < 3)
+
+    CALL MPI_BARRIER(intraComm, ierr)
+
+    CALL XDMFWRITETODSM(obj, "Data"//CHAR(0), XDMF_ARRAY_TYPE_FLOAT64, dataArray, id*4, 1, 4, 4*commsize)
+
+    if (id == commsize - 1) then
+      CALL MPI_SEND(writeloopcontrol, 1, MPI_INTEGER, interID + 1, 0, interComm, ierr)
+      CALL MPI_RECV(writeloopcontrol, 1, MPI_INTEGER, interID + 1, 0, interComm, statusval, ierr)
+    endif
+
+    CALL MPI_BARRIER(intraComm, ierr)
+
+    CALL XDMFREADFROMDSM(obj, "Data"//CHAR(0), XDMF_ARRAY_TYPE_FLOAT64, dataArray, (commsize - id - 1)*4, 1, 4, 4*commsize)
+
+    i = 0
+    do while (i < commsize)
+      if (i == id) then
+        print *, "core " , id, "array = ", dataArray
+      endif
+      i = i + 1
+      CALL MPI_BARRIER(intraComm, ierr)
+    enddo
+
+    if (id == 0) then
+      print *, ""
+    endif
+
+    writeloopcontrol = writeloopcontrol + 1
+
+  enddo
+
+  CALL MPI_BARRIER(intraComm, ierr)
+
+  if (id == 0) then
+    CALL XDMFSTOPDSM(obj)
+  endif
+!!/Work section
+
+!! Wait for other processes to finish before closing
+
+  CALL MPI_BARRIER(interComm, ierr)
+
+  CALL XDMFCLOSE(obj)
+
+  CALL MPI_FINALIZE(ierr)
+
+END PROGRAM XdmfFortranExample
diff --git a/utils/tests/Fortran/ConnectDSMFortran2.f90 b/utils/tests/Fortran/ConnectDSMFortran2.f90
new file mode 100644
index 0000000000000000000000000000000000000000..98b382908cc1740b25892b020e8dc74f3d2440cc
--- /dev/null
+++ b/utils/tests/Fortran/ConnectDSMFortran2.f90
@@ -0,0 +1,96 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!!
+!! AUTHOR: Andrew Burns (andrew.j.burns2@us.army.mil)
+!!
+!! A test of the XdmfDSM capabilities using the fortran interface.
+!! This part of the program connects to an existing DSM server and modifies the data written there.
+!!
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+
+PROGRAM XdmfFortranExample
+
+  Implicit none
+  INCLUDE 'Xdmf.f'
+  INCLUDE 'mpif.h'
+
+
+
+  INTEGER*8 obj
+  character*256 filename
+  INTEGER id, commsize, buffersize, statusval(MPI_STATUS_SIZE)
+  INTEGER intraID, intraSize
+  INTEGER interID, interSize
+  INTEGER ierr
+  INTEGER writeloopcontrol, i
+  REAL*8 dataArray(4)
+
+  INTEGER interComm, intraComm
+
+  filename = 'nested_output.xmf'//CHAR(0)
+
+  writeloopcontrol = 0
+
+  CALL MPI_INIT (ierr)
+
+  CALL XDMFINIT(obj, filename)
+
+  CALL MPI_COMM_RANK(MPI_COMM_WORLD, id, ierr)
+  CALL MPI_COMM_SIZE(MPI_COMM_WORLD, commsize, ierr)
+
+  buffersize = 16/commsize
+
+  if (buffersize < 1) then
+    buffersize = 1
+  endif
+
+  CALL XDMFCONNECTDSM(obj, "dsm"//CHAR(0), MPI_COMM_WORLD)
+
+  CALL XDMFGETDSMINTRACOMM(obj, intraComm)
+
+  CALL MPI_COMM_RANK(intraComm, intraID, ierr)
+  CALL MPI_COMM_SIZE(intraComm, intraSize, ierr)
+
+  CALL MPI_BARRIER(intraComm, ierr)
+
+  CALL XDMFGETDSMINTERCOMM(obj, interComm)
+
+  CALL MPI_COMM_RANK(interComm, interID, ierr)
+  CALL MPI_COMM_SIZE(interComm, interSize, ierr)
+
+!! Work is done here
+
+  do while (writeloopcontrol < 3)
+
+    if (id == 0) then
+      CALL MPI_RECV(writeloopcontrol, 1, MPI_INTEGER, interID - 1, 0, interComm, statusval, ierr)
+    endif
+
+    CALL XDMFREADFROMDSM(obj, "Data"//CHAR(0), XDMF_ARRAY_TYPE_FLOAT64, dataArray, id*4, 1, 4, 4*commsize)
+
+    i = 1
+
+    do while (i <= 4)
+      dataArray(i) = dataArray(i) * 2
+      i = i + 1
+    enddo
+
+    CALL XDMFWRITETODSM(obj, "Data"//CHAR(0), XDMF_ARRAY_TYPE_FLOAT64, dataArray, id*4, 1, 4, 4*commsize)
+
+    if (id == 0) then
+      CALL MPI_SEND(writeloopcontrol, 1, MPI_INTEGER, interID - 1, 0, interComm, ierr)
+    endif
+
+    writeloopcontrol = writeloopcontrol + 1
+
+  enddo
+
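+!! Each pass above reads the current "Data" set, doubles the values, and writes
+!! them back; rank 0 exchanges the loop counter with the neighboring worker rank
+!! to keep the two connected programs in step.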
+!! Wait for other processes to finish before closing
+
+  CALL MPI_BARRIER(interComm, ierr)
+
+  CALL XDMFCLOSE(obj)
+
+  CALL MPI_FINALIZE(ierr)
+
+END PROGRAM XdmfFortranExample
diff --git a/utils/tests/Fortran/NestedInfoFortran.f90 b/utils/tests/Fortran/NestedInfoFortran.f90
new file mode 100644
index 0000000000000000000000000000000000000000..9d4d29c6f082909a6397be48908b61217eca435e
--- /dev/null
+++ b/utils/tests/Fortran/NestedInfoFortran.f90
@@ -0,0 +1,41 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!!
+!! AUTHOR: Andrew Burns (andrew.j.burns2@us.army.mil)
+!!
+!! Use the Xdmf Fortran Bindings to write out several links to
+!! data structures in different files.
+!!
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+
+PROGRAM XdmfFortranExample
+
+  Implicit none
+  INCLUDE 'Xdmf.f'
+
+
+
+  INTEGER*8 obj
+  character*256 filename
+  INTEGER tempID
+
+  filename = 'nested_output.xmf'//CHAR(0)
+
+  CALL XDMFINIT(obj, filename)
+
+  tempID = XDMFADDINFORMATION(obj, 'XIncludes'//CHAR(0), '3'//CHAR(0))
+  tempID = XDMFADDINFORMATION(obj, "testoutput.xmf"//CHAR(0), "xpointer(//Xdmf/Domain/Grid[1])"//CHAR(0))
+  tempID = XDMFADDINFORMATION(obj, "testoutput2.xmf"//CHAR(0), "xpointer(//Xdmf/Domain/Grid[1])"//CHAR(0))
+  tempID = XDMFADDINFORMATION(obj, "editedtestoutput.xmf"//CHAR(0), "xpointer(//Xdmf/Domain/Grid[1])"//CHAR(0))
+  CALL XDMFINSERTINFORMATIONINTOINFORMATION(obj, 0, 1, .TRUE.)
+  CALL XDMFINSERTINFORMATIONINTOINFORMATION(obj, 0, 1, .TRUE.)
+  CALL XDMFINSERTINFORMATIONINTOINFORMATION(obj, 0, 1, .TRUE.)
+!! The Information will be added to the Grid Collection
+  CALL XDMFADDGRIDCOLLECTION(obj, "MultiFile Reference"//CHAR(0), &
+                             XDMF_GRID_COLLECTION_TYPE_TEMPORAL)
+  CALL XDMFCLOSEGRIDCOLLECTION(obj, .TRUE.)
+  CALL XDMFWRITE(obj, filename, 10, .TRUE.)
+  CALL XDMFCLOSE(obj)
+
+
+END PROGRAM XdmfFortranExample