Commit 6e0d6c9b authored by Andrew J. Burns (Cont)

Added DSM capabilities to the Fortran interface (simple array passing); also added a test for Fortran DSM

parent 8a80f9a7
@@ -29,7 +29,7 @@ int main(int argc, char *argv[])
// Initializing objects
//since the start and end ids are larger than the size there are no buffers allotted
-//thus, not blockage occurs
+//thus, no blockage occurs
XdmfDSMCommMPI * testComm = new XdmfDSMCommMPI();
testComm->DupComm(comm);
testComm->Init();
@@ -3,7 +3,7 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR})
option(XDMF_BUILD_EXODUS_IO OFF)
option(XDMF_BUILD_PARTITIONER OFF)
option(XDMF_BUILD_FORTRAN OFF)
option(XDMF_BUILD_DSM OFF)
set(XdmfUtilsSources
XdmfDiff
@@ -15,6 +15,10 @@ if(XDMF_BUILD_FORTRAN)
set(XdmfUtilsSources ${XdmfUtilsSources} XdmfFortran)
endif(XDMF_BUILD_FORTRAN)
if(XDMF_BUILD_DSM)
add_definitions(-DXDMF_BUILD_DSM)
endif(XDMF_BUILD_DSM)
if(XDMF_BUILD_EXODUS_IO)
set(XDMF_SWIG_FLAGS ${XDMF_SWIG_FLAGS} -DXDMF_BUILD_EXODUS_IO)
find_package(Exodus REQUIRED)
This diff is collapsed.
@@ -41,12 +41,16 @@ class XdmfRectilinearGrid;
class XdmfRegularGrid;
class XdmfUnstructuredGrid;
class XdmfHeavyDataWriter;
class XdmfHDF5WriterDSM;
//Includes
#include <stack>
#include <vector>
#include "XdmfUtils.hpp"
#include "XdmfSharedPtr.hpp"
#ifdef XDMF_BUILD_DSM
#include "mpi.h"
#endif
/**
* Array Type
@@ -167,6 +171,15 @@ class XdmfHeavyDataWriter;
#define XdmfSetMaxFileSize xdmfsetmaxfilesize_
#define XdmfWrite xdmfwrite_
#define XdmfRead xdmfread_
#define XdmfInitDSMServer xdmfinitdsmserver_
#define XdmfAcceptDSM xdmfacceptdsm_
#define XdmfCloseDSMPort xdmfclosedsmport_
#define XdmfConnectDSM xdmfconnectdsm_
#define XdmfGetDSMInterComm xdmfgetdsmintercomm_
#define XdmfGetDSMIntraComm xdmfgetdsmintracomm_
#define XdmfStopDSM xdmfstopdsm_
#define XdmfReadFromDSM xdmfreadfromdsm_
#define XdmfWriteToDSM xdmfwritetodsm_
#define XdmfWriteHDF5 xdmfwritehdf5_
#define XdmfInitHDF5 xdmfinithdf5_
#define XdmfSetTopologyPolyline xdmfsettopologypolyline_
@@ -2887,6 +2900,112 @@ public:
*/
void writeHDF5(const char * const xmlFilePath, const bool release);
#ifdef XDMF_BUILD_DSM
/**
* Starts up a DSM server on the cores from the start index to the end index.
* The server cores are blocked at this call and will not proceed
* until the DSM is stopped.
*
* @param filePath A string denoting the file path to the virtual file.
* @param comm The Communicator between all cores,
* worker and server.
* @param bufferSize The size of the memory buffer allocated per core.
* @param startCoreIndex The core index on which the dsm section begins.
* @param endCoreIndex The core index on which the dsm section ends.
*/
void initDSMServer(const char * const filePath,
MPI_Comm comm,
int bufferSize,
int startCoreIndex,
int endCoreIndex);
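Through the Fortran bindings above (XDMFINITDSMSERVER, XDMFACCEPTDSM) the server-side call sequence is short. A minimal sketch, mirroring the test program below and assuming an XdmfFortran handle obj from XDMFINIT plus id, commsize, and buffersize already set:

!! Cores 1 through commsize-1 become DSM server cores and block
!! inside this call until the DSM is stopped.
CALL XDMFINITDSMSERVER(obj, 'dsm'//CHAR(0), MPI_COMM_WORLD, buffersize, 1, commsize-1)
!! Core 0 falls through and accepts two incoming worker connections.
if (id == 0) then
CALL XDMFACCEPTDSM(obj, 2)
endif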
/**
* Accepts new connections to an initialized DSM server.
*
* @param numConnections The number of incoming connections to accept.
*/
void acceptDSM(int numConnections);
/**
* Closes the currently open DSM connection port.
*/
void closeDSMPort();
/**
* Connects to an initialized DSM server.
*
* @param filePath The file path that the DSMWriter will write to;
* it should match the path used on the server side.
* @param comm The local communicator to be connected to the
* server communicator.
*/
void connectDSM(const char * const filePath, MPI_Comm comm);
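On the worker side, a minimal sketch of connecting through the bindings (same assumptions as the server sketch above, with intraComm and interComm declared as INTEGER; this mirrors the tests below):

!! Connect this MPI job to a running DSM server; the virtual file
!! path must match the one the server was started with.
CALL XDMFCONNECTDSM(obj, 'dsm'//CHAR(0), MPI_COMM_WORLD)
!! intraComm spans only this worker group; interComm spans everything
!! attached to the DSM, including the server cores.
CALL XDMFGETDSMINTRACOMM(obj, intraComm)
CALL XDMFGETDSMINTERCOMM(obj, interComm)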
/**
* Gets the communicator that spans the entire DSM and everything connected to it.
*
* @return The overarching communicator.
*/
MPI_Comm getDSMInterComm();
/**
* Gets the local communicator that the worker cores share.
*
* @return The communicator for the local worker group.
*/
MPI_Comm getDSMIntraComm();
/**
* Stops the currently running DSM server.
*/
void stopDSM();
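In the tests below only a single worker core issues the stop; a sketch (assuming id is the rank in the worker job's MPI_COMM_WORLD):

!! One core stops the DSM; the blocked server cores then return
!! from XDMFINITDSMSERVER and may proceed.
if (id == 0) then
CALL XDMFSTOPDSM(obj)
endif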
/**
* Writes the provided values to the DSM at the location described by
* the provided start, stride, dimensions, and dataspace.
*
* @param dsmDataSetPath The path to the dataset being written to.
* Overwrites the current DSM contents if different.
* @param arrayType The data type of the data to be written to the set.
* @param values A pointer to the values to be written.
* @param start The starting index to write to.
* @param stride The increment between written values.
* @param dimensions The number of data items written.
* @param dataspace The total number of data items in the dataset.
*/
void writeToDSM(const char * const dsmDataSetPath,
const int arrayType,
void * values,
const int start,
const int stride,
const int dimensions,
const int dataspace);
/**
* Reads the data in the DSM at the location described by the
* start, stride, dimensions, and dataspace, and places it at the
* provided pointer.
*
* @param dsmDataSetPath The path to the dataset being read.
* @param arrayType The data type of the data being read.
* @param values A pointer to the location that the data
* will be stored at.
* @param start The starting index of the read.
* @param stride The increment between read values.
* @param dimensions The number of values to be read.
* @param dataspace The total number of data items in the dataset.
*/
void readFromDSM(const char * const dsmDataSetPath,
const int arrayType,
void * values,
const int start,
const int stride,
const int dimensions,
const int dataspace);
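Together, start, stride, dimensions, and dataspace select a 1-D hyperslab. As a worked sketch drawn from the tests below: each of commsize ranks owns a contiguous block of four REAL*8 values in a shared dataset of 4*commsize entries, and a read at the mirrored offset fetches the block another rank wrote:

!! Rank id writes 4 contiguous values (stride 1) at offset id*4
!! into a dataset holding 4*commsize values in total.
CALL XDMFWRITETODSM(obj, 'Data'//CHAR(0), XDMF_ARRAY_TYPE_FLOAT64, dataArray, id*4, 1, 4, 4*commsize)
!! Reading at the mirrored offset retrieves the block written by
!! rank (commsize - id - 1).
CALL XDMFREADFROMDSM(obj, 'Data'//CHAR(0), XDMF_ARRAY_TYPE_FLOAT64, dataArray, (commsize - id - 1)*4, 1, 4, 4*commsize)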
#endif
/**
* Generate the persistent HDF5 writer so it doesn't need to be generated later
*
@@ -2974,6 +3093,7 @@ private:
shared_ptr<XdmfArray> mOrigin;
shared_ptr<XdmfArray> mDimensions;
shared_ptr<XdmfHeavyDataWriter> mHeavyDataWriter;
shared_ptr<XdmfHDF5WriterDSM> mDSMWriter;
std::vector<shared_ptr<XdmfAttribute> > mAttributes;
std::vector<shared_ptr<XdmfArray> > mCoordinates;
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!
!! AUTHOR: Andrew Burns (andrew.j.burns2@us.army.mil)
!!
!! A test of the XdmfDSM capabilities using the fortran interface.
!! This part of the program sets up the DSM server.
!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
PROGRAM XdmfFortranExample
Implicit none
INCLUDE 'Xdmf.f'
INCLUDE 'mpif.h'
INTEGER*8 obj
character*256 filename
INTEGER id, commsize, buffersize
INTEGER intraID, intraSize
INTEGER ierr
INTEGER interComm
filename = 'nested_output.xmf'//CHAR(0)
CALL MPI_INIT(ierr)
CALL XDMFINIT(obj, filename)
CALL MPI_COMM_RANK(MPI_COMM_WORLD, id, ierr)
CALL MPI_COMM_SIZE(MPI_COMM_WORLD, commsize, ierr)
!! Split a 16-unit buffer across the cores; each core gets at least 1.
buffersize = 16/commsize
if (buffersize < 1) then
buffersize = 1
endif
!! Cores 1 to size-1 are blocked by the initialization of the server.
!! When the server finishes they are released and allowed to progress.
CALL XDMFINITDSMSERVER(obj, "dsm"//CHAR(0), MPI_COMM_WORLD, bufferSize, 1, commsize-1)
if (id == 0) then
CALL XDMFACCEPTDSM(obj, 2)
endif
CALL XDMFGETDSMINTERCOMM(obj, interComm)
!! Wait for other processes to finish before closing
CALL MPI_BARRIER(interComm, ierr)
CALL XDMFCLOSEDSMPORT(obj)
CALL XDMFCLOSE(obj)
CALL MPI_FINALIZE(ierr)
END PROGRAM XdmfFortranExample
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!
!! AUTHOR: Andrew Burns (andrew.j.burns2@us.army.mil)
!!
!! A test of the XdmfDSM capabilities using the fortran interface.
!! This part of the program connects to the DSM as a worker, exchanges
!! data with the second worker job, and stops the server when finished.
!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
PROGRAM XdmfFortranExample
Implicit none
INCLUDE 'Xdmf.f'
INCLUDE 'mpif.h'
INTEGER*8 obj
character*256 filename
INTEGER i
INTEGER id, commsize, buffersize, statusval(MPI_STATUS_SIZE)
INTEGER intraID, intraSize
INTEGER interID, interSize
INTEGER ierr
INTEGER writeloopcontrol
INTEGER interComm, intraComm
REAL*8 dataArray(4)
filename = 'nested_output.xmf'//CHAR(0)
writeloopcontrol = 0
CALL MPI_INIT(ierr)
CALL XDMFINIT(obj, filename)
CALL MPI_COMM_RANK(MPI_COMM_WORLD, id, ierr)
CALL MPI_COMM_SIZE(MPI_COMM_WORLD, commsize, ierr)
!! Split a 16-unit buffer across the cores; each core gets at least 1.
buffersize = 16/commsize
if (buffersize < 1) then
buffersize = 1
endif
i = 1
do while (i <= 4)
dataArray(i) = (id + 1) * i
i = i + 1
enddo
CALL XDMFCONNECTDSM(obj, "dsm"//CHAR(0), MPI_COMM_WORLD)
CALL XDMFGETDSMINTRACOMM(obj, intraComm)
CALL MPI_COMM_RANK(intraComm, intraID, ierr)
CALL MPI_COMM_SIZE(intraComm, intraSize, ierr)
CALL XDMFGETDSMINTERCOMM(obj, interComm)
CALL MPI_COMM_RANK(interComm, interID, ierr)
CALL MPI_COMM_SIZE(interComm, interSize, ierr)
i = 0
do while (i < commsize)
if (i == id) then
print *, "core " , id, "array = ", dataArray
endif
i = i + 1
CALL MPI_BARRIER(intraComm, ierr)
enddo
if (id == 0) then
print *, ""
endif
!! Work section goes here
do while (writeloopcontrol < 3)
CALL MPI_BARRIER(intraComm, ierr)
CALL XDMFWRITETODSM(obj, "Data"//CHAR(0), XDMF_ARRAY_TYPE_FLOAT64, dataArray, id*4, 1, 4, 4*commsize)
if (id == commsize - 1) then
CALL MPI_SEND(writeloopcontrol, 1, MPI_INTEGER, interID + 1, 0, interComm, ierr)
CALL MPI_RECV(writeloopcontrol, 1, MPI_INTEGER, interID + 1, 0, interComm, statusval, ierr)
endif
CALL MPI_BARRIER(intraComm, ierr)
CALL XDMFREADFROMDSM(obj, "Data"//CHAR(0), XDMF_ARRAY_TYPE_FLOAT64, dataArray, (commsize - id - 1)*4, 1, 4, 4*commsize)
i = 0
do while (i < commsize)
if (i == id) then
print *, "core " , id, "array = ", dataArray
endif
i = i + 1
CALL MPI_BARRIER(intraComm, ierr)
enddo
if (id == 0) then
print *, ""
endif
writeloopcontrol = writeloopcontrol + 1
enddo
CALL MPI_BARRIER(intraComm, ierr)
if (id == 0) then
CALL XDMFSTOPDSM(obj)
endif
!!/Work section
!! Wait for other processes to finish before closing
CALL MPI_BARRIER(interComm, ierr)
CALL XDMFCLOSE(obj)
CALL MPI_FINALIZE(ierr)
END PROGRAM XdmfFortranExample
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!
!! AUTHOR: Andrew Burns (andrew.j.burns2@us.army.mil)
!!
!! A test of the XdmfDSM capabilities using the fortran interface.
!! This part of the program connects to the DSM as a second worker and
!! doubles the data written by the first worker job.
!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
PROGRAM XdmfFortranExample
Implicit none
INCLUDE 'Xdmf.f'
INCLUDE 'mpif.h'
INTEGER*8 obj
character*256 filename
INTEGER id, commsize, buffersize, statusval(MPI_STATUS_SIZE)
INTEGER intraID, intraSize
INTEGER interID, interSize
INTEGER ierr
INTEGER writeloopcontrol, i
REAL*8 dataArray(4)
INTEGER interComm, intraComm
filename = 'nested_output.xmf'//CHAR(0)
writeloopcontrol = 0
CALL MPI_INIT(ierr)
CALL XDMFINIT(obj, filename)
CALL MPI_COMM_RANK(MPI_COMM_WORLD, id, ierr)
CALL MPI_COMM_SIZE(MPI_COMM_WORLD, commsize, ierr)
!! Split a 16-unit buffer across the cores; each core gets at least 1.
buffersize = 16/commsize
if (buffersize < 1) then
buffersize = 1
endif
CALL XDMFCONNECTDSM(obj, "dsm"//CHAR(0), MPI_COMM_WORLD)
CALL XDMFGETDSMINTRACOMM(obj, intraComm)
CALL MPI_COMM_RANK(intraComm, intraID, ierr)
CALL MPI_COMM_SIZE(intraComm, intraSize, ierr)
CALL MPI_BARRIER(intraComm, ierr)
CALL XDMFGETDSMINTERCOMM(obj, interComm)
CALL MPI_COMM_RANK(interComm, interID, ierr)
CALL MPI_COMM_SIZE(interComm, interSize, ierr)
!! Work is done here
do while (writeloopcontrol < 3)
if (id == 0) then
CALL MPI_RECV(writeloopcontrol, 1, MPI_INTEGER, interID - 1, 0, interComm, statusval, ierr)
endif
CALL XDMFREADFROMDSM(obj, "Data"//CHAR(0), XDMF_ARRAY_TYPE_FLOAT64, dataArray, id*4, 1, 4, 4*commsize)
i = 1
do while (i <= 4)
dataArray(i) = dataArray(i) * 2
i = i + 1
enddo
CALL XDMFWRITETODSM(obj, "Data"//CHAR(0), XDMF_ARRAY_TYPE_FLOAT64, dataArray, id*4, 1, 4, 4*commsize)
if (id == 0) then
CALL MPI_SEND(writeloopcontrol, 1, MPI_INTEGER, interID - 1, 0, interComm, ierr)
endif
writeloopcontrol = writeloopcontrol + 1
enddo
!! Wait for other processes to finish before closing
CALL MPI_BARRIER(interComm, ierr)
CALL XDMFCLOSE(obj)
CALL MPI_FINALIZE(ierr)
END PROGRAM XdmfFortranExample
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!
!! AUTHOR: Andrew Burns (andrew.j.burns2@us.army.mil)
!!
!! Use the Xdmf Fortran Bindings to write out several links to
!! data structures in different files.
!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
PROGRAM XdmfFortranExample
Implicit none
INCLUDE 'Xdmf.f'
INTEGER*8 obj
character*256 filename
INTEGER tempID
filename = 'nested_output.xmf'//CHAR(0)
CALL XDMFINIT(obj, filename)
tempID = XDMFADDINFORMATION(obj, 'XIncludes'//CHAR(0), '3'//CHAR(0))
tempID = XDMFADDINFORMATION(obj, "testoutput.xmf"//CHAR(0), "xpointer(//Xdmf/Domain/Grid[1])"//CHAR(0))
tempID = XDMFADDINFORMATION(obj, "testoutput2.xmf"//CHAR(0), "xpointer(//Xdmf/Domain/Grid[1])"//CHAR(0))
tempID = XDMFADDINFORMATION(obj, "editedtestoutput.xmf"//CHAR(0), "xpointer(//Xdmf/Domain/Grid[1])"//CHAR(0))
CALL XDMFINSERTINFORMATIONINTOINFORMATION(obj, 0, 1, .TRUE.)
CALL XDMFINSERTINFORMATIONINTOINFORMATION(obj, 0, 1, .TRUE.)
CALL XDMFINSERTINFORMATIONINTOINFORMATION(obj, 0, 1, .TRUE.)
!! The Information will be added to the Grid Collection
CALL XDMFADDGRIDCOLLECTION(obj, "MultiFile Reference"//CHAR(0), &
XDMF_GRID_COLLECTION_TYPE_TEMPORAL)
CALL XDMFCLOSEGRIDCOLLECTION(obj, .TRUE.)
CALL XDMFWRITE(obj, filename, 10, .TRUE.)
CALL XDMFCLOSE(obj)
END PROGRAM XdmfFortranExample