Commit f5c88a96 authored by Andrew J. Burns (Cont)

Improved backwards compatibility and Fortran interface with file splitting functions

parent 861177e9
......@@ -392,7 +392,7 @@ public:
{
unsigned int size = mStartIndex + mNumValues;
if(mArrayStride > 1) {
size = mStartIndex + mNumValues * mArrayStride - (mStartIndex%mArrayStride);
size = mStartIndex + (mNumValues - 1) * mArrayStride + 1;
}
if(array->size() < size) {
array->resize(size);
......@@ -1579,7 +1579,34 @@ XdmfArray::insert(const unsigned int startIndex,
values),
mArray);
}
/*
//TODO
void
XdmfArray::insert(const std::vector<unsigned int> startIndex,
const shared_ptr<const XdmfArray> values,
const std::vector<unsigned int> valuesStartIndex,
const std::vector<unsigned int> numValues,
const std::vector<unsigned int> arrayStride,
const std::vector<unsigned int> valuesStride)
{
if (startIndex.size() == 1 && arrayStride.size() == 1)//if array being inserted into has been reduced to one dimension
{
//check if the array being inserted has been reduced to one dimension
if (valuesStartIndex.size() == 1 && valuesStride.size() == 1 && numValues.size() == 1)//if it has been
{
//do a 1d insert
}
else
{
//reduce dimensions by splitting and calling recursively
}
}
else//otherwise reduce dimensions by splitting and calling recursively
{
}
}
*/
bool
XdmfArray::isInitialized() const
{
......
......@@ -55,6 +55,10 @@ class XdmfHeavyDataController;
* disk using the XdmfHeavyDataController. After the values have been
* read from heavy data on disk, isInitialized() will return true.
*
* This version of Xdmf allows for multiple controllers to be added to
* a single array. Be aware that doing this makes the files written
* incompatible with previous editions.
*
* XdmfArray allows for insertion and retrieval of data in two
* fundamental ways:
*
......@@ -1020,6 +1024,35 @@ public:
const unsigned int arrayStride = 1,
const unsigned int valuesStride = 1);
/**
* Insert values from an XdmfArray into this array. This is the multidimensional version.
*
* Example of use:
*
* C++
*
* @dontinclude ExampleXdmfArray.cpp
*
* Python
*
* @dontinclude XdmfExampleArray.py
*
* @param startIndex the index in this array to begin insertion.
* @param values a shared pointer to an XdmfArray to copy into this array.
* @param valuesStartIndex the index in the XdmfArray to begin copying.
* @param numValues the number of values to copy into this array.
* @param arrayStride number of values to stride in this array between each
* copy.
* @param valuesStride number of values to stride in the XdmfArray between
* each copy.
*/
/* void insert(const std::vector<unsigned int> startIndex,
const shared_ptr<const XdmfArray> values,
const std::vector<unsigned int> valuesStartIndex,
const std::vector<unsigned int> numValues,
const std::vector<unsigned int> arrayStride,
const std::vector<unsigned int> valuesStride);*/
/**
* Insert values into this array.
*
......
......@@ -52,6 +52,7 @@ public:
mChunkSize(DEFAULT_CHUNK_SIZE),
mOpenFile(""),
mHDF5FileSizeLimit(-1),
mAllowSplitDataSets(false),
mDepth(0),
mFileIndex(0)
{
......@@ -118,6 +119,7 @@ public:
unsigned int mChunkSize;
std::string mOpenFile;
int mHDF5FileSizeLimit;
bool mAllowSplitDataSets;
int mFileIndex;
int mDepth;
std::set<const XdmfItem *> mWrittenItems;
......@@ -177,6 +179,18 @@ XdmfHDF5Writer::getFileSizeLimit()
return mImpl->mHDF5FileSizeLimit;
}
// Enable or disable splitting a single data set across multiple hdf5
// files when the configured file size limit is reached. Defaults to
// false; per the class documentation, leaving it false keeps written
// files compatible with earlier Xdmf versions.
void
XdmfHDF5Writer::setAllowSetSplitting(bool newAllow)
{
mImpl->mAllowSplitDataSets = newAllow;
}
// Reports whether data-set splitting across hdf5 files is enabled.
// NOTE(review): the member is a bool but the header declares this
// accessor as returning int, so the value is implicitly converted to
// 0/1 here -- presumably kept as int for the C/Fortran bindings;
// confirm before changing the return type to bool.
int
XdmfHDF5Writer::getAllowSetSplitting()
{
return mImpl->mAllowSplitDataSets;
}
void
XdmfHDF5Writer::setFileIndex(int newSize)
{
......@@ -506,7 +520,6 @@ XdmfHDF5Writer::write(XdmfArray & array,
}
else if (mMode == Hyperslab) {
hyperslabSize = checksize;
printf("size of existing hyperslab = %d\n", hyperslabSize);
}
}
if (fileSize == 0) {
......@@ -973,15 +986,11 @@ else
{
//calculate the number of values of the data type you're using will fit
unsigned int usableSpace = (mImpl->mHDF5FileSizeLimit*(1024*1024) - fileSize) / dataItemSize;
printf("fileSize = %d\n", fileSize);
printf("previousDataSize = %d\n", previousDataSize);
if (mImpl->mHDF5FileSizeLimit*(1024*1024) < fileSize)
{
usableSpace = 0;
}
printf("usableSpace = %d\n", usableSpace);
usableSpace += hyperslabSize-previousDataSize;
printf("usableSpace after adjustment = %d\n", usableSpace);
//if the array hasn't been split
if (amountAlreadyWritten == 0)
......@@ -991,66 +1000,79 @@ else
//otherwise split it.
if ((remainingValues * dataItemSize) + 800 > mImpl->mHDF5FileSizeLimit*(1024*1024) && usableSpace > 0)
{
//figure out the size of the largest block that will fit.
unsigned int blockSizeSubtotal = 1;
int dimensionIndex = 0;
//find the dimension that was split
while (dimensionIndex < dataspaceDimensions.size() && blockSizeSubtotal <= usableSpace)
{
blockSizeSubtotal *= dataspaceDimensions[dimensionIndex];
dimensionIndex++;
}//It should end on the "blockSizeSubtotal <= arrayStartIndex" statement, the other half is for backup
//move back one dimension so we're working on the dimension that was split, not the one after it
dimensionIndex--;
blockSizeSubtotal /= dataspaceDimensions[dimensionIndex];
//determine how many of those blocks will fit
unsigned int numBlocks = usableSpace / blockSizeSubtotal;//this should be less than the current value for the dimension
//add dimensions as required.
int j = 0;
for (j = 0; j < dimensionIndex; j++)
{
partialStarts.push_back(start[j]);
partialStrides.push_back(stride[j]);
partialDimensions.push_back(dimensions[j]);
partialDataSizes.push_back(dataspaceDimensions[j]);
}
if (start[j] > numBlocks)
{
partialStarts.push_back(numBlocks-1);
}
else
{
partialStarts.push_back(start[j]);
}
partialStrides.push_back(stride[j]);
partialDataSizes.push_back(numBlocks);
if (dimensions[j] == dataspaceDimensions[j])//this is for non-hyperslab and specific cases of hyperslab
if (mImpl->mAllowSplitDataSets)
{
partialDimensions.push_back(numBlocks);
}
else
{//for hyperslab in general
//determine how many values from the array will fit into the blocks being used with the dimensions specified
unsigned int displacement = numBlocks / stride[j];
if (((int)displacement * (int)stride[j]) + (start[j] % stride[j]) < numBlocks)
//figure out the size of the largest block that will fit.
unsigned int blockSizeSubtotal = 1;
int dimensionIndex = 0;
//find the dimension that was split
while (dimensionIndex < dataspaceDimensions.size() && blockSizeSubtotal <= usableSpace)
{
blockSizeSubtotal *= dataspaceDimensions[dimensionIndex];
dimensionIndex++;
}//It should end on the "blockSizeSubtotal <= arrayStartIndex" statement, the other half is for backup
//move back one dimension so we're working on the dimension that was split, not the one after it
dimensionIndex--;
blockSizeSubtotal /= dataspaceDimensions[dimensionIndex];
//determine how many of those blocks will fit
unsigned int numBlocks = usableSpace / blockSizeSubtotal;//this should be less than the current value for the dimension
//add dimensions as required.
int j = 0;
for (j = 0; j < dimensionIndex; j++)
{
displacement++;
partialStarts.push_back(start[j]);
partialStrides.push_back(stride[j]);
partialDimensions.push_back(dimensions[j]);
partialDataSizes.push_back(dataspaceDimensions[j]);
}
displacement -= start[j]/stride[j];
if (start[j] > numBlocks)
{
displacement = 0;
partialStarts.push_back(numBlocks-1);
}
if (dimensions[j] <= displacement)//if there are less values than there are space for, just write all of them.
else
{
partialDimensions.push_back(dimensions[j]);
partialStarts.push_back(start[j]);
}
else//otherwise write what space allows for
partialStrides.push_back(stride[j]);
partialDataSizes.push_back(numBlocks);
if (dimensions[j] == dataspaceDimensions[j])//this is for non-hyperslab and specific cases of hyperslab
{
partialDimensions.push_back(displacement);
partialDimensions.push_back(numBlocks);
}
else
{//for hyperslab in general
//determine how many values from the array will fit into the blocks being used with the dimensions specified
unsigned int displacement = numBlocks / stride[j];
if (((int)displacement * (int)stride[j]) + (start[j] % stride[j]) < numBlocks)
{
displacement++;
}
displacement -= start[j]/stride[j];
if (start[j] > numBlocks)
{
displacement = 0;
}
if (dimensions[j] <= displacement)//if there are less values than there are space for, just write all of them.
{
partialDimensions.push_back(dimensions[j]);
}
else//otherwise write what space allows for
{
partialDimensions.push_back(displacement);
}
}
}
else
{
//just pass all data to the partial vectors
for (int j = 0; j < dimensions.size(); j++)//done using a loop so that data is copied, not referenced
{
partialStarts.push_back(start[j]);
partialStrides.push_back(stride[j]);
partialDimensions.push_back(dimensions[j]);
partialDataSizes.push_back(dataspaceDimensions[j]);
}
}
}
......@@ -1148,7 +1170,6 @@ else
}
}
//move to next file
printf("moving to the next file\n");
mImpl->mFileIndex++;
}
}
......
......@@ -123,6 +123,46 @@ public:
*/
int getFileSizeLimit();
/**
* Sets whether to allow the HDF5 writer to split data sets when writing to hdf5.
* Splitting should only occur for massive data sets.
* Setting to false assures compatibility with previous editions.
* Default setting is false
*
* Example of use:
*
* C++
*
* @dontinclude ExampleXdmfHDF5Writer.cpp
*
* Python
*
* @dontinclude XdmfExampleHDF5Writer.py
*
* @param newAllow whether to allow data sets to be split across hdf5 files
*/
void setAllowSetSplitting(bool newAllow);
/**
* Gets whether the HDF5 Writer is allowed to split data sets when writing to hdf5.
* Splitting should only occur for massive data sets.
* Setting to false assures compatibility with previous editions.
* Default setting is false.
*
* Example of use:
*
* C++
*
* @dontinclude ExampleXdmfHDF5Writer.cpp
*
* Python
*
* @dontinclude XdmfExampleHDF5Writer.py
*
* @return whether to allow data sets to be split across hdf5 files
*/
int getAllowSetSplitting();
/**
* Sets the file index. Used when file splitting and incremented when the current file is full. Set to 0 before using hyperslab or overwrite.
*
......
......@@ -320,11 +320,17 @@ XdmfWriter::visit(XdmfArray & array,
}
}
valuesStream.str(std::string());//clear the stream
valuesStream << heavyDataPath << ":"
<< array.getHeavyDataController(i)->getDataSetPath()
<< "|" << dimensionStream.str();
if (i + 1 < array.getNumberHeavyDataControllers()){
valuesStream << "|";
if (array.getNumberHeavyDataControllers() > 1) {
valuesStream << heavyDataPath << ":"
<< array.getHeavyDataController(i)->getDataSetPath()
<< "|" << dimensionStream.str();
if (i + 1 < array.getNumberHeavyDataControllers()){
valuesStream << "|";
}
}
else {
valuesStream << heavyDataPath << ":"
<< array.getHeavyDataController(i)->getDataSetPath();
}
xmlTextValues.push_back(valuesStream.str());
}
......
......@@ -14,7 +14,7 @@ int main(int, char **)
{
writtenArray->pushBack(i);
}
/* std::vector<unsigned int> starts;
std::vector<unsigned int> starts;
std::vector<unsigned int> strides;
std::vector<unsigned int> dimensions;
std::vector<unsigned int> dataspaces;
......@@ -38,10 +38,11 @@ int main(int, char **)
writtenArray->insert(arrayController);
starts[0] = 5;
arrayController = XdmfHDF5Controller::New("arraydata.h5", "Data", XdmfArrayType::Int32(), starts, strides, dimensions, dataspaces);
writtenArray->insert(arrayController);*/
writtenArray->insert(arrayController);
shared_ptr<XdmfHDF5Writer> arrayHeavyWriter = XdmfHDF5Writer::New("arraydata.h5");
arrayHeavyWriter->setFileSizeLimit(1);
// arrayHeavyWriter->setMode(XdmfHDF5Writer::Hyperslab);
//arrayHeavyWriter->setAllowSetSplitting(true);
arrayHeavyWriter->setMode(XdmfHDF5Writer::Hyperslab);
shared_ptr<XdmfWriter> arrayWriter = XdmfWriter::New("arraydata.xmf", arrayHeavyWriter);
arrayWriter->setLightDataLimit(5);
writtenArray->accept(arrayWriter);
......
......@@ -9,7 +9,7 @@ integer xdmfstoremap
integer xdmfaddcoordinate
integer xdmfaddset
integer xdmfsettopologypolyline
......
......@@ -270,7 +270,9 @@ XdmfFortran::XdmfFortran() :
mTopology(shared_ptr<XdmfTopology>()),
mBrick(shared_ptr<XdmfArray>()),
mOrigin(shared_ptr<XdmfArray>()),
mDimensions(shared_ptr<XdmfArray>())
mDimensions(shared_ptr<XdmfArray>()),
mMaxFileSize(0),
mAllowSetSplitting(false)
{
}
......@@ -355,8 +357,9 @@ XdmfFortran::addAttribute(const char * const name,
}
void
XdmfFortran::addGrid(const char * const name, int gridType)
XdmfFortran::addGrid(const char * const name)
{
/*
if (gridType == XDMF_GRID_TYPE_CURVILINEAR)
{
if(mDimensions == NULL) {
......@@ -432,34 +435,31 @@ XdmfFortran::addGrid(const char * const name, int gridType)
mTime,
mDomain,
mGridCollections);
}
else if (gridType == XDMF_GRID_TYPE_UNSTRUCTURED)
{
const shared_ptr<XdmfUnstructuredGrid> grid = XdmfUnstructuredGrid::New();
grid->setName(name);
}*/
const shared_ptr<XdmfUnstructuredGrid> grid = XdmfUnstructuredGrid::New();
grid->setName(name);
if(mGeometry == NULL) {
XdmfError::message(XdmfError::FATAL,
"Must set geometry before adding grid.");
}
if(mGeometry == NULL) {
XdmfError::message(XdmfError::FATAL,
"Must set geometry before adding grid.");
}
if(mTopology == NULL) {
XdmfError::message(XdmfError::FATAL,
"Must set topology before adding grid.");
}
if(mTopology == NULL) {
XdmfError::message(XdmfError::FATAL,
"Must set topology before adding grid.");
}
grid->setGeometry(mGeometry);
grid->setTopology(mTopology);
grid->setGeometry(mGeometry);
grid->setTopology(mTopology);
insertElements(grid,
mAttributes,
mInformations,
mSets,
mMaps,
mTime,
mDomain,
mGridCollections);
}
insertElements(grid,
mAttributes,
mInformations,
mSets,
mMaps,
mTime,
mDomain,
mGridCollections);
}
void
......@@ -5884,13 +5884,26 @@ XdmfFortran::clearPrevious()
// Records whether the hdf5 writers built by write()/writeHDF5() may
// split a large data set across multiple files. Only stored here; the
// flag is forwarded to XdmfHDF5Writer::setAllowSetSplitting at write
// time. Enabling it removes compatibility with previous versions.
void
XdmfFortran::setAllowSetSplitting(bool newAllow)
{
mAllowSetSplitting = newAllow;
}
// Sets the hdf5 file size (in MB) at which the writer moves to a new
// file; 0 means no splitting (the default).
//
// @param newSize the new maximum file size; negative values are treated
//                as 0 because the backing member is unsigned and a
//                negative int would otherwise wrap to a huge limit.
void
XdmfFortran::setMaxFileSize(int newSize)
{
  // mMaxFileSize is an unsigned int; clamp rather than let a negative
  // request silently become an enormous limit.
  if (newSize < 0) {
    newSize = 0;
  }
  mMaxFileSize = newSize;
}
void
XdmfFortran::write(const char * const xmlFilePath, const int datalimit, const bool release)
{
shared_ptr<XdmfWriter> writer = XdmfWriter::New(xmlFilePath);
writer->setLightDataLimit(datalimit);
writer->setLightDataLimit(mMaxFileSize);
writer->getHeavyDataWriter()->setReleaseData(release);
shared_dynamic_cast<XdmfHDF5Writer>(writer->getHeavyDataWriter())->setFileSizeLimit(1);
shared_dynamic_cast<XdmfHDF5Writer>(writer->getHeavyDataWriter())->setFileSizeLimit(mMaxFileSize);
shared_dynamic_cast<XdmfHDF5Writer>(writer->getHeavyDataWriter())->setAllowSetSplitting(mAllowSetSplitting);
mDomain->accept(writer);
}
......@@ -5898,6 +5911,8 @@ void
XdmfFortran::writeHDF5(const char * const xmlFilePath)
{
  // Write only the heavy (hdf5) data for the current domain.
  shared_ptr<XdmfHDF5Writer> writer = XdmfHDF5Writer::New(xmlFilePath);
  // Always release array data after writing in this path.
  writer->setReleaseData( true );
  // Propagate the Fortran-side file-splitting configuration.
  writer->setFileSizeLimit(mMaxFileSize);
  writer->setAllowSetSplitting(mAllowSetSplitting);
  mDomain->accept(writer);
}
......@@ -5970,11 +5985,10 @@ extern "C"
void
XdmfAddGrid(long * pointer,
char * gridName,
int * gridType)
char * gridName)
{
XdmfFortran * xdmfFortran = reinterpret_cast<XdmfFortran *>(*pointer);
xdmfFortran->addGrid(gridName, *gridType);
xdmfFortran->addGrid(gridName);
}
void
......@@ -7221,6 +7235,20 @@ extern "C"
void
XdmfSetAllowSetSplitting(long * pointer, bool * newAllow)
{
XdmfFortran * xdmfFortran = reinterpret_cast<XdmfFortran *>(*pointer);
xdmfFortran->setAllowSetSplitting(*newAllow);
}
void
XdmfSetMaxFileSize(long * pointer, int * newSize)
{
XdmfFortran * xdmfFortran = reinterpret_cast<XdmfFortran *>(*pointer);
xdmfFortran->setMaxFileSize(*newSize);
}
void
XdmfWrite(long * pointer,
char * xmlFilePath,
......
......@@ -159,6 +159,8 @@ class XdmfUnstructuredGrid;
#define XdmfSetPreviousTopology xdmfsetprevioustopology_
#define XdmfSetTime xdmfsettime_
#define XdmfSetTopology xdmfsettopology_
#define XdmfSetAllowSetSplitting xdmfsetallowsetsplitting_
#define XdmfSetMaxFileSize xdmfsetmaxfilesize_
#define XdmfWrite xdmfwrite_
#define XdmfRead xdmfread_
#define XdmfWriteHDF5 xdmfwritehdf5_
......@@ -382,9 +384,8 @@ public:
* are placed in mAttributes, mInformations, and mSets
*
* @param name of the grid.
* @param gridType the type of the grid represented as an integer Ex: XDMF_GRID_TYPE_UNSTRUCTURED
*/
void addGrid(const char * const name, int gridType);
void addGrid(const char * const name);
/**
* Add grid collection to domain or collection. Inserts attributes
......@@ -2172,8 +2173,24 @@ public:
/**
* Sets whether to allow the hdf5 writer to split large data sets across multiple files.
* Setting to true removes compatibility with previous versions.
* When off it will write sets that will not fit into a file to the current file
* before moving to the next should file splitting be enabled.
* Default is off (false).
*
* @param newAllow whether to allow splitting or not
*/
void setAllowSetSplitting(bool newAllow);
/**
* Sets the file size at which the hdf5 writer will move to a new file.
* Default is no splitting (value=0)
*
* @param newSize new maximum file size before moving
*/
void setMaxFileSize(int newSize);
/**
* Write constructed file to disk.
......@@ -2243,6 +2260,9 @@ private:
std::vector<shared_ptr<XdmfArray> > mPreviousCoordinates;
std::vector<shared_ptr<XdmfMap> > mPreviousMaps;
unsigned int mMaxFileSize;
bool mAllowSetSplitting;
};
......
......@@ -122,7 +122,7 @@ PROGRAM XdmfFortranExample
CALL XDMFSETTIME(obj, myTime)
!! Unstructured Only
tempID = XDMFSETTOPOLOGY(obj, XDMF_TOPOLOGY_TYPE_HEXAHEDRON, 16, &
XDMF_ARRAY_TYPE_INT32, myConnections)
XDMF_ARRAY_TYPE_INT32, myConnections, 0)
!! /Unstructured Only
!! Curvilinear and Rectilinear Only
tempID = XDMFSETDIMENSIONS(obj, 3, XDMF_ARRAY_TYPE_INT32, myDimensions)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment