Commit 85669858 authored by whitlocb

Added write groups to VisIt export.

git-svn-id: http://visit.ilight.com/svn/visit/trunk/src@27171 18c085ea-50e0-402c-830e-de6fd14e8384
parent 54d558e2
......@@ -758,7 +758,13 @@ BoundaryHelperFunctions<T>::CommunicateMixedBoundaryData(const vector<int> &doma
int rank;
MPI_Comm_rank(VISIT_MPI_COMM, &rank);
int mpiMsgTag = GetUniqueMessageTag();
int tags[5];
GetUniqueMessageTags(tags, 5);
int mpiMsgTag = tags[0];
int mpiBndDataTag = tags[1];
int mpiBndMixMatTag = tags[2];
int mpiBndMixZoneTag = tags[3];
int mpiBndMixNextTag = tags[4];
for (size_t d1 = 0; d1 < sdb->boundary.size(); d1++)
{
......@@ -786,11 +792,6 @@ BoundaryHelperFunctions<T>::CommunicateMixedBoundaryData(const vector<int> &doma
}
}
int mpiBndDataTag = GetUniqueMessageTag();
int mpiBndMixMatTag = GetUniqueMessageTag();
int mpiBndMixZoneTag = GetUniqueMessageTag();
int mpiBndMixNextTag = GetUniqueMessageTag();
for (size_t d1 = 0; d1 < sdb->boundary.size(); d1++)
{
Boundary *bi = &sdb->boundary[d1];
......
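The hunks above (and the similar ones below) replace several independent GetUniqueMessageTag() calls with a single GetUniqueMessageTags(tags, n) call that reserves a block of tags at once. A minimal sketch of how such a batch allocator could be layered on a counter-based tag source; the tag range and wrap policy are assumptions, not necessarily what avtParallel.C does:

// Sketch only: a counter-based tag allocator with the batch variant
// introduced in this commit. Values are illustrative assumptions.
static int nextTag = 1000;

int GetUniqueMessageTag()
{
    const int maxTag = 32767;   // MPI guarantees tags at least this large
    if (nextTag >= maxTag)
        nextTag = 1000;         // assumed wrap-around policy
    return nextTag++;
}

void GetUniqueMessageTags(int *tags, int ntags)
{
    // Hand out ntags consecutive tags in one call so a routine's whole
    // tag set comes from a single point in the sequence.
    for (int i = 0; i < ntags; ++i)
        tags[i] = GetUniqueMessageTag();
}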
......@@ -1591,13 +1591,15 @@ avtUnstructuredDomainBoundaries::CommunicateMeshInformation(
int rank = 0;
#ifdef PARALLEL
MPI_Comm_rank(VISIT_MPI_COMM, &rank);
int mpiNPtsTag = GetUniqueMessageTag();
int mpiGainedPointsTag = GetUniqueMessageTag();
int mpiOriginalIdsTag = GetUniqueMessageTag();
int mpiNumGivenCellsTag = GetUniqueMessageTag();
int mpiCellTypesTag = GetUniqueMessageTag();
int mpiNumPointsPerCellTag = GetUniqueMessageTag();
int mpiCellPointIdsTag = GetUniqueMessageTag();
int tags[7];
GetUniqueMessageTags(tags, 7);
int mpiNPtsTag = tags[0];
int mpiGainedPointsTag = tags[1];
int mpiOriginalIdsTag = tags[2];
int mpiNumGivenCellsTag = tags[3];
int mpiCellTypesTag = tags[4];
int mpiNumPointsPerCellTag = tags[5];
int mpiCellPointIdsTag = tags[6];
#endif
gainedPoints = new T**[nTotalDomains];
......@@ -1926,8 +1928,10 @@ avtUnstructuredDomainBoundaries::CommunicateMixvarInformation(
int rank = 0;
#ifdef PARALLEL
MPI_Comm_rank(VISIT_MPI_COMM, &rank);
int mpiNDataTag = GetUniqueMessageTag();
int mpiGainedValsTag = GetUniqueMessageTag();
int tags[2];
GetUniqueMessageTags(tags, 2);
int mpiNDataTag = tags[0];
int mpiGainedValsTag = tags[1];
#endif
vals = new float**[nTotalDomains];
......@@ -2198,10 +2202,12 @@ avtUnstructuredDomainBoundaries::CommunicateMaterialInformation(
int rank = 0;
#ifdef PARALLEL
MPI_Comm_rank(VISIT_MPI_COMM, &rank);
int mpiNDataTag = GetUniqueMessageTag();
int mpiGainedMatlistTag = GetUniqueMessageTag();
int mpiGainedMixmatTag = GetUniqueMessageTag();
int mpiGainedMixvfTag = GetUniqueMessageTag();
int tags[4];
GetUniqueMessageTags(tags, 4);
int mpiNDataTag = tags[0];
int mpiGainedMatlistTag = tags[1];
int mpiGainedMixmatTag = tags[2];
int mpiGainedMixvfTag = tags[3];
#endif
gainedMatlist = new int**[nTotalDomains];
......@@ -2534,8 +2540,10 @@ avtUnstructuredDomainBoundaries::CommunicateDataInformation(
int rank = 0;
#ifdef PARALLEL
MPI_Comm_rank(VISIT_MPI_COMM, &rank);
int mpiNumTuplesTag = GetUniqueMessageTag();
int mpiTupleDataTag = GetUniqueMessageTag();
int tags[2];
GetUniqueMessageTags(tags, 2);
int mpiNumTuplesTag = tags[0];
int mpiTupleDataTag = tags[1];
#endif
int nComponents = 0;
......
......@@ -196,9 +196,12 @@ void avtCurveConstructorFilter::Execute()
MPI_Comm_size(VISIT_MPI_COMM, &numProcs);
MPI_Comm_rank(VISIT_MPI_COMM, &myRank);
int mpiNdsTag = GetUniqueMessageTag();
int mpiSizeTag = GetUniqueMessageTag();
int mpiDataTag = GetUniqueMessageTag();
int tags[3];
GetUniqueMessageTags(tags, 3);
int mpiNdsTag = tags[0];
int mpiSizeTag = tags[1];
int mpiDataTag = tags[2];
if (myRank == 0)
{
......
......@@ -54,9 +54,9 @@
#ifdef PARALLEL
#include <mpi.h>
const int SIZE_TAG = GetUniqueMessageTag();
const int BLOCKID_TAG = GetUniqueMessageTag();
const int DATA_TAG = GetUniqueMessageTag();
const int SIZE_TAG = GetUniqueStaticMessageTag();
const int BLOCKID_TAG = GetUniqueStaticMessageTag();
const int DATA_TAG = GetUniqueStaticMessageTag();
#endif
using std::string;
......
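The SIZE_TAG, BLOCKID_TAG, and DATA_TAG constants above are initialized at namespace scope, i.e. during static initialization before main() and before MPI starts up, which is presumably why they are routed through the new GetUniqueStaticMessageTag(). A sketch of the idea, with an assumed reserved range:

// Sketch only: static-duration tags come from their own counter so that
// allocations made before MPI_Init() cannot collide with, or perturb,
// the regular runtime tag sequence. The reserved range is an assumption.
int GetUniqueStaticMessageTag()
{
    static int nextStaticTag = 100;  // assumed range below the runtime tags
    return nextStaticTag++;
}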
......@@ -53,7 +53,7 @@
#include <avtDataObjectWriter.h>
#include <avtDataRequest.h>
#include <avtMetaData.h>
#include <avtParallel.h>
#include <avtParallelContext.h>
#include <avtContract.h>
#include <avtCommonDataFunctions.h>
......@@ -119,12 +119,16 @@ avtCompactTreeFilter::avtCompactTreeFilter()
// I moved the guts of the Execute method to a static helper method.
// Work partially supported by DOE Grant SC0007548.
//
// Brad Whitlock, Thu Aug 6 14:50:32 PDT 2015
// Use avtParallelContext.
//
// ****************************************************************************
void
avtCompactTreeFilter::Execute(void)
{
avtDataTree_p output = Execute(GetInput(),
avtParallelContext context;
avtDataTree_p output = Execute(context, GetInput(),
executionDependsOnDLB,
parallelMerge,
false, // skipCompact
......@@ -213,10 +217,14 @@ avtCompactTreeFilter::Execute(void)
// lets me compact datasets without a filter execution.
// Work partially supported by DOE Grant SC0007548.
//
// Brad Whitlock, Thu Aug 6 14:50:32 PDT 2015
// Use avtParallelContext so we can use this code on a subset of ranks.
//
// ****************************************************************************
avtDataTree_p
avtCompactTreeFilter::Execute(avtDataObject_p input,
avtCompactTreeFilter::Execute(avtParallelContext &context,
avtDataObject_p input,
bool executionDependsOnDLB,
bool parallelMerge,
bool skipCompact,
......@@ -243,25 +251,27 @@ avtCompactTreeFilter::Execute(avtDataObject_p input,
if (parallelMerge)
{
#ifdef PARALLEL
int mpiSendDataTag = GetUniqueMessageTag();
int mpiSendObjSizeTag = GetUniqueMessageTag();
int tags[2];
context.GetUniqueMessageTags(tags, 2);
int mpiSendDataTag = tags[0];
int mpiSendObjSizeTag = tags[1];
#endif
avtDataObject_p bigDS = input->Clone(); //GetTypedInput()->Clone();
if (PAR_UIProcess())
avtDataObject_p bigDS = input->Clone();
if (context.Rank() == 0)
{
#ifdef PARALLEL
int nprocs = PAR_Size();
int nprocs = context.Size();
for (int i = 1 ; i < nprocs ; i++)
{
avtDataObjectReader reader;
int len = 0;
MPI_Status stat;
MPI_Recv(&len, 1, MPI_INT, i, mpiSendObjSizeTag,
VISIT_MPI_COMM, &stat);
context.GetCommunicator(), &stat);
char *buff = new char[len];
MPI_Recv(buff, len, MPI_CHAR, i, mpiSendDataTag,
VISIT_MPI_COMM, &stat);
context.GetCommunicator(), &stat);
reader.Read(len, buff);
avtDataObject_p ds2 = reader.GetOutput();
bigDS->Merge(*(ds2));
......@@ -283,8 +293,8 @@ avtCompactTreeFilter::Execute(avtDataObject_p input,
int len = 0;
char *buff = NULL;
str.GetWholeString(buff, len);
MPI_Send(&len, 1, MPI_INT, 0, mpiSendObjSizeTag, VISIT_MPI_COMM);
MPI_Send(buff, len, MPI_CHAR, 0, mpiSendDataTag, VISIT_MPI_COMM);
MPI_Send(&len, 1, MPI_INT, 0, mpiSendObjSizeTag, context.GetCommunicator());
MPI_Send(buff, len, MPI_CHAR, 0, mpiSendDataTag, context.GetCommunicator());
#endif
inTree = new avtDataTree(); // Make an empty tree, so we exit early
......
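The parallelMerge branch above now funnels serialized data objects to rank 0 of the context, over context.GetCommunicator(), rather than to global rank 0 over VISIT_MPI_COMM. The same length-then-buffer pattern, reduced to self-contained MPI with plain strings standing in for avtDataObjectWriter/avtDataObjectReader and fixed tags standing in for the unique message tags:

// Self-contained sketch of the merge pattern above: rank 0 of the
// communicator receives a length, then a buffer, from every other rank,
// and concatenation stands in for avtDataObject::Merge().
#include <mpi.h>
#include <iostream>
#include <string>
#include <vector>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    MPI_Comm comm = MPI_COMM_WORLD;   // stands in for the context's communicator
    int rank = 0, size = 1;
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    const int sizeTag = 1001, dataTag = 1002;
    std::string payload = "data from rank " + std::to_string(rank);

    if (rank == 0)
    {
        std::string merged = payload;
        for (int i = 1; i < size; ++i)
        {
            int len = 0;
            MPI_Status stat;
            MPI_Recv(&len, 1, MPI_INT, i, sizeTag, comm, &stat);
            std::vector<char> buff(len);
            MPI_Recv(buff.data(), len, MPI_CHAR, i, dataTag, comm, &stat);
            merged += " | " + std::string(buff.begin(), buff.end());
        }
        std::cout << merged << std::endl;   // the "Merge" step
    }
    else
    {
        std::vector<char> out(payload.begin(), payload.end());
        int len = static_cast<int>(out.size());
        MPI_Send(&len, 1, MPI_INT, 0, sizeTag, comm);
        MPI_Send(out.data(), len, MPI_CHAR, 0, dataTag, comm);
    }
    MPI_Finalize();
    return 0;
}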
......@@ -47,6 +47,7 @@
#include <avtDatasetToDatasetFilter.h>
class avtParallelContext;
// ****************************************************************************
// Class: avtCompactTreeFilter
......@@ -90,6 +91,9 @@
// return a data tree.
// Work partially supported by DOE Grant SC0007548.
//
// Brad Whitlock, Thu Aug 6 14:49:34 PDT 2015
// Use avtParallelContext.
//
// ****************************************************************************
class PIPELINE_API avtCompactTreeFilter : public avtDatasetToDatasetFilter
......@@ -123,7 +127,8 @@ class PIPELINE_API avtCompactTreeFilter : public avtDatasetToDatasetFilter
void SetCompactDomainsMode(CompactDomainsMode mode, int threshold=-1)
{ compactDomainMode = mode; compactDomainThreshold = threshold; }
static avtDataTree_p Execute(avtDataObject_p input,
static avtDataTree_p Execute(avtParallelContext &context,
avtDataObject_p input,
bool executionDependsOnDLB,
bool parallelMerge,
bool skipCompact,
......
......@@ -148,6 +148,7 @@ Pipeline/avtNamedSelectionManager.C
Pipeline/avtNullDataReader.C
Pipeline/avtNullDataWriter.C
Pipeline/avtParallel.C
Pipeline/avtParallelContext.C
Pipeline/avtWebpage.C
Pipeline/avtExecutionManager.C
Pipeline/ThreadPool.C
......
......@@ -41,6 +41,10 @@
#include <ColorControlPointList.h>
#include <ColorControlPoint.h>
#include <ColorTableManager.h>
#ifdef PARALLEL
#include <avtParallelContext.h>
#endif
#include <DebugStream.h>
//
// Static data that describes the eight default color tables.
......@@ -1064,7 +1068,9 @@ avtColorTables::ExportColorTable(const std::string &ctName,
// Creation: Thu Jul 3 18:30:07 PST 2003
//
// Modifications:
//
// Brad Whitlock, Mon Aug 17 15:15:18 PDT 2015
// Only read the color table on rank 0 and broadcast.
//
// ****************************************************************************
void
......@@ -1074,7 +1080,22 @@ avtColorTables::ImportColorTables()
// Create a color table manager to import the color tables and store
// them in the ctAtts.
//
ColorTableManager importer;
importer.ImportColorTables(ctAtts);
#ifdef PARALLEL
avtParallelContext par;
if(par.Rank() == 0)
{
debug5 << "Reading color tables and sending..." << endl;
#endif
ColorTableManager importer;
importer.ImportColorTables(ctAtts);
#ifdef PARALLEL
par.BroadcastAttributes(*ctAtts);
}
else
{
debug5 << "Receiving color tables..." << endl;
par.BroadcastAttributes(*ctAtts);
}
#endif
}
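The restructured ImportColorTables() above reads the color-table files only on rank 0 and ships the result to everyone else via BroadcastAttributes(), instead of having every rank hit the file system. The general shape of that pattern, sketched with a raw byte buffer standing in for the serialized AttributeGroup; LoadColorTablesFromDisk is a hypothetical stand-in for the ColorTableManager import:

#include <mpi.h>
#include <algorithm>
#include <string>
#include <vector>

// Hypothetical stand-in for the ColorTableManager file import.
static std::string LoadColorTablesFromDisk()
{
    return "hot cold rainbow";   // placeholder payload
}

void ImportSharedColorTables(MPI_Comm comm, std::string &tables)
{
    int rank = 0;
    MPI_Comm_rank(comm, &rank);
    int len = 0;
    if (rank == 0)
    {
        tables = LoadColorTablesFromDisk();  // only rank 0 touches the files
        len = static_cast<int>(tables.size());
    }
    MPI_Bcast(&len, 1, MPI_INT, 0, comm);    // all ranks learn the size
    std::vector<char> buff(len);
    if (rank == 0)
        std::copy(tables.begin(), tables.end(), buff.begin());
    MPI_Bcast(buff.data(), len, MPI_CHAR, 0, comm);
    tables.assign(buff.begin(), buff.end());
}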
......@@ -336,10 +336,12 @@ avtDataObjectInformation::ParallelMerge(const avtDataObjectWriter_p dobw)
MPI_Comm_size(VISIT_MPI_COMM, &commSize);
MPI_Comm_rank(VISIT_MPI_COMM, &myRank);
int mpiResultLenTag = GetUniqueMessageTag();
int mpiResultStrTag = GetUniqueMessageTag();
int mpiSwapLenTag = GetUniqueMessageTag();
int mpiSwapStrTag = GetUniqueMessageTag();
int tags[4];
GetUniqueMessageTags(tags, 4);
int mpiResultLenTag = tags[0];
int mpiResultStrTag = tags[1];
int mpiSwapLenTag = tags[2];
int mpiSwapStrTag = tags[3];
groupSize = 1;
// walk up the communication tree, swapping and merging infos
......
......@@ -56,17 +56,20 @@ class AttributeGroup;
PIPELINE_API extern void *VISIT_MPI_COMM_PTR;
#endif
PIPELINE_API void Barrier(void);
PIPELINE_API bool Collect(float *, int);
PIPELINE_API bool Collect(double *, int);
PIPELINE_API bool Collect(int *, int);
PIPELINE_API void PAR_Exit(void);
PIPELINE_API void PAR_Init(int &argc, char **&argv);
PIPELINE_API int PAR_Rank(void);
PIPELINE_API int PAR_Size(void);
PIPELINE_API bool PAR_UIProcess(void);
PIPELINE_API void PAR_WaitForDebugger(void);
PIPELINE_API bool PAR_SetComm(void *);
PIPELINE_API bool PAR_SetComm(void *);
PIPELINE_API void PullInMPI_IOSymbols();
// NOTE: Think about deprecating these in favor of avtParallelContext.
PIPELINE_API void Barrier(void);
PIPELINE_API bool Collect(float *, int);
PIPELINE_API bool Collect(double *, int);
PIPELINE_API bool Collect(int *, int);
PIPELINE_API void SumIntAcrossAllProcessors(int&);
PIPELINE_API void SumLongAcrossAllProcessors(long&);
PIPELINE_API void SumFloatAcrossAllProcessors(float&);
......@@ -111,6 +114,7 @@ PIPELINE_API void CollectIntArraysOnRootProc(int *&, int *&, int *, int);
PIPELINE_API void CollectDoubleArraysOnRootProc(double *&, int *&, double *, int);
PIPELINE_API int GetUniqueMessageTag();
PIPELINE_API void GetUniqueMessageTags(int *tags, int ntags);
PIPELINE_API int GetUniqueStaticMessageTag();
PIPELINE_API void GetAttToRootProc(AttributeGroup &, int);
......@@ -121,5 +125,4 @@ PIPELINE_API void WaitSome(std::vector<int> &reqs, std::vector<int> &done, st
PIPELINE_API void TestSome(std::vector<int> &reqs, std::vector<int> &done, std::vector<int> &status );
PIPELINE_API void CancelRequest(void *req);
PIPELINE_API void PullInMPI_IOSymbols();
#endif
/*****************************************************************************
*
* Copyright (c) 2000 - 2015, Lawrence Livermore National Security, LLC
* Produced at the Lawrence Livermore National Laboratory
* LLNL-CODE-442911
* All rights reserved.
*
* This file is part of VisIt. For details, see https://visit.llnl.gov/. The
* full copyright notice is contained in the file COPYRIGHT located at the root
* of the VisIt distribution or at http://www.llnl.gov/visit/copyright.html.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the disclaimer below.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the disclaimer (as noted below) in the
* documentation and/or other materials provided with the distribution.
* - Neither the name of the LLNS/LLNL nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY,
* LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
*****************************************************************************/
#ifndef AVT_PARALLEL_CONTEXT_H
#define AVT_PARALLEL_CONTEXT_H
#include <pipeline_exports.h>
#include <vector>
#include <string>
#ifdef PARALLEL
#include <mpi.h>
#endif
class AttributeGroup;
// ****************************************************************************
// Class: avtParallelContext
//
// Purpose:
// A context that can be used for parallel operations.
//
// Notes: This class provides the typical AVT parallel operations, but it
// performs them on the ranks specified by an internal communicator. By
// accessing parallel operations through this class, algorithms use the
// right communicator without MPI communicators having to be exposed all
// over the place.
//
// Programmer: Brad Whitlock
// Creation: Tue Aug 4 15:43:45 PDT 2015
//
// Modifications:
//
// ****************************************************************************
class PIPELINE_API avtParallelContext
{
public:
avtParallelContext();
avtParallelContext(const avtParallelContext &);
~avtParallelContext();
void operator = (const avtParallelContext &);
#ifdef PARALLEL
// Direct access to communicator (for now).
MPI_Comm GetCommunicator();
void SetCommunicator(MPI_Comm, bool owns = false);
#endif
/**
Rank within the group.
*/
int Rank();
/**
Number of ranks within this rank's group.
*/
int Size();
/**
Rank of this group within the total number of groups. This will
be set to 0 if the default avtParallelContext is used.
*/
int GroupRank();
/**
The number of groups into which the global group was partitioned. This will
be set to 1 if the default avtParallelContext is used.
*/
int GroupSize();
/**
Rank within the global group.
*/
int GlobalRank();
/**
Size of the global group.
*/
int GlobalSize();
/**
Create a new parallel context consisting of a set of ranks.
*/
avtParallelContext CreateGroup(const std::vector<int> &ranks);
/**
Create a new parallel context in which the ranks are divided into groups of N.
*/
avtParallelContext CreateGroupsOfN(int N);
/**
Assume we have group ids 0..nGroups-1 and make a new parallel context
in which this rank joins the specified group. If multiple ranks pass a
groupId of 5, those ranks join the same new communicator; other
groupIds join other communicators.
*/
avtParallelContext Split(int groupId, int nGroups);
// Transplanted avtParallel.h functions that operate on
// the local communicator.
void Barrier(void);
bool Collect(float *, int);
bool Collect(double *, int);
bool Collect(int *, int);
void SumIntAcrossAllProcessors(int&);
void SumLongAcrossAllProcessors(long&);
void SumFloatAcrossAllProcessors(float&);
void SumFloatArrayAcrossAllProcessors(float *, float *, int);
void SumFloatArray(float *, float *, int);
void SumDoubleAcrossAllProcessors(double&);
void SumDoubleArrayAcrossAllProcessors(double *, double *,int);
void SumDoubleArray(double *, double *,int);
void SumDoubleArrayInPlace(double *, int);
void SumIntArrayAcrossAllProcessors(int *, int *, int);
void SumLongLongArrayAcrossAllProcessors(VISIT_LONG_LONG*, VISIT_LONG_LONG*, int);
bool ThisProcessorHasMinimumValue(double);
bool ThisProcessorHasMaximumValue(double);
void UnifyMinMax(double *, int, int=0);
int UnifyMaximumValue(int);
float UnifyMaximumValue(float);
int UnifyMinimumValue(int);
float UnifyMinimumValue(float);
void UnifyMaximumValue(std::vector<int>&, std::vector<int>&);
void UnifyMinimumFloatArrayAcrossAllProcessors(float *, float *, int);
void UnifyMaximumFloatArrayAcrossAllProcessors(float *, float *, int);
void UnifyMinimumDoubleArrayAcrossAllProcessors(double *, double *, int);
void UnifyMaximumDoubleArrayAcrossAllProcessors(double *, double *, int);
void BroadcastInt(int &i);
void BroadcastLongLong(VISIT_LONG_LONG &i);
void BroadcastIntArray(int *array, int nArray);
void BroadcastIntVector(std::vector<int>&, int myrank);
void BroadcastBool(bool &b);
void BroadcastBoolVector(std::vector<bool> &b, int myrank);
void BroadcastDouble(double &i);
void BroadcastDoubleArray(double *array, int nArray);
void BroadcastDoubleVector(std::vector<double>&, int myrank);
void BroadcastString(std::string &s, int myrank);
void BroadcastStringVector(std::vector<std::string>&,
int myrank);
void BroadcastStringVectorVector(std::vector<std::vector<std::string> >&,
int myrank);
void BroadcastAttributes(AttributeGroup &atts);
bool GetListToRootProc(std::vector<std::string> &, int);
void CollectIntArraysOnRootProc(int *&, int *&, int *, int);
void CollectDoubleArraysOnRootProc(double *&, int *&, double *, int);
int GetUniqueMessageTag();
void GetUniqueMessageTags(int *tags, int ntags);
int GetUniqueStaticMessageTag();
void GetAttToRootProc(AttributeGroup &, int);
void GetFloatArrayToRootProc(float *, int, bool &);
void GetDoubleArrayToRootProc(double *, int, bool &);
void WaitAll(std::vector<int> &reqs, std::vector<int> &status );
void WaitSome(std::vector<int> &reqs, std::vector<int> &done, std::vector<int> &status );
void TestSome(std::vector<int> &reqs, std::vector<int> &done, std::vector<int> &status );
void CancelRequest(void *req);
static void Init();
private:
class PrivateData;
PrivateData *d;
};
#endif
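A hypothetical usage sketch, based only on the declarations above: split the default context (which presumably spans all ranks) into groups, so that the familiar AVT helpers operate on a subset of ranks:

#include <avtParallelContext.h>

void ExampleGroupUse()
{
    avtParallelContext global;                       // default: all ranks
    avtParallelContext group = global.CreateGroupsOfN(8);

    // Reductions now stay inside this rank's group of (up to) 8 ranks.
    int count = 1;
    group.SumIntAcrossAllProcessors(count);

    if (group.Rank() == 0)
    {
        // Each group leader can identify its group and coordinate with
        // the other leaders in the global context if needed.
        int whichGroup = group.GroupRank();
        (void)whichGroup;
    }
}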
......@@ -44,6 +44,7 @@
#define AVT_DATABASE_WRITER_H
#include <avtTerminatingDatasetSink.h>
#include <avtParallelContext.h>
#include <string>
#include <vector>
......@@ -104,6 +105,9 @@ class vtkPolyData;
// methods to let this class perform polydata aggregation.
// Work partially supported by DOE Grant SC0007548.
//
// Brad Whitlock, Thu Aug 6 16:47:50 PDT 2015
// Added support for writing using groups of MPI ranks.
//
// ****************************************************************************
class PIPELINE_API avtDatabaseWriter : public virtual avtTerminatingDatasetSink
......@@ -126,12 +130,19 @@ class PIPELINE_API avtDatabaseWriter : public virtual avtTerminatingDatasetSink
avtDatabaseWriter();
virtual ~avtDatabaseWriter();
void Write(const std::string &, const avtDatabaseMetaData *);
void Write(const std::string &filename,
const avtDatabaseMetaData *md);
void Write(const std::string &plotName,
const std::string &filename,
const avtDatabaseMetaData *md,
std::vector<std::string> &vars,
bool allVars);
void Write(const std::string &plotName,
const std::string &filename,
const avtDatabaseMetaData *md,
std::vector<std::string> &vars,
bool allVars = true);
bool allVars,
bool writeUsingGroups, int groupSize);
void SetShouldAlwaysDoMIR(bool s)
{ shouldAlwaysDoMIR = s; };
......@@ -148,6 +159,8 @@ class PIPELINE_API avtDatabaseWriter : public virtual avtTerminatingDatasetSink
void SetVariableList(std::vector<std::string> &);
void SetContractToUse(avtContract_p ps);
void SetWriteContext(avtParallelContext &);
avtParallelContext &GetWriteContext();
protected:
bool shouldAlwaysDoMIR;
bool shouldNeverDoMIR;
......@@ -161,19 +174,23 @@ class PIPELINE_API avtDatabaseWriter : public virtual avtTerminatingDatasetSink
int nTargetChunks;
VISIT_LONG_LONG targetTotalZones;
avtContract_p savedContract;
avtContract_p savedContract;
avtParallelContext writeContext;
virtual bool CanHandleMaterials(void) { return false; };
virtual void CheckCompatibility(const std::string &);
virtual void OpenFile(const std::string &, int) = 0;
virtual void WriteHeaders(const avtDatabaseMetaData *,
std::vector<std::string>&,std::vector<std::string>&,
std::vector<std::string> &) = 0;
const std::vector<std::string>&scalars,
const std::vector<std::string>&vectors,
const std::vector<std::string>&materials) = 0;
virtual void BeginPlot(const std::string &);
virtual void WriteChunk(vtkDataSet *, int) = 0;
virtual void WriteChunk(vtkDataSet *, int, int, const std::string &);
virtual void WriteChunk(vtkDataSet *, int) = 0; // DEPRECATED VERSION
virtual void EndPlot(const std::string &);
virtual void CloseFile(void) = 0;
virtual void WriteRootFile();
virtual bool SupportsTargetChunks(void) { return false; };
virtual bool SupportsTargetZones(void) { return false; };
......@@ -182,6 +199,7 @@ class PIPELINE_API avtDatabaseWriter : public virtual avtTerminatingDatasetSink
virtual CombineMode GetCombineMode(const std::string &plotName) const;
virtual bool CreateTrianglePolyData() const;
virtual bool CreateNormals() const;
virtual bool SequentialOutput() const;
std::string GetMeshName(const avtDatabaseMetaData *md) const;
double GetTime() const;
......@@ -213,10 +231,22 @@ class PIPELINE_API avtDatabaseWriter : public virtual avtTerminatingDatasetSink
const std::vector<std::string> &mats,
bool &changed);
vtkPolyData *CreateSinglePolyData(avtDataTree_p rootnode);
vtkPolyData *CreateSinglePolyData(avtParallelContext &context,
avtDataTree_p rootnode);
std::vector<vtkPolyData *> ConvertDatasetsIntoPolyData(vtkDataSet **ds, int nds);
vtkPolyData *CombinePolyData(const std::vector<vtkPolyData *> &pds);
std::vector<vtkPolyData *> SendPolyDataToRank0(const std::vector<vtkPolyData *> &pds);
std::vector<vtkPolyData *> SendPolyDataToRank0(avtParallelContext &context,
const std::vector<vtkPolyData *> &pds);
void GroupWrite(const std::string &plotName,
const std::string &filename,
const avtDatabaseMetaData *md,
const std::vector<std::string> &scalarList,
const std::vector<std::string> &vectorList,
const std::vector<std::string> &materialList,
int numTotalChunks, int startIndex,
int tag, bool writeUsingGroups, int groupSize);
void WaitForTurn(int tag, int &nWritten);
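The GroupWrite() and WaitForTurn() declarations suggest a turn-taking protocol in which write groups emit their chunks sequentially, each waiting for a go-ahead message on the given tag before touching the file. A speculative sketch of that token-passing idea in plain MPI; the committed avtDatabaseWriter logic may differ:

#include <mpi.h>

// Each rank (or group leader) blocks for a token from its predecessor,
// performs its file I/O, then forwards the token, serializing the writes.
void WriteInTurn(MPI_Comm comm, int tag, void (*writeChunks)())
{
    int rank = 0, size = 1;
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    int nWritten = 0;
    if (rank > 0)
    {
        MPI_Status stat;
        MPI_Recv(&nWritten, 1, MPI_INT, rank - 1, tag, comm, &stat);
    }
    writeChunks();            // this writer's turn
    ++nWritten;
    if (rank + 1 < size)
        MPI_Send(&nWritten, 1, MPI_INT, rank + 1, tag, comm);
}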