Commit 48e15454 authored by David E. DeMarle

add temporal processing to Catalyst and add an ex post facto demo

This change inserts an optional vtkTemporalDataSetCache
between the vtkDataObjects produced by the simulation adaptor and the
processing pipeline. The cache then acts as a time-varying source
so that Catalyst pipelines can apply temporal filters.

The API for the feature is demonstrated by the new TemporalCacheExample.
It includes use in a C++ pipeline and two Python pipelines.
The second Python pipeline is unique in that it demonstrates an
after-the-fact, or "ex post facto", trigger. See the comments in FEDriver
for how to run the example.
parent de4749ed
Showing with 1329 additions and 0 deletions
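For orientation, here is a minimal adaptor-side sketch of the new API, condensed from the TemporalCacheExample added below. The "volume" input name, the cache size of 100, and the time, timeStep, and volume variables are that example's choices, not requirements of the API.

vtkCPProcessor* processor = vtkCPProcessor::New();
processor->SetTemporalCacheSize(100);   // number of timesteps each cache keeps
processor->Initialize();
processor->MakeTemporalCache("volume"); // one cache per input that will be processed temporally

// ... then, on every simulation timestep:
vtkNew<vtkCPDataDescription> dataDescription;
dataDescription->AddInput("volume");
dataDescription->SetTimeData(time, timeStep);
if (processor->RequestDataDescription(dataDescription.GetPointer()))
{
  vtkCPInputDataDescription* idd = dataDescription->GetInputDescriptionByName("volume");
  idd->SetGrid(volume); // the simulation's vtkDataObject for this timestep
  if (vtkSMSourceProxy* cache = processor->GetTemporalCache("volume"))
  {
    idd->SetTemporalCache(cache); // the cache must be re-associated every timestep
  }
  processor->CoProcess(dataDescription.GetPointer());
}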
......@@ -98,6 +98,9 @@ endif ()
option(PARAVIEW_USE_MPI "Enable MPI support for parallel computing" OFF)
option(PARAVIEW_USE_CUDA "Support CUDA compilation" OFF)
option(PARAVIEW_USE_VTKM "Enable VTK-m accelerated algorithms" "${PARAVIEW_ENABLE_NONESSENTIAL}")
if (UNIX AND NOT APPLE)
option(PARAVIEW_USE_MEMKIND "Build support for extended memory" OFF)
endif ()
# Add option to disable Fortran
if (NOT WIN32)
......
......@@ -408,6 +408,8 @@ else ()
set(VTK_USE_MPI "${PARAVIEW_USE_MPI}")
# Forward CUDA option to VTK
set(VTK_USE_CUDA "${PARAVIEW_USE_CUDA}")
# Forward Memkind option to VTK.
set(VTK_USE_MEMKIND "${PARAVIEW_USE_MEMKIND}")
# Forward logging option to VTK.
set(VTK_ENABLE_LOGGING ON)
......
......@@ -11,6 +11,7 @@ GROUPS
PRIVATE_DEPENDS
ParaView::RemotingApplication
VTK::FiltersGeneral
VTK::FiltersHybrid
VTK::vtksys
OPTIONAL_DEPENDS
VTK::ParallelMPI
......
......@@ -20,6 +20,7 @@
#include "vtkDataSet.h"
#include "vtkObjectFactory.h"
#include "vtkPointData.h"
#include "vtkSMSourceProxy.h"
#include <algorithm>
#include <map>
......@@ -35,6 +36,7 @@ public:
vtkStandardNewMacro(vtkCPInputDataDescription);
vtkCxxSetObjectMacro(vtkCPInputDataDescription, Grid, vtkDataObject);
vtkCxxSetObjectMacro(vtkCPInputDataDescription, TemporalCache, vtkSMSourceProxy);
//----------------------------------------------------------------------------
vtkCPInputDataDescription::vtkCPInputDataDescription()
{
......@@ -44,6 +46,7 @@ vtkCPInputDataDescription::vtkCPInputDataDescription()
this->Internals = new vtkCPInputDataDescription::vtkInternals();
this->WholeExtent[0] = this->WholeExtent[2] = this->WholeExtent[4] = 0;
this->WholeExtent[1] = this->WholeExtent[3] = this->WholeExtent[5] = -1;
this->TemporalCache = nullptr;
}
//----------------------------------------------------------------------------
......@@ -55,6 +58,7 @@ vtkCPInputDataDescription::~vtkCPInputDataDescription()
delete this->Internals;
this->Internals = NULL;
}
this->SetTemporalCache(nullptr);
}
//----------------------------------------------------------------------------
......@@ -173,6 +177,7 @@ void vtkCPInputDataDescription::ShallowCopy(vtkCPInputDataDescription* idd)
this->SetGrid(idd->Grid);
memcpy(this->WholeExtent, idd->WholeExtent, 6 * sizeof(int));
this->Internals->Fields = idd->Internals->Fields;
this->SetTemporalCache(idd->TemporalCache);
}
//----------------------------------------------------------------------------
......
......@@ -18,6 +18,7 @@
class vtkDataObject;
class vtkDataSet;
class vtkFieldData;
class vtkSMSourceProxy;
#include "vtkObject.h"
#include "vtkPVCatalystModule.h" // For windows import/export of shared libraries
......@@ -97,6 +98,16 @@ public:
// Shallow copy.
void ShallowCopy(vtkCPInputDataDescription*);
// Description:
// Set the temporal cache. Adaptors can get a cache from the coprocessor
// object and apply it to the InputDataDescription when they want to
// use temporal filters in their pipelines.
void SetTemporalCache(vtkSMSourceProxy* cache);
// Description:
// Get the temporal cache.
vtkGetObjectMacro(TemporalCache, vtkSMSourceProxy);
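// A minimal usage sketch, condensed from the example adaptor in this commit
// ('processor' and 'idd' are hypothetical variable names):
//   vtkSMSourceProxy* cache = processor->GetTemporalCache("volume");
//   if (cache) { idd->SetTemporalCache(cache); }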
protected:
vtkCPInputDataDescription();
~vtkCPInputDataDescription() override;
......@@ -113,6 +124,10 @@ protected:
// The grid for coprocessing. The grid is not owned by the object.
vtkDataObject* Grid;
// Description:
// The temporal cache associated with the grid. The cache is not owned by the object.
vtkSMSourceProxy* TemporalCache;
private:
vtkCPInputDataDescription(const vtkCPInputDataDescription&) = delete;
void operator=(const vtkCPInputDataDescription&) = delete;
......
......@@ -23,6 +23,7 @@
#include "vtkDataObject.h"
#include "vtkDoubleArray.h"
#include "vtkFieldData.h"
#include "vtkInformation.h"
#if VTK_MODULE_ENABLE_VTK_ParallelMPI
#include "vtkMPI.h"
#include "vtkMPICommunicator.h"
......@@ -36,10 +37,14 @@
#include "vtkSMProxy.h"
#include "vtkSMProxyManager.h"
#include "vtkSMSessionProxyManager.h"
#include "vtkSMSourceProxy.h"
#include "vtkSmartPointer.h"
#include "vtkStringArray.h"
#include "vtkTemporalDataSetCache.h"
#include <list>
#include <map>
#include <string>
#include <vtksys/SystemTools.hxx>
struct vtkCPProcessorInternals
......@@ -47,6 +52,10 @@ struct vtkCPProcessorInternals
typedef std::list<vtkSmartPointer<vtkCPPipeline> > PipelineList;
typedef PipelineList::iterator PipelineListIterator;
PipelineList Pipelines;
typedef std::map<std::string, vtkSmartPointer<vtkSMSourceProxy> > CacheList;
typedef CacheList::iterator CacheListIterator;
CacheList TemporalCaches;
};
vtkStandardNewMacro(vtkCPProcessor);
......@@ -57,6 +66,7 @@ vtkCPProcessor::vtkCPProcessor()
this->Internal = new vtkCPProcessorInternals;
this->InitializationHelper = nullptr;
this->WorkingDirectory = nullptr;
this->TemporalCacheSize = 0;
}
//----------------------------------------------------------------------------
......@@ -256,6 +266,21 @@ int vtkCPProcessor::CoProcess(vtkCPDataDescription* dataDescription)
time->SetTypedComponent(0, 0, dataDescription->GetTime());
time->SetName("TimeValue");
input->GetFieldData()->AddArray(time);
input->GetInformation()->Set(vtkDataObject::DATA_TIME_STEP(), dataDescription->GetTime());
if (this->GetTemporalCacheSize() > 0)
{
vtkSMSourceProxy* cacheForInput =
this->GetTemporalCache(dataDescription->GetInputDescriptionName(i));
if (cacheForInput)
{
vtkTemporalDataSetCache* tc =
vtkTemporalDataSetCache::SafeDownCast(cacheForInput->GetClientSideObject());
tc->SetInputDataObject(input);
tc->UpdateTimeStep(dataDescription->GetTime());
}
}
}
}
......@@ -351,3 +376,60 @@ void vtkCPProcessor::PrintSelf(ostream& os, vtkIndent indent)
{
this->Superclass::PrintSelf(os, indent);
}
//----------------------------------------------------------------------------
void vtkCPProcessor::SetTemporalCacheSize(int nv)
{
if (this->TemporalCacheSize == nv)
{
return;
}
this->TemporalCacheSize = nv;
for (vtkCPProcessorInternals::CacheListIterator it = this->Internal->TemporalCaches.begin();
it != this->Internal->TemporalCaches.end(); it++)
{
vtkTemporalDataSetCache* tc =
vtkTemporalDataSetCache::SafeDownCast(it->second.GetPointer()->GetClientSideObject());
tc->SetCacheSize(this->TemporalCacheSize);
}
this->Modified();
}
//----------------------------------------------------------------------------
void vtkCPProcessor::MakeTemporalCache(const char* name)
{
if (this->Internal->TemporalCaches.find(name) != this->Internal->TemporalCaches.end())
{
// a cache with this name already exists; nothing to do
return;
}
// We have to make a ParaView-level object so that Python can grab and work
// with it. Unfortunately this has to wait until ParaView is running, or
// we crash with no sessionProxyManager.
vtkSMProxyManager* proxyManager = vtkSMProxyManager::GetProxyManager();
vtkSMSessionProxyManager* sessionProxyManager = proxyManager->GetActiveSessionProxyManager();
if (!sessionProxyManager)
{
return;
}
vtkSmartPointer<vtkSMSourceProxy> producer;
producer.TakeReference(vtkSMSourceProxy::SafeDownCast(
sessionProxyManager->NewProxy("sources", "TemporalCache"))); // note: source
producer->UpdateVTKObjects();
vtkTemporalDataSetCache* tc =
vtkTemporalDataSetCache::SafeDownCast(producer->GetClientSideObject());
tc->SetCacheSize(this->TemporalCacheSize);
tc->CacheInMemkindOn();
this->Internal->TemporalCaches[name] = producer;
}
//----------------------------------------------------------------------------
vtkSMSourceProxy* vtkCPProcessor::GetTemporalCache(const char* name)
{
if (this->Internal->TemporalCaches.find(name) == this->Internal->TemporalCaches.end())
{
return nullptr;
}
return this->Internal->TemporalCaches[name];
}
......@@ -23,6 +23,7 @@ class vtkCPDataDescription;
class vtkCPPipeline;
class vtkMPICommunicatorOpaqueComm;
class vtkMultiProcessController;
class vtkSMSourceProxy;
/// @defgroup CoProcessing ParaView CoProcessing
/// The CoProcessing library is designed to be called from parallel
......@@ -75,6 +76,15 @@ public:
virtual void RemovePipeline(vtkCPPipeline* pipeline);
virtual void RemoveAllPipelines();
// Controls cache size, in terms of timesteps, available for temporal
// data caching. Default is zero, which disables any caching.
virtual void SetTemporalCacheSize(int);
vtkGetMacro(TemporalCacheSize, int);
// Accessors to a specific temporal cache. Names match the CPInputData names.
virtual void MakeTemporalCache(const char* name);
virtual vtkSMSourceProxy* GetTemporalCache(const char* name);
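// A minimal setup sketch, condensed from the example adaptor in this commit
// ('processor' is a hypothetical vtkCPProcessor instance):
//   processor->SetTemporalCacheSize(100);
//   processor->Initialize();
//   processor->MakeTemporalCache("volume"); // name matches the CPInputData name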
/// Initialize the co-processor. Returns 1 if successful and 0
/// otherwise. If Catalyst is built with MPI then Initialize()
/// can also be called with a specific MPI communicator if
......@@ -140,6 +150,7 @@ private:
vtkObject* InitializationHelper;
static vtkMultiProcessController* Controller;
char* WorkingDirectory;
int TemporalCacheSize = 0;
};
#endif
......@@ -2,6 +2,7 @@ cmake_minimum_required(VERSION 3.10)
project(CatalystExamples C CXX)
include(CMakeDependentOption)
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/../../VTK/CMake")
cmake_dependent_option(USE_CATALYST "Link the simulator with Catalyst" ON
"NOT WIN32" OFF)
if (USE_CATALYST)
......@@ -63,6 +64,7 @@ endif()
add_subdirectory(MPISubCommunicatorExample)
add_subdirectory(PythonDolfinExample)
#add_subdirectory(PythonFullExample)
add_subdirectory(TemporalCacheExample)
option(BUILD_FORTRAN_EXAMPLES "Build Fortran Catalyst Examples" OFF)
if (BUILD_FORTRAN_EXAMPLES)
......
if (USE_CATALYST)
add_library(CachingExampleAdaptor
FEAdaptor.cxx
FEAdaptor.h)
target_link_libraries(CachingExampleAdaptor
INTERFACE
VTK::PythonUsed
PUBLIC
ParaView::PythonCatalyst
ParaView::RemotingServerManager
VTK::CommonCore
VTK::CommonDataModel
VTK::CommonSystem
VTK::FiltersGeneral
VTK::FiltersHybrid
VTK::IOXML)
endif ()
add_executable(CachingExample
FEDataStructures.cxx
FEDataStructures.h
FEDriver.cxx)
if (TARGET CachingExampleAdaptor)
target_link_libraries(CachingExample
PRIVATE
CachingExampleAdaptor
VTK::mpi)
else ()
target_link_libraries(CachingExample
PRIVATE
MPI::MPI_C)
endif ()
if (BUILD_TESTING)
add_test(NAME CachingExampleTest COMMAND CachingExample ${CMAKE_CURRENT_SOURCE_DIR}/SampleScripts/exPostFactoMovie.py)
endif()
#include "FEAdaptor.h"
#include "FEDataStructures.h"
#include <iostream>
#include <vtkCPDataDescription.h>
#include <vtkCPInputDataDescription.h>
#include <vtkCPProcessor.h>
#include <vtkCPPythonScriptPipeline.h>
#include <vtkCellData.h>
#include <vtkDoubleArray.h>
#include <vtkImageData.h>
#include <vtkInformation.h>
#include <vtkIntArray.h>
#include <vtkNew.h>
#include <vtkPointData.h>
#include <vtkSMSourceProxy.h>
#include <vtkStreamingDemandDrivenPipeline.h>
#include <vtkTemporalDataSetCache.h>
#include <vtkTemporalStatistics.h>
#include <vtkUnstructuredGrid.h>
#include <vtkXMLDataSetWriter.h>
namespace CPPPipeline
{
class vtkCPTestPipeline : public vtkCPPipeline
{
// A sample C++ pipeline that incorporates a temporal filter
public:
vtkTypeMacro(vtkCPTestPipeline, vtkCPPipeline);
static vtkCPTestPipeline* New();
virtual int RequestDataDescription(vtkCPDataDescription* dataDescription) VTK_OVERRIDE
{
dataDescription->GetInputDescriptionByName("volume")->AllFieldsOn();
dataDescription->GetInputDescriptionByName("volume")->GenerateMeshOn();
return 1;
}
// Execute the pipeline. Returns 1 for success and 0 for failure.
virtual int CoProcess(vtkCPDataDescription* dataDescription) VTK_OVERRIDE
{
this->OutputCounter++;
vtkCPInputDataDescription* idd = dataDescription->GetInputDescriptionByName("volume");
if (!idd)
{
return 1;
}
vtkDataObject* gridNow = idd->GetGrid();
auto dsw = vtkSmartPointer<vtkXMLDataSetWriter>::New();
dsw->SetInputData(gridNow);
// Output the volume at each timestep like you might do normally.
std::string fname = "tcache_ex_time_" + std::to_string(this->OutputCounter) + ".vti";
dsw->SetFileName(fname.c_str());
dsw->Write();
vtkCPInputDataDescription* idd2 = dataDescription->GetInputDescriptionByName("points");
if (!idd2)
{
return 1;
}
vtkDataObject* pointsNow = idd2->GetGrid();
dsw->SetInputData(pointsNow);
// Ditto for the points at each timestep.
fname = "tcache_ex_pts_time_" + std::to_string(this->OutputCounter) + ".vtp";
dsw->SetFileName(fname.c_str());
dsw->Write();
vtkSMSourceProxy* pcache = idd->GetTemporalCache();
// KEY POINT:
// Get access to the cache
vtkTemporalDataSetCache* cache =
vtkTemporalDataSetCache::SafeDownCast(pcache->GetClientSideObject());
if (!cache)
{
cerr << "Something is wrong; the pipeline should have a temporal cache." << endl;
return 1;
}
// The fun part: do something across timesteps
vtkInformation* info = cache->GetOutputInformation(0);
if (info->Has(vtkStreamingDemandDrivenPipeline::TIME_RANGE()))
{
double* tr = info->Get(vtkStreamingDemandDrivenPipeline::TIME_RANGE());
// We will output a temporal statistics volume every 10 frames
int tdumpcounter = this->OutputCounter / 10;
if (!(this->OutputCounter % 10))
{
auto tstats = vtkSmartPointer<vtkTemporalStatistics>::New();
// KEY POINT:
// Use the cache as input to a processing pipeline
tstats->SetInputConnection(cache->GetOutputPort());
auto dsw = vtkSmartPointer<vtkXMLDataSetWriter>::New();
dsw->SetInputConnection(tstats->GetOutputPort());
std::string fname = "tcache_ex_tstats_" + std::to_string(tdumpcounter) + ".vti";
dsw->SetFileName(fname.c_str());
dsw->Write();
}
}
return 1;
}
protected:
vtkCPTestPipeline() { this->OutputCounter = 0; }
virtual ~vtkCPTestPipeline() {}
int OutputCounter;
private:
vtkCPTestPipeline(const vtkCPTestPipeline&) = delete;
void operator=(const vtkCPTestPipeline&) = delete;
};
vtkStandardNewMacro(vtkCPTestPipeline);
}
//******************************************************************************
namespace
{
// Internal helpers used by the Adaptor itself
vtkCPProcessor* Processor = NULL;
vtkImageData* VTKVolume = NULL;
vtkUnstructuredGrid* VTKPoints = NULL;
void BuildVTKVolume(Grid& grid)
{
// The grid topological structure doesn't change so we just build it
// the first time it's needed.
if (VTKVolume == NULL)
{
VTKVolume = vtkImageData::New();
int extent[6];
for (int i = 0; i < 6; i++)
{
extent[i] = grid.GetExtent()[i];
}
VTKVolume->SetExtent(extent);
VTKVolume->SetSpacing(grid.GetSpacing());
}
// The points however do vary, so let's remake them every time.
if (VTKPoints)
VTKPoints->Delete();
VTKPoints = vtkUnstructuredGrid::New();
}
void UpdateVTKAttributes(Grid& grid, Attributes& attributes, vtkCPInputDataDescription* idd)
{
if (idd->IsFieldNeeded("occupancy", vtkDataObject::CELL) == true)
{
if (VTKVolume->GetCellData()->GetNumberOfArrays() == 0)
{
// occupancy array, in other words how many spheres are present at the voxel
vtkNew<vtkDoubleArray> occupancy;
occupancy->SetName("occupancy");
occupancy->SetNumberOfComponents(1);
VTKVolume->GetCellData()->AddArray(occupancy.GetPointer());
}
vtkDoubleArray* occupancy =
vtkDoubleArray::SafeDownCast(VTKVolume->GetCellData()->GetArray("occupancy"));
// The occupancy array is a scalar array so we can reuse
// memory as long as we ordered the points properly.
double* occupancyData = attributes.GetOccupancyArray();
occupancy->SetArray(occupancyData, static_cast<vtkIdType>(grid.GetNumberOfLocalCells()), 1);
occupancy->Modified();
}
const std::vector<double>& pts = attributes.GetParticles();
int numpts = pts.size() / 5;
auto points = vtkSmartPointer<vtkPoints>::New();
auto rads = vtkSmartPointer<vtkDoubleArray>::New();
rads->SetName("radius");
rads->SetNumberOfComponents(1);
auto ids = vtkSmartPointer<vtkIntArray>::New();
ids->SetName("pointid");
ids->SetNumberOfComponents(1);
for (int i = 0; i < numpts; i++)
{
points->InsertNextPoint(pts[i * 5 + 0], pts[i * 5 + 1], pts[i * 5 + 2]);
rads->InsertNextValue(pts[i * 5 + 3]);
ids->InsertNextValue(pts[i * 5 + 4]);
}
VTKPoints->SetPoints(points);
VTKPoints->GetPointData()->AddArray(rads);
VTKPoints->GetPointData()->AddArray(ids);
}
void BuildVTKDataStructures(Grid& grid, Attributes& attributes, vtkCPInputDataDescription* idd)
{
BuildVTKVolume(grid);
UpdateVTKAttributes(grid, attributes, idd);
}
}
namespace FEAdaptor
{
// The main adaptor proper: Initialize(), CoProcess(), and Finalize()
void Initialize(int argc, char* argv[])
{
std::string home = ".";
int tcachesize = 100;
bool enableCxxPipeline = false;
int numScripts = 0;
char** scripts = new char*[argc];
for (int a = 0; a < argc; a++)
{
if (!strcmp(argv[a], "-HOME") && a < argc - 1)
{
home = std::string(argv[a + 1]);
a += 1; // skip the value so it is not passed through as a Python script
}
else if (!strcmp(argv[a], "-CACHESIZE") && a < argc - 1)
{
tcachesize = atoi(argv[a + 1]);
a += 1;
}
else if (!strcmp(argv[a], "-ENABLECXXPIPELINE"))
{
enableCxxPipeline = true;
}
else
{
// pass unmatched arguments through as Python scripts
scripts[numScripts] = argv[a];
numScripts++;
}
}
// KEY POINT:
// If you want to use memkind features, you have to tell VTK where you want to map from.
cout << "Extended memory is backed by " << home << endl;
vtkObjectBase::SetMemkindDirectory(home.c_str());
if (Processor == NULL)
{
Processor = vtkCPProcessor::New();
// KEY POINT:
// You need to tell the processor how big its temporal caches need to be
Processor->SetTemporalCacheSize(tcachesize);
Processor->Initialize();
// KEY POINT:
// You have to make a temporal cache for every output you want to temporally process
Processor->MakeTemporalCache("volume");
}
else
{
Processor->RemoveAllPipelines();
}
// Python Pipelines
for (int i = 0; i < numScripts; i++)
{
vtkNew<vtkCPPythonScriptPipeline> pipeline;
pipeline->Initialize(scripts[i]);
Processor->AddPipeline(pipeline.GetPointer());
}
// Optionally, the example C++ Pipeline too.
if (enableCxxPipeline)
{
vtkNew<CPPPipeline::vtkCPTestPipeline> cpipeline;
Processor->AddPipeline(cpipeline.GetPointer());
}
delete[] scripts;
}
void CoProcess(
Grid& grid, Attributes& attributes, double time, unsigned int timeStep, bool lastTimeStep)
{
vtkNew<vtkCPDataDescription> dataDescription;
dataDescription->AddInput("volume");
dataDescription->AddInput("points");
dataDescription->SetTimeData(time, timeStep);
dataDescription->ForceOutputOn();
if (lastTimeStep == true)
{
// assume that we want all of the pipelines to execute if it
// is the last time step.
dataDescription->ForceOutputOn();
}
if (Processor->RequestDataDescription(dataDescription.GetPointer()) != 0)
{
vtkCPInputDataDescription* idd = dataDescription->GetInputDescriptionByName("volume");
BuildVTKDataStructures(grid, attributes, idd);
idd->SetGrid(VTKVolume);
int wholeExtent[6];
for (int i = 0; i < 3; i++)
{
wholeExtent[2 * i] = 0;
wholeExtent[2 * i + 1] = grid.GetNumPoints()[i];
}
idd->SetWholeExtent(wholeExtent);
vtkSMSourceProxy* cache = Processor->GetTemporalCache("volume");
if (cache)
{
// KEY POINT:
// The adaptor has to associate the cache with the pipeline every timestep
idd->SetTemporalCache(cache);
}
vtkCPInputDataDescription* idd2 = dataDescription->GetInputDescriptionByName("points");
idd2->SetGrid(VTKPoints);
Processor->CoProcess(dataDescription.GetPointer());
}
}
void Finalize()
{
if (Processor)
{
Processor->Delete();
Processor = NULL;
}
if (VTKVolume)
{
VTKVolume->Delete();
VTKVolume = NULL;
}
if (VTKPoints)
{
VTKPoints->Delete();
VTKPoints = NULL;
}
}
} // end of FEAdaptor namespace
#ifndef FEADAPTOR_HEADER
#define FEADAPTOR_HEADER
// Description:
// Functions that the simulation calls to set up, populate, and execute Catalyst.
//
// In this example the key thing to know about is the way that the temporal caching
// feature in Catalyst is turned on and fed. See the code with "KEY POINT" comments.
//
// Besides functioning as an example of setting up a temporal-capable adaptor,
// the example also provides vtkCPTestPipeline, a sample C++ Catalyst pipeline that demonstrates
// temporal filters.
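//
// A sketch of the typical call sequence, as used by the driver in FEDriver.cxx:
//   FEAdaptor::Initialize(numScripts, scripts); // also accepts -HOME, -CACHESIZE, -ENABLECXXPIPELINE
//   for each timestep:
//     FEAdaptor::CoProcess(grid, attributes, time, timeStep, lastTimeStep);
//   FEAdaptor::Finalize();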
class Attributes;
class Grid;
namespace FEAdaptor
{
void Initialize(int numScripts, char* scripts[]);
void CoProcess(
Grid& grid, Attributes& attributes, double time, unsigned int timeStep, bool lastTimeStep);
void Finalize();
}
#endif
#include "FEDataStructures.h"
#include <assert.h>
#include <iostream>
#include <math.h>
#include <mpi.h>
#include <string.h>
#include <unistd.h>
class particle
{
// A sphere, moving in some direction
public:
double pos[3];
double vel[3];
double radius;
int id;
static double world[];
public:
particle() = delete;
particle(int i, double r, double x, double y, double z, double vx, double vy, double vz)
: id(i)
, radius(r)
, pos{ x, y, z }
, vel{ vx, vy, vz }
{
}
static void setworld(double x0, double x1, double y0, double y1, double z0, double z1)
{
world[0] = x0;
world[1] = x1;
world[2] = y0;
world[3] = y1;
world[4] = z0;
world[5] = z1;
}
void print() { std::cout << id << ": " << pos[0] << "," << pos[1] << "," << pos[2] << std::endl; }
void update()
{
double n;
for (int c = 0; c < 3; c++)
{
n = pos[c] + vel[c];
// spheres will bounce off the walls
if (n < world[c * 2 + 0] || n > world[c * 2 + 1])
{
vel[c] *= -1;
}
pos[c] += vel[c];
}
}
double distance(double x, double y, double z)
{
double r = sqrt((x - pos[0]) * (x - pos[0]) + (y - pos[1]) * (y - pos[1]) +
(z - pos[2]) * (z - pos[2])) -
radius;
return r;
}
bool bbox_test(double x0, double x1, double y0, double y1, double z0, double z1)
{
if ((pos[0] + radius) < x0)
return false;
if ((pos[0] - radius) > x1)
return false;
if ((pos[1] + radius) < y0)
return false;
if ((pos[1] - radius) > y1)
return false;
if ((pos[2] + radius) < z0)
return false;
if ((pos[2] - radius) > z1)
return false;
return true;
}
void getCharacteristics(double* ret)
{
ret[0] = pos[0];
ret[1] = pos[1];
ret[2] = pos[2];
ret[3] = radius;
ret[4] = id;
}
};
// geometric extent of the box that the particles move in
double particle::world[6] = { 0.0, 1.0, 0.0, 1.0, 0.0, 1.0 };
class region
{
// a region of space that an MPI rank is responsible for
private:
double origin[3];
double spacing[3];
int extent[6];
int memsize;
double* values;
double x0, x1, y0, y1, z0, z1;
std::vector<double> myparticles;
public:
region() = delete;
region(double x, double y, double z, double sx, double sy, double sz, int i0, int i1, int j0,
int j1, int k0, int k1)
: origin{ x, y, z }
, spacing{ sx, sy, sz }
, extent{ i0, i1, j0, j1, k0, k1 }
{
memsize = (extent[1] - extent[0]) * (extent[3] - extent[2]) * (extent[5] - extent[4]);
values = new double[memsize];
reset();
x0 = origin[0] + extent[0] * spacing[0];
x1 = origin[0] + extent[1] * spacing[0];
y0 = origin[1] + extent[2] * spacing[1];
y1 = origin[1] + extent[3] * spacing[1];
z0 = origin[2] + extent[4] * spacing[2];
z1 = origin[2] + extent[5] * spacing[2];
}
void reset()
{
memset((void*)values, 0, memsize * sizeof(double));
myparticles.clear();
}
void accumulate(particle p)
{
// sample this particle onto the volume
if (p.bbox_test(x0, x1, y0, y1, z0, z1))
{
// std::cout << getpid() << " hit" << std::endl;
double chars[5];
p.getCharacteristics(chars);
myparticles.push_back(chars[0]); // x
myparticles.push_back(chars[1]); // y
myparticles.push_back(chars[2]); // z
myparticles.push_back(chars[3]); // r
myparticles.push_back(chars[4]); // id
double* value = values;
int i0 = ((p.pos[0] - p.radius) + origin[0]) / spacing[0];
int i1 = ((p.pos[0] + p.radius) + origin[0]) / spacing[0];
int j0 = ((p.pos[1] - p.radius) + origin[1]) / spacing[1];
int j1 = ((p.pos[1] + p.radius) + origin[1]) / spacing[1];
int k0 = ((p.pos[2] - p.radius) + origin[2]) / spacing[2];
int k1 = ((p.pos[2] + p.radius) + origin[2]) / spacing[2];
int di = extent[1] - extent[0];
int dj = extent[3] - extent[2];
int dk = extent[5] - extent[4];
for (int i = i0; i < i1; i++)
{
if (i < 0 || i < extent[0] || i >= extent[1])
continue;
for (int j = j0; j < j1; j++)
{
if (j < 0 || j < extent[2] || j >= extent[3])
continue;
for (int k = k0; k < k1; k++)
{
if (k < 0 || k < extent[4] || k >= extent[5])
continue;
double x = origin[0] + i * spacing[0];
double y = origin[1] + j * spacing[1];
double z = origin[2] + k * spacing[2];
if (p.distance(x, y, z) <= 0.0)
{
value = values + i * dj * dk + j * dk + k;
*value = *value + 1.0;
}
}
}
}
}
}
double* getValues() { return this->values; };
const std::vector<double>& getParticles() { return myparticles; };
};
Grid::Grid()
{
this->NumPoints[0] = this->NumPoints[1] = this->NumPoints[2] = 0;
this->Spacing[0] = this->Spacing[1] = this->Spacing[2] = 0;
this->MyRegion = nullptr;
}
Grid::~Grid()
{
delete this->MyRegion;
}
void Grid::Initialize(const unsigned int numPoints[3], const double spacing[3])
{
if (numPoints[0] == 0 || numPoints[1] == 0 || numPoints[2] == 0)
{
std::cerr << "Must have a non-zero amount of points in each direction." << std::endl;
return;
}
for (int i = 0; i < 3; i++)
{
this->NumPoints[i] = numPoints[i];
this->Spacing[i] = spacing[i];
}
int mpiRank = 0, mpiSize = 1;
MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
this->Extent[0] = mpiRank * numPoints[0] / mpiSize;
this->Extent[1] = (mpiRank + 1) * numPoints[0] / mpiSize;
if (mpiSize != mpiRank + 1)
{
this->Extent[1]++;
}
this->Extent[2] = this->Extent[4] = 0;
this->Extent[3] = numPoints[1];
this->Extent[5] = numPoints[2];
// every rank knows of the entire space
particle::setworld(0, numPoints[2], 0, numPoints[1], 0, numPoints[0]);
// every rank has only its own region of space
this->MyRegion = new region(0, 0, 0, spacing[2], spacing[1], spacing[0], this->Extent[4],
this->Extent[5], this->Extent[2], this->Extent[3], this->Extent[0], this->Extent[1]);
}
unsigned int Grid::GetNumberOfLocalPoints()
{
return (this->Extent[1] - this->Extent[0] + 1) * (this->Extent[3] - this->Extent[2] + 1) *
(this->Extent[5] - this->Extent[4] + 1);
}
unsigned int Grid::GetNumberOfLocalCells()
{
return (this->Extent[1] - this->Extent[0]) * (this->Extent[3] - this->Extent[2]) *
(this->Extent[5] - this->Extent[4]);
}
void Grid::GetLocalPoint(unsigned int pointId, double* point)
{
unsigned int logicalX = pointId % (this->Extent[1] - this->Extent[0] + 1);
assert(logicalX <= this->Extent[1]);
point[0] = this->Spacing[0] * logicalX;
unsigned int logicalY =
pointId % ((this->Extent[1] - this->Extent[0] + 1) * (this->Extent[3] - this->Extent[2] + 1));
logicalY /= this->Extent[1] - this->Extent[0] + 1;
assert(logicalY <= this->Extent[3]);
point[1] = this->Spacing[1] * logicalY;
unsigned int logicalZ =
pointId / ((this->Extent[1] - this->Extent[0] + 1) * (this->Extent[3] - this->Extent[2] + 1));
assert(logicalZ <= this->Extent[5]);
point[2] = this->Spacing[2] * logicalZ;
}
unsigned int* Grid::GetNumPoints()
{
return this->NumPoints;
}
unsigned int* Grid::GetExtent()
{
return this->Extent;
}
double* Grid::GetSpacing()
{
return this->Spacing;
}
Attributes::Attributes(int numparticles)
{
this->GridPtr = nullptr;
this->NumParticles = numparticles;
this->MyParticles = new particle*[this->NumParticles];
for (int i = 0; i < this->NumParticles; i++)
{
this->MyParticles[i] = nullptr;
}
}
Attributes::~Attributes()
{
for (int i = 0; i < this->NumParticles; i++)
{
delete this->MyParticles[i];
}
delete[] this->MyParticles;
}
void Attributes::Initialize(Grid* grid)
{
this->GridPtr = grid;
double* cellspacing = grid->GetSpacing();
double cellsize = sqrt(cellspacing[0] * cellspacing[0] + cellspacing[1] * cellspacing[1] +
cellspacing[2] * cellspacing[2]);
unsigned int* npts = grid->GetNumPoints();
// world extent to place particles within
double x0 = 0;
double x1 = npts[2] * cellspacing[2];
double y0 = 0;
double y1 = npts[1] * cellspacing[1];
double z0 = 0;
double z1 = npts[0] * cellspacing[0];
// a tuning parameter which keeps sizes relatively good in cases I've tried
#define ADJF 1.5
srand48(42l);
// every rank has every particle just to keep the simulation simple
// in real life you would want the particles to live with the processes
for (int i = 0; i < this->NumParticles; i++)
{
switch (i)
{
case 0:
this->MyParticles[0] = new particle(0, 2 * cellsize * ADJF, (x0 + x1) / 2, (y0 + y1) / 2,
z0, 0.0, 0.0, cellsize * 0.5 * ADJF);
break;
case 1:
this->MyParticles[1] = new particle(1, 3 * cellsize * ADJF, (x0 + x1) / 2, (y0 + y1) / 2,
z1, 0.0, 0.0, -cellsize * 0.5 * ADJF);
break;
default:
double r = drand48() * 1.5 * cellsize * ADJF;
double x = drand48() * (x1 - x0) + x0;
double y = drand48() * (y1 - y0) + y0;
double z = drand48() * (z1 - z0) + z0;
double vx = (2.0 * drand48() - 1.0) * cellsize * 2.0 * ADJF;
double vy = (2.0 * drand48() - 1.0) * cellsize * 2.0 * ADJF;
double vz = (2.0 * drand48() - 1.0) * cellsize * 2.0 * ADJF;
this->MyParticles[i] = new particle(i, r, x, y, z, vx, vy, vz);
}
}
}
void Attributes::UpdateFields(double time)
{
// this is the main entry point for each timestep
unsigned int numPoints = this->GridPtr->GetNumberOfLocalPoints();
unsigned int numCells = this->GridPtr->GetNumberOfLocalCells();
this->Occupancy.resize(numCells);
// start off clear for this timestep
std::fill(this->Occupancy.begin(), this->Occupancy.end(), 0.0);
region* r = this->GridPtr->GetMyRegion();
r->reset();
// move all of the particles
for (int i = 0; i < this->NumParticles; i++)
{
this->MyParticles[i]->update();
}
// discretize all of the particles onto the cells of the volume
for (int i = 0; i < this->NumParticles; i++)
{
r->accumulate(*this->MyParticles[i]);
}
// transfer them to the array I am going to output
double* value = r->getValues();
for (size_t c = 0; c < this->Occupancy.size(); c++)
{
this->Occupancy[c] = *value;
value++;
}
this->Particles = r->getParticles();
}
double* Attributes::GetOccupancyArray()
{
if (this->Occupancy.empty())
{
return nullptr;
}
return &this->Occupancy[0];
}
const std::vector<double>& Attributes::GetParticles()
{
return this->Particles;
}
/**
The code for the toy simulation itself is here. In this example we
make a configurable number of randomly sized spheres and make them
bounce around in a cube. At each timestep we move the spheres and then
sample them onto a configurably sized volume. The points and the volume
are available to Catalyst pipelines, but in this example only the
volume is temporally cached.
Note: Interaction between spheres is left as an exercise to the reader.
*/
#ifndef FEDATASTRUCTURES_HEADER
#define FEDATASTRUCTURES_HEADER
#include <cstddef>
#include <vector>
class particle;
class region;
class Grid
{
// computational domain
public:
~Grid();
Grid();
void Initialize(const unsigned int numPoints[3], const double spacing[3]);
unsigned int GetNumberOfLocalPoints();
unsigned int GetNumberOfLocalCells();
void GetLocalPoint(unsigned int pointId, double* point);
unsigned int* GetNumPoints();
unsigned int* GetExtent();
double* GetSpacing();
region* GetMyRegion() { return MyRegion; };
private:
unsigned int NumPoints[3];
unsigned int Extent[6];
double Spacing[3];
region* MyRegion;
};
class Attributes
{
// array of results, updated every timestep
public:
~Attributes();
Attributes(int numParticles);
void Initialize(Grid* grid);
void UpdateFields(double time);
double* GetOccupancyArray();
const std::vector<double>& GetParticles();
private:
std::vector<double> Occupancy;
std::vector<double> Particles;
Grid* GridPtr;
particle** MyParticles;
int NumParticles;
};
#endif
/**
An example Catalyst pipeline that includes temporal processing.
The "simulation" has a number of particles bouncing around in a cube.
Unlike other Catalyst examples, in this case Catalyst maintains a configurably sized cache of
the most recent timesteps' worth of produced data. The cache is suitable
for ParaView's time-varying filters and for ex post facto / backtracking
analysis. Here the pipeline fully flows only when some
interesting event happens. When it does, the pipeline has access
to more than just the current timestep to perform analysis on.
The command line arguments are:
-DIMS K J I // size of the volume, default is 70, 60, 44
-TSTEPS t // number of simulation timesteps, default is 20
-NUMPARTICLES p // number of particles in the box, default is 2
-CACHESIZE c // number of timesteps to cache, default is 100
-HOME dir // when ParaView is compiled with PARAVIEW_USE_MEMKIND, a directory to memory map the
cache in, ideally Optane mounted with -o dax
-ENABLECXXPIPELINE // runs the example's C++ Catalyst pipeline in addition to any provided Python ones
PythonPipeline... // one or more Catalyst Python pipeline scripts
*/
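// An example invocation (the MPI launcher and relative script path here are illustrative, not prescribed):
//   mpiexec -n 4 ./CachingExample -CACHESIZE 20 -ENABLECXXPIPELINE \
//     ../SampleScripts/exPostFactoMovie.py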
#include "FEAdaptor.h"
#include "FEDataStructures.h"
#include <mpi.h>
#include <vtkSmartPointer.h>
#include <vtkTimerLog.h>
int main(int argc, char* argv[])
{
MPI_Init(&argc, &argv);
unsigned int numPoints[3] = { 70, 60, 44 };
unsigned int numberOfTimeSteps = 20;
unsigned int delay = 1000;
int numparticles = 2;
int ac = 0;
char** av = new char*[argc];
for (int i = 0; i < argc; i++)
{
if (!strcmp(argv[i], "-DIMS"))
{
for (int j = 0; j < 3; j++)
{
numPoints[j] = atoi(argv[i + j + 1]);
}
i += 3;
}
else if (!strcmp(argv[i], "-TSTEPS"))
{
numberOfTimeSteps = atoi(argv[i + 1]);
i += 1;
}
else if (!strcmp(argv[i], "-DELAY"))
{
delay = atoi(argv[i + 1]);
i += 1;
}
else if (!strcmp(argv[i], "-NUMPARTICLES"))
{
numparticles = atoi(argv[i + 1]);
i += 1;
}
else
{
// pass unmatched arguments through for FEAdaptor to use
av[ac] = argv[i];
ac++;
}
}
double spacing[3] = { 1, 1.1, 1.3 };
Grid grid;
grid.Initialize(numPoints, spacing);
Attributes attributes(numparticles);
attributes.Initialize(&grid);
// The first argument is the program name
FEAdaptor::Initialize(ac - 1, av + 1);
double tsim = 0.0;
double tcop = 0.0;
auto tlog = vtkSmartPointer<vtkTimerLog>::New();
for (unsigned int timeStep = 0; timeStep < numberOfTimeSteps; timeStep++)
{
// use a time step length of 0.1
cout << "timeStep " << timeStep << endl;
double time = timeStep * 0.1;
tlog->StartTimer();
attributes.UpdateFields(time);
tlog->StopTimer();
tsim += tlog->GetElapsedTime();
tlog->StartTimer();
FEAdaptor::CoProcess(grid, attributes, time, timeStep, timeStep == numberOfTimeSteps - 1);
tlog->StopTimer();
tcop += tlog->GetElapsedTime();
}
cout << "Elapsed Simulation time " << tsim << endl;
cout << "Elapsed CoProcessing time " << tcop << endl;
FEAdaptor::Finalize();
MPI_Finalize();
delete[] av;
return 0;
}
# A sample Catalyst Python pipeline with an ex post facto trigger.
# Here we wait until particles collide. Only then do we output anything.
# Furthermore, we output the timesteps that led up to the collision,
# so that we can reason about what the cause was.
from paraview.simple import *
from paraview import coprocessing
#--------------------------------------------------------------
# Code generated from cpstate.py to create the CoProcessor.
# ----------------------- CoProcessor definition -----------------------
def CreateCoProcessor():
def _CreatePipeline(coprocessor, datadescription):
class Pipeline:
# KEY POINT:
# We don't have to do anything special at each timestep.
# But we do have to make the data available.
# In this case the recent data.
data = coprocessor.CreateTemporalProducer( datadescription, "volume" )
return Pipeline()
class CoProcessor(coprocessing.CoProcessor):
def CreatePipeline(self, datadescription):
self.Pipeline = _CreatePipeline(self, datadescription)
# some bookkeeping structures to make a nice set of outputs
self.StepsWritten = set()
self.TimesToTimesteps = {}
def ProcessTriggers(self, datadescription):
# In this trigger we look at the simulation's output and see if we need to do anything
# out of the ordinary. When something interesting happens we have access to the N most recent outputs, so we write them all out to disk.
data = self.Pipeline.data
timestep = datadescription.GetTimeStep()
time = datadescription.GetTime()
self.TimesToTimesteps[time] = timestep
data.UpdatePipeline(time)
orange = data.GetDataInformation().GetCellDataInformation().GetArrayInformation("occupancy").GetComponentRange(0)
if orange[1] > 1:
# a collision!
print ("collision at tstep %d, time %f, range is [%f,%f]" % (timestep, time, orange[0], orange[1]))
data.UpdatePipelineInformation()
dtimes = data.TimestepValues
for t in dtimes:
if (t not in self.StepsWritten):
print ("write ", t)
self.StepsWritten.add(t)
tstep = self.TimesToTimesteps[t]
writer = servermanager.writers.XMLPImageDataWriter(Input=data)
writer.FileName = "tevent_%d.pvti" % tstep
writer.UpdatePipeline(t)
coprocessor = CoProcessor()
freqs = {'volume': [10, 100]}
coprocessor.SetUpdateFrequencies(freqs)
return coprocessor
#--------------------------------------------------------------
# Global variables that will hold the pipeline for each timestep
# Creating the CoProcessor object doesn't actually create the ParaView pipeline.
# It will be automatically setup when coprocessor.UpdateProducers() is called the
# first time.
coprocessor = CreateCoProcessor()
#--------------------------------------------------------------
# Enable Live Visualization with ParaView
coprocessor.EnableLiveVisualization(True, 1)
# ---------------------- Data Selection method ----------------------
def RequestDataDescription(datadescription):
"Callback to populate the request for current timestep"
global coprocessor
if datadescription.GetForceOutput() == True:
# We are just going to request all fields and meshes from the simulation
# code/adaptor.
for i in range(datadescription.GetNumberOfInputDescriptions()):
datadescription.GetInputDescription(i).AllFieldsOn()
datadescription.GetInputDescription(i).GenerateMeshOn()
return
# setup requests for all inputs based on the requirements of the
# pipeline.
coprocessor.LoadRequestedData(datadescription)
# ------------------------ Processing method ------------------------
def DoCoProcessing(datadescription):
"Callback to do co-processing for current timestep"
global coprocessor
# Update the coprocessor by providing it the newly generated simulation data.
# If the pipeline hasn't been set up yet, this will set up the pipeline.
coprocessor.UpdateProducers(datadescription)
# KEY POINT:
# We make up a trigger here that is evaluated on every timestep.
coprocessor.ProcessTriggers(datadescription)
# Write output data, if appropriate.
coprocessor.WriteData(datadescription);
# Write image capture (Last arg: rescale lookup table), if appropriate.
coprocessor.WriteImages(datadescription, rescale_lookuptable=False)
# Live Visualization, if enabled.
coprocessor.DoLiveVisualization(datadescription, "localhost", 22222)
# A sample Catalyst Python pipeline that does some temporal processing
# of the data it is given over time.
from paraview.simple import *
from paraview import coprocessing
#--------------------------------------------------------------
# Code generated from cpstate.py to create the CoProcessor.
# ----------------------- CoProcessor definition -----------------------
def CreateCoProcessor():
def _CreatePipeline(coprocessor, datadescription):
class Pipeline:
# KEY POINT:
# Ask the coprocessor for a temporal producer instead of a normal one
# data = coprocessor.CreateProducer( datadescription, "volume" )
data = coprocessor.CreateTemporalProducer( datadescription, "volume" )
# write out each volume like you might do normally
imageDataWriter1 = servermanager.writers.XMLPImageDataWriter(Input=data)
coprocessor.RegisterWriter(imageDataWriter1, filename="tcache_pyex_%t.pvti", freq=10)
# The fun part: do something across timesteps
# We will output a temporal statistics volume every 10 frames
temporalStatistics1 = TemporalStatistics(Input=data)
imageDataWriter2 = servermanager.writers.XMLPImageDataWriter(Input=temporalStatistics1)
coprocessor.RegisterWriter(imageDataWriter2, filename="tcache_pyex_tstats_%t.pvti", freq=10)
return Pipeline()
class CoProcessor(coprocessing.CoProcessor):
def CreatePipeline(self, datadescription):
self.Pipeline = _CreatePipeline(self, datadescription)
coprocessor = CoProcessor()
freqs = {'volume': [10, 100]}
coprocessor.SetUpdateFrequencies(freqs)
return coprocessor
#--------------------------------------------------------------
# Global variables that will hold the pipeline for each timestep
# Creating the CoProcessor object doesn't actually create the ParaView pipeline.
# It will be automatically setup when coprocessor.UpdateProducers() is called the
# first time.
coprocessor = CreateCoProcessor()
#--------------------------------------------------------------
# Enable Live Visualization with ParaView
coprocessor.EnableLiveVisualization(True, 1)
# ---------------------- Data Selection method ----------------------
def RequestDataDescription(datadescription):
"Callback to populate the request for current timestep"
global coprocessor
if datadescription.GetForceOutput() == True:
# We are just going to request all fields and meshes from the simulation
# code/adaptor.
for i in range(datadescription.GetNumberOfInputDescriptions()):
datadescription.GetInputDescription(i).AllFieldsOn()
datadescription.GetInputDescription(i).GenerateMeshOn()
return
# setup requests for all inputs based on the requirements of the
# pipeline.
coprocessor.LoadRequestedData(datadescription)
# ------------------------ Processing method ------------------------
def DoCoProcessing(datadescription):
"Callback to do co-processing for current timestep"
global coprocessor
# Update the coprocessor by providing it the newly generated simulation data.
# If the pipeline hasn't been set up yet, this will set up the pipeline.
coprocessor.UpdateProducers(datadescription)
# Write output data, if appropriate.
coprocessor.WriteData(datadescription);
# Write image capture (Last arg: rescale lookup table), if appropriate.
coprocessor.WriteImages(datadescription, rescale_lookuptable=False)
# Live Visualization, if enabled.
coprocessor.DoLiveVisualization(datadescription, "localhost", 22222)
......@@ -346,4 +346,65 @@
</SourceProxy>
</ProxyGroup>
<ProxyGroup name="sources">
<!-- ==================================================================== -->
<SourceProxy class="vtkTemporalDataSetCache"
label="Temporal Cache Source"
name="TemporalCache">
<Documentation long_help="Saves a copy of the data set for a fixed number of time steps."
short_help="Caches data per time step.">The Temporal Cache
can be used to save multiple copies of a data set at
different time steps to prevent thrashing in the pipeline
caused by downstream filters that adjust the requested
time step. For example, assume that there is a downstream
Temporal Interpolator filter. This filter will (usually)
request two time steps from the upstream filters, which in
turn (usually) causes the upstream filters to run twice,
once for each time step. The next time the interpolator
requests the same two time steps, they might force the
upstream filters to re-evaluate the same two time steps.
The Temporal Cache can keep copies of both of these time
steps and provide the requested data without having to run
upstream filters.</Documentation>
<InputProperty command="SetInputConnection"
name="Input">
<ProxyGroupDomain name="groups">
<Group name="sources" />
<Group name="filters" />
</ProxyGroupDomain>
<DataTypeDomain composite_data_supported="1"
name="input_type">
<DataType value="vtkDataObject" />
</DataTypeDomain>
<Documentation>This property specifies the input of the Temporal Cache
filter.</Documentation>
</InputProperty>
<IntVectorProperty command="SetCacheSize"
default_values="2"
name="CacheSize"
number_of_elements="1">
<IntRangeDomain max="10"
min="2"
name="range" />
<Documentation>The cache size determines the number of time steps that
can be cached at one time. The maximum number is 10. The minimum is 2
(since it makes little sense to cache less than that).</Documentation>
</IntVectorProperty>
<DoubleVectorProperty information_only="1"
name="TimestepValues">
<TimeStepsInformationHelper />
</DoubleVectorProperty>
<IntVectorProperty name="IsASource"
command="SetIsASource"
number_of_elements="1"
default_values="1"
panel_visibility="never">
<BooleanDomain name="bool"/>
<Documentation> Sets up the Algorithm to act as a pipeline source rather than a filter. This is used in Catalyst. </Documentation>
</IntVectorProperty>
<!-- End TemporalCache -->
</SourceProxy>
</ProxyGroup>
</ServerManagerConfiguration>
......@@ -459,6 +459,22 @@ class CoProcessor(object):
producer.UpdatePipeline(datadescription.GetTime())
return producer
def CreateTemporalProducer(self, datadescription, inputname):
"""Python access to a temporal cache object associated with a specific
one simulation product. Much like CreateProducer, only this ends up with
a temporal cache filter instead of a PVTrivialProducer."""
if not datadescription.GetInputDescriptionByName(inputname):
raise RuntimeError ("Simulation input name '%s' does not exist" % inputname)
idd = datadescription.GetInputDescriptionByName(inputname)
cache = idd.GetTemporalCache()
if not cache:
raise RuntimeError ("I see no cache for '%s'" % inputname)
return servermanager._getPyProxy(cache)
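# Illustrative use from a Catalyst pipeline definition (mirrors the example scripts in this commit):
#   data = coprocessor.CreateTemporalProducer(datadescription, "volume")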
def ProcessExodusIIWriter(self, writer):
"""Extra work for the ExodusII writer to avoid undesired warnings
and print out a message on how to read the files into Ensight."""
......