Commit 96cb4879 authored by Joachim P

Clean & refactor

parent 07b0664d
......@@ -12,4 +12,5 @@ vtk_module(vtkDomainsParallelChemistry
PRIVATE_DEPENDS
vtkCommonCore
vtkParallelMPI
vtkFiltersParallelMPI
)
......@@ -16,9 +16,9 @@
#include "vtkPSimpleBondPerceiver.h"
#include "vtkDataSetAttributes.h"
#include "vtkDistributedPointCloudFilter.h"
#include "vtkFloatArray.h"
#include "vtkMPIController.h"
#include "vtkMPIUtilities.h"
#include "vtkMolecule.h"
#include "vtkNew.h"
#include "vtkObjectFactory.h"
......@@ -75,7 +75,7 @@ bool vtkPSimpleBondPerceiver::CreateGhosts(vtkMolecule* molecule)
dataArray->DeepCopy(molecule->GetVertexData());
vtkNew<vtkPolyData> outputPoly;
vtkMPIUtilities::GetPointsInsideBounds(
vtkDistributedPointCloudFilter::GetPointsInsideBounds(
controller, inputPoly.Get(), outputPoly.Get(), outterBounds);
molecule->Initialize(
......
......@@ -39,7 +39,6 @@
class vtkMPIController;
class vtkMultiProcessController;
class vtkPointSet;
class VTKFILTERSPARALLELMPI_EXPORT vtkDistributedPointCloudFilter : public vtkPointSetAlgorithm
{
......@@ -56,6 +55,13 @@ public:
vtkGetObjectMacro(Controller, vtkMultiProcessController);
//@}
/**
* Get the points that are inside innerBounds and put them in the output DataSet.
* The other MPI ranks are asked for their corresponding points.
*/
static void GetPointsInsideBounds(vtkMPIController*,
vtkPointSet *input, vtkPointSet *output, const double innerBounds[6]);
protected:
vtkDistributedPointCloudFilter();
~vtkDistributedPointCloudFilter() override;
......@@ -64,13 +70,6 @@ protected:
int RequestData(vtkInformation*, vtkInformationVector**, vtkInformationVector*) override;
struct KdTreeBuildRound
{
vtkMPIController *controller;
int np;
int rank;
};
/**
* Optimize the bounding box following these rules:
* - no intersection of bounding box of different MPI nodes
......@@ -79,14 +78,14 @@ protected:
* Return false if input pointSet is nullptr or if no communicator was found.
* Return true otherwise.
*/
bool OptimizeBoundingBox(std::vector<KdTreeBuildRound>&, vtkPointSet*, double bounds[6]);
bool OptimizeBoundingBox(std::vector<vtkMPIController*> &, vtkPointSet *, double bounds[6]);
/**
* Initialize KdTreeRound: creates subControllers from Controller.
* Delete old values if any.
* Return false if KdTree cannot be initialized.
*/
bool InitializeKdTree(std::vector<KdTreeBuildRound>&);
bool InitializeKdTree(std::vector<vtkMPIController*> &);
private:
vtkDistributedPointCloudFilter(const vtkDistributedPointCloudFilter&) = delete;
......
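With this header change, the cross-rank point gathering is reached through the filter class instead of vtkMPIUtilities. Below is a minimal sketch of the new call pattern, assuming an already-initialized vtkMPIController and caller-computed bounds; the function and variable names are illustrative, not part of this commit.

#include "vtkDistributedPointCloudFilter.h"
#include "vtkMPIController.h"
#include "vtkPolyData.h"

// Collect, from all MPI ranks, the points that fall inside `bounds`.
void CollectPointsInsideBounds(vtkMPIController* controller,
  vtkPolyData* localPoints, const double bounds[6], vtkPolyData* result)
{
  // Was vtkMPIUtilities::GetPointsInsideBounds(...) before this commit.
  vtkDistributedPointCloudFilter::GetPointsInsideBounds(
    controller, localPoints, result, bounds);
}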
......@@ -15,21 +15,11 @@
#include "vtkMPIUtilities.h"
// VTK includes
#include "vtkCharArray.h"
#include "vtkIdTypeArray.h"
#include "vtkMPICommunicator.h"
#include "vtkMPIController.h"
#include "vtkNew.h"
#include "vtkOctreePointLocator.h"
#include "vtkPointData.h"
#include "vtkPointSet.h"
#include "vtkPoints.h"
#include "vtkPolyData.h"
#include "vtkSmartPointer.h"
// C/C++ includes
#include <cassert>
#include <cmath>
#include <cstdarg>
#include <cstdio>
......@@ -122,233 +112,4 @@ void SynchronizedPrintf(vtkMPIController* comm, const char* format, ...)
comm->Barrier();
}
void GetPointsInsideBounds(vtkMPIController* controller,
vtkPointSet* input,
vtkPointSet* output,
const double outterBounds[6])
{
vtkMPICommunicator* com = vtkMPICommunicator::SafeDownCast(controller->GetCommunicator());
if (!com)
{
return;
}
int np = com->GetNumberOfProcesses();
int rank = com->GetLocalProcessId();
if (np == 1)
{
output->ShallowCopy(input);
return;
}
double localOutterBounds[6];
// round bounds to the nearest float value because the locator uses float internally.
// Otherwise, points that lie exactly on the bounds may wrongly be considered outside
// because of the cast.
for (int i = 0; i < 3; i++)
{
localOutterBounds[2 * i] =
std::nextafter((float)outterBounds[2 * i], (float)outterBounds[2 * i] - 1);
localOutterBounds[2 * i + 1] =
std::nextafter((float)outterBounds[2 * i + 1], (float)outterBounds[2 * i + 1] + 1);
}
bool emptyData = input->GetNumberOfPoints() == 0;
std::vector<double> allOutterBounds(np * 6, 0);
com->AllGather(localOutterBounds, allOutterBounds.data(), 6);
// size in bytes of messages to be sent to other processes
std::vector<int> messagesSize(np, 0);
// number of points in messages to be sent to other processes
std::vector<int> messagePointCount(np, 0);
// array of point ids
vtkNew<vtkIdTypeArray> idArray;
std::vector<vtkSmartPointer<vtkCharArray> > dataToSend;
dataToSend.resize(np);
// we will need a locator to search for points inside each processor's assigned region
vtkNew<vtkOctreePointLocator> locator;
if (!emptyData)
{
vtkNew<vtkPolyData> inputPolyData;
inputPolyData->SetPoints(input->GetPoints());
locator->SetDataSet(inputPolyData.Get());
locator->BuildLocator();
}
// 1st step: define the messages to send to each processor (including itself):
// a polydata containing the points closest to that processor's data bounding box
for (int partner = 0; partner < np; partner++)
{
idArray->SetNumberOfTuples(0);
vtkIdType nPoints = 0;
vtkIdType* ids = nullptr;
if (!emptyData)
{
double* nbounds = &allOutterBounds[partner * 6];
locator->FindPointsInArea(nbounds, idArray.Get());
nPoints = idArray->GetNumberOfTuples();
ids = idArray->GetPointer(0);
}
vtkNew<vtkPolyData> pointCloudToSend;
vtkNew<vtkPoints> pointsToSend;
pointsToSend->SetNumberOfPoints(nPoints);
vtkPointData* pointsToSendPointData = pointCloudToSend->GetPointData();
pointsToSendPointData->CopyAllocate(input->GetPointData(), nPoints);
for (vtkIdType i = 0; i < nPoints; i++)
{
pointsToSend->SetPoint(i, input->GetPoint(ids[i]));
pointsToSendPointData->CopyData(input->GetPointData(), ids[i], i);
}
pointCloudToSend->SetPoints(pointsToSend.Get());
// flatten point data to byte array
messagePointCount[partner] = nPoints;
dataToSend[partner] = vtkSmartPointer<vtkCharArray>::New();
vtkCommunicator::MarshalDataObject(pointCloudToSend.Get(), dataToSend[partner]);
messagesSize[partner] = dataToSend[partner]->GetNumberOfTuples();
}
std::vector<vtkSmartPointer<vtkCharArray> > dataToReceive;
dataToReceive.resize(np);
std::vector<vtkMPICommunicator::Request> receiveRequests;
receiveRequests.resize(np);
// Calculate size of messages to receive
std::vector<int> receiveSize(np, 0);
std::vector<int> receivePointCount(np, 0);
for (int i = 0; i < np; i++)
{
com->Gather(messagesSize.data() + i, receiveSize.data(), 1, i);
com->Gather(messagePointCount.data() + i, receivePointCount.data(), 1, i);
}
// Start asynchronous receives
int nReceive = 0;
vtkIdType totalPointsToReceive = 0;
for (int round = 0; round < np - 1; round++)
{
int partner = (rank + round + 1) % np;
if (receiveSize[partner] > 0)
{
++nReceive;
dataToReceive[partner] = vtkSmartPointer<vtkCharArray>::New();
com->NoBlockReceive(dataToReceive[partner]->WritePointer(0, receiveSize[partner]),
receiveSize[partner],
partner,
0,
receiveRequests[partner]);
totalPointsToReceive += receivePointCount[partner];
}
}
// local sending/receipt is just a pointer assignment
dataToReceive[rank] = dataToSend[rank];
dataToSend[rank] = nullptr;
if (receiveSize[rank] > 0)
{
++nReceive;
totalPointsToReceive += receivePointCount[rank];
}
// Start asynchronous sends
std::vector<vtkMPICommunicator::Request> sendRequests;
sendRequests.resize(np);
for (int round = 0; round < np - 1; round++)
{
int partner = (rank + round + 1) % np;
if (messagesSize[partner] > 0)
{
com->NoBlockSend(dataToSend[partner]->GetPointer(0),
messagesSize[partner],
partner,
0,
sendRequests[partner]);
}
}
// sum of received points from the different processors
vtkIdType totalPoints = 0;
vtkPointData* outputPointData = output->GetPointData();
outputPointData->SetNumberOfTuples(totalPointsToReceive);
while (nReceive > 0)
{
for (int round = 0; round < np; round++)
{
int partner = (rank + round) % np;
if ((partner == rank || receiveRequests[partner].Test() == 1) && receiveSize[partner] > 0)
{
vtkNew<vtkPolyData> receivedPointCloud;
vtkCommunicator::UnMarshalDataObject(dataToReceive[partner], receivedPointCloud.Get());
dataToReceive[partner] = nullptr;
vtkIdType nbReceivedPoints = receivedPointCloud->GetNumberOfPoints();
vtkPointData* receivedPointData = receivedPointCloud->GetPointData();
vtkPoints* receivedPoints = receivedPointCloud->GetPoints();
vtkPoints* outputPoints = output->GetPoints();
if (!outputPoints)
{
vtkNew<vtkPoints> points;
outputPoints = points.Get();
output->SetPoints(outputPoints);
}
for (vtkIdType i = 0; i < nbReceivedPoints; i++)
{
outputPoints->InsertNextPoint(receivedPoints->GetPoint(i));
}
int nbArray = receivedPointData->GetNumberOfArrays();
for (int a = 0; a < nbArray; a++)
{
vtkDataArray* fromArray = receivedPointData->GetArray(a);
if (fromArray)
{
vtkDataArray* toArray = outputPointData->GetArray(fromArray->GetName());
if (!toArray)
{
toArray = fromArray->NewInstance();
toArray->SetName(fromArray->GetName());
toArray->SetNumberOfComponents(fromArray->GetNumberOfComponents());
toArray->SetNumberOfTuples(totalPointsToReceive);
outputPointData->AddArray(toArray);
toArray->Delete();
}
for (vtkIdType i = 0; i < nbReceivedPoints; i++)
{
toArray->SetTuple(totalPoints + i, fromArray->GetTuple(i));
}
}
}
totalPoints += nbReceivedPoints;
--nReceive;
receiveSize[partner] = 0;
}
}
}
// we wait for sent messages to be received before deleting them
for (int round = 0; round < np - 1; round++)
{
int partner = (rank + round + 1) % np;
if (messagesSize[partner] > 0)
{
sendRequests[partner].Wait();
}
}
}
} // END namespace vtkMPIUtilities
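One detail of the removed implementation worth keeping in mind is the float rounding applied to the query bounds before the locator search: the octree locator works in single precision, so points sitting exactly on a double-precision bound could otherwise be dropped. A minimal standalone sketch of that widening step follows, assuming the usual VTK bounds layout {xmin, xmax, ymin, ymax, zmin, zmax}; the helper name is illustrative.

#include <cmath>

// Nudge each bound outward to the nearest representable float so that points
// lying exactly on a double-precision bound are not lost when the locator
// casts coordinates to float.
void WidenBoundsForFloatLocator(const double in[6], double out[6])
{
  for (int i = 0; i < 3; ++i)
  {
    out[2 * i] = std::nextafter(
      static_cast<float>(in[2 * i]), static_cast<float>(in[2 * i]) - 1.f);
    out[2 * i + 1] = std::nextafter(
      static_cast<float>(in[2 * i + 1]), static_cast<float>(in[2 * i + 1]) + 1.f);
  }
}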
......@@ -19,7 +19,6 @@
// Forward declarations
class vtkMPIController;
class vtkPointSet;
namespace vtkMPIUtilities
{
......@@ -41,16 +40,6 @@ void Printf(vtkMPIController* comm, const char* format, ...);
VTKPARALLELMPI_EXPORT
void SynchronizedPrintf(vtkMPIController* comm, const char* format, ...);
/**
* Get the points that are inside innerBounds and put them in output DataSet.
* Ask other MPI ranks for their corresponding points.
*/
VTKPARALLELMPI_EXPORT
void GetPointsInsideBounds(vtkMPIController* controller,
vtkPointSet* input,
vtkPointSet* output,
const double innerBounds[6]);
} // END namespace vtkMPIUtilities
#endif // vtkMPIUtilities_h
......
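For reference, the removed vtkMPIUtilities::GetPointsInsideBounds exchanged the marshalled point clouds with non-blocking sends and receives, walking the partners in ring order ((rank + round + 1) % np) so that every rank targets a different partner in each round. A minimal sketch of that exchange pattern is shown below, assuming the send and receive buffers have already been sized and filled as in the removed code; the function and variable names are illustrative.

#include "vtkMPICommunicator.h"
#include <vector>

// Post all receives, then all sends, in ring order, and wait for completion
// before the buffers are reused or freed.
void ExchangeInRingOrder(vtkMPICommunicator* com, int rank, int np,
  std::vector<std::vector<char> >& sendBuf, std::vector<std::vector<char> >& recvBuf)
{
  std::vector<vtkMPICommunicator::Request> sendRequests(np), receiveRequests(np);
  for (int round = 0; round < np - 1; ++round)
  {
    int partner = (rank + round + 1) % np;
    if (!recvBuf[partner].empty())
    {
      com->NoBlockReceive(recvBuf[partner].data(),
        static_cast<int>(recvBuf[partner].size()), partner, 0, receiveRequests[partner]);
    }
  }
  for (int round = 0; round < np - 1; ++round)
  {
    int partner = (rank + round + 1) % np;
    if (!sendBuf[partner].empty())
    {
      com->NoBlockSend(sendBuf[partner].data(),
        static_cast<int>(sendBuf[partner].size()), partner, 0, sendRequests[partner]);
    }
  }
  for (int partner = 0; partner < np; ++partner)
  {
    if (partner == rank) { continue; } // the local exchange is a plain assignment
    if (!recvBuf[partner].empty()) { receiveRequests[partner].Wait(); }
    if (!sendBuf[partner].empty()) { sendRequests[partner].Wait(); }
  }
}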