Implicit cast from "int" to "uint" in generated shader, plus a socket tag mismatch — not sure how to handle it
Reproduce
- connect to a server using paraview
- run the attached python script (debug.py); it creates a vtkImage with timesteps
- Select "By" for coloring instead of "Solid color"
- Right click anywhere on the render
Crash log
Full crash log
( 42.807s) [paraview ] vtkOutputWindow.cxx:76 ERR| ERROR: In /builds/gitlab-kitware-sciviz-ci/source-paraview/VTK/Parallel/Core/vtkSocketCommunicator.cxx, line 847
vtkSocketCommunicator (0x268d410): Tag mismatch: got 1, expecting 10.
( 42.846s) [paraview ] vtkOutputWindow.cxx:76 ERR| ERROR: In /builds/gitlab-kitware-sciviz-ci/source-paraview/VTK/Rendering/OpenGL2/vtkShaderProgram.cxx, line 452
vtkShaderProgram (0x17036500): 1: #version 150
2: #ifdef GL_ES
3: #ifdef GL_FRAGMENT_PRECISION_HIGH
4: precision highp float;
5: precision highp sampler2D;
6: precision highp sampler3D;
7: #else
8: precision mediump float;
9: precision mediump sampler2D;
10: precision mediump sampler3D;
11: #endif
12: #define texelFetchBuffer texelFetch
13: #define texture1D texture
14: #define texture2D texture
15: #define texture3D texture
16: #else // GL_ES
17: #define highp
18: #define mediump
19: #define lowp
20: #if __VERSION__ == 150
21: #define texelFetchBuffer texelFetch
22: #define texture1D texture
23: #define texture2D texture
24: #define texture3D texture
25: #endif
26: #endif // GL_ES
27: #define varying in
28:
29:
30: /*=========================================================================
31:
32: Program: Visualization Toolkit
33: Module: raycasterfs.glsl
34:
35: Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
36: All rights reserved.
37: See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
38:
39: This software is distributed WITHOUT ANY WARRANTY; without even
40: the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
41: PURPOSE. See the above copyright notice for more information.
42:
43: =========================================================================*/
44:
45: //////////////////////////////////////////////////////////////////////////////
46: ///
47: /// Inputs
48: ///
49: //////////////////////////////////////////////////////////////////////////////
50:
51: /// 3D texture coordinates form vertex shader
52: in vec3 ip_textureCoords;
53: in vec3 ip_vertexPos;
54:
55: //////////////////////////////////////////////////////////////////////////////
56: ///
57: /// Outputs
58: ///
59: //////////////////////////////////////////////////////////////////////////////
60:
61: vec4 g_fragColor = vec4(0.0);
62:
63: //////////////////////////////////////////////////////////////////////////////
64: ///
65: /// Uniforms, attributes, and globals
66: ///
67: //////////////////////////////////////////////////////////////////////////////
68: vec3 g_dirStep;
69: vec4 g_srcColor;
70: vec4 g_eyePosObj;
71: bool g_exit;
72: bool g_skip;
73: float g_currentT;
74: float g_terminatePointMax;
75:
76: // These describe the entire ray for this scene, not just the current depth
77: // peeling segment. These are texture coordinates.
78: vec3 g_rayOrigin; // Entry point of volume or clip point
79: vec3 g_rayTermination; // Termination point (depth, clip, etc)
80:
81: // These describe the current segment. If not peeling, they are initialized to
82: // the ray endpoints.
83: vec3 g_dataPos;
84: vec3 g_terminatePos;
85:
86:
87:
88: out vec4 fragOutput0;
89:
90:
91: uniform sampler3D in_volume[1];
92: uniform vec4 in_volume_scale[1];
93: uniform vec4 in_volume_bias[1];
94: uniform int in_noOfComponents;
95:
96: #ifndef GL_ES
97: uniform sampler2D in_depthSampler;
98: #endif
99:
100: // Camera position
101: uniform vec3 in_cameraPos;
102: uniform sampler2D in_noiseSampler;
103: uniform mat4 in_volumeMatrix[1];
104: uniform mat4 in_inverseVolumeMatrix[1];
105: uniform mat4 in_textureDatasetMatrix[1];
106: uniform mat4 in_inverseTextureDatasetMatrix[1];
107: uniform mat4 in_textureToEye[1];
108: uniform vec3 in_texMin[1];
109: uniform vec3 in_texMax[1];
110: uniform mat4 in_cellToPoint[1];
111: // view and model matrices
112: uniform mat4 in_projectionMatrix;
113: uniform mat4 in_inverseProjectionMatrix;
114: uniform mat4 in_modelViewMatrix;
115: uniform mat4 in_inverseModelViewMatrix;
116: in mat4 ip_inverseTextureDataAdjusted;
117:
118: // Ray step size
119: uniform vec3 in_cellStep[1];
120: uniform vec2 in_scalarsRange[4];
121: uniform vec3 in_cellSpacing[1];
122:
123: // Sample distance
124: uniform float in_sampleDistance;
125:
126: // Scales
127: uniform vec2 in_windowLowerLeftCorner;
128: uniform vec2 in_inverseOriginalWindowSize;
129: uniform vec2 in_inverseWindowSize;
130: uniform vec3 in_textureExtentsMax;
131: uniform vec3 in_textureExtentsMin;
132:
133: // Material and lighting
134: uniform vec3 in_diffuse[4];
135: uniform vec3 in_ambient[4];
136: uniform vec3 in_specular[4];
137: uniform float in_shininess[4];
138:
139: // Others
140: vec3 g_rayJitter = vec3(0.0);
141:
142: uniform vec2 in_averageIPRange;
143: vec4 g_eyePosObjs[1];
144: uniform vec3 in_lightAmbientColor[1];
145: uniform vec3 in_lightDiffuseColor[1];
146: uniform vec3 in_lightSpecularColor[1];
147: vec4 g_lightPosObj[1];
148: vec3 g_ldir[1];
149: vec3 g_vdir[1];
150: vec3 g_h[1];
151:
152:
153:
154: const float g_opacityThreshold = 1.0 - 1.0 / 255.0;
155:
156:
157:
158:
159:
160:
161:
162:
163:
164:
165:
166: //VTK::GradientCache::Dec
167:
168: //VTK::Transfer2D::Dec
169:
170: uniform sampler2D in_opacityTransferFunc_0[1];
171:
172: float computeOpacity(vec4 scalar)
173: {
174: return texture2D(in_opacityTransferFunc_0[0], vec2(scalar.w, 0)).r;
175: }
176:
177: vec4 computeGradient(in vec3 texPos, in int c, in sampler3D volume, in int index)
178: {
179: return vec4(0.0);
180: }
181:
182:
183:
184:
185:
186: vec4 computeLighting(vec4 color, int component, float label)
187: {
188: vec4 finalColor = vec4(0.0);
189: finalColor = vec4(color.rgb, 0.0);
190: finalColor.a = color.a;
191: return finalColor;
192: }
193:
194: uniform sampler2D in_colorTransferFunc_0[1];
195:
196: vec4 computeColor(vec4 scalar, float opacity)
197: {
198: return computeLighting(vec4(texture2D(in_colorTransferFunc_0[0],
199: vec2(scalar.w, 0.0)).xyz, opacity), 0, 0.0);
200: }
201:
202:
203: uniform vec3 in_projectionDirection;
204: vec3 computeRayDirection()
205: {
206: return normalize((in_inverseVolumeMatrix[0] *
207: vec4(in_projectionDirection, 0.0)).xyz);
208: }
209:
210: //VTK::Picking::Dec
211:
212: //VTK::RenderToImage::Dec
213:
214: //VTK::DepthPeeling::Dec
215:
216: uniform float in_scale;
217: uniform float in_bias;
218:
219: //////////////////////////////////////////////////////////////////////////////
220: ///
221: /// Helper functions
222: ///
223: //////////////////////////////////////////////////////////////////////////////
224:
225: /**
226: * Transform window coordinate to NDC.
227: */
228: vec4 WindowToNDC(const float xCoord, const float yCoord, const float zCoord)
229: {
230: vec4 NDCCoord = vec4(0.0, 0.0, 0.0, 1.0);
231:
232: NDCCoord.x = (xCoord - in_windowLowerLeftCorner.x) * 2.0 *
233: in_inverseWindowSize.x - 1.0;
234: NDCCoord.y = (yCoord - in_windowLowerLeftCorner.y) * 2.0 *
235: in_inverseWindowSize.y - 1.0;
236: NDCCoord.z = (2.0 * zCoord - (gl_DepthRange.near + gl_DepthRange.far)) /
237: gl_DepthRange.diff;
238:
239: return NDCCoord;
240: }
241:
242: /**
243: * Transform NDC coordinate to window coordinates.
244: */
245: vec4 NDCToWindow(const float xNDC, const float yNDC, const float zNDC)
246: {
247: vec4 WinCoord = vec4(0.0, 0.0, 0.0, 1.0);
248:
249: WinCoord.x = (xNDC + 1.f) / (2.f * in_inverseWindowSize.x) +
250: in_windowLowerLeftCorner.x;
251: WinCoord.y = (yNDC + 1.f) / (2.f * in_inverseWindowSize.y) +
252: in_windowLowerLeftCorner.y;
253: WinCoord.z = (zNDC * gl_DepthRange.diff +
254: (gl_DepthRange.near + gl_DepthRange.far)) / 2.f;
255:
256: return WinCoord;
257: }
258:
259: /**
260: * Clamps the texture coordinate vector @a pos to a new position in the set
261: * { start + i * step }, where i is an integer. If @a ceiling
262: * is true, the sample located further in the direction of @a step is used,
263: * otherwise the sample location closer to the eye is used.
264: * This function assumes both start and pos already have jittering applied.
265: */
266: vec3 ClampToSampleLocation(vec3 start, vec3 step, vec3 pos, bool ceiling)
267: {
268: vec3 offset = pos - start;
269: float stepLength = length(step);
270:
271: // Scalar projection of offset on step:
272: float dist = dot(offset, step / stepLength);
273: if (dist < 0.) // Don't move before the start position:
274: {
275: return start;
276: }
277:
278: // Number of steps
279: float steps = dist / stepLength;
280:
281: // If we're reeaaaaallly close, just round -- it's likely just numerical noise
282: // and the value should be considered exact.
283: if (abs(mod(steps, 1.)) > 1e-5)
284: {
285: if (ceiling)
286: {
287: steps = ceil(steps);
288: }
289: else
290: {
291: steps = floor(steps);
292: }
293: }
294: else
295: {
296: steps = floor(steps + 0.5);
297: }
298:
299: return start + steps * step;
300: }
301:
302: //////////////////////////////////////////////////////////////////////////////
303: ///
304: /// Ray-casting
305: ///
306: //////////////////////////////////////////////////////////////////////////////
307:
308: /**
309: * Global initialization. This method should only be called once per shader
310: * invocation regardless of whether castRay() is called several times (e.g.
311: * vtkDualDepthPeelingPass). Any castRay() specific initialization should be
312: * placed within that function.
313: */
314: void initializeRayCast()
315: {
316: /// Initialize g_fragColor (output) to 0
317: g_fragColor = vec4(0.0);
318: g_dirStep = vec3(0.0);
319: g_srcColor = vec4(0.0);
320: g_exit = false;
321:
322:
323: // Get the 3D texture coordinates for lookup into the in_volume dataset
324: g_rayOrigin = ip_textureCoords.xyz;
325:
326: // Eye position in dataset space
327: g_eyePosObj = in_inverseVolumeMatrix[0] * vec4(in_cameraPos, 1.0);
328: g_eyePosObjs[0] = in_inverseVolumeMatrix[0] * vec4(in_cameraPos, 1.0);
329:
330: // Getting the ray marching direction (in dataset space)
331: vec3 rayDir = computeRayDirection();
332:
333: // 2D Texture fragment coordinates [0,1] from fragment coordinates.
334: // The frame buffer texture has the size of the plain buffer but
335: // we use a fraction of it. The texture coordinate is less than 1 if
336: // the reduction factor is less than 1.
337: // Device coordinates are between -1 and 1. We need texture
338: // coordinates between 0 and 1. The in_depthSampler
339: // buffer has the original size buffer.
340: vec2 fragTexCoord = (gl_FragCoord.xy - in_windowLowerLeftCorner) *
341: in_inverseWindowSize;
342:
343: // Multiply the raymarching direction with the step size to get the
344: // sub-step size we need to take at each raymarching step
345: g_dirStep = (ip_inverseTextureDataAdjusted *
346: vec4(rayDir, 0.0)).xyz * in_sampleDistance;
347:
348: float jitterValue = texture2D(in_noiseSampler, gl_FragCoord.xy / vec2(textureSize(in_noiseSampler, 0))).x;
349: g_rayJitter = g_dirStep * jitterValue;
350:
351: g_rayOrigin += g_rayJitter;
352:
353: // Flag to determine if voxel should be considered for the rendering
354: g_skip = false;
355:
356:
357:
358:
359: // Flag to indicate if the raymarch loop should terminate
360: bool stop = false;
361:
362: g_terminatePointMax = 0.0;
363:
364: #ifdef GL_ES
365: vec4 l_depthValue = vec4(1.0,1.0,1.0,1.0);
366: #else
367: vec4 l_depthValue = texture2D(in_depthSampler, fragTexCoord);
368: #endif
369: // Depth test
370: if(gl_FragCoord.z >= l_depthValue.x)
371: {
372: discard;
373: }
374:
375: // color buffer or max scalar buffer have a reduced size.
376: fragTexCoord = (gl_FragCoord.xy - in_windowLowerLeftCorner) *
377: in_inverseOriginalWindowSize;
378:
379: // Compute max number of iterations it will take before we hit
380: // the termination point
381:
382: // Abscissa of the point on the depth buffer along the ray.
383: // point in texture coordinates
384: vec4 rayTermination = WindowToNDC(gl_FragCoord.x, gl_FragCoord.y, l_depthValue.x);
385:
386: // From normalized device coordinates to eye coordinates.
387: // in_projectionMatrix is inversed because of way VT
388: // From eye coordinates to texture coordinates
389: rayTermination = ip_inverseTextureDataAdjusted *
390: in_inverseVolumeMatrix[0] *
391: in_inverseModelViewMatrix *
392: in_inverseProjectionMatrix *
393: rayTermination;
394: g_rayTermination = rayTermination.xyz / rayTermination.w;
395:
396: // Setup the current segment:
397: g_dataPos = g_rayOrigin;
398: g_terminatePos = g_rayTermination;
399:
400: g_terminatePointMax = length(g_terminatePos.xyz - g_dataPos.xyz) /
401: length(g_dirStep);
402: g_currentT = 0.0;
403:
404:
405:
406: //VTK::RenderToImage::Init
407:
408: //VTK::DepthPass::Init
409: }
410:
411: /**
412: * March along the ray direction sampling the volume texture. This function
413: * takes a start and end point as arguments but it is up to the specific render
414: * pass implementation to use these values (e.g. vtkDualDepthPeelingPass). The
415: * mapper does not use these values by default, instead it uses the number of
416: * steps defined by g_terminatePointMax.
417: */
418: vec4 castRay(const float zStart, const float zEnd)
419: {
420: //VTK::DepthPeeling::Ray::Init
421:
422:
423:
424: //VTK::DepthPeeling::Ray::PathCheck
425:
426:
427:
428: /// For all samples along the ray
429: while (!g_exit)
430: {
431:
432: g_skip = false;
433:
434:
435:
436:
437:
438:
439:
440: //VTK::PreComputeGradients::Impl
441:
442:
443: if (!g_skip)
444: {
445: vec4 scalar;
446:
447: scalar = texture3D(in_volume[0], g_dataPos);
448:
449: scalar.r = scalar.r * in_volume_scale[0].r + in_volume_bias[0].r;
450: scalar = vec4(scalar.r);
451: g_srcColor = vec4(0.0);
452: g_srcColor.a = computeOpacity(scalar);
453: if (g_srcColor.a > 0.0)
454: {
455: g_srcColor = computeColor(scalar, g_srcColor.a);
456: // Opacity calculation using compositing:
457: // Here we use front to back compositing scheme whereby
458: // the current sample value is multiplied to the
459: // currently accumulated alpha and then this product
460: // is subtracted from the sample value to get the
461: // alpha from the previous steps. Next, this alpha is
462: // multiplied with the current sample colour
463: // and accumulated to the composited colour. The alpha
464: // value from the previous steps is then accumulated
465: // to the composited colour alpha.
466: g_srcColor.rgb *= g_srcColor.a;
467: g_fragColor = (1.0f - g_fragColor.a) * g_srcColor + g_fragColor;
468: }
469: }
470:
471: //VTK::RenderToImage::Impl
472:
473: //VTK::DepthPass::Impl
474:
475: /// Advance ray
476: g_dataPos += g_dirStep;
477:
478:
479: if(any(greaterThan(max(g_dirStep, vec3(0.0))*(g_dataPos - in_texMax[0]),vec3(0.0))) ||
480: any(greaterThan(min(g_dirStep, vec3(0.0))*(g_dataPos - in_texMin[0]),vec3(0.0))))
481: {
482: break;
483: }
484:
485: // Early ray termination
486: // if the currently composited colour alpha is already fully saturated
487: // we terminated the loop or if we have hit an obstacle in the
488: // direction of they ray (using depth buffer) we terminate as well.
489: if((g_fragColor.a > g_opacityThreshold) ||
490: g_currentT >= g_terminatePointMax)
491: {
492: break;
493: }
494: ++g_currentT;
495: }
496:
497:
498:
499: return g_fragColor;
500: }
501:
502: /**
503: * Finalize specific modes and set output data.
504: */
505: void finalizeRayCast()
506: {
507:
508:
509:
510:
511:
512:
513:
514:
515:
516: // Special coloring mode which renders the voxel index in fragments that
517: // have accumulated certain level of opacity. Used during the selection
518: // pass vtkHardwareSelection::ID_MID24.
519: if (g_fragColor.a > 3.0/ 255.0)
520: {
521: uvec3 volumeDim = uvec3(in_textureExtentsMax - in_textureExtentsMin);
522: uvec3 voxelCoords = uvec3(volumeDim * g_dataPos);
523: // vtkHardwareSelector assumes index 0 to be empty space, so add uint(1).
524: uint idx = volumeDim.x * volumeDim.y * voxelCoords.z +
525: volumeDim.x * voxelCoords.y + voxelCoords.x + uint(1);
526: idx = ((idx & 0xff000000) >> 24);
527: fragOutput0 = vec4(float(idx % uint(256)) / 255.0,
528: float((idx / uint(256)) % uint(256)) / 255.0,
529: float(idx / uint(65536)) / 255.0, 1.0);
530: }
531: else
532: {
533: fragOutput0 = vec4(0.0);
534: }
535: return;
536:
537: g_fragColor.r = g_fragColor.r * in_scale + in_bias * g_fragColor.a;
538: g_fragColor.g = g_fragColor.g * in_scale + in_bias * g_fragColor.a;
539: g_fragColor.b = g_fragColor.b * in_scale + in_bias * g_fragColor.a;
540: fragOutput0 = g_fragColor;
541:
542: //VTK::RenderToImage::Exit
543:
544: //VTK::DepthPass::Exit
545: }
546:
547: //////////////////////////////////////////////////////////////////////////////
548: ///
549: /// Main
550: ///
551: //////////////////////////////////////////////////////////////////////////////
552: void main()
553: {
554:
555: initializeRayCast();
556: castRay(-1.0, -1.0);
557: finalizeRayCast();
558: }
( 42.857s) [paraview ] vtkOutputWindow.cxx:76 ERR| ERROR: In /builds/gitlab-kitware-sciviz-ci/source-paraview/VTK/Rendering/OpenGL2/vtkShaderProgram.cxx, line 453
vtkShaderProgram (0x17036500): 0(526) : error C7011: implicit cast from "int" to "uint"
( 42.857s) [paraview ] vtkOutputWindow.cxx:76 ERR| ERROR: In /builds/gitlab-kitware-sciviz-ci/source-paraview/VTK/Rendering/VolumeOpenGL2/vtkOpenGLGPUVolumeRayCastMapper.cxx, line 2699
vtkOpenGLGPUVolumeRayCastMapper (0x169f74e0): Shader failed to compile
Wrong tag but don't know how to handle it... 144432
Loguru caught a signal: SIGABRT
Stack trace:
42 0x407bea /opt/ParaView-5.9.1-MPI-Linux-Python3.9-64bit/bin/paraview-real() [0x407bea]
41 0x7fcb287f5d0a __libc_start_main + 234
40 0x40784a /opt/ParaView-5.9.1-MPI-Linux-Python3.9-64bit/bin/paraview-real() [0x40784a]
39 0x7fcb25c79120 QCoreApplication::exec() + 128
38 0x7fcb25c7062a QEventLoop::exec(QFlags<QEventLoop::ProcessEventsFlag>) + 298
37 0x7fcaf19d6e13 /opt/ParaView-5.9.1-MPI-Linux-Python3.9-64bit/plugins/platforms/../../lib/libQt5XcbQpa.so.5(+0x5ee13) [0x7fcaf19d6e13]
36 0x7fcb262f9b9b QWindowSystemInterface::sendWindowSystemEvents(QFlags<QEventLoop::ProcessEventsFlag>) + 187
35 0x7fcb2631e5f5 QGuiApplicationPrivate::processWindowSystemEvent(QWindowSystemInterfacePrivate::WindowSystemEvent*) + 261
34 0x7fcb2631d36d QGuiApplicationPrivate::processMouseEvent(QWindowSystemInterfacePrivate::MouseEvent*) + 1837
33 0x7fcb25c71dd8 QCoreApplication::notifyInternal2(QObject*, QEvent*) + 280
32 0x7fcb27c1e260 QApplication::notify(QObject*, QEvent*) + 704
31 0x7fcb27c1718c QApplicationPrivate::notify_helper(QObject*, QEvent*) + 156
30 0x7fcb27c718fb /opt/ParaView-5.9.1-MPI-Linux-Python3.9-64bit/bin/../lib/libQt5Widgets.so.5(+0x1b68fb) [0x7fcb27c718fb]
29 0x7fcb27c6ed86 /opt/ParaView-5.9.1-MPI-Linux-Python3.9-64bit/bin/../lib/libQt5Widgets.so.5(+0x1b3d86) [0x7fcb27c6ed86]
28 0x7fcb27c1d7cc QApplicationPrivate::sendMouseEvent(QWidget*, QMouseEvent*, QWidget*, QWidget*, QWidget**, QPointer<QWidget>&, bool, bool) + 460
27 0x7fcb25c71dd8 QCoreApplication::notifyInternal2(QObject*, QEvent*) + 280
26 0x7fcb27c1edc8 QApplication::notify(QObject*, QEvent*) + 3624
25 0x7fcb27c17165 QApplicationPrivate::notify_helper(QObject*, QEvent*) + 117
24 0x7fcb25c71b6d QCoreApplicationPrivate::sendThroughObjectEventFilters(QObject*, QEvent*) + 157
23 0x7fcb2847cf3f pqPipelineContextMenuBehavior::eventFilter(QObject*, QEvent*) + 431
22 0x7fcb26e0d371 pqRenderView::pickBlock(int*, unsigned int&) + 129
21 0x7fcb1b51178d vtkSMRenderViewProxy::PickBlock(int, int, unsigned int&) + 173
20 0x7fcb1b5115ec vtkSMRenderViewProxy::SelectSurfaceCells(int const*, vtkCollection*, vtkCollection*, bool, int, bool, char const*) + 220
19 0x7fcb1b5114e8 vtkSMRenderViewProxy::SelectInternal(vtkClientServerStream const&, vtkCollection*, vtkCollection*, bool, int, bool) + 264
18 0x7fcb23e13eca vtkPVSession::CleanupPendingProgress() + 138
17 0x7fcb24ce4820 vtkPVSessionBase::CleanupPendingProgressInternal() + 256
16 0x7fcb24df715b vtkSMSessionClient::ExecuteStream(unsigned int, vtkClientServerStream const&, bool) + 347
15 0x7fcb24ce4375 vtkPVSessionBase::ExecuteStream(unsigned int, vtkClientServerStream const&, bool) + 53
14 0x7fcb24ce534b vtkPVSessionCore::ExecuteStream(unsigned int, vtkClientServerStream const&, bool) + 59
13 0x7fcb24ce5512 vtkPVSessionCore::ExecuteStreamInternal(vtkClientServerStream const&, bool) + 242
12 0x7fcb23951ded vtkClientServerInterpreter::ProcessStream(vtkClientServerStream const&) + 29
11 0x7fcb23951b4e vtkClientServerInterpreter::ProcessOneMessage(vtkClientServerStream const&, int) + 1294
10 0x7fcb2395141d vtkClientServerInterpreter::ProcessCommandInvoke(vtkClientServerStream const&, int) + 1229
9 0x7fcb23950db9 vtkClientServerInterpreter::CallCommandFunction(char const*, vtkObjectBase*, char const*, vtkClientServerStream const&, vtkClientServerStream&) + 345
8 0x7fcb1c432643 vtkPVProgressHandlerCommand(vtkClientServerInterpreter*, vtkObjectBase*, char const*, vtkClientServerStream const&, vtkClientServerStream&, void*) + 1331
7 0x7fcb23e03ef6 vtkPVProgressHandler::CleanupPendingProgress() + 278
6 0x7fcb232861b5 vtkSocketCommunicator::ReceiveVoidArray(void*, long long, int, int, int) + 325
5 0x7fcb232837d4 vtkSocketCommunicator::ReceiveTagged(void*, int, int, int, char const*) + 388
4 0x7fcb1eddb249 /opt/ParaView-5.9.1-MPI-Linux-Python3.9-64bit/bin/../lib/libvtkCommonCore-pv5.9.so.1(+0x3f9249) [0x7fcb1eddb249]
3 0x7fcb24df1399 /opt/ParaView-5.9.1-MPI-Linux-Python3.9-64bit/bin/../lib/libvtkRemotingServerManager-pv5.9.so.1(+0x1cc399) [0x7fcb24df1399]
2 0x7fcb287f4537 abort + 291
1 0x7fcb2880ace1 gsignal + 321
0 0x7fcb2880ad60 /lib/x86_64-linux-gnu/libc.so.6(+0x3bd60) [0x7fcb2880ad60]
( 42.864s) [paraview ] :0 FATL| Signal: SIGABRT
error: exception occurred: Subprocess aborted
Environment
-
Client:
- Paraview: 5.9.1-1209
- Debian GNU/Linux 10 (buster)
-
Server (pvserver):
- Paraview: 5.9.1-1209-egl-mpi
- pvNvidiaIndex enabled
- RHEL 7.7
- GPU: Quadro GV100