diff --git a/src/Python/IO/CSVReader.md b/src/Python/IO/CSVReader.md
index ec8c0530a81c582fce1cf12662144324cda47d0b..6f9cd1a2d40b9ecd2ee50fce81f1a951ff0be6fa 100644
--- a/src/Python/IO/CSVReader.md
+++ b/src/Python/IO/CSVReader.md
@@ -1,10 +1,10 @@
 ### Description
 
-This example creates a PolyData object containing lines and points from a CSV file and writes it to a .vtp file.
+This example loads a CSV file, edits it and visualises the result.
 
-It demonstrates the use of [pandas](https://pandas.pydata.org/) to read the CSV input file and then using [numpy](https://numpy.org/) and the vtk-numpy interface for building the resultant vtkPolyData object based on the options selected.
+It demonstrates the use of [pandas](https://pandas.pydata.org/) to read and edit the CSV input file, and then the use of [numpy](https://numpy.org/) and the vtk-numpy interface for building the resultant vtkPolyData object based on the options selected.
 
-The key thing about `pandas` is it can read/write data in various formats: CSV and text files, Microsoft Excel, SQL databases, and the fast HDF5 format. It is highly optimized for performance and the DataFrame object allows for extensive row/column manipulation. So we can edit the data, creating new columns, and, finally, selecting only relevant columns for further analysis by VTK.
+The key thing about `pandas` is it can read/write data in various formats: CSV and text files, Microsoft Excel, SQL databases, and the fast HDF5 format. It is highly optimized for performance and the DataFrame object allows for extensive row/column manipulation. So we can edit the data, creating new columns, and, finally, select only relevant columns for further analysis by VTK.
 
 In this case we select columns using numpy to create the three-dimensional point data array data. The numpy objects are then converted to vtk data structures and integrated into a vtkPolyData object.
 
diff --git a/src/Python/IO/CSVReader.py b/src/Python/IO/CSVReader.py
index c3067bffcbf69830bffaa7608a80c51fb07927d1..996514ce981c771e9aa2d89f444e7167a30d6817 100755
--- a/src/Python/IO/CSVReader.py
+++ b/src/Python/IO/CSVReader.py
@@ -35,32 +35,33 @@ from vtkmodules.vtkRenderingCore import (
 
 def get_program_parameters():
     import argparse
-    description = 'Create a PolyData object containing lines and points from a CSV file and write it to a .vtp file.'
+    description = 'Edit data from a CSV file and visualise it.'
     epilogue = '''
     This program selects ECEF or UTM coordinates from the input file and:
-       1) Creates a VTP file.
-       2) Optionally saves the edited file used to create the visualisation as a CSV file.
-       3) Visualises the resultant points and lines.
+       1) Visualises the resultant points and lines.
+       2) Optionally creates a VTP file for further analysis.
+       3) Optionally saves the edited file used to create the visualisation as a CSV file.
     If Geographic coordinates are selected, just the resultant CSV file is saved.
     '''
     parser = argparse.ArgumentParser(description=description, epilog=epilogue,
                                      formatter_class=argparse.RawTextHelpFormatter)
-    group = parser.add_mutually_exclusive_group(required=True)
     parser.add_argument('file_name', help='The CSV file containing the data.')
+    parser.add_argument('-v', '--generate_vtp', action='store_true', help='Generate the .vtp file.')
     parser.add_argument('-o', '--out_csv_fn', default=None, help='The file name for the edited CSV file.')
     parser.add_argument('-p', '--path', default='.',
                         help='The path to be appended to the .vtp and optional .csv file')
 
+    group = parser.add_mutually_exclusive_group(required=True)
     group.add_argument('-e', '--ecef', action='store_true', help='Use ECEF coordinates.')
     group.add_argument('-u', '--utm', action='store_true', help='Use UTM coordinates.')
     group.add_argument('-g', '--geo', action='store_true', help='Use geographic coordinates (latitude/longitude).')
 
     args = parser.parse_args()
-    return args.file_name, args.out_csv_fn, args.path, args.ecef, args.utm, args.geo
+    return args.file_name, args.generate_vtp, args.out_csv_fn, args.path, args.ecef, args.utm, args.geo
 
 
 def main():
-    ifn, ofn, sp, ecef, utm, geo = get_program_parameters()
+    ifn, generate_vtp, ofn, sp, ecef, utm, geo = get_program_parameters()
     file_name = Path(ifn)
     if not file_name.is_file():
         print('Unable to read:', file_name)
@@ -72,25 +73,22 @@ def main():
             return
     pth.mkdir(parents=True, exist_ok=True)
 
-    vtp_fn = Path(ifn).with_suffix('.vtp')
-    if not vtp_fn.root:
-        vtp_fn = pth / vtp_fn.name
+    # Build the output paths.
+    vtp_fn = Path(pth / Path(ifn).stem).with_suffix('.vtp')
     if ecef:
-        vtp_fn = Path(vtp_fn.parent, vtp_fn.stem + '_ecef' + vtp_fn.suffix)
+        vtp_fn = vtp_fn.with_stem(vtp_fn.stem + '_ecef')
     if utm:
-        vtp_fn = Path(vtp_fn.parent, vtp_fn.stem + '_utm' + vtp_fn.suffix)
+        vtp_fn = vtp_fn.with_stem(vtp_fn.stem + '_utm')
     if ofn:
-        csv_fn = Path(ofn)
-        if not csv_fn.root:
-            csv_fn = pth / csv_fn.name
+        csv_fn = Path(pth / Path(ofn).name)
         if not csv_fn.suffix.lower() == '.csv':
             csv_fn = csv_fn.with_suffix('.csv')
         if ecef:
-            csv_fn = Path(vtp_fn.parent, csv_fn.stem + '_ecef' + csv_fn.suffix)
+            csv_fn = csv_fn.with_stem(csv_fn.stem + '_ecef')
         if utm:
-            csv_fn = Path(vtp_fn.parent, csv_fn.stem + '_utm' + csv_fn.suffix)
+            csv_fn = csv_fn.with_stem(csv_fn.stem + '_utm')
         if geo:
-            csv_fn = Path(vtp_fn.parent, csv_fn.stem + '_geo' + csv_fn.suffix)
+            csv_fn = csv_fn.with_stem(csv_fn.stem + '_geo')
     else:
         csv_fn = None
 
@@ -177,17 +175,18 @@ def main():
     transform_filter.SetTransform(transform)
     transform_filter.Update()
 
-    writer = vtkXMLPolyDataWriter()
-    writer.SetFileName(vtp_fn)
-    writer.SetInputConnection(transform_filter.GetOutputPort())
-    writer.SetDataModeToBinary()
-    writer.Write()
+    if generate_vtp:
+        writer = vtkXMLPolyDataWriter()
+        writer.SetFileName(vtp_fn)
+        writer.SetInputConnection(transform_filter.GetOutputPort())
+        writer.SetDataModeToBinary()
+        writer.Write()
 
     colors = vtkNamedColors()
     colors.SetColor("ParaViewBkg", [82, 87, 110, 255])
 
     lut = get_diverging_lut('cool_warm')
-    # lut = get_diverging_lut1('MidnightBlue', 'Gainsboro', 'DarkOrange')
+    # lut = get_diverging_lut1('DarkRed', 'Gainsboro', 'Green')
 
     mapper = vtkPolyDataMapper()
     mapper.SetInputConnection(transform_filter.GetOutputPort())
diff --git a/src/Python/IO/CSVReader1.md b/src/Python/IO/CSVReader1.md
index 9a59ba450e9b04d5fb94f7bf2b78d0f158823e4e..c3bfb3a1bd034bfe3b14cedb3fdbeca3b78d26cd 100644
--- a/src/Python/IO/CSVReader1.md
+++ b/src/Python/IO/CSVReader1.md
@@ -1,10 +1,10 @@
 ### Description
 
-This example creates a PolyData object containing lines and points from a CSV file and writes it to a .vtp file.
+This example loads a CSV file, edits it and visualises the result.
 
-It demonstrates the use of [pandas](https://pandas.pydata.org/) to read the CSV input file and to create a temporary file containing the desired columns. This temporary file is subsequently read and parsed using vtkDelimitedTextReader.
+It demonstrates the use of [pandas](https://pandas.pydata.org/) to read and edit the CSV input file, and then to create a temporary file containing the desired columns. This temporary file is subsequently read and parsed using vtkDelimitedTextReader.
 
-The key thing about `pandas` is it can read/write data in various formats: CSV and text files, Microsoft Excel, SQL databases, and the fast HDF5 format. It is highly optimized for performance and the DataFrame object allows for extensive row/column manipulation. So we can edit the data, creating new columns, and, finally, selecting only relevant columns for further analysis by VTK.
+The key thing about `pandas` is it can read/write data in various formats: CSV and text files, Microsoft Excel, SQL databases, and the fast HDF5 format. It is highly optimized for performance and the DataFrame object allows for extensive row/column manipulation. So we can edit the data, creating new columns, and, finally, select only relevant columns for further analysis by VTK.
 
 In this case we create a CSV file of selected columns and read this with vtkDelimitedTextReader.
 
diff --git a/src/Python/IO/CSVReader1.py b/src/Python/IO/CSVReader1.py
index 95685685e51e0236a0640ba9083adec634f0c221..07d567d4641a5892b2e40f0a338dc44b9c40def3 100755
--- a/src/Python/IO/CSVReader1.py
+++ b/src/Python/IO/CSVReader1.py
@@ -36,32 +36,33 @@ from vtkmodules.vtkRenderingCore import (
 
 def get_program_parameters():
     import argparse
-    description = 'Create a PolyData object containing lines and points from a CSV file and write it to a .vtp file.'
+    description = 'Edit data from a CSV file and visualise it.'
     epilogue = '''
     This program selects ECEF or UTM coordinates from the input file and:
-       1) Creates a VTP file.
-       2) Optionally saves the edited file used to create the visualisation as a CSV file.
-       3) Visualises the resultant points and lines.
+       1) Visualises the resultant points and lines.
+       2) Optionally creates a VTP file for further analysis.
+       3) Optionally saves the edited file used to create the visualisation as a CSV file.
     If Geographic coordinates are selected, just the resultant CSV file is saved.
     '''
     parser = argparse.ArgumentParser(description=description, epilog=epilogue,
                                      formatter_class=argparse.RawTextHelpFormatter)
-    group = parser.add_mutually_exclusive_group(required=True)
     parser.add_argument('file_name', help='The CSV file containing the data.')
+    parser.add_argument('-v', '--generate_vtp', action='store_true', help='Generate the .vtp file.')
     parser.add_argument('-o', '--out_csv_fn', default=None, help='The file name for the edited CSV file.')
     parser.add_argument('-p', '--path', default='.',
                         help='The path to be appended to the .vtp and optional .csv file')
 
+    group = parser.add_mutually_exclusive_group(required=True)
     group.add_argument('-e', '--ecef', action='store_true', help='Use ECEF coordinates.')
     group.add_argument('-u', '--utm', action='store_true', help='Use UTM coordinates.')
     group.add_argument('-g', '--geo', action='store_true', help='Use geographic coordinates (latitude/longitude).')
 
     args = parser.parse_args()
-    return args.file_name, args.out_csv_fn, args.path, args.ecef, args.utm, args.geo
+    return args.file_name, args.generate_vtp, args.out_csv_fn, args.path, args.ecef, args.utm, args.geo
 
 
 def main():
-    ifn, ofn, sp, ecef, utm, geo = get_program_parameters()
+    ifn, generate_vtp, ofn, sp, ecef, utm, geo = get_program_parameters()
     file_name = Path(ifn)
     if not file_name.is_file():
         print('Unable to read:', file_name)
@@ -73,25 +74,22 @@ def main():
             return
     pth.mkdir(parents=True, exist_ok=True)
 
-    vtp_fn = Path(ifn).with_suffix('.vtp')
-    if not vtp_fn.root:
-        vtp_fn = pth / vtp_fn.name
+    # Build the output paths.
+    vtp_fn = Path(pth / Path(ifn).stem).with_suffix('.vtp')
     if ecef:
-        vtp_fn = Path(vtp_fn.parent, vtp_fn.stem + '_ecef' + vtp_fn.suffix)
+        vtp_fn = vtp_fn.with_stem(vtp_fn.stem + '_ecef')
     if utm:
-        vtp_fn = Path(vtp_fn.parent, vtp_fn.stem + '_utm' + vtp_fn.suffix)
+        vtp_fn = vtp_fn.with_stem(vtp_fn.stem + '_utm')
     if ofn:
-        csv_fn = Path(ofn)
-        if not csv_fn.root:
-            csv_fn = pth / csv_fn.name
+        csv_fn = Path(pth / Path(ofn).name)
         if not csv_fn.suffix.lower() == '.csv':
             csv_fn = csv_fn.with_suffix('.csv')
         if ecef:
-            csv_fn = Path(vtp_fn.parent, csv_fn.stem + '_ecef' + csv_fn.suffix)
+            csv_fn = csv_fn.with_stem(csv_fn.stem + '_ecef')
         if utm:
-            csv_fn = Path(vtp_fn.parent, csv_fn.stem + '_utm' + csv_fn.suffix)
+            csv_fn = csv_fn.with_stem(csv_fn.stem + '_utm')
         if geo:
-            csv_fn = Path(vtp_fn.parent, csv_fn.stem + '_geo' + csv_fn.suffix)
+            csv_fn = csv_fn.with_stem(csv_fn.stem + '_geo')
     else:
         csv_fn = None
 
@@ -186,17 +184,18 @@ def main():
     transform_filter.SetTransform(transform)
     transform_filter.Update()
 
-    writer = vtkXMLPolyDataWriter()
-    writer.SetFileName(vtp_fn)
-    writer.SetInputConnection(transform_filter.GetOutputPort())
-    writer.SetDataModeToBinary()
-    writer.Write()
+    if generate_vtp:
+        writer = vtkXMLPolyDataWriter()
+        writer.SetFileName(vtp_fn)
+        writer.SetInputConnection(transform_filter.GetOutputPort())
+        writer.SetDataModeToBinary()
+        writer.Write()
 
     colors = vtkNamedColors()
     colors.SetColor("ParaViewBkg", [82, 87, 110, 255])
 
     lut = get_diverging_lut('cool_warm')
-    # lut = get_diverging_lut1('MidnightBlue', 'Gainsboro', 'DarkOrange')
+    # lut = get_diverging_lut1('DarkRed', 'Gainsboro', 'Green')
 
     mapper = vtkPolyDataMapper()
     mapper.SetInputConnection(transform_filter.GetOutputPort())