Commit 52528801 authored by camp

Internallauncher

    I added a function that returns the correct argument for the "process per node"
flag for Open MPI and MPICH. This change affects the mpirun and mpiexec commands.
If your system uses a different argument, you can override this function. The
argument is only added when the user sets VisIt's -nn flag.
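
    For illustration only (a hypothetical site-specific submitter, not part of
this commit), a customlauncher whose MPI stack is always Open MPI could override
the function like this:

    class JobSubmitter_mpirun_MySite(JobSubmitter_mpirun):
        def PPNArgument(self):
            # This site always runs Open MPI, so skip the "mpirun --version"
            # probe in the base class and return Open MPI's flag directly.
            return "-npernode"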

    I changed the sbatch job submitter to work like qsub. This was done for
NERSC's new Cori system. It now has many functions that can be overridden to
support any system that uses sbatch. I also added a new srun job launcher.
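
    For example, a site can subclass JobSubmitter_sbatch and override only the
pieces it needs. A minimal sketch (the class name and the module name are
hypothetical; the NERSC launcher in this commit overrides CreateFilename in the
same way):

    class JobSubmitter_sbatch_MySite(JobSubmitter_sbatch):
        def CreateFilename(self):
            # Write the generated batch script under $HOME instead of /tmp.
            tdate = time.asctime()[11:19]
            tuser = self.launcher.username()
            return os.path.join(GETENV("HOME"), "visit.%s.%s" % (tuser, tdate))
        def TFileLoadModules(self, tfile):
            # Load whatever environment modules the compute nodes need.
            tfile.write("module load visit\n")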

    Added the hosttoip variable to MainLauncher to convert a host name to an IP
address. We hit a problem where compute nodes could not resolve the host name to
an IP address. By default this is turned off; set hosttoip=True in your
MainLauncher override to enable it. See the NERSC custom launcher for an example.
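
    A minimal sketch of such an override (the class name is hypothetical; the
NERSC customlauncher in this commit does the equivalent):

    class MainLauncher_MySite(MainLauncher):
        def __init__(self):
            super(MainLauncher_MySite, self).__init__()
            # Compute nodes cannot resolve host names on this system, so pass
            # the parallel engine an IP address instead of a host name.
            self.hosttoip = True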

NERSC customlauncher
    Removed references to Carver; that system has been retired. Added sbatch
support and turned on the hosttoip flag to translate the host name to an IP
address.

gui/QvisHostProfileWindow.C
    Added the sbatch/srun option to the list of launchers.

config-site/cori.nersc.gov.cmake
    Added site file for NERSC's Cori system.

Removed NERSC's Carver site config file; the system has been retired.



git-svn-id: http://visit.ilight.com/svn/visit/trunk/src@27736 18c085ea-50e0-402c-830e-de6fd14e8384
parent eb7102e0
@@ -18,6 +18,10 @@ import time
# I modified the script so that all the arguments are passed to subsequent
# components before the "-s script_file" argument.
#
# David Camp, Mon Oct 26 10:13:49 PDT 2015
# I added a function to return the correct argument for the process per node
# flag for Open MPI and MPICH.
#
###############################################################################
class JobSubmitter(object):
@@ -65,6 +69,19 @@ class JobSubmitter(object):
def PPN(self):
return str(int(math.ceil(float(self.parallel.np) / float(self.parallel.nn))))
# Open MPI and MPICH have different arguments for the process per node flag.
# This function tries to decide which flag to use. It defaults to MPICH.
def PPNArgument(self):
p = subprocess.Popen(["mpirun", "--version"], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
output = p.communicate()
new = ' '.join(output)
if new.find("Open MPI") == -1:
# MPICH
return "-ppn"
else:
# Open MPI
return "-npernode"
def CreateCommand(self, args, debugger):
parcmd = self.Executable()
parcmd = debugger.CreateCommand(parcmd + self.VisItExecutable() + args)
@@ -88,6 +105,10 @@ class JobSubmitter(object):
# Make adding of -ppn to mpirun dependent on "self.parallel.nn".
# Removed useppnmpirun.
#
# David Camp, Mon Oct 26 10:13:49 PDT 2015
# Call the new PPNArgument function to get the correct MPI argument for
# the "process per node" flag.
#
###############################################################################
class JobSubmitter_mpirun(JobSubmitter):
@@ -104,7 +125,7 @@ class JobSubmitter_mpirun(JobSubmitter):
if self.parallel.np != None:
parcmd = parcmd + ["-np", self.parallel.np]
if self.parallel.nn != None:
parcmd = parcmd + ["-ppn", self.PPN()]
parcmd = parcmd + [self.PPNArgument(), self.PPN()]
if self.parallel.partition != None:
parcmd = parcmd + ["-p", self.parallel.partition]
if self.parallel.machinefile != None:
@@ -144,6 +165,10 @@ class JobSubmitter_mpirun(JobSubmitter):
# Make adding of -ppn to mpiexec dependent on "self.parallel.nn".
# Removed useppnmpiexec.
#
# David Camp, Mon Oct 26 10:13:49 PDT 2015
# Call the new PPNArgument function to get the correct MPI argument for
# the "process per node" flag.
#
################################################################################
class JobSubmitter_mpiexec(JobSubmitter):
@@ -168,7 +193,7 @@ class JobSubmitter_mpiexec(JobSubmitter):
if self.parallel.np != None:
parcmd = parcmd + ["-n", self.parallel.np]
if self.parallel.nn != None:
parcmd = parcmd + ["-ppn", self.PPN()]
parcmd = parcmd + [self.PPNArgument(), self.PPN()]
if self.parallel.partition != None:
parcmd = parcmd + ["-p", self.parallel.partition]
if self.parallel.machinefile != None:
@@ -510,6 +535,10 @@ class JobSubmitter_salloc(JobSubmitter):
# I added a fix from Jean Favre that corrected the setting of the -N option
# for aprun.
#
# David Camp, Thu Oct 1 11:37:53 PDT 2015
# Update sbatch to be more like qsub to support NERSC's new system, Cori.
# Also added the srun mpi launcher option.
#
###############################################################################
class JobSubmitter_sbatch(JobSubmitter):
@@ -519,8 +548,10 @@ class JobSubmitter_sbatch(JobSubmitter):
def Executable(self):
return ["sbatch"]
def PPNArguments(self, ppn):
return ["--npernode", ppn, "numa_wrapper", "--ppn", ppn]
def CreateFilename(self):
tdate = time.asctime()[11:19]
tuser = self.launcher.username()
return os.path.join("/tmp", "visit.%s.%s" % (tuser, tdate))
def HandledHardwareArguments(self):
return 1
@@ -528,57 +559,140 @@ class JobSubmitter_sbatch(JobSubmitter):
def TFileLoadModules(self, tfile):
return
def CreateFilename(self):
tdate = time.asctime()[11:19]
tuser = self.launcher.username()
return os.path.join("/tmp", "visit.%s.%s" % (tuser, tdate))
def TFileSetup(self, tfile):
tfile.write("cd %s\n" % os.path.abspath(os.curdir))
tfile.write("ulimit -c 0\n")
#tfile.write("# Submitted on host %s\n" % self.launcher.hostname())
#for v in ("LIBPATH", "LD_LIBRARY_PATH", "VISITHOME", "VISITARCHHOME", "VISITPLUGINDIR"):
# tfile.write('echo "%s=$%s"\n' % (v,v))
def Writable(self, d):
return self.launcher.writepermission(d)
def HandleCWDPermissions(self):
args = []
if not self.Writable(os.path.abspath(os.curdir)):
msg = """
The directory you started VisIt in does not have write permission.
Using /dev/null for the batch job standard error and standard
output streams.
"""
self.launcher.warning(msg)
args = args + ["-e", "/dev/null", "-o", "/dev/null"]
return args
def AddEnvironment(self):
env = "--export="
env = env + "HOME=" + GETENV("HOME")
env = env + ",LIBPATH=" + GETENV("LIBPATH")
env = env + ",LD_LIBRARY_PATH=" + GETENV("LD_LIBRARY_PATH")
env = env + ",VISITHOME=" + GETENV("VISITHOME")
env = env + ",VISITARCHHOME=" + GETENV("VISITARCHHOME")
env = env + ",VISITPLUGINDIR=" + GETENV("VISITPLUGINDIR")
return [env]
def mpiexec(self):
return ["mpiexec"]
def mpiexec_args(self, args):
mpicmd = self.mpiexec()
if self.parallel.sublaunchargs != None:
mpicmd = mpicmd + self.parallel.sublaunchargs
if self.parallel.np != None:
mpicmd = mpicmd + ["-n", self.parallel.np]
if self.parallel.nn != None:
mpicmd = mpicmd + [self.PPNArgument(), self.PPN()]
if self.parallel.machinefile != None:
mpicmd = mpicmd + ["-machinefile", self.parallel.machinefile]
mpicmd = mpicmd + self.VisItExecutable() + args
return mpicmd
def aprun(self):
return ["aprun"]
def aprun_args(self, args):
mpicmd = self.aprun()
if self.parallel.sublaunchargs != None:
mpicmd = mpicmd + self.parallel.sublaunchargs
if self.parallel.np != None:
mpicmd = mpicmd + ["-n", self.parallel.np]
if self.parallel.nn != None:
mpicmd = mpicmd + ["-N", self.PPN()]
mpicmd = mpicmd + self.VisItExecutable() + args
return mpicmd
def mpirun(self):
return ["mpirun"]
def mpirun_args(self, args):
mpicmd = self.mpirun()
if self.parallel.sublaunchargs != None:
mpicmd = mpicmd + self.parallel.sublaunchargs
if self.parallel.np != None:
mpicmd = mpicmd + ["-np", self.parallel.np]
if self.parallel.nn != None:
mpicmd = mpicmd + [self.PPNArgument(), self.PPN()]
if self.parallel.machinefile != None:
mpicmd = mpicmd + ["-machinefile", self.parallel.machinefile]
mpicmd = mpicmd + self.VisItExecutable() + args
return mpicmd
def ibrun(self):
return ["ibrun"]
def ibrun_args(self, args):
mpicmd = self.ibrun()
if self.parallel.sublaunchargs != None:
mpicmd = mpicmd + self.parallel.sublaunchargs
mpicmd = mpicmd + self.VisItExecutable() + args
return mpicmd
def srun(self):
return ["srun"]
def srun_args(self, args):
mpicmd = self.srun()
if self.parallel.sublaunchargs != None:
mpicmd = mpicmd + self.parallel.sublaunchargs
if self.parallel.np != None:
mpicmd = mpicmd + ["--ntasks=%s" % self.parallel.np]
if self.parallel.nn != None:
mpicmd = mpicmd + ["--nodes=%s" % self.parallel.nn]
mpicmd = mpicmd + ["--tasks-per-node=%s" % self.PPN()]
mpicmd = mpicmd + self.VisItExecutable() + args
return mpicmd
def CreateCommand(self, args, debugger):
parcmd = self.Executable()
parcmd = parcmd + self.HandleCWDPermissions()
parcmd = parcmd + self.AddEnvironment()
if self.parallel.launchargs != None:
parcmd = parcmd + self.parallel.launchargs
parcmd = parcmd + self.parallel.launchargs
if self.parallel.partition != None:
parcmd = parcmd + ["--partition=%s" % self.parallel.partition]
parcmd = parcmd + ["--partition=%s" % self.parallel.partition]
if self.parallel.bank != None:
parcmd = parcmd + ["--account=%s" % self.parallel.bank]
parcmd = parcmd + ["--account=%s" % self.parallel.bank]
if self.parallel.time != None:
parcmd = parcmd + ["--time=%s" % self.parallel.time]
if self.parallel.nn != None:
parcmd = parcmd + ["--nodes=%s" % self.parallel.nn]
parcmd = parcmd + ["--time=%s" % self.parallel.time]
if self.parallel.np != None:
parcmd = parcmd + ["--ntasks=%s" % self.parallel.np]
parcmd = parcmd + ["--ntasks=%s" % self.parallel.np]
if self.parallel.nn != None:
parcmd = parcmd + ["--nodes=%s" % self.parallel.nn]
parcmd = parcmd + ["--tasks-per-node=%s" % self.PPN()]
if self.parallel.name != None:
parcmd = parcmd + ["--job-name=%s" % self.parallel.name]
ppn = "8"
if self.parallel.nn != None:
ppn = self.PPN()
sbatch,sublauncher = self.LauncherAndSubLauncher()
if sublauncher == "mpiexec":
mpicmd = ["mpiexec"]
if self.parallel.sublaunchargs != None:
mpicmd = mpicmd + self.parallel.sublaunchargs
mpicmd = mpicmd + self.PPNArguments(ppn)
mpicmd = mpicmd + self.VisItExecutable() + args
if sublauncher == "srun":
mpicmd = self.srun_args(args)
elif sublauncher == "mpiexec":
mpicmd = self.mpiexec_args(args)
elif sublauncher == "aprun":
mpicmd = ["aprun", "-n", self.parallel.np]
mpicmd = mpicmd + ["-N", self.PPN()]
if self.parallel.sublaunchargs != None:
mpicmd = mpicmd + self.parallel.sublaunchargs
mpicmd = mpicmd + self.VisItExecutable() + args
mpicmd = self.aprun_args(args)
elif sublauncher == "mpirun":
mpicmd = ["mpirun"]
if self.parallel.sublaunchargs != None:
mpicmd = mpicmd + self.parallel.sublaunchargs
if self.parallel.np != None:
mpicmd = mpicmd + ["-np", self.parallel.np]
mpicmd = mpicmd + ["--ppn", str(ppn)]
mpicmd = mpicmd + self.VisItExecutable() + args
mpicmd = self.mpirun_args(args)
elif sublauncher == "ibrun":
mpicmd = ["ibrun"]
if self.parallel.sublaunchargs != None:
mpicmd = mpicmd + self.parallel.sublaunchargs
mpicmd = mpicmd + self.VisItExecutable() + args
mpicmd = self.ibrun_args(args)
else:
mpicmd = self.VisItExecutable() + args
@@ -590,8 +704,8 @@ class JobSubmitter_sbatch(JobSubmitter):
tfile = open(tfilename, "wt")
tfile.write("#!/bin/sh\n")
self.TFileLoadModules(tfile)
tfile.write("cd %s\n" % os.path.abspath(os.curdir))
tfile.write("ulimit -c 0\n")
self.TFileSetup(tfile)
if self.parallel.hw_precmd != None:
tfile.write(self.parallel.hw_precmd + "\n")
if self.parallel.sublaunchprecmd != None:
@@ -601,7 +715,9 @@ class JobSubmitter_sbatch(JobSubmitter):
tfile.write(string.join(self.parallel.sublaunchpostcmd, " ") + "\n")
if self.parallel.hw_postcmd != None:
tfile.write(self.parallel.hw_postcmd + "\n")
tfile.close()
os.chmod(tfilename, 0775)
except (OSError, IOError):
exit("Could not create script file to launch %s job." % self.parallel.launcher, 0)
@@ -700,6 +816,10 @@ class JobSubmitter_yod(JobSubmitter):
# Make adding of -ppn to mpirun and mpiexec dependent on "self.parallel.nn".
# Removed useppnmpiexec and useppnmpirun.
#
# David Camp, Mon Oct 26 10:13:49 PDT 2015
# Call the new PPNArgument function to get the correct MPI argument for
# the "process per node" flag.
#
###############################################################################
class JobSubmitter_qsub(JobSubmitter):
@@ -741,7 +861,7 @@ class JobSubmitter_qsub(JobSubmitter):
if self.parallel.np != None:
mpicmd = mpicmd + ["-n", self.parallel.np]
if self.parallel.nn != None:
mpicmd = mpicmd + ["-ppn", self.PPN()]
mpicmd = mpicmd + [self.PPNArgument(), self.PPN()]
if self.parallel.machinefile != None:
mpicmd = mpicmd + ["-machinefile", self.parallel.machinefile]
mpicmd = mpicmd + self.VisItExecutable() + args
@@ -757,7 +877,7 @@ class JobSubmitter_qsub(JobSubmitter):
if self.parallel.np != None:
mpicmd = mpicmd + ["-np", self.parallel.np]
if self.parallel.nn != None:
mpicmd = mpicmd + ["-ppn", self.PPN()]
mpicmd = mpicmd + [self.PPNArgument(), self.PPN()]
if self.parallel.machinefile != None:
mpicmd = mpicmd + ["-machinefile", self.parallel.machinefile]
mpicmd = mpicmd + self.VisItExecutable()
@@ -774,6 +894,9 @@ class JobSubmitter_qsub(JobSubmitter):
mpicmd = mpicmd + self.parallel.sublaunchargs
if self.parallel.np != None:
mpicmd = mpicmd + ["-n", self.parallel.np]
if self.parallel.nn != None:
mpicmd = mpicmd + ["--nodes=%s" % self.parallel.nn]
mpicmd = mpicmd + ["--tasks-per-node=%s" % self.PPN()]
mpicmd = mpicmd + self.VisItExecutable() + args
return mpicmd
@@ -1424,6 +1547,8 @@ class GeneralArguments(object):
self.threads = 0
self.hw_accel = False
self.osmesa = False
self.hosttoip = False
def ParseArguments(self, i, args):
if i >= len(args):
return 0
@@ -1513,7 +1638,6 @@ class GeneralArguments(object):
self.osmesa = True
return n
def ProduceArguments(self, parallel):
args = []
if self.dv:
@@ -1555,6 +1679,16 @@ class GeneralArguments(object):
return args
def GetIPAddress(self, hostname):
if not self.hosttoip:
return hostname
try:
IP = socket.gethostbyname(hostname)
except socket.gaierror, err:
print "cannot resolve hostname: ", hostname, err
IP = hostname
return IP
def ConnectionArguments(self, parallel):
# Localhost override to loopback. The default behavior is that we
# will always override the given host with the loopback device unless
@@ -1612,9 +1746,9 @@ name.""", 1)
if self.noloopback or self.sshtunneling or self.guesshost or self.host != None:
if remotehost_set:
args = args + ["-host", remotehost]
args = args + ["-host", self.GetIPAddress(remotehost)]
elif self.host != None:
args = args + ["-host", self.host]
args = args + ["-host", self.GetIPAddress(self.host)]
if self.port != None:
args = args + ["-port", self.port]
@@ -1862,6 +1996,7 @@ class MainLauncher(object):
self.clientPrograms = ("gui", "cli", "viewer")
self.componentNames = ("gui", "cli", "viewer", "mdserver", "vcl", "engine_ser", "engine_par", "xmledit")
self.xterm = ["xterm"]
self.hosttoip = False
def username(self):
try:
@@ -1980,6 +2115,8 @@ class MainLauncher(object):
self.visitver = visitver
self.visitpluginver = visitpluginver
self.generalArgs = GeneralArguments(self)
if self.hosttoip:
self.generalArgs.hosttoip = True
self.parallelArgs = ParallelArguments()
self.debugArgs = DebugArguments(self)
self.os = self.uname("-s")
......
#/usr/common/graphics/visit/installs/3rdparty/cmake/3.0.2/linux-x86_64_gcc-4.9/bin/cmake
##
## ./src/svn_bin/build_visit generated host.cmake
## created: Tue Nov 17 10:45:36 PST 2015
## system: Linux cori10 3.0.101-0.46-default #1 SMP Wed Dec 17 11:04:10 UTC 2014 (8356111) x86_64 x86_64 x86_64 GNU/Linux
## by: camp
##
## Setup VISITHOME & VISITARCH variables.
##
SET(VISITHOME /usr/common/graphics/visit/installs/3rdparty)
SET(VISITARCH linux-x86_64_gcc-4.9)
VISIT_OPTION_DEFAULT(VISIT_INSTALL_PROFILES_TO_HOSTS "nersc" TYPE STRING)
VISIT_OPTION_DEFAULT(CMAKE_INSTALL_PREFIX /usr/common/graphics/visit)
VISIT_OPTION_DEFAULT(VISIT_INSTALL_THIRD_PARTY ON)
VISIT_OPTION_DEFAULT(CMAKE_BUILD_TYPE Release)
## Compiler flags.
##
VISIT_OPTION_DEFAULT(VISIT_C_COMPILER gcc TYPE FILEPATH)
VISIT_OPTION_DEFAULT(VISIT_CXX_COMPILER g++ TYPE FILEPATH)
VISIT_OPTION_DEFAULT(VISIT_C_FLAGS " -m64 -fPIC -fvisibility=hidden -DVIZSCHEMA_DECOMPOSE_DOMAINS" TYPE STRING)
VISIT_OPTION_DEFAULT(VISIT_CXX_FLAGS " -m64 -fPIC -fvisibility=hidden -DVIZSCHEMA_DECOMPOSE_DOMAINS" TYPE STRING)
##
## Parallel Build Setup.
##
VISIT_OPTION_DEFAULT(VISIT_MPI_C_FLAGS "-I/opt/cray/mpt/7.2.4/gni/mpich2-gnu/4.9/include" TYPE STRING)
VISIT_OPTION_DEFAULT(VISIT_MPI_CXX_FLAGS "-I/opt/cray/mpt/7.2.4/gni/mpich2-gnu/4.9/include" TYPE STRING)
VISIT_OPTION_DEFAULT(VISIT_MPI_LD_FLAGS "-L/opt/cray/mpt/7.2.4/gni/mpich2-gnu/4.9/lib -L/opt/cray/pmi/5.0.8-1.0000.10843.170.1.ari/lib64 -L/opt/cray/alps/5.2.3-2.0502.9295.14.14.ari/lib64 -Wl,-rpath,/opt/cray/mpt/7.2.4/gni/mpich2-gnu/4.9/lib -Wl,-rpath,/opt/cray/pmi/5.0.8-1.0000.10843.170.1.ari/lib64 -Wl,-rpath,/opt/cray/alps/5.2.3-2.0502.9295.14.14.ari/lib64 " TYPE STRING)
VISIT_OPTION_DEFAULT(VISIT_MPI_LIBS mpich pmi alpslli alpsutil pthread rt)
VISIT_OPTION_DEFAULT(VISIT_PARALLEL ON TYPE BOOL)
##
## VisIt Thread Option
##
VISIT_OPTION_DEFAULT(VISIT_THREAD OFF TYPE BOOL)
##############################################################
##
## Database reader plugin support libraries
##
## The HDF4, HDF5 and NetCDF libraries must be first so that
## their libdeps are defined for any plugins that need them.
##
## For libraries with LIBDEP settings, order matters.
## Libraries with LIBDEP settings that depend on other
## Library's LIBDEP settings must come after them.
##############################################################
##
##
## Mesa
##
VISIT_OPTION_DEFAULT(VISIT_MESA_DIR ${VISITHOME}/mesa/7.10.2/${VISITARCH})
##
## Python
##
VISIT_OPTION_DEFAULT(VISIT_PYTHON_DIR ${VISITHOME}/python/2.7.6/${VISITARCH})
##
## Qt
##
VISIT_OPTION_DEFAULT(VISIT_QT_BIN ${VISITHOME}/qt/4.8.3/${VISITARCH}/bin)
##
## VTK
##
SETUP_APP_VERSION(VTK 6.1.0)
VISIT_OPTION_DEFAULT(VISIT_VTK_DIR ${VISITHOME}/vtk/${VTK_VERSION}/${VISITARCH})
##
##
## ADIOS
## (configured w/ mpi compiler wrapper)
##
VISIT_OPTION_DEFAULT(VISIT_ADIOS_DIR ${VISITHOME}/adios/1.7.0/${VISITARCH})
##
## Boxlib
##
VISIT_OPTION_DEFAULT(VISIT_BOXLIB_DIR ${VISITHOME}/boxlib/1.0.7/${VISITARCH})
##
## CCMIO
##
VISIT_OPTION_DEFAULT(VISIT_CCMIO_DIR ${VISITHOME}/ccmio/2.6.1/${VISITARCH})
##
## CFITSIO
##
VISIT_OPTION_DEFAULT(VISIT_CFITSIO_DIR ${VISITHOME}/cfitsio/3006/${VISITARCH})
##
## SZIP
##
VISIT_OPTION_DEFAULT(VISIT_SZIP_DIR ${VISITHOME}/szip/2.1/${VISITARCH})
##
## HDF5
##
VISIT_OPTION_DEFAULT(VISIT_HDF5_DIR ${VISITHOME}/hdf5/1.8.14/${VISITARCH})
VISIT_OPTION_DEFAULT(VISIT_HDF5_LIBDEP ${VISITHOME}/szip/2.1/${VISITARCH}/lib sz /usr/lib z TYPE STRING)
##
## CGNS
##
VISIT_OPTION_DEFAULT(VISIT_CGNS_DIR ${VISITHOME}/cgns/3.2.1/${VISITARCH})
VISIT_OPTION_DEFAULT(VISIT_CGNS_LIBDEP HDF5_LIBRARY_DIR hdf5 ${VISIT_HDF5_LIBDEP} TYPE STRING)
##
## FastBit
##
VISIT_OPTION_DEFAULT(VISIT_FASTBIT_DIR ${VISITHOME}/fastbit/1.2.0/${VISITARCH})
##
## GDAL
##
VISIT_OPTION_DEFAULT(VISIT_GDAL_DIR ${VISITHOME}/gdal/1.10.0/${VISITARCH})
##
## H5Part
##
VISIT_OPTION_DEFAULT(VISIT_H5PART_DIR ${VISITHOME}/h5part/1.6.6/${VISITARCH})
VISIT_OPTION_DEFAULT(VISIT_H5PART_LIBDEP HDF5_LIBRARY_DIR hdf5 ${VISIT_HDF5_LIBDEP} TYPE STRING)
##
## HDF4
##
VISIT_OPTION_DEFAULT(VISIT_HDF4_DIR ${VISITHOME}/hdf4/4.2.5/${VISITARCH})
VISIT_OPTION_DEFAULT(VISIT_HDF4_LIBDEP ${VISITHOME}/szip/2.1/${VISITARCH}/lib sz ${VISITHOME}/vtk/${VTK_VERSION}/${VISITARCH}/lib vtkjpeg-${VTK_MAJOR_VERSION}.${VTK_MINOR_VERSION} TYPE STRING)
##
## Ice-T
##
VISIT_OPTION_DEFAULT(VISIT_ICET_DIR ${VISITHOME}/icet/1.0.0/${VISITARCH})
##
## NetCDF
##
VISIT_OPTION_DEFAULT(VISIT_NETCDF_DIR ${VISITHOME}/netcdf/4.1.1/${VISITARCH})
VISIT_OPTION_DEFAULT(VISIT_NETCDF_LIBDEP HDF5_LIBRARY_DIR hdf5_hl HDF5_LIBRARY_DIR hdf5 ${VISIT_HDF5_LIBDEP} TYPE STRING)
##
## ITAPS
##
## MOAB implementation
ITAPS_INCLUDE_DIRECTORIES(MOAB ${VISITHOME}/itaps/1.4/MOAB/4.8.1/${VISITARCH}/include)
ITAPS_FILE_PATTERNS(MOAB *.cub)
ITAPS_LINK_LIBRARIES(MOAB iMesh MOAB hdf5_hl hdf5 sz z netcdf_c++ netcdf )
ITAPS_LINK_DIRECTORIES(MOAB ${VISITHOME}/itaps/1.4/MOAB/4.8.1/${VISITARCH}/lib ${VISITHOME}/hdf5/1.8.14/${VISITARCH}/lib ${VISITHOME}/szip/2.1/${VISITARCH}/lib ${VISITHOME}/netcdf/4.1.1/${VISITARCH}/lib )
## FMDB implementation
ITAPS_INCLUDE_DIRECTORIES(FMDB ${VISITHOME}/itaps/1.4/FMDB/1.4.0/${VISITARCH}/include)
ITAPS_FILE_PATTERNS(FMDB *.sms)
ITAPS_LINK_LIBRARIES(FMDB FMDB SCORECModel SCORECUtil )
ITAPS_LINK_DIRECTORIES(FMDB ${VISITHOME}/itaps/1.4/FMDB/1.4.0/${VISITARCH}/lib)
## GRUMMP implementation
ITAPS_INCLUDE_DIRECTORIES(GRUMMP ${VISITHOME}/itaps/1.4/GRUMMP/0.6.5/${VISITARCH}/include)
ITAPS_FILE_PATTERNS(GRUMMP *.bdry *.smesh *.vmesh)
ITAPS_LINK_LIBRARIES(GRUMMP iMesh_GRUMMP GR_3D GR_surf GR_2D GR_geom GR_base SUMAAlog_lite OptMS cgm dl)
ITAPS_LINK_DIRECTORIES(GRUMMP ${VISITHOME}/itaps/1.4/GRUMMP/0.6.5/${VISITARCH}/lib)
##
## PySide
##
VISIT_OPTION_DEFAULT(VISIT_PYSIDE_DIR ${VISITHOME}/pyside/1.2.2/${VISITARCH}/)
##
## Silo
##
VISIT_OPTION_DEFAULT(VISIT_SILO_DIR ${VISITHOME}/silo/4.10.1/${VISITARCH})
VISIT_OPTION_DEFAULT(VISIT_SILO_LIBDEP HDF5_LIBRARY_DIR hdf5 ${VISIT_HDF5_LIBDEP} TYPE STRING)
##
## VISUS
##
VISIT_OPTION_DEFAULT(VISIT_VISUS_DIR ${VISITHOME}/visus/ad09cb8/${VISITARCH})
##
## Xdmf
##
VISIT_OPTION_DEFAULT(VISIT_XDMF_DIR ${VISITHOME}/Xdmf/2.1.1/${VISITARCH})
VISIT_OPTION_DEFAULT(VISIT_XDMF_LIBDEP HDF5_LIBRARY_DIR hdf5 VTK_LIBRARY_DIRS vtklibxml2-${VTK_MAJOR_VERSION}.${VTK_MINOR_VERSION} TYPE STRING)
@@ -1159,6 +1159,7 @@ QvisHostProfileWindow::CreateLaunchSettingsGroup()
launchMethod->addItem("sbatch/ibrun");
launchMethod->addItem("sbatch/mpiexec");
launchMethod->addItem("sbatch/mpirun");
launchMethod->addItem("sbatch/srun");
connect(launchMethod, SIGNAL(activated(const QString &)),
this, SLOT(launchMethodChanged(const QString &)));
launchCheckBox = new QCheckBox(tr("Parallel launch method"), currentGroup);
......
import math
###############################################################################
# Class: JobSubmitter_sbatch_NERSC
#
# Purpose: Custom "sbatch" job submitter for NERSC.
#
# Programmer: David Camp
# Date: Thu Oct 1 08:39:34 PDT 2015
#
# Modifications:
#
###############################################################################
class JobSubmitter_sbatch_NERSC(JobSubmitter_sbatch):
def __init__(self, launcher):
super(JobSubmitter_sbatch_NERSC, self).__init__(launcher)
def CreateFilename(self):
tdate = time.asctime()[11:19]
tuser = self.launcher.username()
return os.path.join(GETENV("HOME"), "visit.%s.%s" % (tuser, tdate))
###############################################################################
# Class: JobSubmitter_aprun_NERSC
#
@@ -18,50 +39,13 @@ class JobSubmitter_aprun_NERSC(JobSubmitter_aprun):
def __init__(self, launcher):
super(JobSubmitter_aprun_NERSC, self).__init__(launcher)
#
# Override the name of the aprun executable and use fully qualified path
# (Currently the path is identical on Edison and Hopper.)
#
def Executable(self):
if self.launcher.nersc_host == "edison":
return ["env", "DISPLAY=", "CRAY_ROOTFS=DSL", "/opt/cray/alps/default/bin/aprun"]
else:
return ["env", "DISPLAY=", "CRAY_ROOTFS=DSL", "/opt/cray/alps/5.2.1-2.0502.9072.13.1.gem/bin/aprun"]
###############################################################################
# Class: JobSubmitter_mpiexec_NERSC
#
# Purpose: Custom "mpiexec" job submitter for NERSC.
#
# Programmer: Eric Brugger
# Date: Wed Oct 22 16:26:54 PDT 2014
#
# Modifications:
#
###############################################################################
class JobSubmitter_mpiexec_NERSC(JobSubmitter_mpiexec):
def __init__(self, launcher):
super(JobSubmitter_mpiexec_NERSC, self).__init__(launcher)
self.useppnmpiexec = 1
###############################################################################
# Class: JobSubmitter_mpirun_NERSC
#
# Purpose: Custom "mpirun" job submitter for NERSC.
#
# Programmer: Eric Brugger
# Date: Wed Oct 22 16:26:54 PDT 2014
#
# Modifications:
#
###############################################################################
class JobSubmitter_mpirun_NERSC(JobSubmitter_mpirun):