# Ganga configuration file ($Name: Ganga-5-5-3 $). DO NOT remove this line.
#
#=======================================================================
# All settings are commented out, so Ganga will apply the default
# values automatically. Wherever possible these default values are indicated.
# To see which configuration settings are used, type at the ganga prompt:
#   print config
#
# Support of Atlas- and LHCb-specific functionality:
#
# The GANGA_CONFIG_PATH environment variable or the --config-path option at
# the command line is used to enable the LHCb or Atlas extensions.
# For example:
#   $ export GANGA_CONFIG_PATH=GangaLHCb/LHCb.ini
#   $ ganga
# or:
#   $ ganga --config-path=GangaAtlas/Atlas.ini
#
# In the LHCb environment you do not need to worry about this because it is
# done automatically by the GangaEnv command.
#
#=======================================================================
# Global configuration parameters. This is a catch-all section.
[Configuration]
# default batch system
#Batch = LSF
# runtime warnings issued by the interpreter may be suppressed
#IgnoreRuntimeWarnings = False
# the search path for the load() function
#LOAD_PATH =
# path to runtime plugin packages where custom handlers may be added. Normally
# you should not worry about it. If an element of the path is just a name (like
# in the example below) then the plugins will be loaded using the current
# python path. This means that some packages such as GangaTest may be taken
# from the release area.
# Examples:
#   RUNTIME_PATH = GangaGUI
#   RUNTIME_PATH = /my/SpecialExtensions:GangaTest
#RUNTIME_PATH =
# the search path for the scripts directory. When running a script from the
# system shell (e.g. ganga script) this path is used to locate the script
#SCRIPTS_PATH = Ganga/scripts
# block of GPI commands executed at startup
#StartupGPI =
# The type of the interactive shell: IPython (cooler) or Console (limited)
#TextShell = IPython
# enable usage monitoring through the MSG server defined in the MSGMS
# configuration
#UsageMonitoringMSG = True
# MonALISA configuration file used to set up the destination of usage messages
#UsageMonitoringURL = http://gangamon.cern.ch:8888/apmon/ganga.conf
# Location of local job repositories and workspaces. Default is ~/gangadir but
# in some cases (such as LSF at CNAF) this needs to be modified to point to the
# shared file system directory.
#gangadir = /afs/cern.ch/user/g/gangage/gangadir
# Type of the repository.
# Examples:
#   LocalXML
#repositorytype = LocalXML
# User name. The same person may have different roles (user names) and still
# use the same gangadir. Unless explicitly set this option defaults to the
# real user name.
#user = gangage
# Type of workspace. A workspace is a place where the input and output
# sandboxes of jobs are stored. Currently the only supported type is
# LocalFilesystem.
#workspacetype = LocalFilesystem
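# Example (illustrative): typical user overrides for this section; the path
# and the printed message are placeholders, not defaults:
#   gangadir = /data/myuser/gangadir
#   StartupGPI = print 'Welcome back'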
#=======================================================================
# Control the printing style of the different registries
# ("jobs", "box", "tasks", ...)
[Display]
# list of job attributes to be printed in separate columns
#box_columns = ('id', 'type', 'name', 'application')
# optional converter functions
#box_columns_functions = {'application': 'lambda obj: obj.application._name'}
# with the exception of the columns mentioned here, hide all values which
# evaluate to logical false (so 0, "", [], ...)
#box_columns_show_empty = ['id']
# width of each column
#box_columns_width = {'application': 15, 'type': 20, 'id': 5, 'name': 40}
# colour print of the docstrings and examples
#config_docstring_colour = fg.green
# colour print of the names of configuration sections and options
#config_name_colour = fx.bold
# colour print of the configuration values
#config_value_colour = fx.bold
# list of job attributes to be printed in separate columns
#jobs_columns = ('fqid', 'status', 'name', 'subjobs', 'application', 'backend', 'backend.actualCE')
# optional converter functions
#jobs_columns_functions = {'subjobs': 'lambda j: len(j.subjobs)', 'backend': 'lambda j:j.backend._name', 'application': 'lambda j: j.application._name'}
# with the exception of the columns mentioned here, hide all values which
# evaluate to logical false (so 0, "", [], ...)
#jobs_columns_show_empty = ['fqid']
# width of each column
#jobs_columns_width = {'fqid': 8, 'status': 10, 'name': 10, 'subjobs': 8, 'backend.actualCE': 45, 'application': 15, 'backend': 15}
#=======================================================================
# Default associations between file types and file-viewing commands. The name
# identifies the extension and the value the command. New extensions can be
# added, as shown in the example after this section. A single & after the
# command indicates that the process will be started in the background. A &&
# after the command indicates that a new terminal will be opened and the
# command executed in that terminal.
[File_Associations]
# Default command to use if there is no association with the file type
#fallback_command = less
# Command for viewing html files.
#htm = firefox &
# Command for viewing html files.
#html = firefox &
# Command for listing the content of a directory
#listing_command = ls -ltr
# Command for opening a new terminal (xterm, gnome-terminal, ...)
#newterm_command = xterm
# Option to give to a new terminal to tell it to execute a command.
#newterm_exeopt = -e
# Command for opening ROOT files.
#root = root.exe &&
# Command for opening tar files.
#tar = file-roller &
# Command for opening tar files.
#tgz = file-roller &
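# Example (illustrative): associate a new extension with a viewer, assuming
# evince is installed, and run it in the background:
#   pdf = evince &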
#=======================================================================
# Customization of GPI component object assignment. For each category there
# may be multiple filters registered; the one used is defined in the
# configuration file in [GPIComponentFilters], e.g.:
#   {'datasets': {'lhcbdatasets': lhcbFilter, 'testdatasets': testFilter}, ...}
[GPIComponentFilters]
#files = string_file_shortcut
#=======================================================================
# LCG/gLite/EGEE configuration parameters
[LCG]
# sets the allowed computing elements by a regular expression
#AllowedCEs =
# sets the size limit of the input sandbox; an oversized input sandbox will
# be pre-uploaded to the storage element specified by 'DefaultSE' in the area
# specified by 'DefaultSRMToken'
#BoundSandboxLimit = 10485760
# sets the generic LCG-UI configuration script for the GLITE workload
# management system
#Config =
# sets the VO-specific LCG-UI configuration script for the EDG resource broker
#ConfigVO =
# sets the DataAccessProtocol
#DataAccessProtocol = ['gsiftp']
# sets the DataRequirements of the job
#DataRequirements =
# sets the file catalogue server
#DefaultLFC = prod-lfc-shared-central.cern.ch
# sets the default storage element
#DefaultSE = srm.cern.ch
# sets the space token for storing temporary files (e.g. an oversized input
# sandbox)
#DefaultSRMToken =
# enables/disables the support of the EDG middleware
#EDG_ENABLE = True
# sets the LCG-UI environment setup script for the EDG middleware
#EDG_SETUP = /afs/cern.ch/sw/ganga/install/config/grid_env_auto.sh
# sets excluded computing elements by a regular expression
#ExcludedCEs =
# enables/disables the support of the GLITE middleware
GLITE_ENABLE = True
# sets the LCG-UI environment setup script for the GLITE middleware
#GLITE_SETUP = /afs/cern.ch/sw/ganga/install/config/grid_env_auto.sh
# sets the maximum number of nodes (i.e. subjobs) in a gLite bulk job
#GliteBulkJobSize = 50
# if set to True, the script-based glite-wms-* commands are forced to load
# with the current python; a trick for 32/64-bit compatibility issues.
#IgnoreGliteScriptHeader = False
# sets how the job's stdout/stderr are handled.
#JobLogHandler = WMS
# if set to True, resource matching is done before submitting jobs; jobs
# without any matched resources will fail at submission
#MatchBeforeSubmit = False
# sets the myproxy server
#MyProxyServer = myproxy.cern.ch
# sets the number of concurrent threads for downloading a job's output
# sandbox from the gLite WMS
#OutputDownloaderThread = 10
# sets the ranking rule for picking a computing element
#Rank =
# sets the replica catalogue server
#ReplicaCatalog =
# sets the fully qualified class name for other specific LCG job requirements
#Requirements = Ganga.Lib.LCG.LCGRequirements
# sets the maximum number of job retries
#RetryCount = 3
# sets the fully qualified class name for handling the oversized input sandbox
#SandboxCache = Ganga.Lib.LCG.LCGSandboxCache
# sets the transfer timeout of the oversized input sandbox
#SandboxTransferTimeout = 60
# sets the maximum number of shallow job retries
#ShallowRetryCount = 10
# sets the storage index
#StorageIndex =
# sets the number of concurrent threads for job submission to the gLite WMS
#SubmissionThread = 10
# sets the name of the grid virtual organisation
#VirtualOrganisation = dteam
VirtualOrganisation = vo.gear.cern.ch
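# Example (illustrative): matching GPI usage for this section; the CE name
# is a placeholder:
#   j = Job(backend=LCG())
#   j.backend.CE = 'ce.example.org:2119/jobmanager-lcglsf-grid'
#   j.submit()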
#=======================================================================
# Internal LSF command line interface
[LSF]
# Name of the environment variable with the ID of the job
#jobid_name = LSB_BATCH_JID
# String containing the option name used to set the job name in the batch
# system
#jobnameopt = J
# String pattern for the reply from the kill command
#kill_res_pattern = (^Job <\d+> is being terminated)|(Job <\d+>: Job has already finished)|(Job <\d+>: No matching job found)
# String used to kill the job
#kill_str = bkill %s
# String containing commands executed on the worker node after the job has
# run
#postexecute =
# def filefilter(fn):
#     # FILTER OUT Batch INTERNAL INPUT/OUTPUT FILES:
#     # 10 digits . any number of digits . err or out
#     import re
#     internals = re.compile(r'\d{10}\.\d+.(out|err)')
#     return internals.match(fn) or fn == '.Batch.start'
# String containing commands executed on the worker node before the job is
# run
#preexecute =
# Name of the environment variable with the queue name of the job
#queue_name = LSB_QUEUE
# Shared PYTHON
#shared_python_executable = False
# String pattern for the reply from the submit command
#submit_res_pattern = ^Job <(?P<id>\d*)> is submitted to .*queue <(?P<queue>\S*)>
# String used to submit the job to the queue
#submit_str = cd %s; bsub %s %s %s %s
# Timeout in seconds after which a job is declared killed if it has not
# touched its heartbeat file. The heartbeat is touched every 30s, so do not
# set this below 120 or so.
#timeout = 300
#=======================================================================
# Parameters of the local backend (jobs in the background on localhost)
[Local]
# automatically remove the local working directory when the job has completed
#remove_workdir = True
#=======================================================================
# Control the messages printed by Ganga. The settings are applied
# hierarchically to the loggers. Ganga is the name of the top-level logger,
# which applies by default to all Ganga.* packages unless overridden in
# sub-packages. You may define new loggers in this section. The log level may
# be one of: CRITICAL ERROR WARNING INFO DEBUG
[Logging]
# top-level logger
#Ganga = WARNING
# logger of Ganga.GPIDev.* packages
#Ganga.GPIDev = INFO
# FIXME
#Ganga.Runtime.bootstrap = INFO
# logger of the Ganga logging package itself (use with care!)
#Ganga.Utility.logging = WARNING
# enable ASCII colour formatting of messages, e.g. errors in red
#_colour = True
# format of logging messages: TERSE, NORMAL, VERBOSE, DEBUG
#_format = NORMAL
# if True then the cache is used for interactive sessions; False disables
# caching
#_interactive_cache = True
# location of the logfile
#_logfile = ~/.ganga.log
# the size of the logfile (in bytes); the rotating log will never exceed this
# file size
#_logfile_size = 100000
# logger for the stomp.py external package
#stomp.py = CRITICAL
#=======================================================================
# Settings for the MSGMS monitoring plugin. Cannot be changed during the
# interactive Ganga session.
[MSGMS]
#message_destination = /queue/ganga.status
#password =
# The port to connect to
#port = 6163
# The server to connect to
#server = gridmsg101.cern.ch
#username =
#=======================================================================
# Parameters for mergers
[Mergers]
# Dictionary of file associations
#associate = {'log':'TextMerger','root':'RootMerger','text':'TextMerger','txt':'TextMerger'}
# location of the merger's outputdir
#merge_output_dir = ~/gangadir/merge_results
# Standard (default) merger
#std_merge = TextMerger
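# Example (illustrative): extend the association map so that files with a
# made-up '.histo' extension are merged with the RootMerger:
#   associate = {'log':'TextMerger','root':'RootMerger','text':'TextMerger','txt':'TextMerger','histo':'RootMerger'}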
#=======================================================================
# External monitoring systems are used to follow the submission and execution
# of jobs. Each entry in this section defines a monitoring plugin used for a
# particular combination of application and backend. Asterisks may be used to
# specify any application or any backend. The configuration entry syntax:
#   ApplicationName/BackendName = dot.path.to.monitoring.plugin.class
# Example: the DummyMS plugin will be used to track executables run on all
# backends:
#   Executable/* = Ganga.Lib.MonitoringServices.DummyMS.DummyMS
[MonitoringServices]
Executable/* = Ganga.Lib.MonitoringServices.MSGMS.MSGMS
#=======================================================================
# Internal PBS command line interface
[PBS]
# Name of the environment variable with the ID of the job
#jobid_name = PBS_JOBID
# String containing the option name used to set the job name in the batch
# system
#jobnameopt = N
# String pattern for the reply from the kill command
#kill_res_pattern = (^$)|(qdel: Unknown Job Id)
# String used to kill the job
#kill_str = qdel %s
# String containing commands executed on the worker node after the job has
# run
#postexecute =
# env = os.environ
# jobnumid = env["PBS_JOBID"]
# os.chdir("/tmp/")
# os.system("rm -rf /tmp/%s/" %jobnumid)
# String containing commands executed on the worker node before the job is
# run
#preexecute =
# env = os.environ
# jobnumid = env["PBS_JOBID"]
# os.system("mkdir /tmp/%s/" %jobnumid)
# os.chdir("/tmp/%s/" %jobnumid)
# os.environ["PATH"]+=":."
# Name of the environment variable with the queue name of the job
#queue_name = PBS_QUEUE
# Shared PYTHON
#shared_python_executable = False
# String pattern for the reply from the submit command
#submit_res_pattern = ^(?P<id>\d*)\.pbs\s*
# String used to submit the job to the queue
#submit_str = cd %s; qsub %s %s %s %s
# Timeout in seconds after which a job is declared killed if it has not
# touched its heartbeat file. The heartbeat is touched every 30s, so do not
# set this below 120 or so.
#timeout = 300
#=======================================================================
# General control of the plugin mechanism. Set the default plugin in a given
# category. For example:
#   default_applications = DaVinci
#   default_backends = LCG
[Plugins]
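# Example (illustrative): make Executable the default application and Local
# the default backend, so that a bare Job() picks them up:
#   default_applications = Executable
#   default_backends = Local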
#=======================================================================
# Background job status monitoring and output retrieval
[PollThread]
# Poll rate for the Condor backend.
#Condor = 30
# Poll rate for the Dirac backend.
#Dirac = 50
# disk space checking callback. This function should return False when there
# is no disk space available, True otherwise
#DiskSpaceChecker =
# Poll rate for the LCG backend.
#LCG = 30
# Poll rate for the LSF backend.
#LSF = 20
# Poll rate for the Local backend.
#Local = 10
# Poll rate for the PBS backend.
#PBS = 20
# Poll rate for the Panda backend.
#Panda = 50
# enable monitoring automatically at startup; in script mode monitoring is
# disabled by default, in interactive mode it is enabled
#autostart = False
# internal supervising thread
#base_poll_rate = 2
# The frequency in seconds of the credentials checker
#creds_poll_rate = 30
# Default rate for polling job status in the thread pool. This is the default
# value for all backends.
#default_backend_poll_rate = 30
# The frequency in seconds of the free disk space checker
#diskspace_poll_rate = 30
# The user will get the FIRST prompt after N seconds, as specified by this
# parameter. This parameter also defines the time that Ganga will wait before
# shutting down, if there are only non-critical threads alive, in both
# interactive and batch mode.
#forced_shutdown_first_prompt_time = 5
# If there are remaining background activities at exit, such as monitoring or
# output download, Ganga will attempt to wait for the activities to complete.
# You may select whether the user is prompted to decide about forcing
# shutdown ("interactive") or the system waits on a timeout without questions
# ("timeout"). The default is "session_type", which does interactive shutdown
# for the CLI and timeout for scripts.
#forced_shutdown_policy = session_type
# The user will get the prompt every N seconds, as specified by this
# parameter.
#forced_shutdown_prompt_time = 10
# Timeout in seconds for forced Ganga shutdown in batch mode.
#forced_shutdown_timeout = 60
# Poll rate for the gLite backend.
#gLite = 30
# OBSOLETE: this option has no effect anymore
#max_shutdown_retries = 5
# if False, log the errors for a given backend only once and do not repeat
# them
#repeat_messages = False
# Size of the thread pool. Each thread monitors a specific backend at a given
# time. The minimum value is one; preferably set to number_of_backends + 1
#update_thread_pool_size = 5
#=======================================================================
# Options for the Root backend
[ROOT]
# Architecture of ROOT
#arch = slc4_ia32_gcc34
# Location of ROOT
#location = /afs/cern.ch/sw/lcg/external/root
# Set to a specific ROOT version. Will override other options.
#path =
# Location of the python used for the execution of the PyROOT script
#pythonhome = ${location}/../Python/${pythonversion}/${arch}/
# Version of python used to execute the PyROOT script
#pythonversion =
# Version of ROOT
#version = 5.18.00
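# Example (illustrative): run a ROOT macro with the version configured
# above, assuming the Root application plugin; the macro name is a
# placeholder:
#   j = Job(application=Root(version='5.18.00', script=File('analysis.C')))
#   j.submit()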
#=======================================================================
# Internal SGE command line interface
[SGE]
# Name of the environment variable with the ID of the job
#jobid_name = JOB_ID
# String containing the option name used to set the job name in the batch
# system
#jobnameopt = N
# String pattern for the reply from the kill command
#kill_res_pattern = (has registered the job +\d+ +for deletion)|(denied: job +"\d+" +does not exist)
# String used to kill the job
#kill_str = qdel %s
# String containing commands executed on the worker node after the job has
# run
#postexecute =
# String containing commands executed on the worker node before the job is
# run
#preexecute = os.chdir(os.environ["TMPDIR"])
# os.environ["PATH"]+=":."
# Name of the environment variable with the queue name of the job
#queue_name = QUEUE
# Shared PYTHON
#shared_python_executable = False
# String pattern for the reply from the submit command
#submit_res_pattern = Your job (?P<id>\d+) (.+)
# String used to submit the job to the queue
#submit_str = cd %s; qsub -cwd -V %s %s %s %s
# Timeout in seconds after which a job is declared killed if it has not
# touched its heartbeat file. The heartbeat is touched every 30s, so do not
# set this below 120 or so.
#timeout = 300
#=======================================================================
# Configuration parameters for the internal Shell utility.
[Shell]
# list of env variables not inherited in the Shell environment
#IgnoredVars = ['_', 'SHVL', 'PWD']
#=======================================================================
# IPython shell configuration. See the IPython manual for more details:
# http://ipython.scipy.org/doc/manual
[TextShell_IPython]
# FIXME
#args = ['-colors','LightBG', '-noautocall']
#=======================================================================
# default attribute values for AfsCommand objects
[defaults_AfsCommand]
# Command for destroying credential
#destroy = unlog
# Dictionary of parameter-value pairs to pass to destroy command
#destroy_parameters = {'cell': '-cell'}
# Command for obtaining information about credential
#info = tokens
# Dictionary mapping from Ganga credential properties to command-line options
#info_parameters = {}
# Command for creating/initialising credential
#init = klog
# Dictionary of parameter-value pairs to pass to init command
#init_parameters = {'pipe': '-pipe', 'username': '-principal', 'valid': '-lifetime', 'cell': '-cell'}
#=======================================================================
# default attribute values for AfsToken objects
[defaults_AfsToken]
# AFS cell with which the token is used [empty string implies the local cell]
#cell =
# Set of commands to be used for credential-related operations
#command = ICommandSet
# Number of password attempts allowed when creating the credential
#maxTry = 1
# Default minimum validity
#minValidity = 00:15
# AFS username with which the token is used [defaults to the login id]
#username =
# Default credential validity at creation
#validityAtCreation = 24:00
#=======================================================================
# default attribute values for ArgSplitter objects
[defaults_ArgSplitter]
# A list of lists of arguments to pass to the script
#args = []
#=======================================================================
# default attribute values for Condor objects
[defaults_Condor]
# Environment settings for the execution host
#env = {}
# Flag to pass the current environment to the execution host
#getenv = False
# Globus RSL settings (for Condor-G submission)
#globus_rsl =
# Globus scheduler to be used (required for Condor-G submission)
#globusscheduler =
# Ranking scheme to be used when selecting the execution host
#rank = Memory
# Requirements for selecting the execution host
#requirements = CondorRequirements
# Flag indicating whether the Condor nodes have a shared filesystem
#shared_filesystem = True
# Options passed to Condor at submission time
#submit_options = []
# Type of execution environment to be used by Condor
#universe = vanilla
#=======================================================================
# default attribute values for CondorRequirements objects
[defaults_CondorRequirements]
# System architecture
#arch = INTEL
# Excluded execution hosts, given as a string of space-separated names:
# 'machine1 machine2 machine3'; or as a list of names:
# ['machine1', 'machine2', 'machine3']
#excluded_machine =
# Requested execution hosts, given as a string of space-separated names:
# 'machine1 machine2 machine3'; or as a list of names:
# ['machine1', 'machine2', 'machine3']
#machine =
# Minimum physical memory
#memory = 400
# Operating system
#opsys = LINUX
# Other requirements, given as a list of strings, for example:
# ['OSTYPE == "SLC4"', '(POOL == "GENERAL" || POOL == "GEN_FARM")']; the final
# requirement is the AND of all elements in the list
#other = []
# Minimum virtual memory
#virtual_memory = 400
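# Example (illustrative): GPI usage combining the two sections above; the
# host name is a placeholder:
#   c = CondorRequirements(memory=1024, excluded_machine='badnode.example.com')
#   j = Job(backend=Condor(requirements=c))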
#=======================================================================
# default attribute values for CustomMerger objects
[defaults_CustomMerger]
# A list of files to merge.
#files = []
# Jobs that are in the failed or killed states will be excluded from the
# merge when this flag is set to True.
#ignorefailed = False
# Path to a python module to perform the merge.
#module = None
# The default behaviour for this Merger object. Will overwrite output files.
#overwrite = False
#=======================================================================
# default attribute values for DQ2FileIndex objects
[defaults_DQ2FileIndex]
# key:value pairs of file metadata
#attributes = {}
# the DQ2 dataset name
#dataset =
# the main identity of the file
#id =
# the md5sum of the file
#md5sum =
# the name of the file
#name =
# the DQ2 site id
#site =
#=======================================================================
# default attribute values for DQ2SandboxCache objects
[defaults_DQ2SandboxCache]
# the DQ2 dataset name
#dataset_name =
# the DQ2 local site id
#local_site_id = CERN-PROD_SCRATCHDISK
# max. number of tries in case of failures
#max_try = 1
# file transfer protocol
#protocol =
# the DQ2 setup script
#setup = /afs/cern.ch/atlas/offline/external/GRID/ddm/DQ2Clients/setup.sh
#=======================================================================
# default attribute values for Executable objects
[defaults_Executable]
# List of arguments for the executable. Arguments may be strings or File
# objects.
#args = ['Hello World']
# Environment
#env = {}
# A path (string) or a File object specifying an executable.
#exe = echo
#=======================================================================
# default attribute values for File objects
[defaults_File]
# path to the file source
#name =
# destination subdirectory (a relative path)
#subdir = .
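# Example (illustrative): ship a local file into a subdirectory of a job's
# input sandbox; the path is a placeholder:
#   j.inputsandbox = [File(name='/home/myuser/data.txt', subdir='inputs')]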
#=======================================================================
# default attribute values for GangaList objects
[defaults_GangaList]
#=======================================================================
# default attribute values for GenericSplitter objects
[defaults_GenericSplitter]
# The attribute on which the job is split
#attribute =
# A list of the values corresponding to the attribute of the subjobs
#values = []
#=======================================================================
# default attribute values for GridCommand objects
[defaults_GridCommand]
# Command for destroying credential
#destroy = grid-proxy-destroy
# Dictionary of parameter-value pairs to pass to destroy command
#destroy_parameters = {}
# Command for obtaining information about credential
#info = grid-proxy-info
# Dictionary mapping from Ganga credential properties to command-line options
#info_parameters = {}
# Command for creating/initialising credential
#init = grid-proxy-init
# Dictionary of parameter-value pairs to pass to init command
#init_parameters = {'pipe': '-pwstdin', 'valid': '-valid'}
#=======================================================================
# default attribute values for GridFileIndex objects
[defaults_GridFileIndex]
# key:value pairs of file metadata
#attributes = {}
# the main identity of the file
#id =
# the md5sum of the file
#md5sum =
# the name of the file
#name =
#=======================================================================
# default attribute values for GridProxy objects
[defaults_GridProxy]
# Set of commands to be used for credential-related operations
#command = ICommandSet
# Refresh time of the proxy info cache
#info_refresh_time = 00:15
# String of options to be passed to the command for proxy creation
#init_opts =
# Number of password attempts allowed when creating the credential
#maxTry = 1
# Default minimum validity
#minValidity = 00:15
# Default credential validity at creation
#validityAtCreation = 24:00
# Virtual organisation management system information
voms = True
#=======================================================================
# default attribute values for GridSandboxCache objects
[defaults_GridSandboxCache]
# max. number of tries in case of failures
#max_try = 1
# file transfer protocol
#protocol =
#=======================================================================
# default attribute values for ICommandSet objects
[defaults_ICommandSet]
# Command for destroying credential
#destroy =
# Dictionary of parameter-value pairs to pass to destroy command
#destroy_parameters = {}
# Command for obtaining information about credential
#info =
# Dictionary mapping from Ganga credential properties to command-line options
#info_parameters = {}
# Command for creating/initialising credential
#init =
# Dictionary of parameter-value pairs to pass to init command
#init_parameters = {}
#=======================================================================
# default attribute values for Interactive objects
[defaults_Interactive]
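# Example (illustrative): run a job in the current session with the
# Interactive backend:
#   j = Job(backend=Interactive())
#   j.submit()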
#=======================================================================
# default attribute values for Job objects
[defaults_Job]
# specification of the application to be executed
#application = None
# specification of the resources to be used (e.g. batch system)
#backend = None
# JobInfo
#info = None
# dataset definition (typically this is specific either to an application, a
# site or the virtual organisation)
#inputdata = None
# list of File objects shipped to the worker node
#inputsandbox = []
# optional output merger
#merger = None
# optional label which may be any combination of ASCII characters
#name =
# dataset definition (typically this is specific either to an application, a
# site or the virtual organisation)
#outputdata = None
# list of filenames or patterns shipped from the worker node
#outputsandbox = []
# optional splitter
#splitter = None
#=======================================================================
# default attribute values for JobInfo objects
[defaults_JobInfo]
# job monitor instance
#monitor = None
#=======================================================================
# default attribute values for JobTemplate objects
[defaults_JobTemplate]
# specification of the application to be executed
#application = None
# specification of the resources to be used (e.g. batch system)
#backend = None
# JobInfo
#info = None
# dataset definition (typically this is specific either to an application, a
# site or the virtual organisation)
#inputdata = None
# list of File objects shipped to the worker node
#inputsandbox = []
# optional output merger
#merger = None
# optional label which may be any combination of ASCII characters
#name =
# dataset definition (typically this is specific either to an application, a
# site or the virtual organisation)
#outputdata = None
# list of filenames or patterns shipped from the worker node
#outputsandbox = []
# optional splitter
#splitter = None
#=======================================================================
# default attribute values for JobTime objects
[defaults_JobTime]
# Dictionary containing timestamps for the job
#timestamps = {}
#=======================================================================
# default attribute values for JobTree objects
[defaults_JobTree]
#name =
#=======================================================================
# default attribute values for LCG objects
[defaults_LCG]
# Request a specific Computing Element
#CE =
# Job type: Normal, MPICH
#jobtype = Normal
# Middleware type
middleware = GLITE
# Enable the job perusal feature of GLITE
#perusable = False
# Requirements for the resource selection
#requirements = None
# Interface for handling the oversized input sandbox
#sandboxcache = None
#=======================================================================
# default attribute values for LCGFileIndex objects
[defaults_LCGFileIndex]
# key:value pairs of file metadata
#attributes = {}
# the main identity of the file
#id =
# the LFC hostname
#lfc_host =
# the original file path on the local machine
#local_fpath =
# the md5sum of the file
#md5sum =
# the name of the file
#name =
#=======================================================================
# default attribute values for LCGRequirements objects
[defaults_LCGRequirements]
# Minimum available CPU time (min)
#cputime = 0
# External connectivity
#ipconnectivity = False
# Minimum available memory (MB)
#memory = 0
# Number of nodes for MPICH jobs
#nodenumber = 1
# Other requirements
#other = []
# Software installations
#software = []
# Minimum available total time (min)
#walltime = 0
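# Example (illustrative): request resources through LCGRequirements; the
# software tag is a placeholder:
#   r = LCGRequirements(memory=1024, cputime=120, software=['VO-myvo-MyApp-1.0'])
#   j = Job(backend=LCG(requirements=r))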
#=======================================================================
# default attribute values for LCGSandboxCache objects
[defaults_LCGSandboxCache]
# the LCG LFC hostname
#lfc_host =
# max. number of tries in case of failures
#max_try = 1
# file transfer protocol
#protocol =
# the LCG SE hostname
#se =
# the relative path to the VO directory on the SE
#se_rpath = generated
# the LCG SE type
#se_type = srmv2
# the SRM space token, meaningful only when se_type is set to srmv2
#srm_token =
#=======================================================================
# default attribute values for LSF objects
[defaults_LSF]
# extra options for Batch. See help(Batch) for more details
#extraopts =
# queue name as defined in your local Batch installation
#queue =
#=======================================================================
# default attribute values for Local objects
[defaults_Local]
# adjust the process priority using the nice -n command
#nice = 0
#=======================================================================
# default attribute values for MultipleMerger objects
[defaults_MultipleMerger]
# A list of Merge objects to run
#merger_objects = []
#=======================================================================
# default attribute values for PBS objects
[defaults_PBS]
# extra options for Batch. See help(Batch) for more details
#extraopts =
# queue name as defined in your local Batch installation
#queue =
#=======================================================================
# default attribute values for Remote objects
[defaults_Remote]
# Overrides any environment variables set in the job
#environment = {}
# Command line to start ganga on the remote host
#ganga_cmd =
# The directory to use for the remote workspace, repository, etc.
#ganga_dir =
# The remote host and port number ('host:port') to use. Default port is 22.
#host =
# Set to the type of ssh key to use (if required). Possible values are 'RSA'
# and 'DSS'.
#key_type = RSA
# Sequence of commands to execute before running Ganga on the remote site
#pre_script = ['']
# specification of the resources to be used (e.g. batch system)
#remote_backend = None
# Set to the location of the ssh key to use for authentication, e.g.
# /home/mws/.ssh/id_rsa. Note: you should make sure 'key_type' is also set
# correctly.
#ssh_key =
# The username at the remote host
#username =
#=======================================================================
# default attribute values for Root objects
[defaults_Root]
# List of arguments for the script. Accepted types are numerics and strings
#args = []
# A File object specifying the script to execute when Root starts
#script = File(name='',subdir='.')
# Execute 'script' using Python. The PyRoot libraries are added to the
# PYTHONPATH.
#usepython = False
# The version of Root to run
#version = 5.18.00
#=======================================================================
# default attribute values for RootMerger objects
[defaults_RootMerger]
# Arguments to be passed to hadd.
#args = None
# A list of files to merge.
#files = []
# Jobs that are in the failed or killed states will be excluded from the
# merge when this flag is set to True.
#ignorefailed = False
# The default behaviour for this Merger object. Will overwrite output files.
#overwrite = False
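# Example (illustrative): merge ROOT output files, passing hadd's -f option
# to force re-creation of the target; the file name is a placeholder:
#   j.merger = RootMerger(files=['histos.root'], args='-f', ignorefailed=True)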
#=======================================================================
# default attribute values for SGE objects
[defaults_SGE]
# extra options for Batch. See help(Batch) for more details
#extraopts =
# queue name as defined in your local Batch installation
#queue =
#=======================================================================
# default attribute values for SmartMerger objects
[defaults_SmartMerger]
# A list of files to merge.
#files = []
# Jobs that are in the failed or killed states will be excluded from the
# merge when this flag is set to True.
#ignorefailed = False
# The default behaviour for this Merger object. Will overwrite output files.
#overwrite = False
#=======================================================================
# default attribute values for TextMerger objects
[defaults_TextMerger]
# Output should be compressed with gzip.
#compress = False
# A list of files to merge.
#files = []
# Jobs that are in the failed or killed states will be excluded from the
# merge when this flag is set to True.
#ignorefailed = False
# The default behaviour for this Merger object. Will overwrite output files.
#overwrite = False
#=======================================================================
# default attribute values for VomsCommand objects
[defaults_VomsCommand]
# Command for destroying credential
#destroy = voms-proxy-destroy
# Dictionary of parameter-value pairs to pass to destroy command
#destroy_parameters = {}
# Command for obtaining information about credential
#info = voms-proxy-info
# Dictionary mapping from Ganga credential properties to command-line options
#info_parameters = {'vo': '-vo'}
# Command for creating/initialising credential
#init = voms-proxy-init
# Dictionary of parameter-value pairs to pass to init command
#init_parameters = {'pipe': '-pwstdin', 'voms': '-voms', 'valid': '-valid'}
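# Example (illustrative): with the init command and init_parameters above,
# proxy creation runs a command of roughly this shape (the VO name and
# lifetime are placeholders):
#   voms-proxy-init -pwstdin -voms myvo -valid 24:00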