diff --git a/.gitmodules b/.gitmodules
index 76c06bbd678e6635afc9b270728411f7dc8e860b..26eeaaf28695d54fd84a8ed49a404c2b7267e912 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +1,3 @@
 [submodule "autosubmit4-config-parser"]
-	path = autosubmit/autosubmit4-config-parser
+	path = autosubmit4-config-parser
 	url = ../../ces/autosubmit4-config-parser
diff --git a/autosubmit/job/job.py b/autosubmit/job/job.py
index 57bdca689a66eac997a059e42fa841d5877f07e5..25cd4b1512abc56380563adebfc179ccf2703880 100644
--- a/autosubmit/job/job.py
+++ b/autosubmit/job/job.py
@@ -1198,10 +1198,10 @@ class Job(object):
     def update_content_extra(self,as_conf,files):
         additional_templates = []
         for file in files:
-            if as_conf.get_project_type().lower() != "none":
+            if as_conf.get_project_type().lower() == "none":
                 template = "%DEFAULT.EXPID%"
             else:
-                template = open(os.path.join(as_conf.get_project_dir(), file), 'r')
+                template = open(os.path.join(as_conf.get_project_dir(), file), 'r').read()
             additional_templates += [template]
         return additional_templates
     def update_content(self, as_conf):
@@ -1328,16 +1328,19 @@ class Job(object):
             #enumerate and get value
             #todo revise pipeline that check this, additional templates value is not Mocked well
-            for file_n,additional_template_content in enumerate(additional_templates):
-                template_content += additional_template_content
+            for additional_file, additional_template_content in zip(self.additional_files, additional_templates):
+                for key, value in parameters.items():
+                    additional_template_content = re.sub('%(?
-## On a job with FREQUENCY > 1, if True, the dependencies are evaluated against all
-## jobs in the frequency interval, otherwise only evaluate dependencies against current
-## iteration.
-## If not specified, defaults to True
-# WAIT:False
-## Defines if job is only to be executed in reruns. If not specified, defaults to false.
-# RERUN_ONLY:False
-## Wallclock to be submitted to the HPC queue in format HH:MM
-# WALLCLOCK:00:05
-
-## Processors number to be submitted to the HPC. If not specified, defaults to 1.
-## Wallclock chunk increase (WALLCLOCK will be increased according to the formula WALLCLOCK + WCHUNKINC * (chunk - 1)).
-## Ideal for sequences of jobs that change their expected running time according to the current chunk.
-# WCHUNKINC: 00:01
-# PROCESSORS: 1
-## Threads number to be submitted to the HPC. If not specified, defaults to 1.
-# THREADS: 1
-## Enables hyper-threading. If not specified, defaults to false.
-# HYPERTHREADING: false
-## Tasks number to be submitted to the HPC. If not specified, defaults to 1.
-# Tasks: 1
-## Memory requirements for the job in MB
-# MEMORY: 4096
-## Number of retrials if a job fails. If not specified, defaults to the value given on experiment's autosubmit.yml
-# RETRIALS: 4
-## Allows to put a delay between retries, of retrials if a job fails. If not specified, it will be static
-# DELAY_RETRY_TIME: 11
-# DELAY_RETRY_TIME: +11 # will wait 11,22,33,44...
-# DELAY_RETRY_TIME: *11 # will wait 11,110,1110,11110...
-## Some jobs can not be checked before running previous jobs. Set this option to false if that is the case
-# CHECK: False
-## Select the interpreter that will run the job. Options: bash, python, r Default: bash
-# TYPE: bash
-## Specify the path to the interpreter. If empty, use system default based on job type . Default: empty
-# EXECUTABLE: /my_python_env/python3
+## Script to execute. If not specified, job will be omitted from workflow. You can also specify additional files separated by a ",".
+# Note: The post-processed additional_files will be sent to %HPCROOT%/LOG_%EXPID%
+## Path relative to the project directory
+# FILE:
+## Platform to execute the job. If not specified, defaults to HPCARCH in expedf file.
+## LOCAL is always defined and refers to current machine
+# PLATFORM:
+## Queue to add the job to. If not specified, uses PLATFORM default.
+# QUEUE:
+## Defines dependencies from job as a list of parent jobs separated by spaces.
+## Dependencies to jobs in previous chunk, member or startdate, use -(DISTANCE)
+# DEPENDENCIES:INI SIM-1 CLEAN-2
+## Defines if job runs once, once per startdate, once per member or once per chunk. Options: once, date, member, chunk.
+## If not specified, defaults to once
+# RUNNING:once
+## Specifies that job has only to be run after X dates, members or chunks. A job will always be created for the last one.
+## If not specified, defaults to 1
+# FREQUENCY:3
+## On a job with FREQUENCY > 1, if True, the dependencies are evaluated against all
+## jobs in the frequency interval, otherwise only evaluate dependencies against current
+## iteration.
+## If not specified, defaults to True
+# WAIT:False
+## Defines if job is only to be executed in reruns. If not specified, defaults to false.
+# RERUN_ONLY:False
+## Wallclock to be submitted to the HPC queue in format HH:MM
+# WALLCLOCK:00:05
+
+## Processors number to be submitted to the HPC. If not specified, defaults to 1.
+## Wallclock chunk increase (WALLCLOCK will be increased according to the formula WALLCLOCK + WCHUNKINC * (chunk - 1)).
+## Ideal for sequences of jobs that change their expected running time according to the current chunk.
+# WCHUNKINC: 00:01
+# PROCESSORS: 1
+## Threads number to be submitted to the HPC. If not specified, defaults to 1.
+# THREADS: 1
+## Enables hyper-threading. If not specified, defaults to false.
+# HYPERTHREADING: false
+## Tasks number to be submitted to the HPC. If not specified, defaults to 1.
+# Tasks: 1
+## Memory requirements for the job in MB
+# MEMORY: 4096
+## Number of retrials if a job fails. If not specified, defaults to the value given on experiment's autosubmit.yml
+# RETRIALS: 4
+## Allows to put a delay between retries, of retrials if a job fails. If not specified, it will be static
+# DELAY_RETRY_TIME: 11
+# DELAY_RETRY_TIME: +11 # will wait 11,22,33,44...
+# DELAY_RETRY_TIME: *11 # will wait 11,110,1110,11110...
+## Some jobs can not be checked before running previous jobs. Set this option to false if that is the case
+# CHECK: False
+## Select the interpreter that will run the job. Options: bash, python, r Default: bash
+# TYPE: bash
+## Specify the path to the interpreter. If empty, use system default based on job type . Default: empty
+# EXECUTABLE: /my_python_env/python3
 LOCAL_SETUP:
diff --git a/docs/source/userguide/configure/index.rst b/docs/source/userguide/configure/index.rst
index ad6414e92022519cdc0b4c3dc15a6664e3b89a44..5e0ea58b404f8a16de993bb4aaa11da9af46d256 100644
--- a/docs/source/userguide/configure/index.rst
+++ b/docs/source/userguide/configure/index.rst
@@ -344,7 +344,8 @@ In the file:
     ## Job name
     # JOBNAME:
-    ## Script to execute. If not specified, job will be omitted from workflow.
+    ## Script to execute. If not specified, job will be omitted from workflow. You can also specify additional files separated by a ",".
+    # Note: The post-processed additional_files will be sent to %HPCROOT%/LOG_%EXPID%
     ## Path relative to the project directory
     # FILE :
     ## Platform to execute the job. If not specified, defaults to HPCARCH in expedf file.
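
Note on the job.py change: the `re.sub` call in the second hunk is cut off in the patch text above, so the following is only a minimal sketch of the per-file placeholder substitution that the new `zip(self.additional_files, additional_templates)` loop appears to introduce (each additional file rendered on its own instead of being concatenated into the main template). The completed lookbehind/lookahead pattern, the `str(value)` replacement argument, the `re.I` flag, and the returned dict are assumptions, not the committed code.

    import re

    def render_additional_files(additional_files, additional_templates, parameters):
        """Render each additional template separately and return a dict of
        file name -> rendered content. Placeholders of the form %KEY% are
        replaced; the escaped form %%KEY%% is left untouched."""
        rendered = {}
        for additional_file, additional_template_content in zip(additional_files, additional_templates):
            for key, value in parameters.items():
                # Assumed completion of the truncated pattern in the diff:
                # '%(?<!%%)' + key + '%(?!%%)' skips %%KEY%% escapes.
                additional_template_content = re.sub(
                    '%(?<!%%)' + key + '%(?!%%)', str(value),
                    additional_template_content, flags=re.I)
            rendered[additional_file] = additional_template_content
        return rendered

    # Example: the escaped %%MEMBER%% placeholder is preserved.
    print(render_additional_files(
        ["namelist.cfg"],
        ["expid=%DEFAULT.EXPID% member=%MEMBER% literal=%%MEMBER%%"],
        {"DEFAULT.EXPID": "a000", "MEMBER": "fc0"}))
    # -> {'namelist.cfg': 'expid=a000 member=fc0 literal=%%MEMBER%%'}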
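
For the documented FILE syntax, a hypothetical job entry in the same YAML form as the template above (job name, paths and values are invented for illustration): the first value of FILE is the script to execute, and every further comma-separated value is an additional file that, once post-processed, is sent to %HPCROOT%/LOG_%EXPID%.

    SIM:
      ## Script plus two additional files, comma-separated
      FILE: templates/sim.sh, templates/namelist.cfg, templates/extra_params.yml
      DEPENDENCIES: INI SIM-1
      RUNNING: chunk
      WALLCLOCK: 00:30
      PROCESSORS: 4
      RETRIALS: 2
      # will wait 11,22,33,... between retries, per the DELAY_RETRY_TIME comment above
      DELAY_RETRY_TIME: +11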