diff --git a/VERSION b/VERSION
new file mode 100644
index 0000000000000000000000000000000000000000..75c3613460e6e01abd29e63b0d42a7714376ab75
--- /dev/null
+++ b/VERSION
@@ -0,0 +1 @@
+2.9.179
diff --git a/autosubmit_api/__init__.py b/autosubmit_api/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/autosubmit_api/__init__.pyc b/autosubmit_api/__init__.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9f29865e198bf6934833727d0f1e9cd388c43bbf
Binary files /dev/null and b/autosubmit_api/__init__.pyc differ
diff --git a/autosubmit_api/autosubmit.pyc b/autosubmit_api/autosubmit.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b8827f5bde2206db511265777fa6317d16078cc9
Binary files /dev/null and b/autosubmit_api/autosubmit.pyc differ
diff --git a/autosubmit_api/autosubmit_legacy/__init__.py b/autosubmit_api/autosubmit_legacy/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/autosubmit_api/autosubmit_legacy/autosubmit.py b/autosubmit_api/autosubmit_legacy/autosubmit.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc6e30dfdca19970b26c9e2b3b7532b66112fff0
--- /dev/null
+++ b/autosubmit_api/autosubmit_legacy/autosubmit.py
@@ -0,0 +1,3506 @@
+#!/usr/bin/env python
+
+# Copyright 2017 Earth Sciences Department, BSC-CNS
+
+# This file is part of Autosubmit.
+
+# Autosubmit is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# Autosubmit is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with Autosubmit. If not, see <http://www.gnu.org/licenses/>.
+# pipeline_test
+
+from __future__ import print_function
+import traceback
+from pyparsing import nestedExpr
+from collections import defaultdict
+from distutils.util import strtobool
+from pkg_resources import require, resource_listdir, resource_exists, resource_string
+import portalocker
+import datetime
+import signal
+import random
+import re
+import shutil
+import sys
+import pwd
+import os
+import copy
+import time
+import tarfile
+import json
+import subprocess
+import argparse
+import radical.saga as saga
+
+sys.path.insert(0, os.path.abspath('.'))
+from config.basicConfig import BasicConfig
+from config.config_common import AutosubmitConfig
+from bscearth.utils.config_parser import ConfigParserFactory
+from job.job_common import Status
+from git.autosubmit_git import AutosubmitGit
+from job.job_list import JobList
+from job.job_packages import JobPackageThread
+from job.job_package_persistence import JobPackagePersistence
+from job.job_list_persistence import JobListPersistenceDb
+from job.job_list_persistence import JobListPersistencePkl
+from job.job_grouping import JobGrouping
+from bscearth.utils.log import Log
+from database.db_common import create_db
+from experiment.experiment_common import new_experiment
+from experiment.experiment_common import copy_experiment
+from database.db_common import delete_experiment
+from database.db_common import get_autosubmit_version
+from monitor.monitor import Monitor
+from bscearth.utils.date import date2str
+from notifications.mail_notifier import MailNotifier
+from notifications.notifier import Notifier
+from platforms.saga_submitter import SagaSubmitter
+from platforms.paramiko_submitter import ParamikoSubmitter
+from job.job_exceptions import WrongTemplateException
+from job.job_packager import JobPackager
+from sets import Set
+from platforms.paramiko_platform import ParamikoTimeout
+"""
+Main module for autosubmit. Only contains an interface class to all functionality implemented on autosubmit
+"""
+
+try:
+ # noinspection PyCompatibility
+ from configparser import SafeConfigParser
+except ImportError:
+ # noinspection PyCompatibility
+ from ConfigParser import SafeConfigParser
+
+# Is the Python dialog package available? (optional dependency)
+try:
+ import dialog
+except Exception:
+ dialog = None
+
+
+# noinspection PyPackageRequirements
+# noinspection PyPackageRequirements
+# noinspection PyPackageRequirements
+
+# noinspection PyUnusedLocal
+
+
+def signal_handler(signal_received, frame):
+ """
+ Used to handle interrupt signals, allowing autosubmit to clean before exit
+
+ :param signal_received:
+ :param frame:
+ """
+ Log.info('Autosubmit will interrupt at the next safe occasion')
+ Autosubmit.exit = True
+
+
+class Autosubmit:
+ """
+ Interface class for autosubmit.
+ """
+ sys.setrecursionlimit(500000)
+ # Get the version number from the VERSION file; if it is missing, fall back to the installed autosubmit package metadata
+ scriptdir = os.path.abspath(os.path.dirname(__file__))
+
+ if not os.path.exists(os.path.join(scriptdir, 'VERSION')):
+ scriptdir = os.path.join(scriptdir, os.path.pardir)
+
+ version_path = os.path.join(scriptdir, 'VERSION')
+ readme_path = os.path.join(scriptdir, 'README')
+ changes_path = os.path.join(scriptdir, 'CHANGELOG')
+ if os.path.isfile(version_path):
+ with open(version_path) as f:
+ autosubmit_version = f.read().strip()
+ else:
+ autosubmit_version = require("autosubmitAPIwu")[0].version
+
+ exit = False
+
+ @staticmethod
+ def parse_args():
+ """
+ Parse arguments given to an executable and start execution of command given
+ """
+ try:
+ BasicConfig.read()
+
+ parser = argparse.ArgumentParser(
+ description='Main executable for autosubmit. ')
+ parser.add_argument('-v', '--version', action='version', version=Autosubmit.autosubmit_version,
+ help="returns autosubmit's version number and exit")
+ parser.add_argument('-lf', '--logfile', choices=('EVERYTHING', 'DEBUG', 'INFO', 'RESULT', 'USER_WARNING',
+ 'WARNING', 'ERROR', 'CRITICAL', 'NO_LOG'),
+ default='DEBUG', type=str,
+ help="sets file's log level.")
+ parser.add_argument('-lc', '--logconsole', choices=('EVERYTHING', 'DEBUG', 'INFO', 'RESULT', 'USER_WARNING',
+ 'WARNING', 'ERROR', 'CRITICAL', 'NO_LOG'),
+ default='INFO', type=str,
+ help="sets console's log level")
+
+ subparsers = parser.add_subparsers(dest='command')
+
+ # Run
+ subparser = subparsers.add_parser(
+ 'run', description="runs specified experiment")
+ subparser.add_argument('expid', help='experiment identifier')
+ subparser.add_argument('-nt', '--notransitive', action='store_true',
+ default=False, help='Disable transitive reduction')
+
+ # Expid
+ subparser = subparsers.add_parser(
+ 'expid', description="Creates a new experiment")
+ group = subparser.add_mutually_exclusive_group()
+ group.add_argument(
+ '-y', '--copy', help='makes a copy of the specified experiment')
+ group.add_argument('-dm', '--dummy', action='store_true',
+ help='creates a new experiment with default values, usually for testing')
+ group.add_argument('-op', '--operational', action='store_true',
+ help='creates a new experiment with operational experiment id')
+ subparser.add_argument('-H', '--HPC', required=True,
+ help='specifies the HPC to use for the experiment')
+ subparser.add_argument('-d', '--description', type=str, required=True,
+ help='sets a description for the experiment to store in the database.')
+
+ # Delete
+ subparser = subparsers.add_parser(
+ 'delete', description="delete specified experiment")
+ subparser.add_argument('expid', help='experiment identifier')
+ subparser.add_argument(
+ '-f', '--force', action='store_true', help='deletes experiment without confirmation')
+
+ # Monitor
+ subparser = subparsers.add_parser(
+ 'monitor', description="plots specified experiment")
+ subparser.add_argument('expid', help='experiment identifier')
+ subparser.add_argument('-o', '--output', choices=('pdf', 'png', 'ps', 'svg'), default='pdf',
+ help='chooses type of output for generated plot')
+ subparser.add_argument('-group_by', choices=('date', 'member', 'chunk', 'split', 'automatic'), default=None,
+ help='Groups the jobs automatically or by date, member, chunk or split')
+ subparser.add_argument('-expand', type=str,
+ help='Supply the list of dates/members/chunks to filter the list of jobs. Default = "Any". '
+ 'LIST = "[ 19601101 [ fc0 [1 2 3 4] fc1 [1] ] 19651101 [ fc0 [16-30] ] ]"')
+ subparser.add_argument(
+ '-expand_status', type=str, help='Select the statuses to be expanded')
+ subparser.add_argument('--hide_groups', action='store_true',
+ default=False, help='Hides the groups from the plot')
+ subparser.add_argument('-cw', '--check_wrapper', action='store_true',
+ default=False, help='Generate possible wrapper in the current workflow')
+
+ group2 = subparser.add_mutually_exclusive_group(required=False)
+
+ group.add_argument('-fs', '--filter_status', type=str,
+ choices=('Any', 'READY', 'COMPLETED',
+ 'WAITING', 'SUSPENDED', 'FAILED', 'UNKNOWN'),
+ help='Select the original status to filter the list of jobs')
+ group = subparser.add_mutually_exclusive_group(required=False)
+ group.add_argument('-fl', '--list', type=str,
+ help='Supply the list of job names to be filtered. Default = "Any". '
+ 'LIST = "b037_20101101_fc3_21_sim b037_20111101_fc4_26_sim"')
+ group.add_argument('-fc', '--filter_chunks', type=str,
+ help='Supply the list of chunks to filter the list of jobs. Default = "Any". '
+ 'LIST = "[ 19601101 [ fc0 [1 2 3 4] fc1 [1] ] 19651101 [ fc0 [16-30] ] ]"')
+ group.add_argument('-fs', '--filter_status', type=str,
+ choices=('Any', 'READY', 'COMPLETED',
+ 'WAITING', 'SUSPENDED', 'FAILED', 'UNKNOWN'),
+ help='Select the original status to filter the list of jobs')
+ group.add_argument('-ft', '--filter_type', type=str,
+ help='Select the job type to filter the list of jobs')
+ subparser.add_argument('--hide', action='store_true', default=False,
+ help='hides plot window')
+ group2.add_argument('--txt', action='store_true', default=False,
+ help='Generates only txt status file')
+
+ group2.add_argument('-txtlog', '--txt_logfiles', action='store_true', default=False,
+ help='Generates only txt status file(AS < 3.12b behaviour)')
+
+ subparser.add_argument('-nt', '--notransitive', action='store_true',
+ default=False, help='Disable transitive reduction')
+
+ # Stats
+ subparser = subparsers.add_parser(
+ 'stats', description="plots statistics for specified experiment")
+ subparser.add_argument('expid', help='experiment identifier')
+ subparser.add_argument('-ft', '--filter_type', type=str, help='Select the job type to filter '
+ 'the list of jobs')
+ subparser.add_argument('-fp', '--filter_period', type=int, help='Select the period to filter jobs '
+ 'from current time to the past '
+ 'in number of hours back')
+ subparser.add_argument('-o', '--output', choices=('pdf', 'png', 'ps', 'svg'), default='pdf',
+ help='type of output for generated plot')
+ subparser.add_argument('--hide', action='store_true', default=False,
+ help='hides plot window')
+ subparser.add_argument('-nt', '--notransitive', action='store_true',
+ default=False, help='Disable transitive reduction')
+
+ # Clean
+ subparser = subparsers.add_parser(
+ 'clean', description="clean specified experiment")
+ subparser.add_argument('expid', help='experiment identifier')
+ subparser.add_argument(
+ '-pr', '--project', action="store_true", help='clean project')
+ subparser.add_argument('-p', '--plot', action="store_true",
+ help='clean plot, only 2 last will remain')
+ subparser.add_argument('-s', '--stats', action="store_true",
+ help='clean stats, only last will remain')
+
+ # Recovery
+ subparser = subparsers.add_parser(
+ 'recovery', description="recover specified experiment")
+ subparser.add_argument(
+ 'expid', type=str, help='experiment identifier')
+ subparser.add_argument(
+ '-np', '--noplot', action='store_true', default=False, help='omit plot')
+ subparser.add_argument('--all', action="store_true", default=False,
+ help='Get completed files to synchronize pkl')
+ subparser.add_argument(
+ '-s', '--save', action="store_true", default=False, help='Save changes to disk')
+ subparser.add_argument('--hide', action='store_true', default=False,
+ help='hides plot window')
+ subparser.add_argument('-group_by', choices=('date', 'member', 'chunk', 'split', 'automatic'), default=None,
+ help='Groups the jobs automatically or by date, member, chunk or split')
+ subparser.add_argument('-expand', type=str,
+ help='Supply the list of dates/members/chunks to filter the list of jobs. Default = "Any". '
+ 'LIST = "[ 19601101 [ fc0 [1 2 3 4] fc1 [1] ] 19651101 [ fc0 [16-30] ] ]"')
+ subparser.add_argument(
+ '-expand_status', type=str, help='Select the statuses to be expanded')
+ subparser.add_argument('-nt', '--notransitive', action='store_true',
+ default=False, help='Disable transitive reduction')
+ subparser.add_argument('-nl', '--no_recover_logs', action='store_true', default=False,
+ help='Disable logs recovery')
+ # Migrate
+ subparser = subparsers.add_parser(
+ 'migrate', description="Migrate experiments from current user to another")
+ subparser.add_argument('expid', help='experiment identifier')
+ group = subparser.add_mutually_exclusive_group(required=True)
+ group.add_argument('-o', '--offer', action="store_true",
+ default=False, help='Offer experiment')
+ group.add_argument('-p', '--pickup', action="store_true",
+ default=False, help='Pick-up released experiment')
+
+ # Inspect
+ subparser = subparsers.add_parser(
+ 'inspect', description="Generate all .cmd files")
+ subparser.add_argument('expid', help='experiment identifier')
+ subparser.add_argument('-nt', '--notransitive', action='store_true',
+ default=False, help='Disable transitive reduction')
+ subparser.add_argument(
+ '-f', '--force', action="store_true", help='Overwrite all cmd')
+ subparser.add_argument('-cw', '--check_wrapper', action='store_true',
+ default=False, help='Generate possible wrapper in the current workflow')
+
+ group.add_argument('-fs', '--filter_status', type=str,
+ choices=('Any', 'READY', 'COMPLETED',
+ 'WAITING', 'SUSPENDED', 'FAILED', 'UNKNOWN'),
+ help='Select the original status to filter the list of jobs')
+ group = subparser.add_mutually_exclusive_group(required=False)
+ group.add_argument('-fl', '--list', type=str,
+ help='Supply the list of job names to be filtered. Default = "Any". '
+ 'LIST = "b037_20101101_fc3_21_sim b037_20111101_fc4_26_sim"')
+ group.add_argument('-fc', '--filter_chunks', type=str,
+ help='Supply the list of chunks to filter the list of jobs. Default = "Any". '
+ 'LIST = "[ 19601101 [ fc0 [1 2 3 4] fc1 [1] ] 19651101 [ fc0 [16-30] ] ]"')
+ group.add_argument('-fs', '--filter_status', type=str,
+ choices=('Any', 'READY', 'COMPLETED',
+ 'WAITING', 'SUSPENDED', 'FAILED', 'UNKNOWN'),
+ help='Select the original status to filter the list of jobs')
+ group.add_argument('-ft', '--filter_type', type=str,
+ help='Select the job type to filter the list of jobs')
+
+ # Check
+ subparser = subparsers.add_parser(
+ 'check', description="check configuration for specified experiment")
+ subparser.add_argument('expid', help='experiment identifier')
+ subparser.add_argument('-nt', '--notransitive', action='store_true',
+ default=False, help='Disable transitive reduction')
+ # Describe
+ subparser = subparsers.add_parser(
+ 'describe', description="Show details for specified experiment")
+ subparser.add_argument('expid', help='experiment identifier')
+
+ # Create
+ subparser = subparsers.add_parser(
+ 'create', description="create specified experiment joblist")
+ subparser.add_argument('expid', help='experiment identifier')
+ subparser.add_argument(
+ '-np', '--noplot', action='store_true', default=False, help='omit plot')
+ subparser.add_argument('--hide', action='store_true', default=False,
+ help='hides plot window')
+ subparser.add_argument('-o', '--output', choices=('pdf', 'png', 'ps', 'svg'), default='pdf',
+ help='chooses type of output for generated plot')
+ subparser.add_argument('-group_by', choices=('date', 'member', 'chunk', 'split', 'automatic'), default=None,
+ help='Groups the jobs automatically or by date, member, chunk or split')
+ subparser.add_argument('-expand', type=str,
+ help='Supply the list of dates/members/chunks to filter the list of jobs. Default = "Any". '
+ 'LIST = "[ 19601101 [ fc0 [1 2 3 4] fc1 [1] ] 19651101 [ fc0 [16-30] ] ]"')
+ subparser.add_argument(
+ '-expand_status', type=str, help='Select the statuses to be expanded')
+ subparser.add_argument('-nt', '--notransitive', action='store_true',
+ default=False, help='Disable transitive reduction')
+ subparser.add_argument('-cw', '--check_wrapper', action='store_true',
+ default=False, help='Generate possible wrapper in the current workflow')
+
+ # Configure
+ subparser = subparsers.add_parser('configure', description="configure database and path for autosubmit. It "
+ "can be done at machine, user or local level."
+ "If no arguments specified configure will "
+ "display dialog boxes (if installed)")
+ subparser.add_argument(
+ '--advanced', action="store_true", help="Open advanced configuration of autosubmit")
+ subparser.add_argument('-db', '--databasepath', default=None, help='path to database. If not supplied, '
+ 'it will prompt for it')
+ subparser.add_argument(
+ '-dbf', '--databasefilename', default=None, help='database filename')
+ subparser.add_argument('-lr', '--localrootpath', default=None, help='path to store experiments. If not '
+ 'supplied, it will prompt for it')
+ subparser.add_argument('-pc', '--platformsconfpath', default=None, help='path to platforms.conf file to '
+ 'use by default. Optional')
+ subparser.add_argument('-jc', '--jobsconfpath', default=None, help='path to jobs.conf file to use by '
+ 'default. Optional')
+ subparser.add_argument(
+ '-sm', '--smtphostname', default=None, help='STMP server hostname. Optional')
+ subparser.add_argument(
+ '-mf', '--mailfrom', default=None, help='Notifications sender address. Optional')
+ group = subparser.add_mutually_exclusive_group()
+ group.add_argument('--all', action="store_true",
+ help='configure for all users')
+ group.add_argument('--local', action="store_true", help='configure only for using Autosubmit from this '
+ 'path')
+
+ # Install
+ subparsers.add_parser(
+ 'install', description='install database for autosubmit on the configured folder')
+
+ # Set status
+ subparser = subparsers.add_parser(
+ 'setstatus', description="sets job status for an experiment")
+ subparser.add_argument('expid', help='experiment identifier')
+ subparser.add_argument(
+ '-np', '--noplot', action='store_true', default=False, help='omit plot')
+ subparser.add_argument(
+ '-s', '--save', action="store_true", default=False, help='Save changes to disk')
+
+ subparser.add_argument('-t', '--status_final',
+ choices=('READY', 'COMPLETED', 'WAITING', 'SUSPENDED', 'FAILED', 'UNKNOWN',
+ 'QUEUING', 'RUNNING'),
+ required=True,
+ help='Supply the target status')
+ group = subparser.add_mutually_exclusive_group(required=True)
+ group.add_argument('-fl', '--list', type=str,
+ help='Supply the list of job names to be changed. Default = "Any". '
+ 'LIST = "b037_20101101_fc3_21_sim b037_20111101_fc4_26_sim"')
+ group.add_argument('-fc', '--filter_chunks', type=str,
+ help='Supply the list of chunks to change the status. Default = "Any". '
+ 'LIST = "[ 19601101 [ fc0 [1 2 3 4] fc1 [1] ] 19651101 [ fc0 [16-30] ] ]"')
+ group.add_argument('-fs', '--filter_status', type=str,
+ help='Select the status (one or more) to filter the list of jobs.'
+ "Valid values = ['Any', 'READY', 'COMPLETED', 'WAITING', 'SUSPENDED', 'FAILED', 'UNKNOWN']")
+ group.add_argument('-ft', '--filter_type', type=str,
+ help='Select the job type to filter the list of jobs')
+
+ subparser.add_argument('--hide', action='store_true', default=False,
+ help='hides plot window')
+ subparser.add_argument('-group_by', choices=('date', 'member', 'chunk', 'split', 'automatic'), default=None,
+ help='Groups the jobs automatically or by date, member, chunk or split')
+ subparser.add_argument('-expand', type=str,
+ help='Supply the list of dates/members/chunks to filter the list of jobs. Default = "Any". '
+ 'LIST = "[ 19601101 [ fc0 [1 2 3 4] fc1 [1] ] 19651101 [ fc0 [16-30] ] ]"')
+ subparser.add_argument(
+ '-expand_status', type=str, help='Select the statuses to be expanded')
+ subparser.add_argument('-nt', '--notransitive', action='store_true',
+ default=False, help='Disable transitive reduction')
+ subparser.add_argument('-cw', '--check_wrapper', action='store_true',
+ default=False, help='Generate possible wrapper in the current workflow')
+
+ # Test Case
+ subparser = subparsers.add_parser(
+ 'testcase', description='create test case experiment')
+ subparser.add_argument(
+ '-y', '--copy', help='makes a copy of the specified experiment')
+ subparser.add_argument(
+ '-d', '--description', required=True, help='description of the test case')
+ subparser.add_argument('-c', '--chunks', help='chunks to run')
+ subparser.add_argument('-m', '--member', help='member to run')
+ subparser.add_argument('-s', '--stardate', help='stardate to run')
+ subparser.add_argument(
+ '-H', '--HPC', required=True, help='HPC to run experiment on it')
+ subparser.add_argument(
+ '-b', '--branch', help='branch of git to run (or revision from subversion)')
+
+ # Test
+ subparser = subparsers.add_parser(
+ 'test', description='test experiment')
+ subparser.add_argument('expid', help='experiment identifier')
+ subparser.add_argument(
+ '-c', '--chunks', required=True, help='chunks to run')
+ subparser.add_argument('-m', '--member', help='member to run')
+ subparser.add_argument('-s', '--stardate', help='stardate to run')
+ subparser.add_argument(
+ '-H', '--HPC', help='HPC to run experiment on it')
+ subparser.add_argument(
+ '-b', '--branch', help='branch of git to run (or revision from subversion)')
+
+ # Refresh
+ subparser = subparsers.add_parser(
+ 'refresh', description='refresh project directory for an experiment')
+ subparser.add_argument('expid', help='experiment identifier')
+ subparser.add_argument('-mc', '--model_conf', default=False, action='store_true',
+ help='overwrite model conf file')
+ subparser.add_argument('-jc', '--jobs_conf', default=False, action='store_true',
+ help='overwrite jobs conf file')
+
+ # Archive
+ subparser = subparsers.add_parser(
+ 'archive', description='archives an experiment')
+ subparser.add_argument('expid', help='experiment identifier')
+
+ # Unarchive
+ subparser = subparsers.add_parser(
+ 'unarchive', description='unarchives an experiment')
+ subparser.add_argument('expid', help='experiment identifier')
+
+ # Readme
+ subparsers.add_parser('readme', description='show readme')
+
+ # Changelog
+ subparsers.add_parser('changelog', description='show changelog')
+
+ args = parser.parse_args()
+
+ Log.set_console_level(args.logconsole)
+ Log.set_file_level(args.logfile)
+
+ if args.command == 'run':
+ return Autosubmit.run_experiment(args.expid, args.notransitive)
+ elif args.command == 'expid':
+ return Autosubmit.expid(args.HPC, args.description, args.copy, args.dummy, False,
+ args.operational) != ''
+ elif args.command == 'delete':
+ return Autosubmit.delete(args.expid, args.force)
+ elif args.command == 'monitor':
+ return Autosubmit.monitor(args.expid, args.output, args.list, args.filter_chunks, args.filter_status,
+ args.filter_type, args.hide, args.txt, args.group_by, args.expand,
+ args.expand_status, args.hide_groups, args.notransitive, args.check_wrapper, args.txt_logfiles)
+ elif args.command == 'stats':
+ return Autosubmit.statistics(args.expid, args.filter_type, args.filter_period, args.output, args.hide,
+ args.notransitive)
+ elif args.command == 'clean':
+ return Autosubmit.clean(args.expid, args.project, args.plot, args.stats)
+ elif args.command == 'recovery':
+ return Autosubmit.recovery(args.expid, args.noplot, args.save, args.all, args.hide, args.group_by,
+ args.expand, args.expand_status, args.notransitive, args.no_recover_logs)
+ elif args.command == 'check':
+ return Autosubmit.check(args.expid, args.notransitive)
+ elif args.command == 'inspect':
+ return Autosubmit.inspect(args.expid, args.list, args.filter_chunks, args.filter_status,
+ args.filter_type, args.notransitive, args.force, args.check_wrapper)
+ elif args.command == 'describe':
+ return Autosubmit.describe(args.expid)
+ elif args.command == 'migrate':
+ return Autosubmit.migrate(args.expid, args.offer, args.pickup)
+ elif args.command == 'create':
+ return Autosubmit.create(args.expid, args.noplot, args.hide, args.output, args.group_by, args.expand,
+ args.expand_status, args.notransitive, args.check_wrapper)
+ elif args.command == 'configure':
+ if not args.advanced or (args.advanced and dialog is None):
+ return Autosubmit.configure(args.advanced, args.databasepath, args.databasefilename,
+ args.localrootpath, args.platformsconfpath, args.jobsconfpath,
+ args.smtphostname, args.mailfrom, args.all, args.local)
+ else:
+ return Autosubmit.configure_dialog()
+ elif args.command == 'install':
+ return Autosubmit.install()
+ elif args.command == 'setstatus':
+ return Autosubmit.set_status(args.expid, args.noplot, args.save, args.status_final, args.list,
+ args.filter_chunks, args.filter_status, args.filter_type, args.hide,
+ args.group_by, args.expand, args.expand_status, args.notransitive, args.check_wrapper)
+ elif args.command == 'testcase':
+ return Autosubmit.testcase(args.copy, args.description, args.chunks, args.member, args.stardate,
+ args.HPC, args.branch)
+ elif args.command == 'test':
+ return Autosubmit.test(args.expid, args.chunks, args.member, args.stardate, args.HPC, args.branch)
+ elif args.command == 'refresh':
+ return Autosubmit.refresh(args.expid, args.model_conf, args.jobs_conf)
+ elif args.command == 'archive':
+ return Autosubmit.archive(args.expid)
+ elif args.command == 'unarchive':
+ return Autosubmit.unarchive(args.expid)
+
+ elif args.command == 'readme':
+ if os.path.isfile(Autosubmit.readme_path):
+ with open(Autosubmit.readme_path) as f:
+ print(f.read())
+ return True
+ return False
+ elif args.command == 'changelog':
+ if os.path.isfile(Autosubmit.changes_path):
+ with open(Autosubmit.changes_path) as f:
+ print(f.read())
+ return True
+ return False
+ except Exception as e:
+ from traceback import format_exc
+ Log.critical(
+ 'Unhandled exception on Autosubmit: {0}\n{1}', e, format_exc(10))
+
+ return False
+
+ @staticmethod
+ def _delete_expid(expid_delete):
+ """
+ Removes an experiment from path and database
+
+ :type expid_delete: str
+ :param expid_delete: identifier of the experiment to delete
+ """
+ if expid_delete == '' or expid_delete is None and not os.path.exists(os.path.join(BasicConfig.LOCAL_ROOT_DIR,
+ expid_delete)):
+ Log.info("Experiment directory does not exist.")
+ else:
+ Log.info("Removing experiment directory...")
+ ret = False
+ if pwd.getpwuid(os.stat(os.path.join(BasicConfig.LOCAL_ROOT_DIR, expid_delete)).st_uid).pw_name == os.getlogin():
+ try:
+
+ shutil.rmtree(os.path.join(
+ BasicConfig.LOCAL_ROOT_DIR, expid_delete))
+ except OSError as e:
+ Log.warning('Can not delete experiment folder: {0}', e)
+ return ret
+ Log.info("Deleting experiment from database...")
+ ret = delete_experiment(expid_delete)
+ if ret:
+ Log.result("Experiment {0} deleted".format(expid_delete))
+ else:
+ Log.warning(
+ "Current User is not the Owner {0} can not be deleted!", expid_delete)
+ return ret
+
+ @staticmethod
+ def expid(hpc, description, copy_id='', dummy=False, test=False, operational=False):
+ """
+ Creates a new experiment for given HPC
+
+ :param operational: if true, creates an operational experiment
+ :type operational: bool
+ :type hpc: str
+ :type description: str
+ :type copy_id: str
+ :type dummy: bool
+ :param hpc: name of the main HPC for the experiment
+ :param description: short experiment's description.
+ :param copy_id: experiment identifier of experiment to copy
+ :param dummy: if true, writes a default dummy configuration for testing
+ :param test: if true, creates an experiment for testing
+ :return: experiment identifier. If method fails, returns ''.
+ :rtype: str
+ """
+ BasicConfig.read()
+
+ log_path = os.path.join(
+ BasicConfig.LOCAL_ROOT_DIR, 'ASlogs', 'expid.log'.format(os.getuid()))
+ try:
+ Log.set_file(log_path)
+ except IOError as e:
+ Log.error("Can not create log file in path {0}: {1}".format(
+ log_path, e.message))
+ exp_id = None
+ if description is None:
+ Log.error("Missing experiment description.")
+ return ''
+ if hpc is None:
+ Log.error("Missing HPC.")
+ return ''
+ if not copy_id:
+ exp_id = new_experiment(
+ description, Autosubmit.autosubmit_version, test, operational)
+ if exp_id == '':
+ return ''
+ try:
+ os.mkdir(os.path.join(BasicConfig.LOCAL_ROOT_DIR, exp_id))
+
+ os.mkdir(os.path.join(
+ BasicConfig.LOCAL_ROOT_DIR, exp_id, 'conf'))
+ Log.info("Copying config files...")
+
+ # autosubmit config and experiment copied from AS.
+ files = resource_listdir('autosubmit.config', 'files')
+ for filename in files:
+ if resource_exists('autosubmit.config', 'files/' + filename):
+ index = filename.index('.')
+ new_filename = filename[:index] + \
+ "_" + exp_id + filename[index:]
+
+ if filename == 'platforms.conf' and BasicConfig.DEFAULT_PLATFORMS_CONF != '':
+ content = open(os.path.join(
+ BasicConfig.DEFAULT_PLATFORMS_CONF, filename)).read()
+ elif filename == 'jobs.conf' and BasicConfig.DEFAULT_JOBS_CONF != '':
+ content = open(os.path.join(
+ BasicConfig.DEFAULT_JOBS_CONF, filename)).read()
+ else:
+ content = resource_string(
+ 'autosubmit.config', 'files/' + filename)
+
+ conf_new_filename = os.path.join(
+ BasicConfig.LOCAL_ROOT_DIR, exp_id, "conf", new_filename)
+ Log.debug(conf_new_filename)
+ open(conf_new_filename, 'w').write(content)
+ Autosubmit._prepare_conf_files(
+ exp_id, hpc, Autosubmit.autosubmit_version, dummy)
+ except (OSError, IOError) as e:
+ Log.error(
+ "Can not create experiment: {0}\nCleaning...".format(e))
+ Autosubmit._delete_expid(exp_id)
+ return ''
+ else:
+ try:
+ if os.path.exists(os.path.join(BasicConfig.LOCAL_ROOT_DIR, copy_id)):
+ exp_id = copy_experiment(
+ copy_id, description, Autosubmit.autosubmit_version, test, operational)
+ if exp_id == '':
+ return ''
+ dir_exp_id = os.path.join(
+ BasicConfig.LOCAL_ROOT_DIR, exp_id)
+ os.mkdir(dir_exp_id)
+ os.mkdir(dir_exp_id + '/conf')
+ Log.info("Copying previous experiment config directories")
+ conf_copy_id = os.path.join(
+ BasicConfig.LOCAL_ROOT_DIR, copy_id, "conf")
+ files = os.listdir(conf_copy_id)
+ for filename in files:
+ if os.path.isfile(os.path.join(conf_copy_id, filename)):
+ new_filename = filename.replace(copy_id, exp_id)
+ content = open(os.path.join(
+ conf_copy_id, filename), 'r').read()
+ open(os.path.join(dir_exp_id, "conf",
+ new_filename), 'w').write(content)
+ Autosubmit._prepare_conf_files(
+ exp_id, hpc, Autosubmit.autosubmit_version, dummy)
+ #####
+ autosubmit_config = AutosubmitConfig(
+ copy_id, BasicConfig, ConfigParserFactory())
+ if autosubmit_config.check_conf_files():
+ project_type = autosubmit_config.get_project_type()
+ if project_type == "git":
+ autosubmit_config.check_proj()
+ autosubmit_git = AutosubmitGit(copy_id[0])
+ Log.info("checking model version...")
+ if not autosubmit_git.check_commit(autosubmit_config):
+ return False
+ #####
+ else:
+ Log.critical(
+ "The previous experiment directory does not exist")
+ return ''
+ except (OSError, IOError) as e:
+ Log.error(
+ "Can not create experiment: {0}\nCleaning...".format(e))
+ Autosubmit._delete_expid(exp_id)
+ return ''
+
+ Log.debug("Creating temporal directory...")
+ exp_id_path = os.path.join(BasicConfig.LOCAL_ROOT_DIR, exp_id)
+ tmp_path = os.path.join(exp_id_path, "tmp")
+ os.mkdir(tmp_path)
+ os.chmod(tmp_path, 0o775)
+ os.mkdir(os.path.join(tmp_path, BasicConfig.LOCAL_ASLOG_DIR))
+ os.chmod(os.path.join(tmp_path, BasicConfig.LOCAL_ASLOG_DIR), 0o775)
+ Log.debug("Creating temporal remote directory...")
+ remote_tmp_path = os.path.join(tmp_path, "LOG_" + exp_id)
+ os.mkdir(remote_tmp_path)
+ os.chmod(remote_tmp_path, 0o775)
+
+ Log.debug("Creating pkl directory...")
+ os.mkdir(os.path.join(exp_id_path, "pkl"))
+
+ Log.debug("Creating plot directory...")
+ os.mkdir(os.path.join(exp_id_path, "plot"))
+ os.chmod(os.path.join(exp_id_path, "plot"), 0o775)
+ Log.result("Experiment registered successfully")
+ Log.user_warning("Remember to MODIFY the config files!")
+ return exp_id
+
+ @staticmethod
+ def delete(expid, force):
+ """
+ Deletes and experiment from database and experiment's folder
+
+ :type force: bool
+ :type expid: str
+ :param expid: identifier of the experiment to delete
+ :param force: if True, does not ask for confirmation
+
+ :returns: True if succesful, False if not
+ :rtype: bool
+ """
+ log_path = os.path.join(
+ BasicConfig.LOCAL_ROOT_DIR, "ASlogs", 'delete.log'.format(os.getuid()))
+ try:
+ Log.set_file(log_path)
+ except IOError as e:
+ Log.error("Can not create log file in path {0}: {1}".format(
+ log_path, e.message))
+
+ if os.path.exists(os.path.join(BasicConfig.LOCAL_ROOT_DIR, expid)):
+ if force or Autosubmit._user_yes_no_query("Do you want to delete " + expid + " ?"):
+ return Autosubmit._delete_expid(expid)
+ else:
+ Log.info("Quitting...")
+ return False
+ else:
+ Log.error("The experiment does not exist")
+ return True
+
+ @staticmethod
+ def _load_parameters(as_conf, job_list, platforms):
+ # Load parameters
+ Log.debug("Loading parameters...")
+ parameters = as_conf.load_parameters()
+ for platform_name in platforms:
+ platform = platforms[platform_name]
+ platform.add_parameters(parameters)
+
+ platform = platforms[as_conf.get_platform().lower()]
+ platform.add_parameters(parameters, True)
+
+ job_list.parameters = parameters
+
+ @staticmethod
+ def inspect(expid, lst, filter_chunks, filter_status, filter_section, notransitive=False, force=False, check_wrapper=False):
+ """
+ Generates cmd files experiment.
+
+ :type expid: str
+ :param expid: identifier of experiment to be run
+ :return: True if run to the end, False otherwise
+ :rtype: bool
+ """
+
+ if expid is None:
+ Log.critical("Missing experiment id")
+
+ BasicConfig.read()
+ exp_path = os.path.join(BasicConfig.LOCAL_ROOT_DIR, expid)
+ tmp_path = os.path.join(exp_path, BasicConfig.LOCAL_TMP_DIR)
+ if os.path.exists(os.path.join(tmp_path, 'autosubmit.lock')):
+ locked = True
+ else:
+ locked = False
+
+ if not os.path.exists(exp_path):
+ Log.critical(
+ "The directory %s is needed and does not exist" % exp_path)
+ Log.warning("Does an experiment with the given id exist?")
+ return 1
+ Log.info("Starting inspect command")
+ Log.set_file(os.path.join(
+ tmp_path, BasicConfig.LOCAL_ASLOG_DIR, 'generate.log'))
+ os.system('clear')
+ signal.signal(signal.SIGINT, signal_handler)
+ as_conf = AutosubmitConfig(expid, BasicConfig, ConfigParserFactory())
+ if not as_conf.check_conf_files():
+ Log.critical('Can not generate scripts with invalid configuration')
+ return False
+ project_type = as_conf.get_project_type()
+ if project_type != "none":
+ # Check proj configuration
+ as_conf.check_proj()
+ safetysleeptime = as_conf.get_safetysleeptime()
+ Log.debug("The Experiment name is: {0}", expid)
+ Log.debug("Sleep: {0}", safetysleeptime)
+ packages_persistence = JobPackagePersistence(os.path.join(BasicConfig.LOCAL_ROOT_DIR, expid, "pkl"),
+ "job_packages_" + expid)
+ os.chmod(os.path.join(BasicConfig.LOCAL_ROOT_DIR, expid,
+ "pkl", "job_packages_" + expid + ".db"), 0664)
+
+ packages_persistence.reset_table(True)
+ job_list_original = Autosubmit.load_job_list(
+ expid, as_conf, notransitive=notransitive)
+ job_list = copy.deepcopy(job_list_original)
+ job_list.packages_dict = {}
+
+ Log.debug("Length of the jobs list: {0}", len(job_list))
+
+ # variables to be updated on the fly
+ safetysleeptime = as_conf.get_safetysleeptime()
+ Log.debug("Sleep: {0}", safetysleeptime)
+ # Generate
+ Log.info("Starting to generate cmd scripts")
+
+ if not isinstance(job_list, type([])):
+ jobs = []
+ jobs_cw = []
+ if check_wrapper and (not locked or (force and locked)):
+ Log.info("Generating all cmd script adapted for wrappers")
+ jobs = job_list.get_uncompleted()
+
+ jobs_cw = job_list.get_completed()
+ else:
+ if (force and not locked) or (force and locked):
+ Log.info("Overwritting all cmd scripts")
+ jobs = job_list.get_job_list()
+ elif locked:
+ Log.warning(
+ "There is a .lock file and not -f, generating only all unsubmitted cmd scripts")
+ jobs = job_list.get_unsubmitted()
+ else:
+ Log.info("Generating cmd scripts only for selected jobs")
+ if filter_chunks:
+ fc = filter_chunks
+ Log.debug(fc)
+ if fc == 'Any':
+ jobs = job_list.get_job_list()
+ else:
+ # noinspection PyTypeChecker
+ data = json.loads(Autosubmit._create_json(fc))
+ for date_json in data['sds']:
+ date = date_json['sd']
+ jobs_date = filter(lambda j: date2str(
+ j.date) == date, job_list.get_job_list())
+
+ for member_json in date_json['ms']:
+ member = member_json['m']
+ jobs_member = filter(
+ lambda j: j.member == member, jobs_date)
+
+ for chunk_json in member_json['cs']:
+ chunk = int(chunk_json)
+ jobs = jobs + \
+ [job for job in filter(
+ lambda j: j.chunk == chunk, jobs_member)]
+
+ elif filter_status:
+ Log.debug(
+ "Filtering jobs with status {0}", filter_status)
+ if filter_status == 'Any':
+ jobs = job_list.get_job_list()
+ else:
+ fs = Autosubmit._get_status(filter_status)
+ jobs = [job for job in filter(
+ lambda j: j.status == fs, job_list.get_job_list())]
+
+ elif filter_section:
+ ft = filter_section
+ Log.debug(ft)
+
+ if ft == 'Any':
+ jobs = job_list.get_job_list()
+ else:
+ for job in job_list.get_job_list():
+ if job.section == ft:
+ jobs.append(job)
+ elif lst:
+ jobs_lst = lst.split()
+
+ if jobs == 'Any':
+ jobs = job_list.get_job_list()
+ else:
+ for job in job_list.get_job_list():
+ if job.name in jobs_lst:
+ jobs.append(job)
+ else:
+ jobs = job_list.get_job_list()
+ if isinstance(jobs, type([])):
+ referenced_jobs_to_remove = set()
+ for job in jobs:
+ for child in job.children:
+ if child not in jobs:
+ referenced_jobs_to_remove.add(child)
+ for parent in job.parents:
+ if parent not in jobs:
+ referenced_jobs_to_remove.add(parent)
+
+ for job in jobs:
+ job.status = Status.WAITING
+
+ Autosubmit.generate_scripts_andor_wrappers(
+ as_conf, job_list, jobs, packages_persistence, False)
+ if len(jobs_cw) > 0:
+ referenced_jobs_to_remove = set()
+ for job in jobs_cw:
+ for child in job.children:
+ if child not in jobs_cw:
+ referenced_jobs_to_remove.add(child)
+ for parent in job.parents:
+ if parent not in jobs_cw:
+ referenced_jobs_to_remove.add(parent)
+
+ for job in jobs_cw:
+ job.status = Status.WAITING
+ Autosubmit.generate_scripts_andor_wrappers(
+ as_conf, job_list, jobs_cw, packages_persistence, False)
+
+ Log.info("no more scripts to generate, now proceed to check them manually")
+ time.sleep(safetysleeptime)
+ return True
+
    @staticmethod
    def generate_scripts_andor_wrappers(as_conf, job_list, jobs_filtered, packages_persistence, only_wrappers=False):
        """
        Generates the cmd scripts (and/or wrapper packages) for the given
        filtered subset of jobs, looping until no active jobs remain.

        :param as_conf: AutosubmitConfig object
        :param job_list: JobList object, contains a list of jobs
        :param jobs_filtered: list of jobs to generate scripts for
        :param packages_persistence: Database handler for job packages
        :param only_wrappers: if True, only wrapper structures are built and
            inner jobs are marked COMPLETED without writing submit scripts
        """
        # Replace the list's internal jobs with the filtered subset, then
        # refresh statuses against the current configuration.
        job_list._job_list = jobs_filtered
        job_list.update_list(as_conf, False)
        # Identifying the submitter and loading it
        submitter = Autosubmit._get_submitter(as_conf)
        # Function depending on the submitter
        submitter.load_platforms(as_conf)
        # Identifying HPC from config files
        hpcarch = as_conf.get_platform()
        #
        Autosubmit._load_parameters(as_conf, job_list, submitter.platforms)
        platforms_to_test = set()
        for job in job_list.get_job_list():
            # Jobs without an explicit platform fall back to the main HPC.
            if job.platform_name is None:
                job.platform_name = hpcarch
            # noinspection PyTypeChecker
            job.platform = submitter.platforms[job.platform_name.lower()]
            # noinspection PyTypeChecker
            platforms_to_test.add(job.platform)
        # case setstatus
        job_list.check_scripts(as_conf)
        job_list.update_list(as_conf, False)
        # Reload parameters after the status update.
        Autosubmit._load_parameters(as_conf, job_list, submitter.platforms)
        # Each submit_ready_jobs pass (inspect=True: no real submission)
        # generates scripts for the READY jobs; updating the list promotes
        # the next wave until nothing is active.
        while job_list.get_active():
            Autosubmit.submit_ready_jobs(
                as_conf, job_list, platforms_to_test, packages_persistence, True, only_wrappers)

            job_list.update_list(as_conf, False)
+
    @staticmethod
    def run_experiment(expid, notransitive=False):
        """
        Runs an experiment (submitting all the jobs properly and repeating its execution in case of failure).

        :type expid: str
        :param expid: identifier of experiment to be run
        :param notransitive: if True, transitive reduction is skipped when
            loading the job list
        :return: True if run to the end, False otherwise (2 on interrupt,
            1 when the experiment directory is missing)
        :rtype: bool
        """
        if expid is None:
            Log.critical("Missing experiment id")

        BasicConfig.read()
        exp_path = os.path.join(BasicConfig.LOCAL_ROOT_DIR, expid)
        tmp_path = os.path.join(exp_path, BasicConfig.LOCAL_TMP_DIR)
        aslogs_path = os.path.join(tmp_path, BasicConfig.LOCAL_ASLOG_DIR)
        if not os.path.exists(aslogs_path):
            os.mkdir(aslogs_path)
            os.chmod(aslogs_path, 0o775)
        if not os.path.exists(exp_path):
            Log.critical(
                "The directory %s is needed and does not exist" % exp_path)
            Log.warning("Does an experiment with the given id exist?")
            return 1

        # checking host whitelist
        import platform
        host = platform.node()
        print(host)
        if BasicConfig.ALLOWED_HOSTS and host not in BasicConfig.ALLOWED_HOSTS:
            Log.info("\n Autosubmit run command is not allowed on this host")
            return False

        # checking if there is a lock file to avoid multiple running on the same expid
        try:
            with portalocker.Lock(os.path.join(tmp_path, 'autosubmit.lock'), timeout=1):
                Log.info(
                    "Preparing .lock file to avoid multiple instances with same experiment id")

                Log.set_file(os.path.join(aslogs_path, 'run.log'))
                os.system('clear')

                signal.signal(signal.SIGINT, signal_handler)

                as_conf = AutosubmitConfig(
                    expid, BasicConfig, ConfigParserFactory())
                if not as_conf.check_conf_files():
                    Log.critical('Can not run with invalid configuration')
                    return False

                project_type = as_conf.get_project_type()
                if project_type != "none":
                    # Check proj configuration
                    as_conf.check_proj()

                hpcarch = as_conf.get_platform()

                safetysleeptime = as_conf.get_safetysleeptime()
                retrials = as_conf.get_retrials()

                submitter = Autosubmit._get_submitter(as_conf)
                submitter.load_platforms(as_conf)

                Log.debug("The Experiment name is: {0}", expid)
                Log.debug("Sleep: {0}", safetysleeptime)
                Log.debug("Default retrials: {0}", retrials)

                Log.info("Starting job submission...")

                pkl_dir = os.path.join(
                    BasicConfig.LOCAL_ROOT_DIR, expid, 'pkl')
                job_list = Autosubmit.load_job_list(
                    expid, as_conf, notransitive=notransitive)

                Log.debug(
                    "Starting from job list restored from {0} files", pkl_dir)

                Log.debug("Length of the jobs list: {0}", len(job_list))

                Autosubmit._load_parameters(
                    as_conf, job_list, submitter.platforms)

                # check the job list script creation
                Log.debug("Checking experiment templates...")

                platforms_to_test = set()
                for job in job_list.get_job_list():
                    # Jobs without an explicit platform fall back to the
                    # main HPC platform.
                    if job.platform_name is None:
                        job.platform_name = hpcarch
                    # noinspection PyTypeChecker
                    job.platform = submitter.platforms[job.platform_name.lower(
                    )]
                    # noinspection PyTypeChecker
                    platforms_to_test.add(job.platform)

                job_list.check_scripts(as_conf)

                packages_persistence = JobPackagePersistence(os.path.join(BasicConfig.LOCAL_ROOT_DIR, expid, "pkl"),
                                                             "job_packages_" + expid)

                # Restore wrapper packages from the persistence DB and
                # rebuild the id -> WrapperJob map so wrapped jobs are
                # tracked through their wrapper, not individually.
                if as_conf.get_wrapper_type() != 'none':
                    os.chmod(os.path.join(BasicConfig.LOCAL_ROOT_DIR,
                                          expid, "pkl", "job_packages_" + expid + ".db"), 0664)
                    packages = packages_persistence.load()
                    for (exp_id, package_name, job_name) in packages:
                        if package_name not in job_list.packages_dict:
                            job_list.packages_dict[package_name] = []
                        job_list.packages_dict[package_name].append(
                            job_list.get_job_by_name(job_name))

                    for package_name, jobs in job_list.packages_dict.items():
                        from job.job import WrapperJob
                        wrapper_job = WrapperJob(package_name, jobs[0].id, Status.SUBMITTED, 0, jobs,
                                                 None,
                                                 None, jobs[0].platform, as_conf)
                        job_list.job_package_map[jobs[0].id] = wrapper_job
                job_list.update_list(as_conf)
                job_list.save()
                #########################
                # AUTOSUBMIT - MAIN LOOP
                #########################
                # Main loop. Finishing when all jobs have been submitted
                while job_list.get_active():
                    # Autosubmit.exit is set by the SIGINT handler.
                    if Autosubmit.exit:
                        return 2
                    # reload parameters changes
                    Log.debug("Reloading parameters...")
                    as_conf.reload()
                    Autosubmit._load_parameters(
                        as_conf, job_list, submitter.platforms)
                    # variables to be updated on the fly
                    total_jobs = len(job_list.get_job_list())
                    Log.info(
                        "\n\n{0} of {1} jobs remaining ({2})".format(total_jobs - len(job_list.get_completed()),
                                                                     total_jobs,
                                                                     time.strftime("%H:%M")))
                    safetysleeptime = as_conf.get_safetysleeptime()
                    Log.debug("Sleep: {0}", safetysleeptime)
                    default_retrials = as_conf.get_retrials()
                    Log.debug("Number of retrials: {0}", default_retrials)

                    check_wrapper_jobs_sleeptime = as_conf.get_wrapper_check_time()
                    Log.debug('WRAPPER CHECK TIME = {0}'.format(
                        check_wrapper_jobs_sleeptime))

                    save = False

                    # For Slurm platforms job checks are batched: collect
                    # them here and query the scheduler once per platform.
                    slurm = []
                    for platform in platforms_to_test:
                        list_jobid = ""
                        completed_joblist = []
                        list_prevStatus = []
                        queuing_jobs = job_list.get_in_queue_grouped_id(
                            platform)
                        for job_id, job in queuing_jobs.items():
                            # Jobs belonging to a wrapper are checked via
                            # their WrapperJob, throttled by the configured
                            # wrapper check time.
                            if job_list.job_package_map and job_id in job_list.job_package_map:
                                Log.debug(
                                    'Checking wrapper job with id ' + str(job_id))
                                wrapper_job = job_list.job_package_map[job_id]
                                check_wrapper = True
                                if wrapper_job.status == Status.RUNNING:
                                    check_wrapper = True if datetime.timedelta.total_seconds(datetime.datetime.now(
                                    ) - wrapper_job.checked_time) >= check_wrapper_jobs_sleeptime else False
                                if check_wrapper:
                                    wrapper_job.checked_time = datetime.datetime.now()
                                    platform.check_job(wrapper_job)
                                    Log.info(
                                        'Wrapper job ' + wrapper_job.name + ' is ' + str(Status.VALUE_TO_KEY[wrapper_job.new_status]))
                                    wrapper_job.check_status(
                                        wrapper_job.new_status)
                                    save = True
                                else:
                                    Log.info(
                                        "Waiting for wrapper check time: {0}\n", check_wrapper_jobs_sleeptime)
                            else:
                                # grouped-id map yields single-job lists for
                                # non-wrapped jobs
                                job = job[0]
                                prev_status = job.status
                                if job.status == Status.FAILED:
                                    continue

                                if platform.type == "slurm":
                                    # defer: checked in one batched call below
                                    list_jobid += str(job_id) + ','
                                    list_prevStatus.append(prev_status)
                                    completed_joblist.append(job)
                                else:
                                    platform.check_job(job)
                                    if prev_status != job.update_status(as_conf.get_copy_remote_logs() == 'true'):
                                        # Mail notification on status change,
                                        # if enabled for this job.
                                        if as_conf.get_notifications() == 'true':
                                            if Status.VALUE_TO_KEY[job.status] in job.notify_on:
                                                Notifier.notify_status_change(MailNotifier(BasicConfig), expid, job.name,
                                                                              Status.VALUE_TO_KEY[prev_status],
                                                                              Status.VALUE_TO_KEY[job.status],
                                                                              as_conf.get_mails_to())
                                        save = True

                        if platform.type == "slurm" and list_jobid != "":
                            slurm.append(
                                [platform, list_jobid, list_prevStatus, completed_joblist])
                    # END LOOP
                    # Batched Slurm check: one scheduler query per platform,
                    # then the same status-change/notification handling.
                    for platform_jobs in slurm:
                        platform = platform_jobs[0]
                        jobs_to_check = platform_jobs[1]
                        platform.check_Alljobs(
                            platform_jobs[3], jobs_to_check, as_conf.get_copy_remote_logs())

                        for j_Indx in xrange(0, len(platform_jobs[3])):
                            prev_status = platform_jobs[2][j_Indx]
                            job = platform_jobs[3][j_Indx]

                            if prev_status != job.update_status(as_conf.get_copy_remote_logs() == 'true'):
                                if as_conf.get_notifications() == 'true':
                                    if Status.VALUE_TO_KEY[job.status] in job.notify_on:
                                        Notifier.notify_status_change(MailNotifier(BasicConfig), expid, job.name,
                                                                      Status.VALUE_TO_KEY[prev_status],
                                                                      Status.VALUE_TO_KEY[job.status],
                                                                      as_conf.get_mails_to())
                                save = True

                    if job_list.update_list(as_conf) or save:
                        job_list.save()

                    if Autosubmit.submit_ready_jobs(as_conf, job_list, platforms_to_test, packages_persistence):
                        job_list.save()

                    if Autosubmit.exit:
                        return 2
                    time.sleep(safetysleeptime)

                Log.info("No more jobs to run.")
                if len(job_list.get_failed()) > 0:
                    Log.info("Some jobs have failed and reached maximum retrials")
                    return False
                else:
                    Log.result("Run successful")
                    return True

        except portalocker.AlreadyLocked:
            # Another process holds the experiment lock: warn and bail out.
            Autosubmit.show_lock_warning(expid)

        except WrongTemplateException:
            return False
+
    @staticmethod
    def submit_ready_jobs(as_conf, job_list, platforms_to_test, packages_persistence, inspect=False,
                          only_wrappers=False):
        """
        Gets READY jobs and send them to the platforms if there is available space on the queues

        :param as_conf: autosubmit config object. \n
        :type as_conf: AutosubmitConfig Object. \n
        :param job_list: JobList as a single entity. \n
        :type job_list: JobList() Object. \n
        :param platforms_to_test: List of platforms that will be used in the experiment. \n
        :type platforms_to_test: Set() of Platform() Object. e.g. EcPlatform(), LsfPlatform(), etc. \n
        :param inspect: if True, scripts are generated but nothing is
            actually submitted to the platforms
        :param only_wrappers: if True, only wrapper structures are built and
            inner jobs are marked COMPLETED
        :return: True if at least one job was submitted, False otherwise
        :rtype: bool
        """
        save = False

        for platform in platforms_to_test:
            Log.debug("\nJobs ready for {1}: {0}", len(
                job_list.get_ready(platform)), platform.name)
            # Group READY jobs into packages (single jobs or wrappers);
            # remote_dependencies_dict maps package names to dependencies.
            packages_to_submit, remote_dependencies_dict = JobPackager(
                as_conf, platform, job_list).build_packages()
            if not inspect:
                platform.open_submit_script()
            valid_packages_to_submit = []
            for package in packages_to_submit:
                try:
                    # Wire up remote dependencies before submission, when
                    # this package depends on an already-submitted one.
                    if hasattr(package, "name"):
                        if remote_dependencies_dict and package.name in remote_dependencies_dict['dependencies']:
                            remote_dependency = remote_dependencies_dict['dependencies'][package.name]
                            remote_dependency_id = remote_dependencies_dict['name_to_id'][remote_dependency]
                            package.set_job_dependency(remote_dependency_id)
                    if not only_wrappers:
                        try:
                            package.submit(
                                as_conf, job_list.parameters, inspect)
                            valid_packages_to_submit.append(package)
                        except (IOError, OSError):
                            # write error file
                            continue
                    if only_wrappers or inspect:
                        # Nothing is really run: mark inner jobs COMPLETED
                        # so the generation loop can advance.
                        for innerJob in package._jobs:
                            innerJob.status = Status.COMPLETED

                        # Register the wrapper package in the job list and
                        # persist threaded packages.
                        if hasattr(package, "name"):
                            job_list.packages_dict[package.name] = package.jobs
                            from job.job import WrapperJob
                            wrapper_job = WrapperJob(package.name, package.jobs[0].id, Status.READY, 0,
                                                     package.jobs,
                                                     package._wallclock, package._num_processors,
                                                     package.platform, as_conf)
                            job_list.job_package_map[package.jobs[0].id] = wrapper_job
                            if remote_dependencies_dict and package.name in remote_dependencies_dict['name_to_id']:
                                remote_dependencies_dict['name_to_id'][package.name] = package.jobs[0].id
                            if isinstance(package, JobPackageThread):
                                packages_persistence.save(
                                    package.name, package.jobs, package._expid, inspect)
                        save = True
                except WrongTemplateException as e:
                    Log.error(
                        "Invalid parameter substitution in {0} template", e.job_name)
                    raise
                except Exception:
                    Log.error(
                        "{0} submission failed due to Unknown error", platform.name)
                    raise

            # Slurm batches all packages into one submit script: send it
            # now and assign the returned job ids back to each package.
            if platform.type == "slurm" and not inspect and not only_wrappers:
                try:
                    save = True
                    if len(valid_packages_to_submit) > 0:
                        jobs_id = platform.submit_Script()
                        if jobs_id is None:
                            raise BaseException(
                                "Exiting AS being unable to get jobID")
                        # jobs_id is indexed per package: every job in a
                        # package shares the same scheduler id.
                        i = 0
                        for package in valid_packages_to_submit:
                            for job in package.jobs:
                                job.id = str(jobs_id[i])
                                Log.info("{0} submitted", job.name)
                                job.status = Status.SUBMITTED
                                job.write_submit_time()
                            if hasattr(package, "name"):
                                job_list.packages_dict[package.name] = package.jobs
                                from job.job import WrapperJob
                                wrapper_job = WrapperJob(package.name, package.jobs[0].id, Status.SUBMITTED, 0,
                                                         package.jobs,
                                                         package._wallclock, package._num_processors,
                                                         package.platform, as_conf)
                                job_list.job_package_map[package.jobs[0].id] = wrapper_job
                                if remote_dependencies_dict and package.name in remote_dependencies_dict[
                                        'name_to_id']:
                                    remote_dependencies_dict['name_to_id'][package.name] = package.jobs[0].id
                                if isinstance(package, JobPackageThread):
                                    packages_persistence.save(
                                        package.name, package.jobs, package._expid, inspect)
                            i += 1

                except WrongTemplateException as e:
                    Log.error(
                        "Invalid parameter substitution in {0} template", e.job_name)
                    raise
                except Exception:
                    Log.error("{0} submission failed", platform.name)
                    raise

        return save
+
+ @staticmethod
+ def monitor(expid, file_format, lst, filter_chunks, filter_status, filter_section, hide, txt_only=False,
+ group_by=None, expand=list(), expand_status=list(), hide_groups=False, notransitive=False, check_wrapper=False, txt_logfiles=False):
+ """
+ Plots workflow graph for a given experiment with status of each job coded by node color.
+ Plot is created in experiment's plot folder with name __