diff --git a/.gitignore b/.gitignore
index 40e7cb3ec7e789f1d55658d9559726592bd3eb72..62eb691d75829a800172b22b21febdd44b6d653e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,6 +10,7 @@ autosubmit/simple_test.py
 .idea/
 autosubmit.egg-info/
 docs/build/
+docs/_build/
 dist/
 build/
 .cache
diff --git a/autosubmit/autosubmit.py b/autosubmit/autosubmit.py
index d946626320ed51f9c6c7fe48282040218dac1f75..c8b8c58e19642d5bba2f309ab6373d5069f0aea8 100644
--- a/autosubmit/autosubmit.py
+++ b/autosubmit/autosubmit.py
@@ -56,6 +56,7 @@ from .notifications.mail_notifier import MailNotifier
 from .notifications.notifier import Notifier
 from .platforms.paramiko_submitter import ParamikoSubmitter
 from .platforms.platform import Platform
+from .generators import Engine, get_engine_generator
 dialog = None
 from time import sleep
@@ -637,7 +638,20 @@ class Autosubmit:
                                help='Read job files generated by the inspect subcommand.')
         subparser.add_argument('ID', metavar='ID',
                                help='An ID of a Workflow (eg a000) or a Job (eg a000_20220401_fc0_1_1_APPLICATION).')
-        args = parser.parse_args()
+        # Generate
+        subparser = subparsers.add_parser(
+            'generate', description='Generate a workflow definition for a different Workflow Manager',
+            argument_default=argparse.SUPPRESS)
+        subparser.add_argument('expid', help='experiment identifier')
+        subparser.add_argument('-engine', '--engine', default=Engine.pyflow.value, type=str.lower,
+                               help='The target Workflow Manager engine', choices=[engine.value for engine in Engine])
+        subparser.add_argument('args', nargs='?')
+
+        if len(sys.argv) > 1 and len(sys.argv[1]) > 1 and sys.argv[1] in ['generate']:
+            args, options = parser.parse_known_args()
+        else:
+            options = []
+            args = parser.parse_args()
 
         if args.command is None:
             parser.print_help()
@@ -743,6 +757,8 @@ class Autosubmit:
             return Autosubmit.update_description(args.expid, args.description)
         elif args.command == 'cat-log':
             return Autosubmit.cat_log(args.ID, args.file, args.mode, args.inspect)
+        elif args.command == 'generate':
+            return Autosubmit.generate_workflow(args.expid, Engine[args.engine], options)
 
     @staticmethod
     def _init_logs(args, console_level='INFO', log_level='DEBUG', expid='None'):
@@ -6155,3 +6171,43 @@ class Autosubmit:
             raise AutosubmitCritical(f'The job log file {file} found is not a file: {workflow_log_file}', 7011)
 
         return view_file(workflow_log_file, mode) == 0
+
+    @staticmethod
+    def generate_workflow(expid: str, engine: Engine, options: List[str]) -> None:
+        """Generate the workflow configuration for a different backend engine."""
+        Log.info(f'Generate workflow configuration for {engine}')
+
+        try:
+            Log.info("Getting job list...")
+            as_conf = AutosubmitConfig(expid, BasicConfig, YAMLParserFactory())
+            as_conf.check_conf_files(False)
+
+            submitter = Autosubmit._get_submitter(as_conf)
+            submitter.load_platforms(as_conf)
+            if len(submitter.platforms) == 0:
+                raise ValueError('Missing platform!')
+
+            packages_persistence = JobPackagePersistence(
+                os.path.join(BasicConfig.LOCAL_ROOT_DIR, expid, "pkl"), "job_packages_" + expid)
+            job_list = Autosubmit.load_job_list(expid, as_conf, notransitive=False, monitor=False)
+
+            Autosubmit._load_parameters(as_conf, job_list, submitter.platforms)
+
+            hpc_architecture = as_conf.get_platform()
+            for job in job_list.get_job_list():
+                if job.platform_name is None or job.platform_name == '':
+                    job.platform_name = hpc_architecture
+                job.platform = submitter.platforms[job.platform_name]
+                job.update_parameters(as_conf, job_list.parameters)
+
+            job_list.check_scripts(as_conf)
+        except AutosubmitError as e:
+            raise AutosubmitCritical(e.message, e.code, e.trace)
+        except AutosubmitCritical:
+            raise
+        except BaseException as e:
+            raise AutosubmitCritical("Error while checking the configuration files or loading the job_list", 7040,
+                                     str(e))
+
+        get_engine_generator(engine)(job_list, as_conf, [f'--experiment={expid}', *options])
+
diff --git a/autosubmit/generators/__init__.py b/autosubmit/generators/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..513c9e5820a1f79228fa27e7fec982620811fa61
--- /dev/null
+++ b/autosubmit/generators/__init__.py
@@ -0,0 +1,30 @@
+"""This module provides generators to produce workflow configurations for different backend engines."""
+from enum import Enum
+from importlib import import_module
+from typing import Callable, cast
+
+
+class Engine(Enum):
+    """Workflow Manager engine flavors."""
+    pyflow = 'pyflow'
+
+    def __str__(self):
+        return self.value
+
+
+# TODO: use typing.Protocol instead of object when Py>=3.8
+class GenerateProto(object):
+    """Protocol to define the type returned by importlib."""
+    generate: Callable
+
+
+def get_engine_generator(engine: Engine) -> Callable:
+    """Dynamically load the ``generate`` function of the given engine's generator module."""
+    generator_module = cast(GenerateProto, import_module(f'autosubmit.generators.{engine.value}'))
+    return generator_module.generate
+
+
+__all__ = [
+    'Engine',
+    'get_engine_generator'
+]
diff --git a/autosubmit/generators/pyflow.py b/autosubmit/generators/pyflow.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ba972d99c6a03d11f7b4587c91b55cec820fa4f
--- /dev/null
+++ b/autosubmit/generators/pyflow.py
@@ -0,0 +1,224 @@
+"""The PyFlow generator for Autosubmit."""
+import argparse
+import tempfile
+from enum import Enum
+from pathlib import Path
+from typing import List
+
+import pyflow as pf
+from autosubmitconfigparser.config.configcommon import AutosubmitConfig
+from autosubmitconfigparser.config.basicconfig import BasicConfig
+from pyflow import *
+
+from autosubmit.job.job_list import JobList, Job
+
+# Autosubmit Task name separator (not to be confused with task and chunk name separator).
+DEFAULT_SEPARATOR = '_'
+
+
+class Running(Enum):
+    """The Running level of an Autosubmit task."""
+    ONCE = 'once'
+    MEMBER = 'member'
+    CHUNK = 'chunk'
+
+    def __str__(self):
+        return self.value
+
+
+# Defines how many ``_``'s are replaced by a ``/`` for
+# each Autosubmit hierarchy level (to avoid using an if/elif/else).
+REPLACE_COUNT = {
+    Running.ONCE.value: 1,
+    Running.MEMBER.value: 3,
+    Running.CHUNK.value: 4
+}
+
+
+def _autosubmit_id_to_ecflow_id(job_id: str, running: str) -> str:
+    """Given an Autosubmit ID, create the node ID for ecFlow (minus heading ``/``)."""
+    replace_count = REPLACE_COUNT[running]
+    return job_id.replace(DEFAULT_SEPARATOR, '/', replace_count)
+
+
+def _parse_args(args) -> argparse.Namespace:
+    parser = argparse.ArgumentParser(
+        prog='autosubmit generate ... engine=pyflow',
+        description='Produces a valid PyFlow workflow configuration given an Autosubmit experiment ID',
+        epilog='This program needs access to an Autosubmit installation'
+    )
+    parser.add_argument('-e', '--experiment', required=True, help='Autosubmit experiment ID')
+    parser.add_argument('-d', '--deploy', default=False, action='store_true', help='Deploy to ecFlow or not')
+    parser.add_argument('-o', '--output', default=tempfile.gettempdir(), help='Output directory')
+    parser.add_argument('-s', '--server', default='localhost',
+                        help='ecFlow server hostname or IP (only used if deploy=True)')
+    parser.add_argument('-p', '--port', default=3141, help='ecFlow server port (only used if deploy=True)')
+    parser.add_argument('-g', '--graph', default=False, action='store_true', help='Print the DOT plot')
+    parser.add_argument('-q', '--quiet', default=False, action='store_true')
+
+    return parser.parse_args(args)
+
+
+def _create_ecflow_suite(
+        experiment_id: str,
+        start_dates: List[str],
+        members: List[str],
+        chunks: List[int],
+        jobs: List[Job],
+        server_host: str,
+        output_dir: str,
+        as_conf: AutosubmitConfig) -> Suite:
+    """Replicate the vanilla workflow graph structure."""
+
+    # From: https://pyflow-workflow-generator.readthedocs.io/en/latest/content/introductory-course/getting-started.html
+    # /scratch is a base directory for ECF_FILES and ECF_HOME
+    scratch_dir = Path(Path(output_dir).absolute(), 'scratch')
+    # /scratch/files is the ECF_FILES, where ecflow_server looks for ecf scripts if they are not in their default location
+    files_dir = scratch_dir / 'files'
+    # /scratch/out is the ECF_HOME, the home of all ecFlow files, $CWD
+    out_dir = scratch_dir / 'out'
+
+    if not files_dir.exists():
+        files_dir.mkdir(parents=True, exist_ok=True)
+
+    if not out_dir.exists():
+        out_dir.mkdir(parents=True, exist_ok=True)
+
+    # First we create a suite with the same ID as the Autosubmit experiment,
+    # and families for each Autosubmit hierarchy level. We use control variables
+    # such as home (ECF_HOME), and files (ECF_FILES), but there are others that
+    # can be used too, like include (ECF_INCLUDE), out (ECF_OUT), and extn
+    # (ECF_EXTN, defaults to the extension .ecf).
+    # NOTE: PyFlow does not work very well with MyPy: https://github.com/ecmwf/pyflow/issues/5
+    with Suite(  # type: ignore
+            experiment_id,
+            host=pf.LocalHost(server_host),
+            defstatus=pf.state.suspended,  # type: ignore
+            home=str(out_dir),  # type: ignore
+            files=str(files_dir)  # type: ignore
+    ) as s:  # type: ignore
+        for start_date in start_dates:
+            with AnchorFamily(start_date, START_DATE=start_date):  # type: ignore
+                for member in members:
+                    with AnchorFamily(member, MEMBER=member):  # type: ignore
+                        for chunk in chunks:
+                            AnchorFamily(str(chunk), CHUNK=chunk)
+        # The PyFlow API makes it very easy to create tasks given the ecFlow ID.
+        # Due to how we expanded the Autosubmit graph to include the IDs, and how
+        # we structured this suite, an Autosubmit ID can be seamlessly translated
+        # to an ecFlow ID by simply replacing `_`'s by `/`, ignoring the `_`'s in
+        # task names.
+        #
+        # This means that `a000_REMOTE_SETUP` from Autosubmit is `a000/REMOTE_SETUP`
+        # in ecFlow, `a000_20220401_fc0_INI` is `a000/20220401/fc0/INI`, and so on.
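+        #
+        # Tasks are named after the job section; split jobs get a `_<split>`
+        # suffix (e.g. a SIM job with split 2 becomes the task SIM_2 under
+        # its chunk family).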
+        for job in jobs:
+            ecflow_node = _autosubmit_id_to_ecflow_id(job.long_name, job.running)
+            if job.split is not None and job.split > 0:
+                t = Task(f'{job.section}_{job.split}', SPLIT=job.split)
+            else:
+                t = Task(job.section)
+
+            # Find the direct parent of the task, based on the Autosubmit task ID.
+            # Start from the Suite, and skip the first (suite) and the last (task)
+            # as we know we can discard these.
+            parent_node = s
+            for node in ecflow_node.split('/')[1:-1]:
+                parent_node = parent_node[node]
+            # We just need to prevent adding a node twice, since creating a task automatically
+            # adds it to the suite in the context; then we simply call ``add_node`` and we have it.
+            # TODO: use mapping.keys when Py>=3.8 or 3.9?
+            #   if t.name not in list(parent_node.children.mapping.keys()):
+            if t.name not in [child.name for child in parent_node.children]:
+                parent_node.add_node(t)
+
+            # Dependencies
+            for parent in job.parents:
+                dependency_node = _autosubmit_id_to_ecflow_id(parent.long_name, parent.running)
+                parent_node = s
+                for node in dependency_node.split('/')[1:-1]:
+                    parent_node = parent_node[node]
+                parent_key = parent.section
+                if parent.split is not None and parent.split > 0:
+                    parent_key = f'{parent_key}_{parent.split}'
+                dependency_node = parent_node[parent_key]
+
+                # In case we ever need to use the pre-processed file.
+                #
+                # script_name = job.create_script(as_conf)
+                # script_text = open(Path(job._tmp_path, script_name)).read()
+                # # Let's drop the Autosubmit header and tailer.
+                # script_text = re.findall(
+                #     r'# Autosubmit job(.*)# Autosubmit tailer',
+                #     script_text,
+                #     flags=re.DOTALL | re.MULTILINE)[0][1:-1]
+                # t.script = script_text
+
+                # Operator overloaded in PyFlow. This creates a dependency.
+                dependency_node >> t
+
+            # Script
+            # N.B.: We used the code below in the beginning, but later we realized it would
+            #       not work. In Autosubmit, the FILE: $FILE is a file, relative to the AS
+            #       Project folder. The $FILE template script is then pre-processed to be
+            #       executed by AS. That is different from ecFlow, where the Task Script
+            #       value is pre-processed (i.e. if the value is ``templates/local_script.sh``,
+            #       that value is treated as a string when included in the final job file)
+            #       to generate the ecFlow job file. So ecFlow pre-processes the ``.script``
+            #       value, whereas Autosubmit loads the ``FILE`` script and pre-processes it.
+            #
+            #       In order to have a similar behavior, we insert the contents of the AS
+            #       template script as the ecFlow task. That way, the template script (now
+            #       a Task Script in ecFlow) will be pre-processed by ecFlow.
+            #
+            #       The variables may still need to be manually adjusted, but once that is
+            #       done, the script should then be ready to be executed (i.e. ported).
+            # FIXME
+            # t.script = job.file
+            # with open(Path(as_conf.get_project_dir(), job.file)) as f:
+            #     t.script = f.read()
+            t.script = 'sleep 5'
+
+    return s
+
+
+def generate(job_list: JobList, as_conf: AutosubmitConfig, options: List[str]) -> None:
+    """Generates a PyFlow workflow using the Autosubmit database.
+
+    The ``autosubmit create`` command must have been executed prior
+    to calling this function. This is so that the jobs are correctly loaded
+    to produce the PyFlow workflow.
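+
+    Example of a call with hypothetical argument values::
+
+        generate(job_list, as_conf,
+                 ['--experiment=a000', '--server=localhost', '--output=/tmp/a000'])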
+
+    :param job_list: ``JobList`` Autosubmit object that contains the parameters, jobs, and graph
+    :param as_conf: Autosubmit configuration
+    :param options: a list of strings with arguments (equivalent to sys.argv), passed to argparse
+    """
+    args: argparse.Namespace = _parse_args(options)
+
+    expid = job_list.expid
+    start_dates = [d.strftime("%Y%m%d") for d in job_list.get_date_list()]
+    members = job_list.get_member_list()
+    chunks = job_list.get_chunk_list()
+
+    suite = _create_ecflow_suite(
+        experiment_id=expid,
+        start_dates=start_dates,
+        members=members,
+        chunks=chunks,
+        jobs=job_list.get_all(),
+        server_host=args.server,
+        output_dir=args.output,
+        as_conf=as_conf
+    )
+
+    suite.check_definition()
+    if not args.quiet:
+        print(suite)
+
+    if args.deploy:
+        suite.deploy_suite(overwrite=True)  # type: ignore
+        suite.replace_on_server(host=args.server, port=args.port)
+
+
+__all__ = [
+    'generate'
+]
diff --git a/docs/source/index.rst b/docs/source/index.rst
index c269937033a08b9c9b290d6267e1c8ec24b9843c..8f6e1690522acda0dfc7ae9554a3b235cf6b5a48 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -45,6 +45,7 @@ Welcome to autosubmit's documentation!
    /userguide/variables
    /userguide/expids
    /userguide/provenance
+   /userguide/generate/index
 
 .. toctree::
    :caption: Database Documentation
@@ -92,4 +93,4 @@ Resource Management
     Autosubmit supports a per-platform configuration, allowing users to run their experiments without adapting job scripts.
 
 Multiple Platform
-    Autosubmit can run jobs of an experiment in different platforms
\ No newline at end of file
+    Autosubmit can run jobs of an experiment in different platforms
diff --git a/docs/source/qstartguide/index.rst b/docs/source/qstartguide/index.rst
index c99973882114adfeb36996dfb397bb494e342c83..5c619d9b9d9a7c1b0f1b857c0c222d4bba73b742 100644
--- a/docs/source/qstartguide/index.rst
+++ b/docs/source/qstartguide/index.rst
@@ -57,6 +57,8 @@ Description of most used commands
      - Recovers the experiment workflow obtaining the last run complete jobs.
    * - **setstatus <EXPID>**
      - Sets one or multiple jobs status to a given value.
+   * - **generate <EXPID>**
+     - Generate workflow configuration for a different workflow backend engine (e.g. PyFlow/ecFlow).
 
 
 Create a new experiment
diff --git a/docs/source/userguide/generate/index.rst b/docs/source/userguide/generate/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..42b9cc28df2e13bd3af0a3942b19467b0e248438
--- /dev/null
+++ b/docs/source/userguide/generate/index.rst
@@ -0,0 +1,114 @@
+Generate Workflow Configuration
+===============================
+
+By default, Autosubmit produces an internal representation of a workflow to run
+the experiment created and configured. With the ``autosubmit generate`` subcommand,
+it is possible to generate an external configuration of the same workflow in a
+different syntax, for a different workflow backend engine.
+
+At the moment, the only supported workflow engine is `ecFlow <https://ecflow.readthedocs.io/>`_.
+
+Prerequisites
+-------------
+
+Before running ``autosubmit generate``, you **must** have executed ``autosubmit create``.
+This is important, as the ``create`` subcommand produces the information necessary to build
+a workflow graph. This graph is traversed by the ``generate`` subcommand to produce a new
+graph for a different workflow engine.
+
+How to generate workflow configuration
+--------------------------------------
+
+The command syntax is:
+
+.. code-block:: bash
+
+    autosubmit generate <EXPID> --engine <ENGINE> [args...]
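+
+For example, for a hypothetical experiment ``a000``, a PyFlow configuration
+could be generated with:
+
+.. code-block:: bash
+
+    autosubmit generate a000 --engine pyflow --server=localhost --output=/tmp/a000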
+
+PyFlow / ecFlow
+~~~~~~~~~~~~~~~
+
+`PyFlow <https://pyflow-workflow-generator.readthedocs.io/>`_ is a Python utility developed
+by the ECMWF to generate workflow configuration for workflow engines. At the moment, its only
+output workflow configuration is for ecFlow, an ECMWF workflow manager.
+
+.. code-block:: bash
+   :caption: Command usage to generate PyFlow/ecFlow configuration
+
+   autosubmit generate <EXPID> \
+       --engine pyflow \
+       --server=<SERVER> \
+       --output=<OUTPUT_DIR> \
+       [--quiet --deploy --port=<PORT>]
+
+For PyFlow/ecFlow, the required parameters are the ``--server`` where the workflow
+will run, and the ``--output`` with the directory to write the ecFlow generated
+files. If you enable ``--deploy``, the command calls code from PyFlow to deploy the
+suite to ecFlow. For this option, you will also have to specify ``--port``.
+
+To reduce the verbosity of the command, you can specify ``--quiet``, although that
+does not completely guarantee that the command will not output anything, as it calls
+other modules.
+
+Scripts preprocessing
+---------------------
+
+One important thing to keep in mind when generating workflow configurations for
+different workflow engines is the use of preprocessing in script templates.
+
+Autosubmit, like many other workflow managers, offers variable substitution
+(interpolation) that is used to preprocess task scripts. For example:
+
+.. code-block:: bash
+   :caption: Task script that requires Autosubmit preprocessing
+
+   echo "The root dir is %ROOTDIR%"
+
+The ``ROOTDIR`` variable is :doc:`replaced by Autosubmit </userguide/variables>`, before
+the Bash shell executes the script (i.e. it is not an environment variable). ``ROOTDIR``
+is a variable provided by the Autosubmit runtime; it may exist in other workflow managers,
+but under a different name.
+
+This is a problem for the portability of the generated scripts. A recommended workaround
+for this issue is to use a single script that defines the variables used by the workflow
+tasks. For example, a file called ``prepare_environment.sh``:
+
+.. code-block:: bash
+   :caption: ``prepare_environment.sh`` for Autosubmit
+
+   ROOTDIR=%ROOTDIR%
+   CHUNK=%CHUNK%
+
+Your task scripts will then have to ``source`` this file, like so:
+
+.. code-block:: bash
+   :caption: Task script that does not require Autosubmit preprocessing
+
+   #!/bin/bash
+   set -xuve
+
+   source prepare_environment.sh
+
+   echo "The root dir is ${ROOTDIR}"
+
+The idea of this approach is to reduce the modifications necessary when porting
+the workflow from Autosubmit to a different workflow engine. In contrast, if you
+used the Autosubmit variables in all your template files, then when porting to a
+different workflow engine you would have to ``a)`` adjust every script to use the
+correct variables, ``b)`` preprocess the scripts with Autosubmit assuming you have
+an identical target platform, or ``c)`` change the generated workflow configuration
+manually.
+
+In the case of PyFlow/ecFlow, for instance, the ``prepare_environment.sh`` file
+would have to be updated to use the correct variable substitution syntax and the
+correct ecFlow variable. For example:
+
+.. code-block:: bash
+   :caption: ``prepare_environment.sh`` modified for ecFlow
+
+   ROOTDIR=%ECF_HOME%  # ECF_HOME is a possible replacement for ROOTDIR
+   CHUNK=%CHUNK%       # CHUNK is set by the generate subcommand via PyFlow
+
+.. note::
+   Autosubmit and ecFlow have similar syntax for the variables that are preprocessed,
+   using ``%`` to wrap the variables. However, this may not always be the case. You can
+   find workflow managers that use other symbols, Jinja templates, YAML input files, etc.
diff --git a/requeriments.txt b/requeriments.txt
index d357f39dd55bda022a103d910c76efa2192375a6..f63354fd1093ffd91dca136175895eaf653c59a1 100644
--- a/requeriments.txt
+++ b/requeriments.txt
@@ -31,3 +31,4 @@ typing>=3.7
 wheel
 psutil
 rocrate==0.*
+pyflow-workflow-generator
diff --git a/setup.py b/setup.py
index 7ad4b3409d3262b0c2cfac859c4a15c0e8ac2bc4..d68bef11a563ecc2d0b837d0b3d81018f4a03f7a 100644
--- a/setup.py
+++ b/setup.py
@@ -39,7 +39,7 @@ setup(
     url='http://www.bsc.es/projects/earthscience/autosubmit/',
     download_url='https://earth.bsc.es/wiki/doku.php?id=tools:autosubmit',
     keywords=['climate', 'weather', 'workflow', 'HPC'],
-    install_requires=['ruamel.yaml==0.17.21','cython','autosubmitconfigparser','bcrypt>=3.2','packaging>19','six>=1.10.0','configobj>=5.0.6','argparse>=1.4.0','python-dateutil>=2.8.2','matplotlib<3.6','py3dotplus>=1.1.0','pyparsing>=3.0.7','paramiko>=2.9.2','mock>=4.0.3','portalocker>=2.3.2,<=2.7.0','networkx==2.6.3','requests>=2.27.1','bscearth.utils>=0.5.2','cryptography>=36.0.1','setuptools>=60.8.2','xlib>=0.21','pip>=22.0.3','pythondialog','pytest','nose','coverage','PyNaCl>=1.5.0','Pygments','psutil','rocrate==0.*'],
+    install_requires=['ruamel.yaml==0.17.21','cython','autosubmitconfigparser','bcrypt>=3.2','packaging>19','six>=1.10.0','configobj>=5.0.6','argparse>=1.4.0','python-dateutil>=2.8.2','matplotlib<3.6','py3dotplus>=1.1.0','pyparsing>=3.0.7','paramiko>=2.9.2','mock>=4.0.3','portalocker>=2.3.2,<=2.7.0','networkx==2.6.3','requests>=2.27.1','bscearth.utils>=0.5.2','cryptography>=36.0.1','setuptools>=60.8.2','xlib>=0.21','pip>=22.0.3','pythondialog','pytest','nose','coverage','PyNaCl>=1.5.0','Pygments','psutil','rocrate==0.*','pyflow-workflow-generator'],
     classifiers=[
         "Programming Language :: Python :: 3.7",
         "Programming Language :: Python :: 3.9",
diff --git a/test/unit/generators/test_pyflow.py b/test/unit/generators/test_pyflow.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b8c79046c3b8729c6ffd80d03d1d77d3c28db59
--- /dev/null
+++ b/test/unit/generators/test_pyflow.py
@@ -0,0 +1,91 @@
+from unittest import TestCase
+from unittest.mock import MagicMock, patch
+from autosubmit.generators.pyflow import generate, Running
+from pyflow import Suite
+from tempfile import TemporaryDirectory
+from datetime import datetime
+from autosubmit.job.job import Job, Status
+
+
+class TestPyFlow(TestCase):
+
+    def setUp(self) -> None:
+        self.suite_name = 'a000'
+
+    def _create_job(self, section, name, status, running, split=None):
+        """Create an Autosubmit job with pre-defined values."""
+        # TODO: maybe suggest a kwargs approach in the constructor to expand to local vars?
+        job = Job(name, 0, status, 0)
+        job.section = section
+        job.running = str(running)
+        job.split = split
+        job.file = 'templates/script.sh'
+        return job
+
+    def _get_jobs(self):
+        """Create a list of Autosubmit jobs.
+
+        For these tests we use a very simple experiment workflow."""
+        ini_job = self._create_job('INI', 'a000_INI', Status.COMPLETED, Running.ONCE)
+        prep_job = self._create_job('PREP', 'a000_20000101_fc0_PREP', Status.READY, Running.MEMBER)
+        prep_job.parents = {ini_job}
+        sim_job_1 = self._create_job('SIM', 'a000_20000101_fc0_1_1_SIM', Status.QUEUING, Running.CHUNK, 1)
+        sim_job_1.parents = {prep_job}
+        sim_job_2 = self._create_job('SIM', 'a000_20000101_fc0_1_2_SIM', Status.QUEUING, Running.CHUNK, 2)
+        sim_job_2.parents = {prep_job, sim_job_1}
+        return [ini_job, prep_job, sim_job_1, sim_job_2]
+
+    def _create_job_list(self, expid, dates=None, members=None, chunks=None, empty=False):
+        if dates is None:
+            dates = []
+        if members is None:
+            members = []
+        if chunks is None:
+            chunks = []
+        job_list = MagicMock(expid=expid)
+        job_list.get_date_list.return_value = dates
+        job_list.get_member_list.return_value = members
+        job_list.get_chunk_list.return_value = chunks
+        job_list.get_all.return_value = [] if empty is True else self._get_jobs()
+        return job_list
+
+    def test_generate(self):
+        with TemporaryDirectory() as temp_out_dir:
+            tests = [
+                {
+                    'job_list': self._create_job_list('a000', [datetime(2000, 1, 1)], ['fc0'], ['1']),
+                    'as_conf': None,
+                    'options': ['-e', 'a000', '-o', temp_out_dir, '-s', 'localhost'],
+                    'expected_error': None
+                },
+                {
+                    'job_list': self._create_job_list('a000', [datetime(2000, 1, 1)], ['fc0'], ['1']),
+                    'as_conf': None,
+                    'options': ['-e', 'a000', '-o', temp_out_dir, '-s', 'localhost', '--quiet'],
+                    'expected_error': None
+                },
+                {
+                    'job_list': self._create_job_list('a001', [], [], [], empty=True),
+                    'as_conf': None,
+                    'options': ['-e', 'a001', '-o', temp_out_dir, '-s', 'localhost', '--quiet'],
+                    'expected_error': None
+                },
+                {
+                    'job_list': self._create_job_list('a002', [], [], [], empty=True),
+                    'as_conf': None,
+                    'options': ['-e', 'a002', '-o', None, '-s', 'localhost', '--quiet'],
+                    'expected_error': TypeError
+                }
+            ]
+            for test in tests:
+                job_list = test['job_list']
+                as_conf = test['as_conf']
+                options = test['options']
+                expected_error = test['expected_error']
+
+                if expected_error is not None:
+                    # ``assertRaises`` verifies the expected error type; a bare ``except``
+                    # here would also swallow the ``self.fail`` assertion.
+                    with self.assertRaises(expected_error):
+                        generate(job_list, as_conf, options)
+                else:
+                    generate(job_list, as_conf, options)