diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 0000000000000000000000000000000000000000..d83c1d1b68f3115ff9cd6fc59efac7a860c76a6f
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,9 @@
+[pytest]
+addopts =
+ --verbose
+testpaths =
+ test/unit/
+doctest_optionflags =
+ NORMALIZE_WHITESPACE
+ IGNORE_EXCEPTION_DETAIL
+ ELLIPSIS
diff --git a/setup.py b/setup.py
index 7ad4b3409d3262b0c2cfac859c4a15c0e8ac2bc4..c223aa6b9652a5cbfdb0a5af32a5204799aa37af 100644
--- a/setup.py
+++ b/setup.py
@@ -39,7 +39,7 @@ setup(
url='http://www.bsc.es/projects/earthscience/autosubmit/',
download_url='https://earth.bsc.es/wiki/doku.php?id=tools:autosubmit',
keywords=['climate', 'weather', 'workflow', 'HPC'],
- install_requires=['ruamel.yaml==0.17.21','cython','autosubmitconfigparser','bcrypt>=3.2','packaging>19','six>=1.10.0','configobj>=5.0.6','argparse>=1.4.0','python-dateutil>=2.8.2','matplotlib<3.6','py3dotplus>=1.1.0','pyparsing>=3.0.7','paramiko>=2.9.2','mock>=4.0.3','portalocker>=2.3.2,<=2.7.0','networkx==2.6.3','requests>=2.27.1','bscearth.utils>=0.5.2','cryptography>=36.0.1','setuptools>=60.8.2','xlib>=0.21','pip>=22.0.3','pythondialog','pytest','nose','coverage','PyNaCl>=1.5.0','Pygments','psutil','rocrate==0.*'],
+ install_requires=['ruamel.yaml==0.17.21','cython','autosubmitconfigparser','bcrypt>=3.2','packaging>19','six>=1.10.0','configobj>=5.0.6','argparse>=1.4.0','python-dateutil>=2.8.2','matplotlib<3.6','py3dotplus>=1.1.0','pyparsing>=3.0.7','paramiko>=2.9.2','portalocker>=2.3.2,<=2.7.0','networkx==2.6.3','requests>=2.27.1','bscearth.utils>=0.5.2','cryptography>=36.0.1','setuptools>=60.8.2','xlib>=0.21','pip>=22.0.3','pythondialog','pytest','pytest-cov','pytest-mock','PyNaCl>=1.5.0','Pygments','psutil','rocrate==0.*'],
classifiers=[
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.9",
diff --git a/test/unit/conftest.py b/test/unit/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd24fc68d5f364c227809e056ac34b06c97ac48b
--- /dev/null
+++ b/test/unit/conftest.py
@@ -0,0 +1,109 @@
+# Fixtures available to multiple test files must be created in this file.
+
+import pytest
+from dataclasses import dataclass
+from pathlib import Path
+from shutil import rmtree
+from tempfile import TemporaryDirectory
+from typing import Any, Dict, Callable, List
+
+from autosubmit.autosubmit import Autosubmit
+from autosubmit.platforms.slurmplatform import SlurmPlatform, ParamikoPlatform
+from autosubmitconfigparser.config.basicconfig import BasicConfig
+from autosubmitconfigparser.config.configcommon import AutosubmitConfig
+from autosubmitconfigparser.config.yamlparser import YAMLParserFactory
+
+
+@dataclass
+class AutosubmitExperiment:
+ """This holds information about an experiment created by Autosubmit."""
+ expid: str
+ autosubmit: Autosubmit
+ exp_path: Path
+ tmp_dir: Path
+ aslogs_dir: Path
+ status_dir: Path
+ platform: ParamikoPlatform
+
+
+@pytest.fixture(scope='function')
+def autosubmit_exp(autosubmit: Autosubmit, request: pytest.FixtureRequest) -> Callable:
+ """Create an instance of ``Autosubmit`` with an experiment."""
+
+ original_root_dir = BasicConfig.LOCAL_ROOT_DIR
+ tmp_dir = TemporaryDirectory()
+ tmp_path = Path(tmp_dir.name)
+
+ def _create_autosubmit_exp(expid: str):
+ # directories used when searching for logs to cat
+ root_dir = tmp_path
+ BasicConfig.LOCAL_ROOT_DIR = str(root_dir)
+ exp_path = root_dir / expid
+ exp_tmp_dir = exp_path / BasicConfig.LOCAL_TMP_DIR
+ aslogs_dir = exp_tmp_dir / BasicConfig.LOCAL_ASLOG_DIR
+ status_dir = exp_path / 'status'
+ aslogs_dir.mkdir(parents=True)
+ status_dir.mkdir()
+
+ platform_config = {
+ "LOCAL_ROOT_DIR": BasicConfig.LOCAL_ROOT_DIR,
+ "LOCAL_TMP_DIR": str(exp_tmp_dir),
+ "LOCAL_ASLOG_DIR": str(aslogs_dir)
+ }
+ platform = SlurmPlatform(expid=expid, name='slurm_platform', config=platform_config)
+ platform.job_status = {
+ 'COMPLETED': [],
+ 'RUNNING': [],
+ 'QUEUING': [],
+ 'FAILED': []
+ }
+ submit_platform_script = aslogs_dir / 'submit_local.sh'
+ submit_platform_script.touch(exist_ok=True)
+
+ return AutosubmitExperiment(
+ expid=expid,
+ autosubmit=autosubmit,
+ exp_path=exp_path,
+ tmp_dir=exp_tmp_dir,
+ aslogs_dir=aslogs_dir,
+ status_dir=status_dir,
+ platform=platform
+ )
+
+ def finalizer():
+ BasicConfig.LOCAL_ROOT_DIR = original_root_dir
+ rmtree(tmp_path)
+
+ request.addfinalizer(finalizer)
+
+ return _create_autosubmit_exp
+
+
+@pytest.fixture(scope='module')
+def autosubmit() -> Autosubmit:
+ """Create an instance of ``Autosubmit``.
+
+ Useful when you need ``Autosubmit`` but do not need any experiments."""
+ autosubmit = Autosubmit()
+ return autosubmit
+
+
+@pytest.fixture(scope='function')
+def create_as_conf() -> Callable:
+ def _create_as_conf(autosubmit_exp: AutosubmitExperiment, yaml_files: List[Path], experiment_data: Dict[str, Any]):
+ basic_config = BasicConfig
+ parser_factory = YAMLParserFactory()
+ as_conf = AutosubmitConfig(
+ expid=autosubmit_exp.expid,
+ basic_config=basic_config,
+ parser_factory=parser_factory
+ )
+ for yaml_file in yaml_files:
+ parser = parser_factory.create_parser()
+ parser.data = parser.load(yaml_file)
+ as_conf.experiment_data.update(parser.data)
+ # add user-provided experiment data
+ as_conf.experiment_data.update(experiment_data)
+ return as_conf
+
+ return _create_as_conf
diff --git a/test/unit/test_basic_config.py b/test/unit/test_basic_config.py
index 120b7d5c27cbd7ac0e7314ed9f66e30a35246cba..837262cfdb4452ec213781ccadebd0ec95a9b65b 100644
--- a/test/unit/test_basic_config.py
+++ b/test/unit/test_basic_config.py
@@ -1,8 +1,20 @@
-from unittest import TestCase
+# Copyright 2015-2023 Earth Sciences Department, BSC-CNS
+# This file is part of Autosubmit.
+#
+# Autosubmit is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Autosubmit is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Autosubmit. If not, see <http://www.gnu.org/licenses/>.
import os
-from mock import Mock
-from mock import patch
from autosubmitconfigparser.config.basicconfig import BasicConfig
@@ -15,19 +27,19 @@ from autosubmitconfigparser.config.basicconfig import BasicConfig
'''
-class TestBasicConfig(TestCase):
- def test_update_config_set_the_right_db_path(self):
- # arrange
- BasicConfig.DB_PATH = 'fake-path'
- # act
- BasicConfig._update_config()
- # assert
- self.assertEqual(os.path.join(BasicConfig.DB_DIR, BasicConfig.DB_FILE), BasicConfig.DB_PATH)
-
- def test_read_makes_the_right_method_calls(self):
- # arrange
- with patch('autosubmitconfigparser.config.basicconfig.BasicConfig._update_config', Mock()):
- # act
- BasicConfig.read()
- # assert
- BasicConfig._update_config.assert_called_once_with()
+def test_update_config_set_the_right_db_path():
+ # arrange
+ BasicConfig.DB_PATH = 'fake-path'
+ # act
+ BasicConfig._update_config()
+ # assert
+ assert os.path.join(BasicConfig.DB_DIR, BasicConfig.DB_FILE) == BasicConfig.DB_PATH
+
+
+def test_read_makes_the_right_method_calls(mocker):
+ # arrange
+ mocker.patch('autosubmitconfigparser.config.basicconfig.BasicConfig._update_config')
+ # act
+ BasicConfig.read()
+ # assert
+ assert BasicConfig._update_config.call_count == 1
diff --git a/test/unit/test_catlog.py b/test/unit/test_catlog.py
index 86dc6ae83b768aedf2040fbfdca997954494bcb1..ca18d4db4b7b15b433c83fc647f6e908175c0e56 100644
--- a/test/unit/test_catlog.py
+++ b/test/unit/test_catlog.py
@@ -1,126 +1,135 @@
-from unittest import TestCase
+# Copyright 2015-2023 Earth Sciences Department, BSC-CNS
+# This file is part of Autosubmit.
+#
+# Autosubmit is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Autosubmit is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Autosubmit. If not, see <http://www.gnu.org/licenses/>.
import io
import sys
from contextlib import suppress, redirect_stdout
from pathlib import Path
-from tempfile import TemporaryDirectory
-from unittest.mock import patch
+from pytest import raises
+from pytest_mock import MockerFixture
+from typing import Callable
from autosubmit.autosubmit import Autosubmit, AutosubmitCritical
-from autosubmitconfigparser.config.basicconfig import BasicConfig
-
-
-class TestJob(TestCase):
-
- def setUp(self):
- self.autosubmit = Autosubmit()
- # directories used when searching for logs to cat
- self.original_root_dir = BasicConfig.LOCAL_ROOT_DIR
- self.root_dir = TemporaryDirectory()
- BasicConfig.LOCAL_ROOT_DIR = self.root_dir.name
- self.exp_path = Path(self.root_dir.name, 'a000')
- self.tmp_dir = self.exp_path / BasicConfig.LOCAL_TMP_DIR
- self.aslogs_dir = self.tmp_dir / BasicConfig.LOCAL_ASLOG_DIR
- self.status_path = self.exp_path / 'status'
- self.aslogs_dir.mkdir(parents=True)
- self.status_path.mkdir()
-
- def tearDown(self) -> None:
- BasicConfig.LOCAL_ROOT_DIR = self.original_root_dir
- if self.root_dir is not None:
- self.root_dir.cleanup()
-
- def test_invalid_file(self):
- def _fn():
- self.autosubmit.cat_log(None, '8', None) # type: ignore
- self.assertRaises(AutosubmitCritical, _fn)
-
- def test_invalid_mode(self):
- def _fn():
- self.autosubmit.cat_log(None, 'o', '8') # type: ignore
- self.assertRaises(AutosubmitCritical, _fn)
-
- # -- workflow
-
- def test_is_workflow_invalid_file(self):
- def _fn():
- self.autosubmit.cat_log('a000', 'j', None)
- self.assertRaises(AutosubmitCritical, _fn)
-
- @patch('autosubmit.autosubmit.Log')
- def test_is_workflow_not_found(self, Log):
- self.autosubmit.cat_log('a000', 'o', 'c')
+from test.unit.conftest import AutosubmitExperiment
+
+
+def test_invalid_file(autosubmit: Autosubmit):
+ with raises(AutosubmitCritical):
+ autosubmit.cat_log(None, '8', None) # type: ignore
+
+
+def test_invalid_mode(autosubmit: Autosubmit):
+ with raises(AutosubmitCritical):
+ autosubmit.cat_log(None, 'o', '8') # type: ignore
+
+
+# -- workflow
+
+def test_is_workflow_invalid_file(autosubmit: Autosubmit):
+ with raises(AutosubmitCritical):
+ autosubmit.cat_log('a000', 'j', None)
+
+
+def test_is_workflow_not_found(mocker, autosubmit: Autosubmit):
+ Log = mocker.patch('autosubmit.autosubmit.Log')
+ autosubmit.cat_log('a000', 'o', 'c')
+ assert Log.info.called
+ assert Log.info.call_args[0][0] == 'No logs found.'
+
+
+def test_is_workflow_log_is_dir(autosubmit_exp: Callable):
+ exp = autosubmit_exp('a000')
+ log_file_actually_dir = Path(exp.aslogs_dir, 'log_run.log')
+ log_file_actually_dir.mkdir()
+
+ with raises(AutosubmitCritical):
+ exp.autosubmit.cat_log('a000', 'o', 'c')
+
+
+def test_is_workflow_out_cat(mocker, autosubmit_exp: Callable):
+ exp = autosubmit_exp('a000')
+ popen = mocker.patch('subprocess.Popen')
+ log_file = Path(exp.aslogs_dir, 'log_run.log')
+ with open(log_file, 'w') as f:
+ f.write('as test')
+ f.flush()
+ exp.autosubmit.cat_log('a000', file=None, mode='c')
+ assert popen.called
+ args = popen.call_args[0][0]
+ assert args[0] == 'cat'
+ assert args[1] == str(log_file)
+
+
+def test_is_workflow_status_tail(mocker, autosubmit_exp: Callable):
+ popen = mocker.patch('subprocess.Popen')
+ exp = autosubmit_exp('a000')
+ log_file = Path(exp.status_dir, 'a000_anything.txt')
+ with open(log_file, 'w') as f:
+ f.write('as test')
+ f.flush()
+ exp.autosubmit.cat_log('a000', file='s', mode='t')
+ assert popen.called
+ args = popen.call_args[0][0]
+ assert args[0] == 'tail'
+ assert str(args[-1]) == str(log_file)
+
+
+# --- jobs
+
+
+def test_is_jobs_not_found(mocker, autosubmit_exp: Callable):
+ Log = mocker.patch('autosubmit.autosubmit.Log')
+ exp = autosubmit_exp('a000')
+ for file in ['j', 's', 'o']:
+ exp.autosubmit.cat_log('a000_INI', file=file, mode='c')
assert Log.info.called
assert Log.info.call_args[0][0] == 'No logs found.'
- def test_is_workflow_log_is_dir(self):
- log_file_actually_dir = Path(self.aslogs_dir, 'log_run.log')
- log_file_actually_dir.mkdir()
- def _fn():
- self.autosubmit.cat_log('a000', 'o', 'c')
- self.assertRaises(AutosubmitCritical, _fn)
-
- @patch('subprocess.Popen')
- def test_is_workflow_out_cat(self, popen):
- log_file = Path(self.aslogs_dir, 'log_run.log')
- with open(log_file, 'w') as f:
- f.write('as test')
- f.flush()
- self.autosubmit.cat_log('a000', file=None, mode='c')
- assert popen.called
- args = popen.call_args[0][0]
- assert args[0] == 'cat'
- assert args[1] == str(log_file)
-
- @patch('subprocess.Popen')
- def test_is_workflow_status_tail(self, popen):
- log_file = Path(self.status_path, 'a000_anything.txt')
- with open(log_file, 'w') as f:
- f.write('as test')
- f.flush()
- self.autosubmit.cat_log('a000', file='s', mode='t')
- assert popen.called
- args = popen.call_args[0][0]
- assert args[0] == 'tail'
- assert str(args[-1]) == str(log_file)
-
- # --- jobs
-
- @patch('autosubmit.autosubmit.Log')
- def test_is_jobs_not_found(self, Log):
- for file in ['j', 's', 'o']:
- self.autosubmit.cat_log('a000_INI', file=file, mode='c')
- assert Log.info.called
- assert Log.info.call_args[0][0] == 'No logs found.'
-
- def test_is_jobs_log_is_dir(self):
- log_file_actually_dir = Path(self.tmp_dir, 'LOG_a000/a000_INI.20000101.out')
- log_file_actually_dir.mkdir(parents=True)
- def _fn():
- self.autosubmit.cat_log('a000_INI', 'o', 'c')
- self.assertRaises(AutosubmitCritical, _fn)
-
- @patch('subprocess.Popen')
- def test_is_jobs_out_tail(self, popen):
- log_dir = self.tmp_dir / 'LOG_a000'
- log_dir.mkdir()
- log_file = log_dir / 'a000_INI.20200101.out'
- with open(log_file, 'w') as f:
- f.write('as test')
- f.flush()
- self.autosubmit.cat_log('a000_INI', file=None, mode='t')
- assert popen.called
- args = popen.call_args[0][0]
- assert args[0] == 'tail'
- assert str(args[-1]) == str(log_file)
-
- # --- command-line
-
- def test_command_line_help(self):
- args = ['autosubmit', 'cat-log', '--help']
- with patch.object(sys, 'argv', args) as _, io.StringIO() as buf, redirect_stdout(buf):
- with suppress(SystemExit):
- assert Autosubmit.parse_args()
- assert buf
- assert 'View workflow and job logs.' in buf.getvalue()
+
+def test_is_jobs_log_is_dir(autosubmit_exp: Callable, tmp_path: Path):
+ exp: AutosubmitExperiment = autosubmit_exp('a000')
+ log_file_actually_dir = Path(exp.tmp_dir, 'LOG_a000/a000_INI.20000101.out')
+ log_file_actually_dir.mkdir(parents=True)
+
+ with raises(AutosubmitCritical):
+ exp.autosubmit.cat_log('a000_INI', 'o', 'c')
+
+
+def test_is_jobs_out_tail(autosubmit_exp: Callable, tmp_path: Path, mocker: MockerFixture):
+ exp: AutosubmitExperiment = autosubmit_exp('a000')
+ popen = mocker.patch('subprocess.Popen')
+ log_dir = exp.tmp_dir / 'LOG_a000'
+ log_dir.mkdir(parents=True)
+ log_file = log_dir / 'a000_INI.20200101.out'
+ with open(log_file, 'w') as f:
+ f.write('as test')
+ f.flush()
+ exp.autosubmit.cat_log('a000_INI', file=None, mode='t')
+ assert popen.called
+ args = popen.call_args[0][0]
+ assert args[0] == 'tail'
+ assert str(args[-1]) == str(log_file)
+
+
+# --- command-line
+
+def test_command_line_help(mocker):
+ args = ['autosubmit', 'cat-log', '--help']
+ mocker.patch.object(sys, 'argv', args)
+ with io.StringIO() as buf, redirect_stdout(buf):
+ with suppress(SystemExit):
+ assert Autosubmit.parse_args()
+ assert 'View workflow and job logs.' in buf.getvalue()
diff --git a/test/unit/test_chunk_date_lib.py b/test/unit/test_chunk_date_lib.py
index af5101e0b76cc8cd042af5fa1f93e2c7d24a4bad..fc93d3e62176ae1b6dfdcc2140851788ae7618ee 100644
--- a/test/unit/test_chunk_date_lib.py
+++ b/test/unit/test_chunk_date_lib.py
@@ -1,212 +1,219 @@
-from unittest import TestCase
+# Copyright 2015-2023 Earth Sciences Department, BSC-CNS
+# This file is part of Autosubmit.
+#
+# Autosubmit is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Autosubmit is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Autosubmit. If not, see <http://www.gnu.org/licenses/>.
+
from bscearth.utils.date import *
from datetime import datetime
+from pytest import raises
+
+
+def test_add_time():
+ assert add_time(datetime(2000, 1, 1), 1, 'year', 'standard') == datetime(2001, 1, 1)
+ assert add_time(datetime(2000, 1, 30), 1, 'month', 'standard') == datetime(2000, 2, 29)
+ assert add_time(datetime(2000, 2, 28), 1, 'day', 'standard') == datetime(2000, 2, 29)
+ assert add_time(datetime(2000, 2, 28, 23), 1, 'hour', 'standard') == datetime(2000, 2, 29)
+
+ assert add_time(datetime(2000, 1, 1), 1, 'year', 'noleap') == datetime(2001, 1, 1)
+ assert add_time(datetime(2000, 1, 30), 1, 'month', 'noleap') == datetime(2000, 2, 28)
+ assert add_time(datetime(2000, 2, 28), 1, 'day', 'noleap') == datetime(2000, 3, 1)
+ assert add_time(datetime(2000, 2, 28, 23), 1, 'hour', 'noleap') == datetime(2000, 3, 1)
+
+ # Theoretically tests that Log is called
+ assert add_time(datetime(2000, 2, 28, 23), 1, 'other', 'noleap') is None
+
+
+def test_add_years():
+ assert add_years(datetime(2000, 1, 1), 1) == datetime(2001, 1, 1)
+
+
+def test_add_months():
+ assert add_months(datetime(2000, 1, 1), 1, 'standard') == datetime(2000, 2, 1)
+ assert add_months(datetime(2000, 1, 29), 1, 'standard') == datetime(2000, 2, 29)
+ assert add_months(datetime(2000, 1, 1), 1, 'noleap') == datetime(2000, 2, 1)
+ assert add_months(datetime(2000, 1, 29), 1, 'noleap') == datetime(2000, 2, 28)
+
+
+def test_add_days():
+ assert add_days(datetime(2000, 1, 1), 1, 'standard') == datetime(2000, 1, 2)
+ assert add_days(datetime(2000, 2, 28), 1, 'standard') == datetime(2000, 2, 29)
+ assert add_days(datetime(2000, 1, 1), 1, 'noleap') == datetime(2000, 1, 2)
+ assert add_days(datetime(2000, 2, 28), 1, 'noleap') == datetime(2000, 3, 1)
+ assert add_days(datetime(2000, 3, 1), 1, 'noleap') == datetime(2000, 3, 2)
+
+
+def test_add_hours():
+ assert add_hours(datetime(2000, 1, 1), 24, 'standard') == datetime(2000, 1, 2)
+ assert add_hours(datetime(2000, 1, 1, 23), 1, 'standard') == datetime(2000, 1, 2)
+ assert add_hours(datetime(2000, 2, 28), 24, 'standard') == datetime(2000, 2, 29)
+ assert add_hours(datetime(2000, 2, 28), 24, 'noleap') == datetime(2000, 3, 1)
+
+
+def test_subs_dates():
+ assert subs_dates(datetime(2000, 1, 1), datetime(2001, 1, 1), 'standard') == 366
+ assert subs_dates(datetime(2000, 2, 1), datetime(2000, 3, 1), 'standard') == 29
+ assert subs_dates(datetime(2000, 2, 28), datetime(2000, 3, 1), 'standard') == 2
+ assert subs_dates(datetime(2000, 2, 28, 23), datetime(2000, 3, 1), 'standard') == 1
+
+ assert subs_dates(datetime(2000, 1, 1), datetime(2001, 1, 1), 'noleap') == 365
+ assert subs_dates(datetime(2000, 2, 1), datetime(2000, 3, 1), 'noleap') == 28
+ assert subs_dates(datetime(2000, 2, 28), datetime(2000, 3, 1), 'noleap') == 1
+ assert subs_dates(datetime(2000, 2, 28, 23), datetime(2000, 3, 1), 'noleap') == 0
+ assert subs_dates(datetime(2000, 3, 28), datetime(2000, 3, 29), 'noleap') == 1
+ assert subs_dates(datetime(1999, 3, 28), datetime(2000, 2, 28), 'noleap') == 337
-class TestChunkDateLib(TestCase):
- def test_add_time(self):
- self.assertEqual(add_time(datetime(2000, 1, 1), 1, 'year', 'standard'), datetime(2001, 1, 1))
- self.assertEqual(add_time(datetime(2000, 1, 30), 1, 'month', 'standard'), datetime(2000, 2, 29))
- self.assertEqual(add_time(datetime(2000, 2, 28), 1, 'day', 'standard'), datetime(2000, 2, 29))
- self.assertEqual(add_time(datetime(2000, 2, 28, 23), 1, 'hour', 'standard'), datetime(2000, 2, 29))
-
- self.assertEqual(add_time(datetime(2000, 1, 1), 1, 'year', 'noleap'), datetime(2001, 1, 1))
- self.assertEqual(add_time(datetime(2000, 1, 30), 1, 'month', 'noleap'), datetime(2000, 2, 28))
- self.assertEqual(add_time(datetime(2000, 2, 28), 1, 'day', 'noleap'), datetime(2000, 3, 1))
- self.assertEqual(add_time(datetime(2000, 2, 28, 23), 1, 'hour', 'noleap'), datetime(2000, 3, 1))
-
- # Theoretically tests that Log is called
- self.assertEqual(add_time(datetime(2000, 2, 28, 23), 1, 'other', 'noleap'), None)
-
- def test_add_years(self):
- self.assertEqual(add_years(datetime(2000, 1, 1), 1), datetime(2001, 1, 1))
-
- def test_add_months(self):
- self.assertEqual(add_months(datetime(2000, 1, 1), 1, 'standard'), datetime(2000, 2, 1))
- self.assertEqual(add_months(datetime(2000, 1, 29), 1, 'standard'), datetime(2000, 2, 29))
- self.assertEqual(add_months(datetime(2000, 1, 1), 1, 'noleap'), datetime(2000, 2, 1))
- self.assertEqual(add_months(datetime(2000, 1, 29), 1, 'noleap'), datetime(2000, 2, 28))
-
- def test_add_days(self):
- self.assertEqual(add_days(datetime(2000, 1, 1), 1, 'standard'), datetime(2000, 1, 2))
- self.assertEqual(add_days(datetime(2000, 2, 28), 1, 'standard'), datetime(2000, 2, 29))
- self.assertEqual(add_days(datetime(2000, 1, 1), 1, 'noleap'), datetime(2000, 1, 2))
- self.assertEqual(add_days(datetime(2000, 2, 28), 1, 'noleap'), datetime(2000, 3, 1))
- self.assertEqual(add_days(datetime(2000, 3, 1), 1, 'noleap'), datetime(2000, 3, 2))
-
- def test_add_hours(self):
- self.assertEqual(add_hours(datetime(2000, 1, 1), 24, 'standard'), datetime(2000, 1, 2))
- self.assertEqual(add_hours(datetime(2000, 1, 1, 23), 1, 'standard'), datetime(2000, 1, 2))
- self.assertEqual(add_hours(datetime(2000, 2, 28), 24, 'standard'), datetime(2000, 2, 29))
- self.assertEqual(add_hours(datetime(2000, 2, 28), 24, 'noleap'), datetime(2000, 3, 1))
-
- def test_subs_dates(self):
- self.assertEqual(subs_dates(datetime(2000, 1, 1), datetime(2001, 1, 1), 'standard'), 366)
- self.assertEqual(subs_dates(datetime(2000, 2, 1), datetime(2000, 3, 1), 'standard'), 29)
- self.assertEqual(subs_dates(datetime(2000, 2, 28), datetime(2000, 3, 1), 'standard'), 2)
- self.assertEqual(subs_dates(datetime(2000, 2, 28, 23), datetime(2000, 3, 1), 'standard'), 1)
-
- self.assertEqual(subs_dates(datetime(2000, 1, 1), datetime(2001, 1, 1), 'noleap'), 365)
- self.assertEqual(subs_dates(datetime(2000, 2, 1), datetime(2000, 3, 1), 'noleap'), 28)
- self.assertEqual(subs_dates(datetime(2000, 2, 28), datetime(2000, 3, 1), 'noleap'), 1)
- self.assertEqual(subs_dates(datetime(2000, 2, 28, 23), datetime(2000, 3, 1), 'noleap'), 0)
- self.assertEqual(subs_dates(datetime(2000, 3, 28), datetime(2000, 3, 29), 'noleap'), 1)
- self.assertEqual(subs_dates(datetime(1999, 3, 28), datetime(2000, 2, 28), 'noleap'), 337)
-
- def test_subs_days(self):
- self.assertEqual(sub_days(datetime(2000, 1, 2), 1, 'standard'), datetime(2000, 1, 1))
- self.assertEqual(sub_days(datetime(2000, 1, 2), -1, 'standard'), datetime(2000, 1, 3))
- self.assertEqual(sub_days(datetime(2000, 3, 1), 1, 'standard'), datetime(2000, 2, 29))
- self.assertEqual(sub_days(datetime(2000, 2, 28), -1, 'standard'), datetime(2000, 2, 29))
- self.assertEqual(sub_days(datetime(2000, 1, 1), 365, 'standard'), datetime(1999, 1, 1))
- self.assertEqual(sub_days(datetime(1999, 1, 1), -365, 'standard'), datetime(2000, 1, 1))
- self.assertEqual(sub_days(datetime(2000, 12, 31), 365, 'standard'), datetime(2000, 1, 1))
- self.assertEqual(sub_days(datetime(2000, 1, 1), -365, 'standard'), datetime(2000, 12, 31))
- self.assertEqual(sub_days(datetime(2000, 2, 28), -2920, 'standard'), datetime(2008, 2, 26))
- self.assertEqual(sub_days(datetime(2008, 2, 26), 2920, 'standard'), datetime(2000, 2, 28))
- self.assertEqual(sub_days(datetime(2015, 12, 31), -61, 'standard'), datetime(2016, 3, 1))
- self.assertEqual(sub_days(datetime(2016, 3, 1), 61, 'standard'), datetime(2015, 12, 31))
- self.assertEqual(sub_days(datetime(2001, 1, 1), 1, 'standard'), datetime(2000, 12, 31))
- self.assertEqual(sub_days(datetime(1999, 12, 31), -1, 'standard'), datetime(2000, 1, 1))
-
- self.assertEqual(sub_days(datetime(2000, 1, 2), 1, 'noleap'), datetime(2000, 1, 1))
- self.assertEqual(sub_days(datetime(2000, 1, 2), -1, 'noleap'), datetime(2000, 1, 3))
- self.assertEqual(sub_days(datetime(2000, 3, 1), 1, 'noleap'), datetime(2000, 2, 28))
- self.assertEqual(sub_days(datetime(2000, 2, 28), -1, 'noleap'), datetime(2000, 3, 1))
- self.assertEqual(sub_days(datetime(2000, 1, 1), 365, 'noleap'), datetime(1999, 1, 1))
- self.assertEqual(sub_days(datetime(1999, 1, 1), -365, 'noleap'), datetime(2000, 1, 1))
- self.assertEqual(sub_days(datetime(2001, 1, 1), 365, 'noleap'), datetime(2000, 1, 1))
- self.assertEqual(sub_days(datetime(2000, 1, 1), -365, 'noleap'), datetime(2001, 1, 1))
- self.assertEqual(sub_days(datetime(2000, 2, 28), -2920, 'noleap'), datetime(2008, 2, 28))
- self.assertEqual(sub_days(datetime(2008, 2, 26), 2920, 'noleap'), datetime(2000, 2, 26))
- self.assertEqual(sub_days(datetime(2015, 12, 31), -61, 'noleap'), datetime(2016, 3, 2))
- self.assertEqual(sub_days(datetime(2016, 3, 2), 61, 'noleap'), datetime(2015, 12, 31))
- self.assertEqual(sub_days(datetime(2001, 1, 1), 1, 'noleap'), datetime(2000, 12, 31))
- self.assertEqual(sub_days(datetime(1999, 12, 31), -1, 'noleap'), datetime(2000, 1, 1))
-
- def test_chunk_start_date(self):
- self.assertEqual(chunk_start_date(datetime(2000, 1, 1), 2, 1, 'year', 'standard'),
- datetime(2001, 1, 1))
- self.assertEqual(chunk_start_date(datetime(2000, 1, 1), 2, 2, 'year', 'standard'),
- datetime(2002, 1, 1))
-
- self.assertEqual(chunk_start_date(datetime(2000, 1, 1), 2, 1, 'year', 'noleap'),
- datetime(2001, 1, 1))
- self.assertEqual(chunk_start_date(datetime(2000, 1, 1), 2, 2, 'year', 'noleap'),
- datetime(2002, 1, 1))
-
- self.assertEqual(chunk_start_date(datetime(2000, 1, 1), 2, 1, 'month', 'standard'),
- datetime(2000, 2, 1))
- self.assertEqual(chunk_start_date(datetime(2000, 1, 1), 2, 2, 'month', 'standard'),
- datetime(2000, 3, 1))
- self.assertEqual(chunk_start_date(datetime(2000, 1, 31), 2, 1, 'month', 'standard'),
- datetime(2000, 2, 29))
-
- self.assertEqual(chunk_start_date(datetime(2000, 1, 1), 2, 1, 'month', 'noleap'),
- datetime(2000, 2, 1))
- self.assertEqual(chunk_start_date(datetime(2000, 1, 1), 2, 2, 'month', 'noleap'),
- datetime(2000, 3, 1))
- self.assertEqual(chunk_start_date(datetime(2000, 1, 31), 2, 1, 'month', 'noleap'),
- datetime(2000, 2, 28))
-
- self.assertEqual(chunk_start_date(datetime(2000, 1, 1), 2, 1, 'day', 'standard'),
- datetime(2000, 1, 2))
- self.assertEqual(chunk_start_date(datetime(2000, 1, 1), 2, 2, 'day', 'standard'),
- datetime(2000, 1, 3))
- self.assertEqual(chunk_start_date(datetime(2000, 2, 28), 2, 1, 'day', 'standard'),
- datetime(2000, 2, 29))
-
- self.assertEqual(chunk_start_date(datetime(2000, 1, 1), 2, 1, 'day', 'noleap'),
- datetime(2000, 1, 2))
- self.assertEqual(chunk_start_date(datetime(2000, 1, 1), 2, 2, 'day', 'noleap'),
- datetime(2000, 1, 3))
- self.assertEqual(chunk_start_date(datetime(2000, 2, 28), 2, 1, 'day', 'noleap'),
- datetime(2000, 3, 1))
-
- self.assertEqual(chunk_start_date(datetime(2000, 1, 1), 2, 1, 'hour', 'standard'),
- datetime(2000, 1, 1, 1))
- self.assertEqual(chunk_start_date(datetime(2000, 1, 1), 2, 2, 'hour', 'standard'),
- datetime(2000, 1, 1, 2))
- self.assertEqual(chunk_start_date(datetime(2000, 2, 28, 23), 2, 1, 'hour', 'standard'),
- datetime(2000, 2, 29))
-
- self.assertEqual(chunk_start_date(datetime(2000, 1, 1), 2, 1, 'hour', 'noleap'),
- datetime(2000, 1, 1, 1))
- self.assertEqual(chunk_start_date(datetime(2000, 1, 1), 2, 2, 'hour', 'noleap'),
- datetime(2000, 1, 1, 2))
- self.assertEqual(chunk_start_date(datetime(2000, 2, 28, 23), 2, 1, 'hour', 'noleap'),
- datetime(2000, 3, 1))
-
- def test_chunk_end_date(self):
- self.assertEqual(chunk_end_date(datetime(2000, 1, 1), 1, 'year', 'standard'), datetime(2001, 1, 1))
- self.assertEqual(chunk_end_date(datetime(2000, 1, 30), 1, 'month', 'standard'), datetime(2000, 2, 29))
- self.assertEqual(chunk_end_date(datetime(2000, 2, 28), 1, 'day', 'standard'), datetime(2000, 2, 29))
- self.assertEqual(chunk_end_date(datetime(2000, 2, 28, 23), 1, 'hour', 'standard'), datetime(2000, 2, 29))
-
- self.assertEqual(chunk_end_date(datetime(2000, 1, 1), 1, 'year', 'noleap'), datetime(2001, 1, 1))
- self.assertEqual(chunk_end_date(datetime(2000, 1, 30), 1, 'month', 'noleap'), datetime(2000, 2, 28))
- self.assertEqual(chunk_end_date(datetime(2000, 2, 28), 1, 'day', 'noleap'), datetime(2000, 3, 1))
- self.assertEqual(chunk_end_date(datetime(2000, 2, 28, 23), 1, 'hour', 'noleap'), datetime(2000, 3, 1))
-
- def test_previous_date(self):
- self.assertEqual(previous_day(datetime(2000, 1, 2), 'standard'), datetime(2000, 1, 1))
- self.assertEqual(previous_day(datetime(2000, 3, 1), 'standard'), datetime(2000, 2, 29))
-
- self.assertEqual(previous_day(datetime(2000, 1, 2), 'noleap'), datetime(2000, 1, 1))
- self.assertEqual(previous_day(datetime(2000, 3, 1), 'noleap'), datetime(2000, 2, 28))
-
- self.assertEqual(previous_day(datetime(2000, 1, 1), 'noleap'), datetime(1999, 12, 31))
- self.assertEqual(previous_day(datetime(2001, 1, 1), 'noleap'), datetime(2000, 12, 31))
-
- def test_parse_date(self):
- self.assertEqual(parse_date(''), None)
- self.assertEqual(parse_date('2000'), datetime(2000, 1, 1))
- self.assertEqual(parse_date('200001'), datetime(2000, 1, 1))
- self.assertEqual(parse_date('20000101'), datetime(2000, 1, 1))
- self.assertEqual(parse_date('2000010100'), datetime(2000, 1, 1))
- self.assertEqual(parse_date('200001010000'), datetime(2000, 1, 1))
- self.assertEqual(parse_date('20000101000000'), datetime(2000, 1, 1))
- self.assertEqual(parse_date('2000-01-01 00:00:00'), datetime(2000, 1, 1))
-
- with self.assertRaises(ValueError):
- parse_date('200')
- with self.assertRaises(ValueError):
- parse_date('20001')
- with self.assertRaises(ValueError):
- parse_date('200014')
- with self.assertRaises(ValueError):
- parse_date('2000011')
- with self.assertRaises(ValueError):
- parse_date('20000230')
- with self.assertRaises(ValueError):
- parse_date('200002281')
- with self.assertRaises(ValueError):
- parse_date('2000022825')
- with self.assertRaises(ValueError):
- parse_date('20000228121')
- with self.assertRaises(ValueError):
- parse_date('200002281299')
- with self.assertRaises(ValueError):
- parse_date('2000022812591')
- with self.assertRaises(ValueError):
- parse_date('20000228125999')
-
- def test_date2str(self):
- # noinspection PyTypeChecker
- self.assertEqual(date2str(None), '')
- self.assertEqual(date2str(datetime(2000, 1, 1)), '20000101')
- self.assertEqual(date2str(datetime(2000, 1, 1), 'H'), '2000010100')
- self.assertEqual(date2str(datetime(2000, 1, 1), 'M'), '200001010000')
- self.assertEqual(date2str(datetime(2000, 1, 1), 'S'), '20000101000000')
-
- def test_sum_str_hours(self):
- self.assertEqual(sum_str_hours('00:30', '00:30'), '01:00')
- self.assertEqual(sum_str_hours('14:30', '14:30'), '29:00')
- self.assertEqual(sum_str_hours('50:45', '50:30'), '101:15')
-
- def test_split_str_hours(self):
- self.assertEqual(split_str_hours('00:30'), (0, 30))
- self.assertEqual(split_str_hours('12:55'), (12, 55))
- with self.assertRaises(Exception):
- parse_date('30')
+def test_subs_days():
+ assert sub_days(datetime(2000, 1, 2), 1, 'standard') == datetime(2000, 1, 1)
+ assert sub_days(datetime(2000, 1, 2), -1, 'standard') == datetime(2000, 1, 3)
+ assert sub_days(datetime(2000, 3, 1), 1, 'standard') == datetime(2000, 2, 29)
+ assert sub_days(datetime(2000, 2, 28), -1, 'standard') == datetime(2000, 2, 29)
+ assert sub_days(datetime(2000, 1, 1), 365, 'standard') == datetime(1999, 1, 1)
+ assert sub_days(datetime(1999, 1, 1), -365, 'standard') == datetime(2000, 1, 1)
+ assert sub_days(datetime(2000, 12, 31), 365, 'standard') == datetime(2000, 1, 1)
+ assert sub_days(datetime(2000, 1, 1), -365, 'standard') == datetime(2000, 12, 31)
+ assert sub_days(datetime(2000, 2, 28), -2920, 'standard') == datetime(2008, 2, 26)
+ assert sub_days(datetime(2008, 2, 26), 2920, 'standard') == datetime(2000, 2, 28)
+ assert sub_days(datetime(2015, 12, 31), -61, 'standard') == datetime(2016, 3, 1)
+ assert sub_days(datetime(2016, 3, 1), 61, 'standard') == datetime(2015, 12, 31)
+ assert sub_days(datetime(2001, 1, 1), 1, 'standard') == datetime(2000, 12, 31)
+ assert sub_days(datetime(1999, 12, 31), -1, 'standard') == datetime(2000, 1, 1)
+
+ assert sub_days(datetime(2000, 1, 2), 1, 'noleap') == datetime(2000, 1, 1)
+ assert sub_days(datetime(2000, 1, 2), -1, 'noleap') == datetime(2000, 1, 3)
+ assert sub_days(datetime(2000, 3, 1), 1, 'noleap') == datetime(2000, 2, 28)
+ assert sub_days(datetime(2000, 2, 28), -1, 'noleap') == datetime(2000, 3, 1)
+ assert sub_days(datetime(2000, 1, 1), 365, 'noleap') == datetime(1999, 1, 1)
+ assert sub_days(datetime(1999, 1, 1), -365, 'noleap') == datetime(2000, 1, 1)
+ assert sub_days(datetime(2001, 1, 1), 365, 'noleap') == datetime(2000, 1, 1)
+ assert sub_days(datetime(2000, 1, 1), -365, 'noleap') == datetime(2001, 1, 1)
+ assert sub_days(datetime(2000, 2, 28), -2920, 'noleap') == datetime(2008, 2, 28)
+ assert sub_days(datetime(2008, 2, 26), 2920, 'noleap') == datetime(2000, 2, 26)
+ assert sub_days(datetime(2015, 12, 31), -61, 'noleap') == datetime(2016, 3, 2)
+ assert sub_days(datetime(2016, 3, 2), 61, 'noleap') == datetime(2015, 12, 31)
+ assert sub_days(datetime(2001, 1, 1), 1, 'noleap') == datetime(2000, 12, 31)
+ assert sub_days(datetime(1999, 12, 31), -1, 'noleap') == datetime(2000, 1, 1)
+
+
+def test_chunk_start_date():
+ assert chunk_start_date(datetime(2000, 1, 1), 2, 1, 'year', 'standard') == datetime(2001, 1, 1)
+
+ assert chunk_start_date(datetime(2000, 1, 1), 2, 2, 'year', 'standard') == datetime(2002, 1, 1)
+
+ assert chunk_start_date(datetime(2000, 1, 1), 2, 1, 'year', 'noleap') == datetime(2001, 1, 1)
+ assert chunk_start_date(datetime(2000, 1, 1), 2, 2, 'year', 'noleap') == datetime(2002, 1, 1)
+
+ assert chunk_start_date(datetime(2000, 1, 1), 2, 1, 'month', 'standard') == datetime(2000, 2, 1)
+ assert chunk_start_date(datetime(2000, 1, 1), 2, 2, 'month', 'standard') == datetime(2000, 3, 1)
+ assert chunk_start_date(datetime(2000, 1, 31), 2, 1, 'month', 'standard') == datetime(2000, 2, 29)
+
+ assert chunk_start_date(datetime(2000, 1, 1), 2, 1, 'month', 'noleap') == datetime(2000, 2, 1)
+ assert chunk_start_date(datetime(2000, 1, 1), 2, 2, 'month', 'noleap') == datetime(2000, 3, 1)
+ assert chunk_start_date(datetime(2000, 1, 31), 2, 1, 'month', 'noleap') == datetime(2000, 2, 28)
+
+ assert chunk_start_date(datetime(2000, 1, 1), 2, 1, 'day', 'standard') == datetime(2000, 1, 2)
+ assert chunk_start_date(datetime(2000, 1, 1), 2, 2, 'day', 'standard') == datetime(2000, 1, 3)
+ assert chunk_start_date(datetime(2000, 2, 28), 2, 1, 'day', 'standard') == datetime(2000, 2, 29)
+
+ assert chunk_start_date(datetime(2000, 1, 1), 2, 1, 'day', 'noleap') == datetime(2000, 1, 2)
+ assert chunk_start_date(datetime(2000, 1, 1), 2, 2, 'day', 'noleap') == datetime(2000, 1, 3)
+ assert chunk_start_date(datetime(2000, 2, 28), 2, 1, 'day', 'noleap') == datetime(2000, 3, 1)
+
+ assert chunk_start_date(datetime(2000, 1, 1), 2, 1, 'hour', 'standard') == datetime(2000, 1, 1, 1)
+ assert chunk_start_date(datetime(2000, 1, 1), 2, 2, 'hour', 'standard') == datetime(2000, 1, 1, 2)
+ assert chunk_start_date(datetime(2000, 2, 28, 23), 2, 1, 'hour', 'standard') == datetime(2000, 2, 29)
+
+ assert chunk_start_date(datetime(2000, 1, 1), 2, 1, 'hour', 'noleap') == datetime(2000, 1, 1, 1)
+ assert chunk_start_date(datetime(2000, 1, 1), 2, 2, 'hour', 'noleap') == datetime(2000, 1, 1, 2)
+ assert chunk_start_date(datetime(2000, 2, 28, 23), 2, 1, 'hour', 'noleap') == datetime(2000, 3, 1)
+
+
+def test_chunk_end_date():
+    assert chunk_end_date(datetime(2000, 1, 1), 1, 'year', 'standard') == datetime(2001, 1, 1)
+    assert chunk_end_date(datetime(2000, 1, 30), 1, 'month', 'standard') == datetime(2000, 2, 29)
+    assert chunk_end_date(datetime(2000, 2, 28), 1, 'day', 'standard') == datetime(2000, 2, 29)
+    assert chunk_end_date(datetime(2000, 2, 28, 23), 1, 'hour', 'standard') == datetime(2000, 2, 29)
+
+    assert chunk_end_date(datetime(2000, 1, 1), 1, 'year', 'noleap') == datetime(2001, 1, 1)
+    assert chunk_end_date(datetime(2000, 1, 30), 1, 'month', 'noleap') == datetime(2000, 2, 28)
+    assert chunk_end_date(datetime(2000, 2, 28), 1, 'day', 'noleap') == datetime(2000, 3, 1)
+    assert chunk_end_date(datetime(2000, 2, 28, 23), 1, 'hour', 'noleap') == datetime(2000, 3, 1)
+
+
+def test_previous_date():
+    assert previous_day(datetime(2000, 1, 2), 'standard') == datetime(2000, 1, 1)
+    assert previous_day(datetime(2000, 3, 1), 'standard') == datetime(2000, 2, 29)
+
+    assert previous_day(datetime(2000, 1, 2), 'noleap') == datetime(2000, 1, 1)
+    assert previous_day(datetime(2000, 3, 1), 'noleap') == datetime(2000, 2, 28)
+
+    assert previous_day(datetime(2000, 1, 1), 'noleap') == datetime(1999, 12, 31)
+    assert previous_day(datetime(2001, 1, 1), 'noleap') == datetime(2000, 12, 31)
+
+
+def test_parse_date():
+ assert parse_date('') is None
+ assert parse_date('2000') == datetime(2000, 1, 1)
+ assert parse_date('200001') == datetime(2000, 1, 1)
+ assert parse_date('20000101') == datetime(2000, 1, 1)
+ assert parse_date('2000010100') == datetime(2000, 1, 1)
+ assert parse_date('200001010000') == datetime(2000, 1, 1)
+ assert parse_date('20000101000000') == datetime(2000, 1, 1)
+ assert parse_date('2000-01-01 00:00:00') == datetime(2000, 1, 1)
+
+ with raises(ValueError):
+ parse_date('200')
+ with raises(ValueError):
+ parse_date('20001')
+ with raises(ValueError):
+ parse_date('200014')
+ with raises(ValueError):
+ parse_date('2000011')
+ with raises(ValueError):
+ parse_date('20000230')
+ with raises(ValueError):
+ parse_date('200002281')
+ with raises(ValueError):
+ parse_date('2000022825')
+ with raises(ValueError):
+ parse_date('20000228121')
+ with raises(ValueError):
+ parse_date('200002281299')
+ with raises(ValueError):
+ parse_date('2000022812591')
+ with raises(ValueError):
+ parse_date('20000228125999')
+
+
+def test_date2str():
+ # noinspection PyTypeChecker
+ assert date2str(None) == ''
+ assert date2str(datetime(2000, 1, 1)) == '20000101'
+ assert date2str(datetime(2000, 1, 1), 'H') == '2000010100'
+ assert date2str(datetime(2000, 1, 1), 'M') == '200001010000'
+ assert date2str(datetime(2000, 1, 1), 'S') == '20000101000000'
+
+
+def test_sum_str_hours():
+ assert sum_str_hours('00:30', '00:30') == '01:00'
+ assert sum_str_hours('14:30', '14:30') == '29:00'
+ assert sum_str_hours('50:45', '50:30') == '101:15'
+
+
+def test_split_str_hours():
+    assert split_str_hours('00:30') == (0, 30)
+    assert split_str_hours('12:55') == (12, 55)
+    with raises(Exception):
+        split_str_hours('30')
diff --git a/test/unit/test_database_managers.py b/test/unit/test_database_managers.py
index 9999fe9488dfafa49e5ee2bcbf666150eda403af..da9ca1bcea65fa1e84f83fe25550f85122609568 100644
--- a/test/unit/test_database_managers.py
+++ b/test/unit/test_database_managers.py
@@ -1,32 +1,33 @@
-#!/usr/bin/env python3
-
-# Copyright 2015-2020 Earth Sciences Department, BSC-CNS
+# Copyright 2015-2023 Earth Sciences Department, BSC-CNS
# This file is part of Autosubmit.
-
+#
# Autosubmit is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
-
+#
# Autosubmit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
-
+#
# You should have received a copy of the GNU General Public License
# along with Autosubmit. If not, see .
-import unittest
-import time
-import random
+
import os
+import pytest
+import random
+import time
from shutil import copy2
-from autosubmit.history.database_managers.experiment_history_db_manager import ExperimentHistoryDbManager
-from autosubmit.history.database_managers.experiment_status_db_manager import ExperimentStatusDbManager
+
+import autosubmit.history.utils as HUtils
from autosubmit.history.data_classes.experiment_run import ExperimentRun
from autosubmit.history.data_classes.job_data import JobData
+from autosubmit.history.database_managers.experiment_history_db_manager import ExperimentHistoryDbManager
+from autosubmit.history.database_managers.experiment_status_db_manager import ExperimentStatusDbManager
from autosubmitconfigparser.config.basicconfig import BasicConfig
-import autosubmit.history.utils as HUtils
+
EXPID_TT00_SOURCE = "test_database.db~"
EXPID_TT01_SOURCE = "test_database_no_run.db~"
EXPID = "t024"
@@ -35,218 +36,217 @@ BasicConfig.read()
JOBDATA_DIR = BasicConfig.JOBDATA_DIR
LOCAL_ROOT_DIR = BasicConfig.LOCAL_ROOT_DIR
-@unittest.skip('TODO: looks like another test that used actual experiments data')
-class TestExperimentStatusDatabaseManager(unittest.TestCase):
- """ Covers Experiment Status Database Manager """
- def setUp(self):
- self.exp_status_db = ExperimentStatusDbManager(EXPID, BasicConfig.DB_DIR, BasicConfig.DB_FILE, LOCAL_ROOT_DIR)
-
- def test_get_current_experiment_status_row(self):
- exp_status_row = self.exp_status_db.get_experiment_status_row_by_expid(EXPID)
- self.assertIsNotNone(exp_status_row)
- exp_status_row_none = self.exp_status_db.get_experiment_status_row_by_expid(EXPID_NONE)
- self.assertIsNone(exp_status_row_none)
- exp_row_direct = self.exp_status_db.get_experiment_status_row_by_exp_id(exp_status_row.exp_id)
- self.assertTrue(exp_status_row.exp_id == exp_row_direct.exp_id)
-
-
- def test_update_exp_status(self):
- self.exp_status_db.update_exp_status(EXPID, "RUNNING")
- exp_status_row_current = self.exp_status_db.get_experiment_status_row_by_expid(EXPID)
- self.assertTrue(exp_status_row_current.status == "RUNNING")
- self.exp_status_db.update_exp_status(EXPID, "NOT RUNNING")
- exp_status_row_current = self.exp_status_db.get_experiment_status_row_by_expid(EXPID)
- self.assertTrue(exp_status_row_current.status == "NOT RUNNING")
-
- def test_create_exp_status(self):
- experiment = self.exp_status_db.get_experiment_row_by_expid(EXPID_NONE)
- self.exp_status_db.create_experiment_status_as_running(experiment)
- experiment_status = self.exp_status_db.get_experiment_status_row_by_expid(EXPID_NONE)
- self.assertIsNotNone(experiment_status)
- self.exp_status_db.delete_exp_status(EXPID_NONE)
- experiment_status = self.exp_status_db.get_experiment_status_row_by_expid(EXPID_NONE)
- self.assertIsNone(experiment_status)
-
-@unittest.skip('TODO: looks like another test that used actual experiments data')
-class TestExperimentHistoryDbManager(unittest.TestCase):
- """ Covers Experiment History Database Manager and Data Models """
- def setUp(self):
- self.experiment_database = ExperimentHistoryDbManager(EXPID, JOBDATA_DIR)
- source_path_tt00 = os.path.join(JOBDATA_DIR, EXPID_TT00_SOURCE)
- self.target_path_tt00 = os.path.join(JOBDATA_DIR, "job_data_{0}.db".format(EXPID))
- copy2(source_path_tt00, self.target_path_tt00)
- source_path_tt01 = os.path.join(JOBDATA_DIR, EXPID_TT01_SOURCE)
- self.target_path_tt01 = os.path.join(JOBDATA_DIR, "job_data_{0}.db".format(EXPID_NONE))
- copy2(source_path_tt01, self.target_path_tt01)
- self.experiment_database.initialize()
-
- def tearDown(self):
- os.remove(self.target_path_tt00)
- os.remove(self.target_path_tt01)
-
- def test_get_max_id(self):
- max_item = self.experiment_database.get_experiment_run_dc_with_max_id()
- self.assertTrue(max_item.run_id > 0)
- self.assertTrue(max_item.run_id >= 18) # Max is 18
-
- def test_pragma(self):
- self.assertTrue(self.experiment_database._get_pragma_version() == 17) # Update version on changes
-
- def test_get_job_data(self):
- job_data = self.experiment_database._get_job_data_last_by_name("a29z_20000101_fc0_1_SIM")
- self.assertTrue(len(job_data) > 0)
- self.assertTrue(job_data[0].last == 1)
- self.assertTrue(job_data[0].job_name == "a29z_20000101_fc0_1_SIM")
-
- job_data = self.experiment_database.get_job_data_by_name("a29z_20000101_fc0_1_SIM")
- self.assertTrue(job_data[0].job_name == "a29z_20000101_fc0_1_SIM")
-
- job_data = self.experiment_database._get_job_data_last_by_run_id(18) # Latest
- self.assertTrue(len(job_data) > 0)
-
- job_data = self.experiment_database._get_job_data_last_by_run_id_and_finished(18)
- self.assertTrue(len(job_data) > 0)
-
- job_data = self.experiment_database.get_job_data_all()
- self.assertTrue(len(job_data) > 0)
-
- def test_insert_and_delete_experiment_run(self):
- new_run = ExperimentRun(19)
- new_id = self.experiment_database._insert_experiment_run(new_run)
- self.assertIsNotNone(new_id)
- last_experiment_run = self.experiment_database.get_experiment_run_dc_with_max_id()
- self.assertTrue(new_id == last_experiment_run.run_id)
- self.experiment_database.delete_experiment_run(new_id)
- last_experiment_run = self.experiment_database.get_experiment_run_dc_with_max_id()
- self.assertTrue(new_id != last_experiment_run.run_id)
-
- def test_insert_and_delete_job_data(self):
- max_run_id = self.experiment_database.get_experiment_run_dc_with_max_id().run_id
- new_job_data_name = "test_001_name_{0}".format(int(time.time()))
- new_job_data = JobData(_id=1, job_name=new_job_data_name, run_id=max_run_id)
- new_job_data_id = self.experiment_database._insert_job_data(new_job_data)
- self.assertIsNotNone(new_job_data_id)
- self.experiment_database.delete_job_data(new_job_data_id)
- job_data = self.experiment_database.get_job_data_by_name(new_job_data_name)
- self.assertTrue(len(job_data) == 0)
-
-
- def test_update_experiment_run(self):
- experiment_run_data_class = self.experiment_database.get_experiment_run_dc_with_max_id() # 18
- backup_run = self.experiment_database.get_experiment_run_dc_with_max_id()
- experiment_run_data_class.chunk_unit = "unouno"
- experiment_run_data_class.running = random.randint(1, 100)
- experiment_run_data_class.queuing = random.randint(1, 100)
- experiment_run_data_class.suspended = random.randint(1, 100)
- self.experiment_database._update_experiment_run(experiment_run_data_class)
- last_experiment_run = self.experiment_database.get_experiment_run_dc_with_max_id() # 18
- self.assertTrue(last_experiment_run.chunk_unit == experiment_run_data_class.chunk_unit)
- self.assertTrue(last_experiment_run.running == experiment_run_data_class.running)
- self.assertTrue(last_experiment_run.queuing == experiment_run_data_class.queuing)
- self.assertTrue(last_experiment_run.suspended == experiment_run_data_class.suspended)
- self.experiment_database._update_experiment_run(backup_run)
-
- def test_job_data_from_model(self):
- job_data_rows = self.experiment_database._get_job_data_last_by_name("a29z_20000101_fc0_1_SIM")
- job_data_row_first = job_data_rows[0]
- job_data_data_class = JobData.from_model(job_data_row_first)
- self.assertTrue(job_data_row_first.job_name == job_data_data_class.job_name)
-
- def test_update_job_data_processed(self):
- current_time = time.time()
- job_data_rows = self.experiment_database._get_job_data_last_by_name("a29z_20000101_fc0_1_SIM")
- job_data_row_first = job_data_rows[0]
- job_data_data_class = JobData.from_model(job_data_row_first)
- backup_job_dc = JobData.from_model(job_data_row_first)
- job_data_data_class.nnodes = random.randint(1, 1000)
- job_data_data_class.ncpus = random.randint(1, 1000)
- job_data_data_class.status = "DELAYED"
- job_data_data_class.finish = current_time
- self.experiment_database._update_job_data_by_id(job_data_data_class)
- job_data_rows_current = self.experiment_database._get_job_data_last_by_name("a29z_20000101_fc0_1_SIM")
- job_data_row_first = job_data_rows_current[0]
- self.assertTrue(job_data_row_first.nnodes == job_data_data_class.nnodes)
- self.assertTrue(job_data_row_first.ncpus == job_data_data_class.ncpus)
- self.assertTrue(job_data_row_first.status == job_data_data_class.status)
- self.assertTrue(job_data_row_first.finish == job_data_data_class.finish)
- self.experiment_database._update_job_data_by_id(backup_job_dc)
-
- def test_bulk_update(self):
- current_time = time.time()
- all_job_data_rows = self.experiment_database.get_job_data_all()
- job_data_rows_test = [job for job in all_job_data_rows if job.run_id == 3]
- backup = [JobData.from_model(job) for job in job_data_rows_test]
- list_job_data_class = [JobData.from_model(job) for job in job_data_rows_test]
- backup_changes = [(HUtils.get_current_datetime(), job.status, job.rowstatus, job._id) for job in list_job_data_class]
- changes = [(HUtils.get_current_datetime(), "DELAYED", job.rowstatus, job._id) for job in list_job_data_class]
- self.experiment_database.update_many_job_data_change_status(changes)
- all_job_data_rows = self.experiment_database.get_job_data_all()
- job_data_rows_validate = [job for job in all_job_data_rows if job.run_id == 3]
- for (job_val, change_item) in zip(job_data_rows_validate, changes):
- modified, status, rowstatus, _id = change_item
- # self.assertTrue(job_val.finish == finish)
- self.assertTrue(job_val.modified == modified)
- self.assertTrue(job_val.status == status)
- self.assertTrue(job_val.rowstatus == rowstatus)
- self.assertTrue(job_val.id == _id)
- self.experiment_database.update_many_job_data_change_status(backup_changes)
-
- def test_job_data_maxcounter(self):
- new_job_data = ExperimentHistoryDbManager(EXPID_NONE, JOBDATA_DIR)
- new_job_data.initialize()
- max_empty_table_counter = new_job_data.get_job_data_max_counter()
- self.assertTrue(max_empty_table_counter == 0)
- max_existing_counter = self.experiment_database.get_job_data_max_counter()
- self.assertTrue(max_existing_counter > 0)
-
- def test_register_submitted_job_data_dc(self):
- job_data_dc = self.experiment_database.get_job_data_dc_unique_latest_by_job_name("a29z_20000101_fc0_1_SIM")
- max_counter = self.experiment_database.get_job_data_max_counter()
- self.assertTrue(max_counter > 0)
- self.assertTrue(job_data_dc.counter > 0)
- next_counter = max(max_counter, job_data_dc.counter + 1)
- self.assertTrue(next_counter >= max_counter)
- self.assertTrue(next_counter >= job_data_dc.counter + 1)
- job_data_dc.counter = next_counter
- job_data_dc_current = self.experiment_database.register_submitted_job_data_dc(job_data_dc)
- self.assertTrue(job_data_dc._id < job_data_dc_current._id)
- job_data_last_list = self.experiment_database._get_job_data_last_by_name(job_data_dc.job_name)
- self.assertTrue(len(job_data_last_list) == 1)
- self.experiment_database.delete_job_data(job_data_last_list[0].id)
- job_data_dc.last = 1
- updated_job_data_dc = self.experiment_database.update_job_data_dc_by_id(job_data_dc)
- self.assertTrue(job_data_dc._id == updated_job_data_dc._id)
- job_data_dc = self.experiment_database.get_job_data_dc_unique_latest_by_job_name("a29z_20000101_fc0_1_SIM")
- self.assertTrue(job_data_dc._id == updated_job_data_dc._id)
-
- def test_update_children_and_platform_output(self):
- job_data_dc = self.experiment_database.get_job_data_dc_unique_latest_by_job_name("a29z_20000101_fc0_1_SIM")
- children_str = "a00, a01, a02"
- platform_output_str = " SLURM OUTPUT "
- job_data_dc.children = children_str
- job_data_dc.platform_output = platform_output_str
- self.experiment_database.update_job_data_dc_by_id(job_data_dc)
- job_data_dc_updated = self.experiment_database.get_job_data_dc_unique_latest_by_job_name("a29z_20000101_fc0_1_SIM")
- self.assertTrue(job_data_dc_updated.children == children_str)
- self.assertTrue(job_data_dc_updated.platform_output == platform_output_str)
- # Back to normal
- job_data_dc.children = ""
- job_data_dc.platform_output = "NO OUTPUT"
- self.experiment_database.update_job_data_dc_by_id(job_data_dc)
- job_data_dc_updated = self.experiment_database.get_job_data_dc_unique_latest_by_job_name("a29z_20000101_fc0_1_SIM")
- self.assertTrue(job_data_dc_updated.children == "")
- self.assertTrue(job_data_dc_updated.platform_output == "NO OUTPUT")
-
-
-
- def test_experiment_run_dc(self):
- experiment_run = self.experiment_database.get_experiment_run_dc_with_max_id()
- self.assertIsNotNone(experiment_run)
-
- def test_if_database_exists(self):
- exp_manager = ExperimentHistoryDbManager("0000")
- self.assertTrue(exp_manager.my_database_exists() is False)
-
-
-if __name__ == '__main__':
- unittest.main()
\ No newline at end of file
+
+@pytest.mark.skip('TODO: looks like another test that used actual experiments data')
+class TestExperimentStatusDatabaseManager():
+    """ Covers Experiment Status Database Manager """
+
+    def setup_method(self):
+        self.exp_status_db = ExperimentStatusDbManager(EXPID, BasicConfig.DB_DIR, BasicConfig.DB_FILE, LOCAL_ROOT_DIR)
+
+    def test_get_current_experiment_status_row(self):
+        exp_status_row = self.exp_status_db.get_experiment_status_row_by_expid(EXPID)
+        assert exp_status_row is not None
+        exp_status_row_none = self.exp_status_db.get_experiment_status_row_by_expid(EXPID_NONE)
+        assert exp_status_row_none is None
+        exp_row_direct = self.exp_status_db.get_experiment_status_row_by_exp_id(exp_status_row.exp_id)
+        assert exp_status_row.exp_id == exp_row_direct.exp_id
+
+    def test_update_exp_status(self):
+        self.exp_status_db.update_exp_status(EXPID, "RUNNING")
+        exp_status_row_current = self.exp_status_db.get_experiment_status_row_by_expid(EXPID)
+        assert exp_status_row_current.status == "RUNNING"
+        self.exp_status_db.update_exp_status(EXPID, "NOT RUNNING")
+        exp_status_row_current = self.exp_status_db.get_experiment_status_row_by_expid(EXPID)
+        assert exp_status_row_current.status == "NOT RUNNING"
+
+    def test_create_exp_status(self):
+        experiment = self.exp_status_db.get_experiment_row_by_expid(EXPID_NONE)
+        self.exp_status_db.create_experiment_status_as_running(experiment)
+        experiment_status = self.exp_status_db.get_experiment_status_row_by_expid(EXPID_NONE)
+        assert experiment_status is not None
+        self.exp_status_db.delete_exp_status(EXPID_NONE)
+        experiment_status = self.exp_status_db.get_experiment_status_row_by_expid(EXPID_NONE)
+        assert experiment_status is None
+
+
+@pytest.mark.skip('TODO: looks like another test that used actual experiments data')
+class TestExperimentHistoryDbManager():
+    """ Covers Experiment History Database Manager and Data Models """
+
+    def setup_method(self):
+        self.experiment_database = ExperimentHistoryDbManager(EXPID, JOBDATA_DIR)
+        source_path_tt00 = os.path.join(JOBDATA_DIR, EXPID_TT00_SOURCE)
+        self.target_path_tt00 = os.path.join(JOBDATA_DIR, "job_data_{0}.db".format(EXPID))
+        copy2(source_path_tt00, self.target_path_tt00)
+        source_path_tt01 = os.path.join(JOBDATA_DIR, EXPID_TT01_SOURCE)
+        self.target_path_tt01 = os.path.join(JOBDATA_DIR, "job_data_{0}.db".format(EXPID_NONE))
+        copy2(source_path_tt01, self.target_path_tt01)
+        self.experiment_database.initialize()
+
+    def teardown_method(self):
+        os.remove(self.target_path_tt00)
+        os.remove(self.target_path_tt01)
+
+    def test_get_max_id(self):
+        max_item = self.experiment_database.get_experiment_run_dc_with_max_id()
+        assert max_item.run_id > 0
+        assert max_item.run_id >= 18  # Max is 18
+
+    def test_pragma(self):
+        assert self.experiment_database._get_pragma_version() == 17  # Update version on changes
+
+    def test_get_job_data(self):
+        job_data = self.experiment_database._get_job_data_last_by_name("a29z_20000101_fc0_1_SIM")
+        assert len(job_data) > 0
+        assert job_data[0].last == 1
+        assert job_data[0].job_name == "a29z_20000101_fc0_1_SIM"
+
+        job_data = self.experiment_database.get_job_data_by_name("a29z_20000101_fc0_1_SIM")
+        assert job_data[0].job_name == "a29z_20000101_fc0_1_SIM"
+
+        job_data = self.experiment_database._get_job_data_last_by_run_id(18)  # Latest
+        assert len(job_data) > 0
+
+        job_data = self.experiment_database._get_job_data_last_by_run_id_and_finished(18)
+        assert len(job_data) > 0
+
+        job_data = self.experiment_database.get_job_data_all()
+        assert len(job_data) > 0
+
+    def test_insert_and_delete_experiment_run(self):
+        new_run = ExperimentRun(19)
+        new_id = self.experiment_database._insert_experiment_run(new_run)
+        assert new_id is not None
+        last_experiment_run = self.experiment_database.get_experiment_run_dc_with_max_id()
+        assert new_id == last_experiment_run.run_id
+        self.experiment_database.delete_experiment_run(new_id)
+        last_experiment_run = self.experiment_database.get_experiment_run_dc_with_max_id()
+        assert new_id != last_experiment_run.run_id
+
+    def test_insert_and_delete_job_data(self):
+        max_run_id = self.experiment_database.get_experiment_run_dc_with_max_id().run_id
+        new_job_data_name = "test_001_name_{0}".format(int(time.time()))
+        new_job_data = JobData(_id=1, job_name=new_job_data_name, run_id=max_run_id)
+        new_job_data_id = self.experiment_database._insert_job_data(new_job_data)
+        assert new_job_data_id is not None
+        self.experiment_database.delete_job_data(new_job_data_id)
+        job_data = self.experiment_database.get_job_data_by_name(new_job_data_name)
+        assert len(job_data) == 0
+
+    def test_update_experiment_run(self):
+        experiment_run_data_class = self.experiment_database.get_experiment_run_dc_with_max_id()  # 18
+        backup_run = self.experiment_database.get_experiment_run_dc_with_max_id()
+        experiment_run_data_class.chunk_unit = "unouno"
+        experiment_run_data_class.running = random.randint(1, 100)
+        experiment_run_data_class.queuing = random.randint(1, 100)
+        experiment_run_data_class.suspended = random.randint(1, 100)
+        self.experiment_database._update_experiment_run(experiment_run_data_class)
+        last_experiment_run = self.experiment_database.get_experiment_run_dc_with_max_id()  # 18
+        assert last_experiment_run.chunk_unit == experiment_run_data_class.chunk_unit
+        assert last_experiment_run.running == experiment_run_data_class.running
+        assert last_experiment_run.queuing == experiment_run_data_class.queuing
+        assert last_experiment_run.suspended == experiment_run_data_class.suspended
+        self.experiment_database._update_experiment_run(backup_run)
+
+    def test_job_data_from_model(self):
+        job_data_rows = self.experiment_database._get_job_data_last_by_name("a29z_20000101_fc0_1_SIM")
+        job_data_row_first = job_data_rows[0]
+        job_data_data_class = JobData.from_model(job_data_row_first)
+        assert job_data_row_first.job_name == job_data_data_class.job_name
+
+    def test_update_job_data_processed(self):
+        current_time = time.time()
+        job_data_rows = self.experiment_database._get_job_data_last_by_name("a29z_20000101_fc0_1_SIM")
+        job_data_row_first = job_data_rows[0]
+        job_data_data_class = JobData.from_model(job_data_row_first)
+        backup_job_dc = JobData.from_model(job_data_row_first)
+        job_data_data_class.nnodes = random.randint(1, 1000)
+        job_data_data_class.ncpus = random.randint(1, 1000)
+        job_data_data_class.status = "DELAYED"
+        job_data_data_class.finish = current_time
+        self.experiment_database._update_job_data_by_id(job_data_data_class)
+        job_data_rows_current = self.experiment_database._get_job_data_last_by_name("a29z_20000101_fc0_1_SIM")
+        job_data_row_first = job_data_rows_current[0]
+        assert job_data_row_first.nnodes == job_data_data_class.nnodes
+        assert job_data_row_first.ncpus == job_data_data_class.ncpus
+        assert job_data_row_first.status == job_data_data_class.status
+        assert job_data_row_first.finish == job_data_data_class.finish
+        self.experiment_database._update_job_data_by_id(backup_job_dc)
+
+    def test_bulk_update(self):
+        current_time = time.time()
+        all_job_data_rows = self.experiment_database.get_job_data_all()
+        job_data_rows_test = [job for job in all_job_data_rows if job.run_id == 3]
+        backup = [JobData.from_model(job) for job in job_data_rows_test]
+        list_job_data_class = [JobData.from_model(job) for job in job_data_rows_test]
+        backup_changes = [(HUtils.get_current_datetime(), job.status, job.rowstatus, job._id) for job in
+                          list_job_data_class]
+        changes = [(HUtils.get_current_datetime(), "DELAYED", job.rowstatus, job._id) for job in list_job_data_class]
+        self.experiment_database.update_many_job_data_change_status(changes)
+        all_job_data_rows = self.experiment_database.get_job_data_all()
+        job_data_rows_validate = [job for job in all_job_data_rows if job.run_id == 3]
+        for (job_val, change_item) in zip(job_data_rows_validate, changes):
+            modified, status, rowstatus, _id = change_item
+            # assert job_val.finish == finish
+            assert job_val.modified == modified
+            assert job_val.status == status
+            assert job_val.rowstatus == rowstatus
+            assert job_val.id == _id
+        self.experiment_database.update_many_job_data_change_status(backup_changes)
+
+    def test_job_data_maxcounter(self):
+        new_job_data = ExperimentHistoryDbManager(EXPID_NONE, JOBDATA_DIR)
+        new_job_data.initialize()
+        max_empty_table_counter = new_job_data.get_job_data_max_counter()
+        assert max_empty_table_counter == 0
+        max_existing_counter = self.experiment_database.get_job_data_max_counter()
+        assert max_existing_counter > 0
+
+    def test_register_submitted_job_data_dc(self):
+        job_data_dc = self.experiment_database.get_job_data_dc_unique_latest_by_job_name("a29z_20000101_fc0_1_SIM")
+        max_counter = self.experiment_database.get_job_data_max_counter()
+        assert max_counter > 0
+        assert job_data_dc.counter > 0
+        next_counter = max(max_counter, job_data_dc.counter + 1)
+        assert next_counter >= max_counter
+        assert next_counter >= job_data_dc.counter + 1
+        job_data_dc.counter = next_counter
+        job_data_dc_current = self.experiment_database.register_submitted_job_data_dc(job_data_dc)
+        assert job_data_dc._id < job_data_dc_current._id
+        job_data_last_list = self.experiment_database._get_job_data_last_by_name(job_data_dc.job_name)
+        assert len(job_data_last_list) == 1
+        self.experiment_database.delete_job_data(job_data_last_list[0].id)
+        job_data_dc.last = 1
+        updated_job_data_dc = self.experiment_database.update_job_data_dc_by_id(job_data_dc)
+        assert job_data_dc._id == updated_job_data_dc._id
+        job_data_dc = self.experiment_database.get_job_data_dc_unique_latest_by_job_name("a29z_20000101_fc0_1_SIM")
+        assert job_data_dc._id == updated_job_data_dc._id
+
+    def test_update_children_and_platform_output(self):
+        job_data_dc = self.experiment_database.get_job_data_dc_unique_latest_by_job_name("a29z_20000101_fc0_1_SIM")
+        children_str = "a00, a01, a02"
+        platform_output_str = " SLURM OUTPUT "
+        job_data_dc.children = children_str
+        job_data_dc.platform_output = platform_output_str
+        self.experiment_database.update_job_data_dc_by_id(job_data_dc)
+        job_data_dc_updated = self.experiment_database.get_job_data_dc_unique_latest_by_job_name(
+            "a29z_20000101_fc0_1_SIM")
+        assert job_data_dc_updated.children == children_str
+        assert job_data_dc_updated.platform_output == platform_output_str
+        # Back to normal
+        job_data_dc.children = ""
+        job_data_dc.platform_output = "NO OUTPUT"
+        self.experiment_database.update_job_data_dc_by_id(job_data_dc)
+        job_data_dc_updated = self.experiment_database.get_job_data_dc_unique_latest_by_job_name(
+            "a29z_20000101_fc0_1_SIM")
+        assert job_data_dc_updated.children == ""
+        assert job_data_dc_updated.platform_output == "NO OUTPUT"
+
+    def test_experiment_run_dc(self):
+        experiment_run = self.experiment_database.get_experiment_run_dc_with_max_id()
+        assert experiment_run is not None
+
+    def test_if_database_exists(self):
+        exp_manager = ExperimentHistoryDbManager("0000")
+        assert exp_manager.my_database_exists() is False
diff --git a/test/unit/test_db_manager.py b/test/unit/test_db_manager.py
index a46133c9f76b78f6184dd3c899d341d1b04bc8a7..c688ee30ae067586dbbc5eddf0b251c7513deaf0 100644
--- a/test/unit/test_db_manager.py
+++ b/test/unit/test_db_manager.py
@@ -1,62 +1,78 @@
-from unittest import TestCase
+# Copyright 2015-2023 Earth Sciences Department, BSC-CNS
+# This file is part of Autosubmit.
+#
+# Autosubmit is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Autosubmit is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Autosubmit. If not, see <http://www.gnu.org/licenses/>.
-import os
import sys
from mock import MagicMock
-from mock import patch
+
from autosubmit.database.db_manager import DbManager
-class TestDbManager(TestCase):
- def test_create_table_command_returns_a_valid_command(self):
- # arrange
- table_name = 'tests'
- table_fields = ['dummy1', 'dummy2', 'dummy3']
- expected_command = 'CREATE TABLE IF NOT EXISTS tests (dummy1, dummy2, dummy3)'
- # act
- command = DbManager.generate_create_table_command(table_name, table_fields)
- # assert
- self.assertEqual(expected_command, command)
-
- def test_insert_command_returns_a_valid_command(self):
- # arrange
- table_name = 'tests'
- columns = ['col1, col2, col3']
- values = ['dummy1', 'dummy2', 'dummy3']
- expected_command = 'INSERT INTO tests(col1, col2, col3) VALUES ("dummy1", "dummy2", "dummy3")'
- # act
- command = DbManager.generate_insert_command(table_name, columns, values)
- # assert
- self.assertEqual(expected_command, command)
-
- def test_insert_many_command_returns_a_valid_command(self):
- # arrange
- table_name = 'tests'
- num_of_values = 3
- expected_command = 'INSERT INTO tests VALUES (?,?,?)'
- # act
- command = DbManager.generate_insert_many_command(table_name, num_of_values)
- # assert
- self.assertEqual(expected_command, command)
-
- def test_select_command_returns_a_valid_command(self):
- # arrange
- table_name = 'tests'
- where = ['test=True', 'debug=True']
- expected_command = 'SELECT * FROM tests WHERE test=True AND debug=True'
- # act
- command = DbManager.generate_select_command(table_name, where)
- # assert
- self.assertEqual(expected_command, command)
-
- def test_when_database_already_exists_then_is_not_initialized_again(self):
- sys.modules['os'].path.exists = MagicMock(return_value=True)
- connection_mock = MagicMock()
- cursor_mock = MagicMock()
- cursor_mock.side_effect = Exception('This method should not be called')
- connection_mock.cursor = MagicMock(return_value=cursor_mock)
- original_connect = sys.modules['sqlite3'].connect
- sys.modules['sqlite3'].connect = MagicMock(return_value=connection_mock)
- DbManager('dummy-path', 'dummy-name', 999)
- connection_mock.cursor.assert_not_called()
- sys.modules['sqlite3'].connect = original_connect
+def test_create_table_command_returns_a_valid_command():
+ # arrange
+ table_name = 'tests'
+ table_fields = ['dummy1', 'dummy2', 'dummy3']
+ expected_command = 'CREATE TABLE IF NOT EXISTS tests (dummy1, dummy2, dummy3)'
+ # act
+ command = DbManager.generate_create_table_command(table_name, table_fields)
+ # assert
+ assert expected_command == command
+
+
+def test_insert_command_returns_a_valid_command():
+ # arrange
+ table_name = 'tests'
+ columns = ['col1, col2, col3']
+ values = ['dummy1', 'dummy2', 'dummy3']
+ expected_command = 'INSERT INTO tests(col1, col2, col3) VALUES ("dummy1", "dummy2", "dummy3")'
+ # act
+ command = DbManager.generate_insert_command(table_name, columns, values)
+ # assert
+ assert expected_command == command
+
+
+def test_insert_many_command_returns_a_valid_command():
+ # arrange
+ table_name = 'tests'
+ num_of_values = 3
+ expected_command = 'INSERT INTO tests VALUES (?,?,?)'
+ # act
+ command = DbManager.generate_insert_many_command(table_name, num_of_values)
+ # assert
+ assert expected_command == command
+
+
+def test_select_command_returns_a_valid_command():
+ # arrange
+ table_name = 'tests'
+ where = ['test=True', 'debug=True']
+ expected_command = 'SELECT * FROM tests WHERE test=True AND debug=True'
+ # act
+ command = DbManager.generate_select_command(table_name, where)
+ # assert
+ assert expected_command == command
+
+
+def test_when_database_already_exists_then_is_not_initialized_again():
+ sys.modules['os'].path.exists = MagicMock(return_value=True)
+ connection_mock = MagicMock()
+ cursor_mock = MagicMock()
+ cursor_mock.side_effect = Exception('This method should not be called')
+ connection_mock.cursor = MagicMock(return_value=cursor_mock)
+ original_connect = sys.modules['sqlite3'].connect
+ sys.modules['sqlite3'].connect = MagicMock(return_value=connection_mock)
+ DbManager('dummy-path', 'dummy-name', 999)
+ connection_mock.cursor.assert_not_called()
+ sys.modules['sqlite3'].connect = original_connect
diff --git a/test/unit/test_dic_jobs.py b/test/unit/test_dic_jobs.py
index fd8b459d72c2a9c168e62f2112b8d75ebec75b7d..af9c6d7ea874e14d57bd5f7ab03bff7dc64a67a7 100644
--- a/test/unit/test_dic_jobs.py
+++ b/test/unit/test_dic_jobs.py
@@ -360,7 +360,7 @@ class TestDicJobs(TestCase):
self.assertEqual('random-id_2016010100_fc0_ch0_test', created_job.name)
self.assertEqual(Status.WAITING, created_job.status)
self.assertEqual(priority, created_job.priority)
- self.assertEqual(section, created_job.section)
+ self.assertEqual(section, created_job.SECTION)
self.assertEqual(date, created_job.date)
self.assertEqual(member, created_job.member)
self.assertEqual(chunk, created_job.chunk)
diff --git a/test/unit/test_expid.py b/test/unit/test_expid.py
index 466e3879da21c8e002863886a8a01c9fa8bfb267..56427298c9f2cbef91d52a36f533b8fbc44b1065 100644
--- a/test/unit/test_expid.py
+++ b/test/unit/test_expid.py
@@ -1,183 +1,198 @@
+# Copyright 2015-2023 Earth Sciences Department, BSC-CNS
+# This file is part of Autosubmit.
+#
+# Autosubmit is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Autosubmit is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Autosubmit. If not, see <http://www.gnu.org/licenses/>.
+
import tempfile
-from unittest import TestCase
-from mock import Mock, patch
+from itertools import permutations, product
+from mock import Mock
+from pathlib import Path
+from textwrap import dedent
+
from autosubmit.autosubmit import Autosubmit
from autosubmit.experiment.experiment_common import new_experiment
-from textwrap import dedent
-from pathlib import Path
from autosubmitconfigparser.config.basicconfig import BasicConfig
-from itertools import permutations, product
+
+DESCRIPTION = "for testing"
+VERSION = "test-version"
+
+
+def test_create_new_experiment(mocker):
+ db_common_mock = mocker.patch('autosubmit.experiment.experiment_common.db_common')
+ current_experiment_id = "empty"
+ _build_db_mock(current_experiment_id, db_common_mock)
+ experiment_id = new_experiment(DESCRIPTION, VERSION)
+ assert "a000" == experiment_id
+
+
+def test_create_new_test_experiment(mocker):
+ db_common_mock = mocker.patch('autosubmit.experiment.experiment_common.db_common')
+ current_experiment_id = "empty"
+ _build_db_mock(current_experiment_id, db_common_mock)
+ experiment_id = new_experiment(DESCRIPTION, VERSION, True)
+ assert "t000" == experiment_id
+
+
+def test_create_new_operational_experiment(mocker):
+ db_common_mock = mocker.patch('autosubmit.experiment.experiment_common.db_common')
+ current_experiment_id = "empty"
+ _build_db_mock(current_experiment_id, db_common_mock)
+ experiment_id = new_experiment(DESCRIPTION, VERSION, False, True)
+ assert "o000" == experiment_id
+
+
+def test_create_new_experiment_with_previous_one(mocker):
+ db_common_mock = mocker.patch('autosubmit.experiment.experiment_common.db_common')
+ current_experiment_id = "a007"
+ _build_db_mock(current_experiment_id, db_common_mock)
+ experiment_id = new_experiment(DESCRIPTION, VERSION)
+ assert "a007" == experiment_id
-class TestExpid(TestCase):
- def setUp(self):
- self.description = "for testing"
- self.version = "test-version"
-
- @patch('autosubmit.experiment.experiment_common.db_common')
- def test_create_new_experiment(self, db_common_mock):
- current_experiment_id = "empty"
- self._build_db_mock(current_experiment_id, db_common_mock)
- experiment_id = new_experiment(self.description, self.version)
- self.assertEqual("a000", experiment_id)
-
- @patch('autosubmit.experiment.experiment_common.db_common')
- def test_create_new_test_experiment(self, db_common_mock):
- current_experiment_id = "empty"
- self._build_db_mock(current_experiment_id, db_common_mock)
- experiment_id = new_experiment(self.description, self.version, True)
- self.assertEqual("t000", experiment_id)
-
- @patch('autosubmit.experiment.experiment_common.db_common')
- def test_create_new_operational_experiment(self, db_common_mock):
- current_experiment_id = "empty"
- self._build_db_mock(current_experiment_id, db_common_mock)
- experiment_id = new_experiment(self.description, self.version, False, True)
- self.assertEqual("o000", experiment_id)
-
- @patch('autosubmit.experiment.experiment_common.db_common')
- def test_create_new_experiment_with_previous_one(self, db_common_mock):
- current_experiment_id = "a007"
- self._build_db_mock(current_experiment_id, db_common_mock)
- experiment_id = new_experiment(self.description, self.version)
- self.assertEqual("a007", experiment_id)
-
- @patch('autosubmit.experiment.experiment_common.db_common')
- def test_create_new_test_experiment_with_previous_one(self, db_common_mock):
- current_experiment_id = "t0ac"
- self._build_db_mock(current_experiment_id, db_common_mock)
- experiment_id = new_experiment(self.description, self.version, True)
- self.assertEqual("t0ac", experiment_id)
-
- @patch('autosubmit.experiment.experiment_common.db_common')
- def test_create_new_operational_experiment_with_previous_one(self, db_common_mock):
- current_experiment_id = "o113"
- self._build_db_mock(current_experiment_id, db_common_mock)
- experiment_id = new_experiment(self.description, self.version, False, True)
- self.assertEqual("o113", experiment_id)
-
- @staticmethod
- def _build_db_mock(current_experiment_id, mock_db_common):
- mock_db_common.last_name_used = Mock(return_value=current_experiment_id)
- mock_db_common.check_experiment_exists = Mock(return_value=False)
-
- @patch('autosubmit.autosubmit.resource_listdir')
- @patch('autosubmit.autosubmit.resource_filename')
- def test_autosubmit_generate_config(self, resource_filename_mock, resource_listdir_mock):
+def test_create_new_test_experiment_with_previous_one(mocker):
+ db_common_mock = mocker.patch('autosubmit.experiment.experiment_common.db_common')
+ current_experiment_id = "t0ac"
+ _build_db_mock(current_experiment_id, db_common_mock)
+ experiment_id = new_experiment(DESCRIPTION, VERSION, True)
+ assert "t0ac" == experiment_id
+
+
+def test_create_new_operational_experiment_with_previous_one(mocker):
+ db_common_mock = mocker.patch('autosubmit.experiment.experiment_common.db_common')
+ current_experiment_id = "o113"
+ _build_db_mock(current_experiment_id, db_common_mock)
+ experiment_id = new_experiment(DESCRIPTION, VERSION, False, True)
+ assert "o113" == experiment_id
+
+
+def _build_db_mock(current_experiment_id, mock_db_common):
+ mock_db_common.last_name_used = Mock(return_value=current_experiment_id)
+ mock_db_common.check_experiment_exists = Mock(return_value=False)
+
+
+def test_autosubmit_generate_config(mocker):
+    resource_listdir_mock = mocker.patch('autosubmit.autosubmit.resource_listdir')
+    resource_filename_mock = mocker.patch('autosubmit.autosubmit.resource_filename')
+ expid = 'ff99'
+ original_local_root_dir = BasicConfig.LOCAL_ROOT_DIR
+
+ with tempfile.NamedTemporaryFile(suffix='.yaml',
+ mode='w') as source_yaml, tempfile.TemporaryDirectory() as temp_dir:
+ # Our processed and commented YAML output file must be written here
+ Path(temp_dir, expid, 'conf').mkdir(parents=True)
+ BasicConfig.LOCAL_ROOT_DIR = temp_dir
+
+ source_yaml.write(
+ dedent('''JOB:
+    JOBNAME: SIM
+    PLATFORM: local
+CONFIG:
+    TEST: The answer?
+    ROOT: No'''))
+ source_yaml.flush()
+ resource_listdir_mock.return_value = [Path(source_yaml.name).name]
+ resource_filename_mock.return_value = source_yaml.name
+
+ parameters = {
+ 'JOB': {
+ 'JOBNAME': 'sim'
+ },
+ 'CONFIG': {
+ 'CONFIG.TEST': '42'
+ }
+ }
+ Autosubmit.generate_as_config(exp_id=expid, parameters=parameters)
+
+ source_text = Path(source_yaml.name).read_text()
+ source_name = Path(source_yaml.name)
+ output_text = Path(temp_dir, expid, 'conf', f'{source_name.stem}_{expid}.yml').read_text()
+
+ assert source_text != output_text
+ assert '# sim' not in source_text
+ assert '# sim' in output_text
+ assert '# 42' not in source_text
+ assert '# 42' in output_text
+
+ # Reset the local root dir.
+ BasicConfig.LOCAL_ROOT_DIR = original_local_root_dir
+
+
+def test_autosubmit_generate_config_resource_listdir_order(mocker) -> None:
+ """
+ In https://earth.bsc.es/gitlab/es/autosubmit/-/issues/1063 we had a bug
+ where we relied on the order of returned entries of ``pkg_resources.resource_listdir``
+ (which is actually undefined per https://importlib-resources.readthedocs.io/en/latest/migration.html).
+
+ We use the arrays below to test that defining a git minimal, we process only
+ the expected files. We permute and then product the arrays to get as many test
+ cases as possible.
+
+ For every test case, we know that for dummy and minimal we get just one configuration
+ template file used. But for other cases we get as many files as we have that are not
+ ``*minimal.yml`` nor ``*dummy.yml``. In our test cases here, when not dummy and not minimal
+ we must get 2 files, since we have ``include_me_please.yml`` and ``me_too.yml``.
+
+ :return: None
+ """
+
+ resource_filename_mock = mocker.patch('autosubmit.autosubmit.resource_filename')
+ resource_listdir_mock = mocker.patch('autosubmit.autosubmit.resource_listdir')
+ yaml_mock = mocker.patch('autosubmit.autosubmit.YAML.dump')
+
+ # unique lists of resources, no repetitions
+ resources = permutations(
+ ['dummy.yml', 'local-minimal.yml', 'git-minimal.yml', 'include_me_please.yml', 'me_too.yml'])
+ dummy = [True, False]
+ local = [True, False]
+ minimal_configuration = [True, False]
+ test_cases = product(resources, dummy, local, minimal_configuration)
+ keys = ['resources', 'dummy', 'local', 'minimal_configuration']
+
+ for test_case in test_cases:
+ test = dict(zip(keys, test_case))
expid = 'ff99'
original_local_root_dir = BasicConfig.LOCAL_ROOT_DIR
- with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as source_yaml, tempfile.TemporaryDirectory() as temp_dir:
- # Our processed and commented YAML output file must be written here
+ with tempfile.TemporaryDirectory() as temp_dir:
Path(temp_dir, expid, 'conf').mkdir(parents=True)
BasicConfig.LOCAL_ROOT_DIR = temp_dir
- source_yaml.write(
-dedent('''JOB:
- JOBNAME: SIM
- PLATFORM: local
-CONFIG:
- TEST: The answer?
- ROOT: No'''))
- source_yaml.flush()
- resource_listdir_mock.return_value = [Path(source_yaml.name).name]
- resource_filename_mock.return_value = source_yaml.name
-
- parameters = {
- 'JOB': {
- 'JOBNAME': 'sim'
- },
- 'CONFIG': {
- 'CONFIG.TEST': '42'
- }
- }
- Autosubmit.generate_as_config(exp_id=expid, parameters=parameters)
+ resources_return = []
+ filenames_return = []
- source_text = Path(source_yaml.name).read_text()
- source_name = Path(source_yaml.name)
- output_text = Path(temp_dir, expid, 'conf', f'{source_name.stem}_{expid}.yml').read_text()
+ for file_name in test['resources']:
+ input_path = Path(temp_dir, file_name)
+ with open(input_path, 'w+') as source_yaml:
+ source_yaml.write('TEST: YES')
+ source_yaml.flush()
- self.assertNotEquals(source_text, output_text)
- self.assertFalse('# sim' in source_text)
- self.assertTrue('# sim' in output_text)
- self.assertFalse('# 42' in source_text)
- self.assertTrue('# 42' in output_text)
+ resources_return.append(input_path.name) # path
+ filenames_return.append(source_yaml.name) # textiowrapper
+
+ resource_listdir_mock.return_value = resources_return
+ resource_filename_mock.side_effect = filenames_return
+
+ Autosubmit.generate_as_config(
+ exp_id=expid,
+ dummy=test['dummy'],
+ minimal_configuration=test['minimal_configuration'],
+ local=test['local'])
+
+ msg = f'Incorrect call count for resources={",".join(resources_return)}, dummy={test["dummy"]}, minimal_configuration={test["minimal_configuration"]}, local={test["local"]}'
+ expected = 2 if (not test['dummy'] and not test['minimal_configuration']) else 1
+ assert yaml_mock.call_count == expected, msg
+ yaml_mock.reset_mock()
# Reset the local root dir.
BasicConfig.LOCAL_ROOT_DIR = original_local_root_dir
-
- @patch('autosubmit.autosubmit.YAML.dump')
- @patch('autosubmit.autosubmit.resource_listdir')
- @patch('autosubmit.autosubmit.resource_filename')
- def test_autosubmit_generate_config_resource_listdir_order(
- self,
- resource_filename_mock,
- resource_listdir_mock,
- yaml_mock
- ) -> None:
- """
- In https://earth.bsc.es/gitlab/es/autosubmit/-/issues/1063 we had a bug
- where we relied on the order of returned entries of ``pkg_resources.resource_listdir``
- (which is actually undefined per https://importlib-resources.readthedocs.io/en/latest/migration.html).
-
- We use the arrays below to test that defining a git minimal, we process only
- the expected files. We permute and then product the arrays to get as many test
- cases as possible.
-
- For every test case, we know that for dummy and minimal we get just one configuration
- template file used. But for other cases we get as many files as we have that are not
- ``*minimal.yml`` nor ``*dummy.yml``. In our test cases here, when not dummy and not minimal
- we must get 2 files, since we have ``include_me_please.yml`` and ``me_too.yml``.
-
- :param resource_filename_mock: mocked resource_listdir
- :param resource_listdir_mock: mocked resource_filename
- :param yaml_mock: mocked YAML dump function
- :return: None
- """
-
- # unique lists of resources, no repetitions
- resources = permutations(['dummy.yml', 'local-minimal.yml', 'git-minimal.yml', 'include_me_please.yml', 'me_too.yml'])
- dummy = [True, False]
- local = [True, False]
- minimal_configuration = [True, False]
- test_cases = product(resources, dummy, local, minimal_configuration)
- keys = ['resources', 'dummy', 'local', 'minimal_configuration']
-
- for test_case in test_cases:
- test = dict(zip(keys, test_case))
- expid = 'ff99'
- original_local_root_dir = BasicConfig.LOCAL_ROOT_DIR
-
- with tempfile.TemporaryDirectory() as temp_dir:
- Path(temp_dir, expid, 'conf').mkdir(parents=True)
- BasicConfig.LOCAL_ROOT_DIR = temp_dir
-
- resources_return = []
- filenames_return = []
-
- for file_name in test['resources']:
- input_path = Path(temp_dir, file_name)
- with open(input_path, 'w+') as source_yaml:
-
- source_yaml.write('TEST: YES')
- source_yaml.flush()
-
- resources_return.append(input_path.name) # path
- filenames_return.append(source_yaml.name) # textiowrapper
-
- resource_listdir_mock.return_value = resources_return
- resource_filename_mock.side_effect = filenames_return
-
- Autosubmit.generate_as_config(
- exp_id=expid,
- dummy=test['dummy'],
- minimal_configuration=test['minimal_configuration'],
- local=test['local'])
-
- msg = f'Incorrect call count for resources={",".join(resources_return)}, dummy={test["dummy"]}, minimal_configuration={test["minimal_configuration"]}, local={test["local"]}'
- expected = 2 if (not test['dummy'] and not test['minimal_configuration']) else 1
- self.assertEqual(yaml_mock.call_count, expected, msg=msg)
- yaml_mock.reset_mock()
-
- # Reset the local root dir.
- BasicConfig.LOCAL_ROOT_DIR = original_local_root_dir
diff --git a/test/unit/test_job_common.py b/test/unit/test_job_common.py
index bf9e2ac68f6b0933d2049687f3a51fb897fb949b..852d770e629b88e076e9113aba6b3e34021c1c06 100644
--- a/test/unit/test_job_common.py
+++ b/test/unit/test_job_common.py
@@ -1,21 +1,32 @@
-from unittest import TestCase
+# Copyright 2015-2023 Earth Sciences Department, BSC-CNS
+# This file is part of Autosubmit.
+#
+# Autosubmit is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Autosubmit is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Autosubmit. If not, see <http://www.gnu.org/licenses/>.
from autosubmit.job.job_common import Status
+"""This test is intended to prevent wrong changes on the Status class definition."""
-class TestJobCommon(TestCase):
- """
- This test is intended to prevent wrong changes on the Status class definition
- """
- def test_value_to_key_has_the_same_values_as_status_constants(self):
- self.assertEqual('SUSPENDED', Status.VALUE_TO_KEY[Status.SUSPENDED])
- self.assertEqual('UNKNOWN', Status.VALUE_TO_KEY[Status.UNKNOWN])
- self.assertEqual('FAILED', Status.VALUE_TO_KEY[Status.FAILED])
- self.assertEqual('WAITING', Status.VALUE_TO_KEY[Status.WAITING])
- self.assertEqual('READY', Status.VALUE_TO_KEY[Status.READY])
- self.assertEqual('SUBMITTED', Status.VALUE_TO_KEY[Status.SUBMITTED])
- self.assertEqual('HELD', Status.VALUE_TO_KEY[Status.HELD])
- self.assertEqual('QUEUING', Status.VALUE_TO_KEY[Status.QUEUING])
- self.assertEqual('RUNNING', Status.VALUE_TO_KEY[Status.RUNNING])
- self.assertEqual('COMPLETED', Status.VALUE_TO_KEY[Status.COMPLETED])
+def test_value_to_key_has_the_same_values_as_status_constants():
+ assert 'SUSPENDED' == Status.VALUE_TO_KEY[Status.SUSPENDED]
+ assert 'UNKNOWN' == Status.VALUE_TO_KEY[Status.UNKNOWN]
+ assert 'FAILED' == Status.VALUE_TO_KEY[Status.FAILED]
+ assert 'WAITING' == Status.VALUE_TO_KEY[Status.WAITING]
+ assert 'READY' == Status.VALUE_TO_KEY[Status.READY]
+ assert 'SUBMITTED' == Status.VALUE_TO_KEY[Status.SUBMITTED]
+ assert 'HELD' == Status.VALUE_TO_KEY[Status.HELD]
+ assert 'QUEUING' == Status.VALUE_TO_KEY[Status.QUEUING]
+ assert 'RUNNING' == Status.VALUE_TO_KEY[Status.RUNNING]
+ assert 'COMPLETED' == Status.VALUE_TO_KEY[Status.COMPLETED]
diff --git a/test/unit/test_log.py b/test/unit/test_log.py
index e261b64291a65fb4e4fa29faabd49aa6bbb266bc..704b56f48e2efe77f500d6bb14c20ae1defb4027 100644
--- a/test/unit/test_log.py
+++ b/test/unit/test_log.py
@@ -1,30 +1,41 @@
-from unittest import TestCase
-from log.log import AutosubmitError, AutosubmitCritical
+# Copyright 2015-2023 Earth Sciences Department, BSC-CNS
+# This file is part of Autosubmit.
+#
+# Autosubmit is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Autosubmit is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Autosubmit. If not, see <http://www.gnu.org/licenses/>.
+from log.log import AutosubmitError, AutosubmitCritical
"""Tests for the log module."""
-class TestLog(TestCase):
- def setUp(self):
- ...
+def test_autosubmit_error():
+ ae = AutosubmitError()
+ assert 'Unhandled Error' == ae.message
+ assert 6000 == ae.code
+ assert None is ae.trace
+ assert 'Unhandled Error' == ae.error_message
+ assert ' ' == str(ae)
- def test_autosubmit_error(self):
- ae = AutosubmitError()
- assert 'Unhandled Error' == ae.message
- assert 6000 == ae.code
- assert None is ae.trace
- assert 'Unhandled Error' == ae.error_message
- assert ' ' == str(ae)
- def test_autosubmit_error_error_message(self):
- ae = AutosubmitError(trace='ERROR!')
- assert 'ERROR! Unhandled Error' == ae.error_message
+def test_autosubmit_error_error_message():
+ ae = AutosubmitError(trace='ERROR!')
+ assert 'ERROR! Unhandled Error' == ae.error_message
- def test_autosubmit_critical(self):
- ac = AutosubmitCritical()
- assert 'Unhandled Error' == ac.message
- assert 7000 == ac.code
- assert None is ac.trace
- assert ' ' == str(ac)
+def test_autosubmit_critical():
+ ac = AutosubmitCritical()
+ assert 'Unhandled Error' == ac.message
+ assert 7000 == ac.code
+ assert None is ac.trace
+ assert ' ' == str(ac)
diff --git a/test/unit/test_machinefiles_wrapper.py b/test/unit/test_machinefiles_wrapper.py
index 61972af10ae06be9e63adeb4a5b13e72c4e189be..f43774d43dfea8977a2247a2fe2d93ccc7781733 100644
--- a/test/unit/test_machinefiles_wrapper.py
+++ b/test/unit/test_machinefiles_wrapper.py
@@ -1,4 +1,18 @@
-from unittest import TestCase
+# Copyright 2015-2023 Earth Sciences Department, BSC-CNS
+# This file is part of Autosubmit.
+#
+# Autosubmit is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Autosubmit is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Autosubmit. If not, see <http://www.gnu.org/licenses/>.
import collections
import textwrap
@@ -6,94 +20,95 @@ from math import ceil
from autosubmit.platforms.wrappers.wrapper_builder import PythonWrapperBuilder
-
-class TestMachinefiles(TestCase):
-
- def setUp(self):
- self.job_scripts = ['JOB_1', 'JOB_2', 'JOB_3']
-
- def test_job_less_than_48_cores_standard(self):
- num_processors = 60
- jobs_resources = {'MACHINEFILES': 'STANDARD', 'JOB': {'PROCESSORS': '20', 'TASKS': '48'},
- 'PROCESSORS_PER_NODE': '48'}
-
- wrapper_builder = PythonWrapperBuilder(header_directive='', jobs_scripts=self.job_scripts,
- num_processors=num_processors, expid='a000',
- jobs_resources=jobs_resources,threads='1',retrials=0,wallclock_by_level=None,num_processors_value=num_processors)
-
- nodes = self._create_nodelist(num_processors)
- cores_list = wrapper_builder.build_cores_list()
- machinefiles_code = wrapper_builder.get_machinefile_function().replace("_NEWLINE_", '\\n')
-
- result = dict()
-
- script = textwrap.dedent("""
- from math import ceil
-
- all_nodes = {0}
- section = 'JOB'
- {1}
- machinefiles_dict = dict()
- for job in {2}:
- {3}
- machinefiles_dict[job] = machines
- """).format(nodes, cores_list, self.job_scripts, wrapper_builder._indent(machinefiles_code, 4))
-
- exec (script, result)
-
- machinefiles_dict = result["machinefiles_dict"]
- all_machines = list()
- for job, machines in machinefiles_dict.items():
- machines = machines.split("\n")[:-1]
- job_section = job.split("_")[0]
- job_cores = int(jobs_resources[job_section]['PROCESSORS'])
- self.assertEqual(len(machines), job_cores)
- all_machines += machines
-
- machines_count = collections.Counter(all_machines)
- for count in list(machines_count.values()):
- self.assertLessEqual(count, int(jobs_resources['PROCESSORS_PER_NODE']))
-
- def test_job_more_than_48_cores_standard(self):
- num_processors = 150
- jobs_resources = {'MACHINEFILES': 'STANDARD', 'JOB': {'PROCESSORS': '50', 'TASKS': '48'},
- 'PROCESSORS_PER_NODE': '48'}
-
- wrapper_builder = PythonWrapperBuilder(header_directive='', jobs_scripts=self.job_scripts,
- num_processors=num_processors, expid='a000',
- jobs_resources=jobs_resources,threads='1',retrials=0,wallclock_by_level=None,num_processors_value=num_processors)
-
- nodes = self._create_nodelist(num_processors)
- cores_list = wrapper_builder.build_cores_list()
- machinefiles_code = wrapper_builder.get_machinefile_function().replace("_NEWLINE_", '\\n')
-
- result = dict()
-
- script = textwrap.dedent("""
- from math import ceil
-
- all_nodes = {0}
- section = 'JOB'
- {1}
- machinefiles_dict = dict()
- for job in {2}:
- {3}
- machinefiles_dict[job] = machines
- """).format(nodes, cores_list, self.job_scripts, wrapper_builder._indent(machinefiles_code, 4))
-
- exec (script, result)
- machinefiles_dict = result["machinefiles_dict"]
- for job, machines in machinefiles_dict.items():
- machines = machines.split("\n")[:-1]
- job_section = job.split("_")[0]
- job_cores = int(jobs_resources[job_section]['PROCESSORS'])
- self.assertEqual(len(machines), job_cores)
-
- def _create_nodelist(self, num_cores):
- num_nodes = int(ceil(num_cores/float(48)))
-
- node_list = []
-
- for i in range(num_nodes):
- node_list.append('node_'+str(i))
- return node_list
\ No newline at end of file
+job_scripts = ['JOB_1', 'JOB_2', 'JOB_3']
+
+
+def test_job_less_than_48_cores_standard():
+ num_processors = 60
+ jobs_resources = {'MACHINEFILES': 'STANDARD', 'JOB': {'PROCESSORS': '20', 'TASKS': '48'},
+ 'PROCESSORS_PER_NODE': '48'}
+
+ wrapper_builder = PythonWrapperBuilder(header_directive='', jobs_scripts=job_scripts,
+ num_processors=num_processors, expid='a000',
+ jobs_resources=jobs_resources, threads='1', retrials=0,
+ wallclock_by_level=None, num_processors_value=num_processors)
+
+ nodes = _create_nodelist(num_processors)
+ cores_list = wrapper_builder.build_cores_list()
+ machinefiles_code = wrapper_builder.get_machinefile_function().replace("_NEWLINE_", '\\n')
+
+ result = dict()
+
+ script = textwrap.dedent("""
+ from math import ceil
+
+ all_nodes = {0}
+ section = 'JOB'
+ {1}
+ machinefiles_dict = dict()
+ for job in {2}:
+ {3}
+ machinefiles_dict[job] = machines
+ """).format(nodes, cores_list, job_scripts, wrapper_builder._indent(machinefiles_code, 4))
+
+ exec(script, result)
+
+ machinefiles_dict = result["machinefiles_dict"]
+ all_machines = list()
+ for job, machines in machinefiles_dict.items():
+ machines = machines.split("\n")[:-1]
+ job_section = job.split("_")[0]
+ job_cores = int(jobs_resources[job_section]['PROCESSORS'])
+ assert len(machines) == job_cores
+ all_machines += machines
+
+ machines_count = collections.Counter(all_machines)
+ for count in list(machines_count.values()):
+ assert count <= int(jobs_resources['PROCESSORS_PER_NODE'])
+
+
+def test_job_more_than_48_cores_standard():
+ num_processors = 150
+ jobs_resources = {'MACHINEFILES': 'STANDARD', 'JOB': {'PROCESSORS': '50', 'TASKS': '48'},
+ 'PROCESSORS_PER_NODE': '48'}
+
+ wrapper_builder = PythonWrapperBuilder(header_directive='', jobs_scripts=job_scripts,
+ num_processors=num_processors, expid='a000',
+ jobs_resources=jobs_resources, threads='1', retrials=0,
+ wallclock_by_level=None, num_processors_value=num_processors)
+
+ nodes = _create_nodelist(num_processors)
+ cores_list = wrapper_builder.build_cores_list()
+ machinefiles_code = wrapper_builder.get_machinefile_function().replace("_NEWLINE_", '\\n')
+
+ result = dict()
+
+ script = textwrap.dedent("""
+ from math import ceil
+
+ all_nodes = {0}
+ section = 'JOB'
+ {1}
+ machinefiles_dict = dict()
+ for job in {2}:
+ {3}
+ machinefiles_dict[job] = machines
+ """).format(nodes, cores_list, job_scripts, wrapper_builder._indent(machinefiles_code, 4))
+
+ exec(script, result)
+ machinefiles_dict = result["machinefiles_dict"]
+ for job, machines in machinefiles_dict.items():
+ machines = machines.split("\n")[:-1]
+ job_section = job.split("_")[0]
+ job_cores = int(jobs_resources[job_section]['PROCESSORS'])
+ assert len(machines) == job_cores
+
+
+def _create_nodelist(num_cores):
+ num_nodes = int(ceil(num_cores / float(48)))
+
+ node_list = []
+
+ for i in range(num_nodes):
+ node_list.append('node_' + str(i))
+ return node_list
diff --git a/test/unit/test_paramiko_platform.py b/test/unit/test_paramiko_platform.py
index fdfee7e9f9b81f34ef9224841c42f93ec18fc987..114226dbb011535e548b570b563884158eb84d0c 100644
--- a/test/unit/test_paramiko_platform.py
+++ b/test/unit/test_paramiko_platform.py
@@ -1,116 +1,93 @@
-from collections import namedtuple
-from unittest import TestCase
+# Copyright 2015-2023 Earth Sciences Department, BSC-CNS
+# This file is part of Autosubmit.
+#
+# Autosubmit is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Autosubmit is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Autosubmit.  If not, see <http://www.gnu.org/licenses/>.
-from tempfile import TemporaryDirectory
-from unittest.mock import MagicMock, patch
+from pytest import raises
from autosubmit.job.job_common import Status
-from autosubmit.platforms.paramiko_platform import ParamikoPlatform
from log.log import AutosubmitError
-class TestParamikoPlatform(TestCase):
+def test_check_Alljobs_send_command1_raises_autosubmit_error(autosubmit_exp, mocker):
+ exp = autosubmit_exp('a000')
+ mocker.patch('autosubmit.platforms.paramiko_platform.sleep')
+ mock_log = mocker.patch('autosubmit.platforms.paramiko_platform.Log')
+ platform = exp.platform
+    # get_checkAlljobs_cmd raises NotImplementedError in the base class; mock it so we can reach the error under test.
+ platform.get_checkAlljobs_cmd = mocker.MagicMock()
+ platform.get_checkAlljobs_cmd.side_effect = ['ls']
+ # Raise the AE error here.
+ platform.send_command = mocker.MagicMock()
+ ae = AutosubmitError(message='Test', code=123, trace='ERR!')
+ platform.send_command.side_effect = ae
+ as_conf = mocker.MagicMock()
+ as_conf.get_copy_remote_logs.return_value = None
+ job = mocker.MagicMock()
+ job.id = 'TEST'
+ job.name = 'TEST'
+ with raises(AutosubmitError) as e:
+ # Retries is -1 so that it skips the retry code block completely,
+ # as we are not interested in testing that part here.
+ platform.check_Alljobs(
+ job_list=[(job, None)],
+ as_conf=as_conf,
+ retries=-1)
+ assert e.value.message == 'Some Jobs are in Unknown status'
+ assert e.value.code == 6008
+ assert e.value.trace is None
- Config = namedtuple('Config', ['LOCAL_ROOT_DIR', 'LOCAL_TMP_DIR'])
+ assert mock_log.warning.called
+ assert mock_log.warning.call_args[0][1] == job.id
+ assert mock_log.warning.call_args[0][2] == platform.name
+ assert mock_log.warning.call_args[0][3] == Status.UNKNOWN
- def setUp(self):
- self.local_root_dir = TemporaryDirectory()
- self.config = {
- "LOCAL_ROOT_DIR" : self.local_root_dir.name,
- "LOCAL_TMP_DIR" : 'tmp'
- }
- self.platform = ParamikoPlatform(expid='a000', name='local', config=self.config)
- self.platform.job_status = {
- 'COMPLETED': [],
- 'RUNNING': [],
- 'QUEUING': [],
- 'FAILED': []
- }
- def tearDown(self) -> None:
- self.local_root_dir.cleanup()
+def test_check_Alljobs_send_command2_raises_autosubmit_error(autosubmit_exp, mocker):
+ exp = autosubmit_exp('a000')
+ platform = exp.platform
+ mocker.patch('autosubmit.platforms.paramiko_platform.sleep')
+    # get_checkAlljobs_cmd raises NotImplementedError in the base class; mock it so we can reach the error under test.
+ platform.get_checkAlljobs_cmd = mocker.MagicMock()
+ platform.get_checkAlljobs_cmd.side_effect = ['ls']
+ # Raise the AE error here.
+ platform.send_command = mocker.MagicMock()
+ ae = AutosubmitError(message='Test', code=123, trace='ERR!')
+ # Here the first time ``send_command`` is called it returns None, but
+ # the second time it will raise the AutosubmitError for our test case.
+ platform.send_command.side_effect = [None, ae]
+ # Also need to make this function return False...
+ platform._check_jobid_in_queue = mocker.MagicMock(return_value=False)
+ # Then it will query the job status of the job, see further down as we set it
+ as_conf = mocker.MagicMock()
+ as_conf.get_copy_remote_logs.return_value = None
+ job = mocker.MagicMock()
+ job.id = 'TEST'
+ job.name = 'TEST'
+ job.status = Status.UNKNOWN
- def test_paramiko_platform_constructor(self):
- assert self.platform.name == 'local'
- assert self.platform.expid == 'a000'
- assert self.config is self.platform.config
+ platform.get_queue_status = mocker.MagicMock(side_effect=None)
- assert self.platform.header is None
- assert self.platform.wrapper is None
-
- assert len(self.platform.job_status) == 4
-
- @patch('autosubmit.platforms.paramiko_platform.Log')
- @patch('autosubmit.platforms.paramiko_platform.sleep')
- def test_check_Alljobs_send_command1_raises_autosubmit_error(self, mock_sleep, mock_log):
- """
- Args:
- mock_sleep (MagicMock): mocking because the function sleeps for 5 seconds.
- """
- # Because it raises a NotImplementedError, but we want to skip it to test an error...
- self.platform.get_checkAlljobs_cmd = MagicMock()
- self.platform.get_checkAlljobs_cmd.side_effect = ['ls']
- # Raise the AE error here.
- self.platform.send_command = MagicMock()
- ae = AutosubmitError(message='Test', code=123, trace='ERR!')
- self.platform.send_command.side_effect = ae
- as_conf = MagicMock()
- as_conf.get_copy_remote_logs.return_value = None
- job = MagicMock()
- job.id = 'TEST'
- job.name = 'TEST'
- with self.assertRaises(AutosubmitError) as cm:
- # Retries is -1 so that it skips the retry code block completely,
- # as we are not interested in testing that part here.
- self.platform.check_Alljobs(
- job_list=[(job, None)],
- as_conf=as_conf,
- retries=-1)
- assert cm.exception.message == 'Some Jobs are in Unknown status'
- assert cm.exception.code == 6008
- assert cm.exception.trace is None
-
- assert mock_log.warning.called
- assert mock_log.warning.call_args[0][1] == job.id
- assert mock_log.warning.call_args[0][2] == self.platform.name
- assert mock_log.warning.call_args[0][3] == Status.UNKNOWN
-
- @patch('autosubmit.platforms.paramiko_platform.sleep')
- def test_check_Alljobs_send_command2_raises_autosubmit_error(self, mock_sleep):
- """
- Args:
- mock_sleep (MagicMock): mocking because the function sleeps for 5 seconds.
- """
- # Because it raises a NotImplementedError, but we want to skip it to test an error...
- self.platform.get_checkAlljobs_cmd = MagicMock()
- self.platform.get_checkAlljobs_cmd.side_effect = ['ls']
- # Raise the AE error here.
- self.platform.send_command = MagicMock()
- ae = AutosubmitError(message='Test', code=123, trace='ERR!')
- # Here the first time ``send_command`` is called it returns None, but
- # the second time it will raise the AutosubmitError for our test case.
- self.platform.send_command.side_effect = [None, ae]
- # Also need to make this function return False...
- self.platform._check_jobid_in_queue = MagicMock(return_value = False)
- # Then it will query the job status of the job, see further down as we set it
- as_conf = MagicMock()
- as_conf.get_copy_remote_logs.return_value = None
- job = MagicMock()
- job.id = 'TEST'
- job.name = 'TEST'
- job.status = Status.UNKNOWN
-
- self.platform.get_queue_status = MagicMock(side_effect=None)
-
- with self.assertRaises(AutosubmitError) as cm:
- # Retries is -1 so that it skips the retry code block completely,
- # as we are not interested in testing that part here.
- self.platform.check_Alljobs(
- job_list=[(job, None)],
- as_conf=as_conf,
- retries=1)
- # AS raises an exception with the message using the previous exception's
- # ``error_message``, but error code 6000 and no trace.
- assert cm.exception.message == ae.error_message
- assert cm.exception.code == 6000
- assert cm.exception.trace is None
+ with raises(AutosubmitError) as e:
+        # Retries is 1 here (unlike the test above), so the retry code block
+        # runs and the retried failure surfaces as an AutosubmitError.
+ platform.check_Alljobs(
+ job_list=[(job, None)],
+ as_conf=as_conf,
+ retries=1)
+ # AS raises an exception with the message using the previous exception's
+ # ``error_message``, but error code 6000 and no trace.
+ assert e.value.message == ae.error_message
+ assert e.value.code == 6000
+ assert e.value.trace is None
diff --git a/test/unit/test_pjm.py b/test/unit/test_pjm.py
index c20b0e6a899b129dc62799333b6c85373362ed10..16df8c0452e3e37bf0bb061ed778cd30ff344d41 100644
--- a/test/unit/test_pjm.py
+++ b/test/unit/test_pjm.py
@@ -1,121 +1,88 @@
-from unittest import TestCase
-from unittest.mock import Mock,MagicMock, patch
-from autosubmitconfigparser.config.configcommon import AutosubmitConfig
-from autosubmitconfigparser.config.yamlparser import YAMLParserFactory
-from autosubmit.autosubmit import Autosubmit
-import autosubmit.platforms.pjmplatform
import pytest
+from autosubmitconfigparser.config.configcommon import AutosubmitConfig
+from autosubmit.autosubmit import Autosubmit
+from autosubmit.platforms.submitter import Submitter
+from autosubmit.platforms.paramiko_platform import ParamikoPlatform
from pathlib import Path
-from autosubmit.platforms.platform import Platform
-from autosubmit.platforms.pjmplatform import PJMPlatform
-import autosubmit.platforms.headers.pjm_header
-from tempfile import TemporaryDirectory
-from datetime import datetime
-from autosubmit.job.job import Job, Status
-import inspect
-class FakeBasicConfig:
- def props(self):
- pr = {}
- for name in dir(self):
- value = getattr(self, name)
- if not name.startswith('__') and not inspect.ismethod(value) and not inspect.isfunction(value):
- pr[name] = value
- return pr
- DB_DIR = '/dummy/db/dir'
- DB_FILE = '/dummy/db/file'
- DB_PATH = '/dummy/db/path'
- LOCAL_ROOT_DIR = '/dummy/local/root/dir'
- LOCAL_TMP_DIR = '/dummy/local/temp/dir'
- LOCAL_PROJ_DIR = '/dummy/local/proj/dir'
- LOCAL_ASLOG_DIR = '/dummy/local/aslog/dir'
- DEFAULT_PLATFORMS_CONF = ''
- DEFAULT_JOBS_CONF = ''
- @staticmethod
- def read():
- return
-class TestPJM(TestCase):
- def setUp(self) -> None:
- self.exp_id = 'a000'
- self.as_conf = MagicMock()
- with patch.object(Path, 'exists') as mock_exists:
- mock_exists.return_value = True
- self.as_conf = AutosubmitConfig(self.exp_id, FakeBasicConfig, YAMLParserFactory())
- self.as_conf.experiment_data = dict()
- self.as_conf.experiment_data["DEFAULT"] = dict()
- self.as_conf.experiment_data["DEFAULT"]["HPCARCH"] = "ARM"
- yml_file = Path(__file__).resolve().parent / "files/fake-jobs.yml"
- factory = YAMLParserFactory()
- parser = factory.create_parser()
- parser.data = parser.load(yml_file)
- self.as_conf.experiment_data.update(parser.data)
- yml_file = Path(__file__).resolve().parent / "files/fake-platforms.yml"
- factory = YAMLParserFactory()
- parser = factory.create_parser()
- parser.data = parser.load(yml_file)
- self.as_conf.experiment_data.update(parser.data)
- self.setUp_pjm()
+from typing import Callable, List, Tuple
+
+from textwrap import dedent
+SECTION = 'ARM'
+OUT = dedent("""\
+ JOB_ID ST REASON
+ 167727 EXT COMPLETED
+ 167728 RNO -
+ 167729 RNE -
+ 167730 RUN -
+ 167732 ACC -
+ 167733 QUE -
+ 167734 RNA -
+ 167735 RNP -
+ 167736 HLD ASHOLD
+ 167737 ERR -
+ 167738 CCL -
+ 167739 RJT -
+ """)
- @patch("builtins.open",MagicMock())
- def setUp_pjm(self):
- MagicMock().write = MagicMock()
- MagicMock().os.path.join = MagicMock()
- self.section = 'ARM'
- self.submitted_ok = "[INFO] PJM 0000 pjsub Job 167661 submitted."
- self.submitted_fail = "[ERR.] PJM 0057 pjsub node=32 is greater than the upper limit (24)."
- self.out= """JOB_ID ST REASON
-167727 EXT COMPLETED
-167728 RNO -
-167729 RNE -
-167730 RUN -
-167732 ACC -
-167733 QUE -
-167734 RNA -
-167735 RNP -
-167736 HLD ASHOLD
-167737 ERR -
-167738 CCL -
-167739 RJT -
-"""
- self.completed_jobs = ["167727"]
- self.running_jobs = ["167728","167729","167730"]
- self.queued_jobs = ["167732","167733","167734","167735","167736"]
- self.failed_jobs = ["167737","167738","167739"]
- self.jobs_that_arent_listed = ["3442432423", "238472364782", "1728362138712"]
- self.completed_jobs_cmd = "167727"
- self.running_jobs_cmd = "167728+167729+167730"
- self.queued_jobs_cmd = "167732+167733+167734+167735+167736"
- self.failed_jobs_cmd = "167737+167738+167739"
- self.jobs_that_arent_listed_cmd = "3442432423+238472364782+1728362138712"
- self.submitter = Autosubmit._get_submitter(self.as_conf)
- self.submitter.load_platforms(self.as_conf)
- self.remote_platform = self.submitter.platforms[self.section]
- def test_parse_Alljobs_output(self):
- """Test parsing of all jobs output."""
- for job_id in self.completed_jobs:
- assert self.remote_platform.parse_Alljobs_output(self.out,job_id) in self.remote_platform.job_status["COMPLETED"]
- for job_id in self.failed_jobs:
- assert self.remote_platform.parse_Alljobs_output(self.out,job_id) in self.remote_platform.job_status["FAILED"]
- for job_id in self.queued_jobs:
- assert self.remote_platform.parse_Alljobs_output(self.out,job_id) in self.remote_platform.job_status["QUEUING"]
- for job_id in self.running_jobs:
- assert self.remote_platform.parse_Alljobs_output(self.out,job_id) in self.remote_platform.job_status["RUNNING"]
- for job_id in self.jobs_that_arent_listed:
- assert self.remote_platform.parse_Alljobs_output(self.out,job_id) == []
+@pytest.mark.parametrize(
+ 'jobs, cmd, status, reasons',
+ [
+ (['167727'], '167727', 'COMPLETED', ['COMPLETED']),
+ (["167728", "167729", "167730"], '167728+167729+167730', 'RUNNING', ['-', '-', '-']),
+ (["167732", "167733", "167734", "167735", "167736"], '167732+167733+167734+167735+167736', 'QUEUING',
+ ['-', '-', '-', '-', 'ASHOLD']),
+ (["167737", "167738", "167739"], '167737+167738+167739', 'FAILED', ['-', '-', '-']),
+ (["3442432423", "238472364782", "1728362138712"], '3442432423+238472364782+1728362138712', None,
+ [[], [], []])
+ ]
+)
+def test_parse_all_jobs_output(
+ jobs: List[str], cmd: str, status: str, reasons: List[str],
+ pjm_setup: Tuple[AutosubmitConfig, Submitter, ParamikoPlatform]
+):
+ """Test parsing of all jobs output."""
+ as_conf, submitter, remote_platform = pjm_setup
+ for job_id, reason in zip(jobs, reasons):
+ if status is not None:
+ assert remote_platform.parse_Alljobs_output(OUT, job_id) in remote_platform.job_status[status]
+ else:
+ assert remote_platform.parse_Alljobs_output(OUT, job_id) == []
+ assert remote_platform.parse_queue_reason(OUT, job_id) == reason
- def test_get_submitted_job_id(self):
- """Test parsing of submitted job id."""
- output = self.remote_platform.get_submitted_job_id(self.submitted_ok)
- assert output == [167661]
- output = self.remote_platform.get_submitted_job_id(self.submitted_fail)
- assert output == []
- def test_parse_queue_reason(self):
- """Test parsing of queue reason."""
- output = self.remote_platform.parse_queue_reason(self.out, self.completed_jobs[0])
- assert output == "COMPLETED"
+@pytest.fixture
+def pjm_setup(
+ autosubmit_exp: Callable,
+ create_as_conf: Callable) -> Tuple[AutosubmitConfig, Submitter, ParamikoPlatform]:
+ exp = autosubmit_exp('a000')
+ as_conf = create_as_conf(
+ autosubmit_exp=exp,
+ yaml_files=[
+ Path(__file__).resolve().parent / "files/fake-jobs.yml",
+ Path(__file__).resolve().parent / "files/fake-platforms.yml"
+ ],
+ experiment_data={
+ 'DEFAULT': {
+ 'HPCARCH': 'ARM'
+ }
+ }
+ )
+ submitter = Autosubmit._get_submitter(as_conf)
+ submitter.load_platforms(as_conf)
+ remote_platform = submitter.platforms[SECTION]
+ return as_conf, submitter, remote_platform
+def test_get_submitted_job_id(pjm_setup: Tuple[AutosubmitConfig, Submitter, ParamikoPlatform]):
+ """Test parsing of submitted job id."""
+ submitted_ok = "[INFO] PJM 0000 pjsub Job 167661 submitted."
+ submitted_fail = "[ERR.] PJM 0057 pjsub node=32 is greater than the upper limit (24)."
+ as_conf, submitter, remote_platform = pjm_setup
+ output = remote_platform.get_submitted_job_id(submitted_ok)
+ assert output == [167661]
+ output = remote_platform.get_submitted_job_id(submitted_fail)
+ assert output == []
diff --git a/test/unit/test_platform_monitor.py b/test/unit/test_platform_monitor.py
index f2813cf6635a2ea95de1b1a5d9e33e10b9fe240a..6d269194de5e804b8b207502c74ddf1d47aa1759 100644
--- a/test/unit/test_platform_monitor.py
+++ b/test/unit/test_platform_monitor.py
@@ -1,94 +1,88 @@
-#!/usr/bin/env python3
-
-# Copyright 2015-2020 Earth Sciences Department, BSC-CNS
+# Copyright 2015-2023 Earth Sciences Department, BSC-CNS
# This file is part of Autosubmit.
-
+#
# Autosubmit is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
-
+#
# Autosubmit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
-
+#
# You should have received a copy of the GNU General Public License
# along with Autosubmit.  If not, see <http://www.gnu.org/licenses/>.
-import unittest
from autosubmit.history.platform_monitor import platform_utils as utils
from autosubmit.history.platform_monitor.slurm_monitor import SlurmMonitor
-class TestSlurmMonitor(unittest.TestCase):
- def test_reader_on_simple_wrapper_example_1(self):
+
+def test_reader_on_simple_wrapper_example_1():
ssh_output = utils.read_example("wrapper1.txt")
- slurm_monitor = SlurmMonitor(ssh_output)
+ slurm_monitor = SlurmMonitor(ssh_output)
# Header
- self.assertTrue(slurm_monitor.input_items[0].is_batch is False)
- self.assertTrue(slurm_monitor.input_items[0].is_detail is False)
- self.assertTrue(slurm_monitor.input_items[0].is_extern is False)
- self.assertTrue(slurm_monitor.input_items[0].is_header is True)
- self.assertTrue(slurm_monitor.input_items[0].is_detail is False)
+ assert slurm_monitor.input_items[0].is_batch is False
+ assert slurm_monitor.input_items[0].is_detail is False
+ assert slurm_monitor.input_items[0].is_extern is False
+ assert slurm_monitor.input_items[0].is_header is True
+ assert slurm_monitor.input_items[0].is_detail is False
# Batch
- self.assertTrue(slurm_monitor.input_items[1].is_batch is True)
- self.assertTrue(slurm_monitor.input_items[1].is_detail is True)
- self.assertTrue(slurm_monitor.input_items[1].is_extern is False)
- self.assertTrue(slurm_monitor.input_items[1].is_header is False)
- self.assertTrue(slurm_monitor.input_items[1].is_detail is True)
+ assert slurm_monitor.input_items[1].is_batch is True
+ assert slurm_monitor.input_items[1].is_detail is True
+ assert slurm_monitor.input_items[1].is_extern is False
+ assert slurm_monitor.input_items[1].is_header is False
+ assert slurm_monitor.input_items[1].is_detail is True
# Extern
- self.assertTrue(slurm_monitor.input_items[2].is_batch is False)
- self.assertTrue(slurm_monitor.input_items[2].is_detail is True)
- self.assertTrue(slurm_monitor.input_items[2].is_extern is True)
- self.assertTrue(slurm_monitor.input_items[2].is_header is False)
- self.assertTrue(slurm_monitor.input_items[2].is_detail is True)
+ assert slurm_monitor.input_items[2].is_batch is False
+ assert slurm_monitor.input_items[2].is_detail is True
+ assert slurm_monitor.input_items[2].is_extern is True
+ assert slurm_monitor.input_items[2].is_header is False
+ assert slurm_monitor.input_items[2].is_detail is True
header = slurm_monitor.header
batch = slurm_monitor.batch
extern = slurm_monitor.extern
- self.assertIsNotNone(header)
- self.assertIsNotNone(batch)
- self.assertIsNotNone(extern)
+ assert header is not None
+ assert batch is not None
+ assert extern is not None
# print("{0} {1} <- {2}".format(batch.name, batch.energy, batch.energy_str))
# print("{0} {1} <- {2}".format(extern.name, extern.energy, extern.energy_str))
# print("{0} {1} <- {2}".format(header.name, header.energy, header.energy_str))
- self.assertTrue(slurm_monitor.steps_plus_extern_approximate_header_energy())
+ assert slurm_monitor.steps_plus_extern_approximate_header_energy()
-
- def test_reader_on_simple_wrapper_example_2(self):
- ssh_output = utils.read_example("wrapper2.txt") # not real
- slurm_monitor = SlurmMonitor(ssh_output)
+
+def test_reader_on_simple_wrapper_example_2():
+ ssh_output = utils.read_example("wrapper2.txt") # not real
+ slurm_monitor = SlurmMonitor(ssh_output)
# Header
- self.assertTrue(slurm_monitor.input_items[0].is_batch is False)
- self.assertTrue(slurm_monitor.input_items[0].is_detail is False)
- self.assertTrue(slurm_monitor.input_items[0].is_step is False)
- self.assertTrue(slurm_monitor.input_items[0].is_extern is False)
- self.assertTrue(slurm_monitor.input_items[0].is_header is True)
+ assert slurm_monitor.input_items[0].is_batch is False
+ assert slurm_monitor.input_items[0].is_detail is False
+ assert slurm_monitor.input_items[0].is_step is False
+ assert slurm_monitor.input_items[0].is_extern is False
+ assert slurm_monitor.input_items[0].is_header is True
# Batch
- self.assertTrue(slurm_monitor.input_items[1].is_batch is True)
- self.assertTrue(slurm_monitor.input_items[1].is_detail is True)
- self.assertTrue(slurm_monitor.input_items[1].is_step is False)
- self.assertTrue(slurm_monitor.input_items[1].is_extern is False)
- self.assertTrue(slurm_monitor.input_items[1].is_header is False)
+ assert slurm_monitor.input_items[1].is_batch is True
+ assert slurm_monitor.input_items[1].is_detail is True
+ assert slurm_monitor.input_items[1].is_step is False
+ assert slurm_monitor.input_items[1].is_extern is False
+ assert slurm_monitor.input_items[1].is_header is False
# Step 0
- self.assertTrue(slurm_monitor.input_items[2].is_batch is False)
- self.assertTrue(slurm_monitor.input_items[2].is_detail is True)
- self.assertTrue(slurm_monitor.input_items[2].is_step is True)
- self.assertTrue(slurm_monitor.input_items[2].is_extern is False)
- self.assertTrue(slurm_monitor.input_items[2].is_header is False)
- self.assertTrue(slurm_monitor.input_items[2].step_number >= 0)
-
- def test_reader_on_big_wrapper(self):
+ assert slurm_monitor.input_items[2].is_batch is False
+ assert slurm_monitor.input_items[2].is_detail is True
+ assert slurm_monitor.input_items[2].is_step is True
+ assert slurm_monitor.input_items[2].is_extern is False
+ assert slurm_monitor.input_items[2].is_header is False
+ assert slurm_monitor.input_items[2].step_number >= 0
+
+
+def test_reader_on_big_wrapper():
ssh_output = utils.read_example("wrapper_big.txt")
slurm_monitor = SlurmMonitor(ssh_output)
- self.assertTrue(slurm_monitor.step_count == 30)
+ assert slurm_monitor.step_count == 30
header = slurm_monitor.header
batch = slurm_monitor.batch
extern = slurm_monitor.extern
- self.assertIsNotNone(header)
- self.assertIsNotNone(batch)
- self.assertIsNotNone(extern)
- self.assertTrue(slurm_monitor.steps_plus_extern_approximate_header_energy())
-
-
-if __name__ == '__main__':
- unittest.main()
\ No newline at end of file
+ assert header is not None
+ assert batch is not None
+ assert extern is not None
+ assert slurm_monitor.steps_plus_extern_approximate_header_energy()
diff --git a/test/unit/test_profiler.py b/test/unit/test_profiler.py
index cf99067eaaf349f507a6c59f53063d45493cd9e8..85cf7c37efd9a6d41a509df65a9dd03d66f3d1c0 100644
--- a/test/unit/test_profiler.py
+++ b/test/unit/test_profiler.py
@@ -1,48 +1,75 @@
-from unittest import TestCase, mock
+# Copyright 2015-2023 Earth Sciences Department, BSC-CNS
+# This file is part of Autosubmit.
+#
+# Autosubmit is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Autosubmit is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Autosubmit.  If not, see <http://www.gnu.org/licenses/>.
+
+import pytest
+from pytest_mock import MockerFixture
+
from autosubmit.profiler.profiler import Profiler
from log.log import AutosubmitCritical
-class TestProfiler(TestCase):
- def setUp(self):
- self.profiler = Profiler("a000")
+@pytest.fixture
+def profiler():
+ return Profiler("a000")
+
+
+# Black box techniques for status machine based software
+#
+# O---->__init__------> start
+# |
+# |
+# stop ----> report --->0
+
+# Transition coverage
+def test_transitions(profiler):
+ # __init__ -> start
+ profiler.start()
+
+ # start -> stop
+ profiler.stop()
+
- # Black box techniques for status machine based software
- #
- # O---->__init__------> start
- # |
- # |
- # stop ----> report --->0
+def test_transitions_fail_cases(profiler):
+ # __init__ -> stop
+ with pytest.raises(AutosubmitCritical):
+ profiler.stop()
- # Transition coverage
- def test_transitions(self):
- # __init__ -> start
- self.profiler.start()
+ # start -> start
+ profiler.start()
- # start -> stop
- self.profiler.stop()
+ with pytest.raises(AutosubmitCritical):
+ profiler.start()
- def test_transitions_fail_cases(self):
- # __init__ -> stop
- self.assertRaises(AutosubmitCritical, self.profiler.stop)
+ # stop -> stop
+ profiler.stop()
+ with pytest.raises(AutosubmitCritical):
+ profiler.stop()
- # start -> start
- self.profiler.start()
- self.assertRaises(AutosubmitCritical, self.profiler.start)
- # stop -> stop
- self.profiler.stop()
- self.assertRaises(AutosubmitCritical, self.profiler.stop)
+# White box tests
+def test_writing_permission_check_fails(profiler, mocker: MockerFixture):
+ mock_response = mocker.patch("os.access")
+ mock_response.return_value = False
- # White box tests
- @mock.patch("os.access")
- def test_writing_permission_check_fails(self, mock_response):
- mock_response.return_value = False
+ profiler.start()
+ with pytest.raises(AutosubmitCritical):
+ profiler.stop()
- self.profiler.start()
- self.assertRaises(AutosubmitCritical, self.profiler.stop)
- def test_memory_profiling_loop(self):
- self.profiler.start()
- bytearray(1024*1024)
- self.profiler.stop()
+def test_memory_profiling_loop(profiler):
+ profiler.start()
+ bytearray(1024 * 1024)
+ profiler.stop()
diff --git a/test/unit/test_setup.py b/test/unit/test_setup.py
deleted file mode 100644
index a8925d4fa1831f9e021bc72f23be43b819d358e3..0000000000000000000000000000000000000000
--- a/test/unit/test_setup.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import os
-from unittest import TestCase
-
-
-#class TestSetup(TestCase):
- #def test_setup_check_works(self):
- # exit_code = run_setup_check()
- # self.assertEqual(0, exit_code)
-
-
-def run_setup_check():
- return os.system('python ' + get_directory_of_this_file() + '/../../setup.py check')
-
-
-def get_directory_of_this_file():
- return os.path.dirname(os.path.realpath(__file__))
diff --git a/test/unit/test_slurm_platform.py b/test/unit/test_slurm_platform.py
index 0bfe849554deba4d1b7fd31fa88dfcd09e1362ef..d4b4a99e90bdc055713e79779241c71f2272dc20 100644
--- a/test/unit/test_slurm_platform.py
+++ b/test/unit/test_slurm_platform.py
@@ -1,77 +1,76 @@
-from collections import namedtuple
-from unittest import TestCase
+# Copyright 2015-2023 Earth Sciences Department, BSC-CNS
+# This file is part of Autosubmit.
+#
+# Autosubmit is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Autosubmit is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Autosubmit.  If not, see <http://www.gnu.org/licenses/>.
-from pathlib import Path
-from tempfile import TemporaryDirectory
-from unittest.mock import MagicMock
+from pytest import raises
+from pytest_mock import MockerFixture
+from typing import Callable
-from autosubmit.platforms.slurmplatform import SlurmPlatform
from log.log import AutosubmitCritical, AutosubmitError
-class TestSlurmPlatform(TestCase):
+def test_properties(autosubmit_exp: Callable):
+ exp = autosubmit_exp('a000')
+ platform = exp.platform
+ props = {
+ 'name': 'foo',
+ 'host': 'localhost1',
+ 'user': 'sam',
+ 'project': 'proj1',
+ 'budget': 100,
+ 'reservation': 1,
+ 'exclusivity': True,
+ 'hyperthreading': True,
+ 'type': 'SuperSlurm',
+ 'scratch': '/scratch/1',
+ 'project_dir': '/proj1',
+ 'root_dir': '/root_1',
+ 'partition': 'inter',
+ 'queue': 'prio1'
+ }
+ for prop, value in props.items():
+ setattr(platform, prop, value)
+ for prop, value in props.items():
+ assert value == getattr(platform, prop)
- Config = namedtuple('Config', ['LOCAL_ROOT_DIR', 'LOCAL_TMP_DIR', 'LOCAL_ASLOG_DIR'])
- def setUp(self):
- self.local_root_dir = TemporaryDirectory()
- self.config = {
- "LOCAL_ROOT_DIR" : self.local_root_dir.name,
- "LOCAL_TMP_DIR" : 'tmp',
- "LOCAL_ASLOG_DIR" : 'ASLOG_a000'
- }
- # We need to create the submission archive that AS expects to find in this location:
- p = Path(self.local_root_dir.name) / 'a000' / 'tmp' / 'ASLOG_a000'
- p.mkdir(parents=True)
- submit_platform_script = Path(p) / 'submit_local.sh'
- submit_platform_script.touch(exist_ok=True)
+def test_slurm_platform_submit_script_raises_autosubmit_critical_with_trace(
+ autosubmit_exp: Callable,
+ mocker: MockerFixture):
+ exp = autosubmit_exp('a000')
- self.platform = SlurmPlatform(expid='a000', name='local', config=self.config)
+ platform = exp.platform
- def tearDown(self) -> None:
- self.local_root_dir.cleanup()
+ package = mocker.MagicMock()
+ package.jobs.return_value = []
+ valid_packages_to_submit = [
+ package
+ ]
- def test_properties(self):
- props = {
- 'name': 'foo',
- 'host': 'localhost1',
- 'user': 'sam',
- 'project': 'proj1',
- 'budget': 100,
- 'reservation': 1,
- 'exclusivity': True,
- 'hyperthreading': True,
- 'type': 'SuperSlurm',
- 'scratch': '/scratch/1',
- 'project_dir': '/proj1',
- 'root_dir': '/root_1',
- 'partition': 'inter',
- 'queue': 'prio1'
- }
- for prop, value in props.items():
- setattr(self.platform, prop, value)
- for prop, value in props.items():
- self.assertEqual(value, getattr(self.platform, prop))
+ ae = AutosubmitError(message='invalid partition', code=123, trace='ERR!')
+ platform.submit_Script = mocker.MagicMock(side_effect=ae)
- def test_slurm_platform_submit_script_raises_autosubmit_critical_with_trace(self):
- package = MagicMock()
- package.jobs.return_value = []
- valid_packages_to_submit = [
- package
- ]
+ # AS will handle the AutosubmitError above, but then raise an AutosubmitCritical.
+ # This new error won't contain all the info from the upstream error.
+ with raises(AutosubmitCritical) as e:
+ platform.process_batch_ready_jobs(
+ valid_packages_to_submit=valid_packages_to_submit,
+ failed_packages=[]
+ )
- ae = AutosubmitError(message='invalid partition', code=123, trace='ERR!')
- self.platform.submit_Script = MagicMock(side_effect=ae)
-
- # AS will handle the AutosubmitError above, but then raise an AutosubmitCritical.
- # This new error won't contain all the info from the upstream error.
- with self.assertRaises(AutosubmitCritical) as cm:
- self.platform.process_batch_ready_jobs(
- valid_packages_to_submit=valid_packages_to_submit,
- failed_packages=[]
- )
-
- # AS will handle the error and then later will raise another error message.
- # But the AutosubmitError object we created will have been correctly used
- # without raising any exceptions (such as AttributeError).
- assert cm.exception.message != ae.message
+ # AS will handle the error and then later will raise another error message.
+ # But the AutosubmitError object we created will have been correctly used
+ # without raising any exceptions (such as AttributeError).
+ assert e.value.message != ae.message
diff --git a/test/unit/test_statistics.py b/test/unit/test_statistics.py
index 1ee5c7bec639b54bb82da42ff53f4061f94b26a9..1677416283092288593921c4b7d08bb03f7684dc 100644
--- a/test/unit/test_statistics.py
+++ b/test/unit/test_statistics.py
@@ -1,24 +1,37 @@
-import unittest
-from autosubmit.statistics.statistics import Statistics
+# Copyright 2015-2023 Earth Sciences Department, BSC-CNS
+# This file is part of Autosubmit.
+#
+# Autosubmit is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Autosubmit is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Autosubmit. If not, see <http://www.gnu.org/licenses/>.
+
+import pytest
+
+import autosubmit.database.db_structure as DbStructure
+from autosubmit.autosubmit import Autosubmit
from autosubmit.job.job_common import Status
+from autosubmit.job.job_list import JobList
from autosubmit.job.job_utils import SubJobManager, SubJob
+from autosubmit.statistics.statistics import Statistics
from autosubmitconfigparser.config.basicconfig import BasicConfig
from autosubmitconfigparser.config.configcommon import AutosubmitConfig
-from bscearth.utils.config_parser import ConfigParserFactory
-from autosubmit.autosubmit import Autosubmit
-from autosubmit.job.job_list import JobList
-# import autosubmit.experiment.common_db_requests as DbRequests
-import autosubmit.database.db_structure as DbStructure
-# from autosubmit.database.db_jobdata import JobDataStructure, ExperimentGraphDrawing
-@unittest.skip("TODO: looks like this test was used by devs to run an existing experiment a49z")
-class TestStatistics(unittest.TestCase):
- def setUp(self):
- self.expid = "a49z"
- def test_normal_execution(self):
+# from autosubmit.database.db_jobdata import JobDataStructure, ExperimentGraphDrawing
+
+@pytest.mark.skip("TODO: looks like this test was used by devs to run an existing experiment a49z")
+def test_normal_execution():
print("Testing normal execution")
- expid = self.expid
+ expid = "a49z"
period_fi = ""
period_ini = ""
ft = "Any"
@@ -31,39 +44,38 @@ class TestStatistics(unittest.TestCase):
as_conf.reload(force_load=True)
job_list = Autosubmit.load_job_list(expid, as_conf, False)
jobs_considered = [job for job in job_list.get_job_list() if job.status not in [
- Status.READY, Status.WAITING]]
+ Status.READY, Status.WAITING]]
job_to_package, package_to_jobs, _, _ = JobList.retrieve_packages(
- BasicConfig, expid, [job.name for job in job_list.get_job_list()])
+ BasicConfig, expid, [job.name for job in job_list.get_job_list()])
queue_time_fixes = {}
if job_to_package:
- current_table_structure = DbStructure.get_structure(expid, BasicConfig.STRUCTURES_DIR)
- subjobs = []
- for job in job_list.get_job_list():
- job_info = JobList.retrieve_times(job.status, job.name, job._tmp_path, make_exception=False, job_times=None, seconds=True, job_data_collection=None)
- time_total = (job_info.queue_time + job_info.run_time) if job_info else 0
- subjobs.append(
- SubJob(job.name,
- job_to_package.get(job.name, None),
- job_info.queue_time if job_info else 0,
- job_info.run_time if job_info else 0,
- time_total,
- job_info.status if job_info else Status.UNKNOWN)
- )
- queue_time_fixes = SubJobManager(subjobs, job_to_package, package_to_jobs, current_table_structure).get_collection_of_fixes_applied()
-
+ current_table_structure = DbStructure.get_structure(expid, BasicConfig.STRUCTURES_DIR)
+ subjobs = []
+ for job in job_list.get_job_list():
+ job_info = JobList.retrieve_times(job.status, job.name, job._tmp_path, make_exception=False,
+ job_times=None, seconds=True, job_data_collection=None)
+ time_total = (job_info.queue_time + job_info.run_time) if job_info else 0
+ subjobs.append(
+ SubJob(job.name,
+ job_to_package.get(job.name, None),
+ job_info.queue_time if job_info else 0,
+ job_info.run_time if job_info else 0,
+ time_total,
+ job_info.status if job_info else Status.UNKNOWN)
+ )
+ queue_time_fixes = SubJobManager(subjobs, job_to_package, package_to_jobs,
+ current_table_structure).get_collection_of_fixes_applied()
if len(jobs_considered) > 0:
- print("Get results")
- exp_stats = Statistics(jobs_considered, period_ini, period_fi, queue_time_fixes)
- exp_stats.calculate_statistics()
- exp_stats.calculate_summary()
- exp_stats.make_old_format()
- print(exp_stats.get_summary_as_list())
- failed_jobs_dict = exp_stats.build_failed_jobs_only_list()
+ print("Get results")
+ exp_stats = Statistics(jobs_considered, period_ini, period_fi, queue_time_fixes)
+ exp_stats.calculate_statistics()
+ exp_stats.calculate_summary()
+ exp_stats.make_old_format()
+ print(exp_stats.get_summary_as_list())
+ failed_jobs_dict = exp_stats.build_failed_jobs_only_list()
else:
- raise Exception("Autosubmit API couldn't find jobs that match your search criteria (Section: {0}) in the period from {1} to {2}.".format(
- ft, period_ini, period_fi))
+ raise Exception(
+ "Autosubmit API couldn't find jobs that match your search criteria (Section: {0}) in the period from {1} to {2}.".format(
+ ft, period_ini, period_fi))
return results
-
-if __name__ == '__main__':
- unittest.main()
\ No newline at end of file
diff --git a/test/unit/test_strategies.py b/test/unit/test_strategies.py
index 6be6a43f6675dc364293965381587c60955d5eb9..38e8d5458ef3f4c48508ede3d351e264c899659e 100644
--- a/test/unit/test_strategies.py
+++ b/test/unit/test_strategies.py
@@ -1,66 +1,78 @@
-#!/usr/bin/python
-
-# Copyright 2015-2020 Earth Sciences Department, BSC-CNS
+# Copyright 2015-2023 Earth Sciences Department, BSC-CNS
# This file is part of Autosubmit.
-
+#
# Autosubmit is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
-
+#
# Autosubmit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
-
+#
# You should have received a copy of the GNU General Public License
# along with Autosubmit. If not, see <http://www.gnu.org/licenses/>.
-import unittest
from collections import namedtuple
+
from autosubmit.history.data_classes.job_data import JobData
-from autosubmit.history.strategies import PlatformInformationHandler, TwoDimWrapperDistributionStrategy
from autosubmit.history.platform_monitor.slurm_monitor import SlurmMonitor
+from autosubmit.history.strategies import PlatformInformationHandler, TwoDimWrapperDistributionStrategy
+
job_dc = namedtuple("Job", ["job_name", "date", "member", "status_str", "children", "children_list"])
-class Test2DWrapperDistributionStrategy(unittest.TestCase):
- def setUp(self):
- self.strategy = TwoDimWrapperDistributionStrategy()
- self.job_data_dcs_in_wrapper = [
- JobData(0, job_name="a29z_20000101_fc2_1_POSTR", status="COMPLETED", submit=10, start=100, finish=200, ncpus=100, energy=0, children="a29z_20000101_fc1_1_CLEAN, a29z_20000101_fc3_1_POST"),
- JobData(0, job_name="a29z_20000101_fc1_1_CLEAN", status="COMPLETED", submit=10, start=100, finish=200, ncpus=100, energy=0, children="a29z_20000101_fc2_1_CLEAN"),
- JobData(0, job_name="a29z_20000101_fc3_1_POST", status="COMPLETED", submit=10, start=100, finish=200, ncpus=100, energy=0, children="a29z_20000101_fc0_3_SIM"),
- JobData(0, job_name="a29z_20000101_fc2_1_CLEAN", status="COMPLETED", submit=10, start=100, finish=200, ncpus=100, energy=0, children=""),
- JobData(0, job_name="a29z_20000101_fc0_3_SIM", status="COMPLETED", submit=10, start=100, finish=200, ncpus=100, energy=0, children=""),
- JobData(0, job_name="a29z_20000101_fc1_2_POSTR1", status="COMPLETED", submit=10, start=100, finish=200, ncpus=100, energy=0, children="a29z_20000101_fc1_5_POST2"),
- JobData(0, job_name="a29z_20000101_fc1_5_POST2", status="COMPLETED", submit=10, start=100, finish=200, ncpus=100, energy=0, children="a29z_20000101_fc1_4_POST3"),
- JobData(0, job_name="a29z_20000101_fc1_4_POST3", status="COMPLETED", submit=10, start=100, finish=200, ncpus=100, energy=0, children="a29z_20000101_fc2_5_CLEAN4"),
- JobData(0, job_name="a29z_20000101_fc2_5_CLEAN4", status="COMPLETED", submit=10, start=100, finish=200, ncpus=100, energy=0, children="a29z_20000101_fc0_1_POST5"),
- JobData(0, job_name="a29z_20000101_fc0_1_POST5", status="COMPLETED", submit=10, start=100, finish=200, ncpus=100, energy=0, children=""),
- ]
+strategy = TwoDimWrapperDistributionStrategy()
+job_data_dcs_in_wrapper = [
+ JobData(0, job_name="a29z_20000101_fc2_1_POSTR", status="COMPLETED", submit=10, start=100, finish=200,
+ ncpus=100, energy=0, children="a29z_20000101_fc1_1_CLEAN, a29z_20000101_fc3_1_POST"),
+ JobData(0, job_name="a29z_20000101_fc1_1_CLEAN", status="COMPLETED", submit=10, start=100, finish=200,
+ ncpus=100, energy=0, children="a29z_20000101_fc2_1_CLEAN"),
+ JobData(0, job_name="a29z_20000101_fc3_1_POST", status="COMPLETED", submit=10, start=100, finish=200,
+ ncpus=100, energy=0, children="a29z_20000101_fc0_3_SIM"),
+ JobData(0, job_name="a29z_20000101_fc2_1_CLEAN", status="COMPLETED", submit=10, start=100, finish=200,
+ ncpus=100, energy=0, children=""),
+ JobData(0, job_name="a29z_20000101_fc0_3_SIM", status="COMPLETED", submit=10, start=100, finish=200, ncpus=100,
+ energy=0, children=""),
+ JobData(0, job_name="a29z_20000101_fc1_2_POSTR1", status="COMPLETED", submit=10, start=100, finish=200,
+ ncpus=100, energy=0, children="a29z_20000101_fc1_5_POST2"),
+ JobData(0, job_name="a29z_20000101_fc1_5_POST2", status="COMPLETED", submit=10, start=100, finish=200,
+ ncpus=100, energy=0, children="a29z_20000101_fc1_4_POST3"),
+ JobData(0, job_name="a29z_20000101_fc1_4_POST3", status="COMPLETED", submit=10, start=100, finish=200,
+ ncpus=100, energy=0, children="a29z_20000101_fc2_5_CLEAN4"),
+ JobData(0, job_name="a29z_20000101_fc2_5_CLEAN4", status="COMPLETED", submit=10, start=100, finish=200,
+ ncpus=100, energy=0, children="a29z_20000101_fc0_1_POST5"),
+ JobData(0, job_name="a29z_20000101_fc0_1_POST5", status="COMPLETED", submit=10, start=100, finish=200,
+ ncpus=100, energy=0, children=""),
+]
- def test_get_all_children(self):
- children = self.strategy._get_all_children(self.job_data_dcs_in_wrapper)
- self.assertTrue(len(children) == 8)
-
- def test_get_roots(self):
- roots = self.strategy._get_roots(self.job_data_dcs_in_wrapper)
- self.assertTrue(len(roots) == 2)
- def test_get_level(self):
- roots = self.strategy._get_roots(self.job_data_dcs_in_wrapper)
- job_name_to_children_names = {job.job_name: job.children_list for job in self.job_data_dcs_in_wrapper}
- next_level = self.strategy.get_level(roots, job_name_to_children_names)
- self.assertTrue(len(next_level) == 3)
+def test_get_all_children():
+ children = strategy._get_all_children(job_data_dcs_in_wrapper)
+ assert len(children) == 8
- def test_get_jobs_per_level(self):
- levels = self.strategy.get_jobs_per_level(self.job_data_dcs_in_wrapper)
- for level in levels:
- print([job.job_name for job in level])
- self.assertTrue(len(levels) == 5)
- self.assertTrue("a29z_20000101_fc0_1_POST5" in [job.job_name for job in levels[4]])
- def test_energy_distribution(self):
+def test_get_roots():
+ roots = strategy._get_roots(job_data_dcs_in_wrapper)
+ assert len(roots) == 2
+
+
+def test_get_level():
+ roots = strategy._get_roots(job_data_dcs_in_wrapper)
+ job_name_to_children_names = {job.job_name: job.children_list for job in job_data_dcs_in_wrapper}
+ next_level = strategy.get_level(roots, job_name_to_children_names)
+ assert len(next_level) == 3
+
+
+def test_get_jobs_per_level():
+ levels = strategy.get_jobs_per_level(job_data_dcs_in_wrapper)
+ # for level in levels:
+ # print([job.job_name for job in level])
+ assert len(levels) == 5
+ assert "a29z_20000101_fc0_1_POST5" in [job.job_name for job in levels[4]]
+
+
+def test_energy_distribution():
ssh_output = ''' 17857525 COMPLETED 10 1 2021-10-13T15:51:16 2021-10-13T15:51:17 2021-10-13T15:52:47 2.62K
17857525.batch COMPLETED 10 1 2021-10-13T15:51:17 2021-10-13T15:51:17 2021-10-13T15:52:47 1.88K 6264K 6264K
17857525.extern COMPLETED 10 1 2021-10-13T15:51:17 2021-10-13T15:51:17 2021-10-13T15:52:47 1.66K 473K 68K
@@ -72,23 +84,22 @@ class Test2DWrapperDistributionStrategy(unittest.TestCase):
'''
slurm_monitor = SlurmMonitor(ssh_output)
info_handler = PlatformInformationHandler(TwoDimWrapperDistributionStrategy())
- job_dcs = info_handler.execute_distribution(self.job_data_dcs_in_wrapper[0], self.job_data_dcs_in_wrapper, slurm_monitor)
- for job in job_dcs:
- print(("{0} -> {1} and {2} : ncpus {3} running {4}".format(job.job_name, job.energy, job.rowstatus, job.ncpus, job.running_time)))
- for level in info_handler.strategy.jobs_per_level:
- print([job.job_name for job in level])
- total_in_jobs = sum(job.energy for job in job_dcs[:-1]) # ignore last
- self.assertTrue(abs(total_in_jobs - slurm_monitor.total_energy) <= 10)
- self.assertTrue(abs(job_dcs[0].energy - 259) < 1)
- self.assertTrue(abs(job_dcs[1].energy - 259) < 1)
- self.assertTrue(abs(job_dcs[2].energy - 228) < 1)
- self.assertTrue(abs(job_dcs[3].energy - 228) < 1)
- self.assertTrue(abs(job_dcs[4].energy - 228) < 1)
- self.assertTrue(abs(job_dcs[5].energy - 228.67) < 1)
- self.assertTrue(abs(job_dcs[6].energy - 228.67) < 1)
- self.assertTrue(abs(job_dcs[7].energy - 228.67) < 1)
- self.assertTrue(abs(job_dcs[8].energy - 358) < 1)
- self.assertTrue(abs(job_dcs[9].energy - 376) < 1)
-
-if __name__ == '__main__':
- unittest.main()
\ No newline at end of file
+ job_dcs = info_handler.execute_distribution(job_data_dcs_in_wrapper[0], job_data_dcs_in_wrapper,
+ slurm_monitor)
+ # for job in job_dcs:
+ # print(("{0} -> {1} and {2} : ncpus {3} running {4}".format(job.job_name, job.energy, job.rowstatus, job.ncpus,
+ # job.running_time)))
+ # for level in info_handler.strategy.jobs_per_level:
+ # print([job.job_name for job in level])
+ total_in_jobs = sum(job.energy for job in job_dcs[:-1]) # ignore last
+ assert abs(total_in_jobs - slurm_monitor.total_energy) <= 10
+ assert abs(job_dcs[0].energy - 259) < 1
+ assert abs(job_dcs[1].energy - 259) < 1
+ assert abs(job_dcs[2].energy - 228) < 1
+ assert abs(job_dcs[3].energy - 228) < 1
+ assert abs(job_dcs[4].energy - 228) < 1
+ assert abs(job_dcs[5].energy - 228.67) < 1
+ assert abs(job_dcs[6].energy - 228.67) < 1
+ assert abs(job_dcs[7].energy - 228.67) < 1
+ assert abs(job_dcs[8].energy - 358) < 1
+ assert abs(job_dcs[9].energy - 376) < 1
diff --git a/test/unit/test_wrappers.py b/test/unit/test_wrappers.py
index c2235c6b7f18c3a23b0076c7783ac587c75b618e..78fff905be0da14bdd8f409c95fccf0f328640ff 100644
--- a/test/unit/test_wrappers.py
+++ b/test/unit/test_wrappers.py
@@ -1475,7 +1475,7 @@ class TestWrappers(TestCase):
def _manage_dependencies(self, sections_dict):
for job in self.job_list.get_job_list():
- section = job.section
+ section = job.SECTION
dependencies = sections_dict['sections'][section][
'DEPENDENCIES'] if 'DEPENDENCIES' in sections_dict['sections'][section] else ''
self._manage_job_dependencies(job, dependencies, sections_dict)
@@ -1505,7 +1505,7 @@ class TestWrappers(TestCase):
def _filter_jobs(self, section, date=None, member=None, chunk=None):
# TODO: improve the efficiency
- jobs = [job for job in self.job_list.get_job_list() if job.section == section and job.date == date and job.member == member and job.chunk == chunk]
+ jobs = [job for job in self.job_list.get_job_list() if job.SECTION == section and job.date == date and job.member == member and job.chunk == chunk]
return jobs
def _createDummyJob(self, name, total_wallclock, section, date=None, member=None, chunk=None):