author    Lubomir Rintel <lkundrak@v3.sk>    2023-03-02 19:31:53 +0100
committer Lubomir Rintel <lkundrak@v3.sk>    2023-03-21 23:35:42 +0100
commit    ad6878d50a10c0a6d0c36c3487d41ebce5f2ee74 (patch)
tree      ef269bfa5c6ad81f998b69833777eccdb253d0ad
parent    1e114c804b45f0a501f90526f7e762bfeedcf232 (diff)
tests/client: split out nmcli specific bits into a separate class
The mock service is more widely useful -- in particular for testing nm-cloud-setup in a following commit. Split the commonly useful parts into a TestNmClient class.
-rw-r--r--  Makefile.am                        2
-rw-r--r--  src/tests/client/meson.build       1
-rwxr-xr-x  src/tests/client/test-client.py  438
3 files changed, 224 insertions, 217 deletions
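
With the shared infrastructure (environment setup, mock service control, pexpect spawning, async job handling and expected-output comparison) moved onto TestNmClient, a test for another client binary only needs to subclass it. A minimal sketch of what such a subclass could look like; the class name, the ENV_NM_TEST_CLIENT_CLOUD_SETUP_PATH configuration key and the test body are hypothetical illustrations, not part of this commit (the real nm-cloud-setup test arrives in the following commit mentioned above):

    # hypothetical fragment of src/tests/client/test-client.py, on top of this commit
    class TestNmCloudSetup(TestNmClient):
        def test_metadata(self):
            # srv_start()/srv_shutdown() manage the mock NetworkManager service;
            # they are assumed to be reachable from the base class here.
            self.srv_start()
            res = self.call_pexpect(
                ENV_NM_TEST_CLIENT_CLOUD_SETUP_PATH,  # hypothetical config key for the binary path
                [],
                {"NM_CLOUD_SETUP_EC2": "yes"},  # hypothetical extra environment
            )
            res.pexp.expect(pexpect.EOF)  # res.pexp is the pexpect.spawn handle
            self.srv_shutdown()

The sketch deliberately skips the nm_test decorator, the skip_without_pexpect guard and the expected-output bookkeeping that TestNmcli uses, to keep the example short.
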
diff --git a/Makefile.am b/Makefile.am
index 9c81ef9c90..0f827c6763 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -5468,7 +5468,7 @@ endif
###############################################################################
check-local-tests-client: src/nmcli/nmcli src/tests/client/test-client.py
- LIBTOOL="$(LIBTOOL)" "$(srcdir)/src/tests/client/test-client.sh" "$(builddir)" "$(srcdir)" "$(PYTHON)" --
+ LIBTOOL="$(LIBTOOL)" "$(srcdir)/src/tests/client/test-client.sh" "$(builddir)" "$(srcdir)" "$(PYTHON)" -- TestNmcli
check_local += check-local-tests-client
diff --git a/src/tests/client/meson.build b/src/tests/client/meson.build
index 6dc0f2a2c8..6a6891354d 100644
--- a/src/tests/client/meson.build
+++ b/src/tests/client/meson.build
@@ -8,6 +8,7 @@ test(
source_root,
python.path(),
'--',
+ 'TestNmcli',
],
env: [
'LIBTOOL=',
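
Both build systems now pass an extra 'TestNmcli' argument after the '--' separator, so the default check target keeps running only the nmcli tests even once further TestNmClient subclasses exist. Assuming test-client.sh forwards the arguments after '--' to test-client.py and that the script relies on Python's standard unittest test selection, the same mechanism also lets you pick a single class or test by name. A small standalone sketch of that selection behaviour (file and helper class names below are generic placeholders, not from this repository):

    # selection-demo.py -- standalone illustration of unittest name selection
    import unittest

    class TestNmcli(unittest.TestCase):
        def test_001(self):
            self.assertTrue(True)

    class TestOther(unittest.TestCase):
        def test_001(self):
            self.assertTrue(True)

    if __name__ == "__main__":
        # "python3 selection-demo.py" runs both classes;
        # "python3 selection-demo.py TestNmcli" runs only TestNmcli,
        # which is the effect the extra build-system argument relies on.
        unittest.main()
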
diff --git a/src/tests/client/test-client.py b/src/tests/client/test-client.py
index e5febe964f..859a5280be 100755
--- a/src/tests/client/test-client.py
+++ b/src/tests/client/test-client.py
@@ -835,7 +835,7 @@ class AsyncProcess:
MAX_JOBS = 15
-class TestNmcli(unittest.TestCase):
+class TestNmClient(unittest.TestCase):
def __init__(self, *args, **kwargs):
self._calling_num = {}
self._skip_test_for_l10n_diff = []
@@ -888,13 +888,53 @@ class TestNmcli(unittest.TestCase):
return content_expect, results_expect
- def nmcli_construct_argv(self, args, with_valgrind=None):
+ def _env(
+ self, lang="C", calling_num=None, fatal_warnings=_DEFAULT_ARG, extra_env=None
+ ):
+ if lang == "C":
+ language = ""
+ elif lang == "de_DE.utf8":
+ language = "de"
+ elif lang == "pl_PL.UTF-8":
+ language = "pl"
+ else:
+ self.fail("invalid language %s" % (lang))
+
+ env = {}
+ for k in [
+ "LD_LIBRARY_PATH",
+ "DBUS_SESSION_BUS_ADDRESS",
+ "LIBNM_CLIENT_DEBUG",
+ "LIBNM_CLIENT_DEBUG_FILE",
+ ]:
+ val = os.environ.get(k, None)
+ if val is not None:
+ env[k] = val
+ env["LANG"] = lang
+ env["LANGUAGE"] = language
+ env["LIBNM_USE_SESSION_BUS"] = "1"
+ env["LIBNM_USE_NO_UDEV"] = "1"
+ env["TERM"] = "linux"
+ env["ASAN_OPTIONS"] = conf.get(ENV_NM_TEST_ASAN_OPTIONS)
+ env["LSAN_OPTIONS"] = conf.get(ENV_NM_TEST_LSAN_OPTIONS)
+ env["LBSAN_OPTIONS"] = conf.get(ENV_NM_TEST_UBSAN_OPTIONS)
+ env["XDG_CONFIG_HOME"] = PathConfiguration.srcdir()
+ if calling_num is not None:
+ env["NM_TEST_CALLING_NUM"] = str(calling_num)
+ if fatal_warnings is _DEFAULT_ARG or fatal_warnings:
+ env["G_DEBUG"] = "fatal-warnings"
+ if extra_env is not None:
+ for k, v in extra_env.items():
+ env[k] = v
+ return env
+
+ def cmd_construct_argv(self, cmd_path, args, with_valgrind=None):
if with_valgrind is None:
with_valgrind = conf.get(ENV_NM_TEST_VALGRIND)
valgrind_log = None
- cmd = conf.get(ENV_NM_TEST_CLIENT_NMCLI_PATH)
+ cmd = conf.get(cmd_path)
if with_valgrind:
valgrind_log = tempfile.mkstemp(prefix="nm-test-client-valgrind.")
argv = [
@@ -921,6 +961,174 @@ class TestNmcli(unittest.TestCase):
argv.extend(args)
return argv, valgrind_log
+ def call_pexpect(self, cmd_path, args, extra_env):
+ argv, valgrind_log = self.cmd_construct_argv(cmd_path, args)
+ env = self._env(extra_env=extra_env)
+
+ pexp = pexpect.spawn(argv[0], argv[1:], timeout=10, env=env)
+
+ typ = collections.namedtuple("CallPexpect", ["pexp", "valgrind_log"])
+ return typ(pexp, valgrind_log)
+
+ def async_start(self, wait_all=False):
+
+ while True:
+
+ while True:
+ for async_job in list(self._async_jobs[0:MAX_JOBS]):
+ async_job.start()
+ # start up to MAX_JOBS jobs, but poll() and complete those
+ # that are already exited. Retry, until there are no more
+ # jobs to start, or until MAX_JOBS are running.
+ jobs_running = []
+ for async_job in list(self._async_jobs[0:MAX_JOBS]):
+ if async_job.poll() is not None:
+ self._async_jobs.remove(async_job)
+ async_job.wait_and_complete()
+ continue
+ jobs_running.append(async_job)
+ if len(jobs_running) >= len(self._async_jobs):
+ break
+ if len(jobs_running) >= MAX_JOBS:
+ break
+
+ if not jobs_running:
+ return
+ if not wait_all:
+ return
+
+ # in a loop, indefinitely poll the running jobs until we find one that
+ # completes. Note that poll() itself will raise an exception if a
+ # job times out.
+ for async_job in Util.random_job(jobs_running):
+ if async_job.poll(timeout=0.03) is not None:
+ self._async_jobs.remove(async_job)
+ async_job.wait_and_complete()
+ break
+
+ def async_wait(self):
+ return self.async_start(wait_all=True)
+
+ def _nm_test_post(self):
+
+ self.async_wait()
+
+ self.srv_shutdown()
+
+ self._calling_num = None
+
+ results = self._results
+ self._results = None
+
+ if len(results) == 0:
+ return
+
+ skip_test_for_l10n_diff = self._skip_test_for_l10n_diff
+ self._skip_test_for_l10n_diff = None
+
+ test_name = self._testMethodName
+
+ filename = os.path.abspath(
+ PathConfiguration.srcdir()
+ + "/test-client.check-on-disk/"
+ + test_name
+ + ".expected"
+ )
+
+ regenerate = conf.get(ENV_NM_TEST_REGENERATE)
+
+ content_expect, results_expect = self._read_expected(filename)
+
+ if results_expect is None:
+ if not regenerate:
+ self.fail(
+ "Failed to parse expected file '%s'. Let the test write the file by rerunning with NM_TEST_REGENERATE=1"
+ % (filename)
+ )
+ else:
+ for i in range(0, min(len(results_expect), len(results))):
+ n = results[i]
+ if results_expect[i] == n["content"]:
+ continue
+ if regenerate:
+ continue
+ if n["ignore_l10n_diff"]:
+ skip_test_for_l10n_diff.append(n["test_name"])
+ continue
+ print(
+ "\n\n\nThe file '%s' does not have the expected content:"
+ % (filename)
+ )
+ print("ACTUAL OUTPUT:\n[[%s]]\n" % (n["content"]))
+ print("EXPECT OUTPUT:\n[[%s]]\n" % (results_expect[i]))
+ print(
+ "Let the test write the file by rerunning with NM_TEST_REGENERATE=1"
+ )
+ print(
+ "See howto in %s for details.\n"
+ % (PathConfiguration.canonical_script_filename())
+ )
+ sys.stdout.flush()
+ self.fail(
+ "Unexpected output of command, expected %s. Rerun test with NM_TEST_REGENERATE=1 to regenerate files"
+ % (filename)
+ )
+ if len(results_expect) != len(results):
+ if not regenerate:
+ print(
+ "\n\n\nThe number of tests in %s does not match the expected content (%s vs %s):"
+ % (filename, len(results_expect), len(results))
+ )
+ if len(results_expect) < len(results):
+ print(
+ "ACTUAL OUTPUT:\n[[%s]]\n"
+ % (results[len(results_expect)]["content"])
+ )
+ else:
+ print(
+ "EXPECT OUTPUT:\n[[%s]]\n" % (results_expect[len(results)])
+ )
+ print(
+ "Let the test write the file by rerunning with NM_TEST_REGENERATE=1"
+ )
+ print(
+ "See howto in %s for details.\n"
+ % (PathConfiguration.canonical_script_filename())
+ )
+ sys.stdout.flush()
+ self.fail(
+ "Unexpected output of command, expected %s. Rerun test with NM_TEST_REGENERATE=1 to regenerate files"
+ % (filename)
+ )
+
+ if regenerate:
+ content_new = b"".join([r["content"] for r in results])
+ if content_new != content_expect:
+ try:
+ with open(filename, "wb") as content_file:
+ content_file.write(content_new)
+ except Exception as e:
+ self.fail("Failure to write '%s': %s" % (filename, e))
+
+ if skip_test_for_l10n_diff:
+ # nmcli loads translations from the installation path. This failure commonly
+ # happens because you did not install the binary in the --prefix, before
+ # running the test. Hence, translations are not available or differ.
+ self.skipTest(
+ "Skipped asserting for localized tests %s. Set NM_TEST_CLIENT_CHECK_L10N=1 to force fail."
+ % (",".join(skip_test_for_l10n_diff))
+ )
+
+ def setUp(self):
+ if not dbus_session_inited:
+ self.skipTest(
+ "Own D-Bus session for testing is not initialized. Do you have dbus-run-session available?"
+ )
+ if NM is None:
+ self.skipTest("gi.NM is not available. Did you build with introspection?")
+
+
+class TestNmcli(TestNmClient):
def call_nmcli_l(
self,
args,
@@ -1004,54 +1212,7 @@ class TestNmcli(unittest.TestCase):
)
def call_nmcli_pexpect(self, args):
-
- env = self._env(extra_env={"NO_COLOR": "1"})
- argv, valgrind_log = self.nmcli_construct_argv(args)
-
- pexp = pexpect.spawn(argv[0], argv[1:], timeout=10, env=env)
-
- typ = collections.namedtuple("CallNmcliPexpect", ["pexp", "valgrind_log"])
- return typ(pexp, valgrind_log)
-
- def _env(
- self, lang="C", calling_num=None, fatal_warnings=_DEFAULT_ARG, extra_env=None
- ):
- if lang == "C":
- language = ""
- elif lang == "de_DE.utf8":
- language = "de"
- elif lang == "pl_PL.UTF-8":
- language = "pl"
- else:
- self.fail("invalid language %s" % (lang))
-
- env = {}
- for k in [
- "LD_LIBRARY_PATH",
- "DBUS_SESSION_BUS_ADDRESS",
- "LIBNM_CLIENT_DEBUG",
- "LIBNM_CLIENT_DEBUG_FILE",
- ]:
- val = os.environ.get(k, None)
- if val is not None:
- env[k] = val
- env["LANG"] = lang
- env["LANGUAGE"] = language
- env["LIBNM_USE_SESSION_BUS"] = "1"
- env["LIBNM_USE_NO_UDEV"] = "1"
- env["TERM"] = "linux"
- env["ASAN_OPTIONS"] = conf.get(ENV_NM_TEST_ASAN_OPTIONS)
- env["LSAN_OPTIONS"] = conf.get(ENV_NM_TEST_LSAN_OPTIONS)
- env["LBSAN_OPTIONS"] = conf.get(ENV_NM_TEST_UBSAN_OPTIONS)
- env["XDG_CONFIG_HOME"] = PathConfiguration.srcdir()
- if calling_num is not None:
- env["NM_TEST_CALLING_NUM"] = str(calling_num)
- if fatal_warnings is _DEFAULT_ARG or fatal_warnings:
- env["G_DEBUG"] = "fatal-warnings"
- if extra_env is not None:
- for k, v in extra_env.items():
- env[k] = v
- return env
+ return self.call_pexpect(ENV_NM_TEST_CLIENT_NMCLI_PATH, args, {"NO_COLOR": "1"})
def _call_nmcli(
self,
@@ -1113,7 +1274,9 @@ class TestNmcli(unittest.TestCase):
self.fail("invalid language %s" % (lang))
# Running under valgrind is not yet supported for those tests.
- args, valgrind_log = self.nmcli_construct_argv(args, with_valgrind=False)
+ args, valgrind_log = self.cmd_construct_argv(
+ ENV_NM_TEST_CLIENT_NMCLI_PATH, args, with_valgrind=False
+ )
assert valgrind_log is None
@@ -1232,163 +1395,6 @@ class TestNmcli(unittest.TestCase):
self.async_start(wait_all=sync_barrier)
- def async_start(self, wait_all=False):
-
- while True:
-
- while True:
- for async_job in list(self._async_jobs[0:MAX_JOBS]):
- async_job.start()
- # start up to MAX_JOBS jobs, but poll() and complete those
- # that are already exited. Retry, until there are no more
- # jobs to start, or until MAX_JOBS are running.
- jobs_running = []
- for async_job in list(self._async_jobs[0:MAX_JOBS]):
- if async_job.poll() is not None:
- self._async_jobs.remove(async_job)
- async_job.wait_and_complete()
- continue
- jobs_running.append(async_job)
- if len(jobs_running) >= len(self._async_jobs):
- break
- if len(jobs_running) >= MAX_JOBS:
- break
-
- if not jobs_running:
- return
- if not wait_all:
- return
-
- # in a loop, indefinitely poll the running jobs until we find one that
- # completes. Note that poll() itself will raise an exception if a
- # job times out.
- for async_job in Util.random_job(jobs_running):
- if async_job.poll(timeout=0.03) is not None:
- self._async_jobs.remove(async_job)
- async_job.wait_and_complete()
- break
-
- def async_wait(self):
- return self.async_start(wait_all=True)
-
- def _nm_test_post(self):
-
- self.async_wait()
-
- self.srv_shutdown()
-
- self._calling_num = None
-
- results = self._results
- self._results = None
-
- if len(results) == 0:
- return
-
- skip_test_for_l10n_diff = self._skip_test_for_l10n_diff
- self._skip_test_for_l10n_diff = None
-
- test_name = self._testMethodName
-
- filename = os.path.abspath(
- PathConfiguration.srcdir()
- + "/test-client.check-on-disk/"
- + test_name
- + ".expected"
- )
-
- regenerate = conf.get(ENV_NM_TEST_REGENERATE)
-
- content_expect, results_expect = self._read_expected(filename)
-
- if results_expect is None:
- if not regenerate:
- self.fail(
- "Failed to parse expected file '%s'. Let the test write the file by rerunning with NM_TEST_REGENERATE=1"
- % (filename)
- )
- else:
- for i in range(0, min(len(results_expect), len(results))):
- n = results[i]
- if results_expect[i] == n["content"]:
- continue
- if regenerate:
- continue
- if n["ignore_l10n_diff"]:
- skip_test_for_l10n_diff.append(n["test_name"])
- continue
- print(
- "\n\n\nThe file '%s' does not have the expected content:"
- % (filename)
- )
- print("ACTUAL OUTPUT:\n[[%s]]\n" % (n["content"]))
- print("EXPECT OUTPUT:\n[[%s]]\n" % (results_expect[i]))
- print(
- "Let the test write the file by rerunning with NM_TEST_REGENERATE=1"
- )
- print(
- "See howto in %s for details.\n"
- % (PathConfiguration.canonical_script_filename())
- )
- sys.stdout.flush()
- self.fail(
- "Unexpected output of command, expected %s. Rerun test with NM_TEST_REGENERATE=1 to regenerate files"
- % (filename)
- )
- if len(results_expect) != len(results):
- if not regenerate:
- print(
- "\n\n\nThe number of tests in %s does not match the expected content (%s vs %s):"
- % (filename, len(results_expect), len(results))
- )
- if len(results_expect) < len(results):
- print(
- "ACTUAL OUTPUT:\n[[%s]]\n"
- % (results[len(results_expect)]["content"])
- )
- else:
- print(
- "EXPECT OUTPUT:\n[[%s]]\n" % (results_expect[len(results)])
- )
- print(
- "Let the test write the file by rerunning with NM_TEST_REGENERATE=1"
- )
- print(
- "See howto in %s for details.\n"
- % (PathConfiguration.canonical_script_filename())
- )
- sys.stdout.flush()
- self.fail(
- "Unexpected output of command, expected %s. Rerun test with NM_TEST_REGENERATE=1 to regenerate files"
- % (filename)
- )
-
- if regenerate:
- content_new = b"".join([r["content"] for r in results])
- if content_new != content_expect:
- try:
- with open(filename, "wb") as content_file:
- content_file.write(content_new)
- except Exception as e:
- self.fail("Failure to write '%s': %s" % (filename, e))
-
- if skip_test_for_l10n_diff:
- # nmcli loads translations from the installation path. This failure commonly
- # happens because you did not install the binary in the --prefix, before
- # running the test. Hence, translations are not available or differ.
- self.skipTest(
- "Skipped asserting for localized tests %s. Set NM_TEST_CLIENT_CHECK_L10N=1 to force fail."
- % (",".join(skip_test_for_l10n_diff))
- )
-
- def skip_without_pexpect(func):
- def f(self):
- if pexpect is None:
- raise unittest.SkipTest("pexpect not available")
- func(self)
-
- return f
-
def nm_test(func):
def f(self):
self.srv_start()
@@ -1404,13 +1410,13 @@ class TestNmcli(unittest.TestCase):
return f
- def setUp(self):
- if not dbus_session_inited:
- self.skipTest(
- "Own D-Bus session for testing is not initialized. Do you have dbus-run-session available?"
- )
- if NM is None:
- self.skipTest("gi.NM is not available. Did you build with introspection?")
+ def skip_without_pexpect(func):
+ def f(self):
+ if pexpect is None:
+ raise unittest.SkipTest("pexpect not available")
+ func(self)
+
+ return f
def init_001(self):
self.srv.op_AddObj("WiredDevice", iface="eth0")