Diffstat (limited to 'regtest')
-rw-r--r--   regtest/Printer.py           44
-rw-r--r--   regtest/TestReferences.py    50
-rw-r--r--   regtest/TestRun.py          136
3 files changed, 118 insertions, 112 deletions
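The commit replaces the old per-document progress messages with one global counter over all tests (documents × backends). Reconstructed from the format strings in the diffs below, with a made-up document name, the per-test output changes roughly like this:

    Old:  Tested 'doc.pdf' using cairo backend (12/125): PASS
    New:  [12/250] doc.pdf (cairo): PASS

The denominator is now self._total_tests = total_docs * len(backends), computed once before the worker threads are spawned.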
diff --git a/regtest/Printer.py b/regtest/Printer.py
index 0b0b34ae..23dfd34e 100644
--- a/regtest/Printer.py
+++ b/regtest/Printer.py
@@ -34,14 +34,12 @@ class Printer:
         self._rewrite = self._stream.isatty() and not self._verbose
         self._current_line = None
 
-        self._tests = {}
-
         self._lock = RLock()
 
         Printer.__single = self
 
     def _erase_current_line(self):
-        if not self._rewrite or self._current_line is None:
+        if self._current_line is None:
             return
 
         line_len = len(self._current_line)
@@ -58,25 +56,17 @@ class Printer:
         self._stream.flush()
 
     def printout(self, msg):
+        if not self._rewrite:
+            self.printout_ln(msg)
+
         with self._lock:
             self._erase_current_line()
             self._print(msg)
             self._current_line = msg[msg.rfind('\n') + 1:]
 
-    def printout_update(self, msg):
+    def printout_ln(self, msg=''):
         with self._lock:
-            if self._rewrite and self._current_line is not None:
-                msg = self._current_line + msg
-            elif not self._rewrite:
-                msg = self._ensure_new_line(msg)
-            self.printout(msg)
-
-    def printout_ln(self, msg):
-        with self._lock:
-            if self._current_line is not None:
-                self._current_line = None
-                msg = '\n' + msg
-
+            self._erase_current_line()
             self._print(self._ensure_new_line(msg))
 
     def printerr(self, msg):
@@ -84,25 +74,11 @@ class Printer:
             self.stderr.write(self._ensure_new_line(msg))
             self.stderr.flush()
 
-    def print_test_start(self, doc_path, backend_name, n_doc, total_docs):
-        with self._lock:
-            self._tests[(doc_path, backend_name)] = n_doc, total_docs
-
-    def print_test_result(self, doc_path, backend_name, msg):
-        if not self._rewrite:
-            self.print_test_result_ln(doc_path, backend_name, msg)
-            return
+    def print_test_result(self, doc_path, backend_name, n_test, total_tests, msg):
+        self.printout("[%d/%d] %s (%s): %s" % (n_test, total_tests, doc_path, backend_name, msg))
 
-        with self._lock:
-            n_doc, total_docs = self._tests.pop((doc_path, backend_name))
-            msg = "Tested '%s' using %s backend (%d/%d): %s" % (doc_path, backend_name, n_doc, total_docs, msg)
-            self.printout(msg)
-
-    def print_test_result_ln(self, doc_path, backend_name, msg):
-        with self._lock:
-            n_doc, total_docs = self._tests.pop((doc_path, backend_name))
-            msg = "Tested '%s' using %s backend (%d/%d): %s" % (doc_path, backend_name, n_doc, total_docs, msg)
-            self.printout_ln(msg)
+    def print_test_result_ln(self, doc_path, backend_name, n_test, total_tests, msg):
+        self.printout_ln("[%d/%d] %s (%s): %s" % (n_test, total_tests, doc_path, backend_name, msg))
 
     def print_default(self, msg):
         if self._verbose:
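A minimal usage sketch of the reworked Printer API (the loop and document names below are illustrative, not part of the patch): printout() paints a transient status line that is erased and rewritten in place when stdout is a tty, while printout_ln() emits a permanent line, erasing any pending status line first.

    printer = get_printer()
    printer.printout_ln('Found 2 documents')
    for n_test, doc in enumerate(['a.pdf', 'b.pdf'], start=1):
        # Transient progress line; on a tty the previous one is erased.
        printer.printout('[%d/2] %s (cairo): testing' % (n_test, doc))
    printer.printout_ln()  # finish with a newline so the last line sticks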
self.printer.print_default("Skipping test '%s' (%d/%d)" % (os.path.join(self._docsdir, filename), n_doc, total_docs)) + with self._lock: + self._n_tests += len(backends) + self.printer.print_default("Skipping test '%s'" % (os.path.join(self._docsdir, filename))) return refs_path = os.path.join(self._refsdir, filename) @@ -60,38 +73,43 @@ class TestReferences: raise doc_path = os.path.join(self._docsdir, filename) - if self.config.backends: - backends = [get_backend(name) for name in self.config.backends] - else: - backends = get_all_backends() - for backend in backends: if not self.config.force and backend.has_results(refs_path): - self.printer.print_default("Results found, skipping '%s' for %s backend (%d/%d)" % (doc_path, backend.get_name(), n_doc, total_docs)) + with self._lock: + self._n_tests += 1 + self.printer.print_default("Results found, skipping '%s' for %s backend" % (doc_path, backend.get_name())) continue - self.printer.printout_ln("Creating refs for '%s' using %s backend (%d/%d)" % (doc_path, backend.get_name(), n_doc, total_docs)) + if backend.create_refs(doc_path, refs_path): backend.create_checksums(refs_path, self.config.checksums_only) + with self._lock: + self._n_tests += 1 + self.printer.printout_ln("[%d/%d] %s (%s): done" % (self._n_tests, self._total_tests, doc_path, backend.get_name())) def _worker_thread(self): while True: - doc, n_doc, total_docs = self._queue.get() - self.create_refs_for_file(doc, n_doc, total_docs) + doc = self._queue.get() + self.create_refs_for_file(doc) self._queue.task_done() def create_refs(self): docs, total_docs = get_document_paths_from_dir(self._docsdir) + backends = self._get_backends() + self._total_tests = total_docs * len(backends) + + self.printer.printout_ln('Found %d documents' % (total_docs)) + self.printer.printout_ln('Backends: %s' % ', '.join([backend.get_name() for backend in backends])) + self.printer.printout_ln('Process %d using %d worker threads' % (os.getpid(), self.config.threads)) + self.printer.printout_ln() - self.printer.printout_ln('Process %d is spawning %d worker threads...' % (os.getpid(), self.config.threads)) + self.printer.printout('Spawning %d workers...' 
diff --git a/regtest/TestRun.py b/regtest/TestRun.py
index 8f8513c3..f47485f1 100644
--- a/regtest/TestRun.py
+++ b/regtest/TestRun.py
@@ -36,9 +36,11 @@ class TestRun:
         self._skip = get_skipped_tests(docsdir)
         self.config = Config()
         self.printer = get_printer()
+        self._total_tests = 1
 
         # Results
         self._n_tests = 0
+        self._n_run = 0
         self._n_passed = 0
         self._failed = []
         self._crashed = []
@@ -57,7 +59,13 @@ class TestRun:
         except:
             raise
 
-    def test(self, refs_path, doc_path, test_path, backend, n_doc, total_docs):
+    def _get_backends(self):
+        if self.config.backends:
+            return [get_backend(name) for name in self.config.backends]
+
+        return get_all_backends()
+
+    def test(self, refs_path, doc_path, test_path, backend):
         # First check whether there are test results for the backend
         ref_has_md5 = backend.has_md5(refs_path)
         ref_is_crashed = backend.is_crashed(refs_path)
@@ -65,70 +73,71 @@ class TestRun:
         if not ref_has_md5 and not ref_is_crashed and not ref_is_failed:
             with self._lock:
                 self._skipped.append("%s (%s)" % (doc_path, backend.get_name()))
+                self._n_tests += 1
             self.printer.print_default("Reference files not found, skipping '%s' for %s backend" % (doc_path, backend.get_name()))
             return
 
+        test_has_md5 = backend.create_refs(doc_path, test_path)
+        test_passed = False
+        if ref_has_md5 and test_has_md5:
+            test_passed = backend.compare_checksums(refs_path, test_path, not self.config.keep_results, self.config.create_diffs, self.config.update_refs)
+
         with self._lock:
             self._n_tests += 1
+            self._n_run += 1
-            self.printer.print_test_start(doc_path, backend.get_name(), n_doc, total_docs)
 
-        test_has_md5 = backend.create_refs(doc_path, test_path)
-
-        if backend.has_stderr(test_path):
-            with self._lock:
+            if backend.has_stderr(test_path):
                 self._stderr.append("%s (%s)" % (doc_path, backend.get_name()))
 
-        if ref_has_md5 and test_has_md5:
-            if backend.compare_checksums(refs_path, test_path, not self.config.keep_results, self.config.create_diffs, self.config.update_refs):
-                # FIXME: remove dir if it's empty?
-                self.printer.print_test_result(doc_path, backend.get_name(), "PASS")
-                with self._lock:
+            if ref_has_md5 and test_has_md5:
+                if test_passed:
+                    # FIXME: remove dir if it's empty?
+                    self.printer.print_test_result(doc_path, backend.get_name(), self._n_tests, self._total_tests, "PASS")
                     self._n_passed += 1
-            else:
-                self.printer.print_test_result_ln(doc_path, backend.get_name(), "FAIL")
-                with self._lock:
+                else:
+                    self.printer.print_test_result_ln(doc_path, backend.get_name(), self._n_tests, self._total_tests, "FAIL")
                     self._failed.append("%s (%s)" % (doc_path, backend.get_name()))
-            return
-        elif test_has_md5:
-            if ref_is_crashed:
-                self.printer.print_test_result_ln(doc_path, backend.get_name(), "DOES NOT CRASH")
-            elif ref_is_failed:
-                self.printer.print_test_result_ln(doc_path, backend.get_name(), "DOES NOT FAIL")
-            return
+                return
 
-        test_is_crashed = backend.is_crashed(test_path)
-        if ref_is_crashed and test_is_crashed:
-            self.printer.print_test_result(doc_path, backend.get_name(), "PASS (Expected crash)")
-            with self._lock:
+            if test_has_md5:
+                if ref_is_crashed:
+                    self.printer.print_test_result_ln(doc_path, backend.get_name(), self._n_tests, self._total_tests, "DOES NOT CRASH")
+                elif ref_is_failed:
+                    self.printer.print_test_result_ln(doc_path, backend.get_name(), self._n_tests, self._total_tests, "DOES NOT FAIL")
+                return
+
+            test_is_crashed = backend.is_crashed(test_path)
+            if ref_is_crashed and test_is_crashed:
+                self.printer.print_test_result(doc_path, backend.get_name(), self._n_tests, self._total_tests, "PASS (Expected crash)")
                 self._n_passed += 1
-            return
+                return
 
-        test_is_failed = backend.is_failed(test_path)
-        if ref_is_failed and test_is_failed:
-            # FIXME: compare status errors
-            self.printer.print_test_result(doc_path, backend.get_name(), "PASS (Expected fail with status error %d)" % (test_is_failed))
-            with self._lock:
+            test_is_failed = backend.is_failed(test_path)
+            if ref_is_failed and test_is_failed:
+                # FIXME: compare status errors
+                self.printer.print_test_result(doc_path, backend.get_name(), self._n_tests, self._total_tests, "PASS (Expected fail with status error %d)" % (test_is_failed))
                 self._n_passed += 1
-            return
+                return
 
-        if test_is_crashed:
-            self.printer.print_test_result_ln(doc_path, backend.get_name(), "CRASH")
-            with self._lock:
+            if test_is_crashed:
+                self.printer.print_test_result_ln(doc_path, backend.get_name(), self._n_tests, self._total_tests, "CRASH")
                 self._crashed.append("%s (%s)" % (doc_path, backend.get_name()))
-            return
+                return
 
-        if test_is_failed:
-            self.printer.print_test_result_ln(doc_path, backend.get_name(), "FAIL (status error %d)" % (test_is_failed))
-            with self._lock:
+            if test_is_failed:
+                self.printer.print_test_result_ln(doc_path, backend.get_name(), self._n_tests, self._total_tests, "FAIL (status error %d)" % (test_is_failed))
                 self._failed_status_error("%s (%s)" % (doc_path, backend.get_name()))
-            return
+                return
+
+    def run_test(self, filename):
+        backends = self._get_backends()
 
-    def run_test(self, filename, n_doc = 1, total_docs = 1):
         if filename in self._skip:
             doc_path = os.path.join(self._docsdir, filename)
             with self._lock:
                 self._skipped.append("%s" % (doc_path))
-            self.printer.print_default("Skipping test '%s' (%d/%d)" % (doc_path, n_doc, total_docs))
+                self._n_tests += len(backends)
+            self.printer.print_default("Skipping test '%s'" % (doc_path))
             return
 
         out_path = os.path.join(self._outdir, filename)
@@ -145,57 +154,60 @@ class TestRun:
         if not os.path.isdir(refs_path):
             with self._lock:
                 self._skipped.append("%s" % (doc_path))
-            self.printer.print_default("Reference dir not found for %s, skipping (%d/%d)" % (doc_path, n_doc, total_docs))
+                self._n_tests += len(backends)
+            self.printer.print_default("Reference dir not found for %s, skipping" % (doc_path))
             return
 
-        if self.config.backends:
-            backends = [get_backend(name) for name in self.config.backends]
-        else:
-            backends = get_all_backends()
-
         for backend in backends:
-            self.test(refs_path, doc_path, out_path, backend, n_doc, total_docs)
+            self.test(refs_path, doc_path, out_path, backend)
 
     def _worker_thread(self):
         while True:
-            doc, n_doc, total_docs = self._queue.get()
-            self.run_test(doc, n_doc, total_docs)
+            doc = self._queue.get()
+            self.run_test(doc)
             self._queue.task_done()
 
     def run_tests(self):
         docs, total_docs = get_document_paths_from_dir(self._docsdir)
+        backends = self._get_backends()
+        self._total_tests = total_docs * len(backends)
+
+        self.printer.printout_ln('Found %d documents' % (total_docs))
+        self.printer.printout_ln('Backends: %s' % ', '.join([backend.get_name() for backend in backends]))
+        self.printer.printout_ln('Process %d using %d worker threads' % (os.getpid(), self.config.threads))
+        self.printer.printout_ln()
 
-        self.printer.printout_ln('Process %d is spawning %d worker threads...' % (os.getpid(), self.config.threads))
+        self.printer.printout('Spawning %d workers...' % (self.config.threads))
         for n_thread in range(self.config.threads):
             thread = Thread(target=self._worker_thread)
             thread.daemon = True
             thread.start()
 
-        n_doc = 0
         for doc in docs:
-            n_doc += 1
-            self._queue.put( (doc, n_doc, total_docs) )
+            self._queue.put(doc)
 
         self._queue.join()
 
     def summary(self):
-        if not self._n_tests:
+        if not self._n_run:
             self.printer.printout_ln("No tests run")
             return
 
-        self.printer.printout_ln("Total %d tests" % (self._n_tests))
-        self.printer.printout_ln("%d tests passed (%.2f%%)" % (self._n_passed, (self._n_passed * 100.) / self._n_tests))
+        self.printer.printout_ln()
+        self.printer.printout_ln("%d tests passed (%.2f%%)" % (self._n_passed, (self._n_passed * 100.) / self._n_run))
+        self.printer.printout_ln()
 
         def report_tests(test_list, test_type):
             n_tests = len(test_list)
             if not n_tests:
                 return
-            self.printer.printout_ln("%d tests %s (%.2f%%): %s" % (n_tests, test_type, (n_tests * 100.) / self._n_tests, ", ".join(test_list)))
+            self.printer.printout_ln("%d tests %s (%.2f%%): %s" % (n_tests, test_type, (n_tests * 100.) / self._n_run, ", ".join(test_list)))
+            self.printer.printout_ln()
 
         report_tests(self._failed, "failed")
         report_tests(self._crashed, "crashed")
         report_tests(self._failed_status_error, "failed to run")
         report_tests(self._stderr, "have stderr output")
-        report_tests(self._skipped, "skipped")
-
-
+        if self._skipped:
+            self.printer.printout_ln("%d tests skipped: %s" % (len(self._skipped), ", ".join(self._skipped)))
+            self.printer.printout_ln()