authorMatt A. Tobin <mattatobin@localhost.localdomain>2018-02-02 04:16:08 -0500
committerMatt A. Tobin <mattatobin@localhost.localdomain>2018-02-02 04:16:08 -0500
commit5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree10027f336435511475e392454359edea8e25895d /js/src/tests/lib
parent49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
downloaduxp-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.gz
Add m-esr52 at 52.6.0
Diffstat (limited to 'js/src/tests/lib')
-rw-r--r--js/src/tests/lib/__init__.py0
-rwxr-xr-xjs/src/tests/lib/jittests.py720
-rw-r--r--js/src/tests/lib/manifest.py395
-rw-r--r--js/src/tests/lib/progressbar.py116
-rw-r--r--js/src/tests/lib/results.py278
-rw-r--r--js/src/tests/lib/tasks_unix.py223
-rw-r--r--js/src/tests/lib/tasks_win.py135
-rw-r--r--js/src/tests/lib/terminal_unix.py38
-rw-r--r--js/src/tests/lib/terminal_win.py109
-rw-r--r--js/src/tests/lib/tests.py216
10 files changed, 2230 insertions, 0 deletions
diff --git a/js/src/tests/lib/__init__.py b/js/src/tests/lib/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/js/src/tests/lib/__init__.py
diff --git a/js/src/tests/lib/jittests.py b/js/src/tests/lib/jittests.py
new file mode 100755
index 0000000000..0a1d1537a3
--- /dev/null
+++ b/js/src/tests/lib/jittests.py
@@ -0,0 +1,720 @@
+#!/usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+# jit_test.py -- Python harness for JavaScript trace tests.
+
+from __future__ import print_function
+import os, posixpath, sys, tempfile, traceback, time
+import subprocess
+from collections import namedtuple
+import StringIO
+
+if sys.platform.startswith('linux') or sys.platform.startswith('darwin'):
+ from tasks_unix import run_all_tests
+else:
+ from tasks_win import run_all_tests
+
+from progressbar import ProgressBar, NullProgressBar
+from results import TestOutput
+
+TESTS_LIB_DIR = os.path.dirname(os.path.abspath(__file__))
+JS_DIR = os.path.dirname(os.path.dirname(TESTS_LIB_DIR))
+TOP_SRC_DIR = os.path.dirname(os.path.dirname(JS_DIR))
+TEST_DIR = os.path.join(JS_DIR, 'jit-test', 'tests')
+LIB_DIR = os.path.join(JS_DIR, 'jit-test', 'lib') + os.path.sep
+MODULE_DIR = os.path.join(JS_DIR, 'jit-test', 'modules') + os.path.sep
+JS_CACHE_DIR = os.path.join(JS_DIR, 'jit-test', '.js-cache')
+JS_TESTS_DIR = posixpath.join(JS_DIR, 'tests')
+
+# Backported from Python 3.1 posixpath.py
+def _relpath(path, start=None):
+ """Return a relative version of a path"""
+
+ if not path:
+ raise ValueError("no path specified")
+
+ if start is None:
+ start = os.curdir
+
+ start_list = os.path.abspath(start).split(os.sep)
+ path_list = os.path.abspath(path).split(os.sep)
+
+ # Work out how much of the filepath is shared by start and path.
+ i = len(os.path.commonprefix([start_list, path_list]))
+
+ rel_list = [os.pardir] * (len(start_list)-i) + path_list[i:]
+ if not rel_list:
+ return os.curdir
+ return os.path.join(*rel_list)
+
+# Mapping of Python chars to their JavaScript string representation.
+QUOTE_MAP = {
+ '\\': '\\\\',
+ '\b': '\\b',
+ '\f': '\\f',
+ '\n': '\\n',
+ '\r': '\\r',
+ '\t': '\\t',
+ '\v': '\\v'
+}
+
+# Quote the string s, JavaScript style.
+def js_quote(quote, s):
+ result = quote
+ for c in s:
+ if c == quote:
+ result += '\\' + quote
+ elif c in QUOTE_MAP:
+ result += QUOTE_MAP[c]
+ else:
+ result += c
+ result += quote
+ return result
+
+os.path.relpath = _relpath
+
+class JitTest:
+
+ VALGRIND_CMD = []
+ paths = (d for d in os.environ['PATH'].split(os.pathsep))
+ valgrinds = (os.path.join(d, 'valgrind') for d in paths)
+ if any(os.path.exists(p) for p in valgrinds):
+ VALGRIND_CMD = [
+ 'valgrind', '-q', '--smc-check=all-non-file',
+ '--error-exitcode=1', '--gen-suppressions=all',
+ '--show-possibly-lost=no', '--leak-check=full',
+ ]
+ if os.uname()[0] == 'Darwin':
+ VALGRIND_CMD.append('--dsymutil=yes')
+
+ del paths
+ del valgrinds
+
+ def __init__(self, path):
+ # Absolute path of the test file.
+ self.path = path
+
+ # Path relative to the top mozilla/ directory.
+ self.relpath_top = os.path.relpath(path, TOP_SRC_DIR)
+
+ # Path relative to mozilla/js/src/jit-test/tests/.
+ self.relpath_tests = os.path.relpath(path, TEST_DIR)
+
+ self.jitflags = [] # jit flags to enable
+ self.slow = False # True means the test is slow-running
+ self.allow_oom = False # True means that OOM is not considered a failure
+ self.allow_unhandlable_oom = False # True means CrashAtUnhandlableOOM
+ # is not considered a failure
+        self.allow_overrecursed = False # True means that hitting the recursion
+                                        # limit is not considered a failure.
+ self.valgrind = False # True means run under valgrind
+ self.tz_pacific = False # True means force Pacific time for the test
+ self.test_also_noasmjs = False # True means run with and without asm.js
+ # enabled.
+        self.test_also_wasm_baseline = False # True means run with and without
+ # wasm baseline compiler enabled.
+ self.test_also = [] # List of other configurations to test with.
+ self.test_join = [] # List of other configurations to test with all existing variants.
+ self.expect_error = '' # Errors to expect and consider passing
+ self.expect_status = 0 # Exit status to expect from shell
+        self.expect_crash = False # True means the test is expected to crash.
+ self.is_module = False
+ self.test_reflect_stringify = None # Reflect.stringify implementation to test
+
+ # Expected by the test runner. Always true for jit-tests.
+ self.enable = True
+
+ def copy(self):
+ t = JitTest(self.path)
+ t.jitflags = self.jitflags[:]
+ t.slow = self.slow
+ t.allow_oom = self.allow_oom
+ t.allow_unhandlable_oom = self.allow_unhandlable_oom
+ t.allow_overrecursed = self.allow_overrecursed
+ t.valgrind = self.valgrind
+ t.tz_pacific = self.tz_pacific
+ t.test_also_noasmjs = self.test_also_noasmjs
+        t.test_also_wasm_baseline = self.test_also_wasm_baseline
+ t.test_also = self.test_also
+ t.test_join = self.test_join
+ t.expect_error = self.expect_error
+ t.expect_status = self.expect_status
+ t.expect_crash = self.expect_crash
+ t.test_reflect_stringify = self.test_reflect_stringify
+ t.enable = True
+ t.is_module = self.is_module
+ return t
+
+ def copy_and_extend_jitflags(self, variant):
+ t = self.copy()
+ t.jitflags.extend(variant)
+ return t
+
+ def copy_variants(self, variants):
+ # Append variants to be tested in addition to the current set of tests.
+ variants = variants + self.test_also
+
+        # For each existing variant, duplicate it for each list of options in
+        # test_join. This doubles the number of variants for each set of
+        # options.
+ for join_opts in self.test_join:
+            variants = variants + [opts + join_opts for opts in variants]
+
+ # For each list of jit flags, make a copy of the test.
+ return [self.copy_and_extend_jitflags(v) for v in variants]
+
+
+ COOKIE = '|jit-test|'
+ CacheDir = JS_CACHE_DIR
+ Directives = {}
+
+ @classmethod
+ def find_directives(cls, file_name):
+ meta = ''
+ line = open(file_name).readline()
+ i = line.find(cls.COOKIE)
+ if i != -1:
+ meta = ';' + line[i + len(cls.COOKIE):].strip('\n')
+ return meta
+
+ @classmethod
+ def from_file(cls, path, options):
+ test = cls(path)
+
+ # If directives.txt exists in the test's directory then it may
+        # contain meta-information that will be concatenated with
+ # whatever's in the test file. The form of the directive in
+ # the directive file is the same as in the test file. Only
+ # the first line is considered, just as for the test file.
+
+ dir_meta = ''
+ dir_name = os.path.dirname(path)
+ if dir_name in cls.Directives:
+ dir_meta = cls.Directives[dir_name]
+ else:
+ meta_file_name = os.path.join(dir_name, "directives.txt")
+ if os.path.exists(meta_file_name):
+ dir_meta = cls.find_directives(meta_file_name)
+ cls.Directives[dir_name] = dir_meta
+
+ meta = cls.find_directives(path)
+ if meta != '' or dir_meta != '':
+ meta = meta + dir_meta
+ parts = meta.split(';')
+ for part in parts:
+ part = part.strip()
+ if not part:
+ continue
+ name, _, value = part.partition(':')
+ if value:
+ value = value.strip()
+ if name == 'error':
+ test.expect_error = value
+ elif name == 'exitstatus':
+ try:
+ test.expect_status = int(value, 0)
+ except ValueError:
+ print("warning: couldn't parse exit status"
+ " {}".format(value))
+ elif name == 'thread-count':
+ try:
+ test.jitflags.append('--thread-count={}'.format(
+ int(value, 0)))
+ except ValueError:
+ print("warning: couldn't parse thread-count"
+ " {}".format(value))
+ else:
+ print('{}: warning: unrecognized |jit-test| attribute'
+ ' {}'.format(path, part))
+ else:
+ if name == 'slow':
+ test.slow = True
+ elif name == 'allow-oom':
+ test.allow_oom = True
+ elif name == 'allow-unhandlable-oom':
+ test.allow_unhandlable_oom = True
+ elif name == 'allow-overrecursed':
+ test.allow_overrecursed = True
+ elif name == 'valgrind':
+ test.valgrind = options.valgrind
+ elif name == 'tz-pacific':
+ test.tz_pacific = True
+ elif name == 'test-also-noasmjs':
+ if options.asmjs_enabled:
+ test.test_also.append(['--no-asmjs'])
+ elif name == 'test-also-wasm-baseline':
+ if options.wasm_enabled:
+ test.test_also.append(['--wasm-always-baseline'])
+ elif name == 'test-also-wasm-check-bce':
+ if options.wasm_enabled:
+ test.test_also.append(['--wasm-check-bce'])
+ elif name.startswith('test-also='):
+ test.test_also.append([name[len('test-also='):]])
+ elif name.startswith('test-join='):
+ test.test_join.append([name[len('test-join='):]])
+ elif name == 'module':
+ test.is_module = True
+ elif name == 'crash':
+ test.expect_crash = True
+ elif name.startswith('--'):
+ # // |jit-test| --ion-gvn=off; --no-sse4
+ test.jitflags.append(name)
+ else:
+ print('{}: warning: unrecognized |jit-test| attribute'
+ ' {}'.format(path, part))
+
+ if options.valgrind_all:
+ test.valgrind = True
+
+ if options.test_reflect_stringify is not None:
+ test.expect_error = ''
+ test.expect_status = 0
+
+ return test
+
+ def command(self, prefix, libdir, moduledir, remote_prefix=None):
+ path = self.path
+ if remote_prefix:
+ path = self.path.replace(TEST_DIR, remote_prefix)
+
+ scriptdir_var = os.path.dirname(path)
+ if not scriptdir_var.endswith('/'):
+ scriptdir_var += '/'
+
+ # Platforms where subprocess immediately invokes exec do not care
+        # whether we use double or single quotes. On Windows and when using
+ # a remote device, however, we have to be careful to use the quote
+ # style that is the opposite of what the exec wrapper uses.
+ if remote_prefix:
+ quotechar = '"'
+ else:
+ quotechar = "'"
+ expr = "const platform={}; const libdir={}; const scriptdir={}".format(
+ js_quote(quotechar, sys.platform),
+ js_quote(quotechar, libdir),
+ js_quote(quotechar, scriptdir_var))
+
+ # We may have specified '-a' or '-d' twice: once via --jitflags, once
+ # via the "|jit-test|" line. Remove dups because they are toggles.
+ cmd = prefix + ['--js-cache', JitTest.CacheDir]
+ cmd += list(set(self.jitflags)) + ['-e', expr]
+ if self.is_module:
+ cmd += ['--module-load-path', moduledir]
+ cmd += ['--module', path]
+ elif self.test_reflect_stringify is None:
+ cmd += ['-f', path]
+ else:
+ cmd += ['--', self.test_reflect_stringify, "--check", path]
+ if self.valgrind:
+ cmd = self.VALGRIND_CMD + cmd
+ return cmd
+
+    # The test runner expects this to be set so it can pass it to get_command.
+ js_cmd_prefix = None
+ def get_command(self, prefix):
+ """Shim for the test runner."""
+ return self.command(prefix, LIB_DIR, MODULE_DIR)
+
+
+def find_tests(substring=None):
+ ans = []
+ for dirpath, dirnames, filenames in os.walk(TEST_DIR):
+ dirnames.sort()
+ filenames.sort()
+ if dirpath == '.':
+ continue
+ for filename in filenames:
+ if not filename.endswith('.js'):
+ continue
+ if filename in ('shell.js', 'browser.js'):
+ continue
+ test = os.path.join(dirpath, filename)
+ if substring is None \
+ or substring in os.path.relpath(test, TEST_DIR):
+ ans.append(test)
+ return ans
+
+def run_test_remote(test, device, prefix, options):
+ if options.test_reflect_stringify:
+ raise ValueError("can't run Reflect.stringify tests remotely")
+ cmd = test.command(prefix,
+ posixpath.join(options.remote_test_root, 'lib/'),
+ posixpath.join(options.remote_test_root, 'modules/'),
+ posixpath.join(options.remote_test_root, 'tests'))
+ if options.show_cmd:
+ print(subprocess.list2cmdline(cmd))
+
+ env = {}
+ if test.tz_pacific:
+ env['TZ'] = 'PST8PDT'
+
+ env['LD_LIBRARY_PATH'] = options.remote_test_root
+
+ buf = StringIO.StringIO()
+ returncode = device.shell(cmd, buf, env=env, cwd=options.remote_test_root,
+ timeout=int(options.timeout))
+
+ out = buf.getvalue()
+ # We can't distinguish between stdout and stderr so we pass
+ # the same buffer to both.
+ return TestOutput(test, cmd, out, out, returncode, None, False)
+
+def check_output(out, err, rc, timed_out, test, options):
+ if timed_out:
+ if test.relpath_tests in options.ignore_timeouts:
+ return True
+
+ # The shell sometimes hangs on shutdown on Windows 7 and Windows
+ # Server 2008. See bug 970063 comment 7 for a description of the
+ # problem. Until bug 956899 is fixed, ignore timeouts on these
+ # platforms (versions 6.0 and 6.1).
+ if sys.platform == 'win32':
+ ver = sys.getwindowsversion()
+ if ver.major == 6 and ver.minor <= 1:
+ return True
+ return False
+
+ if test.expect_error:
+ # The shell exits with code 3 on uncaught exceptions.
+ # Sometimes 0 is returned on Windows for unknown reasons.
+ # See bug 899697.
+ if sys.platform in ['win32', 'cygwin']:
+ if rc != 3 and rc != 0:
+ return False
+ else:
+ if rc != 3:
+ return False
+
+ return test.expect_error in err
+
+ for line in out.split('\n'):
+ if line.startswith('Trace stats check failed'):
+ return False
+
+ for line in err.split('\n'):
+ if 'Assertion failed:' in line:
+ return False
+
+ if test.expect_crash:
+ if sys.platform == 'win32' and rc == 3 - 2 ** 31:
+ return True
+
+ if sys.platform != 'win32' and rc == -11:
+ return True
+
+        # When building with ASan enabled, ASan will convert the -11 return
+        # value to 1. As a workaround we look for the error output, which
+        # includes the crash reason.
+ if rc == 1 and ("Hit MOZ_CRASH" in err or "Assertion failure:" in err):
+ return True
+
+ if rc != test.expect_status:
+ # Tests which expect a timeout check for exit code 6.
+ # Sometimes 0 is returned on Windows for unknown reasons.
+ # See bug 899697.
+ if sys.platform in ['win32', 'cygwin'] and rc == 0:
+ return True
+
+ # Allow a non-zero exit code if we want to allow OOM, but only if we
+ # actually got OOM.
+ if test.allow_oom and 'out of memory' in err \
+ and 'Assertion failure' not in err and 'MOZ_CRASH' not in err:
+ return True
+
+ # Allow a non-zero exit code if we want to allow unhandlable OOM, but
+ # only if we actually got unhandlable OOM.
+ if test.allow_unhandlable_oom \
+ and 'Assertion failure: [unhandlable oom]' in err:
+ return True
+
+        # Allow a non-zero exit code if we want to allow too-much-recursion
+        # and the test actually over-recursed.
+        if test.allow_overrecursed and 'too much recursion' in err \
+            and 'Assertion failure' not in err:
+            return True
+
+ return False
+
+ return True
+
+def print_automation_format(ok, res):
+    # Output test failures in a parsable format suitable for automation, e.g.:
+ # TEST-RESULT | filename.js | Failure description (code N, args "--foobar")
+ #
+ # Example:
+ # TEST-PASS | foo/bar/baz.js | (code 0, args "--ion-eager")
+ # TEST-UNEXPECTED-FAIL | foo/bar/baz.js | TypeError: or something (code -9, args "--no-ion")
+ # INFO exit-status : 3
+ # INFO timed-out : False
+ # INFO stdout > foo
+ # INFO stdout > bar
+ # INFO stdout > baz
+ # INFO stderr 2> TypeError: or something
+ # TEST-UNEXPECTED-FAIL | jit_test.py: Test execution interrupted by user
+ result = "TEST-PASS" if ok else "TEST-UNEXPECTED-FAIL"
+ message = "Success" if ok else res.describe_failure()
+ jitflags = " ".join(res.test.jitflags)
+ print("{} | {} | {} (code {}, args \"{}\")".format(
+ result, res.test.relpath_top, message, res.rc, jitflags))
+
+ # For failed tests, print as much information as we have, to aid debugging.
+ if ok:
+ return
+ print("INFO exit-status : {}".format(res.rc))
+ print("INFO timed-out : {}".format(res.timed_out))
+ for line in res.out.splitlines():
+ print("INFO stdout > " + line.strip())
+ for line in res.err.splitlines():
+ print("INFO stderr 2> " + line.strip())
+
+def print_test_summary(num_tests, failures, complete, doing, options):
+ if failures:
+ if options.write_failures:
+ try:
+ out = open(options.write_failures, 'w')
+ # Don't write duplicate entries when we are doing multiple
+ # failures per job.
+ written = set()
+ for res in failures:
+ if res.test.path not in written:
+ out.write(os.path.relpath(res.test.path, TEST_DIR)
+ + '\n')
+ if options.write_failure_output:
+ out.write(res.out)
+ out.write(res.err)
+ out.write('Exit code: ' + str(res.rc) + "\n")
+ written.add(res.test.path)
+ out.close()
+ except IOError:
+ sys.stderr.write("Exception thrown trying to write failure"
+ " file '{}'\n".format(options.write_failures))
+ traceback.print_exc()
+ sys.stderr.write('---\n')
+
+ def show_test(res):
+ if options.show_failed:
+ print(' ' + subprocess.list2cmdline(res.cmd))
+ else:
+ print(' ' + ' '.join(res.test.jitflags + [res.test.path]))
+
+ print('FAILURES:')
+ for res in failures:
+ if not res.timed_out:
+ show_test(res)
+
+ print('TIMEOUTS:')
+ for res in failures:
+ if res.timed_out:
+ show_test(res)
+ else:
+ print('PASSED ALL'
+ + ('' if complete
+ else ' (partial run -- interrupted by user {})'.format(doing)))
+
+ if options.format == 'automation':
+ num_failures = len(failures) if failures else 0
+ print('Result summary:')
+ print('Passed: {:d}'.format(num_tests - num_failures))
+ print('Failed: {:d}'.format(num_failures))
+
+ return not failures
+
+def create_progressbar(num_tests, options):
+ if not options.hide_progress and not options.show_cmd \
+ and ProgressBar.conservative_isatty():
+ fmt = [
+ {'value': 'PASS', 'color': 'green'},
+ {'value': 'FAIL', 'color': 'red'},
+ {'value': 'TIMEOUT', 'color': 'blue'},
+ {'value': 'SKIP', 'color': 'brightgray'},
+ ]
+ return ProgressBar(num_tests, fmt)
+ return NullProgressBar()
+
+def process_test_results(results, num_tests, pb, options):
+ failures = []
+ timeouts = 0
+ complete = False
+ output_dict = {}
+ doing = 'before starting'
+
+ if num_tests == 0:
+ pb.finish(True)
+ complete = True
+ return print_test_summary(num_tests, failures, complete, doing, options)
+
+ try:
+ for i, res in enumerate(results):
+ ok = check_output(res.out, res.err, res.rc, res.timed_out,
+ res.test, options)
+
+ if ok:
+ show_output = options.show_output and not options.failed_only
+ else:
+ show_output = options.show_output or not options.no_show_failed
+
+ if show_output:
+ pb.beginline()
+ sys.stdout.write(res.out)
+ sys.stdout.write(res.err)
+ sys.stdout.write('Exit code: {}\n'.format(res.rc))
+
+ if res.test.valgrind and not show_output:
+ pb.beginline()
+ sys.stdout.write(res.err)
+
+ if options.check_output:
+ if res.test.path in output_dict.keys():
+ if output_dict[res.test.path] != res.out:
+ pb.message("FAIL - OUTPUT DIFFERS {}".format(res.test.relpath_tests))
+ else:
+ output_dict[res.test.path] = res.out
+
+ doing = 'after {}'.format(res.test.relpath_tests)
+ if not ok:
+ failures.append(res)
+ if res.timed_out:
+ pb.message("TIMEOUT - {}".format(res.test.relpath_tests))
+ timeouts += 1
+ else:
+ pb.message("FAIL - {}".format(res.test.relpath_tests))
+
+ if options.format == 'automation':
+ print_automation_format(ok, res)
+
+ n = i + 1
+ pb.update(n, {
+ 'PASS': n - len(failures),
+ 'FAIL': len(failures),
+ 'TIMEOUT': timeouts,
+ 'SKIP': 0
+ })
+ complete = True
+ except KeyboardInterrupt:
+ print("TEST-UNEXPECTED-FAIL | jit_test.py" +
+ " : Test execution interrupted by user")
+
+ pb.finish(True)
+ return print_test_summary(num_tests, failures, complete, doing, options)
+
+def run_tests(tests, num_tests, prefix, options):
+    # The jstests tasks runner requires the following options. The names are
+    # taken from the jstests option-processing code and are frequently subtly
+    # different from the options jit-tests expects. As such, we wrap them
+    # here as needed.
+ AdaptorOptions = namedtuple("AdaptorOptions", [
+ "worker_count", "passthrough", "timeout", "output_fp",
+ "hide_progress", "run_skipped", "show_cmd"])
+ shim_options = AdaptorOptions(options.max_jobs, False, options.timeout,
+ sys.stdout, False, True, options.show_cmd)
+
+ # The test runner wants the prefix as a static on the Test class.
+ JitTest.js_cmd_prefix = prefix
+
+ pb = create_progressbar(num_tests, options)
+ gen = run_all_tests(tests, prefix, pb, shim_options)
+ ok = process_test_results(gen, num_tests, pb, options)
+ return ok
+
+def get_remote_results(tests, device, prefix, options):
+ from mozdevice import devicemanager
+
+ try:
+ for i in xrange(0, options.repeat):
+ for test in tests:
+ yield run_test_remote(test, device, prefix, options)
+ except devicemanager.DMError as e:
+ # After a devicemanager error, the device is typically in a
+ # state where all further tests will fail so there is no point in
+ # continuing here.
+ sys.stderr.write("Error running remote tests: {}".format(e.message))
+
+def push_libs(options, device):
+    # Pushing only the libraries we actually need saves considerable time, but
+    # this list must be updated if the dependencies change.
+ required_libs = ['libnss3.so', 'libmozglue.so', 'libnspr4.so',
+ 'libplc4.so', 'libplds4.so']
+
+ for file in os.listdir(options.local_lib):
+ if file in required_libs:
+ remote_file = posixpath.join(options.remote_test_root, file)
+ device.pushFile(os.path.join(options.local_lib, file), remote_file)
+
+def push_progs(options, device, progs):
+ for local_file in progs:
+ remote_file = posixpath.join(options.remote_test_root,
+ os.path.basename(local_file))
+ device.pushFile(local_file, remote_file)
+
+def run_tests_remote(tests, num_tests, prefix, options):
+ # Setup device with everything needed to run our tests.
+ from mozdevice import devicemanagerADB, devicemanagerSUT
+
+ if options.device_transport == 'adb':
+ if options.device_ip:
+ dm = devicemanagerADB.DeviceManagerADB(
+ options.device_ip, options.device_port,
+ deviceSerial=options.device_serial,
+ packageName=None,
+ deviceRoot=options.remote_test_root)
+ else:
+ dm = devicemanagerADB.DeviceManagerADB(
+ deviceSerial=options.device_serial,
+ packageName=None,
+ deviceRoot=options.remote_test_root)
+    else:
+        if options.device_ip is None:
+            print('Error: you must provide a device IP to connect to via the'
+                  ' --device option')
+            sys.exit(1)
+        dm = devicemanagerSUT.DeviceManagerSUT(
+            options.device_ip, options.device_port,
+            deviceRoot=options.remote_test_root)
+
+ # Update the test root to point to our test directory.
+ jit_tests_dir = posixpath.join(options.remote_test_root, 'jit-tests')
+ options.remote_test_root = posixpath.join(jit_tests_dir, 'jit-tests')
+
+ # Push js shell and libraries.
+ if dm.dirExists(jit_tests_dir):
+ dm.removeDir(jit_tests_dir)
+ dm.mkDirs(options.remote_test_root)
+ push_libs(options, dm)
+ push_progs(options, dm, [prefix[0]])
+ dm.chmodDir(options.remote_test_root)
+
+ JitTest.CacheDir = posixpath.join(options.remote_test_root, '.js-cache')
+ dm.mkDir(JitTest.CacheDir)
+
+ dm.pushDir(JS_TESTS_DIR, posixpath.join(jit_tests_dir, 'tests'),
+ timeout=600)
+
+ dm.pushDir(os.path.dirname(TEST_DIR), options.remote_test_root,
+ timeout=600)
+ prefix[0] = os.path.join(options.remote_test_root, 'js')
+
+ # Run all tests.
+ pb = create_progressbar(num_tests, options)
+ gen = get_remote_results(tests, dm, prefix, options)
+ ok = process_test_results(gen, num_tests, pb, options)
+ return ok
+
+def platform_might_be_android():
+ try:
+ # The python package for SL4A provides an |android| module.
+ # If that module is present, we're likely in SL4A-python on
+ # device. False positives and negatives are possible,
+ # however.
+ import android
+ return True
+ except ImportError:
+ return False
+
+def stdio_might_be_broken():
+ return platform_might_be_android()
+
+if __name__ == '__main__':
+ print('Use ../jit-test/jit_test.py to run these tests.')
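# A minimal sketch (not part of the patch; the directive contents are invented
# for illustration) of how JitTest.from_file above splits a |jit-test| line
# into attributes:
line = '// |jit-test| allow-oom; exitstatus: 3; --ion-eager'
cookie = '|jit-test|'
meta = ';' + line[line.find(cookie) + len(cookie):].strip('\n')
for part in meta.split(';'):
    part = part.strip()
    if not part:
        continue
    name, _, value = part.partition(':')
    # Yields ('allow-oom', ''), ('exitstatus', '3') and ('--ion-eager', '').
    print((name, value.strip()))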
diff --git a/js/src/tests/lib/manifest.py b/js/src/tests/lib/manifest.py
new file mode 100644
index 0000000000..7079a186f6
--- /dev/null
+++ b/js/src/tests/lib/manifest.py
@@ -0,0 +1,395 @@
+# Library for JSTest manifests.
+#
+# This includes classes for representing and parsing JS manifests.
+
+from __future__ import print_function
+
+import os, re, sys
+from subprocess import Popen, PIPE
+
+from tests import RefTestCase
+
+
+def split_path_into_dirs(path):
+ dirs = [path]
+
+ while True:
+ path, tail = os.path.split(path)
+ if not tail:
+ break
+ dirs.append(path)
+ return dirs
+
+class XULInfo:
+ def __init__(self, abi, os, isdebug):
+ self.abi = abi
+ self.os = os
+ self.isdebug = isdebug
+ self.browserIsRemote = False
+
+ def as_js(self):
+ """Return JS that when executed sets up variables so that JS expression
+ predicates on XUL build info evaluate properly."""
+
+ return ('var xulRuntime = {{ OS: "{}", XPCOMABI: "{}", shell: true }};'
+ 'var isDebugBuild={}; var Android={}; '
+ 'var browserIsRemote={}'.format(
+ self.os,
+ self.abi,
+ str(self.isdebug).lower(),
+ str(self.os == "Android").lower(),
+ str(self.browserIsRemote).lower()))
+
+ @classmethod
+ def create(cls, jsdir):
+ """Create a XULInfo based on the current platform's characteristics."""
+
+ # Our strategy is to find the autoconf.mk generated for the build and
+ # read the values from there.
+
+ # Find config/autoconf.mk.
+ dirs = split_path_into_dirs(os.getcwd()) + split_path_into_dirs(jsdir)
+
+ path = None
+ for dir in dirs:
+ _path = os.path.join(dir, 'config/autoconf.mk')
+ if os.path.isfile(_path):
+ path = _path
+ break
+
+        if path is None:
+            print("Can't find config/autoconf.mk in a directory containing"
+                  " the JS shell (searched from {})".format(jsdir))
+ sys.exit(1)
+
+ # Read the values.
+ val_re = re.compile(r'(TARGET_XPCOM_ABI|OS_TARGET|MOZ_DEBUG)\s*=\s*(.*)')
+ kw = {'isdebug': False}
+ for line in open(path):
+ m = val_re.match(line)
+ if m:
+ key, val = m.groups()
+ val = val.rstrip()
+ if key == 'TARGET_XPCOM_ABI':
+ kw['abi'] = val
+ if key == 'OS_TARGET':
+ kw['os'] = val
+ if key == 'MOZ_DEBUG':
+ kw['isdebug'] = (val == '1')
+ return cls(**kw)
+
+class XULInfoTester:
+ def __init__(self, xulinfo, js_bin):
+ self.js_prologue = xulinfo.as_js()
+ self.js_bin = js_bin
+ # Maps JS expr to evaluation result.
+ self.cache = {}
+
+ def test(self, cond):
+ """Test a XUL predicate condition against this local info."""
+ ans = self.cache.get(cond, None)
+ if ans is None:
+ cmd = [
+ self.js_bin,
+                # Run in a safe configuration, since it is hard to debug
+                # crashes when running code here. In particular, MSan will
+                # error out if the JIT is active.
+ '--no-baseline',
+ '-e', self.js_prologue,
+ '-e', 'print(!!({}))'.format(cond)
+ ]
+ p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ out, err = p.communicate()
+ if out in ('true\n', 'true\r\n'):
+ ans = True
+ elif out in ('false\n', 'false\r\n'):
+ ans = False
+ else:
+ raise Exception("Failed to test XUL condition {!r};"
+ " output was {!r}, stderr was {!r}".format(
+ cond, out, err))
+ self.cache[cond] = ans
+ return ans
+
+class NullXULInfoTester:
+ """Can be used to parse manifests without a JS shell."""
+ def test(self, cond):
+ return False
+
+def _parse_one(testcase, xul_tester):
+ pos = 0
+ parts = testcase.terms.split()
+ while pos < len(parts):
+ if parts[pos] == 'fails':
+ testcase.expect = False
+ pos += 1
+ elif parts[pos] == 'skip':
+ testcase.expect = testcase.enable = False
+ pos += 1
+ elif parts[pos] == 'random':
+ testcase.random = True
+ pos += 1
+ elif parts[pos].startswith('fails-if'):
+ cond = parts[pos][len('fails-if('):-1]
+ if xul_tester.test(cond):
+ testcase.expect = False
+ pos += 1
+ elif parts[pos].startswith('asserts-if'):
+ # This directive means we may flunk some number of
+ # NS_ASSERTIONs in the browser. For the shell, ignore it.
+ pos += 1
+ elif parts[pos].startswith('skip-if'):
+ cond = parts[pos][len('skip-if('):-1]
+ if xul_tester.test(cond):
+ testcase.expect = testcase.enable = False
+ pos += 1
+ elif parts[pos].startswith('random-if'):
+ cond = parts[pos][len('random-if('):-1]
+ if xul_tester.test(cond):
+ testcase.random = True
+ pos += 1
+ elif parts[pos] == 'slow':
+ testcase.slow = True
+ pos += 1
+ elif parts[pos] == 'silentfail':
+ # silentfails use tons of memory, and Darwin doesn't support ulimit.
+ if xul_tester.test("xulRuntime.OS == 'Darwin'"):
+ testcase.expect = testcase.enable = False
+ pos += 1
+ else:
+ print('warning: invalid manifest line element "{}"'.format(
+ parts[pos]))
+ pos += 1
+
+def _build_manifest_script_entry(script_name, test):
+ line = []
+ if test.terms:
+ line.append(test.terms)
+ line.append("script")
+ line.append(script_name)
+ if test.comment:
+ line.append("#")
+ line.append(test.comment)
+ return ' '.join(line)
+
+def _map_prefixes_left(test_gen):
+ """
+ Splits tests into a dictionary keyed on the first component of the test
+ path, aggregating tests with a common base path into a list.
+ """
+ byprefix = {}
+ for t in test_gen:
+ left, sep, remainder = t.path.partition(os.sep)
+ if left not in byprefix:
+ byprefix[left] = []
+ if remainder:
+ t.path = remainder
+ byprefix[left].append(t)
+ return byprefix
+
+def _emit_manifest_at(location, relative, test_gen, depth):
+ """
+ location - str: absolute path where we want to write the manifest
+ relative - str: relative path from topmost manifest directory to current
+    test_gen - (str): generator of all test paths and directories
+ depth - int: number of dirs we are below the topmost manifest dir
+ """
+ manifests = _map_prefixes_left(test_gen)
+
+ filename = os.path.join(location, 'jstests.list')
+ manifest = []
+ numTestFiles = 0
+ for k, test_list in manifests.iteritems():
+ fullpath = os.path.join(location, k)
+ if os.path.isdir(fullpath):
+ manifest.append("include " + k + "/jstests.list")
+ relpath = os.path.join(relative, k)
+ _emit_manifest_at(fullpath, relpath, test_list, depth + 1)
+ else:
+ numTestFiles += 1
+ if len(test_list) != 1:
+ import pdb; pdb.set_trace()
+ assert len(test_list) == 1
+ line = _build_manifest_script_entry(k, test_list[0])
+ manifest.append(line)
+
+ # Always present our manifest in sorted order.
+ manifest.sort()
+
+ # If we have tests, we have to set the url-prefix so reftest can find them.
+ if numTestFiles > 0:
+ manifest = ["url-prefix {}jsreftest.html?test={}/".format(
+ '../' * depth, relative)] + manifest
+
+ fp = open(filename, 'w')
+ try:
+ fp.write('\n'.join(manifest) + '\n')
+ finally:
+ fp.close()
+
+def make_manifests(location, test_gen):
+ _emit_manifest_at(location, '', test_gen, 0)
+
+def _find_all_js_files(base, location):
+ for root, dirs, files in os.walk(location):
+ root = root[len(base) + 1:]
+ for fn in files:
+ if fn.endswith('.js'):
+ yield root, fn
+
+TEST_HEADER_PATTERN_INLINE = re.compile(r'//\s*\|(.*?)\|\s*(.*?)\s*(--\s*(.*))?$')
+TEST_HEADER_PATTERN_MULTI = re.compile(r'/\*\s*\|(.*?)\|\s*(.*?)\s*(--\s*(.*))?\*/')
+
+def _parse_test_header(fullpath, testcase, xul_tester):
+ """
+    This looks a bit odd, but it needs to be efficient since it runs on every
+    test: only the first 512 bytes of the file, and only its first line, are
+    examined.
+ """
+ fp = open(fullpath, 'r')
+ try:
+ buf = fp.read(512)
+ finally:
+ fp.close()
+
+ # Bail early if we do not start with a single comment.
+ if not buf.startswith("//"):
+ return
+
+ # Extract the token.
+ buf, _, _ = buf.partition('\n')
+ matches = TEST_HEADER_PATTERN_INLINE.match(buf)
+
+ if not matches:
+ matches = TEST_HEADER_PATTERN_MULTI.match(buf)
+ if not matches:
+ return
+
+ testcase.tag = matches.group(1)
+ testcase.terms = matches.group(2)
+ testcase.comment = matches.group(4)
+
+ _parse_one(testcase, xul_tester)
+
+def _parse_external_manifest(filename, relpath):
+ """
+ Reads an external manifest file for test suites whose individual test cases
+ can't be decorated with reftest comments.
+ filename - str: name of the manifest file
+ relpath - str: relative path of the directory containing the manifest
+ within the test suite
+ """
+ entries = []
+
+ with open(filename, 'r') as fp:
+ manifest_re = re.compile(r'^\s*(.*)\s+(include|script)\s+(\S+)$')
+ for line in fp:
+ line, _, comment = line.partition('#')
+ line = line.strip()
+ if not line:
+ continue
+ matches = manifest_re.match(line)
+ if not matches:
+ print('warning: unrecognized line in jstests.list:'
+ ' {0}'.format(line))
+ continue
+
+ path = os.path.normpath(os.path.join(relpath, matches.group(3)))
+ if matches.group(2) == 'include':
+ # The manifest spec wants a reference to another manifest here,
+ # but we need just the directory. We do need the trailing
+ # separator so we don't accidentally match other paths of which
+ # this one is a prefix.
+ assert(path.endswith('jstests.list'))
+ path = path[:-len('jstests.list')]
+
+ entries.append({'path': path, 'terms': matches.group(1),
+ 'comment': comment.strip()})
+
+ # if one directory name is a prefix of another, we want the shorter one
+ # first
+ entries.sort(key=lambda x: x["path"])
+ return entries
+
+def _apply_external_manifests(filename, testcase, entries, xul_tester):
+ for entry in entries:
+ if filename.startswith(entry["path"]):
+ # The reftest spec would require combining the terms (failure types)
+ # that may already be defined in the test case with the terms
+ # specified in entry; for example, a skip overrides a random, which
+ # overrides a fails. Since we don't necessarily know yet in which
+ # environment the test cases will be run, we'd also have to
+ # consider skip-if, random-if, and fails-if with as-yet unresolved
+ # conditions.
+ # At this point, we use external manifests only for test cases
+ # that can't have their own failure type comments, so we simply
+ # use the terms for the most specific path.
+ testcase.terms = entry["terms"]
+ testcase.comment = entry["comment"]
+ _parse_one(testcase, xul_tester)
+
+def _is_test_file(path_from_root, basename, filename, requested_paths,
+ excluded_paths):
+ # Any file whose basename matches something in this set is ignored.
+ EXCLUDED = set(('browser.js', 'shell.js', 'template.js',
+ 'user.js', 'sta.js',
+ 'test262-browser.js', 'test262-shell.js',
+ 'test402-browser.js', 'test402-shell.js',
+ 'testBuiltInObject.js', 'testIntl.js',
+ 'js-test-driver-begin.js', 'js-test-driver-end.js'))
+
+ # Skip js files in the root test directory.
+ if not path_from_root:
+ return False
+
+ # Skip files that we know are not tests.
+ if basename in EXCLUDED:
+ return False
+
+ # If any tests are requested by name, skip tests that do not match.
+ if requested_paths \
+ and not any(req in filename for req in requested_paths):
+ return False
+
+ # Skip excluded tests.
+ if filename in excluded_paths:
+ return False
+
+ return True
+
+
+def count_tests(location, requested_paths, excluded_paths):
+ count = 0
+ for root, basename in _find_all_js_files(location, location):
+ filename = os.path.join(root, basename)
+ if _is_test_file(root, basename, filename, requested_paths, excluded_paths):
+ count += 1
+ return count
+
+
+def load_reftests(location, requested_paths, excluded_paths, xul_tester, reldir=''):
+ """
+ Locates all tests by walking the filesystem starting at |location|.
+ Uses xul_tester to evaluate any test conditions in the test header.
+ Failure type and comment for a test case can come from
+ - an external manifest entry for the test case,
+ - an external manifest entry for a containing directory,
+ - most commonly: the header of the test case itself.
+ """
+ manifestFile = os.path.join(location, 'jstests.list')
+ externalManifestEntries = _parse_external_manifest(manifestFile, '')
+
+ for root, basename in _find_all_js_files(location, location):
+ # Get the full path and relative location of the file.
+ filename = os.path.join(root, basename)
+ if not _is_test_file(root, basename, filename, requested_paths, excluded_paths):
+ continue
+
+ # Skip empty files.
+ fullpath = os.path.join(location, filename)
+ statbuf = os.stat(fullpath)
+
+ testcase = RefTestCase(os.path.join(reldir, filename))
+ _apply_external_manifests(filename, testcase, externalManifestEntries,
+ xul_tester)
+ _parse_test_header(fullpath, testcase, xul_tester)
+ yield testcase
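# A minimal sketch (not part of the patch; the test name and annotation are
# invented for illustration) of the jstests.list line format that
# _parse_external_manifest above recognizes:
import re
manifest_re = re.compile(r'^\s*(.*)\s+(include|script)\s+(\S+)$')
line = 'slow script hypothetical-test.js # hypothetical annotation'
line, _, comment = line.partition('#')
m = manifest_re.match(line.strip())
# Yields terms='slow', kind='script', path='hypothetical-test.js'.
print((m.group(1), m.group(2), m.group(3), comment.strip()))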
diff --git a/js/src/tests/lib/progressbar.py b/js/src/tests/lib/progressbar.py
new file mode 100644
index 0000000000..29361660d2
--- /dev/null
+++ b/js/src/tests/lib/progressbar.py
@@ -0,0 +1,116 @@
+# Text progress bar library, like curl or scp.
+
+from datetime import datetime, timedelta
+import math
+import sys
+
+if sys.platform.startswith('win'):
+ from terminal_win import Terminal
+else:
+ from terminal_unix import Terminal
+
+class NullProgressBar(object):
+ def update(self, current, data): pass
+ def poke(self): pass
+ def finish(self, complete=True): pass
+ def beginline(self): pass
+ def message(self, msg): sys.stdout.write(msg + '\n')
+ @staticmethod
+ def update_granularity(): return timedelta.max
+
+class ProgressBar(object):
+ def __init__(self, limit, fmt):
+ assert self.conservative_isatty()
+
+ self.prior = None
+ self.atLineStart = True
+        self.counters_fmt = fmt # [{str:str}] Description of how to lay out each
+ # field in the counters map.
+ self.limit = limit # int: The value of 'current' equal to 100%.
+ self.limit_digits = int(math.ceil(math.log10(self.limit))) # int: max digits in limit
+ self.t0 = datetime.now() # datetime: The start time.
+
+ # Compute the width of the counters and build the format string.
+ self.counters_width = 1 # [
+ for layout in self.counters_fmt:
+ self.counters_width += self.limit_digits
+ self.counters_width += 1 # | (or ']' for the last one)
+
+ self.barlen = 64 - self.counters_width
+
+ @staticmethod
+ def update_granularity():
+ return timedelta(seconds=0.1)
+
+ def update(self, current, data):
+ # Record prior for poke.
+ self.prior = (current, data)
+ self.atLineStart = False
+
+ # Build counters string.
+ sys.stdout.write('\r[')
+ for layout in self.counters_fmt:
+ Terminal.set_color(layout['color'])
+ sys.stdout.write(('{:' + str(self.limit_digits) + 'd}').format(
+ data[layout['value']]))
+ Terminal.reset_color()
+ if layout != self.counters_fmt[-1]:
+ sys.stdout.write('|')
+ else:
+ sys.stdout.write('] ')
+
+ # Build the bar.
+ pct = int(100.0 * current / self.limit)
+ sys.stdout.write('{:3d}% '.format(pct))
+
+ barlen = int(1.0 * self.barlen * current / self.limit) - 1
+ bar = '=' * barlen + '>' + ' ' * (self.barlen - barlen - 1)
+ sys.stdout.write(bar + '|')
+
+ # Update the bar.
+ dt = datetime.now() - self.t0
+ dt = dt.seconds + dt.microseconds * 1e-6
+ sys.stdout.write('{:6.1f}s'.format(dt))
+ Terminal.clear_right()
+
+ # Force redisplay, since we didn't write a \n.
+ sys.stdout.flush()
+
+ def poke(self):
+ if not self.prior:
+ return
+ self.update(*self.prior)
+
+ def finish(self, complete=True):
+ if not self.prior:
+            sys.stdout.write('No tests were run. You can try adding'
+ ' --run-slow-tests or --run-skipped to run more tests\n')
+ return
+ final_count = self.limit if complete else self.prior[0]
+ self.update(final_count, self.prior[1])
+ sys.stdout.write('\n')
+
+ def beginline(self):
+ if not self.atLineStart:
+ sys.stdout.write('\n')
+ self.atLineStart = True
+
+ def message(self, msg):
+ self.beginline()
+ sys.stdout.write(msg)
+ sys.stdout.write('\n')
+
+ @staticmethod
+ def conservative_isatty():
+ """
+ Prefer erring on the side of caution and not using terminal commands if
+ the current output stream may be a file. We explicitly check for the
+ Android platform because terminal commands work poorly over ADB's
+ redirection.
+ """
+ try:
+ import android
+ return False
+ except ImportError:
+ return sys.stdout.isatty()
+ return False
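# A minimal sketch (not part of the patch; the test count is invented) of the
# layout arithmetic ProgressBar.__init__ above performs for the four-field
# PASS|FAIL|TIMEOUT|SKIP format:
import math
limit = 720
limit_digits = int(math.ceil(math.log10(limit)))  # 3
counters_width = 1 + 4 * (limit_digits + 1)       # '[' plus four fields -> 17
barlen = 64 - counters_width                      # 47 columns left for the bar
print((limit_digits, counters_width, barlen))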
diff --git a/js/src/tests/lib/results.py b/js/src/tests/lib/results.py
new file mode 100644
index 0000000000..451fe5b657
--- /dev/null
+++ b/js/src/tests/lib/results.py
@@ -0,0 +1,278 @@
+from __future__ import print_function
+
+import re
+from progressbar import NullProgressBar, ProgressBar
+import pipes
+
+# subprocess.list2cmdline does not properly escape for sh-like shells
+def escape_cmdline(args):
+ return ' '.join([pipes.quote(a) for a in args])
+
+class TestOutput:
+ """Output from a test run."""
+ def __init__(self, test, cmd, out, err, rc, dt, timed_out):
+ self.test = test # Test
+ self.cmd = cmd # str: command line of test
+ self.out = out # str: stdout
+ self.err = err # str: stderr
+ self.rc = rc # int: return code
+ self.dt = dt # float: run time
+ self.timed_out = timed_out # bool: did the test time out
+
+ def describe_failure(self):
+ if self.timed_out:
+ return "Timeout"
+ lines = self.err.splitlines()
+ for line in lines:
+ # Skip the asm.js compilation success message.
+ if "Successfully compiled asm.js code" not in line:
+ return line
+ return "Unknown"
+
+class NullTestOutput:
+ """Variant of TestOutput that indicates a test was not run."""
+ def __init__(self, test):
+ self.test = test
+ self.cmd = ''
+ self.out = ''
+ self.err = ''
+ self.rc = 0
+ self.dt = 0.0
+ self.timed_out = False
+
+class TestResult:
+ PASS = 'PASS'
+ FAIL = 'FAIL'
+ CRASH = 'CRASH'
+
+ """Classified result from a test run."""
+ def __init__(self, test, result, results):
+ self.test = test
+ self.result = result
+ self.results = results
+
+ @classmethod
+ def from_output(cls, output):
+ test = output.test
+ result = None # str: overall result, see class-level variables
+ results = [] # (str,str) list: subtest results (pass/fail, message)
+
+ out, rc = output.out, output.rc
+
+ failures = 0
+ passes = 0
+
+ expected_rcs = []
+ if test.path.endswith('-n.js'):
+ expected_rcs.append(3)
+
+ for line in out.split('\n'):
+ if line.startswith(' FAILED!'):
+ failures += 1
+ msg = line[len(' FAILED! '):]
+ results.append((cls.FAIL, msg))
+ elif line.startswith(' PASSED!'):
+ passes += 1
+ msg = line[len(' PASSED! '):]
+ results.append((cls.PASS, msg))
+ else:
+ m = re.match('--- NOTE: IN THIS TESTCASE, WE EXPECT EXIT CODE'
+ ' ((?:-|\\d)+) ---', line)
+ if m:
+ expected_rcs.append(int(m.group(1)))
+
+        if rc and rc not in expected_rcs:
+ if rc == 3:
+ result = cls.FAIL
+ else:
+ result = cls.CRASH
+ else:
+ if (rc or passes > 0) and failures == 0:
+ result = cls.PASS
+ else:
+ result = cls.FAIL
+
+ return cls(test, result, results)
+
+class ResultsSink:
+ def __init__(self, options, testcount):
+ self.options = options
+ self.fp = options.output_fp
+
+ self.groups = {}
+ self.output_dict = {}
+ self.counts = {'PASS': 0, 'FAIL': 0, 'TIMEOUT': 0, 'SKIP': 0}
+ self.n = 0
+
+ if options.hide_progress:
+ self.pb = NullProgressBar()
+ else:
+ fmt = [
+ {'value': 'PASS', 'color': 'green'},
+ {'value': 'FAIL', 'color': 'red'},
+ {'value': 'TIMEOUT', 'color': 'blue'},
+ {'value': 'SKIP', 'color': 'brightgray'},
+ ]
+ self.pb = ProgressBar(testcount, fmt)
+
+ def push(self, output):
+ if output.timed_out:
+ self.counts['TIMEOUT'] += 1
+ if isinstance(output, NullTestOutput):
+ if self.options.format == 'automation':
+ self.print_automation_result(
+ 'TEST-KNOWN-FAIL', output.test, time=output.dt,
+ skip=True)
+ self.counts['SKIP'] += 1
+ self.n += 1
+ else:
+ result = TestResult.from_output(output)
+ tup = (result.result, result.test.expect, result.test.random)
+ dev_label = self.LABELS[tup][1]
+
+ if self.options.check_output:
+ if output.test.path in self.output_dict.keys():
+ if self.output_dict[output.test.path] != output:
+ self.counts['FAIL'] += 1
+ self.print_automation_result(
+ "TEST-UNEXPECTED-FAIL", result.test, time=output.dt,
+ message="Same test with different flag producing different output")
+ else:
+ self.output_dict[output.test.path] = output
+
+ if output.timed_out:
+ dev_label = 'TIMEOUTS'
+ self.groups.setdefault(dev_label, []).append(result)
+
+ if dev_label == 'REGRESSIONS':
+ show_output = self.options.show_output \
+ or not self.options.no_show_failed
+ elif dev_label == 'TIMEOUTS':
+ show_output = self.options.show_output
+ else:
+ show_output = self.options.show_output \
+ and not self.options.failed_only
+
+ if dev_label in ('REGRESSIONS', 'TIMEOUTS'):
+ show_cmd = self.options.show_cmd
+ else:
+ show_cmd = self.options.show_cmd \
+ and not self.options.failed_only
+
+ if show_output or show_cmd:
+ self.pb.beginline()
+
+ if show_output:
+ print('## {}: rc = {:d}, run time = {}'.format(
+ output.test.path, output.rc, output.dt), file=self.fp)
+
+ if show_cmd:
+ print(escape_cmdline(output.cmd), file=self.fp)
+
+ if show_output:
+ self.fp.write(output.out)
+ self.fp.write(output.err)
+
+ self.n += 1
+
+ if result.result == TestResult.PASS and not result.test.random:
+ self.counts['PASS'] += 1
+ elif result.test.expect and not result.test.random:
+ self.counts['FAIL'] += 1
+ else:
+ self.counts['SKIP'] += 1
+
+ if self.options.format == 'automation':
+ if result.result != TestResult.PASS and len(result.results) > 1:
+ for sub_ok, msg in result.results:
+ tup = (sub_ok, result.test.expect, result.test.random)
+ label = self.LABELS[tup][0]
+ if label == 'TEST-UNEXPECTED-PASS':
+ label = 'TEST-PASS (EXPECTED RANDOM)'
+ self.print_automation_result(
+ label, result.test, time=output.dt,
+ message=msg)
+ tup = (result.result, result.test.expect, result.test.random)
+ self.print_automation_result(
+ self.LABELS[tup][0], result.test, time=output.dt)
+ return
+
+ if dev_label:
+ def singular(label):
+ return "FIXED" if label == "FIXES" else label[:-1]
+ self.pb.message("{} - {}".format(singular(dev_label),
+ output.test.path))
+
+ self.pb.update(self.n, self.counts)
+
+ def finish(self, completed):
+ self.pb.finish(completed)
+ if not self.options.format == 'automation':
+ self.list(completed)
+
+    # Conceptually, this maps (test result x test expectation) to text labels.
+ # key is (result, expect, random)
+ # value is (automation label, dev test category)
+ LABELS = {
+ (TestResult.CRASH, False, False): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
+ (TestResult.CRASH, False, True): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
+ (TestResult.CRASH, True, False): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
+ (TestResult.CRASH, True, True): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
+
+ (TestResult.FAIL, False, False): ('TEST-KNOWN-FAIL', ''),
+ (TestResult.FAIL, False, True): ('TEST-KNOWN-FAIL (EXPECTED RANDOM)', ''),
+ (TestResult.FAIL, True, False): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
+ (TestResult.FAIL, True, True): ('TEST-KNOWN-FAIL (EXPECTED RANDOM)', ''),
+
+ (TestResult.PASS, False, False): ('TEST-UNEXPECTED-PASS', 'FIXES'),
+ (TestResult.PASS, False, True): ('TEST-PASS (EXPECTED RANDOM)', ''),
+ (TestResult.PASS, True, False): ('TEST-PASS', ''),
+ (TestResult.PASS, True, True): ('TEST-PASS (EXPECTED RANDOM)', ''),
+ }
+
+ def list(self, completed):
+ for label, results in sorted(self.groups.items()):
+ if label == '':
+ continue
+
+ print(label)
+ for result in results:
+ print(' {}'.format(' '.join(result.test.jitflags +
+ [result.test.path])))
+
+ if self.options.failure_file:
+ failure_file = open(self.options.failure_file, 'w')
+ if not self.all_passed():
+ if 'REGRESSIONS' in self.groups:
+ for result in self.groups['REGRESSIONS']:
+ print(result.test.path, file=failure_file)
+ if 'TIMEOUTS' in self.groups:
+ for result in self.groups['TIMEOUTS']:
+ print(result.test.path, file=failure_file)
+ failure_file.close()
+
+ suffix = '' if completed else ' (partial run -- interrupted by user)'
+ if self.all_passed():
+ print('PASS' + suffix)
+ else:
+ print('FAIL' + suffix)
+
+ def all_passed(self):
+ return 'REGRESSIONS' not in self.groups and 'TIMEOUTS' not in self.groups
+
+ def print_automation_result(self, label, test, message=None, skip=False,
+ time=None):
+ result = label
+ result += " | " + test.path
+ args = []
+ if self.options.shell_args:
+ args.append(self.options.shell_args)
+ args += test.jitflags
+ result += ' | (args: "{}")'.format(' '.join(args))
+ if message:
+ result += " | " + message
+ if skip:
+ result += ' | (SKIP)'
+ if time > self.options.timeout:
+ result += ' | (TIMEOUT)'
+ print(result)
diff --git a/js/src/tests/lib/tasks_unix.py b/js/src/tests/lib/tasks_unix.py
new file mode 100644
index 0000000000..6550335222
--- /dev/null
+++ b/js/src/tests/lib/tasks_unix.py
@@ -0,0 +1,223 @@
+# A unix-oriented process dispatcher. Uses a single thread with select and
+# waitpid to dispatch tasks. This avoids several deadlocks that are possible
+# with fork/exec + threads + Python.
+
+import errno, os, select, sys
+from datetime import datetime, timedelta
+from progressbar import ProgressBar
+from results import NullTestOutput, TestOutput, escape_cmdline
+
+class Task(object):
+ def __init__(self, test, prefix, pid, stdout, stderr):
+ self.test = test
+ self.cmd = test.get_command(prefix)
+ self.pid = pid
+ self.stdout = stdout
+ self.stderr = stderr
+ self.start = datetime.now()
+ self.out = []
+ self.err = []
+
+def spawn_test(test, prefix, passthrough, run_skipped, show_cmd):
+ """Spawn one child, return a task struct."""
+ if not test.enable and not run_skipped:
+ return None
+
+ cmd = test.get_command(prefix)
+ if show_cmd:
+ print(escape_cmdline(cmd))
+
+ if not passthrough:
+ (rout, wout) = os.pipe()
+ (rerr, werr) = os.pipe()
+
+ rv = os.fork()
+
+ # Parent.
+ if rv:
+ os.close(wout)
+ os.close(werr)
+ return Task(test, prefix, rv, rout, rerr)
+
+ # Child.
+ os.close(rout)
+ os.close(rerr)
+
+ os.dup2(wout, 1)
+ os.dup2(werr, 2)
+
+ os.execvp(cmd[0], cmd)
+
+def get_max_wait(tasks, timeout):
+ """
+ Return the maximum time we can wait before any task should time out.
+ """
+
+ # If we have a progress-meter, we need to wake up to update it frequently.
+ wait = ProgressBar.update_granularity()
+
+ # If a timeout is supplied, we need to wake up for the first task to
+ # timeout if that is sooner.
+ if timeout:
+ now = datetime.now()
+ timeout_delta = timedelta(seconds=timeout)
+ for task in tasks:
+ remaining = task.start + timeout_delta - now
+ if remaining < wait:
+ wait = remaining
+
+    # Return the wait time in seconds, clamped between zero and the update
+    # granularity.
+ return max(wait.total_seconds(), 0)
+
+def flush_input(fd, frags):
+ """
+ Read any pages sitting in the file descriptor 'fd' into the list 'frags'.
+ """
+ rv = os.read(fd, 4096)
+ frags.append(rv)
+ while len(rv) == 4096:
+        # If read() returns a full buffer, it may indicate there was exactly
+        # one buffer's worth of data, or that there is more data to read. Poll
+        # the descriptor before we read again to ensure that we will not block
+        # indefinitely.
+ readable, _, _ = select.select([fd], [], [], 0)
+ if not readable:
+ return
+
+ rv = os.read(fd, 4096)
+ frags.append(rv)
+
+def read_input(tasks, timeout):
+ """
+ Select on input or errors from the given task list for a max of timeout
+ seconds.
+ """
+ rlist = []
+ exlist = []
+ outmap = {} # Fast access to fragment list given fd.
+ for t in tasks:
+ rlist.append(t.stdout)
+ rlist.append(t.stderr)
+ outmap[t.stdout] = t.out
+ outmap[t.stderr] = t.err
+ # This will trigger with a close event when the child dies, allowing
+ # us to respond immediately and not leave cores idle.
+ exlist.append(t.stdout)
+
+ readable = []
+ try:
+ readable, _, _ = select.select(rlist, [], exlist, timeout)
+ except OverflowError as e:
+ print >> sys.stderr, "timeout value", timeout
+ raise
+
+ for fd in readable:
+ flush_input(fd, outmap[fd])
+
+def remove_task(tasks, pid):
+ """
+ Return a pair with the removed task and the new, modified tasks list.
+ """
+ index = None
+ for i, t in enumerate(tasks):
+ if t.pid == pid:
+ index = i
+ break
+ else:
+ raise KeyError("No such pid: {}".format(pid))
+
+ out = tasks[index]
+ tasks.pop(index)
+ return out
+
+def timed_out(task, timeout):
+ """
+ Return True if the given task has been running for longer than |timeout|.
+ |timeout| may be falsy, indicating an infinite timeout (in which case
+ timed_out always returns False).
+ """
+ if timeout:
+ now = datetime.now()
+ return (now - task.start) > timedelta(seconds=timeout)
+ return False
+
+def reap_zombies(tasks, timeout):
+ """
+ Search for children of this process that have finished. If they are tasks,
+ then this routine will clean up the child. This method returns a new task
+ list that has had the ended tasks removed, followed by the list of finished
+ tasks.
+ """
+ finished = []
+ while True:
+ try:
+ pid, status = os.waitpid(0, os.WNOHANG)
+ if pid == 0:
+ break
+ except OSError as e:
+ if e.errno == errno.ECHILD:
+ break
+ raise e
+
+ ended = remove_task(tasks, pid)
+ flush_input(ended.stdout, ended.out)
+ flush_input(ended.stderr, ended.err)
+ os.close(ended.stdout)
+ os.close(ended.stderr)
+
+ returncode = os.WEXITSTATUS(status)
+ if os.WIFSIGNALED(status):
+ returncode = -os.WTERMSIG(status)
+
+ finished.append(
+ TestOutput(
+ ended.test,
+ ended.cmd,
+ ''.join(ended.out),
+ ''.join(ended.err),
+ returncode,
+ (datetime.now() - ended.start).total_seconds(),
+ timed_out(ended, timeout)))
+ return tasks, finished
+
+def kill_undead(tasks, timeout):
+ """
+ Signal all children that are over the given timeout.
+ """
+ for task in tasks:
+ if timed_out(task, timeout):
+ os.kill(task.pid, 9)
+
+def run_all_tests(tests, prefix, pb, options):
+ # Copy and reverse for fast pop off end.
+    tests = list(tests)
+ tests.reverse()
+
+ # The set of currently running tests.
+ tasks = []
+
+ while len(tests) or len(tasks):
+ while len(tests) and len(tasks) < options.worker_count:
+ test = tests.pop()
+ task = spawn_test(test, prefix,
+ options.passthrough, options.run_skipped, options.show_cmd)
+ if task:
+ tasks.append(task)
+ else:
+ yield NullTestOutput(test)
+
+ timeout = get_max_wait(tasks, options.timeout)
+ read_input(tasks, timeout)
+
+ kill_undead(tasks, options.timeout)
+ tasks, finished = reap_zombies(tasks, options.timeout)
+
+        # With Python 3.3+ we could use "yield from" to remove this loop.
+ for out in finished:
+ yield out
+
+ # If we did not finish any tasks, poke the progress bar to show that
+ # the test harness is at least not frozen.
+ if len(finished) == 0:
+ pb.poke()
+
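# A minimal sketch (not part of the patch; POSIX-only, with a made-up status
# value for a child killed by SIGSEGV) of how reap_zombies above turns a raw
# waitpid() status into the returncode stored in TestOutput:
import os
status = 11  # low bits of the wait status carry the terminating signal
returncode = os.WEXITSTATUS(status)
if os.WIFSIGNALED(status):
    returncode = -os.WTERMSIG(status)
print(returncode)  # -> -11, which the expect_crash check in jittests.py uses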
diff --git a/js/src/tests/lib/tasks_win.py b/js/src/tests/lib/tasks_win.py
new file mode 100644
index 0000000000..7a4540c60d
--- /dev/null
+++ b/js/src/tests/lib/tasks_win.py
@@ -0,0 +1,135 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+from __future__ import print_function, unicode_literals, division
+
+import subprocess
+import sys
+from datetime import datetime, timedelta
+from progressbar import ProgressBar
+from results import NullTestOutput, TestOutput, escape_cmdline
+from threading import Thread
+from Queue import Queue, Empty
+
+
+class EndMarker:
+ pass
+
+
+class TaskFinishedMarker:
+ pass
+
+
+def _do_work(qTasks, qResults, qWatch, prefix, run_skipped, timeout, show_cmd):
+ while True:
+ test = qTasks.get(block=True, timeout=sys.maxint)
+ if test is EndMarker:
+ qWatch.put(EndMarker)
+ qResults.put(EndMarker)
+ return
+
+ if not test.enable and not run_skipped:
+ qResults.put(NullTestOutput(test))
+ continue
+
+ # Spawn the test task.
+ cmd = test.get_command(prefix)
+ if show_cmd:
+ print(escape_cmdline(cmd))
+ tStart = datetime.now()
+ proc = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+
+ # Push the task to the watchdog -- it will kill the task
+ # if it goes over the timeout while we keep its stdout
+ # buffer clear on the "main" worker thread.
+ qWatch.put(proc)
+ out, err = proc.communicate()
+ qWatch.put(TaskFinishedMarker)
+
+ # Create a result record and forward to result processing.
+ dt = datetime.now() - tStart
+ result = TestOutput(test, cmd, out, err, proc.returncode, dt.total_seconds(),
+ dt > timedelta(seconds=timeout))
+ qResults.put(result)
+
+
+def _do_watch(qWatch, timeout):
+ while True:
+ proc = qWatch.get(True)
+ if proc == EndMarker:
+ return
+ try:
+ fin = qWatch.get(block=True, timeout=timeout)
+ assert fin is TaskFinishedMarker, "invalid finish marker"
+ except Empty:
+ # Timed out, force-kill the test.
+ try:
+ proc.terminate()
+ except WindowsError as ex:
+ # If the process finishes after we time out but before we
+ # terminate, the terminate call will fail. We can safely
+ # ignore this.
+ if ex.winerror != 5:
+ raise
+ fin = qWatch.get(block=True, timeout=sys.maxint)
+ assert fin is TaskFinishedMarker, "invalid finish marker"
+
+
+def run_all_tests(tests, prefix, pb, options):
+ """
+ Uses scatter-gather to a thread-pool to manage children.
+ """
+ qTasks, qResults = Queue(), Queue()
+
+ workers = []
+ watchdogs = []
+ for _ in range(options.worker_count):
+ qWatch = Queue()
+ watcher = Thread(target=_do_watch, args=(qWatch, options.timeout))
+ watcher.setDaemon(True)
+ watcher.start()
+ watchdogs.append(watcher)
+ worker = Thread(target=_do_work, args=(qTasks, qResults, qWatch,
+ prefix, options.run_skipped,
+ options.timeout, options.show_cmd))
+ worker.setDaemon(True)
+ worker.start()
+ workers.append(worker)
+
+    # Insert all jobs into the queue, followed by one end-of-queue
+    # marker per worker. Pushing never blocks on the queue itself, only
+    # on waiting for the test generator to yield more items. Since the
+    # workers are already running, tests are consumed as fast as the
+    # filesystem can produce them.
+ def _do_push(num_workers, qTasks):
+ for test in tests:
+ qTasks.put(test)
+ for _ in range(num_workers):
+ qTasks.put(EndMarker)
+ pusher = Thread(target=_do_push, args=(len(workers), qTasks))
+ pusher.setDaemon(True)
+ pusher.start()
+
+ # Read from the results.
+ ended = 0
+ delay = ProgressBar.update_granularity().total_seconds()
+ while ended < len(workers):
+ try:
+ result = qResults.get(block=True, timeout=delay)
+ if result is EndMarker:
+ ended += 1
+ else:
+ yield result
+ except Empty:
+ pb.poke()
+
+ # Cleanup and exit.
+ pusher.join()
+ for worker in workers:
+ worker.join()
+ for watcher in watchdogs:
+ watcher.join()
+ assert qTasks.empty(), "Send queue not drained"
+ assert qResults.empty(), "Result queue not drained"
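Windows pipes cannot be multiplexed with select(), so each worker thread above blocks in communicate() while a paired watchdog thread enforces the timeout through a small queue protocol: the worker puts the Popen object, then puts TaskFinishedMarker once the child exits; if the watchdog times out waiting for the marker, it terminates the child. A minimal sketch of that pairing, in Python 2 style to match the harness (watched_run and DONE are illustrative names, not harness APIs):

# Illustrative sketch only; use the 'queue' module on Python 3.
import subprocess
from Queue import Queue, Empty
from threading import Thread

DONE = object()

def watchdog(q, timeout):
    proc = q.get()                    # the child handed over by the worker
    try:
        q.get(timeout=timeout)        # wait for the "finished" marker
    except Empty:
        try:
            proc.terminate()          # timed out: kill the child
        except OSError:
            pass                      # it may have just exited on its own
        q.get()                       # the worker still sends the marker

def watched_run(cmd, timeout):
    q = Queue()
    Thread(target=watchdog, args=(q, timeout)).start()
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    q.put(proc)                       # hand the child to the watchdog
    out, err = proc.communicate()     # blocks; the watchdog enforces the timeout
    q.put(DONE)                       # tell the watchdog we are done
    return proc.returncode, out, err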
diff --git a/js/src/tests/lib/terminal_unix.py b/js/src/tests/lib/terminal_unix.py
new file mode 100644
index 0000000000..9a34580083
--- /dev/null
+++ b/js/src/tests/lib/terminal_unix.py
@@ -0,0 +1,38 @@
+import sys
+
+class Terminal(object):
+ COLOR = {
+ 'red': '31',
+ 'green': '32',
+ 'blue': '34',
+ 'gray': '37'
+ }
+ NORMAL_INTENSITY = '1'
+ BRIGHT_INTENSITY = '2'
+ ESCAPE = '\x1b['
+ RESET = '0'
+ SEPARATOR = ';'
+ COLOR_CODE = 'm'
+ CLEAR_RIGHT_CODE = 'K'
+
+ @classmethod
+ def set_color(cls, color):
+ """
+ color: str - color definition string
+ """
+ mod = Terminal.NORMAL_INTENSITY
+ if color.startswith('bright'):
+ mod = Terminal.BRIGHT_INTENSITY
+ color = color[len('bright'):]
+ color_code = Terminal.COLOR[color]
+
+ sys.stdout.write(cls.ESCAPE + color_code + cls.SEPARATOR + mod
+ + cls.COLOR_CODE)
+
+ @classmethod
+ def reset_color(cls):
+ sys.stdout.write(cls.ESCAPE + cls.RESET + cls.COLOR_CODE)
+
+ @classmethod
+ def clear_right(cls):
+ sys.stdout.write(cls.ESCAPE + cls.CLEAR_RIGHT_CODE)
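set_color() writes an ANSI escape sequence built from the tables above (for example '\x1b[31;1m' for plain red), and reset_color() emits '\x1b[0m' to restore the defaults. A hedged usage sketch, assuming the module is importable as terminal_unix:

# Illustrative usage only; module name assumed to be on sys.path.
import sys
from terminal_unix import Terminal

Terminal.set_color('brightgreen')   # 'bright' prefix selects the second intensity code
sys.stdout.write('PASS')
Terminal.reset_color()
sys.stdout.write('\n')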
diff --git a/js/src/tests/lib/terminal_win.py b/js/src/tests/lib/terminal_win.py
new file mode 100644
index 0000000000..db9d797c5f
--- /dev/null
+++ b/js/src/tests/lib/terminal_win.py
@@ -0,0 +1,109 @@
+"""
+From Andre Burgaud's Blog, from the CTypes Wiki:
+http://www.burgaud.com/bring-colors-to-the-windows-console-with-python/
+
+Colors text in console mode application (win32).
+Uses ctypes and Win32 methods SetConsoleTextAttribute and
+GetConsoleScreenBufferInfo.
+
+$Id: color_console.py 534 2009-05-10 04:00:59Z andre $
+"""
+
+from ctypes import windll, Structure, c_short, c_ushort, byref
+
+SHORT = c_short
+WORD = c_ushort
+
+class COORD(Structure):
+ """struct in wincon.h."""
+ _fields_ = [
+ ("X", SHORT),
+ ("Y", SHORT)]
+
+class SMALL_RECT(Structure):
+ """struct in wincon.h."""
+ _fields_ = [
+ ("Left", SHORT),
+ ("Top", SHORT),
+ ("Right", SHORT),
+ ("Bottom", SHORT)]
+
+class CONSOLE_SCREEN_BUFFER_INFO(Structure):
+ """struct in wincon.h."""
+ _fields_ = [
+ ("dwSize", COORD),
+ ("dwCursorPosition", COORD),
+ ("wAttributes", WORD),
+ ("srWindow", SMALL_RECT),
+ ("dwMaximumWindowSize", COORD)]
+
+# winbase.h
+STD_INPUT_HANDLE = -10
+STD_OUTPUT_HANDLE = -11
+STD_ERROR_HANDLE = -12
+
+# wincon.h
+FOREGROUND_BLACK = 0x0000
+FOREGROUND_BLUE = 0x0001
+FOREGROUND_GREEN = 0x0002
+FOREGROUND_CYAN = 0x0003
+FOREGROUND_RED = 0x0004
+FOREGROUND_MAGENTA = 0x0005
+FOREGROUND_YELLOW = 0x0006
+FOREGROUND_GREY = 0x0007
+FOREGROUND_INTENSITY = 0x0008 # foreground color is intensified.
+
+BACKGROUND_BLACK = 0x0000
+BACKGROUND_BLUE = 0x0010
+BACKGROUND_GREEN = 0x0020
+BACKGROUND_CYAN = 0x0030
+BACKGROUND_RED = 0x0040
+BACKGROUND_MAGENTA = 0x0050
+BACKGROUND_YELLOW = 0x0060
+BACKGROUND_GREY = 0x0070
+BACKGROUND_INTENSITY = 0x0080 # background color is intensified.
+
+stdout_handle = windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
+SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
+GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
+
+def get_text_attr():
+ csbi = CONSOLE_SCREEN_BUFFER_INFO()
+ GetConsoleScreenBufferInfo(stdout_handle, byref(csbi))
+ return csbi.wAttributes
+
+DEFAULT_COLORS = get_text_attr()
+
+class Terminal(object):
+ COLOR = {
+ 'black': 0x0000,
+ 'blue': 0x0001,
+ 'green': 0x0002,
+ 'cyan': 0x0003,
+ 'red': 0x0004,
+ 'magenta': 0x0005,
+ 'yellow': 0x0006,
+ 'gray': 0x0007
+ }
+ BRIGHT_INTENSITY = 0x0008
+ BACKGROUND_SHIFT = 4
+
+ @classmethod
+ def set_color(cls, color):
+ """
+ color: str - color definition string
+ """
+ color_code = 0
+ if color.startswith('bright'):
+ color_code |= cls.BRIGHT_INTENSITY
+ color = color[len('bright'):]
+ color_code |= Terminal.COLOR[color]
+ SetConsoleTextAttribute(stdout_handle, color_code)
+
+ @classmethod
+ def reset_color(cls):
+ SetConsoleTextAttribute(stdout_handle, DEFAULT_COLORS)
+
+ @classmethod
+ def clear_right(cls):
+ pass
diff --git a/js/src/tests/lib/tests.py b/js/src/tests/lib/tests.py
new file mode 100644
index 0000000000..1de56dc1c4
--- /dev/null
+++ b/js/src/tests/lib/tests.py
@@ -0,0 +1,216 @@
+# Library for JSTest tests.
+#
+# This contains classes that represent an individual test, including
+# metadata, and know how to run the tests and determine failures.
+
+import datetime, os, sys, time
+from contextlib import contextmanager
+from subprocess import Popen, PIPE
+from threading import Thread
+
+from results import TestOutput
+
+# When run on tbpl, we run each test multiple times with the following
+# arguments.
+JITFLAGS = {
+ 'all': [
+ [], # no flags, normal baseline and ion
+ ['--ion-eager', '--ion-offthread-compile=off'], # implies --baseline-eager
+ ['--ion-eager', '--ion-offthread-compile=off', '--non-writable-jitcode',
+ '--ion-check-range-analysis', '--ion-extra-checks', '--no-sse3', '--no-threads'],
+ ['--baseline-eager'],
+ ['--no-baseline', '--no-ion'],
+ ],
+ # used by jit_test.py
+ 'ion': [
+ ['--baseline-eager'],
+ ['--ion-eager', '--ion-offthread-compile=off']
+ ],
+    # Run reduced variants on debug builds, since they take longer to run.
+ 'debug': [
+ [], # no flags, normal baseline and ion
+ ['--ion-eager', '--ion-offthread-compile=off'], # implies --baseline-eager
+ ['--baseline-eager'],
+ ],
+ # Interpreter-only, for tools that cannot handle binary code generation.
+ 'interp': [
+ ['--no-baseline', '--no-asmjs', '--no-wasm', '--no-native-regexp']
+ ],
+ 'none': [
+ [] # no flags, normal baseline and ion
+ ]
+}
+
+def get_jitflags(variant, **kwargs):
+ if variant not in JITFLAGS:
+ print('Invalid jitflag: "{}"'.format(variant))
+ sys.exit(1)
+ if variant == 'none' and 'none' in kwargs:
+ return kwargs['none']
+ return JITFLAGS[variant]
+
+def valid_jitflags():
+ return JITFLAGS.keys()
+
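get_jitflags() simply indexes the JITFLAGS table, so each returned entry is one extra set of shell flags and the harness runs every test once per entry. An illustrative use, assuming this module is importable as tests:

# Illustrative only; values taken from the JITFLAGS table above.
from tests import get_jitflags, valid_jitflags

assert 'ion' in valid_jitflags()
for flags in get_jitflags('ion'):
    print(flags)
# ['--baseline-eager']
# ['--ion-eager', '--ion-offthread-compile=off']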
+def get_environment_overlay(js_shell):
+ """
+ Build a dict of additional environment variables that must be set to run
+ tests successfully.
+ """
+ env = {
+ # Force Pacific time zone to avoid failures in Date tests.
+ 'TZ': 'PST8PDT',
+ # Force date strings to English.
+ 'LC_TIME': 'en_US.UTF-8',
+        # Tell the shell to disable crash dialogs on Windows.
+ 'XRE_NO_WINDOWS_CRASH_DIALOG': '1',
+ }
+    # Add the binary's directory to the library search path so that we find
+    # the NSPR and ICU we built, instead of the platform-supplied ones (or
+    # none at all on Windows).
+ if sys.platform.startswith('linux'):
+ env['LD_LIBRARY_PATH'] = os.path.dirname(js_shell)
+ elif sys.platform.startswith('darwin'):
+ env['DYLD_LIBRARY_PATH'] = os.path.dirname(js_shell)
+ elif sys.platform.startswith('win'):
+ env['PATH'] = os.path.dirname(js_shell)
+ return env
+
+
+@contextmanager
+def change_env(env_overlay):
+ # Apply the overlaid environment and record the current state.
+ prior_env = {}
+ for key, val in env_overlay.items():
+ prior_env[key] = os.environ.get(key, None)
+        # For *PATH*-style variables, prepend to any existing value rather
+        # than replacing it.
+        if 'PATH' in key and key in os.environ:
+ os.environ[key] = '{}{}{}'.format(val, os.pathsep, os.environ[key])
+ else:
+ os.environ[key] = val
+
+ try:
+ # Execute with the new environment.
+ yield
+
+ finally:
+ # Restore the prior environment.
+ for key, val in prior_env.items():
+ if val is not None:
+ os.environ[key] = val
+ else:
+ del os.environ[key]
+
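get_environment_overlay() and change_env() are designed to be combined: build the overlay once for a given shell binary, then run child processes inside the context manager so the variables are applied (with *PATH*-style variables prepended rather than replaced) and restored afterwards, even on error. A hedged sketch; the shell path and the -e invocation are illustrative only:

# Illustrative only; 'js_shell' would be a built SpiderMonkey shell binary.
import subprocess
from tests import get_environment_overlay, change_env

js_shell = '/path/to/objdir/dist/bin/js'
with change_env(get_environment_overlay(js_shell)):
    # Inside the block the overlay is applied (TZ, LC_TIME, library path, ...).
    subprocess.check_call([js_shell, '-e', 'print(new Date())'])
# On exit the previous environment is restored, even if the call raised.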
+
+def get_cpu_count():
+ """
+ Guess at a reasonable parallelism count to set as the default for the
+ current machine and run.
+ """
+ # Python 2.6+
+ try:
+ import multiprocessing
+ return multiprocessing.cpu_count()
+ except (ImportError, NotImplementedError):
+ pass
+
+ # POSIX
+ try:
+ res = int(os.sysconf('SC_NPROCESSORS_ONLN'))
+ if res > 0:
+ return res
+ except (AttributeError, ValueError):
+ pass
+
+ # Windows
+ try:
+ res = int(os.environ['NUMBER_OF_PROCESSORS'])
+ if res > 0:
+ return res
+ except (KeyError, ValueError):
+ pass
+
+ return 1
+
+
+class RefTest(object):
+ """A runnable test."""
+ def __init__(self, path):
+ self.path = path # str: path of JS file relative to tests root dir
+ self.options = [] # [str]: Extra options to pass to the shell
+ self.jitflags = [] # [str]: JIT flags to pass to the shell
+ self.test_reflect_stringify = None # str or None: path to
+ # reflect-stringify.js file to test
+ # instead of actually running tests
+
+ @staticmethod
+ def prefix_command(path):
+ """Return the '-f shell.js' options needed to run a test with the given
+ path."""
+ if path == '':
+ return ['-f', 'shell.js']
+ head, base = os.path.split(path)
+ return RefTest.prefix_command(head) \
+ + ['-f', os.path.join(path, 'shell.js')]
+
+ def get_command(self, prefix):
+ dirname, filename = os.path.split(self.path)
+ cmd = prefix + self.jitflags + self.options \
+ + RefTest.prefix_command(dirname)
+ if self.test_reflect_stringify is not None:
+ cmd += [self.test_reflect_stringify, "--check", self.path]
+ else:
+ cmd += ["-f", self.path]
+ return cmd
+
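prefix_command() recurses up the test's directory chain so every enclosing shell.js is loaded before the test file itself, and get_command() prepends the shell invocation, JIT flags, and per-test options. For a hypothetical test at ecma/Date/x.js on a POSIX system, the resulting argv would be:

# Illustrative only; the shell path and test path are made up.
from tests import RefTest

t = RefTest('ecma/Date/x.js')
t.jitflags = ['--ion-eager']
print(t.get_command(['/path/to/js']))
# ['/path/to/js', '--ion-eager',
#  '-f', 'shell.js',
#  '-f', 'ecma/shell.js',
#  '-f', 'ecma/Date/shell.js',
#  '-f', 'ecma/Date/x.js']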
+
+class RefTestCase(RefTest):
+ """A test case consisting of a test and an expected result."""
+ def __init__(self, path):
+ RefTest.__init__(self, path)
+ self.enable = True # bool: True => run test, False => don't run
+ self.expect = True # bool: expected result, True => pass
+ self.random = False # bool: True => ignore output as 'random'
+ self.slow = False # bool: True => test may run slowly
+
+ # The terms parsed to produce the above properties.
+ self.terms = None
+
+ # The tag between |...| in the test header.
+ self.tag = None
+
+        # Anything occurring after -- in the test header.
+ self.comment = None
+
+ def __str__(self):
+ ans = self.path
+ if not self.enable:
+ ans += ', skip'
+ if not self.expect:
+ ans += ', fails'
+ if self.random:
+ ans += ', random'
+ if self.slow:
+ ans += ', slow'
+ if '-d' in self.options:
+ ans += ', debugMode'
+ return ans
+
+ @staticmethod
+ def build_js_cmd_prefix(js_path, js_args, debugger_prefix):
+ parts = []
+ if debugger_prefix:
+ parts += debugger_prefix
+ parts.append(js_path)
+ if js_args:
+ parts += js_args
+ return parts
+
+ def __cmp__(self, other):
+ if self.path == other.path:
+ return 0
+ elif self.path < other.path:
+ return -1
+ return 1
+
+ def __hash__(self):
+ return self.path.__hash__()
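Ordering and hashing are both defined purely in terms of path, so test cases sort deterministically and de-duplicate by path; note that __cmp__ is the Python 2 comparison protocol (under Python 3 the class would need __eq__ and __lt__ instead). An illustrative Python 2 snippet, assuming the module is importable as tests:

# Illustrative only; the paths are made up.
from tests import RefTestCase

a, b = RefTestCase('ecma/a.js'), RefTestCase('ecma/b.js')
sorted([b, a])[0].path                   # -> 'ecma/a.js' (ordered via __cmp__)
len({a, b, RefTestCase('ecma/a.js')})    # -> 2 (equal paths collapse in a set)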