Modernize python and update pylint (#5096)

Update code to conform to the newer version of pylint available in
ubuntu-22.04, with few exceptions:
    - disabled `import-outside-toplevel` for `main()` in
      `jerry_client.py`
    - disabled `consider-using-with` for the logfile of `TestSuite` in
      `test262-harness.py` as using `with` is not practical in that case

Update test262-harness.py to use argparse instead of the now deprecated
optparse

Rename variables in jerry_client_main.py that redefined python builtins
or shadowed variables from an outer scope

Update python files to use f-strings

Add minimum python versions (3.6 and 3.8) to the CI jobs: without it the
default python version did not support the `with` statement for
`subprocess.Popen` used in `build.py` on macOS, or in some cases f-strings

Remove `from __future__` imports that are no-ops in python 3

Remove shebang from non executable files

Re-enable most pylint checkers, except `missing-docstring`

JerryScript-DCO-1.0-Signed-off-by: Máté Tokodi mate.tokodi@szteszoftver.hu
This commit is contained in:
Máté Tokodi
2023-10-25 17:32:14 +02:00
committed by GitHub
parent a588e49661
commit bc408b159b
26 changed files with 469 additions and 503 deletions
+5 -7
View File
@@ -14,8 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import fnmatch
import json
@@ -30,7 +28,7 @@ JERRY_PORT = os.path.join(ROOT_DIR, 'jerry-port')
JERRY_MATH = os.path.join(ROOT_DIR, 'jerry-math')
class Amalgamator(object):
class Amalgamator:
# pylint: disable=too-many-instance-attributes
_RE_INCLUDE = re.compile(r'\s*#include ("|<)(.*?)("|>)\n$')
@@ -70,7 +68,7 @@ class Amalgamator(object):
return
normalized_path = repr(os.path.normpath(filename))[1:-1]
line_info = '#line %d "%s"\n' % (line_number, normalized_path)
line_info = f'#line {line_number} "{normalized_path}"\n'
if self._output and self._output[-1].startswith('#line'):
# Avoid emitting multiple line infos in sequence, just overwrite the last one
@@ -92,7 +90,7 @@ class Amalgamator(object):
self._emit_lineinfo(1, filename)
line_idx = 0
with open(filename, 'r') as input_file:
with open(filename, 'r', encoding='utf8') as input_file:
in_copyright = False
for line in input_file:
line_idx += 1
@@ -166,7 +164,7 @@ class Amalgamator(object):
out_fp.write(line)
for include in self._extra_includes:
out_fp.write('#include "%s"\n' % include)
out_fp.write(f'#include "{include}"\n')
for line in self._output:
out_fp.write(line)
@@ -242,7 +240,7 @@ def amalgamate(base_dir, input_files=(), output_file=None,
for fname in sorted(c_files.values(), reverse=True):
amalgam.add_file(fname)
with open(output_file, 'w') as output:
with open(output_file, 'w', encoding='utf8') as output:
amalgam.write_output(output)
+9 -12
View File
@@ -14,8 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import multiprocessing
import os
@@ -32,7 +30,7 @@ def default_toolchain():
(sysname, _, _, _, machine) = os.uname()
toolchain = os.path.join(settings.PROJECT_DIR,
'cmake',
'toolchain_%s_%s.cmake' % (sysname.lower(), machine.lower()))
f'toolchain_{sysname.lower()}_{machine.lower()}.cmake')
return toolchain if os.path.isfile(toolchain) else None
def get_arguments():
@@ -179,7 +177,7 @@ def generate_build_options(arguments):
def build_options_append(cmakeopt, cliarg):
if cliarg:
build_options.append('-D%s=%s' % (cmakeopt, cliarg))
build_options.append(f'-D{cmakeopt}={cliarg}')
# general build options
build_options_append('CMAKE_BUILD_TYPE', arguments.build_type)
@@ -231,7 +229,7 @@ def generate_build_options(arguments):
build_options_append('JERRY_VM_THROW', arguments.vm_throw)
if arguments.gc_mark_limit is not None:
build_options.append('-D%s=%s' % ('JERRY_GC_MARK_LIMIT', arguments.gc_mark_limit))
build_options.append(f'-DJERRY_GC_MARK_LIMIT={arguments.gc_mark_limit}')
# jerry-main options
build_options_append('ENABLE_LINK_MAP', arguments.link_map)
@@ -261,7 +259,7 @@ def configure_jerry(arguments):
cmake_cmd = ['cmake', '-B' + arguments.builddir, '-H' + settings.PROJECT_DIR]
if arguments.install:
cmake_cmd.append('-DCMAKE_INSTALL_PREFIX=%s' % arguments.install)
cmake_cmd.append(f'-DCMAKE_INSTALL_PREFIX={arguments.install}')
cmake_cmd.extend(build_options)
@@ -271,11 +269,10 @@ def make_jerry(arguments):
make_cmd = ['cmake', '--build', arguments.builddir, '--config', arguments.build_type]
env = dict(os.environ)
env['CMAKE_BUILD_PARALLEL_LEVEL'] = str(arguments.jobs)
env['MAKEFLAGS'] = '-j%d' % (arguments.jobs) # Workaround for CMake < 3.12
proc = subprocess.Popen(make_cmd, env=env)
proc.wait()
return proc.returncode
env['MAKEFLAGS'] = f'-j{arguments.jobs}' # Workaround for CMake < 3.12
with subprocess.Popen(make_cmd, env=env) as proc:
proc.wait()
return proc.returncode
def install_jerry(arguments):
install_target = 'INSTALL' if os.path.exists(os.path.join(arguments.builddir, 'Jerry.sln')) else 'install'
@@ -285,7 +282,7 @@ def install_jerry(arguments):
def print_result(ret):
print('=' * 30)
if ret:
print('Build failed with exit code: %s' % (ret))
print(f'Build failed with exit code: {ret}')
else:
print('Build succeeded!')
print('=' * 30)
+19 -24
View File
@@ -14,8 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import multiprocessing
import subprocess
@@ -48,7 +46,7 @@ def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--fix', action='store_true', dest='fix',
help='fix source code stlye')
parser.add_argument('--clang-format', dest='clang_format', default='clang-format-%d' % CLANG_FORMAT_MIN_VERSION,
parser.add_argument('--clang-format', dest='clang_format', default=f'clang-format-{CLANG_FORMAT_MIN_VERSION}',
help='path to clang-format executable')
script_args = parser.parse_args()
@@ -65,14 +63,13 @@ def check_clang_format(args, source_file_name):
cmd.append(source_file_name)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, error = proc.communicate()
with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc:
_, error = proc.communicate()
if proc.returncode == 0:
return 0
if proc.returncode == 0:
return 0
print(error.decode('utf8'))
print(error.decode('utf8'))
return 1
@@ -111,26 +108,24 @@ def check_clang_format_version(args):
def main(args):
if check_clang_format_version(args) != 0:
print("clang-format >= %d is not installed." %
CLANG_FORMAT_MIN_VERSION)
print(f"clang-format >= {CLANG_FORMAT_MIN_VERSION} is not installed.")
return 1
pool = multiprocessing.Pool()
failed = 0
with multiprocessing.Pool() as pool:
failed = 0
for folder in FOLDERS:
# pylint: disable=unexpected-keyword-arg
files = sum(([glob(path.join(PROJECT_DIR, folder, "**/*.%s" % e), recursive=True)
for e in ['c', 'h']]), [])
for folder in FOLDERS:
# pylint: disable=unexpected-keyword-arg
files = sum(([glob(path.join(PROJECT_DIR, folder, f"**/*.{e}"), recursive=True)
for e in ['c', 'h']]), [])
failed += run_pass(pool, check_clang_format,
[(args, sourece_file) for sourece_file in files])
failed += run_pass(pool, check_comments,
[([RE_DIRECTIVE_COMMENT, RE_FUNCTION_NAME_COMMENT], sourece_file) for sourece_file in files])
failed += run_pass(pool, check_clang_format,
[(args, sourece_file) for sourece_file in files])
failed += run_pass(pool, check_comments,
[([RE_DIRECTIVE_COMMENT, RE_FUNCTION_NAME_COMMENT], sourece_file) for sourece_file in
files])
pool.close()
return 1 if failed else 0
return 1 if failed else 0
if __name__ == "__main__":
+2 -4
View File
@@ -14,8 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import io
import os
import re
@@ -78,9 +76,9 @@ def main():
for fname in files:
if any(fname.endswith(ext) for ext in EXTENSIONS):
fpath = os.path.join(root, fname)
with io.open(fpath, 'r', errors='ignore') as curr_file:
with io.open(fpath, 'r', errors='ignore', encoding='utf8') as curr_file:
if not LICENSE.search(curr_file.read()):
print('%s: incorrect license' % fpath)
print(f'{fpath}: incorrect license')
is_ok = False
if not is_ok:
+6 -8
View File
@@ -14,8 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import fileinput
import os
@@ -24,7 +22,7 @@ import shlex
import sys
class DoctestExtractor(object):
class DoctestExtractor:
"""
An extractor to process Markdown files and find doctests inside.
"""
@@ -50,7 +48,7 @@ class DoctestExtractor(object):
:param message: a description of the problem.
:param lineno: the location that triggered the warning.
"""
print('%s:%d: %s' % (self._infile, lineno, message), file=sys.stderr)
print(f'{self._infile}:{lineno}: {message}', file=sys.stderr)
def _process_decl(self, params):
"""
@@ -72,7 +70,7 @@ class DoctestExtractor(object):
decl[tokens[i]] = tokens[i + 2].strip('\'"')
if 'name' not in decl:
decl['name'] = '%s%d.c' % (self._outname_base, self._outname_cnt)
decl['name'] = f'{self._outname_base}{self._outname_cnt}.c'
self._outname_cnt += 1
if 'test' not in decl:
@@ -87,7 +85,7 @@ class DoctestExtractor(object):
:return: a tuple of a list (of the first line(s) of the doctest) and the
line number of the start of the code block.
"""
return ['#line %d "%s"\n' % (fileinput.filelineno() + 1, self._infile)], fileinput.filelineno()
return [f'#line {fileinput.filelineno() + 1} "{self._infile}"\n'], fileinput.filelineno()
def _process_code_end(self, decl, code):
"""
@@ -99,9 +97,9 @@ class DoctestExtractor(object):
outname = os.path.join(self._outdir, decl['name']).replace('\\', '/')
action = decl['test']
if self._dry:
print('%s %s' % (action, outname))
print(f'{action} {outname}')
else:
with open(outname, 'w') as outfile:
with open(outname, 'w', encoding='utf8') as outfile:
outfile.writelines(code)
def process(self, infile):
+42 -59
View File
@@ -14,8 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
try:
from configparser import ConfigParser
except ImportError:
@@ -23,13 +21,14 @@ except ImportError:
import argparse
import fileinput
import subprocess
import json
import os
import re
import subprocess
import sys
from settings import FORMAT_SCRIPT, PROJECT_DIR
from gen_c_source import LICENSE
MAGIC_STRINGS_INI = os.path.join(PROJECT_DIR, 'jerry-core', 'lit', 'lit-magic-strings.ini')
MAGIC_STRINGS_INC_H = os.path.join(PROJECT_DIR, 'jerry-core', 'lit', 'lit-magic-strings.inc.h')
@@ -42,6 +41,7 @@ PARSER_ERRORS_INC_H = os.path.join(PROJECT_DIR, 'jerry-core', 'parser', 'js', 'p
LIMIT_MAGIC_STR_LENGTH = 255
def debug_dump(obj):
def deepcopy(obj):
if isinstance(obj, (list, tuple)):
@@ -51,6 +51,7 @@ def debug_dump(obj):
if isinstance(obj, dict):
return {repr(k): deepcopy(e) for k, e in obj.items()}
return obj
return json.dumps(deepcopy(obj), indent=4)
@@ -64,7 +65,7 @@ def read_magic_string_defs(debug, ini_path, item_name):
# [('LIT_MAGIC_STRING_xxx', 'vvv'), ...]
# sorted by length and alpha.
ini_parser = ConfigParser()
ini_parser.optionxform = str # case sensitive options (magic string IDs)
ini_parser.optionxform = str # case sensitive options (magic string IDs)
ini_parser.read(ini_path)
defs = [(str_ref, json.loads(str_value) if str_value != '' else '')
@@ -73,13 +74,12 @@ def read_magic_string_defs(debug, ini_path, item_name):
if len(defs[-1][1]) > LIMIT_MAGIC_STR_LENGTH:
for str_ref, str_value in [x for x in defs if len(x[1]) > LIMIT_MAGIC_STR_LENGTH]:
print("error: The maximum allowed magic string size is {limit} but {str_ref} is {str_len} long.".format(
limit=LIMIT_MAGIC_STR_LENGTH, str_ref=str_ref, str_len=len(str_value)))
exit(1)
print(f"error: The maximum allowed magic string size is "
f"{LIMIT_MAGIC_STR_LENGTH} but {str_ref} is {len(str_value)} long.")
sys.exit(1)
if debug:
print('debug: magic string definitions: {dump}'
.format(dump=debug_dump(defs)))
print(f'debug: magic string definitions: {debug_dump(defs)}')
return defs
@@ -93,12 +93,12 @@ def extract_magic_string_refs(debug, pattern, inc_h_filename):
# = [('zzz.c', 123), ...]
# meaning that the given literal is referenced under the given guards at
# the listed (file, line number) locations.
exception_list = ['%s_DEF' % pattern,
'%s_FIRST_STRING_WITH_SIZE' % pattern,
'%s_LENGTH_LIMIT' % pattern,
'%s__COUNT' % pattern]
exception_list = [f'{pattern}_DEF',
f'{pattern}_FIRST_STRING_WITH_SIZE',
f'{pattern}_LENGTH_LIMIT',
f'{pattern}__COUNT']
for str_ref in re.findall('%s_[a-zA-Z0-9_]+' % pattern, line):
for str_ref in re.findall(f'{pattern}_[a-zA-Z0-9_]+', line):
if str_ref in exception_list:
continue
@@ -144,11 +144,11 @@ def extract_magic_string_refs(debug, pattern, inc_h_filename):
guard_stack.append([process_guard(if_match.group(1))])
elif elif_match is not None:
guards = guard_stack[-1]
guards[-1] = '!(%s)' % guards[-1].strip()
guards[-1] = f'!({guards[-1].strip()})'
guards.append(process_guard(elif_match.group(1)))
elif else_match is not None:
guards = guard_stack[-1]
guards[-1] = '!(%s)' % guards[-1].strip()
guards[-1] = f'!({guards[-1].strip()})'
elif endif_match is not None:
guard_stack.pop()
@@ -156,20 +156,18 @@ def extract_magic_string_refs(debug, pattern, inc_h_filename):
process_line(fname, lnum, line, guard_stack, pattern)
if guard_stack:
print('warning: {fname}: unbalanced preprocessor conditional '
'directives (analysis finished with no closing `#endif` '
'for {guard_stack})'
.format(fname=fname, guard_stack=guard_stack))
print(f'warning: {fname}: unbalanced preprocessor conditional '
f'directives (analysis finished with no closing `#endif` '
f'for {guard_stack})')
for root, _, files in os.walk(os.path.join(PROJECT_DIR, 'jerry-core')):
for fname in files:
if (fname.endswith('.c') or fname.endswith('.h')) \
and fname != inc_h_filename:
and fname != inc_h_filename:
process_file(os.path.join(root, fname), pattern)
if debug:
print('debug: magic string references: {dump}'
.format(dump=debug_dump(results)))
print(f'debug: magic string references: {debug_dump(results)}')
return results
@@ -179,8 +177,7 @@ def calculate_magic_string_guards(defs, uses, debug=False):
for str_ref, str_value in defs:
if str_ref not in uses:
print('warning: unused magic string {str_ref}'
.format(str_ref=str_ref))
print(f'warning: unused magic string {str_ref}')
continue
# Calculate the most compact guard, i.e., if a magic string is
@@ -208,8 +205,7 @@ def calculate_magic_string_guards(defs, uses, debug=False):
extended_defs.append((str_ref, str_value, guards))
if debug:
print('debug: magic string definitions (with guards): {dump}'
.format(dump=debug_dump(extended_defs)))
print(f'debug: magic string definitions (with guards): {debug_dump(extended_defs)}')
return extended_defs
@@ -220,25 +216,11 @@ def guards_to_str(guards):
def generate_header(gen_file, ini_path):
header = \
"""/* Copyright JS Foundation and other contributors, http://js.foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
header = f"""{LICENSE}
/* This file is automatically generated by the %s script
* from %s. Do not edit! */
""" % (os.path.basename(__file__), os.path.basename(ini_path))
/* This file is automatically generated by the {os.path.basename(__file__)} script
* from {os.path.basename(ini_path)}. Do not edit! */
"""
print(header, file=gen_file)
@@ -247,21 +229,20 @@ def generate_magic_string_defs(gen_file, defs, def_macro):
for str_ref, str_value, guards in defs:
if last_guards != guards:
if () not in last_guards:
print('#endif /* {guards} */'.format(guards=guards_to_str(last_guards)), file=gen_file)
print(f'#endif /* {guards_to_str(last_guards)} */', file=gen_file)
if () not in guards:
print('#if {guards}'.format(guards=guards_to_str(guards)), file=gen_file)
print(f'#if {guards_to_str(guards)}', file=gen_file)
print('{macro} ({str_ref}, {str_value})'
.format(macro=def_macro, str_ref=str_ref, str_value=json.dumps(str_value)), file=gen_file)
print(f'{def_macro} ({str_ref}, {json.dumps(str_value)})', file=gen_file)
last_guards = guards
if () not in last_guards:
print('#endif /* {guards} */'.format(guards=guards_to_str(last_guards)), file=gen_file)
print(f'#endif /* {guards_to_str(last_guards)} */', file=gen_file)
def generate_first_magic_strings(gen_file, defs, with_size_macro):
print(file=gen_file) # empty line separator
print(file=gen_file) # empty line separator
max_size = len(defs[-1][1])
for size in range(max_size + 1):
@@ -269,16 +250,15 @@ def generate_first_magic_strings(gen_file, defs, with_size_macro):
for str_ref, str_value, guards in defs:
if len(str_value) >= size:
if () not in guards and () in last_guards:
print('#if {guards}'.format(guards=guards_to_str(guards)), file=gen_file)
print(f'#if {guards_to_str(guards)}', file=gen_file)
elif () not in guards and () not in last_guards:
if guards == last_guards:
continue
print('#elif {guards}'.format(guards=guards_to_str(guards)), file=gen_file)
print(f'#elif {guards_to_str(guards)}', file=gen_file)
elif () in guards and () not in last_guards:
print('#else /* !({guards}) */'.format(guards=guards_to_str(last_guards)), file=gen_file)
print(f'#else /* !({guards_to_str(last_guards)}) */', file=gen_file)
print('{macro} ({size}, {str_ref})'
.format(macro=with_size_macro, size=size, str_ref=str_ref), file=gen_file)
print(f'{with_size_macro} ({size}, {str_ref})', file=gen_file)
if () in guards:
break
@@ -286,7 +266,8 @@ def generate_first_magic_strings(gen_file, defs, with_size_macro):
last_guards = guards
if () not in last_guards:
print('#endif /* {guards} */'.format(guards=guards_to_str(last_guards)), file=gen_file)
print(f'#endif /* {guards_to_str(last_guards)} */', file=gen_file)
def generate_magic_strings(args, ini_path, item_name, pattern, inc_h_path, def_macro, with_size_macro=None):
defs = read_magic_string_defs(args.debug, ini_path, item_name)
@@ -294,12 +275,13 @@ def generate_magic_strings(args, ini_path, item_name, pattern, inc_h_path, def_m
extended_defs = calculate_magic_string_guards(defs, uses, debug=args.debug)
with open(inc_h_path, 'w') as gen_file:
with open(inc_h_path, 'w', encoding='utf8') as gen_file:
generate_header(gen_file, ini_path)
generate_magic_string_defs(gen_file, extended_defs, def_macro)
if with_size_macro:
generate_first_magic_strings(gen_file, extended_defs, with_size_macro)
def main():
parser = argparse.ArgumentParser(description='lit-magic-strings.inc.h generator')
parser.add_argument('--debug', action='store_true', help='enable debug output')
@@ -329,5 +311,6 @@ def main():
subprocess.call([FORMAT_SCRIPT, '--fix'])
if __name__ == '__main__':
main()
+19 -23
View File
@@ -14,8 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import csv
import itertools
@@ -45,7 +43,7 @@ UNICODE_PLANE_TYPE_SUPPLEMENTARY = 1
# common code generation
class UnicodeBasicSource(object):
class UnicodeBasicSource:
# pylint: disable=too-many-instance-attributes
def __init__(self, filepath, character_type="uint16_t", length_type="uint8_t"):
self._filepath = filepath
@@ -81,7 +79,7 @@ class UnicodeBasicSource(object):
idx = 0
for table in tables:
self.add_table(table,
"/**\n * %s %s.\n */" % (self._range_table_descriptions[idx], category),
f"/**\n * {self._range_table_descriptions[idx]} {category}.\n */",
self._range_table_types[idx],
category,
self._range_table_names[idx])
@@ -103,18 +101,16 @@ class UnicodeBasicSource(object):
def add_table(self, table, description, table_type, category, table_name):
if table and sum(table) != 0:
self._data.append(description)
self._data.append("static const %s lit_unicode_%s%s%s[] JERRY_ATTR_CONST_DATA ="
% (table_type,
category.lower(),
"_" + table_name if table_name else "",
self._table_name_suffix))
self._data.append(f"static const {table_type} lit_unicode_{category.lower()}"
f"{'_' + table_name if table_name else ''}{self._table_name_suffix}"
f"[] JERRY_ATTR_CONST_DATA =")
self._data.append("{")
self._data.append(format_code(table, 1, 6 if self._table_name_suffix else 4))
self._data.append("};")
self._data.append("") # for an extra empty line
def generate(self):
with open(self._filepath, 'w') as generated_source:
with open(self._filepath, 'w', encoding='utf8') as generated_source:
generated_source.write("\n".join(self._header))
generated_source.write("\n".join(self._data))
@@ -127,14 +123,14 @@ class UnicodeSupplementarySource(UnicodeBasicSource):
def add_whitepace_range(self, category, categorizer, units):
self.add_range(category, categorizer.create_tables(units))
class UnicodeBasicCategorizer(object):
class UnicodeBasicCategorizer:
def __init__(self):
self._length_limit = 0xff
self.extra_id_continue_units = set([0x200C, 0x200D])
#pylint: disable=no-self-use
def in_range(self, i):
return i >= 0x80 and i < 0x10000
return 0x80 <= i < 0x10000
def _group_ranges(self, units):
"""
@@ -194,7 +190,7 @@ class UnicodeBasicCategorizer(object):
# <HEX>..<HEX> ; <category> # <subcategory>
matcher = r"(?P<start>[\dA-F]+)(?:\.\.(?P<end>[\dA-F]+))?\s+; (?P<category>[\w]+) # (?P<subcategory>[\w&]{2})"
with open(file_path, "r") as src_file:
with open(file_path, "r", encoding='utf8') as src_file:
for line in src_file:
match = re.match(matcher, line)
@@ -227,7 +223,7 @@ class UnicodeBasicCategorizer(object):
upper_case_mapping = {}
# Add one-to-one mappings
with open(unicode_data_file) as unicode_data:
with open(unicode_data_file, encoding='utf8') as unicode_data:
reader = csv.reader(unicode_data, delimiter=';')
for line in reader:
@@ -246,7 +242,7 @@ class UnicodeBasicCategorizer(object):
lower_case_mapping[letter_id] = parse_unicode_sequence(small_letter)
# Update the conversion tables with the special cases
with open(special_casing_file) as special_casing:
with open(special_casing_file, encoding='utf8') as special_casing:
reader = csv.reader(special_casing, delimiter=';')
for line in reader:
@@ -293,8 +289,8 @@ def generate_ranges(script_args, plane_type):
c_source = UnicodeBasicSource(RANGES_C_SOURCE)
categorizer = UnicodeBasicCategorizer()
header_completion = ["/* This file is automatically generated by the %s script" % os.path.basename(__file__),
" * from %s. Do not edit! */" % (DERIVED_PROPS_FILE),
header_completion = [f"/* This file is automatically generated by the {os.path.basename(__file__)} script",
f" * from {DERIVED_PROPS_FILE}. Do not edit! */",
""]
c_source.complete_header("\n".join(header_completion))
@@ -652,8 +648,8 @@ def generate_conversions(script_args, plane_type):
c_source = UnicodeBasicSource(CONVERSIONS_C_SOURCE)
categorizer = UnicodeBasicCategorizer()
header_completion = ["/* This file is automatically generated by the %s script" % os.path.basename(__file__),
" * from %s and %s files. Do not edit! */" % (UNICODE_DATA_FILE, SPECIAL_CASING_FILE),
header_completion = [f"/* This file is automatically generated by the {os.path.basename(__file__)} script",
f" * from {UNICODE_DATA_FILE} and {SPECIAL_CASING_FILE} files. Do not edit! */",
""]
c_source.complete_header("\n".join(header_completion))
@@ -725,8 +721,8 @@ def generate_folding(script_args, plane_type):
c_source = UnicodeBasicSource(FOLDING_C_SOURCE)
categorizer = UnicodeBasicCategorizer()
header_completion = ["/* This file is automatically generated by the %s script" % os.path.basename(__file__),
" * from the %s file. Do not edit! */" % (CASE_FOLDING_FILE),
header_completion = [f"/* This file is automatically generated by the {os.path.basename(__file__)} script",
f" * from the {CASE_FOLDING_FILE} file. Do not edit! */",
""]
c_source.complete_header("\n".join(header_completion))
@@ -740,7 +736,7 @@ def generate_folding(script_args, plane_type):
folding = {}
with open(case_folding_path, 'r') as case_folding:
with open(case_folding_path, 'r', encoding='utf8') as case_folding:
case_folding_re = re.compile(r'(?P<code_point>[^;]*);\s*(?P<type>[^;]*);\s*(?P<folding>[^;]*);')
for line in case_folding:
match = case_folding_re.match(line)
@@ -782,7 +778,7 @@ def main():
''')
def check_dir(path):
if not os.path.isdir(path) or not os.access(path, os.R_OK):
raise argparse.ArgumentTypeError('The %s directory does not exist or is not readable!' % path)
raise argparse.ArgumentTypeError(f'The {path} directory does not exist or is not readable!')
return path
parser.add_argument('--unicode-dir', metavar='DIR', action='store', required=True,
+2 -2
View File
@@ -33,11 +33,11 @@ def format_code(code, indent, digit_number=4):
def regroup(list_to_group, num):
return [list_to_group[i:i+num] for i in range(0, len(list_to_group), num)]
def hex_format(char, digit_number):
def hex_format(char, padding):
if isinstance(char, str):
char = ord(char)
return ("0x{:0%sx}" % digit_number).format(char)
return f"{char:#0{padding + 2}x}"
lines = []
+2 -5
View File
@@ -13,9 +13,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
# force // operator to be integer division in Python 2
from __future__ import division
import argparse
import json
@@ -53,7 +50,7 @@ def check_files(opts):
files = [JERRY_BUILDER, opts.testfile]
for _file in files:
if not os.path.isfile(_file):
sys.exit("File not found: %s" % _file)
sys.exit(f"File not found: {_file}")
def build_bin(heapsize, opts):
@@ -68,7 +65,7 @@ def build_bin(heapsize, opts):
if opts.buildtype == 'debug':
command.append(FLAG_DEBUG)
print('Building JerryScript with: %s' % (' '.join(command)))
print(f"Building JerryScript with: {' '.join(command)}")
subprocess.check_output(command)
+10 -10
View File
@@ -57,7 +57,7 @@ def reduce_code(code):
def js_to_native_code(path, name, build_type):
with open(path, 'r') as js_source:
with open(path, 'r', encoding='utf8') as js_source:
code = js_source.read()
if build_type != 'debug':
@@ -65,13 +65,13 @@ def js_to_native_code(path, name, build_type):
data = format_code(code, 1, 2)
native_code = """const static char {0}_n[] = "{0}";
const static char {0}_s[] =
native_code = f"""const static char {name}_n[] = "{name}";
const static char {name}_s[] =
{{
{1}
{data}
}};
const static int {0}_l = {2};
""".format(name, data, len(code))
const static int {name}_l = {len(code)};
"""
return native_code
@@ -96,13 +96,13 @@ def main():
script_args = parser.parse_args()
gen_line = "/* This file is generated by %s. Please do not modify. */" % os.path.basename(__file__)
gen_line = f"/* This file is generated by {os.path.basename(__file__)}. Please do not modify. */"
gen_output = [LICENSE, "", gen_line, "", HEADER]
gen_structs = [NATIVE_STRUCT]
if script_args.main:
gen_structs.append(' {{ {0}_n, {0}_s, {0}_l }}, \\'.format("main"))
gen_structs.append(' { main_n, main_s, main_l }, \\')
files = glob.glob(os.path.join(script_args.js_source_path, '*.js'))
@@ -111,14 +111,14 @@ def main():
name = extract_name(path)
gen_output.append(js_to_native_code(path, name, script_args.build_type))
if name != 'main':
gen_structs.append(' {{ {0}_n, {0}_s, {0}_l }}, \\'.format(name))
gen_structs.append(f' {{ {name}_n, {name}_s, {name}_l }}, \\')
gen_structs.append(' { NULL, NULL, 0 } \\\n};')
gen_output.append("\n".join(gen_structs))
gen_output.append(FOOTER)
with open(os.path.join(script_args.output_path, 'jerry-targetjs.h'), 'w') as gen_file:
with open(os.path.join(script_args.output_path, 'jerry-targetjs.h'), 'w', encoding='utf8') as gen_file:
gen_file.write("\n".join(gen_output))
+2 -2
View File
@@ -59,8 +59,8 @@ confidence=
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
disable=import-star-module-level,old-octal-literal,oct-method,unpacking-in-except,parameter-unpacking,backtick,old-raise-syntax,old-ne-operator,long-suffix,dict-view-method,dict-iter-method,metaclass-assignment,next-method-called,raising-string,indexing-exception,raw_input-builtin,long-builtin,file-builtin,execfile-builtin,coerce-builtin,cmp-builtin,buffer-builtin,basestring-builtin,apply-builtin,filter-builtin-not-iterating,using-cmp-argument,useless-suppression,range-builtin-not-iterating,suppressed-message,no-absolute-import,old-division,cmp-method,reload-builtin,zip-builtin-not-iterating,intern-builtin,unichr-builtin,reduce-builtin,standarderror-builtin,unicode-builtin,xrange-builtin,coerce-method,delslice-method,getslice-method,setslice-method,input-builtin,round-builtin,hex-method,nonzero-method,map-builtin-not-iterating,missing-docstring,locally-disabled
disable=
missing-docstring,
[REPORTS]
+25 -27
View File
@@ -14,8 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import collections
import hashlib
@@ -26,7 +24,7 @@ import sys
import settings
if sys.version_info.major >= 3:
import runners.util as util # pylint: disable=import-error
from runners import util
else:
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/runners')
import util
@@ -205,17 +203,19 @@ TERM_BLUE = '\033[1;34m'
TERM_RED = '\033[1;31m'
def report_command(cmd_type, cmd, env=None):
sys.stderr.write('%s%s%s\n' % (TERM_BLUE, cmd_type, TERM_NORMAL))
sys.stderr.write(f'{TERM_BLUE}{cmd_type}{TERM_NORMAL}\n')
if env is not None:
sys.stderr.write(''.join('%s%s=%r \\%s\n' % (TERM_BLUE, var, val, TERM_NORMAL)
sys.stderr.write(''.join(f'{TERM_BLUE}{var}={val!r} \\{TERM_NORMAL}\n'
for var, val in sorted(env.items())))
sys.stderr.write('%s%s%s\n' % (TERM_BLUE, (' \\%s\n\t%s' % (TERM_NORMAL, TERM_BLUE)).join(cmd), TERM_NORMAL))
sys.stderr.write(f"{TERM_BLUE}" +
f" \\{TERM_NORMAL}\n\t{TERM_BLUE}".join(cmd) +
f"{TERM_NORMAL}\n")
def report_skip(job):
sys.stderr.write('%sSkipping: %s' % (TERM_YELLOW, job.name))
sys.stderr.write(f'{TERM_YELLOW}Skipping: {job.name}')
if job.skip:
sys.stderr.write(' (%s)' % job.skip)
sys.stderr.write('%s\n' % TERM_NORMAL)
sys.stderr.write(f' ({job.skip})')
sys.stderr.write(f'{TERM_NORMAL}\n')
def create_binary(job, options):
build_args = job.build_args[:]
@@ -232,20 +232,20 @@ def create_binary(job, options):
build_cmd.append(settings.BUILD_SCRIPT)
build_cmd.extend(build_args)
build_cmd.append('--builddir=%s' % build_dir_path)
build_cmd.append(f'--builddir={build_dir_path}')
install_dir_path = os.path.join(build_dir_path, 'local')
build_cmd.append('--install=%s' % install_dir_path)
build_cmd.append(f'--install={install_dir_path}')
if options.toolchain:
build_cmd.append('--toolchain=%s' % options.toolchain)
build_cmd.append(f'--toolchain={options.toolchain}')
report_command('Build command:', build_cmd)
binary_key = tuple(sorted(build_args))
if binary_key in BINARY_CACHE:
ret, build_dir_path = BINARY_CACHE[binary_key]
sys.stderr.write('(skipping: already built at %s with returncode %d)\n' % (build_dir_path, ret))
sys.stderr.write(f'(skipping: already built at {build_dir_path} with returncode {ret})\n')
return ret, build_dir_path
try:
@@ -282,19 +282,17 @@ def iterate_test_runner_jobs(jobs, options):
yield job, ret_build, None
if build_dir_path in tested_paths:
sys.stderr.write('(skipping: already tested with %s)\n' % build_dir_path)
sys.stderr.write(f'(skipping: already tested with {build_dir_path})\n')
continue
else:
tested_paths.add(build_dir_path)
tested_paths.add(build_dir_path)
bin_path = get_binary_path(build_dir_path)
bin_hash = hash_binary(bin_path)
if bin_hash in tested_hashes:
sys.stderr.write('(skipping: already tested with equivalent %s)\n' % tested_hashes[bin_hash])
sys.stderr.write(f'(skipping: already tested with equivalent {tested_hashes[bin_hash]})\n')
continue
else:
tested_hashes[bin_hash] = build_dir_path
tested_hashes[bin_hash] = build_dir_path
test_cmd = util.get_python_cmd_prefix()
test_cmd.extend([settings.TEST_RUNNER_SCRIPT, '--engine', bin_path])
@@ -309,16 +307,16 @@ def run_check(runnable, env=None):
full_env.update(env)
env = full_env
proc = subprocess.Popen(runnable, env=env)
proc.wait()
return proc.returncode
with subprocess.Popen(runnable, env=env) as proc:
proc.wait()
return proc.returncode
def run_jerry_debugger_tests(options):
ret_build = ret_test = 0
for job in DEBUGGER_TEST_OPTIONS:
ret_build, build_dir_path = create_binary(job, options)
if ret_build:
print("\n%sBuild failed%s\n" % (TERM_RED, TERM_NORMAL))
print(f"\n{TERM_RED}Build failed{TERM_NORMAL}\n")
break
for channel in ["websocket", "rawpacket"]:
@@ -356,7 +354,7 @@ def run_jerry_tests(options):
skip_list = []
if job.name == 'jerry_tests-snapshot':
with open(settings.SNAPSHOT_TESTS_SKIPLIST, 'r') as snapshot_skip_list:
with open(settings.SNAPSHOT_TESTS_SKIPLIST, 'r', encoding='utf8') as snapshot_skip_list:
for line in snapshot_skip_list:
skip_list.append(line.rstrip())
@@ -381,7 +379,7 @@ def run_test262_test_suite(options):
for job in jobs:
ret_build, build_dir_path = create_binary(job, options)
if ret_build:
print("\n%sBuild failed%s\n" % (TERM_RED, TERM_NORMAL))
print(f"\n{TERM_RED}Build failed{TERM_NORMAL}\n")
break
test_cmd = util.get_python_cmd_prefix() + [
@@ -411,7 +409,7 @@ def run_unittests(options):
continue
ret_build, build_dir_path = create_binary(job, options)
if ret_build:
print("\n%sBuild failed%s\n" % (TERM_RED, TERM_NORMAL))
print(f"\n{TERM_RED}Build failed{TERM_NORMAL}\n")
break
if sys.platform == 'win32':
@@ -440,7 +438,7 @@ def run_buildoption_test(options):
ret, _ = create_binary(job, options)
if ret:
print("\n%sBuild failed%s\n" % (TERM_RED, TERM_NORMAL))
print(f"\n{TERM_RED}Build failed{TERM_NORMAL}\n")
break
return ret
+30 -34
View File
@@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import os
import re
@@ -78,7 +77,7 @@ def update_exclude_list(args):
passing_tests = set()
failing_tests = set()
new_passing_tests = set()
with open(os.path.join(os.path.dirname(args.engine), 'test262.report'), 'r') as report_file:
with open(os.path.join(os.path.dirname(args.engine), 'test262.report'), 'r', encoding='utf8') as report_file:
for line in report_file:
match = re.match('(=== )?(.*) (?:failed|passed) in (?:non-strict|strict)', line)
if match:
@@ -92,7 +91,7 @@ def update_exclude_list(args):
# Tests pass in strict-mode but fail in non-strict-mode (or vice versa) should be considered as failures
passing_tests = passing_tests - failing_tests
with open(args.excludelist_path, 'r+') as exclude_file:
with open(args.excludelist_path, 'r+', encoding='utf8') as exclude_file:
lines = exclude_file.readlines()
exclude_file.seek(0)
exclude_file.truncate()
@@ -167,41 +166,38 @@ def main(args):
if args.test262_test_list:
test262_command.extend(args.test262_test_list.split(','))
proc = subprocess.Popen(test262_command,
universal_newlines=True,
stdout=subprocess.PIPE,
**kwargs)
with subprocess.Popen(test262_command, universal_newlines=True, stdout=subprocess.PIPE, **kwargs) as proc:
return_code = 1
with open(os.path.join(os.path.dirname(args.engine), 'test262.report'), 'w') as output_file:
counter = 0
summary_found = False
summary_end_found = False
while True:
output = proc.stdout.readline()
if not output:
break
output_file.write(output)
return_code = 1
with open(os.path.join(os.path.dirname(args.engine), 'test262.report'), 'w', encoding='utf8') as output_file:
counter = 0
summary_found = False
summary_end_found = False
while True:
output = proc.stdout.readline()
if not output:
break
output_file.write(output)
if output.startswith('=== Summary ==='):
summary_found = True
print('')
if output.startswith('=== Summary ==='):
summary_found = True
print('')
if summary_found:
if not summary_end_found:
print(output, end='')
if not output.strip():
summary_end_found = True
if 'All tests succeeded' in output:
return_code = 0
elif re.search('in (non-)?strict mode', output):
counter += 1
if (counter % 100) == 0:
print(".", end='')
if (counter % 5000) == 0:
print(" Executed %d tests." % counter)
if summary_found:
if not summary_end_found:
print(output, end='')
if not output.strip():
summary_end_found = True
if 'All tests succeeded' in output:
return_code = 0
elif re.search('in (non-)?strict mode', output):
counter += 1
if (counter % 100) == 0:
print(".", end='')
if (counter % 5000) == 0:
print(f" Executed {counter} tests.")
proc.wait()
proc.wait()
if sys.platform == 'win32':
util.set_timezone(original_timezone)
+12 -13
View File
@@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import os
import subprocess
@@ -22,7 +21,7 @@ import sys
import util
def get_arguments():
def get_args():
execution_runtime = os.environ.get('RUNTIME')
parser = argparse.ArgumentParser()
parser.add_argument('-q', '--quiet', action='store_true',
@@ -60,7 +59,7 @@ def get_tests(test_dir, test_list, skip_list):
if test_list:
dirname = os.path.dirname(test_list)
with open(test_list, "r") as test_list_fd:
with open(test_list, "r", encoding='utf8') as test_list_fd:
for test in test_list_fd:
tests.append(os.path.normpath(os.path.join(dirname, test.rstrip())))
@@ -79,10 +78,10 @@ def execute_test_command(test_cmd):
kwargs = {}
if sys.version_info.major >= 3:
kwargs['encoding'] = 'unicode_escape'
process = subprocess.Popen(test_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
universal_newlines=True, **kwargs)
stdout = process.communicate()[0]
return (process.returncode, stdout)
with subprocess.Popen(test_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
universal_newlines=True, **kwargs) as process:
stdout, _ = process.communicate()
return process.returncode, stdout
def main(args):
@@ -142,10 +141,10 @@ def run_normal_tests(args, tests):
if (returncode == 0 and not is_expected_to_fail) or (returncode == 1 and is_expected_to_fail):
passed += 1
if not args.quiet:
passed_string = 'PASS' + (' (XFAIL)' if is_expected_to_fail else '')
passed_string = f"PASS{' (XFAIL)' if is_expected_to_fail else ''}"
util.print_test_result(tested, total, True, passed_string, test_path)
else:
passed_string = 'FAIL%s (%d)' % (' (XPASS)' if returncode == 0 and is_expected_to_fail else '', returncode)
passed_string = f"FAIL{' (XPASS)' if returncode == 0 and is_expected_to_fail else ''} ({returncode})"
util.print_test_result(tested, total, False, passed_string, test_path)
print("================================================")
print(stdout)
@@ -183,7 +182,7 @@ def run_snapshot_tests(args, tests):
passed_string = 'PASS' + (' (XFAIL)' if returncode else '')
util.print_test_result(tested, total, True, passed_string, test_path, True)
else:
util.print_test_result(tested, total, False, 'FAIL (%d)' % (returncode), test_path, True)
util.print_test_result(tested, total, False, f'FAIL ({returncode})', test_path, True)
print("================================================")
print(stdout)
print("================================================")
@@ -199,10 +198,10 @@ def run_snapshot_tests(args, tests):
if (returncode == 0 and not is_expected_to_fail) or (returncode == 1 and is_expected_to_fail):
passed += 1
if not args.quiet:
passed_string = 'PASS' + (' (XFAIL)' if is_expected_to_fail else '')
passed_string = f"PASS{' (XFAIL)' if is_expected_to_fail else ''}"
util.print_test_result(tested, total, True, passed_string, test_path, False)
else:
passed_string = 'FAIL%s (%d)' % (' (XPASS)' if returncode == 0 and is_expected_to_fail else '', returncode)
passed_string = f"FAIL{' (XPASS)' if returncode == 0 and is_expected_to_fail else ''} ({returncode})"
util.print_test_result(tested, total, False, passed_string, test_path, False)
print("================================================")
print(stdout)
@@ -212,4 +211,4 @@ def run_snapshot_tests(args, tests):
if __name__ == "__main__":
sys.exit(main(get_arguments()))
sys.exit(main(get_args()))
+1 -2
View File
@@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import glob
import os
@@ -71,7 +70,7 @@ def main(args):
util.print_test_result(tested, total, True, 'PASS', test_path)
except subprocess.CalledProcessError as err:
failed += 1
util.print_test_result(tested, total, False, 'FAIL (%d)' % err.returncode, test_path)
util.print_test_result(tested, total, False, f'FAIL ({err.returncode})', test_path)
print("================================================")
print(err.output)
print("================================================")
+88 -101
View File
@@ -40,10 +40,8 @@
# This code is governed by the BSD license found in the LICENSE file.
from __future__ import print_function
import logging
import optparse
import argparse
import os
from os import path
import platform
@@ -55,7 +53,6 @@ import xml.dom.minidom
from collections import Counter
import signal
import threading
import multiprocessing
#######################################################################
@@ -112,7 +109,7 @@ def my_read_dict(lines, indent=""):
def my_read_value(lines, value, indent):
if value == ">" or value == "|":
if value in (">", "|"):
(lines, value) = my_multiline(lines, value == "|")
value = value + "\n"
return (lines, value)
@@ -157,7 +154,7 @@ def my_remove_list_header(indent, line):
def my_read_one_line(value):
if M_YAML_LIST_PATTERN.match(value):
return my_flow_list(value)
elif re.match(r"^[-0-9]*$", value):
if re.match(r"^[-0-9]*$", value):
try:
value = int(value)
except ValueError:
@@ -196,7 +193,7 @@ def my_multiline(lines, preserve_newlines=False):
break
else:
if preserve_newlines:
if was_empty != None:
if was_empty is not None:
value += "\n"
else:
if was_empty:
@@ -248,7 +245,7 @@ _LICENSE_PATTERN = re.compile(
def yaml_attr_parser(test_record, attrs, name, onerror=print):
parsed = yaml_load(attrs)
if parsed is None:
onerror("Failed to parse yaml in name %s" % name)
onerror(f"Failed to parse yaml in name {name}")
return
for key in parsed:
@@ -287,7 +284,7 @@ def parse_test_record(src, name, onerror=print):
# YAML frontmatter is required for all tests.
if frontmatter is None:
onerror("Missing frontmatter: %s" % name)
onerror(f"Missing frontmatter: {name}")
# The license shuold be placed before the frontmatter and there shouldn't be
# any extra content between the license and the frontmatter.
@@ -295,13 +292,13 @@ def parse_test_record(src, name, onerror=print):
header_idx = src.index(header)
frontmatter_idx = src.index(frontmatter)
if header_idx > frontmatter_idx:
onerror("Unexpected license after frontmatter: %s" % name)
onerror(f"Unexpected license after frontmatter: {name}")
# Search for any extra test content, but ignore whitespace only or comment lines.
extra = src[header_idx + len(header): frontmatter_idx]
if extra and any(line.strip() and not line.lstrip().startswith("//") for line in extra.split("\n")):
onerror(
"Unexpected test content between license and frontmatter: %s" % name)
f"Unexpected test content between license and frontmatter: {name}")
# Remove the license and YAML parts from the actual test content.
test = src
@@ -319,7 +316,7 @@ def parse_test_record(src, name, onerror=print):
# Report if the license block is missing in non-generated tests.
if header is None and "generated" not in test_record and "hashbang" not in name:
onerror("No license found in: %s" % name)
onerror(f"No license found in: {name}")
return test_record
@@ -339,36 +336,37 @@ def report_error(error_string):
def build_options():
result = optparse.OptionParser()
result.add_option("--command", default=None,
help="The command-line to run")
result.add_option("--tests", default=path.abspath('.'),
help="Path to the tests")
result.add_option("--exclude-list", default=None,
help="Path to the excludelist.xml file")
result.add_option("--cat", default=False, action="store_true",
help="Print packaged test code that would be run")
result.add_option("--summary", default=False, action="store_true",
help="Print summary after running tests")
result.add_option("--full-summary", default=False, action="store_true",
help="Print summary and test output after running tests")
result.add_option("--strict_only", default=False, action="store_true",
help="Test only strict mode")
result.add_option("--non_strict_only", default=False, action="store_true",
help="Test only non-strict mode")
result.add_option("--unmarked_default", default="both",
help="default mode for tests of unspecified strictness")
result.add_option("-j", "--job-count", default=None, action="store", type=int,
help="Number of parallel test jobs to run. In case of '0' cpu count is used.")
result.add_option("--logname", help="Filename to save stdout to")
result.add_option("--loglevel", default="warning",
help="sets log level to debug, info, warning, error, or critical")
result.add_option("--print-handle", default="print",
help="Command to print from console")
result.add_option("--list-includes", default=False, action="store_true",
help="List includes required by tests")
result.add_option("--module-flag", default="-m",
help="List includes required by tests")
result = argparse.ArgumentParser()
result.add_argument("--command", default=None,
help="The command-line to run")
result.add_argument("--tests", default=path.abspath('.'),
help="Path to the tests")
result.add_argument("--exclude-list", default=None,
help="Path to the excludelist.xml file")
result.add_argument("--cat", default=False, action="store_true",
help="Print packaged test code that would be run")
result.add_argument("--summary", default=False, action="store_true",
help="Print summary after running tests")
result.add_argument("--full-summary", default=False, action="store_true",
help="Print summary and test output after running tests")
result.add_argument("--strict_only", default=False, action="store_true",
help="Test only strict mode")
result.add_argument("--non_strict_only", default=False, action="store_true",
help="Test only non-strict mode")
result.add_argument("--unmarked_default", default="both",
help="default mode for tests of unspecified strictness")
result.add_argument("-j", "--job-count", default=None, action="store", type=int,
help="Number of parallel test jobs to run. In case of '0' cpu count is used.")
result.add_argument("--logname", help="Filename to save stdout to")
result.add_argument("--loglevel", default="warning",
help="sets log level to debug, info, warning, error, or critical")
result.add_argument("--print-handle", default="print",
help="Command to print from console")
result.add_argument("--list-includes", default=False, action="store_true",
help="List includes required by tests")
result.add_argument("--module-flag", default="-m",
help="List includes required by tests")
result.add_argument("test_list", nargs='*', default=None)
return result
@@ -376,15 +374,15 @@ def validate_options(options):
if not options.command:
report_error("A --command must be specified.")
if not path.exists(options.tests):
report_error("Couldn't find test path '%s'" % options.tests)
report_error(f"Couldn't find test path '{options.tests}'")
def is_windows():
actual_platform = platform.system()
return (actual_platform == 'Windows') or (actual_platform == 'Microsoft')
return actual_platform in ('Windows', 'Microsoft')
class TempFile(object):
class TempFile:
def __init__(self, suffix="", prefix="tmp", text=False):
self.suffix = suffix
@@ -405,7 +403,7 @@ class TempFile(object):
os.write(self.file_desc, string.encode('utf8'))
def read(self):
with open(self.name, "r", newline='') as file_desc:
with open(self.name, "r", newline='', encoding='utf8') as file_desc:
return file_desc.read()
def close(self):
@@ -417,11 +415,11 @@ class TempFile(object):
try:
self.close()
os.unlink(self.name)
except OSError as exception:
logging.error("Error disposing temp file: %s", str(exception))
except OSError as os_error:
logging.error("Error disposing temp file: %s", os_error)
class TestResult(object):
class TestResult:
def __init__(self, exit_code, stdout, stderr, case):
self.exit_code = exit_code
@@ -433,37 +431,36 @@ class TestResult(object):
name = self.case.get_name()
mode = self.case.get_mode()
if self.exit_code != 0 and self.exit_code != 1:
sys.stderr.write(u"===%s failed in %s with negative:%s===\n"
% (name, mode, self.case.get_negative_type()))
if self.exit_code not in (0, 1):
sys.stderr.write(f"==={name} failed in {mode} with negative:{self.case.get_negative_type()}===\n")
self.write_output(sys.stderr)
if self.has_unexpected_outcome():
if self.case.is_negative():
print("=== %s passed in %s, but was expected to fail ===" % (name, mode))
print("--- expected error: %s ---\n" % self.case.get_negative_type())
print(f"=== {name} passed in {mode}, but was expected to fail ===")
print(f"--- expected error: {self.case.get_negative_type()} ---\n")
else:
if long_format:
print("=== %s failed in %s ===" % (name, mode))
print(f"=== {name} failed in {mode} ===")
else:
print("%s in %s: " % (name, mode))
print(f"{name} in {mode}: ")
self.write_output(sys.stdout)
if long_format:
print("===")
elif self.case.is_negative():
print("%s failed in %s as expected" % (name, mode))
print(f"{name} failed in {mode} as expected")
else:
print("%s passed in %s" % (name, mode))
print(f"{name} passed in {mode}")
def write_output(self, target):
out = self.stdout.strip()
if out:
target.write("--- output --- \n %s" % out)
target.write(f"--- output --- \n {out}")
error = self.stderr.strip()
if error:
target.write("--- errors --- \n %s" % error)
target.write(f"--- errors --- \n {error}")
target.write("\n--- exit code: %d ---\n" % self.exit_code)
target.write(f"\n--- exit code: {self.exit_code} ---\n")
def has_failed(self):
return self.exit_code != 0
@@ -486,14 +483,14 @@ class TestResult(object):
return self.stdout
class TestCase(object):
class TestCase:
def __init__(self, suite, name, full_path, strict_mode, command_template, module_flag):
self.suite = suite
self.name = name
self.full_path = full_path
self.strict_mode = strict_mode
with open(self.full_path, "r", newline='') as file_desc:
with open(self.full_path, "r", newline='', encoding='utf8') as file_desc:
self.contents = file_desc.read()
test_record = parse_test_record(self.contents, name)
self.test = test_record["test"]
@@ -602,25 +599,25 @@ class TestCase(object):
@staticmethod
def execute(command):
if is_windows():
args = '%s' % command
args = f'{command}'
else:
args = command.split(" ")
stdout = TempFile(prefix="test262-out-")
stderr = TempFile(prefix="test262-err-")
try:
logging.info("exec: %s", str(args))
process = subprocess.Popen(
with subprocess.Popen(
args,
shell=False,
stdout=stdout.file_desc,
stderr=stderr.file_desc
)
timer = threading.Timer(TEST262_CASE_TIMEOUT, process.kill)
timer.start()
code = process.wait()
timer.cancel()
out = stdout.read()
err = stderr.read()
) as process:
try:
code = process.wait(timeout=TEST262_CASE_TIMEOUT)
except subprocess.TimeoutExpired:
process.kill()
out = stdout.read()
err = stderr.read()
finally:
stdout.dispose()
stderr.dispose()
@@ -666,10 +663,10 @@ class TestCase(object):
if 'raw' in flags:
if 'noStrict' in flags:
raise TypeError("The `raw` flag implies the `noStrict` flag")
elif 'onlyStrict' in flags:
if 'onlyStrict' in flags:
raise TypeError(
"The `raw` flag is incompatible with the `onlyStrict` flag")
elif self.get_include_list():
if self.get_include_list():
raise TypeError(
"The `raw` flag is incompatible with the `includes` tag")
@@ -683,7 +680,7 @@ def test_case_run_process(case):
return case.run()
class ProgressIndicator(object):
class ProgressIndicator:
def __init__(self, count):
self.count = count
@@ -700,18 +697,11 @@ class ProgressIndicator(object):
self.succeeded += 1
def make_plural(num):
if num == 1:
return (num, "")
return (num, "s")
def percent_format(partial, total):
return "%i test%s (%.1f%%)" % (make_plural(partial) +
((100.0 * partial)/total,))
return f"{partial} test{'s' if partial > 1 else ''} ({(100.0 * partial)/total:.1f}%)"
class TestSuite(object):
class TestSuite:
def __init__(self, options):
self.test_root = path.join(options.tests, 'test')
@@ -760,7 +750,7 @@ class TestSuite(object):
if not name in self.include_cache:
static = path.join(self.lib_root, name)
if path.exists(static):
with open(static) as file_desc:
with open(static, encoding='utf8') as file_desc:
contents = file_desc.read()
contents = re.sub(r'\r\n', '\n', contents)
self.include_cache[name] = contents + "\n"
@@ -815,7 +805,7 @@ class TestSuite(object):
count = progress.count
succeeded = progress.succeeded
failed = progress.failed
write(" - Ran %i test%s" % make_plural(count))
write(f" - Ran {count} test{'s' if count > 1 else ''}")
if progress.failed == 0:
write(" - All tests succeeded")
else:
@@ -827,12 +817,12 @@ class TestSuite(object):
print("")
write("Failed Tests")
for result in positive:
write(" %s in %s" % (result.case.get_name(), result.case.get_mode()))
write(f" {result.case.get_name()} in {result.case.get_mode()}")
if negative:
print("")
write("Expected to fail but passed ---")
for result in negative:
write(" %s in %s" % (result.case.get_name(), result.case.get_mode()))
write(f" {result.case.get_name()} in {result.case.get_mode()}")
def print_failure_output(self, progress, logfile):
for result in progress.failed_tests:
@@ -849,7 +839,7 @@ class TestSuite(object):
report_error("No tests to run")
progress = ProgressIndicator(len(cases))
if logname:
self.logf = open(logname, "w")
self.logf = open(logname, "w", encoding='utf8') # pylint: disable=consider-using-with
if job_count == 1:
for case in cases:
@@ -861,15 +851,11 @@ class TestSuite(object):
if job_count == 0:
job_count = None # uses multiprocessing.cpu_count()
pool = multiprocessing.Pool(processes=job_count, initializer=pool_init)
try:
with multiprocessing.Pool(processes=job_count, initializer=pool_init) as pool:
for result in pool.imap(test_case_run_process, cases):
if logname:
self.write_log(result)
progress.has_run(result)
except KeyboardInterrupt:
pool.terminate()
pool.join()
if print_summary:
self.print_summary(progress, logname)
@@ -887,17 +873,17 @@ class TestSuite(object):
if result.has_unexpected_outcome():
if result.case.is_negative():
self.logf.write(
"=== %s passed in %s, but was expected to fail === \n" % (name, mode))
self.logf.write("--- expected error: %s ---\n" % result.case.GetNegativeType())
f"=== {name} passed in {mode}, but was expected to fail === \n")
self.logf.write(f"--- expected error: {result.case.GetNegativeType()} ---\n")
result.write_output(self.logf)
else:
self.logf.write("=== %s failed in %s === \n" % (name, mode))
self.logf.write(f"=== {name} failed in {mode} === \n")
result.write_output(self.logf)
self.logf.write("===\n")
elif result.case.is_negative():
self.logf.write("%s failed in %s as expected \n" % (name, mode))
self.logf.write(f"{name} failed in {mode} as expected \n")
else:
self.logf.write("%s passed in %s \n" % (name, mode))
self.logf.write(f"{name} passed in {mode} \n")
def print_source(self, tests):
cases = self.enumerate_tests(tests, "")
@@ -917,7 +903,8 @@ class TestSuite(object):
def main():
code = 0
parser = build_options()
(options, args) = parser.parse_args()
options = parser.parse_args()
args = options.test_list
validate_options(options)
test_suite = TestSuite(options)
@@ -951,5 +938,5 @@ if __name__ == '__main__':
try:
sys.exit(main())
except Test262Error as exception:
print("Error: %s" % exception.message)
print(f"Error: {exception.message}")
sys.exit(1)
+6 -9
View File
@@ -1,5 +1,3 @@
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import signal
import subprocess
import sys
@@ -46,13 +43,13 @@ def set_sighdl_to_reset_timezone(timezone):
def print_test_summary(summary_string, total, passed, failed):
print("\n[summary] %s\n" % summary_string)
print("TOTAL: %d" % total)
print("%sPASS: %d%s" % (TERM_GREEN, passed, TERM_NORMAL))
print("%sFAIL: %d%s\n" % (TERM_RED, failed, TERM_NORMAL))
print(f"\n[summary] {summary_string}\n")
print(f"TOTAL: {total}")
print(f"{TERM_GREEN}PASS: {passed}{TERM_NORMAL}")
print(f"{TERM_RED}FAIL: {failed}{TERM_NORMAL}\n")
success_color = TERM_GREEN if passed == total else TERM_RED
print("%sSuccess: %d%%%s" % (success_color, passed*100/total, TERM_NORMAL))
print(f"{success_color}Success: {passed*100/total}{TERM_NORMAL}")
def print_test_result(tested, total, is_passed, passed_string, test_path, is_snapshot_generation=None):
@@ -64,7 +61,7 @@ def print_test_result(tested, total, is_passed, passed_string, test_path, is_sna
snapshot_string = ' (execute snapshot)'
color = TERM_GREEN if is_passed else TERM_RED
print("[%4d/%4d] %s%s: %s%s%s" % (tested, total, color, passed_string, test_path, snapshot_string, TERM_NORMAL))
print(f"[{tested:>4}/{total:>4}] {color}{passed_string}: {test_path}{snapshot_string}{TERM_NORMAL}")
def get_platform_cmd_prefix():
-2
View File
@@ -1,5 +1,3 @@
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
+3 -4
View File
@@ -14,8 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import os
import re
@@ -32,7 +30,8 @@ def main():
)
_ = parser.parse_args()
with open(os.path.join(settings.PROJECT_DIR, 'jerry-core', 'include', 'jerryscript.h'), 'r') as header:
with open(os.path.join(settings.PROJECT_DIR, 'jerry-core', 'include', 'jerryscript.h'), 'r',
encoding='utf8') as header:
version = {}
version_re = re.compile(r'\s*#define\s+JERRY_API_(?P<key>MAJOR|MINOR|PATCH)_VERSION\s+(?P<value>\S+)')
for line in header:
@@ -40,7 +39,7 @@ def main():
if match:
version[match.group('key')] = match.group('value')
print('%(MAJOR)s.%(MINOR)s.%(PATCH)s' % version)
print(f'{version["MAJOR"]}.{version["MINOR"]}.{version["PATCH"]}')
if __name__ == "__main__":