refactor: Move parallel test runner code to a separate file
parent 03d3d67b93
commit 76348b892f

3 changed files with 213 additions and 226 deletions
@@ -592,7 +592,7 @@ def run_tests(context, app=None, module=None, doctype=None, test=(), profile=Fal
 @click.option('--with-coverage', is_flag=True, help="Build coverage file")
 @pass_context
 def run_parallel_tests(context, app, build_number, total_builds, with_coverage):
-	from frappe.test_runner import ParallelTestRunner
+	from frappe.parallel_test_runner import ParallelTestRunner
 	site = get_site(context)
 	ParallelTestRunner(app, site=site, build_number=build_number, total_builds=total_builds, with_coverage=with_coverage)
 
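
A minimal sketch of what the rewired command ends up doing (app and site names below are placeholder values, not from this commit): constructing the class is enough, since its __init__ calls setup_test_site() and run_tests() itself.

# Hypothetical direct use of the runner that run_parallel_tests now imports;
# 'frappe' and 'test_site' are example argument values only.
from frappe.parallel_test_runner import ParallelTestRunner

# Instantiating the runner executes this build's share of the app's test
# suite as a side effect of __init__.
ParallelTestRunner('frappe', site='test_site', build_number=1, total_builds=4, with_coverage=False)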

frappe/parallel_test_runner.py (new file, 212 lines)

@@ -0,0 +1,212 @@
import json
import os
import re
import sys
import time
import unittest

import click

import frappe

from .test_runner import (SLOW_TEST_THRESHOLD, make_test_records, set_test_email_config)

click_ctx = click.get_current_context(True)
if click_ctx:
	click_ctx.color = True

class ParallelTestRunner():
	def __init__(self, app, site, build_number=1, total_builds=1, with_coverage=False):
		self.app = app
		self.site = site
		self.with_coverage = with_coverage
		self.build_number = frappe.utils.cint(build_number) or 1
		self.total_builds = frappe.utils.cint(total_builds)
		self.setup_test_site()
		self.run_tests()

	def setup_test_site(self):
		frappe.init(site=self.site)
		if not frappe.db:
			frappe.connect()

		frappe.flags.in_test = True
		frappe.clear_cache()
		frappe.utils.scheduler.disable_scheduler()
		set_test_email_config()
		self.before_test_setup()

	def before_test_setup(self):
		start_time = time.time()
		for fn in frappe.get_hooks("before_tests", app_name=self.app):
			frappe.get_attr(fn)()

		test_module = frappe.get_module(f'{self.app}.tests')

		if hasattr(test_module, "global_test_dependencies"):
			for doctype in test_module.global_test_dependencies:
				make_test_records(doctype)

		elapsed = time.time() - start_time
		elapsed = click.style(f' ({elapsed:.03}s)', fg='red')
		click.echo(f'Before Test {elapsed}')

	def run_tests(self):
		self.test_result = ParallelTestResult(stream=sys.stderr, descriptions=True, verbosity=2)
		self.test_status = 'ongoing'

		self.start_coverage()

		for test_file_info in self.get_test_file_list():
			self.run_tests_for_file(test_file_info)

		self.save_coverage()
		self.print_result()

	def run_tests_for_file(self, file_info):
		frappe.set_user('Administrator')
		path, filename = file_info
		module = self.get_module(path, filename)
		self.create_test_dependency_records(module, path, filename)
		test_suite = unittest.TestSuite()
		module_test_cases = unittest.TestLoader().loadTestsFromModule(module)
		test_suite.addTest(module_test_cases)
		test_suite(self.test_result)

	def create_test_dependency_records(self, module, path, filename):
		if hasattr(module, "test_dependencies"):
			for doctype in module.test_dependencies:
				make_test_records(doctype)

		if os.path.basename(os.path.dirname(path)) == "doctype":
			# test_data_migration_connector.py > data_migration_connector.json
			test_record_filename = re.sub('^test_', '', filename).replace(".py", ".json")
			test_record_file_path = os.path.join(path, test_record_filename)
			if os.path.exists(test_record_file_path):
				with open(test_record_file_path, 'r') as f:
					doc = json.loads(f.read())
					doctype = doc["name"]
					make_test_records(doctype)

	def get_module(self, path, filename):
		app_path = frappe.get_pymodule_path(self.app)
		relative_path = os.path.relpath(path, app_path)
		if relative_path == '.':
			module_name = self.app
		else:
			relative_path = relative_path.replace('/', '.')
			module_name = os.path.splitext(filename)[0]
			module_name = f'{self.app}.{relative_path}.{module_name}'

		return frappe.get_module(module_name)

	def print_result(self):
		self.test_result.printErrors()
		click.echo(self.test_result)
		if self.test_result.failures or self.test_result.errors:
			if os.environ.get('CI'):
				sys.exit(1)

	def start_coverage(self):
		if self.with_coverage:
			from coverage import Coverage
			from frappe.utils import get_bench_path

			# Generate coverage report only for app that is being tested
			source_path = os.path.join(get_bench_path(), 'apps', self.app)
			omit = ['*.html', '*.js', '*.xml', '*.css', '*.less', '*.scss',
				'*.vue', '*/doctype/*/*_dashboard.py', '*/patches/*']

			if self.app == 'frappe':
				omit.append('*/commands/*')

			self.coverage = Coverage(source=[source_path], omit=omit)
			self.coverage.start()

	def save_coverage(self):
		if not self.with_coverage:
			return
		self.coverage.stop()
		self.coverage.save()

	def get_test_file_list(self):
		test_list = get_all_tests(self.app)
		split_size = frappe.utils.ceil(len(test_list) / self.total_builds)
		# [1,2,3,4,5,6] becomes [[1,2], [3,4], [5,6]] if split_size is 2
		test_chunks = [test_list[x:x+split_size] for x in range(0, len(test_list), split_size)]
		return test_chunks[self.build_number - 1]


class ParallelTestResult(unittest.TextTestResult):
	def startTest(self, test):
		self._started_at = time.time()
		super(unittest.TextTestResult, self).startTest(test)
		test_class = unittest.util.strclass(test.__class__)
		if not hasattr(self, 'current_test_class') or self.current_test_class != test_class:
			click.echo(f"\n{unittest.util.strclass(test.__class__)}")
			self.current_test_class = test_class

	def getTestMethodName(self, test):
		return test._testMethodName if hasattr(test, '_testMethodName') else str(test)

	def addSuccess(self, test):
		super(unittest.TextTestResult, self).addSuccess(test)
		elapsed = time.time() - self._started_at
		threshold_passed = elapsed >= SLOW_TEST_THRESHOLD
		elapsed = click.style(f' ({elapsed:.03}s)', fg='red') if threshold_passed else ''
		click.echo(f" {click.style(' ✔ ', fg='green')} {self.getTestMethodName(test)}{elapsed}")

	def addError(self, test, err):
		super(unittest.TextTestResult, self).addError(test, err)
		click.echo(f" {click.style(' ✖ ', fg='red')} {self.getTestMethodName(test)}")

	def addFailure(self, test, err):
		super(unittest.TextTestResult, self).addFailure(test, err)
		click.echo(f" {click.style(' ✖ ', fg='red')} {self.getTestMethodName(test)}")

	def addSkip(self, test, reason):
		super(unittest.TextTestResult, self).addSkip(test, reason)
		click.echo(f" {click.style(' = ', fg='white')} {self.getTestMethodName(test)}")

	def addExpectedFailure(self, test, err):
		super(unittest.TextTestResult, self).addExpectedFailure(test, err)
		click.echo(f" {click.style(' ✖ ', fg='red')} {self.getTestMethodName(test)}")

	def addUnexpectedSuccess(self, test):
		super(unittest.TextTestResult, self).addUnexpectedSuccess(test)
		click.echo(f" {click.style(' ✔ ', fg='green')} {self.getTestMethodName(test)}")

	def printErrors(self):
		click.echo('\n')
		self.printErrorList(' ERROR ', self.errors, 'red')
		self.printErrorList(' FAIL ', self.failures, 'red')

	def printErrorList(self, flavour, errors, color):
		for test, err in errors:
			click.echo(self.separator1)
			click.echo(f"{click.style(flavour, bg=color)} {self.getDescription(test)}")
			click.echo(self.separator2)
			click.echo(err)

	def __str__(self):
		return f"Tests: {self.testsRun}, Failing: {len(self.failures)}, Errors: {len(self.errors)}"


def get_all_tests(app):
	test_file_list = []
	for path, folders, files in os.walk(frappe.get_pymodule_path(app)):
		for dontwalk in ('locals', '.git', 'public', '__pycache__'):
			if dontwalk in folders:
				folders.remove(dontwalk)

		# for predictability
		folders.sort()
		files.sort()

		if os.path.sep.join(["doctype", "doctype", "boilerplate"]) in path:
			# in /doctype/doctype/boilerplate/
			continue

		for filename in files:
			if filename.startswith("test_") and filename.endswith(".py") \
				and filename != 'test_runner.py':
				test_file_list.append([path, filename])

	return test_file_list
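
The build-splitting in get_test_file_list above is plain ceil-division chunking. A standalone sketch of the same arithmetic, assuming math.ceil matches frappe.utils.ceil here:

import math

def chunk_for_build(test_list, build_number, total_builds):
	# Same arithmetic as ParallelTestRunner.get_test_file_list: split into
	# chunks of ceil(n / total_builds) files, then take chunk build_number - 1.
	split_size = math.ceil(len(test_list) / total_builds)
	chunks = [test_list[x:x + split_size] for x in range(0, len(test_list), split_size)]
	return chunks[build_number - 1]

# Six test files across three builds: build 2 gets [3, 4].
print(chunk_for_build([1, 2, 3, 4, 5, 6], build_number=2, total_builds=3))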

frappe/test_runner.py

@@ -9,22 +9,15 @@ import time
import xmlrunner
import importlib
from frappe.modules import load_doctype_module, get_module_name
from frappe.utils import cstr
import frappe.utils.scheduler
import cProfile, pstats
from six import StringIO
from six.moves import reload_module
from frappe.model.naming import revert_series_if_last
import click
import unittest.util

unittest_runner = unittest.TextTestRunner
SLOW_TEST_THRESHOLD = 2

click_ctx = click.get_current_context(True)
if click_ctx:
	click_ctx.color = True

def xmlrunner_wrapper(output):
	"""Convenience wrapper to keep method signature unchanged for XMLTestRunner and TextTestRunner"""
	def _runner(*args, **kwargs):
@@ -428,221 +421,3 @@ def get_test_record_log():
 	frappe.flags.test_record_log = []
 
 	return frappe.flags.test_record_log
-
-
-class ParallelTestResult(unittest.TextTestResult):
-	def startTest(self, test):
-		self._started_at = time.time()
-		super(unittest.TextTestResult, self).startTest(test)
-		test_class = unittest.util.strclass(test.__class__)
-		if not hasattr(self, 'current_test_class') or self.current_test_class != test_class:
-			click.echo(f"\n{unittest.util.strclass(test.__class__)}")
-			self.current_test_class = test_class
-
-	def getTestMethodName(self, test):
-		return test._testMethodName if hasattr(test, '_testMethodName') else str(test)
-
-	def addSuccess(self, test):
-		super(unittest.TextTestResult, self).addSuccess(test)
-		elapsed = time.time() - self._started_at
-		threshold_passed = elapsed >= SLOW_TEST_THRESHOLD
-		elapsed = click.style(f' ({elapsed:.03}s)', fg='red') if threshold_passed else ''
-		click.echo(f" {click.style(' ✔ ', fg='green')} {self.getTestMethodName(test)}{elapsed}")
-
-	def addError(self, test, err):
-		super(unittest.TextTestResult, self).addError(test, err)
-		click.echo(f" {click.style(' ✖ ', fg='red')} {self.getTestMethodName(test)}")
-
-	def addFailure(self, test, err):
-		super(unittest.TextTestResult, self).addFailure(test, err)
-		click.echo(f" {click.style(' ✖ ', fg='red')} {self.getTestMethodName(test)}")
-
-	def addSkip(self, test, reason):
-		super(unittest.TextTestResult, self).addSkip(test, reason)
-		click.echo(f" {click.style(' = ', fg='white')} {self.getTestMethodName(test)}")
-
-	def addExpectedFailure(self, test, err):
-		super(unittest.TextTestResult, self).addExpectedFailure(test, err)
-		click.echo(f" {click.style(' ✖ ', fg='red')} {self.getTestMethodName(test)}")
-
-	def addUnexpectedSuccess(self, test):
-		super(unittest.TextTestResult, self).addUnexpectedSuccess(test)
-		click.echo(f" {click.style(' ✔ ', fg='green')} {self.getTestMethodName(test)}")
-
-	def printErrors(self):
-		click.echo('\n')
-		self.printErrorList(' ERROR ', self.errors, 'red')
-		self.printErrorList(' FAIL ', self.failures, 'red')
-
-	def printErrorList(self, flavour, errors, color):
-		for test, err in errors:
-			click.echo(self.separator1)
-			click.echo(f"{click.style(flavour, bg=color)} {self.getDescription(test)}")
-			click.echo(self.separator2)
-			click.echo(err)
-
-	def __repr__(self):
-		return f"Tests={self.testsRun} Failing={len(self.failures)} Errors={len(self.errors)}"
-
-def get_all_tests(app):
-	test_file_list = []
-	for path, folders, files in os.walk(frappe.get_pymodule_path(app)):
-		for dontwalk in ('locals', '.git', 'public', '__pycache__'):
-			if dontwalk in folders:
-				folders.remove(dontwalk)
-
-		# for predictability
-		folders.sort()
-		files.sort()
-
-		# print path
-		for filename in files:
-			if filename.startswith("test_") and filename.endswith(".py") \
-				and filename != 'test_runner.py':
-				test_file_list.append(os.path.join(path, filename))
-	return test_file_list
-
-class ParallelTestRunner():
-	def __init__(self, app, site, build_number=1, total_builds=1, with_coverage=False):
-		self.app = app
-		self.site = site
-		self.with_coverage = with_coverage
-		self.setup_test_site()
-		self.build_number = frappe.utils.cint(build_number) or 1
-		self.total_builds = frappe.utils.cint(total_builds)
-		frappe.flags.in_test = True
-		self.run_before_test_hooks()
-		self.start_test()
-
-	def setup_test_site(self):
-		frappe.init(site=self.site)
-		if not frappe.db:
-			frappe.connect()
-
-		frappe.clear_cache()
-		frappe.utils.scheduler.disable_scheduler()
-		set_test_email_config()
-
-	def run_before_test_hooks(self):
-		start_time = time.time()
-		for fn in frappe.get_hooks("before_tests", app_name=self.app):
-			frappe.get_attr(fn)()
-		self.make_test_records()
-
-		elapsed = time.time() - start_time
-		elapsed = click.style(f' ({elapsed:.03}s)', fg='red')
-		click.echo(f'Before Test {elapsed}')
-
-
-	def start_test(self):
-		self.test_result = ParallelTestResult(stream=sys.stderr, descriptions=True, verbosity=2)
-		self.test_status = 'ongoing'
-
-		self.setup_coverage()
-		for test in self.get_test_list():
-			self.run_tests_for_file(test)
-
-		self.print_result()
-		self.save_coverage()
-
-		if self.test_result.failures or self.test_result.errors:
-			if os.environ.get('CI'):
-				sys.exit(1)
-
-
-	def make_test_records(self):
-		test_module = importlib.import_module(f'{self.app}.tests')
-
-		if hasattr(test_module, "global_test_dependencies"):
-			for doctype in test_module.global_test_dependencies:
-				make_test_records(doctype)
-
-	def run_tests_for_file(self, file_path):
-		if not file_path:
-			return
-
-		app = self.app
-		filename = file_path.split('/')[-1]
-		path = file_path.rsplit('/', 1)[0]
-
-		if os.path.sep.join(["doctype", "doctype", "boilerplate"]) in path:
-			# in /doctype/doctype/boilerplate/
-			return
-
-		app_path = frappe.get_pymodule_path(app)
-		relative_path = os.path.relpath(path, app_path)
-		if relative_path == '.':
-			module_name = app
-		else:
-			module_name = '{app}.{relative_path}.{module_name}'.format(app=app,
-				relative_path=relative_path.replace('/', '.'), module_name=filename[:-3])
-
-		module = importlib.import_module(module_name)
-		frappe.set_user('Administrator')
-		if hasattr(module, "test_dependencies"):
-			for doctype in module.test_dependencies:
-				try:
-					make_test_records(doctype)
-				except Exception:
-					pass
-
-		if os.path.basename(os.path.dirname(path)) == "doctype":
-			txt_file = os.path.join(path, filename[5:].replace(".py", ".json"))
-			if os.path.exists(txt_file):
-				with open(txt_file, 'r') as f:
-					doc = json.loads(f.read())
-					doctype = doc["name"]
-					make_test_records(doctype)
-
-		test_suite = unittest.TestSuite()
-		module_test_cases = unittest.TestLoader().loadTestsFromModule(module)
-		test_suite.addTest(module_test_cases)
-		test_suite(self.test_result)
-
-	def print_result(self):
-		self.test_result.printErrors()
-		click.echo(self.test_result)
-
-
-	def setup_coverage(self):
-		if self.with_coverage:
-			from coverage import Coverage
-			from frappe.utils import get_bench_path
-
-			# Generate coverage report only for app that is being tested
-			source_path = os.path.join(get_bench_path(), 'apps', self.app)
-			omit=[
-				'*.html',
-				'*.js',
-				'*.xml',
-				'*.css',
-				'*.less',
-				'*.scss',
-				'*.vue',
-				'*/doctype/*/*_dashboard.py',
-				'*/patches/*'
-			]
-
-			if self.app == 'frappe':
-				omit.append('*/commands/*')
-
-			self.coverage = Coverage(
-				source=[source_path],
-				omit=omit
-			)
-			self.coverage.start()
-
-	def save_coverage(self):
-		if not self.with_coverage:
-			return
-
-		self.coverage.stop()
-		self.coverage.save()
-
-
-	def get_test_list(self):
-		test_list = get_all_tests(self.app)
-		split_size = frappe.utils.ceil(len(test_list) / self.total_builds)
-		test_chunks = [test_list[x:x+split_size] for x in range(0, len(test_list), split_size)]
-		return test_chunks[self.build_number - 1]