refactor: Test runner 2 (#27987)
* feat: Improve logging in test runner
* feat: Categorize tests as unit or integration
* feat: Add support for selecting test categories
* feat: Split unit and integration tests execution
* test: better output on cli runner
* feat: Create TestRunner class
* feat: Implement run method in TestRunner class
* refactor: Refactor test discovery and execution in TestRunner class
* feat: Integrate _run_doctype_tests functionality into TestRunner class
* feat: Integrate _run_unittest functionality into TestRunner class
* refactor: Handle distinction between loading specific test case and entire module
* feat: Add handling of test dependencies in _add_module_tests method
* refactor: Merge _add_tests into discover_tests
* feat: Improve test results printing with click
* refactor: wrap in proper error handling
* fix: some signatures
* feat: Add debug logs to frappe/test_runner.py
* refactor: Move before_tests hooks after test discovery
* refactor: Use TestConfig instead of frappe.flags.skip_before_tests
* refactor: Add skip_test_records to TestConfig and update calling sites
* feat: Defer test record creation until after before_tests hooks
* feat: Add app parameter to _run_doctype_tests and _run_module_tests
* feat: Add --test-category option to run_tests command
* refactor: Add explanatory comments for skipping before_tests hooks and test record creation callbacks for unit tests
* feat: Add test category option to run_tests command
* feat: Unify explanatory comments in _prepare_integration_tests
* fix: wrap implicit db access in try-except block
* fix: mark current site
* fix: case counting
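In short: tests are now discovered into separate unit and integration suites and executed by a dedicated TestRunner, which returns one result per category. A minimal sketch of that flow, using only names introduced in this diff (TestRunner, TestConfig, discover_tests); the surrounding site setup is assumed, not part of this commit:

    from frappe.test_runner import TestConfig, TestRunner

    # Assumes an initialized site: frappe.init(site) and frappe.connect() already ran.
    config = TestConfig(selected_categories=[])  # an empty list selects all categories
    runner = TestRunner(verbosity=2)

    # Discovery yields one suite per category; run() executes the unit suite first
    # and runs the integration suite only if every unit test passed.
    unit_suite, integration_suite = runner.discover_tests(["frappe"], config)
    unit_result, integration_result = runner.run((unit_suite, integration_suite))
    # integration_result is None when the unit suite failed and integration was skipped.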
Parent: a26afa14a7
Commit: e39ecfa274
4 changed files with 587 additions and 282 deletions
@@ -769,6 +769,12 @@ def transform_database(context: CliCtxObj, table, engine, row_format, failfast):
 @click.option(
 	"--failfast", is_flag=True, default=False, help="Stop the test run on the first error or failure"
 )
+@click.option(
+	"--test-category",
+	type=click.Choice(["unit", "integration", "all"]),
+	default="all",
+	help="Select test category to run",
+)
 @pass_context
 def run_tests(
 	context: CliCtxObj,
@@ -785,6 +791,7 @@ def run_tests(
 	skip_before_tests=False,
 	failfast=False,
 	case=None,
+	test_category="all",
 	pdb=False,
 ):
 	"""Run python unit-tests"""
@@ -809,7 +816,7 @@ def run_tests(
 		click.secho(f"bench --site {site} set-config allow_tests true", fg="green")
 		return

-	ret = frappe.test_runner.main(
+	unit_ret, integration_ret = frappe.test_runner.main(
 		site,
 		app,
 		module,
@@ -826,11 +833,18 @@ def run_tests(
 		skip_test_records=skip_test_records,
 		skip_before_tests=skip_before_tests,
 		pdb_on_exceptions=pdb_on_exceptions,
+		selected_categories=[] if test_category == "all" else test_category,
 	)

-	if len(ret.failures) == 0 and len(ret.errors) == 0:
+	if (
+		len(unit_ret.failures) == 0
+		and len(unit_ret.errors) == 0
+		and len(integration_ret.failures) == 0
+		and len(integration_ret.errors) == 0
+	):
 		ret = 0
-
+	else:
+		ret = (unit_ret, integration_ret)
 	if os.environ.get("CI"):
 		sys.exit(ret)

@@ -1193,6 +1207,13 @@ def rebuild_global_search(context: CliCtxObj, static_pages=False):
 def list_sites(context: CliCtxObj, output_json=False):
 	"List all the sites in current bench"
 	site_dir = os.getcwd()
+	# Get the current site from common_site_config.json
+	common_site_config_path = os.path.join(site_dir, "common_site_config.json")
+	default_site = None
+	if os.path.exists(common_site_config_path):
+		with open(common_site_config_path) as f:
+			config = json.load(f)
+			default_site = config.get("default_site")
 	sites = [
 		site
 		for site in os.listdir(site_dir)
@@ -1205,7 +1226,10 @@ def list_sites(context: CliCtxObj, output_json=False):
 	elif sites:
 		click.echo("Available sites:")
 		for site in sites:
-			click.echo(f"  {site}")
+			if site == default_site:
+				click.echo(f"* {site}")
+			else:
+				click.echo(f"  {site}")
 	else:
 		click.echo("No sites found")

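With these options in place, a category-restricted run presumably looks like: bench --site <site> run-tests --test-category unit (the `bench --site` form is taken from the secho hint above; the exact command spelling is assumed from the run_tests definition). --test-category all remains the default, so existing invocations are unchanged.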
@@ -12,7 +12,7 @@ import requests

 import frappe

-from .test_runner import SLOW_TEST_THRESHOLD, make_test_records
+from .test_runner import TestResult, make_test_records

 click_ctx = click.get_current_context(True)
 if click_ctx:
@@ -70,7 +70,7 @@ class ParallelTestRunner:
 		click.echo(f"Estimated total tests for build {self.build_number}: {self.total_tests}")

 	def run_tests(self):
-		self.test_result = ParallelTestResult(stream=sys.stderr, descriptions=True, verbosity=2)
+		self.test_result = TestResult(stream=sys.stderr, descriptions=True, verbosity=2)

 		for test_file_info in self.test_file_list:
 			self.run_tests_for_file(test_file_info)
@@ -175,62 +175,6 @@ def split_by_weight(work, weights, chunk_count):
 	return chunks


-class ParallelTestResult(unittest.TextTestResult):
-	def startTest(self, test):
-		self.tb_locals = True
-		self._started_at = time.monotonic()
-		super(unittest.TextTestResult, self).startTest(test)
-		test_class = unittest.util.strclass(test.__class__)
-		if not hasattr(self, "current_test_class") or self.current_test_class != test_class:
-			click.echo(f"\n{unittest.util.strclass(test.__class__)}")
-			self.current_test_class = test_class
-
-	def getTestMethodName(self, test):
-		return test._testMethodName if hasattr(test, "_testMethodName") else str(test)
-
-	def addSuccess(self, test):
-		super(unittest.TextTestResult, self).addSuccess(test)
-		elapsed = time.monotonic() - self._started_at
-		threshold_passed = elapsed >= SLOW_TEST_THRESHOLD
-		elapsed = click.style(f" ({elapsed:.03}s)", fg="red") if threshold_passed else ""
-		click.echo(f"  {click.style(' ✔ ', fg='green')} {self.getTestMethodName(test)}{elapsed}")
-
-	def addError(self, test, err):
-		super(unittest.TextTestResult, self).addError(test, err)
-		click.echo(f"  {click.style(' ✖ ', fg='red')} {self.getTestMethodName(test)}")
-
-	def addFailure(self, test, err):
-		super(unittest.TextTestResult, self).addFailure(test, err)
-		click.echo(f"  {click.style(' ✖ ', fg='red')} {self.getTestMethodName(test)}")
-
-	def addSkip(self, test, reason):
-		super(unittest.TextTestResult, self).addSkip(test, reason)
-		click.echo(f"  {click.style(' = ', fg='white')} {self.getTestMethodName(test)}")
-
-	def addExpectedFailure(self, test, err):
-		super(unittest.TextTestResult, self).addExpectedFailure(test, err)
-		click.echo(f"  {click.style(' ✖ ', fg='red')} {self.getTestMethodName(test)}")
-
-	def addUnexpectedSuccess(self, test):
-		super(unittest.TextTestResult, self).addUnexpectedSuccess(test)
-		click.echo(f"  {click.style(' ✔ ', fg='green')} {self.getTestMethodName(test)}")
-
-	def printErrors(self):
-		click.echo("\n")
-		self.printErrorList(" ERROR ", self.errors, "red")
-		self.printErrorList(" FAIL ", self.failures, "red")
-
-	def printErrorList(self, flavour, errors, color):
-		for test, err in errors:
-			click.echo(self.separator1)
-			click.echo(f"{click.style(flavour, bg=color)} {self.getDescription(test)}")
-			click.echo(self.separator2)
-			click.echo(err)
-
-	def __str__(self):
-		return f"Tests: {self.testsRun}, Failing: {len(self.failures)}, Errors: {len(self.errors)}"
-
-
 def get_all_tests(app):
 	test_file_list = []
 	for path, folders, files in os.walk(frappe.get_app_path(app)):
@@ -18,22 +18,299 @@ import pstats
 import sys
 import time
 import unittest
-from dataclasses import dataclass
-from functools import cache
+from dataclasses import dataclass, field
+from functools import cache, wraps
 from importlib import reload
 from io import StringIO
 from pathlib import Path
 from typing import Optional, Union

 import click

 import frappe
 import frappe.utils.scheduler
 from frappe.model.naming import revert_series_if_last
 from frappe.modules import get_module_name, load_doctype_module
+from frappe.tests.utils import FrappeIntegrationTestCase
 from frappe.utils import cint

 unittest_runner = unittest.TextTestRunner
 SLOW_TEST_THRESHOLD = 2

+logger = logging.getLogger(__name__)
+
+
+def debug_timer(func):
+	@wraps(func)
+	def wrapper(*args, **kwargs):
+		start_time = time.time()
+		result = func(*args, **kwargs)
+		end_time = time.time()
+		logger.debug(f" {func.__name__} took {end_time - start_time:.3f} seconds")
+		return result
+
+	return wrapper
+
+
+class TestRunner(unittest.TextTestRunner):
+	def __init__(
+		self,
+		stream=None,
+		descriptions=True,
+		verbosity=1,
+		failfast=False,
+		buffer=False,
+		resultclass=None,
+		warnings=None,
+		*,
+		tb_locals=False,
+		junit_xml_output: bool = False,
+		profile: bool = False,
+	):
+		super().__init__(
+			stream=stream,
+			descriptions=descriptions,
+			verbosity=verbosity,
+			failfast=failfast,
+			buffer=buffer,
+			resultclass=resultclass or TestResult,
+			warnings=warnings,
+			tb_locals=tb_locals,
+		)
+		self.junit_xml_output = junit_xml_output
+		self.profile = profile
+		self.test_record_callbacks = []
+		logger.debug("TestRunner initialized")
+
+	def add_test_record_callback(self, callback):
+		self.test_record_callbacks.append(callback)
+
+	def execute_test_record_callbacks(self):
+		for callback in self.test_record_callbacks:
+			callback()
+		self.test_record_callbacks.clear()
+
+	def run(
+		self, test_suites: tuple[unittest.TestSuite, unittest.TestSuite]
+	) -> tuple[unittest.TestResult, unittest.TestResult | None]:
+		unit_suite, integration_suite = test_suites
+
+		if self.profile:
+			pr = cProfile.Profile()
+			pr.enable()
+
+		# Run unit tests
+		click.echo(
+			"\n" + click.style(f"Running {unit_suite.countTestCases()} unit tests", fg="cyan", bold=True)
+		)
+		unit_result = super().run(unit_suite)
+
+		# Run integration tests only if unit tests pass
+		integration_result = None
+		if unit_result.wasSuccessful():
+			click.echo(
+				"\n"
+				+ click.style(
+					f"Running {integration_suite.countTestCases()} integration tests",
+					fg="cyan",
+					bold=True,
+				)
+			)
+			integration_result = super().run(integration_suite)
+
+		if self.profile:
+			pr.disable()
+			s = StringIO()
+			ps = pstats.Stats(pr, stream=s).sort_stats("cumulative")
+			ps.print_stats()
+			print(s.getvalue())
+
+		return unit_result, integration_result
+
+	def discover_tests(
+		self, apps: list[str], config: TestConfig
+	) -> tuple[unittest.TestSuite, unittest.TestSuite]:
+		logger.debug(f"Discovering tests for apps: {apps}")
+		unit_test_suite = unittest.TestSuite()
+		integration_test_suite = unittest.TestSuite()
+
+		for app in apps:
+			app_path = Path(frappe.get_app_path(app))
+			for path in app_path.rglob("test_*.py"):
+				if path.parts[-4:-1] == ("doctype", "doctype", "boilerplate"):
+					continue
+				if path.name == "test_runner.py":
+					continue
+				relative_path = path.relative_to(app_path)
+				if any(part in relative_path.parts for part in ["locals", ".git", "public", "__pycache__"]):
+					continue
+
+				module_name = (
+					f"{app_path.stem}.{'.'.join(relative_path.parent.parts)}.{path.stem}"
+					if str(relative_path.parent) != "."
+					else f"{app_path.stem}.{path.stem}"
+				)
+				module = importlib.import_module(module_name)
+
+				if path.parent.name == "doctype" and not config.skip_test_records:
+					json_file = path.with_name(path.stem[5:] + ".json")
+					if json_file.exists():
+						with json_file.open() as f:
+							doctype = json.loads(f.read())["name"]
+							self.add_test_record_callback(lambda: make_test_records(doctype, commit=True))
+
+				self._add_module_tests(module, unit_test_suite, integration_test_suite, config)
+
+		logger.debug(
+			f"Discovered {unit_test_suite.countTestCases()} unit tests and {integration_test_suite.countTestCases()} integration tests"
+		)
+		return unit_test_suite, integration_test_suite
+
+	def discover_doctype_tests(
+		self, doctypes: str | list[str], config: TestConfig, force: bool = False
+	) -> tuple[unittest.TestSuite, unittest.TestSuite]:
+		unit_test_suite = unittest.TestSuite()
+		integration_test_suite = unittest.TestSuite()
+
+		if isinstance(doctypes, str):
+			doctypes = [doctypes]
+
+		for doctype in doctypes:
+			module = frappe.db.get_value("DocType", doctype, "module")
+			if not module:
+				raise TestRunnerError(f"Invalid doctype {doctype}")
+
+			test_module = get_module_name(doctype, module, "test_")
+			if force:
+				frappe.db.delete(doctype)
+
+			try:
+				module = importlib.import_module(test_module)
+				self._add_module_tests(module, unit_test_suite, integration_test_suite, config)
+			except ImportError:
+				logger.warning(f"No test module found for doctype {doctype}")
+
+			if not config.skip_test_records:
+				self.add_test_record_callback(lambda: make_test_records(doctype, force=force, commit=True))
+
+		return unit_test_suite, integration_test_suite
+
+	def discover_module_tests(
+		self, modules, config: TestConfig
+	) -> tuple[unittest.TestSuite, unittest.TestSuite]:
+		unit_test_suite = unittest.TestSuite()
+		integration_test_suite = unittest.TestSuite()
+
+		modules = [modules] if not isinstance(modules, list | tuple) else modules
+
+		for module in modules:
+			module = importlib.import_module(module)
+			self._add_module_tests(module, unit_test_suite, integration_test_suite, config)
+
+		return unit_test_suite, integration_test_suite
+
+	def _add_module_tests(
+		self,
+		module,
+		unit_test_suite: unittest.TestSuite,
+		integration_test_suite: unittest.TestSuite,
+		config: TestConfig,
+	):
+		# Handle module test dependencies
+		if hasattr(module, "test_dependencies") and not config.skip_test_records:
+			for doctype in module.test_dependencies:
+				make_test_records(doctype, commit=True)
+
+		if config.case:
+			test_suite = unittest.TestLoader().loadTestsFromTestCase(getattr(module, config.case))
+		else:
+			test_suite = unittest.TestLoader().loadTestsFromModule(module)
+
+		for test in self._iterate_suite(test_suite):
+			if config.tests and test._testMethodName not in config.tests:
+				continue
+
+			category = "integration" if isinstance(test, FrappeIntegrationTestCase) else "unit"
+
+			if config.selected_categories and category not in config.selected_categories:
+				continue
+
+			config.categories[category].append(test)
+			if category == "unit":
+				unit_test_suite.addTest(test)
+			else:
+				integration_test_suite.addTest(test)
+
+	@staticmethod
+	def _iterate_suite(suite):
+		for test in suite:
+			if isinstance(test, unittest.TestSuite):
+				yield from TestRunner._iterate_suite(test)
+			elif isinstance(test, unittest.TestCase):
+				yield test
+
+
+class TestResult(unittest.TextTestResult):
+	def startTest(self, test):
+		logger.debug(f"--- Starting test: {test}")
+		self.tb_locals = True
+		self._started_at = time.monotonic()
+		super(unittest.TextTestResult, self).startTest(test)
+		test_class = unittest.util.strclass(test.__class__)
+		if not hasattr(self, "current_test_class") or self.current_test_class != test_class:
+			click.echo(f"\n{unittest.util.strclass(test.__class__)}")
+			self.current_test_class = test_class
+
+	def getTestMethodName(self, test):
+		return test._testMethodName if hasattr(test, "_testMethodName") else str(test)
+
+	def addSuccess(self, test):
+		super(unittest.TextTestResult, self).addSuccess(test)
+		elapsed = time.monotonic() - self._started_at
+		threshold_passed = elapsed >= SLOW_TEST_THRESHOLD
+		elapsed = click.style(f" ({elapsed:.03}s)", fg="red") if threshold_passed else ""
+		click.echo(f"  {click.style(' ✔ ', fg='green')} {self.getTestMethodName(test)}{elapsed}")
+		logger.debug(f"=== Test passed: {test}")
+
+	def addError(self, test, err):
+		super(unittest.TextTestResult, self).addError(test, err)
+		click.echo(f"  {click.style(' ✖ ', fg='red')} {self.getTestMethodName(test)}")
+		logger.debug(f"=== Test error: {test}")
+
+	def addFailure(self, test, err):
+		super(unittest.TextTestResult, self).addFailure(test, err)
+		click.echo(f"  {click.style(' ✖ ', fg='red')} {self.getTestMethodName(test)}")
+		logger.debug(f"=== Test failed: {test}")
+
+	def addSkip(self, test, reason):
+		super(unittest.TextTestResult, self).addSkip(test, reason)
+		click.echo(f"  {click.style(' = ', fg='white')} {self.getTestMethodName(test)}")
+		logger.debug(f"=== Test skipped: {test}")
+
+	def addExpectedFailure(self, test, err):
+		super(unittest.TextTestResult, self).addExpectedFailure(test, err)
+		click.echo(f"  {click.style(' ✖ ', fg='red')} {self.getTestMethodName(test)}")
+		logger.debug(f"=== Test expected failure: {test}")
+
+	def addUnexpectedSuccess(self, test):
+		super(unittest.TextTestResult, self).addUnexpectedSuccess(test)
+		click.echo(f"  {click.style(' ✔ ', fg='green')} {self.getTestMethodName(test)}")
+		logger.debug(f"=== Test unexpected success: {test}")
+
+	def printErrors(self):
+		click.echo("\n")
+		self.printErrorList(" ERROR ", self.errors, "red")
+		self.printErrorList(" FAIL ", self.failures, "red")
+
+	def printErrorList(self, flavour, errors, color):
+		for test, err in errors:
+			click.echo(self.separator1)
+			click.echo(f"{click.style(flavour, bg=color)} {self.getDescription(test)}")
+			click.echo(self.separator2)
+			click.echo(err)
+
+	def __str__(self):
+		return f"Tests: {self.testsRun}, Failing: {len(self.failures)}, Errors: {len(self.errors)}"
+
+
 class TestRunnerError(Exception):
 	"""Custom exception for test runner errors"""
@@ -41,7 +318,7 @@ class TestRunnerError(Exception):
 	pass


-logging.basicConfig(level=logging.INFO)
+logging.basicConfig(format="%(levelname)s: %(message)s")
 logger = logging.getLogger(__name__)

@@ -49,13 +326,16 @@ logger = logging.getLogger(__name__)
 class TestConfig:
 	"""Configuration class for test runner"""

 	verbose: bool = False
 	profile: bool = False
 	failfast: bool = False
 	junit_xml_output: bool = False
 	tests: tuple = ()
 	case: str | None = None
 	pdb_on_exceptions: tuple | None = None
+	categories: dict = field(default_factory=lambda: {"unit": [], "integration": []})
+	selected_categories: list[str] = field(default_factory=list)
+	skip_before_tests: bool = False
+	skip_test_records: bool = False  # New attribute


 def xmlrunner_wrapper(output):
@@ -91,61 +371,157 @@ def main(
 	skip_test_records: bool = False,
 	skip_before_tests: bool = False,
 	pdb_on_exceptions: bool = False,
+	selected_categories: list[str] | None = None,
 ) -> None:
 	"""Main function to run tests"""
-	global unittest_runner
+	logger.setLevel(logging.DEBUG if verbose else logging.INFO)
+	start_time = time.time()

 	# Check for mutually exclusive arguments
 	exclusive_args = [doctype, doctype_list_path, module_def, module]
 	if sum(arg is not None for arg in exclusive_args) > 1:
 		error_message = (
 			"Error: The following arguments are mutually exclusive: "
 			"doctype, doctype_list_path, module_def, and module. "
 			"Please specify only one of these."
 		)
 		logger.error(error_message)
 		sys.exit(1)

+	# Prepare debug log message
+	debug_params = []
+	for param_name in ["site", "app", "module", "doctype", "module_def", "doctype_list_path"]:
+		param_value = locals()[param_name]
+		if param_value is not None:
+			debug_params.append(f"{param_name}={param_value}")
+
+	if debug_params:
+		logger.debug(f"Starting test run with parameters: {', '.join(debug_params)}")
+	else:
+		logger.debug("Starting test run with no specific parameters")
+
 	test_config = TestConfig(
 		verbose=verbose,
 		profile=profile,
 		failfast=failfast,
 		junit_xml_output=bool(junit_xml_output),
 		tests=tests,
 		case=case,
 		pdb_on_exceptions=pdb_on_exceptions,
+		selected_categories=selected_categories or [],
+		skip_before_tests=skip_before_tests,
+		skip_test_records=skip_test_records,
 	)
-	_initialize_test_environment(site, skip_before_tests, skip_test_records, test_config)

+	_initialize_test_environment(site, test_config)

 	xml_output_file = _setup_xml_output(junit_xml_output)

 	try:
-		scheduler_disabled_by_user = _disable_scheduler_if_needed()
-
-		if not frappe.flags.skip_before_tests:
-			_run_before_test_hooks(test_config, app)
+		# Create TestRunner instance
+		runner = TestRunner(
+			resultclass=TestResult if not test_config.junit_xml_output else None,
+			verbosity=2 if logger.getEffectiveLevel() < logging.INFO else 1,
+			failfast=test_config.failfast,
+			tb_locals=logger.getEffectiveLevel() <= logging.INFO,
+			junit_xml_output=test_config.junit_xml_output,
+			profile=test_config.profile,
+		)

 		if doctype or doctype_list_path:
 			doctype = _load_doctype_list(doctype_list_path) if doctype_list_path else doctype
-			test_result = _run_doctype_tests(doctype, test_config, force)
+			unit_result, integration_result = _run_doctype_tests(doctype, test_config, runner, force, app)
 		elif module_def:
-			test_result = _run_module_def_tests(app, module_def, test_config, force)
+			unit_result, integration_result = _run_module_def_tests(
+				app, module_def, test_config, runner, force
+			)
 		elif module:
-			test_result = _run_module_tests(module, test_config)
+			unit_result, integration_result = _run_module_tests(module, test_config, runner, app)
 		else:
-			test_result = _run_all_tests(app, test_config)
+			unit_result, integration_result = _run_all_tests(app, test_config, runner)

-		_cleanup_after_tests(scheduler_disabled_by_user)
+		print_test_results(unit_result, integration_result)

-		return test_result
+		# Determine overall success
+		success = unit_result.wasSuccessful() and (
+			integration_result is None or integration_result.wasSuccessful()
+		)
+
+		if not success:
+			sys.exit(1)
+
+		return unit_result, integration_result

 	finally:
 		if xml_output_file:
 			xml_output_file.close()

+		end_time = time.time()
+		logger.debug(f"Total test run time: {end_time - start_time:.3f} seconds")

-def _initialize_test_environment(site, skip_before_tests, skip_test_records, test_config):

+def print_test_results(unit_result: unittest.TestResult, integration_result: unittest.TestResult | None):
+	"""Print detailed test results including failures and errors"""
+	click.echo("\n" + click.style("Test Results:", fg="cyan", bold=True))
+
+	def _print_result(result, category):
+		tests_run = result.testsRun
+		failures = len(result.failures)
+		errors = len(result.errors)
+		click.echo(
+			f"\n{click.style(f'{category} Tests:', bold=True)}\n"
+			f"  Ran: {click.style(f'{tests_run:<3}', fg='cyan')}"
+			f"  Failures: {click.style(f'{failures:<3}', fg='red' if failures else 'green')}"
+			f"  Errors: {click.style(f'{errors:<3}', fg='red' if errors else 'green')}"
+		)
+
+		if failures > 0:
+			click.echo(f"\n{click.style(category + ' Test Failures:', fg='red', bold=True)}")
+			for i, failure in enumerate(result.failures, 1):
+				click.echo(f"  {i}. {click.style(str(failure[0]), fg='yellow')}")
+
+		if errors > 0:
+			click.echo(f"\n{click.style(category + ' Test Errors:', fg='red', bold=True)}")
+			for i, error in enumerate(result.errors, 1):
+				click.echo(f"  {i}. {click.style(str(error[0]), fg='yellow')}")
+				click.echo(click.style("     " + str(error[1]).split("\n")[-2], fg="red"))
+
+	_print_result(unit_result, "Unit")
+
+	if integration_result:
+		_print_result(integration_result, "Integration")
+
+	# Print overall status
+	total_failures = len(unit_result.failures) + (
+		len(integration_result.failures) if integration_result else 0
+	)
+	total_errors = len(unit_result.errors) + (len(integration_result.errors) if integration_result else 0)
+
+	if total_failures == 0 and total_errors == 0:
+		click.echo(f"\n{click.style('All tests passed successfully!', fg='green', bold=True)}")
+	else:
+		click.echo(f"\n{click.style('Some tests failed or encountered errors.', fg='red', bold=True)}")
+
+
+@debug_timer
+def _initialize_test_environment(site, config: TestConfig):
 	"""Initialize the test environment"""
+	logger.debug(f"Initializing test environment for site: {site}")
 	frappe.init(site)
 	if not frappe.db:
 		frappe.connect()

-	frappe.flags.skip_before_tests = skip_before_tests
-	frappe.flags.skip_test_records = skip_test_records
+	try:
+		# require db access
+		_disable_scheduler_if_needed()
+		frappe.clear_cache()
+	except Exception as e:
+		logger.error(f"Error connecting to the database: {e!s}")
+		raise TestRunnerError(f"Failed to connect to the database: {e}") from e

 	# Set various test-related flags
-	frappe.flags.print_messages = test_config.verbose
 	frappe.flags.in_test = True
-	frappe.clear_cache()
+	frappe.flags.print_messages = logger.getEffectiveLevel() < logging.INFO
+	frappe.flags.tests_verbose = logger.getEffectiveLevel() < logging.INFO
+	logger.debug("Test environment initialized")


 def _setup_xml_output(junit_xml_output):
@@ -161,22 +537,6 @@ def _setup_xml_output(junit_xml_output):
 	return None


-def _disable_scheduler_if_needed():
-	"""Disable scheduler if it's not already disabled"""
-	scheduler_disabled_by_user = frappe.utils.scheduler.is_scheduler_disabled(verbose=False)
-	if not scheduler_disabled_by_user:
-		frappe.utils.scheduler.disable_scheduler()
-	return scheduler_disabled_by_user
-
-
-def _run_before_test_hooks(test_config, app):
-	"""Run 'before_tests' hooks if not skipped by the caller"""
-	if test_config.verbose:
-		print('Running "before_tests" hooks')
-	for hook_function in frappe.get_hooks("before_tests", app_name=app):
-		frappe.get_attr(hook_function)()
-
-
 def _load_doctype_list(doctype_list_path):
 	"""Load the list of doctypes from the specified file"""
 	app, path = doctype_list_path.split(os.path.sep, 1)
@@ -184,10 +544,12 @@ def _load_doctype_list(doctype_list_path):
 		return f.read().strip().splitlines()


-def _run_module_def_tests(app, module_def, test_config, force):
+def _run_module_def_tests(
+	app, module_def, config: TestConfig, runner: TestRunner, force
+) -> tuple[unittest.TestResult, unittest.TestResult | None]:
 	"""Run tests for the specified module definition"""
 	doctypes = _get_doctypes_for_module_def(app, module_def)
-	return _run_doctype_tests(doctypes, test_config, force)
+	return _run_doctype_tests(doctypes, config, runner, force, app)


 def _get_doctypes_for_module_def(app, module_def):
@@ -209,195 +571,151 @@ def _get_doctypes_for_module_def(app, module_def):
 	return doctypes


-def _cleanup_after_tests(scheduler_disabled_by_user):
+# Global variable to track scheduler state
+scheduler_disabled_by_user = False
+
+
+def _disable_scheduler_if_needed():
+	"""Disable scheduler if it's not already disabled"""
+	global scheduler_disabled_by_user
+	scheduler_disabled_by_user = frappe.utils.scheduler.is_scheduler_disabled(verbose=False)
+	if not scheduler_disabled_by_user:
+		frappe.utils.scheduler.disable_scheduler()
+
+
+def _cleanup_after_tests():
 	"""Perform cleanup operations after running tests"""
+	global scheduler_disabled_by_user
 	if not scheduler_disabled_by_user:
 		frappe.utils.scheduler.enable_scheduler()

 	if frappe.db:
 		frappe.db.commit()
-		frappe.clear_cache()
+	frappe.clear_cache()


-class TimeLoggingTestResult(unittest.TextTestResult):
-	"""Custom TestResult class for logging test execution time"""
-
-	def startTest(self, test):
-		self._started_at = time.monotonic()
-		super().startTest(test)
-
-	def addSuccess(self, test):
-		elapsed = time.monotonic() - self._started_at
-		name = self.getDescription(test)
-		if elapsed >= SLOW_TEST_THRESHOLD:
-			self.stream.write(f"\n{name} ({elapsed:.03}s)\n")
-		super().addSuccess(test)
-
-
-def _run_all_tests(app: str | None, config: TestConfig) -> unittest.TestResult:
+@debug_timer
+def _run_all_tests(
+	app: str | None, config: TestConfig, runner: TestRunner
+) -> tuple[unittest.TestResult, unittest.TestResult | None]:
 	"""Run all tests for the specified app or all installed apps"""
-
 	apps = [app] if app else frappe.get_installed_apps()
-	test_suite = unittest.TestSuite()
-
-	for app in apps:
-		app_path = Path(frappe.get_app_path(app))
-		for path in app_path.rglob("test_*.py"):
-			if path.name != "test_runner.py":
-				relative_path = path.relative_to(app_path)
-				if not any(
-					part in relative_path.parts for part in ["locals", ".git", "public", "__pycache__"]
-				):
-					_add_test(app_path, path, config.verbose, test_suite)
-
-	runner = unittest_runner(
-		resultclass=TimeLoggingTestResult if not config.junit_xml_output else None,
-		verbosity=1 + cint(config.verbose),
-		failfast=config.failfast,
-		tb_locals=config.verbose,
-	)
-
-	if config.profile:
-		pr = cProfile.Profile()
-		pr.enable()
-
-	out = runner.run(test_suite)
-
-	if config.profile:
-		pr.disable()
-		s = StringIO()
-		pstats.Stats(pr, stream=s).sort_stats("cumulative").print_stats()
-		print(s.getvalue())
-
-	return out
-
-
-def _run_doctype_tests(doctypes, config: TestConfig, force=False):
-	"""Run tests for the specified doctype(s)"""
+	logger.debug(f"Running tests for apps: {apps}")
 	try:
-		modules = []
-		doctypes = [doctypes] if not isinstance(doctypes, list | tuple) else doctypes
+		unit_test_suite, integration_test_suite = runner.discover_tests(apps, config)
+		logger.debug(
+			f"Discovered {len(list(runner._iterate_suite(unit_test_suite)))} unit tests and {len(list(runner._iterate_suite(integration_test_suite)))} integration tests"
+		)

-		for doctype in doctypes:
-			module = frappe.db.get_value("DocType", doctype, "module")
-			if not module:
-				raise TestRunnerError(f"Invalid doctype {doctype}")
+		if config.pdb_on_exceptions:
+			for test_suite in (unit_test_suite, integration_test_suite):
+				for test_case in runner._iterate_suite(test_suite):
+					if hasattr(test_case, "_apply_debug_decorator"):
+						test_case._apply_debug_decorator(config.pdb_on_exceptions)

-			test_module = get_module_name(doctype, module, "test_")
-			if force:
-				frappe.db.delete(doctype)
-			make_test_records(doctype, verbose=config.verbose, force=force, commit=True)
-			modules.append(importlib.import_module(test_module))
+		_prepare_integration_tests(runner, integration_test_suite, config, app)
+		res = runner.run((unit_test_suite, integration_test_suite))
+		_cleanup_after_tests()
+		return res
+	except Exception as e:
+		logger.error(f"Error running all tests for {app or 'all apps'}: {e!s}")
+		raise TestRunnerError(f"Failed to run tests for {app or 'all apps'}: {e!s}") from e

-	return _run_unittest(modules, config=config)

+@debug_timer
+def _run_doctype_tests(
+	doctypes, config: TestConfig, runner: TestRunner, force=False, app: str | None = None
+) -> tuple[unittest.TestResult, unittest.TestResult | None]:
+	"""Run tests for the specified doctype(s)"""
+	try:
+		unit_test_suite, integration_test_suite = runner.discover_doctype_tests(doctypes, config, force)
+
+		if config.pdb_on_exceptions:
+			for test_suite in (unit_test_suite, integration_test_suite):
+				for test_case in runner._iterate_suite(test_suite):
+					if hasattr(test_case, "_apply_debug_decorator"):
+						test_case._apply_debug_decorator(config.pdb_on_exceptions)
+
+		_prepare_integration_tests(runner, integration_test_suite, config, app)
+		res = runner.run((unit_test_suite, integration_test_suite))
+		_cleanup_after_tests()
+		return res
+	except Exception as e:
+		logger.error(f"Error running tests for doctypes {doctypes}: {e!s}")
+		raise TestRunnerError(f"Failed to run tests for doctypes: {e!s}") from e


-def _run_module_tests(module, config: TestConfig):
+@debug_timer
+def _run_module_tests(
+	module, config: TestConfig, runner: TestRunner, app: str | None = None
+) -> tuple[unittest.TestResult, unittest.TestResult | None]:
 	"""Run tests for the specified module"""
-	module = importlib.import_module(module)
-	if hasattr(module, "test_dependencies"):
-		for doctype in module.test_dependencies:
-			make_test_records(doctype, verbose=config.verbose, commit=True)
-
-	frappe.db.commit()
-	return _run_unittest(module, config=config)
+	try:
+		unit_test_suite, integration_test_suite = runner.discover_module_tests(module, config)
+
+		if config.pdb_on_exceptions:
+			for test_suite in (unit_test_suite, integration_test_suite):
+				for test_case in runner._iterate_suite(test_suite):
+					if hasattr(test_case, "_apply_debug_decorator"):
+						test_case._apply_debug_decorator(config.pdb_on_exceptions)
+
+		_prepare_integration_tests(runner, integration_test_suite, config, app)
+		res = runner.run((unit_test_suite, integration_test_suite))
+		_cleanup_after_tests()
+		return res
+	except Exception as e:
+		logger.error(f"Error running tests for module {module}: {e!s}")
+		raise TestRunnerError(f"Failed to run tests for module: {e!s}") from e


-def _run_unittest(modules, config: TestConfig):
-	"""Run unittest for the specified module(s)"""
-	frappe.db.begin()
-	modules = [modules] if not isinstance(modules, list | tuple) else modules
-	final_test_suite = unittest.TestSuite()
-
-	for module in modules:
-		test_suite = unittest.TestLoader().loadTestsFromModule(module)
-		if config.case:
-			test_suite = unittest.TestLoader().loadTestsFromTestCase(getattr(module, config.case))
-
-		if config.tests:
-			final_test_suite.addTests(
-				test for test in _iterate_suite(test_suite) if test._testMethodName in config.tests
-			)
-		else:
-			final_test_suite.addTest(test_suite)
-
-	if config.pdb_on_exceptions:
-		for test_case in _iterate_suite(final_test_suite):
-			if hasattr(test_case, "_apply_debug_decorator"):
-				test_case._apply_debug_decorator(config.pdb_on_exceptions)
-
-	runner = unittest_runner(
-		resultclass=None if config.junit_xml_output else TimeLoggingTestResult,
-		verbosity=1 + cint(config.verbose),
-		failfast=config.failfast,
-		tb_locals=config.verbose,
-	)
-
-	frappe.flags.tests_verbose = config.verbose
-
-	if config.profile:
-		pr = cProfile.Profile()
-		pr.enable()
-
-	out = runner.run(final_test_suite)
-
-	if config.profile:
-		pr.disable()
-		s = StringIO()
-		ps = pstats.Stats(pr, stream=s).sort_stats("cumulative")
-		ps.print_stats()
-		print(s.getvalue())
-
-	return out
-
-
-def _iterate_suite(suite):
-	"""Helper function to iterate through a test suite"""
-	for test in suite:
-		if isinstance(test, unittest.TestSuite):
-			yield from _iterate_suite(test)
-		elif isinstance(test, unittest.TestCase):
-			yield test
-
-
-def _add_test(
-	app_path: Path, path: Path, verbose: bool, test_suite: unittest.TestSuite | None = None
-) -> None:
-	relative_path = path.relative_to(app_path)
-
-	if path.parts[-4:-1] == ("doctype", "doctype", "boilerplate"):
-		return  # Skip boilerplate files
-
-	module_name = (
-		f"{app_path.stem}.{'.'.join(relative_path.parent.parts)}.{path.stem}"
-		if str(relative_path.parent) != "."
-		else f"{app_path.stem}.{path.stem}"
-	)
-	module = importlib.import_module(module_name)
-
-	if hasattr(module, "test_dependencies"):
-		for doctype in module.test_dependencies:
-			make_test_records(doctype, verbose=verbose, commit=True)
-
-	test_suite = test_suite or unittest.TestSuite()
-
-	if path.parent.name == "doctype":
-		json_file = path.with_name(path.stem[5:] + ".json")
-		if json_file.exists():
-			with json_file.open() as f:
-				doctype = json.loads(f.read())["name"]
-				make_test_records(doctype, verbose=verbose, commit=True)
-
-	test_suite.addTest(unittest.TestLoader().loadTestsFromModule(module))
+def _prepare_integration_tests(
+	runner: TestRunner, integration_test_suite: unittest.TestSuite, config: TestConfig, app: str
+) -> None:
+	"""Prepare the environment for integration tests."""
+	if next(runner._iterate_suite(integration_test_suite), None) is not None:
+		# Explanatory comment
+		"""
+		We perform specific setup steps only for integration tests:
+
+		1. Database Connection:
+		   - Initialized only for integration tests to avoid overhead in unit tests.
+		   - Essential for end-to-end functionality testing in integration tests.
+		   - Maintains separation between unit and integration tests.
+
+		2. Before Tests Hooks:
+		   - Executed only for integration tests unless explicitly skipped.
+		   - Provides necessary environment setup for integration tests.
+		   - Skipped for unit tests to maintain their independence and isolation.
+
+		3. Test Record Creation:
+		   - Performed only for integration tests unless explicitly skipped.
+		   - Creates or modifies database records needed for integration tests.
+		   - Ensures consistent starting state and allows for complex test scenarios.
+		   - Skipped for unit tests to maintain their isolation and reproducibility.
+
+		These steps are crucial for integration tests but unnecessary or potentially
+		harmful for unit tests, which should be independent of external state and fast to execute.
+		By selectively applying these setup steps, we maintain the integrity and purpose
+		of both unit and integration tests while optimizing performance.
+		"""
+		if not config.skip_before_tests:
+			_run_before_test_hooks(config, app)
+		else:
+			logger.debug("Skipping before_tests hooks: Explicitly skipped")
+
+		if not config.skip_test_records:
+			_execute_test_record_callbacks(runner)
+		else:
+			logger.debug("Skipping test record creation: Explicitly skipped")
+	else:
+		logger.debug("Skipping before_tests hooks and test record creation: No integration tests")


-def make_test_records(doctype, verbose=0, force=False, commit=False):
+def make_test_records(doctype, force=False, commit=False):
 	"""Make test records for the specified doctype"""
-	if frappe.flags.skip_test_records:
-		return
+	logger.debug(f"Making test records for doctype: {doctype}")

 	for options in get_dependencies(doctype):
 		if options == "[Select]":
@@ -405,8 +723,8 @@ def make_test_records(doctype, verbose=0, force=False, commit=False):

 		if options not in frappe.local.test_objects:
 			frappe.local.test_objects[options] = []
-			make_test_records(options, verbose, force, commit=commit)
-			make_test_records_for_doctype(options, verbose, force, commit=commit)
+			make_test_records(options, force, commit=commit)
+			make_test_records_for_doctype(options, force, commit=commit)


 @cache
@@ -450,38 +768,39 @@ def get_dependencies(doctype):
 	return options_list


-def make_test_records_for_doctype(doctype, verbose=0, force=False, commit=False):
+def make_test_records_for_doctype(doctype, force=False, commit=False):
 	"""Make test records for the specified doctype"""

 	test_record_log_instance = TestRecordLog()
 	if not force and doctype in test_record_log_instance.get():
 		return

 	module, test_module = get_modules(doctype)
-	if verbose:
-		print(f"Making for {doctype}")
+	logger.debug(f"Making test records for {doctype}")

 	if hasattr(test_module, "_make_test_records"):
-		frappe.local.test_objects[doctype] = frappe.local.test_objects.get(
-			doctype, []
-		) + test_module._make_test_records(verbose)
+		frappe.local.test_objects[doctype] = (
+			frappe.local.test_objects.get(doctype, []) + test_module._make_test_records()
+		)
 	elif hasattr(test_module, "test_records"):
 		frappe.local.test_objects[doctype] = frappe.local.test_objects.get(doctype, []) + make_test_objects(
-			doctype, test_module.test_records, verbose, force, commit=commit
+			doctype, test_module.test_records, force, commit=commit
 		)
 	else:
 		test_records = frappe.get_test_records(doctype)
 		if test_records:
 			frappe.local.test_objects[doctype] = frappe.local.test_objects.get(
 				doctype, []
-			) + make_test_objects(doctype, test_records, verbose, force, commit=commit)
-		elif verbose:
+			) + make_test_objects(doctype, test_records, force, commit=commit)
+		elif logger.getEffectiveLevel() < logging.INFO:
 			print_mandatory_fields(doctype)

 	test_record_log_instance.add(doctype)


-def make_test_objects(doctype, test_records=None, verbose=None, reset=False, commit=False):
+def make_test_objects(doctype, test_records=None, reset=False, commit=False):
 	"""Make test objects from given list of `test_records` or from `test_records.json`"""
+	logger.debug(f"Making test objects for doctype: {doctype}")
 	records = []

 	def revert_naming(d):
@@ -536,7 +855,7 @@ def make_test_objects(doctype, test_records=None, verbose=None, reset=False, commit=False):
 			):
 				revert_naming(d)
 			else:
-				verbose and print("Error in making test record for", d.doctype, d.name)
+				logger.debug(f"Error in making test record for {d.doctype} {d.name}")
 				raise

 		records.append(d.name)
@@ -549,13 +868,13 @@ def make_test_objects(doctype, test_records=None, verbose=None, reset=False, commit=False):
 def print_mandatory_fields(doctype):
 	"""Print mandatory fields for the specified doctype"""
 	meta = frappe.get_meta(doctype)
-	print(f"Please setup make_test_records for: {doctype}")
-	print("-" * 60)
-	print(f"Autoname: {meta.autoname or ''}")
-	print("Mandatory Fields:")
+	logger.debug(f"Please setup make_test_records for: {doctype}")
+	logger.debug("-" * 60)
+	logger.debug(f"Autoname: {meta.autoname or ''}")
+	logger.debug("Mandatory Fields:")
 	for d in meta.get("fields", {"reqd": 1}):
-		print(f" - {d.parent}:{d.fieldname} | {d.fieldtype} | {d.options or ''}")
-	print()
+		logger.debug(f" - {d.parent}:{d.fieldname} | {d.fieldtype} | {d.options or ''}")
+	logger.debug("")


 class TestRecordLog:
@@ -592,3 +911,18 @@ def add_to_test_record_log(doctype):

 def get_test_record_log():
 	return TestRecordLog().get()
+
+
+@debug_timer
+def _run_before_test_hooks(config: TestConfig, app: str | None):
+	"""Run 'before_tests' hooks"""
+	logger.debug('Running "before_tests" hooks')
+	for hook_function in frappe.get_hooks("before_tests", app_name=app):
+		frappe.get_attr(hook_function)()
+
+
+@debug_timer
+def _execute_test_record_callbacks(runner):
+	"""Execute test record creation callbacks"""
+	logger.debug("Running test record creation callbacks")
+	runner.execute_test_record_callbacks()
@@ -90,7 +90,7 @@ def debug_on(*exceptions):
 	return decorator


-class FrappeTestCase(unittest.TestCase):
+class FrappeIntegrationTestCase(unittest.TestCase):
 	"""Base test class for Frappe tests.

@@ -315,7 +315,10 @@ class FrappeTestCase(unittest.TestCase):
 		yield


-class MockedRequestTestCase(FrappeTestCase):
+FrappeTestCase = FrappeIntegrationTestCase
+
+
+class MockedRequestTestCase(FrappeIntegrationTestCase):
 	def setUp(self):
 		import responses

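Which suite a test lands in is decided purely by its base class: discovery (see _add_module_tests above) checks isinstance(test, FrappeIntegrationTestCase) and routes everything else to the unit suite. A minimal sketch with hypothetical test classes; only the base classes and the in_test flag come from this diff:

    import unittest

    import frappe
    from frappe.tests.utils import FrappeIntegrationTestCase

    class TestPureLogic(unittest.TestCase):
        # Plain unittest.TestCase: collected into the unit suite, so it runs
        # without before_tests hooks and without test record creation.
        def test_addition(self):
            self.assertEqual(1 + 1, 2)

    class TestWithDatabase(FrappeIntegrationTestCase):
        # FrappeIntegrationTestCase subclass: collected into the integration
        # suite, which gets hooks, test records and a database connection.
        def test_in_test_flag(self):
            self.assertTrue(frappe.flags.in_test)

Existing suites that still import FrappeTestCase keep working through the alias added in this hunk.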