Testing Module (#28000)
* refactor: add create_handler alongside create_logger * fix: bench helper click exceptions for --help (2nd attempt) * fix: increase logging level missing test records * fix: reorganize test runner into testing module and refine logging * fix: semgrep complaint * chore: remove unused code; only keep deprecation proxies * fix: import statements * fix: output & string processing
This commit is contained in:
parent
349dd8ed12
commit
7e453ea515
16 changed files with 1114 additions and 845 deletions
|
|
@ -1,8 +1,5 @@
|
|||
# Copyright (c) 2015, Web Notes Technologies Pvt. Ltd. and Contributors
|
||||
# License: MIT. See LICENSE
|
||||
|
||||
import cProfile
|
||||
import pstats
|
||||
import subprocess # nosec
|
||||
import sys
|
||||
from functools import wraps
|
||||
|
|
@ -22,6 +19,8 @@ def pass_context(f):
|
|||
def _func(ctx, *args, **kwargs):
|
||||
profile = ctx.obj.profile
|
||||
if profile:
|
||||
import cProfile
|
||||
|
||||
pr = cProfile.Profile()
|
||||
pr.enable()
|
||||
|
||||
|
|
@ -37,6 +36,8 @@ def pass_context(f):
|
|||
if profile:
|
||||
pr.disable()
|
||||
s = StringIO()
|
||||
import pstats
|
||||
|
||||
ps = pstats.Stats(pr, stream=s).sort_stats("cumtime", "tottime", "ncalls")
|
||||
ps.print_stats()
|
||||
|
||||
|
|
@ -108,6 +109,7 @@ def get_commands():
|
|||
from .redis_utils import commands as redis_commands
|
||||
from .scheduler import commands as scheduler_commands
|
||||
from .site import commands as site_commands
|
||||
from .testing import commands as testing_commands
|
||||
from .translate import commands as translate_commands
|
||||
from .utils import commands as utils_commands
|
||||
|
||||
|
|
@ -115,6 +117,7 @@ def get_commands():
|
|||
all_commands = (
|
||||
scheduler_commands
|
||||
+ site_commands
|
||||
+ testing_commands
|
||||
+ translate_commands
|
||||
+ gettext_commands
|
||||
+ utils_commands
|
||||
|
|
|
|||
459
frappe/commands/testing.py
Normal file
459
frappe/commands/testing.py
Normal file
|
|
@ -0,0 +1,459 @@
|
|||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import click
|
||||
|
||||
import frappe
|
||||
from frappe.commands import get_site, pass_context
|
||||
from frappe.utils.bench_helper import CliCtxObj
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import unittest
|
||||
|
||||
from frappe.testing import TestRunner
|
||||
|
||||
|
||||
def main(
	site: str | None = None,
	app: str | None = None,
	module: str | None = None,
	doctype: str | None = None,
	module_def: str | None = None,
	verbose: bool = False,
	tests: tuple = (),
	force: bool = False,
	profile: bool = False,
	junit_xml_output: str | None = None,
	doctype_list_path: str | None = None,
	failfast: bool = False,
	case: str | None = None,
	skip_before_tests: bool = False,
	pdb_on_exceptions: tuple | None = None,
	selected_categories: list[str] | None = None,
) -> list | None:
	"""Discover, run and report tests for a site.

	At most one of ``doctype``, ``doctype_list_path``, ``module_def`` and
	``module`` may be given to narrow the selection; with none of them, all
	tests of ``app`` (or of every installed app) are discovered.

	Returns the list of ``[app, category, unittest result]`` triples when all
	tests pass; exits the process with status 1 otherwise.

	:param pdb_on_exceptions: tuple of exception types that should drop into
		pdb when raised inside a test (``None`` to disable).
	"""
	import logging

	from frappe.testing import (
		TestConfig,
		TestRunner,
		discover_all_tests,
		discover_doctype_tests,
		discover_module_tests,
	)
	from frappe.testing.environment import _cleanup_after_tests, _initialize_test_environment

	testing_module_logger = logging.getLogger("frappe.testing")
	testing_module_logger.setLevel(logging.DEBUG if verbose else logging.INFO)
	start_time = time.time()

	# These selectors narrow the run in incompatible ways; allow at most one.
	exclusive_args = [doctype, doctype_list_path, module_def, module]
	if sum(arg is not None for arg in exclusive_args) > 1:
		raise click.UsageError(
			"Error: The following arguments are mutually exclusive: "
			"doctype, doctype_list_path, module_def, and module. "
			"Please specify only one of these."
		)

	# Echo the effective parameters so console output and log file are self-describing.
	debug_params = []
	for param_name in ["site", "app", "module", "doctype", "module_def", "doctype_list_path"]:
		param_value = locals()[param_name]
		if param_value is not None:
			debug_params.append(f"{param_name}={param_value}")

	if debug_params:
		click.secho(f"Starting test run with parameters: {', '.join(debug_params)}", fg="cyan", bold=True)
		testing_module_logger.info(f"started with: {', '.join(debug_params)}")
	else:
		click.secho("Starting test run with no specific parameters", fg="cyan", bold=True)
		testing_module_logger.info("started with no specific parameters")
	# Point the user at any file-based log handlers attached to the testing logger.
	for handler in testing_module_logger.handlers:
		if file := getattr(handler, "baseFilename", None):
			click.secho(
				f"Detailed logs{' (augment with --verbose)' if not verbose else ''}: {click.style(file, bold=True)}"
			)

	test_config = TestConfig(
		profile=profile,
		failfast=failfast,
		tests=tests,
		case=case,
		pdb_on_exceptions=pdb_on_exceptions,
		selected_categories=selected_categories or [],
		skip_before_tests=skip_before_tests,
	)

	_initialize_test_environment(site, test_config)

	# May be None (no JUnit XML requested); closed in the finally block below.
	xml_output_file = _setup_xml_output(junit_xml_output)

	try:
		# Runner verbosity mirrors the logger level so --verbose affects both channels.
		runner = TestRunner(
			verbosity=2 if testing_module_logger.getEffectiveLevel() < logging.INFO else 1,
			tb_locals=testing_module_logger.getEffectiveLevel() <= logging.INFO,
			cfg=test_config,
		)

		if doctype or doctype_list_path:
			doctype = _load_doctype_list(doctype_list_path) if doctype_list_path else doctype
			discover_doctype_tests(doctype, runner, force, app)
		elif module_def:
			_run_module_def_tests(app, module_def, runner, force)
		elif module:
			discover_module_tests(module, runner, app)
		else:
			apps = [app] if app else frappe.get_installed_apps()
			discover_all_tests(apps, runner)

		results = []
		for app, category, suite in runner.iterRun():
			click.secho(
				f"\nRunning {suite.countTestCases()} {category} tests for {app}", fg="cyan", bold=True
			)
			results.append([app, category, runner.run(suite)])

		success = all(r.wasSuccessful() for _, _, r in results)
		click.secho("\nTest Results:", fg="cyan", bold=True)

		def _print_result(app, category, result):
			# One summary line per (app, category), then details for failures/errors.
			tests_run = result.testsRun
			failures = len(result.failures)
			errors = len(result.errors)
			click.echo(
				f"\n{click.style(f'{category} Tests in {app}:', bold=True)}\n"
				f" Ran: {click.style(f'{tests_run:<3}', fg='cyan')}"
				f" Failures: {click.style(f'{failures:<3}', fg='red' if failures else 'green')}"
				f" Errors: {click.style(f'{errors:<3}', fg='red' if errors else 'green')}"
			)

			if failures > 0:
				click.echo(f"\n{click.style(category + ' Test Failures:', fg='red', bold=True)}")
				for i, failure in enumerate(result.failures, 1):
					click.echo(f" {i}. {click.style(str(failure[0]), fg='yellow')}")

			if errors > 0:
				click.echo(f"\n{click.style(category + ' Test Errors:', fg='red', bold=True)}")
				for i, error in enumerate(result.errors, 1):
					click.echo(f" {i}. {click.style(str(error[0]), fg='yellow')}")
					# Last meaningful traceback line: the exception message itself.
					click.echo(click.style(" " + str(error[1]).split("\n")[-2], fg="red"))

		for app, category, result in results:
			_print_result(frappe.unscrub(app or "Unspecified App"), frappe.unscrub(category), result)

		if success:
			click.echo(f"\n{click.style('All tests passed successfully!', fg='green', bold=True)}")
		else:
			click.echo(f"\n{click.style('Some tests failed or encountered errors.', fg='red', bold=True)}")

		if not success:
			sys.exit(1)

		return results

	finally:
		_cleanup_after_tests()
		if xml_output_file:
			xml_output_file.close()

		end_time = time.time()
		testing_module_logger.debug(f"Total test run time: {end_time - start_time:.3f} seconds")
|
||||
|
||||
|
||||
def _setup_xml_output(junit_xml_output):
|
||||
"""Setup XML output for test results if specified"""
|
||||
global unittest_runner
|
||||
import unittest
|
||||
|
||||
if junit_xml_output:
|
||||
xml_output_file = open(junit_xml_output, "wb")
|
||||
try:
|
||||
import xmlrunner
|
||||
|
||||
unittest_runner = xmlrunner.XMLTestRunner(output=xml_output_file)
|
||||
except ImportError:
|
||||
print("xmlrunner not found. Please install it to use XML output.")
|
||||
unittest_runner = unittest.TextTestRunner()
|
||||
return xml_output_file
|
||||
else:
|
||||
unittest_runner = unittest.TextTestRunner()
|
||||
return None
|
||||
|
||||
|
||||
def _load_doctype_list(doctype_list_path):
	"""Read doctype names (one per line) from a list file shipped inside an app.

	*doctype_list_path* is given as ``<app>/<path/inside/app>``; the first path
	segment names the app, the rest is resolved relative to that app's package.
	"""
	app_name, relative_path = doctype_list_path.split(os.path.sep, 1)
	list_file = frappe.get_app_path(app_name, relative_path)
	with open(list_file) as f:
		contents = f.read()
	return contents.strip().splitlines()
|
||||
|
||||
|
||||
def _run_module_def_tests(app, module_def, runner: "TestRunner", force) -> "TestRunner":
	"""Queue tests for every doctype belonging to *module_def* on *runner*."""
	from frappe.testing import discover_doctype_tests

	return discover_doctype_tests(_get_doctypes_for_module_def(app, module_def), runner, force, app)
|
||||
|
||||
|
||||
def _get_doctypes_for_module_def(app, module_def):
	"""Return the doctypes of *module_def* that have an importable test module.

	Only non-child-table doctypes are considered. A doctype whose
	``test_<doctype>`` module is missing or fails to import is silently
	skipped (best effort — there is nothing to run for it).
	"""
	# Hoisted out of the loop: one import machinery lookup, not one per doctype.
	import importlib

	from frappe.modules import get_module_name

	doctypes = []
	candidates = frappe.get_list(
		"DocType",
		filters={"module": module_def, "istable": 0},
		fields=["name", "module"],
		as_list=True,
	)
	for doctype, module in candidates:
		test_module = get_module_name(doctype, module, "test_", app=app)
		try:
			importlib.import_module(test_module)
		except Exception:
			# Deliberate best-effort: a missing/broken test module just means
			# this doctype contributes no tests.
			continue
		doctypes.append(doctype)
	return doctypes
|
||||
|
||||
|
||||
@click.command("run-tests")
@click.option("--app", help="For App")
@click.option("--doctype", help="For DocType")
@click.option("--module-def", help="For all Doctypes in Module Def")
@click.option("--case", help="Select particular TestCase")
@click.option(
	"--doctype-list-path",
	help="Path to .txt file for list of doctypes. Example erpnext/tests/server/agriculture.txt",
)
@click.option("--test", multiple=True, help="Specific test")
@click.option("--module", help="Run tests in a module")
@click.option("--pdb", is_flag=True, default=False, help="Open pdb on AssertionError")
@click.option("--profile", is_flag=True, default=False)
@click.option("--coverage", is_flag=True, default=False)
@click.option("--skip-test-records", is_flag=True, default=False, help="DEPRECATED")
@click.option("--skip-before-tests", is_flag=True, default=False, help="Don't run before tests hook")
@click.option("--junit-xml-output", help="Destination file path for junit xml report")
@click.option(
	"--failfast", is_flag=True, default=False, help="Stop the test run on the first error or failure"
)
@click.option(
	"--test-category",
	type=click.Choice(["unit", "integration", "all"]),
	default="all",
	help="Select test category to run",
)
@pass_context
def run_tests(
	context: CliCtxObj,
	app=None,
	module=None,
	doctype=None,
	module_def=None,
	test=(),
	profile=False,
	coverage=False,
	junit_xml_output=False,
	doctype_list_path=None,
	skip_test_records=False,
	skip_before_tests=False,
	failfast=False,
	case=None,
	test_category="all",
	pdb=False,
):
	"""Run python unit-tests"""

	# pdb support: drop into the debugger when a test raises an AssertionError.
	pdb_on_exceptions = None
	if pdb:
		pdb_on_exceptions = (AssertionError,)

	from frappe.coverage import CodeCoverage

	with CodeCoverage(coverage, app):
		import frappe

		tests = test
		site = get_site(context)

		frappe.init(site)
		allow_tests = frappe.get_conf().allow_tests

		# Tests can destroy site data; require an explicit opt-in (or CI).
		if not (allow_tests or os.environ.get("CI")):
			click.secho("Testing is disabled for the site!", bold=True)
			click.secho("You can enable tests by entering following command:")
			click.secho(f"bench --site {site} set-config allow_tests true", fg="green")
			return

		if skip_test_records:
			click.secho("--skip-test-records is deprecated and without effect!", bold=True)
			click.secho("All records are loaded lazily on first use, so the flag is useless, now.")
			click.secho("Simply remove the flag.", fg="green")
			return

		main(
			site,
			app,
			module,
			doctype,
			module_def,
			context.verbose,
			tests=tests,
			force=context.force,
			profile=profile,
			junit_xml_output=junit_xml_output,
			doctype_list_path=doctype_list_path,
			failfast=failfast,
			case=case,
			skip_before_tests=skip_before_tests,
			pdb_on_exceptions=pdb_on_exceptions,
			# main() expects a list of category names; previously the bare
			# string was passed through, which only worked by accident.
			selected_categories=[] if test_category == "all" else [test_category],
		)
|
||||
|
||||
|
||||
@click.command("run-parallel-tests")
@click.option("--app", help="For App", default="frappe")
@click.option("--build-number", help="Build number", default=1)
@click.option("--total-builds", help="Total number of builds", default=1)
@click.option(
	"--with-coverage",
	is_flag=True,
	help="Build coverage file",
	envvar="CAPTURE_COVERAGE",
)
@click.option("--use-orchestrator", is_flag=True, help="Use orchestrator to run parallel tests")
@click.option("--dry-run", is_flag=True, default=False, help="Dont actually run tests")
@pass_context
def run_parallel_tests(
	context: CliCtxObj,
	app,
	build_number,
	total_builds,
	with_coverage=False,
	use_orchestrator=False,
	dry_run=False,
):
	"""Run this build's share of the app's test suite, split across total_builds."""
	from traceback_with_variables import activate_by_import

	from frappe.coverage import CodeCoverage

	with CodeCoverage(with_coverage, app):
		site = get_site(context)

		if use_orchestrator:
			# Orchestrated mode: an external service hands out the test batches.
			from frappe.parallel_test_runner import ParallelTestWithOrchestrator

			ParallelTestWithOrchestrator(app, site=site)
			return

		from frappe.parallel_test_runner import ParallelTestRunner

		ParallelTestRunner(
			app,
			site=site,
			build_number=build_number,
			total_builds=total_builds,
			dry_run=dry_run,
		).setup_and_run()
|
||||
|
||||
|
||||
@click.command(
	"run-ui-tests",
	context_settings=dict(
		ignore_unknown_options=True,
	),
)
@click.argument("app")
@click.argument("cypressargs", nargs=-1, type=click.UNPROCESSED)
@click.option("--headless", is_flag=True, help="Run UI Test in headless mode")
@click.option("--parallel", is_flag=True, help="Run UI Test in parallel mode")
@click.option("--with-coverage", is_flag=True, help="Generate coverage report")
@click.option("--browser", default="chrome", help="Browser to run tests in")
@click.option("--ci-build-id")
@pass_context
def run_ui_tests(
	context: CliCtxObj,
	app,
	headless=False,
	parallel=True,
	with_coverage=False,
	browser="chrome",
	ci_build_id=None,
	cypressargs=None,
):
	"Run UI tests"
	site = get_site(context)
	frappe.init(site)
	app_base_path = frappe.get_app_source_path(app)
	site_url = frappe.utils.get_site_url(site)
	admin_password = frappe.get_conf().admin_password

	# Cypress picks its configuration up from CYPRESS_* environment variables;
	# they are prepended to the shell command below.
	site_env = f"CYPRESS_baseUrl={site_url}"
	password_env = f"CYPRESS_adminPassword={admin_password}" if admin_password else ""
	coverage_env = f"CYPRESS_coverage={str(with_coverage).lower()}"

	os.chdir(app_base_path)

	node_bin = subprocess.getoutput("(cd ../frappe && yarn bin)")
	cypress_path = f"{node_bin}/cypress"
	required_paths = (
		cypress_path,
		f"{node_bin}/../@4tw/cypress-drag-drop",
		f"{node_bin}/../cypress-real-events",
		f"{node_bin}/../@testing-library",
		f"{node_bin}/../@cypress/code-coverage",
	)

	# Install cypress and its plugins if any of them is missing from node_modules.
	if not all(os.path.exists(p) for p in required_paths):
		click.secho("Installing Cypress...", fg="yellow")
		packages = " ".join(
			[
				"cypress@^13",
				"@4tw/cypress-drag-drop@^2",
				"cypress-real-events",
				"@testing-library/cypress@^10",
				"@testing-library/dom@8.17.1",
				"@cypress/code-coverage@^3",
			]
		)
		frappe.commands.popen(f"(cd ../frappe && yarn add {packages} --no-lockfile)")

	# Headless runs use `cypress run`; otherwise open the interactive UI.
	run_or_open = f"run --browser {browser}" if headless else "open"
	formatted_command = f"{site_env} {password_env} {coverage_env} {cypress_path} {run_or_open}"

	if os.environ.get("CYPRESS_RECORD_KEY"):
		formatted_command += " --record"

	if parallel:
		formatted_command += " --parallel"

	if ci_build_id:
		formatted_command += f" --ci-build-id {ci_build_id}"

	if cypressargs:
		formatted_command += " " + " ".join(cypressargs)

	click.secho("Running Cypress...", fg="yellow")
	frappe.commands.popen(formatted_command, cwd=app_base_path, raise_err=True)
|
||||
|
||||
|
||||
commands = [
|
||||
run_tests,
|
||||
run_parallel_tests,
|
||||
run_ui_tests,
|
||||
]
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@ -749,229 +749,6 @@ def transform_database(context: CliCtxObj, table, engine, row_format, failfast):
|
|||
frappe.destroy()
|
||||
|
||||
|
||||
@click.command("run-tests")
|
||||
@click.option("--app", help="For App")
|
||||
@click.option("--doctype", help="For DocType")
|
||||
@click.option("--module-def", help="For all Doctypes in Module Def")
|
||||
@click.option("--case", help="Select particular TestCase")
|
||||
@click.option(
|
||||
"--doctype-list-path",
|
||||
help="Path to .txt file for list of doctypes. Example erpnext/tests/server/agriculture.txt",
|
||||
)
|
||||
@click.option("--test", multiple=True, help="Specific test")
|
||||
@click.option("--module", help="Run tests in a module")
|
||||
@click.option("--pdb", is_flag=True, default=False, help="Open pdb on AssertionError")
|
||||
@click.option("--profile", is_flag=True, default=False)
|
||||
@click.option("--coverage", is_flag=True, default=False)
|
||||
@click.option("--skip-test-records", is_flag=True, default=False, help="DEPRECATED")
|
||||
@click.option("--skip-before-tests", is_flag=True, default=False, help="Don't run before tests hook")
|
||||
@click.option("--junit-xml-output", help="Destination file path for junit xml report")
|
||||
@click.option(
|
||||
"--failfast", is_flag=True, default=False, help="Stop the test run on the first error or failure"
|
||||
)
|
||||
@click.option(
|
||||
"--test-category",
|
||||
type=click.Choice(["unit", "integration", "all"]),
|
||||
default="all",
|
||||
help="Select test category to run",
|
||||
)
|
||||
@pass_context
|
||||
def run_tests(
|
||||
context: CliCtxObj,
|
||||
app=None,
|
||||
module=None,
|
||||
doctype=None,
|
||||
module_def=None,
|
||||
test=(),
|
||||
profile=False,
|
||||
coverage=False,
|
||||
junit_xml_output=False,
|
||||
doctype_list_path=None,
|
||||
skip_test_records=False,
|
||||
skip_before_tests=False,
|
||||
failfast=False,
|
||||
case=None,
|
||||
test_category="all",
|
||||
pdb=False,
|
||||
):
|
||||
"""Run python unit-tests"""
|
||||
|
||||
pdb_on_exceptions = None
|
||||
if pdb:
|
||||
pdb_on_exceptions = (AssertionError,)
|
||||
|
||||
with CodeCoverage(coverage, app):
|
||||
import frappe
|
||||
import frappe.test_runner
|
||||
|
||||
tests = test
|
||||
site = get_site(context)
|
||||
|
||||
frappe.init(site)
|
||||
allow_tests = frappe.get_conf().allow_tests
|
||||
|
||||
if not (allow_tests or os.environ.get("CI")):
|
||||
click.secho("Testing is disabled for the site!", bold=True)
|
||||
click.secho("You can enable tests by entering following command:")
|
||||
click.secho(f"bench --site {site} set-config allow_tests true", fg="green")
|
||||
return
|
||||
|
||||
if skip_test_records:
|
||||
click.secho("--skip-test-records is deprecated and without effect!", bold=True)
|
||||
click.secho("All records are loaded lazily on first use, so the flag is useless, now.")
|
||||
click.secho("Simply remove the flag.", fg="green")
|
||||
return
|
||||
|
||||
frappe.test_runner.main(
|
||||
site,
|
||||
app,
|
||||
module,
|
||||
doctype,
|
||||
module_def,
|
||||
context.verbose,
|
||||
tests=tests,
|
||||
force=context.force,
|
||||
profile=profile,
|
||||
junit_xml_output=junit_xml_output,
|
||||
doctype_list_path=doctype_list_path,
|
||||
failfast=failfast,
|
||||
case=case,
|
||||
skip_before_tests=skip_before_tests,
|
||||
pdb_on_exceptions=pdb_on_exceptions,
|
||||
selected_categories=[] if test_category == "all" else test_category,
|
||||
)
|
||||
|
||||
|
||||
@click.command("run-parallel-tests")
|
||||
@click.option("--app", help="For App", default="frappe")
|
||||
@click.option("--build-number", help="Build number", default=1)
|
||||
@click.option("--total-builds", help="Total number of builds", default=1)
|
||||
@click.option(
|
||||
"--with-coverage",
|
||||
is_flag=True,
|
||||
help="Build coverage file",
|
||||
envvar="CAPTURE_COVERAGE",
|
||||
)
|
||||
@click.option("--use-orchestrator", is_flag=True, help="Use orchestrator to run parallel tests")
|
||||
@click.option("--dry-run", is_flag=True, default=False, help="Dont actually run tests")
|
||||
@pass_context
|
||||
def run_parallel_tests(
|
||||
context: CliCtxObj,
|
||||
app,
|
||||
build_number,
|
||||
total_builds,
|
||||
with_coverage=False,
|
||||
use_orchestrator=False,
|
||||
dry_run=False,
|
||||
):
|
||||
from traceback_with_variables import activate_by_import
|
||||
|
||||
with CodeCoverage(with_coverage, app):
|
||||
site = get_site(context)
|
||||
if use_orchestrator:
|
||||
from frappe.parallel_test_runner import ParallelTestWithOrchestrator
|
||||
|
||||
ParallelTestWithOrchestrator(app, site=site)
|
||||
else:
|
||||
from frappe.parallel_test_runner import ParallelTestRunner
|
||||
|
||||
runner = ParallelTestRunner(
|
||||
app,
|
||||
site=site,
|
||||
build_number=build_number,
|
||||
total_builds=total_builds,
|
||||
dry_run=dry_run,
|
||||
)
|
||||
runner.setup_and_run()
|
||||
|
||||
|
||||
@click.command(
|
||||
"run-ui-tests",
|
||||
context_settings=dict(
|
||||
ignore_unknown_options=True,
|
||||
),
|
||||
)
|
||||
@click.argument("app")
|
||||
@click.argument("cypressargs", nargs=-1, type=click.UNPROCESSED)
|
||||
@click.option("--headless", is_flag=True, help="Run UI Test in headless mode")
|
||||
@click.option("--parallel", is_flag=True, help="Run UI Test in parallel mode")
|
||||
@click.option("--with-coverage", is_flag=True, help="Generate coverage report")
|
||||
@click.option("--browser", default="chrome", help="Browser to run tests in")
|
||||
@click.option("--ci-build-id")
|
||||
@pass_context
|
||||
def run_ui_tests(
|
||||
context: CliCtxObj,
|
||||
app,
|
||||
headless=False,
|
||||
parallel=True,
|
||||
with_coverage=False,
|
||||
browser="chrome",
|
||||
ci_build_id=None,
|
||||
cypressargs=None,
|
||||
):
|
||||
"Run UI tests"
|
||||
site = get_site(context)
|
||||
frappe.init(site)
|
||||
app_base_path = frappe.get_app_source_path(app)
|
||||
site_url = frappe.utils.get_site_url(site)
|
||||
admin_password = frappe.get_conf().admin_password
|
||||
|
||||
# override baseUrl using env variable
|
||||
site_env = f"CYPRESS_baseUrl={site_url}"
|
||||
password_env = f"CYPRESS_adminPassword={admin_password}" if admin_password else ""
|
||||
coverage_env = f"CYPRESS_coverage={str(with_coverage).lower()}"
|
||||
|
||||
os.chdir(app_base_path)
|
||||
|
||||
node_bin = subprocess.getoutput("(cd ../frappe && yarn bin)")
|
||||
cypress_path = f"{node_bin}/cypress"
|
||||
drag_drop_plugin_path = f"{node_bin}/../@4tw/cypress-drag-drop"
|
||||
real_events_plugin_path = f"{node_bin}/../cypress-real-events"
|
||||
testing_library_path = f"{node_bin}/../@testing-library"
|
||||
coverage_plugin_path = f"{node_bin}/../@cypress/code-coverage"
|
||||
|
||||
# check if cypress in path...if not, install it.
|
||||
if not (
|
||||
os.path.exists(cypress_path)
|
||||
and os.path.exists(drag_drop_plugin_path)
|
||||
and os.path.exists(real_events_plugin_path)
|
||||
and os.path.exists(testing_library_path)
|
||||
and os.path.exists(coverage_plugin_path)
|
||||
):
|
||||
# install cypress & dependent plugins
|
||||
click.secho("Installing Cypress...", fg="yellow")
|
||||
packages = " ".join(
|
||||
[
|
||||
"cypress@^13",
|
||||
"@4tw/cypress-drag-drop@^2",
|
||||
"cypress-real-events",
|
||||
"@testing-library/cypress@^10",
|
||||
"@testing-library/dom@8.17.1",
|
||||
"@cypress/code-coverage@^3",
|
||||
]
|
||||
)
|
||||
frappe.commands.popen(f"(cd ../frappe && yarn add {packages} --no-lockfile)")
|
||||
|
||||
# run for headless mode
|
||||
run_or_open = f"run --browser {browser}" if headless else "open"
|
||||
formatted_command = f"{site_env} {password_env} {coverage_env} {cypress_path} {run_or_open}"
|
||||
|
||||
if os.environ.get("CYPRESS_RECORD_KEY"):
|
||||
formatted_command += " --record"
|
||||
|
||||
if parallel:
|
||||
formatted_command += " --parallel"
|
||||
|
||||
if ci_build_id:
|
||||
formatted_command += f" --ci-build-id {ci_build_id}"
|
||||
|
||||
if cypressargs:
|
||||
formatted_command += " " + " ".join(cypressargs)
|
||||
|
||||
click.secho("Running Cypress...", fg="yellow")
|
||||
frappe.commands.popen(formatted_command, cwd=app_base_path, raise_err=True)
|
||||
|
||||
|
||||
@click.command("serve")
|
||||
@click.option("--port", default=8000)
|
||||
@click.option("--profile", is_flag=True, default=False)
|
||||
|
|
@ -1250,8 +1027,6 @@ commands = [
|
|||
postgres,
|
||||
request,
|
||||
reset_perms,
|
||||
run_tests,
|
||||
run_ui_tests,
|
||||
serve,
|
||||
set_config,
|
||||
show_config,
|
||||
|
|
@ -1259,6 +1034,5 @@ commands = [
|
|||
bulk_rename,
|
||||
add_to_email_queue,
|
||||
rebuild_global_search,
|
||||
run_parallel_tests,
|
||||
list_sites,
|
||||
]
|
||||
|
|
|
|||
|
|
@ -13,7 +13,7 @@ import requests
|
|||
import frappe
|
||||
from frappe.tests.utils import make_test_records
|
||||
|
||||
from .test_runner import TestResult
|
||||
from .testing.result import TestResult
|
||||
|
||||
click_ctx = click.get_current_context(True)
|
||||
if click_ctx:
|
||||
|
|
|
|||
|
|
@ -1,328 +1,15 @@
|
|||
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
|
||||
# License: MIT. See LICENSE
|
||||
"""
|
||||
This module provides functionality for running tests in Frappe applications.
|
||||
DEPRECATED.
|
||||
|
||||
It includes utilities for running tests for specific doctypes, modules, or entire applications,
|
||||
as well as functions for creating and managing test records.
|
||||
This entire file is deprecated and will be removed in v17.
|
||||
|
||||
DO NOT ADD ANYTHING!
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import contextlib
|
||||
import cProfile
|
||||
import importlib
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import pstats
|
||||
import sys
|
||||
import time
|
||||
import unittest
|
||||
from collections import defaultdict
|
||||
from dataclasses import dataclass, field
|
||||
from functools import wraps
|
||||
from io import StringIO
|
||||
from pathlib import Path
|
||||
from typing import Optional, Union
|
||||
|
||||
import click
|
||||
|
||||
import frappe
|
||||
import frappe.utils.scheduler
|
||||
from frappe.modules import get_module_name
|
||||
from frappe.tests.utils import IntegrationTestCase
|
||||
from frappe.utils import cint
|
||||
|
||||
SLOW_TEST_THRESHOLD = 2
|
||||
|
||||
# Define category priorities
|
||||
CATEGORY_PRIORITIES = {
|
||||
"unit": 1,
|
||||
"integration": 2,
|
||||
"functional": 3,
|
||||
# Add more categories and their priorities as needed
|
||||
}
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def debug_timer(func):
	"""Decorator that logs the wall-clock duration of each call at DEBUG level."""

	@wraps(func)
	def timed(*args, **kwargs):
		began = time.monotonic()
		result = func(*args, **kwargs)
		elapsed = time.monotonic() - began
		logger.debug(f" {func.__name__:<50} ⌛{elapsed:>6.3f} seconds")
		return result

	return timed
|
||||
|
||||
|
||||
def iterOnFirstArg(func):
|
||||
return lambda self, arg, *args, **kwargs: [
|
||||
func(self, a, *args, **kwargs) for a in ([arg] if isinstance(arg, str | tuple) else arg)
|
||||
][-1]
|
||||
|
||||
|
||||
class TestRunner(unittest.TextTestRunner):
|
||||
def __init__(
|
||||
self,
|
||||
stream=None,
|
||||
descriptions=True,
|
||||
verbosity=1,
|
||||
failfast=False,
|
||||
buffer=False,
|
||||
resultclass=None,
|
||||
warnings=None,
|
||||
*,
|
||||
tb_locals=False,
|
||||
cfg: TestConfig,
|
||||
):
|
||||
super().__init__(
|
||||
stream=stream,
|
||||
descriptions=descriptions,
|
||||
verbosity=verbosity,
|
||||
failfast=cfg.failfast,
|
||||
buffer=buffer,
|
||||
resultclass=resultclass or (TestResult if not cfg.junit_xml_output else None),
|
||||
warnings=warnings,
|
||||
tb_locals=tb_locals,
|
||||
)
|
||||
self.cfg = cfg
|
||||
self.per_app_categories = defaultdict(lambda: defaultdict(unittest.TestSuite))
|
||||
logger.debug("TestRunner initialized")
|
||||
|
||||
def run(self) -> list[unittest.TestResult]:
|
||||
results = []
|
||||
for app, categories in self.per_app_categories.items():
|
||||
sorted_categories = sorted(
|
||||
categories.items(), key=lambda x: CATEGORY_PRIORITIES.get(x[0], float("inf"))
|
||||
)
|
||||
for category, suite in sorted_categories:
|
||||
if not self._has_tests(suite):
|
||||
continue
|
||||
|
||||
self._prepare_category(category, suite, app)
|
||||
self._apply_debug_decorators(suite)
|
||||
|
||||
with self._profile():
|
||||
click.secho(
|
||||
f"\nRunning {suite.countTestCases()} {category} tests for {app}", fg="cyan", bold=True
|
||||
)
|
||||
result = super().run(suite)
|
||||
results.append((app, category, result))
|
||||
if not result.wasSuccessful() and self.cfg.failfast:
|
||||
break
|
||||
return results
|
||||
|
||||
def _has_tests(self, suite):
|
||||
return next(self._iterate_suite(suite), None) is not None
|
||||
|
||||
def _prepare_category(self, category, suite, app):
|
||||
dispatcher = {
|
||||
"integration": self._prepare_integration,
|
||||
# Add other categories here as needed
|
||||
}
|
||||
prepare_method = dispatcher.get(category.lower())
|
||||
if prepare_method:
|
||||
prepare_method(suite, app)
|
||||
else:
|
||||
logger.warning(f"Unknown test category: {category}. No specific preparation performed.")
|
||||
|
||||
def _apply_debug_decorators(self, suite):
|
||||
if self.cfg.pdb_on_exceptions:
|
||||
for test in self._iterate_suite(suite):
|
||||
if hasattr(test, "_apply_debug_decorator"):
|
||||
test._apply_debug_decorator(self.cfg.pdb_on_exceptions)
|
||||
|
||||
@contextlib.contextmanager
|
||||
def _profile(self):
|
||||
if self.cfg.profile:
|
||||
pr = cProfile.Profile()
|
||||
pr.enable()
|
||||
yield
|
||||
if self.cfg.profile:
|
||||
pr.disable()
|
||||
s = StringIO()
|
||||
ps = pstats.Stats(pr, stream=s).sort_stats("cumulative")
|
||||
ps.print_stats()
|
||||
print(s.getvalue())
|
||||
|
||||
@iterOnFirstArg
def discover_tests(self, app: str) -> TestRunner:
	"""Walk *app*'s source tree and register every ``test_*.py`` module found.

	Hidden directories, build/vendor folders and the doctype boilerplate
	templates are skipped. Returns ``self`` so calls can be chained.
	"""
	logger.debug(f"Discovering tests for app: {app}")
	app_path = Path(frappe.get_app_path(app))
	for path, folders, files in os.walk(app_path):
		# Prune subfolders in place so os.walk does not descend into them.
		folders[:] = [f for f in folders if not f.startswith(".")]
		for dontwalk in ("node_modules", "locals", "public", "__pycache__"):
			if dontwalk in folders:
				folders.remove(dontwalk)
		# Skip the new-doctype boilerplate templates (not real test modules).
		if os.path.sep.join(["doctype", "doctype", "boilerplate"]) in path:
			continue
		path = Path(path)
		for file in [
			path.joinpath(filename)
			for filename in files
			if filename.startswith("test_") and filename.endswith(".py") and filename != "test_runner.py"
		]:
			# Build the dotted import path relative to the app's parent directory.
			module_name = f"{'.'.join(file.relative_to(app_path.parent).parent.parts)}.{file.stem}"
			self._add_module_tests(app, module_name)
	return self
|
||||
|
||||
@iterOnFirstArg
def discover_module_tests(self, spec: tuple[str, str]) -> TestRunner:
	"""Register the tests of one (app, dotted module path) spec; returns self for chaining."""
	app_name, module_path = spec
	self._add_module_tests(app_name, module_path)
	return self
|
||||
|
||||
def _add_module_tests(self, app: str, module: str):
	"""Import *module*, filter its tests per the config, and file them under app/category."""
	mod = importlib.import_module(module)
	loader = unittest.TestLoader()
	if self.cfg.case:
		suite = loader.loadTestsFromTestCase(getattr(mod, self.cfg.case))
	else:
		suite = loader.loadTestsFromModule(mod)

	wanted = self.cfg.tests
	for case in self._iterate_suite(suite):
		# Restrict to explicitly requested test method names, when given.
		if wanted and case._testMethodName not in wanted:
			continue
		kind = "integration" if isinstance(case, IntegrationTestCase) else "unit"
		if self.cfg.selected_categories and kind not in self.cfg.selected_categories:
			continue
		self.per_app_categories[app or "default"][kind].addTest(case)
|
||||
|
||||
def _prepare_integration(self, suite: unittest.TestSuite, app: str) -> None:
	"""Prepare the environment for integration tests."""
	if self.cfg.skip_before_tests:
		logger.debug("Skipping before_tests hooks: Explicitly skipped")
	else:
		self._run_before_test_hooks(app)

	if app:
		self._create_global_test_record_dependencies(app)
|
||||
|
||||
@staticmethod
@debug_timer
def _run_before_test_hooks(app: str | None):
	"""Run 'before_tests' hooks"""
	logger.debug('Running "before_tests" hooks')
	hooks = frappe.get_hooks("before_tests", app_name=app)
	for hook in hooks:
		frappe.get_attr(hook)()
|
||||
|
||||
@staticmethod
@debug_timer
def _create_global_test_record_dependencies(app: str | None):
	"""Create global test record dependencies"""
	tests_module = frappe.get_module(f"{app}.tests")
	dependencies = getattr(tests_module, "global_test_dependencies", None)
	if dependencies is None:
		return
	logger.info("Creating global test record dependencies ...")
	for doctype in dependencies:
		logger.debug(f"Creating global test records for {doctype}")
		make_test_records(doctype, commit=True)
|
||||
|
||||
@staticmethod
|
||||
def _iterate_suite(suite):
|
||||
for test in suite:
|
||||
if isinstance(test, unittest.TestSuite):
|
||||
yield from TestRunner._iterate_suite(test)
|
||||
elif isinstance(test, unittest.TestCase):
|
||||
yield test
|
||||
|
||||
|
||||
class TestResult(unittest.TextTestResult):
	"""Click-styled test result reporter with per-test timing.

	NOTE: the ``super(unittest.TextTestResult, self)`` calls deliberately
	skip TextTestResult's own dot/verbose reporting and delegate straight to
	unittest.TestResult, so all console output comes from this class.
	"""

	def startTest(self, test):
		# Include local variables in captured tracebacks.
		self.tb_locals = True
		# Wall-clock start; addSuccess uses it to flag slow tests.
		self._started_at = time.monotonic()
		super(unittest.TextTestResult, self).startTest(test)
		test_class = unittest.util.strclass(test.__class__)
		# Emit a header line once per test class; include the test records
		# created for the class when the class recorded any.
		if getattr(self, "current_test_class", None) != test_class:
			if new_doctypes := getattr(test.__class__, "_newly_created_test_records", None):
				click.echo(f"\n{unittest.util.strclass(test.__class__)}")
				click.secho(
					f" Test Records created: {', '.join([f'{name} ({qty})' for name, qty in reversed(new_doctypes)])}",
					fg="bright_black",
				)
			else:
				click.echo(f"\n{unittest.util.strclass(test.__class__)}")
			self.current_test_class = test_class

	def getTestMethodName(self, test):
		# Fall back to str(test) for objects without _testMethodName.
		return test._testMethodName if hasattr(test, "_testMethodName") else str(test)

	def addSuccess(self, test):
		super(unittest.TextTestResult, self).addSuccess(test)
		elapsed = time.monotonic() - self._started_at
		threshold_passed = elapsed >= SLOW_TEST_THRESHOLD
		# Append the elapsed time in red only when the test is slow.
		elapsed_over_threashold = click.style(f" ({elapsed:.03}s)", fg="red") if threshold_passed else ""
		logger.info(
			f" {click.style(' ✔ ', fg='green')} {self.getTestMethodName(test)}{elapsed_over_threashold}"
		)
		logger.debug(f"=== success === {test} {elapsed}")

	def addError(self, test, err):
		super(unittest.TextTestResult, self).addError(test, err)
		click.echo(f" {click.style(' ✖ ', fg='red')} {self.getTestMethodName(test)}")
		logger.debug(f"=== error === {test}")

	def addFailure(self, test, err):
		super(unittest.TextTestResult, self).addFailure(test, err)
		click.echo(f" {click.style(' ✖ ', fg='red')} {self.getTestMethodName(test)}")
		logger.debug(f"=== failure === {test}")

	def addSkip(self, test, reason):
		super(unittest.TextTestResult, self).addSkip(test, reason)
		click.echo(f" {click.style(' = ', fg='white')} {self.getTestMethodName(test)}")
		logger.debug(f"=== skipped === {test}")

	def addExpectedFailure(self, test, err):
		super(unittest.TextTestResult, self).addExpectedFailure(test, err)
		click.echo(f" {click.style(' ✖ ', fg='red')} {self.getTestMethodName(test)}")
		logger.debug(f"=== expected failure === {test}")

	def addUnexpectedSuccess(self, test):
		super(unittest.TextTestResult, self).addUnexpectedSuccess(test)
		click.echo(f" {click.style(' ✔ ', fg='green')} {self.getTestMethodName(test)}")
		logger.debug(f"=== unexpected success === {test}")

	def printErrors(self):
		click.echo("\n")
		self.printErrorList(" ERROR ", self.errors, "red")
		self.printErrorList(" FAIL ", self.failures, "red")

	def printErrorList(self, flavour, errors, color):
		# errors is a list of (test, formatted_traceback) pairs.
		for test, err in errors:
			click.echo(self.separator1)
			click.echo(f"{click.style(flavour, bg=color)} {self.getDescription(test)}")
			click.echo(self.separator2)
			click.echo(err)

	def __str__(self):
		return f"Tests: {self.testsRun}, Failing: {len(self.failures)}, Errors: {len(self.errors)}"
|
||||
|
||||
|
||||
class TestRunnerError(Exception):
	"""Custom exception for test runner errors"""
|
||||
|
||||
|
||||
# Module-wide logging: plain "LEVEL: message" lines; main() adjusts the level
# based on the --verbose flag.
logging.basicConfig(format="%(levelname)s: %(message)s")
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
class TestConfig:
	"""Configuration class for test runner"""

	profile: bool = False  # wrap each suite run in cProfile and print stats
	failfast: bool = False  # stop at the first unsuccessful suite
	junit_xml_output: bool = False  # True when JUnit XML output was requested
	tests: tuple = ()  # restrict the run to these test method names
	case: str | None = None  # restrict the run to a single TestCase class name
	pdb_on_exceptions: tuple | None = None  # exception types that drop into the debugger
	selected_categories: list[str] = field(default_factory=list)  # e.g. ["unit", "integration"]
	skip_before_tests: bool = False  # skip 'before_tests' hooks for integration tests
|
||||
from frappe.commands.testing import main
|
||||
from frappe.testing.result import SLOW_TEST_THRESHOLD
|
||||
|
||||
|
||||
def xmlrunner_wrapper(output):
|
||||
|
|
@ -341,277 +28,6 @@ def xmlrunner_wrapper(output):
|
|||
return _runner
|
||||
|
||||
|
||||
def main(
	site: str | None = None,
	app: str | None = None,
	module: str | None = None,
	doctype: str | None = None,
	module_def: str | None = None,
	verbose: bool = False,
	tests: tuple = (),
	force: bool = False,
	profile: bool = False,
	junit_xml_output: str | None = None,
	doctype_list_path: str | None = None,
	failfast: bool = False,
	case: str | None = None,
	skip_before_tests: bool = False,
	pdb_on_exceptions: bool = False,
	selected_categories: list[str] | None = None,
) -> None:
	"""Main function to run tests.

	Validates the mutually exclusive selectors, initializes the site/test
	environment, dispatches to the appropriate discovery/run helper, prints
	a per-category summary, and exits non-zero on failure.

	BUG FIX: TestRunner.run() produces (app, category, result) triples; this
	function previously unpacked them as 2-tuples, raising ValueError.
	"""
	logger.setLevel(logging.DEBUG if verbose else logging.INFO)
	start_time = time.time()

	# Check for mutually exclusive arguments
	exclusive_args = [doctype, doctype_list_path, module_def, module]
	if sum(arg is not None for arg in exclusive_args) > 1:
		error_message = (
			"Error: The following arguments are mutually exclusive: "
			"doctype, doctype_list_path, module_def, and module. "
			"Please specify only one of these."
		)
		logger.error(error_message)
		sys.exit(1)

	# Prepare debug log message
	debug_params = []
	for param_name in ["site", "app", "module", "doctype", "module_def", "doctype_list_path"]:
		param_value = locals()[param_name]
		if param_value is not None:
			debug_params.append(f"{param_name}={param_value}")

	if debug_params:
		logger.debug(f"Starting test run with parameters: {', '.join(debug_params)}")
	else:
		logger.debug("Starting test run with no specific parameters")

	test_config = TestConfig(
		profile=profile,
		failfast=failfast,
		junit_xml_output=bool(junit_xml_output),
		tests=tests,
		case=case,
		pdb_on_exceptions=pdb_on_exceptions,
		selected_categories=selected_categories or [],
		skip_before_tests=skip_before_tests,
	)

	_initialize_test_environment(site, test_config)

	xml_output_file = _setup_xml_output(junit_xml_output)

	try:
		# Create TestRunner instance
		runner = TestRunner(
			verbosity=2 if logger.getEffectiveLevel() < logging.INFO else 1,
			tb_locals=logger.getEffectiveLevel() <= logging.INFO,
			cfg=test_config,
		)

		# Dispatch on the (validated) selector.
		if doctype or doctype_list_path:
			doctype = _load_doctype_list(doctype_list_path) if doctype_list_path else doctype
			results = _run_doctype_tests(doctype, runner, force, app)
		elif module_def:
			results = _run_module_def_tests(app, module_def, runner, force)
		elif module:
			results = _run_module_tests(module, runner, app)
		else:
			apps = [app] if app else frappe.get_installed_apps()
			results = _run_all_tests(apps, runner)

		# Determine overall success by checking if any test suite failed.
		# BUG FIX: results entries are (app, category, result) triples.
		success = all(result.wasSuccessful() for _, _, result in results)
		click.secho("\nTest Results:", fg="cyan", bold=True)

		def _print_result(category, result):
			# One summary block (and failure/error lists) per category.
			tests_run = result.testsRun
			failures = len(result.failures)
			errors = len(result.errors)
			click.echo(
				f"\n{click.style(f'{category} Tests:', bold=True)}\n"
				f" Ran: {click.style(f'{tests_run:<3}', fg='cyan')}"
				f" Failures: {click.style(f'{failures:<3}', fg='red' if failures else 'green')}"
				f" Errors: {click.style(f'{errors:<3}', fg='red' if errors else 'green')}"
			)

			if failures > 0:
				click.echo(f"\n{click.style(category + ' Test Failures:', fg='red', bold=True)}")
				for i, failure in enumerate(result.failures, 1):
					click.echo(f" {i}. {click.style(str(failure[0]), fg='yellow')}")

			if errors > 0:
				click.echo(f"\n{click.style(category + ' Test Errors:', fg='red', bold=True)}")
				for i, error in enumerate(result.errors, 1):
					click.echo(f" {i}. {click.style(str(error[0]), fg='yellow')}")
					click.echo(click.style(" " + str(error[1]).split("\n")[-2], fg="red"))

		# BUG FIX: unpack the (app, category, result) triple.
		for _app, category, result in results:
			_print_result(category.title(), result)

		if success:
			click.echo(f"\n{click.style('All tests passed successfully!', fg='green', bold=True)}")
		else:
			click.echo(f"\n{click.style('Some tests failed or encountered errors.', fg='red', bold=True)}")

		if not success:
			sys.exit(1)

		return results

	finally:
		_cleanup_after_tests()
		if xml_output_file:
			xml_output_file.close()

		end_time = time.time()
		logger.debug(f"Total test run time: {end_time - start_time:.3f} seconds")
|
||||
|
||||
|
||||
@debug_timer
def _initialize_test_environment(site, config: TestConfig):
	"""Initialize the test environment"""
	logger.debug(f"Initializing test environment for site: {site}")
	frappe.init(site)
	if not frappe.db:
		frappe.connect()
	try:
		# require db access
		_disable_scheduler_if_needed()
		frappe.clear_cache()
	except Exception as e:
		logger.error(f"Error connecting to the database: {e!s}")
		raise TestRunnerError(f"Failed to connect to the database: {e}") from e

	# Set various test-related flags
	verbose_run = logger.getEffectiveLevel() < logging.INFO
	frappe.flags.in_test = True
	frappe.flags.print_messages = verbose_run
	frappe.flags.tests_verbose = verbose_run
	logger.debug("Test environment initialized")
|
||||
|
||||
|
||||
def _setup_xml_output(junit_xml_output):
	"""Setup XML output for test results if specified"""
	global unittest_runner

	if not junit_xml_output:
		unittest_runner = unittest.TextTestRunner
		return None

	# Caller is responsible for closing the returned file handle.
	xml_output_file = open(junit_xml_output, "wb")
	unittest_runner = xmlrunner_wrapper(xml_output_file)
	return xml_output_file
|
||||
|
||||
|
||||
def _load_doctype_list(doctype_list_path):
	"""Load the list of doctypes from the specified file"""
	# Path format is "<app>/<relative path inside app>".
	app_name, relative_path = doctype_list_path.split(os.path.sep, 1)
	with open(frappe.get_app_path(app_name, relative_path)) as handle:
		return handle.read().strip().splitlines()
|
||||
|
||||
|
||||
def _run_module_def_tests(
	app, module_def, runner: TestRunner, force
) -> tuple[unittest.TestResult, unittest.TestResult | None]:
	"""Run tests for the specified module definition"""
	return _run_doctype_tests(_get_doctypes_for_module_def(app, module_def), runner, force, app)
|
||||
|
||||
|
||||
def _get_doctypes_for_module_def(app, module_def):
	"""Get the list of doctypes for the specified module definition.

	Only doctypes whose test module can be imported are returned, so a
	missing test file does not abort the whole run.

	FIX: the import failure was previously swallowed silently
	(``except Exception: pass``); it is still best-effort but now logged.
	"""
	doctypes = []
	doctypes_ = frappe.get_list(
		"DocType",
		filters={"module": module_def, "istable": 0},
		fields=["name", "module"],
		as_list=True,
	)
	for doctype, module in doctypes_:
		test_module = get_module_name(doctype, module, "test_", app=app)
		try:
			importlib.import_module(test_module)
		except Exception as e:
			logger.debug(f"Skipping {doctype}: could not import {test_module} ({e!s})")
		else:
			doctypes.append(doctype)
	return doctypes
|
||||
|
||||
|
||||
# Global variable to track scheduler state
# True when the scheduler was already disabled before this test run started,
# so _cleanup_after_tests() knows not to re-enable it.
scheduler_disabled_by_user = False
|
||||
|
||||
|
||||
def _disable_scheduler_if_needed():
	"""Disable scheduler if it's not already disabled"""
	global scheduler_disabled_by_user
	# Remember the pre-run state so cleanup can restore it.
	scheduler_disabled_by_user = frappe.utils.scheduler.is_scheduler_disabled(verbose=False)
	if scheduler_disabled_by_user:
		return
	frappe.utils.scheduler.disable_scheduler()
|
||||
|
||||
|
||||
def _cleanup_after_tests():
	"""Perform cleanup operations after running tests"""
	global scheduler_disabled_by_user
	# Re-enable the scheduler only if this run was the one that disabled it.
	if not scheduler_disabled_by_user:
		frappe.utils.scheduler.enable_scheduler()

	db = frappe.db
	if db:
		db.commit()
		frappe.clear_cache()
|
||||
|
||||
|
||||
@debug_timer
def _run_all_tests(apps: list[str], runner: TestRunner) -> list[unittest.TestResult]:
	"""Run all tests for the specified app or all installed apps.

	BUG FIX: the run result was previously assigned to a local variable and
	dropped, making this function return None despite its annotation (and
	breaking main(), which iterates the return value). It is now returned.
	"""
	logger.debug(f"Running tests for apps: {apps}")
	try:
		return runner.discover_tests(apps).run()
	except Exception as e:
		logger.error(f"Error running all tests for {apps or 'all apps'}: {e!s}")
		raise TestRunnerError(f"Failed to run tests for {apps or 'all apps'}: {e!s}") from e
|
||||
|
||||
|
||||
@debug_timer
def _run_doctype_tests(
	doctypes: str | list[str], runner: TestRunner, force=False, app: str | None = None
) -> list[unittest.TestResult]:
	"""Run tests for the specified doctype(s).

	Resolves each doctype to its test module, validates that it belongs to
	*app* (or infers the app from the first doctype), then runs the modules.

	BUG FIX: TestRunner.run() takes no arguments; this previously called
	``.run(app)`` which raised TypeError.
	"""
	if isinstance(doctypes, str):
		doctypes = [doctypes]
	args = []
	for doctype in doctypes:
		module = frappe.db.get_value("DocType", doctype, "module")
		if not module:
			raise TestRunnerError(f"Invalid doctype {doctype}")

		# Check if the DocType belongs to the specified app
		doctype_app = frappe.db.get_value("Module Def", module, "app_name")
		if app and doctype_app != app:
			raise TestRunnerError(f"DocType {doctype} does not belong to app {app}")
		elif not app:
			app = doctype_app
		test_module = frappe.modules.utils.get_module_name(doctype, module, "test_")
		args.append((app, test_module))
		if force:
			# Wipe existing records so test records are recreated from scratch.
			frappe.db.delete(doctype)

	try:
		return runner.discover_module_tests(args).run()
	except Exception as e:
		logger.error(f"Error running tests for doctypes {doctypes}: {e!s}")
		raise TestRunnerError(f"Failed to run tests for doctypes: {e!s}") from e
|
||||
|
||||
|
||||
@debug_timer
def _run_module_tests(module, runner: TestRunner, app: str | None = None) -> list[unittest.TestResult]:
	"""Run tests for the specified module python test module.

	BUG FIX: TestRunner.run() takes no arguments; this previously called
	``.run(app)`` which raised TypeError.
	"""
	try:
		return runner.discover_module_tests((app, module)).run()
	except Exception as e:
		logger.error(f"Error running tests for module {module}: {e!s}")
		raise TestRunnerError(f"Failed to run tests for module: {e!s}") from e
|
||||
|
||||
|
||||
# Backwards-compatible aliases
|
||||
from frappe.tests.utils import (
|
||||
TestRecordLog,
|
||||
get_dependencies,
|
||||
|
|
|
|||
37
frappe/testing/README.md
Normal file
37
frappe/testing/README.md
Normal file
|
|
@ -0,0 +1,37 @@
|
|||
# Frappe Testing Module
|
||||
|
||||
This module provides a comprehensive framework for running tests in Frappe applications. It includes functionality for test discovery, execution, result reporting, and environment setup.
|
||||
|
||||
## Key Components
|
||||
|
||||
- `TestConfig`: Configuration class for customizing test execution
|
||||
- `TestRunner`: Main class for running test suites with additional Frappe-specific functionality
|
||||
- `TestResult`: Custom test result class for improved output formatting and logging
|
||||
- `discover_all_tests`: Function to discover all tests in specified Frappe apps
|
||||
- `discover_doctype_tests`: Function to discover tests for specific DocTypes
|
||||
- `discover_module_tests`: Function to discover tests in specific modules
|
||||
|
||||
## Usage
|
||||
|
||||
This module is typically used by Frappe's CLI commands for running tests, but can also be used programmatically for custom test execution scenarios.
|
||||
|
||||
For detailed information about each component, please refer to the well-commented code in the following files:
|
||||
|
||||
- [`__init__.py`](./__init__.py): Module initialization and logging setup
|
||||
- [`runner.py`](./runner.py): TestRunner class and test execution logic
|
||||
- [`discovery.py`](./discovery.py): Test discovery functions
|
||||
- [`result.py`](./result.py): Custom TestResult class for result handling
|
||||
- [`environment.py`](./environment.py): Test environment setup and teardown
|
||||
|
||||
## Example
|
||||
|
||||
```python
|
||||
from frappe.testing import TestConfig, TestRunner, discover_all_tests
|
||||
|
||||
config = TestConfig(failfast=True, verbose=2)
|
||||
runner = TestRunner(cfg=config)
|
||||
discover_all_tests(['my_app'], runner)
|
||||
runner.run()
|
||||
```
|
||||
|
||||
For more detailed information about each component and its functionality, please refer to the docstrings and comments in the respective files.
|
||||
60
frappe/testing/__init__.py
Normal file
60
frappe/testing/__init__.py
Normal file
|
|
@ -0,0 +1,60 @@
|
|||
"""
|
||||
Frappe Testing Module
|
||||
|
||||
This module provides a comprehensive framework for running tests in Frappe applications.
|
||||
It includes functionality for test discovery, execution, result reporting, and environment setup.
|
||||
|
||||
Key components:
|
||||
- TestConfig: Configuration class for customizing test execution
|
||||
- TestRunner: Main class for running test suites with additional Frappe-specific functionality
|
||||
- TestResult: Custom test result class for improved output formatting and logging
|
||||
- discover_all_tests: Function to discover all tests in specified Frappe apps
|
||||
- discover_doctype_tests: Function to discover tests for specific DocTypes
|
||||
- discover_module_tests: Function to discover tests in specific modules
|
||||
|
||||
The module also includes:
|
||||
- Logging configuration for the testing framework
|
||||
- Environment setup and teardown utilities
|
||||
- Integration with Frappe's hooks and test record creation system
|
||||
|
||||
Usage:
|
||||
This module is typically used by Frappe's CLI commands for running tests, but can also
|
||||
be used programmatically for custom test execution scenarios.
|
||||
|
||||
Example:
|
||||
from frappe.testing import TestConfig, TestRunner, discover_all_tests
|
||||
|
||||
config = TestConfig(failfast=True, verbose=2)
|
||||
runner = TestRunner(cfg=config)
|
||||
discover_all_tests(['my_app'], runner)
|
||||
runner.run()
|
||||
"""
|
||||
|
||||
import logging
|
||||
import logging.config
|
||||
|
||||
from .config import TestConfig
|
||||
from .discovery import discover_all_tests, discover_doctype_tests, discover_module_tests
|
||||
from .result import TestResult
|
||||
from .runner import TestRunner
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
from frappe.utils.logger import create_handler as createFrappeFileHandler
|
||||
|
||||
# Route this package's records only through Frappe's file handler: disable
# propagation to the root logger, then attach the handlers created below.
LOGGING_CONFIG = {
	"version": 1,
	"disable_existing_loggers": False,
	"formatters": {},
	"loggers": {
		f"{__name__}": {
			"handlers": [],  # only log to the frappe handler
			"propagate": False,
		},
	},
}

logging.config.dictConfig(LOGGING_CONFIG)
# createFrappeFileHandler returns an iterable of handlers; attach them all.
handlers = createFrappeFileHandler(__name__)
for handler in handlers:
	logger.addHandler(handler)
|
||||
15
frappe/testing/config.py
Normal file
15
frappe/testing/config.py
Normal file
|
|
@ -0,0 +1,15 @@
|
|||
from dataclasses import dataclass, field
|
||||
from typing import Optional, Union
|
||||
|
||||
|
||||
@dataclass
class TestConfig:
	"""Configuration class for test runner"""

	profile: bool = False  # wrap each suite run in cProfile and print stats
	failfast: bool = False  # stop at the first unsuccessful suite
	tests: tuple = ()  # restrict the run to these test method names
	case: str | None = None  # restrict the run to a single TestCase class name
	pdb_on_exceptions: tuple | None = None  # exception types that drop into the debugger
	selected_categories: list[str] = field(default_factory=list)  # e.g. ["unit", "integration"]
	skip_before_tests: bool = False  # skip 'before_tests' hooks for integration tests
|
||||
131
frappe/testing/discovery.py
Normal file
131
frappe/testing/discovery.py
Normal file
|
|
@ -0,0 +1,131 @@
|
|||
"""
|
||||
This module provides functionality for discovering and organizing tests in the Frappe framework.
|
||||
|
||||
Key components:
|
||||
- discover_all_tests: Discovers all tests for specified app(s)
|
||||
- discover_doctype_tests: Discovers tests for specific DocType(s)
|
||||
- discover_module_tests: Discovers tests for specific module(s)
|
||||
- _add_module_tests: Helper function to add tests from a module to the test runner
|
||||
|
||||
The module uses various strategies to find and categorize tests, including:
|
||||
- Walking through app directories
|
||||
- Importing test modules
|
||||
- Categorizing tests (e.g., unit, integration)
|
||||
- Filtering tests based on configuration
|
||||
|
||||
It also includes error handling and logging to facilitate debugging and provide informative error messages.
|
||||
|
||||
Usage:
|
||||
These functions are typically called by the test runner to populate the test suite before execution.
|
||||
"""
|
||||
|
||||
import importlib
|
||||
import logging
|
||||
import os
|
||||
import unittest
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import frappe
|
||||
from frappe.tests.utils import IntegrationTestCase
|
||||
|
||||
from .utils import debug_timer
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .runner import TestRunner
|
||||
|
||||
logger = logging.getLogger("frappe.testing.discovery")
|
||||
|
||||
|
||||
@debug_timer
def discover_all_tests(apps: list[str], runner) -> "TestRunner":
	"""Discover all tests for the specified app(s)"""
	logger.debug(f"Discovering tests for apps: {apps}")
	# Accept a single app name as a convenience.
	if isinstance(apps, str):
		apps = [apps]
	try:
		for app in apps:
			app_path = Path(frappe.get_app_path(app))
			for path, folders, files in os.walk(app_path):
				# Prune subfolders in place so os.walk does not descend into them.
				folders[:] = [f for f in folders if not f.startswith(".")]
				for dontwalk in ("node_modules", "locals", "public", "__pycache__"):
					if dontwalk in folders:
						folders.remove(dontwalk)
				# Skip the new-doctype boilerplate templates (not real test modules).
				if os.path.sep.join(["doctype", "doctype", "boilerplate"]) in path:
					continue
				path = Path(path)
				for file in [
					path.joinpath(filename)
					for filename in files
					if filename.startswith("test_")
					and filename.endswith(".py")
					and filename != "test_runner.py"
				]:
					# Build the dotted import path relative to the app's parent directory.
					module_name = f"{'.'.join(file.relative_to(app_path.parent).parent.parts)}.{file.stem}"
					_add_module_tests(runner, app, module_name)
	except Exception as e:
		logger.error(f"Error discovering all tests for {apps}: {e!s}")
		raise TestRunnerError(f"Failed to discover tests for {apps}: {e!s}") from e
	return runner
|
||||
|
||||
|
||||
@debug_timer
def discover_doctype_tests(doctypes: list[str], runner, app: str, force: bool = False) -> "TestRunner":
	"""Discover tests for the specified doctype(s)"""
	if isinstance(doctypes, str):
		doctypes = [doctypes]
	for doctype in doctypes:
		try:
			module = frappe.db.get_value("DocType", doctype, "module")
			if not module:
				raise TestRunnerError(f"Invalid doctype {doctype}")

			# Check if the DocType belongs to the specified app
			doctype_app = frappe.db.get_value("Module Def", module, "app_name")
			if app and doctype_app != app:
				raise TestRunnerError(f"DocType {doctype} does not belong to app {app}")
			elif not app:
				# NOTE(review): this rebinds the *app* parameter, so every later
				# doctype is validated against the first doctype's app — confirm
				# that this is the intended behavior.
				app = doctype_app
			test_module = frappe.modules.utils.get_module_name(doctype, module, "test_")
			# When force is truthy, wipe existing records so test records are recreated.
			force and frappe.db.delete(doctype)
			_add_module_tests(runner, app, test_module)
		except Exception as e:
			logger.error(f"Error discovering tests for {doctype}: {e!s}")
			raise TestRunnerError(f"Failed to discover tests for {doctype}: {e!s}") from e
	return runner
|
||||
|
||||
|
||||
@debug_timer
def discover_module_tests(modules: list[str], runner, app: str) -> "TestRunner":
	"""Discover tests for the specified test module"""
	module_list = [modules] if isinstance(modules, str) else modules
	module = None
	try:
		for module in module_list:
			_add_module_tests(runner, app, module)
	except Exception as e:
		logger.error(f"Error discovering tests for {module}: {e!s}")
		raise TestRunnerError(f"Failed to discover tests for {module}: {e!s}") from e
	return runner
|
||||
|
||||
|
||||
def _add_module_tests(runner, app: str, module: str):
	"""Import *module*, filter its tests per the runner config, and register them."""
	imported = importlib.import_module(module)
	loader = unittest.TestLoader()
	if runner.cfg.case:
		suite = loader.loadTestsFromTestCase(getattr(imported, runner.cfg.case))
	else:
		suite = loader.loadTestsFromModule(imported)

	for case in runner._iterate_suite(suite):
		# Restrict to explicitly requested test method names, when given.
		if runner.cfg.tests and case._testMethodName not in runner.cfg.tests:
			continue
		kind = "integration" if isinstance(case, IntegrationTestCase) else "unit"
		if runner.cfg.selected_categories and kind not in runner.cfg.selected_categories:
			continue
		runner.per_app_categories[app][kind].addTest(case)
|
||||
|
||||
|
||||
class TestRunnerError(Exception):
	"""Custom exception for test runner errors"""
|
||||
111
frappe/testing/environment.py
Normal file
111
frappe/testing/environment.py
Normal file
|
|
@ -0,0 +1,111 @@
|
|||
"""
|
||||
This module handles the setup and teardown of the test environment for Frappe applications.
|
||||
|
||||
Key components:
|
||||
- _initialize_test_environment: Initializes the test environment for a given site
|
||||
- _cleanup_after_tests: Performs cleanup operations after running tests
|
||||
- _disable_scheduler_if_needed: Disables the scheduler if it's not already disabled
|
||||
- IntegrationTestPreparation: A class to prepare the environment for integration tests
|
||||
|
||||
The module provides functionality for:
|
||||
- Initializing the database connection
|
||||
- Setting test-related flags
|
||||
- Disabling the scheduler during tests
|
||||
- Running 'before_tests' hooks
|
||||
- Creating global test record dependencies
|
||||
|
||||
Usage:
|
||||
These functions and classes are typically used by the test runner to set up
|
||||
and tear down the test environment before and after test execution.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import unittest
|
||||
|
||||
import frappe
|
||||
import frappe.utils.scheduler
|
||||
from frappe.tests.utils import make_test_records
|
||||
|
||||
from .runner import TestRunnerError
|
||||
from .utils import debug_timer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@debug_timer
def _initialize_test_environment(site, config):
	"""Initialize the test environment"""
	logger.debug(f"Initializing test environment for site: {site}")
	frappe.init(site)
	if not frappe.db:
		frappe.connect()
	try:
		# require db access
		_disable_scheduler_if_needed()
		frappe.clear_cache()
	except Exception as e:
		logger.error(f"Error connecting to the database: {e!s}")
		raise TestRunnerError(f"Failed to connect to the database: {e}") from e

	# Set various test-related flags
	frappe.flags.in_test = True
	# Verbose message printing only when the logger is below INFO (i.e. DEBUG).
	frappe.flags.print_messages = logger.getEffectiveLevel() < logging.INFO
	frappe.flags.tests_verbose = logger.getEffectiveLevel() < logging.INFO
|
||||
|
||||
|
||||
def _cleanup_after_tests():
	"""Perform cleanup operations after running tests"""
	global scheduler_disabled_by_user
	# Re-enable the scheduler only if this run was the one that disabled it.
	if not scheduler_disabled_by_user:
		frappe.utils.scheduler.enable_scheduler()

	if frappe.db:
		# this commit ends the transaction
		frappe.db.commit()  # nosemgrep
		frappe.clear_cache()
|
||||
|
||||
|
||||
# Global variable to track scheduler state
# True when the scheduler was already disabled before this test run started,
# so _cleanup_after_tests() knows not to re-enable it.
scheduler_disabled_by_user = False
|
||||
|
||||
|
||||
def _disable_scheduler_if_needed():
	"""Disable scheduler if it's not already disabled"""
	global scheduler_disabled_by_user
	# Remember the pre-run state so _cleanup_after_tests() can restore it.
	scheduler_disabled_by_user = frappe.utils.scheduler.is_scheduler_disabled(verbose=False)
	if not scheduler_disabled_by_user:
		frappe.utils.scheduler.disable_scheduler()
|
||||
|
||||
|
||||
class IntegrationTestPreparation:
	"""Callable that prepares the site environment before integration tests run."""

	def __init__(self, cfg):
		# cfg: test configuration (reads skip_before_tests).
		self.cfg = cfg

	def __call__(self, suite: unittest.TestSuite, app: str, category: str) -> None:
		"""Prepare the environment for integration tests."""
		if not self.cfg.skip_before_tests:
			self._run_before_test_hooks(app, category)
		else:
			logger.debug("Skipping before_tests hooks: Explicitly skipped")

		self._create_global_test_record_dependencies(app, category)

	@staticmethod
	@debug_timer
	def _run_before_test_hooks(app: str, category: str):
		"""Run 'before_tests' hooks"""
		logger.info(f'Running "before_tests" hooks for {category} tests on app: {app}')
		for hook_function in frappe.get_hooks("before_tests", app_name=app):
			# BUG FIX: this message was a plain string literal missing the
			# f-prefix, so the hook name was never interpolated into the log.
			logger.info(f'Running "before_tests" hook function {hook_function}')
			frappe.get_attr(hook_function)()

	@staticmethod
	@debug_timer
	def _create_global_test_record_dependencies(app: str, category: str):
		"""Create global test record dependencies"""
		test_module = frappe.get_module(f"{app}.tests")
		if hasattr(test_module, "global_test_dependencies"):
			logger.info(f"Creating global test record dependencies for {category} tests on {app} ...")
			for doctype in test_module.global_test_dependencies:
				logger.debug(f"Creating global test records for {doctype}")
				make_test_records(doctype, commit=True)
|
||||
99
frappe/testing/result.py
Normal file
99
frappe/testing/result.py
Normal file
|
|
@ -0,0 +1,99 @@
|
|||
"""
|
||||
This module contains the TestResult class, which extends unittest.TextTestResult
|
||||
to provide custom formatting and logging for test results in the Frappe framework.
|
||||
|
||||
Key components:
|
||||
- TestResult: The main class for handling test results
|
||||
- SLOW_TEST_THRESHOLD: A constant defining the threshold for slow tests
|
||||
|
||||
The TestResult class provides:
|
||||
- Custom output formatting for different test outcomes (success, failure, error, skip)
|
||||
- Timing information for each test, with highlighting for slow tests
|
||||
- Logging of test results for debugging purposes
|
||||
- Custom error reporting
|
||||
|
||||
Usage:
|
||||
This TestResult class is typically used by the TestRunner to collect and display
|
||||
test results during test execution in the Frappe framework.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import time
|
||||
import unittest
|
||||
|
||||
import click
|
||||
|
||||
# Module-level logger; mirrors the click console output into the log stream.
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TestResult(unittest.TextTestResult):
	"""Test result with click-colored console output grouped per test class.

	NOTE: every hook below calls ``super(unittest.TextTestResult, self)``
	deliberately — this skips ``TextTestResult`` in the MRO and invokes the
	base ``unittest.TestResult`` bookkeeping directly, so TextTestResult's
	own dot/verbose stream output never runs; the click output here replaces it.
	"""

	def startTest(self, test):
		# Keep local variables in captured tracebacks for easier debugging.
		self.tb_locals = True
		# Wall-clock start; addSuccess uses it to flag slow tests.
		self._started_at = time.monotonic()
		super(unittest.TextTestResult, self).startTest(test)
		test_class = unittest.util.strclass(test.__class__)
		# Print the class header only once per consecutive run of its tests.
		if getattr(self, "current_test_class", None) != test_class:
			self.current_test_class = test_class
			click.echo(f"\n{unittest.util.strclass(test.__class__)}")
			logger.info(f"{unittest.util.strclass(test.__class__)}")
			# Report test records the class created while it was set up.
			if new_doctypes := getattr(test.__class__, "_newly_created_test_records", None):
				records = [f"{name} ({qty})" for name, qty in reversed(new_doctypes)]
				click.secho(
					f" Test Records created: {', '.join(records)}",
					fg="bright_black",
				)
				logger.info(f"records created: {', '.join(records)}")

	def getTestMethodName(self, test):
		# Fall back to str(test) for items that are not standard TestCases.
		return test._testMethodName if hasattr(test, "_testMethodName") else str(test)

	def addSuccess(self, test):
		super(unittest.TextTestResult, self).addSuccess(test)
		elapsed = time.monotonic() - self._started_at
		# Style the duration in red when the test exceeded the slow threshold.
		threshold_passed = elapsed >= SLOW_TEST_THRESHOLD
		long_elapsed = click.style(f" ({elapsed:.03}s)", fg="red") if threshold_passed else ""
		click.echo(f" {click.style(' ✔ ', fg='green')} {self.getTestMethodName(test)}{long_elapsed}")
		logger.debug(f"{test!s:<200} {'[success]':>20} ⌛{elapsed}")

	def addError(self, test, err):
		super(unittest.TextTestResult, self).addError(test, err)
		click.echo(f" {click.style(' ✖ ', fg='red')} {self.getTestMethodName(test)}")
		logger.debug(f"{test!s:<200} {'[error]':>20}")

	def addFailure(self, test, err):
		super(unittest.TextTestResult, self).addFailure(test, err)
		click.echo(f" {click.style(' ✖ ', fg='red')} {self.getTestMethodName(test)}")
		logger.debug(f"{test!s:<200} {'[failure]':>20}")

	def addSkip(self, test, reason):
		super(unittest.TextTestResult, self).addSkip(test, reason)
		click.echo(f" {click.style(' = ', fg='white')} {self.getTestMethodName(test)}")
		logger.debug(f"{test!s:<200} {'[skipped]':>20}")

	def addExpectedFailure(self, test, err):
		super(unittest.TextTestResult, self).addExpectedFailure(test, err)
		click.echo(f" {click.style(' ✖ ', fg='red')} {self.getTestMethodName(test)}")
		logger.debug(f"{test!s:<200} {'[expected failure]':>20}")

	def addUnexpectedSuccess(self, test):
		super(unittest.TextTestResult, self).addUnexpectedSuccess(test)
		click.echo(f" {click.style(' ✔ ', fg='green')} {self.getTestMethodName(test)}")
		logger.debug(f"{test!s:<200} {'[unexpected success]':>20}")

	def printErrors(self):
		# Summarise all collected errors and failures at the end of the run.
		click.echo("\n")
		self.printErrorList(" ERROR ", self.errors, "red")
		self.printErrorList(" FAIL ", self.failures, "red")

	def printErrorList(self, flavour, errors, color):
		# One separator-framed block per failed/errored test, with traceback.
		for test, err in errors:
			click.echo(self.separator1)
			click.echo(f"{click.style(flavour, bg=color)} {self.getDescription(test)}")
			click.echo(self.separator2)
			click.echo(err)

	def __str__(self):
		return f"Tests: {self.testsRun}, Failing: {len(self.failures)}, Errors: {len(self.errors)}"
|
||||
|
||||
|
||||
# Per-test duration (in seconds, measured with time.monotonic) above which
# TestResult.addSuccess highlights the elapsed time in red.
SLOW_TEST_THRESHOLD = 2
|
||||
138
frappe/testing/runner.py
Normal file
138
frappe/testing/runner.py
Normal file
|
|
@ -0,0 +1,138 @@
|
|||
"""
|
||||
This module contains the TestRunner class, which is responsible for executing test suites in Frappe.
|
||||
|
||||
The TestRunner class extends unittest.TextTestRunner and provides additional functionality:
|
||||
- Categorization of tests (unit, integration, functional)
|
||||
- Priority-based execution of test categories
|
||||
- Profiling capabilities
|
||||
- Integration with Frappe's configuration and environment setup
|
||||
|
||||
Key components:
|
||||
- TestRunner: The main class for running tests
|
||||
- CATEGORY_PRIORITIES: A dictionary defining the execution order of test categories
|
||||
- Various utility methods for test preparation, profiling, and iteration
|
||||
|
||||
Usage:
|
||||
The TestRunner is typically instantiated and used by Frappe's test discovery and execution system.
|
||||
It can be customized through the TestConfig object passed during initialization.
|
||||
|
||||
"""
|
||||
|
||||
import contextlib
|
||||
import cProfile
|
||||
import logging
|
||||
import os
|
||||
import pstats
|
||||
import unittest
|
||||
from collections import defaultdict
|
||||
from collections.abc import Iterator
|
||||
from io import StringIO
|
||||
from pathlib import Path
|
||||
|
||||
import click
|
||||
|
||||
import frappe
|
||||
|
||||
from .config import TestConfig
|
||||
from .discovery import TestRunnerError
|
||||
from .environment import IntegrationTestPreparation
|
||||
from .result import TestResult
|
||||
|
||||
logger = logging.getLogger(__name__)

# Execution order of test categories: lower value runs earlier.
# TestRunner.iterRun sorts each app's categories ascending by this value;
# categories not listed here sort last (priority = +inf).
CATEGORY_PRIORITIES = {
	"unit": 1,
	"integration": 2,
	"functional": 3,
	# Add more categories and their priorities as needed
}
|
||||
|
||||
|
||||
class TestRunner(unittest.TextTestRunner):
	"""Runner that executes suites grouped per app and per test category.

	Suites are registered into ``per_app_categories`` (app -> category ->
	TestSuite) by the discovery code; :meth:`iterRun` then yields each
	non-empty (app, category, suite) triple in CATEGORY_PRIORITIES order,
	running category-specific preparation and optional cProfile profiling
	around each one.
	"""

	def __init__(
		self,
		stream=None,
		descriptions=True,
		verbosity=1,
		failfast=False,
		buffer=False,
		resultclass=None,
		warnings=None,
		*,
		tb_locals=False,
		cfg: TestConfig,
	):
		# NOTE: the positional ``failfast`` argument is intentionally ignored;
		# cfg.failfast is authoritative for this runner.
		super().__init__(
			stream=stream,
			descriptions=descriptions,
			verbosity=verbosity,
			failfast=cfg.failfast,
			buffer=buffer,
			resultclass=resultclass or TestResult,
			warnings=warnings,
			tb_locals=tb_locals,
		)
		self.cfg = cfg
		# app -> category -> TestSuite; filled externally before iterRun.
		self.per_app_categories = defaultdict(lambda: defaultdict(unittest.TestSuite))
		self.integration_preparation = IntegrationTestPreparation(cfg)
		logger.debug("TestRunner initialized")

	def iterRun(self) -> Iterator[tuple[str, str, unittest.TestSuite]]:
		"""Yield (app, category, suite) triples ready for execution."""
		for app, categories in self.per_app_categories.items():
			sorted_categories = sorted(
				categories.items(), key=lambda x: CATEGORY_PRIORITIES.get(x[0], float("inf"))
			)
			for category, suite in sorted_categories:
				if not self._has_tests(suite):
					logger.debug(f"no tests for: {app}, {category}")
					continue

				self._prepare_category(category, suite, app)
				self._apply_debug_decorators(suite)

				with self._profile():
					logger.info(f"Starting tests for app: {app}, category: {category}")
					yield app, category, suite

	def _has_tests(self, suite):
		"""Return True when the (possibly nested) suite contains at least one test."""
		return next(self._iterate_suite(suite), None) is not None

	def _prepare_category(self, category, suite, app):
		"""Run category-specific environment preparation, if any is registered."""
		dispatcher = {
			"integration": self.integration_preparation,
			# Add other categories here as needed
		}
		prepare_method = dispatcher.get(category.lower())
		if prepare_method:
			prepare_method(suite, app, category)
		else:
			logger.debug(f"Unknown test category: {category}. No specific preparation performed.")

	def _apply_debug_decorators(self, suite):
		"""Attach the configured post-mortem debugger to every test that supports it."""
		if self.cfg.pdb_on_exceptions:
			for test in self._iterate_suite(suite):
				if hasattr(test, "_apply_debug_decorator"):
					test._apply_debug_decorator(self.cfg.pdb_on_exceptions)

	@contextlib.contextmanager
	def _profile(self):
		"""Profile the wrapped block with cProfile when cfg.profile is set.

		BUGFIX: the disable/report step now runs in a ``finally`` clause, so
		the profiler is stopped and its stats are printed even when the suite
		raises. The flag is also read once up front, so a mid-run change to
		cfg.profile can no longer leave ``pr`` unbound after the yield.
		"""
		if not self.cfg.profile:
			yield
			return
		logger.debug("profiling enabled")
		pr = cProfile.Profile()
		pr.enable()
		try:
			yield
		finally:
			pr.disable()
			s = StringIO()
			ps = pstats.Stats(pr, stream=s).sort_stats("cumulative")
			ps.print_stats()
			print(s.getvalue())

	@staticmethod
	def _iterate_suite(suite):
		"""Depth-first iteration over all TestCase instances in a nested suite."""
		for test in suite:
			if isinstance(test, unittest.TestSuite):
				yield from TestRunner._iterate_suite(test)
			elif isinstance(test, unittest.TestCase):
				yield test
|
||||
17
frappe/testing/utils.py
Normal file
17
frappe/testing/utils.py
Normal file
|
|
@ -0,0 +1,17 @@
|
|||
import logging
|
||||
import time
|
||||
from functools import wraps
|
||||
|
||||
logger = logging.getLogger(__name__)


def debug_timer(func):
	"""Decorator that logs each call's wall-clock duration at DEBUG level."""

	@wraps(func)
	def timed(*args, **kwargs):
		began = time.monotonic()
		outcome = func(*args, **kwargs)
		elapsed = time.monotonic() - began
		logger.debug(f" {func.__name__:<50} ⌛{elapsed:>6.3f} seconds")
		return outcome

	return timed
|
||||
|
|
@ -129,7 +129,7 @@ def _make_test_records_for_doctype(doctype, force=False, commit=False):
|
|||
test_records = frappe.get_test_records(doctype)
|
||||
if test_records:
|
||||
yield from _make_test_objects(doctype, test_records, force, commit=commit)
|
||||
elif logger.getEffectiveLevel() < logging.INFO:
|
||||
else:
|
||||
print_mandatory_fields(doctype)
|
||||
|
||||
test_record_log_instance.add(doctype)
|
||||
|
|
@ -203,13 +203,13 @@ def _make_test_objects(doctype, test_records=None, reset=False, commit=False):
|
|||
def print_mandatory_fields(doctype):
|
||||
"""Print mandatory fields for the specified doctype"""
|
||||
meta = frappe.get_meta(doctype)
|
||||
logger.debug(f"Please setup make_test_records for: {doctype}")
|
||||
logger.debug("-" * 60)
|
||||
logger.debug(f"Autoname: {meta.autoname or ''}")
|
||||
logger.debug("Mandatory Fields:")
|
||||
logger.warning(f"Please setup make_test_records for: {doctype}")
|
||||
logger.warning("-" * 60)
|
||||
logger.warning(f"Autoname: {meta.autoname or ''}")
|
||||
logger.warning("Mandatory Fields:")
|
||||
for d in meta.get("fields", {"reqd": 1}):
|
||||
logger.debug(f" - {d.parent}:{d.fieldname} | {d.fieldtype} | {d.options or ''}")
|
||||
logger.debug("")
|
||||
logger.warning(f" - {d.parent}:{d.fieldname} | {d.fieldtype} | {d.options or ''}")
|
||||
logger.warning("")
|
||||
|
||||
|
||||
class TestRecordLog:
|
||||
|
|
|
|||
|
|
@ -35,7 +35,7 @@ def FrappeClickWrapper(cls, handler):
|
|||
def make_context(self, info_name, args, parent=None, **extra):
|
||||
try:
|
||||
return super().make_context(info_name, args, parent=parent, **extra)
|
||||
except click.ClickException as e:
|
||||
except (click.ClickException, click.exceptions.Exit, click.exceptions.Abort) as e:
|
||||
raise e
|
||||
except Exception as exc:
|
||||
# call the handler
|
||||
|
|
|
|||
|
|
@ -13,6 +13,28 @@ default_log_level = logging.WARNING if frappe._dev_server else logging.ERROR
|
|||
stream_logging = os.environ.get("FRAPPE_STREAM_LOGGING")
|
||||
|
||||
|
||||
def create_handler(module, site=None, max_size=100_000, file_count=20, stream_only=False):
	"""Build the logging handler(s) for *module*.

	Returns a one-element list holding a StreamHandler when ``stream_only``
	is set; otherwise a RotatingFileHandler writing to ``../logs/<module>.log``,
	plus a second rotating handler under ``<site>/logs`` when ``site`` is given.
	All handlers share the same ``asctime levelname module message`` format.
	"""
	formatter = logging.Formatter(f"%(asctime)s %(levelname)s {module} %(message)s")

	if stream_only:
		stream_handler = logging.StreamHandler()
		stream_handler.setFormatter(formatter)
		return [stream_handler]

	logfile = f"{module}.log"
	handlers = [
		RotatingFileHandler(
			os.path.join("..", "logs", logfile), maxBytes=max_size, backupCount=file_count
		)
	]
	if site:
		handlers.append(
			RotatingFileHandler(
				os.path.join(site, "logs", logfile), maxBytes=max_size, backupCount=file_count
			)
		)
	for each in handlers:
		each.setFormatter(formatter)
	return handlers
|
||||
|
||||
|
||||
def get_logger(
|
||||
module=None,
|
||||
with_more_info=False,
|
||||
|
|
@ -54,29 +76,16 @@ def get_logger(
|
|||
module = "frappe"
|
||||
with_more_info = True
|
||||
|
||||
logfile = module + ".log"
|
||||
log_filename = os.path.join("..", "logs", logfile)
|
||||
|
||||
logger = logging.getLogger(logger_name)
|
||||
logger.setLevel(frappe.log_level or default_log_level)
|
||||
logger.propagate = False
|
||||
|
||||
formatter = logging.Formatter(f"%(asctime)s %(levelname)s {module} %(message)s")
|
||||
if stream_only:
|
||||
handler = logging.StreamHandler()
|
||||
else:
|
||||
handler = RotatingFileHandler(log_filename, maxBytes=max_size, backupCount=file_count)
|
||||
handler.setFormatter(formatter)
|
||||
logger.addHandler(handler)
|
||||
|
||||
if site and not stream_only:
|
||||
sitelog_filename = os.path.join(site, "logs", logfile)
|
||||
site_handler = RotatingFileHandler(sitelog_filename, maxBytes=max_size, backupCount=file_count)
|
||||
site_handler.setFormatter(formatter)
|
||||
logger.addHandler(site_handler)
|
||||
handlers = create_handler(module, site, max_size, file_count, stream_only)
|
||||
for handler in handlers:
|
||||
logger.addHandler(handler)
|
||||
|
||||
if with_more_info:
|
||||
handler.addFilter(SiteContextFilter())
|
||||
handlers[0].addFilter(SiteContextFilter())
|
||||
|
||||
if filter:
|
||||
logger.addFilter(filter)
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue