import os
import copy
import logging
import pytest

from .. import Environment
from ..consoleloggingreporter import ConsoleLoggingReporter
from ..util.helper import processwrapper
from ..logging import StepFormatter, StepLogger
from ..exceptions import NoStrategyFoundError

LABGRID_ENV_KEY = pytest.StashKey[Environment]()
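# Note: this stash entry is expected to be populated (possibly with None)
# elsewhere in the plugin, typically in a pytest_configure hook; the hooks
# below therefore index the stash directly and bail out on a falsy value.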
@pytest.hookimpl(tryfirst=True)
def pytest_cmdline_main(config):
    def set_cli_log_level(level):
        nonlocal config

        try:
            current_level = config.getoption("log_cli_level") or config.getini("log_cli_level")
        except ValueError:
            return
        if isinstance(current_level, str):
            try:
                current_level = int(logging.getLevelName(current_level))
            except ValueError:
                current_level = None

        # If no level was set previously (via ini or cli) or current_level is
        # less verbose than level, set to new level.
        if current_level is None or level < current_level:
            config.option.log_cli_level = str(level)

    verbosity = config.getoption("verbose")
    if verbosity > 3:  # enable with -vvvv
        set_cli_log_level(logging.DEBUG)
    elif verbosity > 2:  # enable with -vvv
        set_cli_log_level(logging.CONSOLE)
    elif verbosity > 1:  # enable with -vv
        set_cli_log_level(logging.INFO)
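
# Illustration of the hook above: raising pytest's verbosity also raises the
# live-log level, unless a more verbose level was already configured via
# --log-cli-level or the ini file:
#
#   pytest -vv    # roughly: --log-cli-level=INFO
#   pytest -vvv   # roughly: --log-cli-level=CONSOLE (a labgrid-specific level
#                 # registered on the logging module, not part of the stdlib)
#   pytest -vvvv  # roughly: --log-cli-level=DEBUG
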
@pytest.hookimpl()
def pytest_collection_modifyitems(config, items):
"""This function matches function feature flags with those found in the
environment and disables the item if no match is found"""
env = config.stash[LABGRID_ENV_KEY]
if not env:
return
have_feature = env.get_features() | env.get_target_features()

    for item in items:
        # pytest.mark.lg_feature
        lg_feature_signature = "pytest.mark.lg_feature(features: str | list[str])"
        want_feature = set()
        for marker in item.iter_markers("lg_feature"):
            if len(marker.args) != 1 or marker.kwargs:
                raise pytest.UsageError(f"Unexpected number of args/kwargs for {lg_feature_signature}")
            elif isinstance(marker.args[0], str):
                want_feature.add(marker.args[0])
            elif isinstance(marker.args[0], list):
                want_feature.update(marker.args[0])
            else:
                raise pytest.UsageError(
                    f"Unsupported 'features' argument type ({type(marker.args[0])}) for {lg_feature_signature}"
                )

        missing_feature = want_feature - have_feature
        if missing_feature:
            reason = f'unsupported feature(s): {", ".join(missing_feature)}'
            item.add_marker(pytest.mark.skip(reason=reason))
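
        # Example (hypothetical test module, not part of this file): with
        # `features: [camera]` in the environment or target config, the first
        # test below runs and the second is skipped with
        # 'unsupported feature(s): gpio':
        #
        #   @pytest.mark.lg_feature("camera")
        #   def test_snapshot(target): ...
        #
        #   @pytest.mark.lg_feature(["camera", "gpio"])
        #   def test_trigger(target): ...
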
        # pytest.mark.lg_xfail_feature
        lg_xfail_feature_signature = (
            "pytest.mark.lg_xfail_feature(feature: str, **xfail_kwargs), "
            "xfail_kwargs as pytest.mark.xfail expects them"
        )
        for marker in item.iter_markers("lg_xfail_feature"):
            if len(marker.args) != 1:
                raise pytest.UsageError(f"Unexpected number of arguments for {lg_xfail_feature_signature}")
            elif not isinstance(marker.args[0], str):
                raise pytest.UsageError(
                    f"Unsupported 'feature' argument type {type(marker.args[0])} for {lg_xfail_feature_signature}"
                )
            if "condition" in marker.kwargs:
                raise pytest.UsageError(f"Unsupported 'condition' argument for {lg_xfail_feature_signature}")

            kwargs = copy.copy(marker.kwargs)
            reason = kwargs.pop("reason", marker.args[0])
            # xfail the test only when the required feature is missing from the
            # environment; with the feature present, the condition is False and
            # the marker has no effect
            item.add_marker(
                pytest.mark.xfail(
                    condition=marker.args[0] not in have_feature,
                    reason=reason,
                    **kwargs,
                )
            )
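
# Example (hypothetical, not part of this file): expect a failure when the
# environment lacks a feature, forwarding extra keyword arguments to
# pytest.mark.xfail:
#
#   @pytest.mark.lg_xfail_feature("usb-otg", reason="needs OTG wiring", strict=True)
#   def test_gadget_mode(target): ...
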
@pytest.hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
"""
Skip test if one of the targets uses a strategy considered broken.
"""
# Before any fixtures run for the test, check if the session-scoped strategy fixture was
# requested (might have been executed already for a prior test). If that's the case and the
# strategy is broken, skip the test.
if "strategy" in item.fixturenames:
env = item.config.stash[LABGRID_ENV_KEY]
# skip test even if only one of the targets in the env has a broken strategy
for target_name in env.config.get_targets():
target = env.get_target(target_name)
try:
strategy = target.get_strategy()
if strategy.broken:
pytest.skip(f"{strategy.__class__.__name__} is in broken state")
except NoStrategyFoundError:
pass
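
# Sketch (an assumption, not the actual labgrid Strategy API): the hook above
# only checks a truthy `broken` attribute, so a strategy that flags itself
# after a failed transition makes the remaining tests on that target skip
# instead of failing one by one:
#
#   class ExampleStrategy(Strategy):          # hypothetical strategy class
#       def transition(self, state):
#           try:
#               self._do_transition(state)    # hypothetical helper
#           except Exception:
#               self.broken = f"transition to {state} failed"
#               raise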