conftest.py 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763
# -*- coding: utf-8 -*-
"""
:codeauthor: Pedro Algarvio (pedro@algarvio.me)
tests.conftest
~~~~~~~~~~~~~~
Prepare py.test for our test suite
"""
# pylint: disable=wrong-import-order,wrong-import-position,3rd-party-local-module-not-gated
# pylint: disable=redefined-outer-name,invalid-name,3rd-party-module-not-gated
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import pathlib
import pprint
import re
import sys
from functools import partial, wraps
import _pytest.logging
import _pytest.skipping
import psutil
import pytest
import salt.config
import salt.loader
import salt.log.mixins
import salt.log.setup
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.win_functions
import saltfactories.utils.compat
from _pytest.mark.evaluate import MarkEvaluator
from salt.serializers import yaml
from tests.support.helpers import PRE_PYTEST_SKIP_OR_NOT, PRE_PYTEST_SKIP_REASON
from tests.support.pytest.helpers import *  # pylint: disable=unused-wildcard-import
from tests.support.runtests import RUNTIME_VARS
from tests.support.sminion import check_required_sminion_attributes, create_sminion

# Base directories used throughout this file: TESTS_DIR is the directory
# containing this conftest, CODE_DIR is the repository checkout root.
TESTS_DIR = pathlib.Path(__file__).resolve().parent
CODE_DIR = TESTS_DIR.parent
# Change to code checkout directory
os.chdir(str(CODE_DIR))
# Make sure the current directory is the first item in sys.path
if str(CODE_DIR) in sys.path:
    sys.path.remove(str(CODE_DIR))
sys.path.insert(0, str(CODE_DIR))
# Coverage: honor an externally provided COVERAGE_PROCESS_START, otherwise
# fall back to the repository's .coveragerc and enable coverage only when
# this looks like a coverage-tracked pytest run.
if "COVERAGE_PROCESS_START" in os.environ:
    MAYBE_RUN_COVERAGE = True
    COVERAGERC_FILE = os.environ["COVERAGE_PROCESS_START"]
else:
    COVERAGERC_FILE = str(CODE_DIR / ".coveragerc")
    MAYBE_RUN_COVERAGE = (
        sys.argv[0].endswith("pytest.py") or "_COVERAGE_RCFILE" in os.environ
    )
    if MAYBE_RUN_COVERAGE:
        # Flag coverage to track subprocesses by pointing it to the right .coveragerc file
        os.environ[str("COVERAGE_PROCESS_START")] = str(COVERAGERC_FILE)
# Define the pytest plugins we rely on
pytest_plugins = ["tempdir", "helpers_namespace"]
# Define where not to collect tests from
collect_ignore = ["setup.py"]
# Patch PyTest logging handlers
class LogCaptureHandler(
    salt.log.mixins.ExcInfoOnLogLevelFormatMixIn, _pytest.logging.LogCaptureHandler
):
    """
    Subclassing PyTest's LogCaptureHandler in order to add the
    exc_info_on_loglevel functionality and actually make it a NullHandler,
    it's only used to print log messages emitted during tests, which we
    have explicitly disabled in pytest.ini
    """


# Replace PyTest's capture handler class so the mixin-enhanced one is used
_pytest.logging.LogCaptureHandler = LogCaptureHandler
class LiveLoggingStreamHandler(
    salt.log.mixins.ExcInfoOnLogLevelFormatMixIn,
    _pytest.logging._LiveLoggingStreamHandler,
):
    """
    Subclassing PyTest's LiveLoggingStreamHandler in order to add the
    exc_info_on_loglevel functionality.
    """


# Replace PyTest's live-logging stream handler with the mixin-enhanced one
_pytest.logging._LiveLoggingStreamHandler = LiveLoggingStreamHandler
  81. # Reset logging root handlers
  82. for handler in logging.root.handlers[:]:
  83. logging.root.removeHandler(handler)
  84. # Reset the root logger to its default level(because salt changed it)
  85. logging.root.setLevel(logging.WARNING)
  86. log = logging.getLogger("salt.testsuite")
  87. # ----- PyTest Tempdir Plugin Hooks --------------------------------------------------------------------------------->
  88. def pytest_tempdir_basename():
  89. """
  90. Return the temporary directory basename for the salt test suite.
  91. """
  92. return "salt-tests-tmpdir"
  93. # <---- PyTest Tempdir Plugin Hooks ----------------------------------------------------------------------------------
  94. # ----- CLI Options Setup ------------------------------------------------------------------------------------------->
  95. def pytest_addoption(parser):
  96. """
  97. register argparse-style options and ini-style config values.
  98. """
  99. test_selection_group = parser.getgroup("Tests Selection")
  100. test_selection_group.addoption(
  101. "--from-filenames",
  102. default=None,
  103. help=(
  104. "Pass a comma-separated list of file paths, and any test module which corresponds to the "
  105. "specified file(s) will run. For example, if 'setup.py' was passed, then the corresponding "
  106. "test files defined in 'tests/filename_map.yml' would run. Absolute paths are assumed to be "
  107. "files containing relative paths, one per line. Providing the paths in a file can help get "
  108. "around shell character limits when the list of files is long."
  109. ),
  110. )
  111. # Add deprecated CLI flag until we completely switch to PyTest
  112. test_selection_group.addoption(
  113. "--names-file", default=None, help="Deprecated option"
  114. )
  115. test_selection_group.addoption(
  116. "--transport",
  117. default="zeromq",
  118. choices=("zeromq", "tcp"),
  119. help=(
  120. "Select which transport to run the integration tests with, zeromq or tcp. Default: %default"
  121. ),
  122. )
  123. test_selection_group.addoption(
  124. "--ssh",
  125. "--ssh-tests",
  126. dest="ssh",
  127. action="store_true",
  128. default=False,
  129. help="Run salt-ssh tests. These tests will spin up a temporary "
  130. "SSH server on your machine. In certain environments, this "
  131. "may be insecure! Default: False",
  132. )
  133. test_selection_group.addoption(
  134. "--proxy",
  135. "--proxy-tests",
  136. dest="proxy",
  137. action="store_true",
  138. default=False,
  139. help="Run proxy tests",
  140. )
  141. test_selection_group.addoption(
  142. "--run-slow", action="store_true", default=False, help="Run slow tests.",
  143. )
  144. output_options_group = parser.getgroup("Output Options")
  145. output_options_group.addoption(
  146. "--output-columns",
  147. default=80,
  148. type=int,
  149. help="Number of maximum columns to use on the output",
  150. )
  151. output_options_group.addoption(
  152. "--no-colors",
  153. "--no-colours",
  154. default=False,
  155. action="store_true",
  156. help="Disable colour printing.",
  157. )
  158. # ----- Test Groups --------------------------------------------------------------------------------------------->
  159. # This will allow running the tests in chunks
  160. test_selection_group.addoption(
  161. "--test-group-count",
  162. dest="test-group-count",
  163. type=int,
  164. help="The number of groups to split the tests into",
  165. )
  166. test_selection_group.addoption(
  167. "--test-group",
  168. dest="test-group",
  169. type=int,
  170. help="The group of tests that should be executed",
  171. )
  172. # <---- Test Groups ----------------------------------------------------------------------------------------------
  173. # <---- CLI Options Setup --------------------------------------------------------------------------------------------
  174. # ----- Register Markers -------------------------------------------------------------------------------------------->
  175. @pytest.mark.trylast
  176. def pytest_configure(config):
  177. """
  178. called after command line options have been parsed
  179. and all plugins and initial conftest files been loaded.
  180. """
  181. for dirname in CODE_DIR.iterdir():
  182. if not dirname.is_dir():
  183. continue
  184. if dirname != TESTS_DIR:
  185. config.addinivalue_line("norecursedirs", str(CODE_DIR / dirname))
  186. # Expose the markers we use to pytest CLI
  187. config.addinivalue_line(
  188. "markers",
  189. "requires_salt_modules(*required_module_names): Skip if at least one module is not available.",
  190. )
  191. config.addinivalue_line(
  192. "markers",
  193. "requires_salt_states(*required_state_names): Skip if at least one state module is not available.",
  194. )
  195. config.addinivalue_line(
  196. "markers", "windows_whitelisted: Mark test as whitelisted to run under Windows"
  197. )
  198. # Make sure the test suite "knows" this is a pytest test run
  199. RUNTIME_VARS.PYTEST_SESSION = True
  200. # "Flag" the slotTest decorator if we're skipping slow tests or not
  201. os.environ["SLOW_TESTS"] = str(config.getoption("--run-slow"))
  202. # <---- Register Markers ---------------------------------------------------------------------------------------------
  203. # ----- PyTest Tweaks ----------------------------------------------------------------------------------------------->
  204. def set_max_open_files_limits(min_soft=3072, min_hard=4096):
  205. # Get current limits
  206. if salt.utils.platform.is_windows():
  207. import win32file
  208. prev_hard = win32file._getmaxstdio()
  209. prev_soft = 512
  210. else:
  211. import resource
  212. prev_soft, prev_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
  213. # Check minimum required limits
  214. set_limits = False
  215. if prev_soft < min_soft:
  216. soft = min_soft
  217. set_limits = True
  218. else:
  219. soft = prev_soft
  220. if prev_hard < min_hard:
  221. hard = min_hard
  222. set_limits = True
  223. else:
  224. hard = prev_hard
  225. # Increase limits
  226. if set_limits:
  227. log.debug(
  228. " * Max open files settings is too low (soft: %s, hard: %s) for running the tests. "
  229. "Trying to raise the limits to soft: %s, hard: %s",
  230. prev_soft,
  231. prev_hard,
  232. soft,
  233. hard,
  234. )
  235. try:
  236. if salt.utils.platform.is_windows():
  237. hard = 2048 if hard > 2048 else hard
  238. win32file._setmaxstdio(hard)
  239. else:
  240. resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
  241. except Exception as err: # pylint: disable=broad-except
  242. log.error(
  243. "Failed to raise the max open files settings -> %s. Please issue the following command "
  244. "on your console: 'ulimit -u %s'",
  245. err,
  246. soft,
  247. )
  248. exit(1)
  249. return soft, hard
  250. def pytest_report_header():
  251. soft, hard = set_max_open_files_limits()
  252. return "max open files; soft: {}; hard: {}".format(soft, hard)
  253. @pytest.hookimpl(hookwrapper=True, trylast=True)
  254. def pytest_collection_modifyitems(config, items):
  255. """
  256. called after collection has been performed, may filter or re-order
  257. the items in-place.
  258. :param _pytest.main.Session session: the pytest session object
  259. :param _pytest.config.Config config: pytest config object
  260. :param List[_pytest.nodes.Item] items: list of item objects
  261. """
  262. # Let PyTest or other plugins handle the initial collection
  263. yield
  264. groups_collection_modifyitems(config, items)
  265. from_filenames_collection_modifyitems(config, items)
  266. log.warning("Mofifying collected tests to keep track of fixture usage")
  267. for item in items:
  268. for fixture in item.fixturenames:
  269. if fixture not in item._fixtureinfo.name2fixturedefs:
  270. continue
  271. for fixturedef in item._fixtureinfo.name2fixturedefs[fixture]:
  272. if fixturedef.scope == "function":
  273. continue
  274. try:
  275. node_ids = fixturedef.node_ids
  276. except AttributeError:
  277. node_ids = fixturedef.node_ids = set()
  278. node_ids.add(item.nodeid)
  279. try:
  280. fixturedef.finish.__wrapped__
  281. except AttributeError:
  282. original_func = fixturedef.finish
  283. def wrapper(func, fixturedef):
  284. @wraps(func)
  285. def wrapped(self, request):
  286. try:
  287. return self._finished
  288. except AttributeError:
  289. if self.node_ids:
  290. if (
  291. not request.session.shouldfail
  292. and not request.session.shouldstop
  293. ):
  294. log.debug(
  295. "%s is still going to be used, not terminating it. "
  296. "Still in use on:\n%s",
  297. self,
  298. pprint.pformat(list(self.node_ids)),
  299. )
  300. return
  301. log.debug("Finish called on %s", self)
  302. try:
  303. return func(request)
  304. finally:
  305. self._finished = True
  306. return partial(wrapped, fixturedef)
  307. fixturedef.finish = wrapper(fixturedef.finish, fixturedef)
  308. try:
  309. fixturedef.finish.__wrapped__
  310. except AttributeError:
  311. fixturedef.finish.__wrapped__ = original_func
@pytest.hookimpl(trylast=True, hookwrapper=True)
def pytest_runtest_protocol(item, nextitem):
    """
    implements the runtest_setup/call/teardown protocol for
    the given test item, including capturing exceptions and calling
    reporting hooks.

    :arg item: test item for which the runtest protocol is performed.
    :arg nextitem: the scheduled-to-be-next test item (or None if this
        is the end my friend). This argument is passed on to
        :py:func:`pytest_runtest_teardown`.
    :return boolean: True if no further hook implementations should be invoked.
        Stops at first non-None result, see :ref:`firstresult`
    """
    # Collect every non-function-scoped fixture definition used by this item.
    # pytest_collection_modifyitems tracked their usage via fixturedef.node_ids.
    request = item._request
    used_fixture_defs = []
    for fixture in item.fixturenames:
        if fixture not in item._fixtureinfo.name2fixturedefs:
            continue
        for fixturedef in reversed(item._fixtureinfo.name2fixturedefs[fixture]):
            if fixturedef.scope == "function":
                continue
            used_fixture_defs.append(fixturedef)
    try:
        # Run the test
        yield
    finally:
        # Un-register this test from each tracked fixturedef; once no tests
        # remain, trigger the (wrapped) fixture teardown immediately.
        for fixturedef in used_fixture_defs:
            if item.nodeid in fixturedef.node_ids:
                fixturedef.node_ids.remove(item.nodeid)
            if not fixturedef.node_ids:
                # This fixture is not used in any more test functions
                fixturedef.finish(request)
    # Drop local references to help garbage collection
    del request
    del used_fixture_defs
def pytest_runtest_teardown(item, nextitem):
    """
    called after ``pytest_runtest_call``.

    :arg nextitem: the scheduled-to-be-next test item (None if no further
        test item is scheduled). This argument can be used to
        perform exact teardowns, i.e. calling just enough finalizers
        so that nextitem only needs to call setup-functions.
    """
    # PyTest doesn't reset the capturing log handler when done with it.
    # Reset it to free used memory and python objects
    # We currently have PyTest's log_print setting set to false, if it was
    # set to true, the call below would make PyTest not print any logs at all.
    item.catch_log_handler.reset()
  359. # <---- PyTest Tweaks ------------------------------------------------------------------------------------------------
  360. # ----- Test Setup -------------------------------------------------------------------------------------------------->
  361. @pytest.hookimpl(tryfirst=True)
  362. def pytest_runtest_setup(item):
  363. """
  364. Fixtures injection based on markers or test skips based on CLI arguments
  365. """
  366. integration_utils_tests_path = str(CODE_DIR / "tests" / "integration" / "utils")
  367. if (
  368. str(item.fspath).startswith(integration_utils_tests_path)
  369. and PRE_PYTEST_SKIP_OR_NOT is True
  370. ):
  371. item._skipped_by_mark = True
  372. pytest.skip(PRE_PYTEST_SKIP_REASON)
  373. if saltfactories.utils.compat.has_unittest_attr(item, "__slow_test__"):
  374. if item.config.getoption("--run-slow") is False:
  375. item._skipped_by_mark = True
  376. pytest.skip("Slow tests are disabled!")
  377. requires_salt_modules_marker = item.get_closest_marker("requires_salt_modules")
  378. if requires_salt_modules_marker is not None:
  379. required_salt_modules = requires_salt_modules_marker.args
  380. if len(required_salt_modules) == 1 and isinstance(
  381. required_salt_modules[0], (list, tuple, set)
  382. ):
  383. required_salt_modules = required_salt_modules[0]
  384. required_salt_modules = set(required_salt_modules)
  385. not_available_modules = check_required_sminion_attributes(
  386. "functions", required_salt_modules
  387. )
  388. if not_available_modules:
  389. item._skipped_by_mark = True
  390. if len(not_available_modules) == 1:
  391. pytest.skip(
  392. "Salt module '{}' is not available".format(*not_available_modules)
  393. )
  394. pytest.skip(
  395. "Salt modules not available: {}".format(
  396. ", ".join(not_available_modules)
  397. )
  398. )
  399. requires_salt_states_marker = item.get_closest_marker("requires_salt_states")
  400. if requires_salt_states_marker is not None:
  401. required_salt_states = requires_salt_states_marker.args
  402. if len(required_salt_states) == 1 and isinstance(
  403. required_salt_states[0], (list, tuple, set)
  404. ):
  405. required_salt_states = required_salt_states[0]
  406. required_salt_states = set(required_salt_states)
  407. not_available_states = check_required_sminion_attributes(
  408. "states", required_salt_states
  409. )
  410. if not_available_states:
  411. item._skipped_by_mark = True
  412. if len(not_available_states) == 1:
  413. pytest.skip(
  414. "Salt state module '{}' is not available".format(
  415. *not_available_states
  416. )
  417. )
  418. pytest.skip(
  419. "Salt state modules not available: {}".format(
  420. ", ".join(not_available_states)
  421. )
  422. )
  423. if salt.utils.platform.is_windows():
  424. if not item.fspath.fnmatch(str(CODE_DIR / "tests" / "unit" / "*")):
  425. # Unit tests are whitelisted on windows by default, so, we're only
  426. # after all other tests
  427. windows_whitelisted_marker = item.get_closest_marker("windows_whitelisted")
  428. if windows_whitelisted_marker is None:
  429. item._skipped_by_mark = True
  430. pytest.skip("Test is not whitelisted for Windows")
  431. # <---- Test Setup ---------------------------------------------------------------------------------------------------
  432. # ----- Test Groups Selection --------------------------------------------------------------------------------------->
  433. def get_group_size_and_start(total_items, total_groups, group_id):
  434. """
  435. Calculate group size and start index.
  436. """
  437. base_size = total_items // total_groups
  438. rem = total_items % total_groups
  439. start = base_size * (group_id - 1) + min(group_id - 1, rem)
  440. size = base_size + 1 if group_id <= rem else base_size
  441. return (start, size)
  442. def get_group(items, total_groups, group_id):
  443. """
  444. Get the items from the passed in group based on group size.
  445. """
  446. if not 0 < group_id <= total_groups:
  447. raise ValueError("Invalid test-group argument")
  448. start, size = get_group_size_and_start(len(items), total_groups, group_id)
  449. selected = items[start : start + size]
  450. deselected = items[:start] + items[start + size :]
  451. assert len(selected) + len(deselected) == len(items)
  452. return selected, deselected
  453. def groups_collection_modifyitems(config, items):
  454. group_count = config.getoption("test-group-count")
  455. group_id = config.getoption("test-group")
  456. if not group_count or not group_id:
  457. # We're not selection tests using groups, don't do any filtering
  458. return
  459. total_items = len(items)
  460. tests_in_group, deselected = get_group(items, group_count, group_id)
  461. # Replace all items in the list
  462. items[:] = tests_in_group
  463. if deselected:
  464. config.hook.pytest_deselected(items=deselected)
  465. terminal_reporter = config.pluginmanager.get_plugin("terminalreporter")
  466. terminal_reporter.write(
  467. "Running test group #{0} ({1} tests)\n".format(group_id, len(items)),
  468. yellow=True,
  469. )
  470. # <---- Test Groups Selection ----------------------------------------------------------------------------------------
  471. # ----- Fixtures Overrides ------------------------------------------------------------------------------------------>
  472. @pytest.fixture(scope="session")
  473. def salt_factories_config():
  474. """
  475. Return a dictionary with the keyworkd arguments for SaltFactoriesManager
  476. """
  477. return {
  478. "executable": sys.executable,
  479. "code_dir": str(CODE_DIR),
  480. "inject_coverage": MAYBE_RUN_COVERAGE,
  481. "inject_sitecustomize": MAYBE_RUN_COVERAGE,
  482. "start_timeout": 120
  483. if (os.environ.get("JENKINS_URL") or os.environ.get("CI"))
  484. else 60,
  485. }
  486. # <---- Pytest Helpers -----------------------------------------------------------------------------------------------
  487. # ----- From Filenames Test Selection ------------------------------------------------------------------------------->
  488. def _match_to_test_file(match):
  489. parts = match.split(".")
  490. parts[-1] += ".py"
  491. return TESTS_DIR.joinpath(*parts).relative_to(CODE_DIR)
  492. def from_filenames_collection_modifyitems(config, items):
  493. from_filenames = config.getoption("--from-filenames")
  494. if not from_filenames:
  495. # Don't do anything
  496. return
  497. test_categories_paths = (
  498. (CODE_DIR / "tests" / "integration").relative_to(CODE_DIR),
  499. (CODE_DIR / "tests" / "multimaster").relative_to(CODE_DIR),
  500. (CODE_DIR / "tests" / "unit").relative_to(CODE_DIR),
  501. (CODE_DIR / "tests" / "pytests" / "e2e").relative_to(CODE_DIR),
  502. (CODE_DIR / "tests" / "pytests" / "functional").relative_to(CODE_DIR),
  503. (CODE_DIR / "tests" / "pytests" / "integration").relative_to(CODE_DIR),
  504. (CODE_DIR / "tests" / "pytests" / "unit").relative_to(CODE_DIR),
  505. )
  506. test_module_paths = set()
  507. from_filenames_listing = set()
  508. for path in [pathlib.Path(path.strip()) for path in from_filenames.split(",")]:
  509. if path.is_absolute():
  510. # In this case, this path is considered to be a file containing a line separated list
  511. # of files to consider
  512. with salt.utils.files.fopen(str(path)) as rfh:
  513. for line in rfh:
  514. line_path = pathlib.Path(line.strip())
  515. if not line_path.exists():
  516. continue
  517. from_filenames_listing.add(line_path)
  518. continue
  519. from_filenames_listing.add(path)
  520. filename_map = yaml.deserialize(
  521. (CODE_DIR / "tests" / "filename_map.yml").read_text()
  522. )
  523. # Let's add the match all rule
  524. for rule, matches in filename_map.items():
  525. if rule == "*":
  526. for match in matches:
  527. test_module_paths.add(_match_to_test_file(match))
  528. break
  529. # Let's now go through the list of files gathered
  530. for filename in from_filenames_listing:
  531. if str(filename).startswith("tests/"):
  532. # Tests in the listing don't require additional matching and will be added to the
  533. # list of tests to run
  534. test_module_paths.add(filename)
  535. continue
  536. if filename.name == "setup.py" or str(filename).startswith("salt/"):
  537. if path.name == "__init__.py":
  538. # No direct macthing
  539. continue
  540. # Now let's try a direct match between the passed file and possible test modules
  541. for test_categories_path in test_categories_paths:
  542. test_module_path = test_categories_path / "test_{}".format(path.name)
  543. if test_module_path.is_file():
  544. test_module_paths.add(test_module_path)
  545. continue
  546. # Do we have an entry in tests/filename_map.yml
  547. for rule, matches in filename_map.items():
  548. if rule == "*":
  549. continue
  550. elif "|" in rule:
  551. # This is regex
  552. if re.match(rule, str(filename)):
  553. for match in matches:
  554. test_module_paths.add(_match_to_test_file(match))
  555. elif "*" in rule or "\\" in rule:
  556. # Glob matching
  557. for filerule in CODE_DIR.glob(rule):
  558. if not filerule.exists():
  559. continue
  560. filerule = filerule.relative_to(CODE_DIR)
  561. if filerule != filename:
  562. continue
  563. for match in matches:
  564. test_module_paths.add(_match_to_test_file(match))
  565. else:
  566. if str(filename) != rule:
  567. continue
  568. # Direct file paths as rules
  569. filerule = pathlib.Path(rule)
  570. if not filerule.exists():
  571. continue
  572. for match in matches:
  573. test_module_paths.add(_match_to_test_file(match))
  574. continue
  575. else:
  576. log.debug("Don't know what to do with path %s", filename)
  577. selected = []
  578. deselected = []
  579. for item in items:
  580. itempath = pathlib.Path(str(item.fspath)).resolve().relative_to(CODE_DIR)
  581. if itempath in test_module_paths:
  582. selected.append(item)
  583. else:
  584. deselected.append(item)
  585. items[:] = selected
  586. if deselected:
  587. config.hook.pytest_deselected(items=deselected)
  588. # <---- From Filenames Test Selection --------------------------------------------------------------------------------
  589. # ----- Custom Grains Mark Evaluator -------------------------------------------------------------------------------->
  590. class GrainsMarkEvaluator(MarkEvaluator):
  591. _cached_grains = None
  592. def _getglobals(self):
  593. item_globals = super(GrainsMarkEvaluator, self)._getglobals()
  594. if GrainsMarkEvaluator._cached_grains is None:
  595. sminion = create_sminion()
  596. GrainsMarkEvaluator._cached_grains = sminion.opts["grains"].copy()
  597. item_globals["grains"] = GrainsMarkEvaluator._cached_grains.copy()
  598. return item_globals
  599. # Patch PyTest's skipping MarkEvaluator to use our GrainsMarkEvaluator
  600. _pytest.skipping.MarkEvaluator = GrainsMarkEvaluator
  601. # <---- Custom Grains Mark Evaluator ---------------------------------------------------------------------------------
  602. # ----- Custom Fixtures --------------------------------------------------------------------------------------------->
  603. @pytest.fixture(scope="session")
  604. def reap_stray_processes():
  605. # Run tests
  606. yield
  607. children = psutil.Process(os.getpid()).children(recursive=True)
  608. if not children:
  609. log.info("No astray processes found")
  610. return
  611. def on_terminate(proc):
  612. log.debug("Process %s terminated with exit code %s", proc, proc.returncode)
  613. if children:
  614. # Reverse the order, sublings first, parents after
  615. children.reverse()
  616. log.warning(
  617. "Test suite left %d astray processes running. Killing those processes:\n%s",
  618. len(children),
  619. pprint.pformat(children),
  620. )
  621. _, alive = psutil.wait_procs(children, timeout=3, callback=on_terminate)
  622. for child in alive:
  623. try:
  624. child.kill()
  625. except psutil.NoSuchProcess:
  626. continue
  627. _, alive = psutil.wait_procs(alive, timeout=3, callback=on_terminate)
  628. if alive:
  629. # Give up
  630. for child in alive:
  631. log.warning(
  632. "Process %s survived SIGKILL, giving up:\n%s",
  633. child,
  634. pprint.pformat(child.as_dict()),
  635. )
@pytest.fixture(scope="session")
def grains(request):
    """
    Session scoped fixture returning a copy of the test minion's grains.
    """
    sminion = create_sminion()
    return sminion.opts["grains"].copy()
  640. # <---- Custom Fixtures ----------------------------------------------------------------------------------------------