  1. """
  2. :codeauthor: Pedro Algarvio (pedro@algarvio.me)
  3. tests.conftest
  4. ~~~~~~~~~~~~~~
  5. Prepare py.test for our test suite
  6. """
  7. # pylint: disable=wrong-import-order,wrong-import-position,3rd-party-local-module-not-gated
  8. # pylint: disable=redefined-outer-name,invalid-name,3rd-party-module-not-gated
  9. import logging
  10. import os
  11. import pathlib
  12. import pprint
  13. import re
  14. import sys
  15. from functools import partial, wraps
  16. import _pytest.logging
  17. import _pytest.skipping
  18. import psutil
  19. import pytest
  20. import salt.config
  21. import salt.loader
  22. import salt.log.mixins
  23. import salt.log.setup
  24. import salt.utils.files
  25. import salt.utils.path
  26. import salt.utils.platform
  27. import salt.utils.win_functions
  28. import saltfactories.utils.compat
  29. from _pytest.mark.evaluate import MarkEvaluator
  30. from salt.serializers import yaml
  31. from tests.support.helpers import PRE_PYTEST_SKIP_OR_NOT, PRE_PYTEST_SKIP_REASON
  32. from tests.support.pytest.helpers import * # pylint: disable=unused-wildcard-import
  33. from tests.support.runtests import RUNTIME_VARS
  34. from tests.support.saltfactories_compat import LogServer
  35. from tests.support.sminion import check_required_sminion_attributes, create_sminion
  36. TESTS_DIR = pathlib.Path(__file__).resolve().parent
  37. CODE_DIR = TESTS_DIR.parent
  38. # Change to code checkout directory
  39. os.chdir(str(CODE_DIR))
  40. # Make sure the current directory is the first item in sys.path
  41. if str(CODE_DIR) in sys.path:
  42. sys.path.remove(str(CODE_DIR))
  43. sys.path.insert(0, str(CODE_DIR))
  44. # Coverage
  45. if "COVERAGE_PROCESS_START" in os.environ:
  46. MAYBE_RUN_COVERAGE = True
  47. COVERAGERC_FILE = os.environ["COVERAGE_PROCESS_START"]
  48. else:
  49. COVERAGERC_FILE = str(CODE_DIR / ".coveragerc")
  50. MAYBE_RUN_COVERAGE = (
  51. sys.argv[0].endswith("pytest.py") or "_COVERAGE_RCFILE" in os.environ
  52. )
  53. if MAYBE_RUN_COVERAGE:
  54. # Flag coverage to track suprocesses by pointing it to the right .coveragerc file
  55. os.environ["COVERAGE_PROCESS_START"] = str(COVERAGERC_FILE)
  56. # Define the pytest plugins we rely on
  57. pytest_plugins = ["tempdir", "helpers_namespace"]
  58. # Define where not to collect tests from
  59. collect_ignore = ["setup.py"]


# Patch PyTest logging handlers
class LogCaptureHandler(
    salt.log.mixins.ExcInfoOnLogLevelFormatMixIn, _pytest.logging.LogCaptureHandler
):
    """
    Subclassing PyTest's LogCaptureHandler in order to add the
    exc_info_on_loglevel functionality and to actually make it a NullHandler;
    it's only used to print log messages emitted during tests, which we
    have explicitly disabled in pytest.ini
    """


_pytest.logging.LogCaptureHandler = LogCaptureHandler


class LiveLoggingStreamHandler(
    salt.log.mixins.ExcInfoOnLogLevelFormatMixIn,
    _pytest.logging._LiveLoggingStreamHandler,
):
    """
    Subclassing PyTest's LiveLoggingStreamHandler in order to add the
    exc_info_on_loglevel functionality.
    """


_pytest.logging._LiveLoggingStreamHandler = LiveLoggingStreamHandler

# Reset logging root handlers
for handler in logging.root.handlers[:]:
    logging.root.removeHandler(handler)

# Reset the root logger to its default level (because salt changed it)
logging.root.setLevel(logging.WARNING)

log = logging.getLogger("salt.testsuite")


# ----- PyTest Tempdir Plugin Hooks --------------------------------------------------------------------------------->
def pytest_tempdir_basename():
    """
    Return the temporary directory basename for the salt test suite.
    """
    return "salt-tests-tmpdir"


# <---- PyTest Tempdir Plugin Hooks ----------------------------------------------------------------------------------


# ----- CLI Options Setup ------------------------------------------------------------------------------------------->
def pytest_addoption(parser):
    """
    register argparse-style options and ini-style config values.
    """
    test_selection_group = parser.getgroup("Tests Selection")
    test_selection_group.addoption(
        "--from-filenames",
        default=None,
        help=(
            "Pass a comma-separated list of file paths, and any test module which corresponds to the "
            "specified file(s) will run. For example, if 'setup.py' was passed, then the corresponding "
            "test files defined in 'tests/filename_map.yml' would run. Absolute paths are assumed to be "
            "files containing relative paths, one per line. Providing the paths in a file can help get "
            "around shell character limits when the list of files is long."
        ),
    )
    # Add deprecated CLI flag until we completely switch to PyTest
    test_selection_group.addoption(
        "--names-file", default=None, help="Deprecated option"
    )
    test_selection_group.addoption(
        "--transport",
        default="zeromq",
        choices=("zeromq", "tcp"),
        help=(
            "Select which transport to run the integration tests with, zeromq or tcp. Default: %default"
        ),
    )
    test_selection_group.addoption(
        "--ssh",
        "--ssh-tests",
        dest="ssh",
        action="store_true",
        default=False,
        help="Run salt-ssh tests. These tests will spin up a temporary "
        "SSH server on your machine. In certain environments, this "
        "may be insecure! Default: False",
    )
    test_selection_group.addoption(
        "--proxy",
        "--proxy-tests",
        dest="proxy",
        action="store_true",
        default=False,
        help="Run proxy tests",
    )
    test_selection_group.addoption(
        "--run-slow", action="store_true", default=False, help="Run slow tests.",
    )

    output_options_group = parser.getgroup("Output Options")
    output_options_group.addoption(
        "--output-columns",
        default=80,
        type=int,
        help="Number of maximum columns to use on the output",
    )
    output_options_group.addoption(
        "--no-colors",
        "--no-colours",
        default=False,
        action="store_true",
        help="Disable colour printing.",
    )

    # ----- Test Groups --------------------------------------------------------------------------------------------->
    # This will allow running the tests in chunks
    test_selection_group.addoption(
        "--test-group-count",
        dest="test-group-count",
        type=int,
        help="The number of groups to split the tests into",
    )
    test_selection_group.addoption(
        "--test-group",
        dest="test-group",
        type=int,
        help="The group of tests that should be executed",
    )
    # <---- Test Groups ----------------------------------------------------------------------------------------------


# <---- CLI Options Setup --------------------------------------------------------------------------------------------


# ----- Register Markers -------------------------------------------------------------------------------------------->
@pytest.mark.trylast
def pytest_configure(config):
    """
    called after command line options have been parsed
    and all plugins and initial conftest files have been loaded.
    """
    for dirname in CODE_DIR.iterdir():
        if not dirname.is_dir():
            continue
        if dirname != TESTS_DIR:
            config.addinivalue_line("norecursedirs", str(CODE_DIR / dirname))

    # Expose the markers we use to pytest CLI
    config.addinivalue_line(
        "markers",
        "requires_salt_modules(*required_module_names): Skip if at least one module is not available.",
    )
    config.addinivalue_line(
        "markers",
        "requires_salt_states(*required_state_names): Skip if at least one state module is not available.",
    )
    config.addinivalue_line(
        "markers", "windows_whitelisted: Mark test as whitelisted to run under Windows"
    )
    # Make sure the test suite "knows" this is a pytest test run
    RUNTIME_VARS.PYTEST_SESSION = True

    # "Flag" the slowTest decorator if we're skipping slow tests or not
    os.environ["SLOW_TESTS"] = str(config.getoption("--run-slow"))

    # If PyTest has no logging configured, default to ERROR level
    levels = [logging.ERROR]
    logging_plugin = config.pluginmanager.get_plugin("logging-plugin")
    try:
        level = logging_plugin.log_cli_handler.level
        if level is not None:
            levels.append(level)
    except AttributeError:
        # PyTest CLI logging not configured
        pass
    try:
        level = logging_plugin.log_file_level
        if level is not None:
            levels.append(level)
    except AttributeError:
        # PyTest Log File logging not configured
        pass

    if logging.NOTSET in levels:
        # We don't want the NOTSET level on the levels
        levels.pop(levels.index(logging.NOTSET))

    log_level = logging.getLevelName(min(levels))

    log_server = LogServer(log_level=log_level)
    config.pluginmanager.register(log_server, "salt-saltfactories-log-server")


# <---- Register Markers ---------------------------------------------------------------------------------------------


# ----- PyTest Tweaks ----------------------------------------------------------------------------------------------->
def set_max_open_files_limits(min_soft=3072, min_hard=4096):
    # Get current limits
    if salt.utils.platform.is_windows():
        import win32file

        prev_hard = win32file._getmaxstdio()
        prev_soft = 512
    else:
        import resource

        prev_soft, prev_hard = resource.getrlimit(resource.RLIMIT_NOFILE)

    # Check minimum required limits
    set_limits = False
    if prev_soft < min_soft:
        soft = min_soft
        set_limits = True
    else:
        soft = prev_soft

    if prev_hard < min_hard:
        hard = min_hard
        set_limits = True
    else:
        hard = prev_hard

    # Increase limits
    if set_limits:
        log.debug(
            " * Max open files setting is too low (soft: %s, hard: %s) for running the tests. "
            "Trying to raise the limits to soft: %s, hard: %s",
            prev_soft,
            prev_hard,
            soft,
            hard,
        )
        try:
            if salt.utils.platform.is_windows():
                hard = 2048 if hard > 2048 else hard
                win32file._setmaxstdio(hard)
            else:
                resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
        except Exception as err:  # pylint: disable=broad-except
            log.error(
                "Failed to raise the max open files setting -> %s. Please issue the following command "
                "on your console: 'ulimit -n %s'",
                err,
                soft,
            )
            exit(1)
    return soft, hard


def pytest_report_header():
    soft, hard = set_max_open_files_limits()
    return "max open files; soft: {}; hard: {}".format(soft, hard)


@pytest.hookimpl(hookwrapper=True, trylast=True)
def pytest_collection_modifyitems(config, items):
    """
    called after collection has been performed, may filter or re-order
    the items in-place.

    :param _pytest.main.Session session: the pytest session object
    :param _pytest.config.Config config: pytest config object
    :param List[_pytest.nodes.Item] items: list of item objects
    """
    # Let PyTest or other plugins handle the initial collection
    yield
    groups_collection_modifyitems(config, items)
    from_filenames_collection_modifyitems(config, items)

    log.warning("Modifying collected tests to keep track of fixture usage")
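    # Note: the loop below does not modify the tests themselves. For every fixture scoped
    # wider than "module" (i.e. package or session scoped), it records which collected test
    # node IDs still use the fixture and wraps its finish() teardown so that the real
    # teardown only runs once no remaining test references it (pytest_runtest_protocol
    # below removes node IDs as tests complete and then calls finish()).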
    for item in items:
        for fixture in item.fixturenames:
            if fixture not in item._fixtureinfo.name2fixturedefs:
                continue
            for fixturedef in item._fixtureinfo.name2fixturedefs[fixture]:
                if fixturedef.scope in ("function", "class", "module"):
                    continue

                try:
                    node_ids = fixturedef.node_ids
                except AttributeError:
                    node_ids = fixturedef.node_ids = set()
                node_ids.add(item.nodeid)

                try:
                    fixturedef.finish.__wrapped__
                except AttributeError:
                    original_func = fixturedef.finish

                    def wrapper(func, fixturedef):
                        @wraps(func)
                        def wrapped(self, request):
                            try:
                                return self._finished
                            except AttributeError:
                                if self.node_ids:
                                    if (
                                        not request.session.shouldfail
                                        and not request.session.shouldstop
                                    ):
                                        log.debug(
                                            "%s is still going to be used, not terminating it. "
                                            "Still in use on:\n%s",
                                            self,
                                            pprint.pformat(list(self.node_ids)),
                                        )
                                        return
                                log.debug("Finish called on %s", self)
                                try:
                                    return func(request)
                                finally:
                                    self._finished = True

                        return partial(wrapped, fixturedef)

                    fixturedef.finish = wrapper(fixturedef.finish, fixturedef)
                    try:
                        fixturedef.finish.__wrapped__
                    except AttributeError:
                        fixturedef.finish.__wrapped__ = original_func


@pytest.hookimpl(trylast=True, hookwrapper=True)
def pytest_runtest_protocol(item, nextitem):
    """
    implements the runtest_setup/call/teardown protocol for
    the given test item, including capturing exceptions and calling
    reporting hooks.

    :arg item: test item for which the runtest protocol is performed.
    :arg nextitem: the scheduled-to-be-next test item (or None if this
                   is the end my friend). This argument is passed on to
                   :py:func:`pytest_runtest_teardown`.
    :return boolean: True if no further hook implementations should be invoked.

    Stops at first non-None result, see :ref:`firstresult`
    """
    request = item._request
    used_fixture_defs = []
    for fixture in item.fixturenames:
        if fixture not in item._fixtureinfo.name2fixturedefs:
            continue
        for fixturedef in reversed(item._fixtureinfo.name2fixturedefs[fixture]):
            if fixturedef.scope in ("function", "class", "module"):
                continue
            used_fixture_defs.append(fixturedef)
    try:
        # Run the test
        yield
    finally:
        for fixturedef in used_fixture_defs:
            if item.nodeid in fixturedef.node_ids:
                fixturedef.node_ids.remove(item.nodeid)
            if not fixturedef.node_ids:
                # This fixture is not used in any more test functions
                fixturedef.finish(request)
    del request
    del used_fixture_defs


def pytest_runtest_teardown(item, nextitem):
    """
    called after ``pytest_runtest_call``.

    :arg nextitem: the scheduled-to-be-next test item (None if no further
                   test item is scheduled). This argument can be used to
                   perform exact teardowns, i.e. calling just enough finalizers
                   so that nextitem only needs to call setup-functions.
    """
    # PyTest doesn't reset the capturing log handler when done with it.
    # Reset it to free used memory and python objects
    # We currently have PyTest's log_print setting set to false; if it was
    # set to true, the call below would make PyTest not print any logs at all.
    item.catch_log_handler.reset()


@pytest.hookimpl(tryfirst=True)
def pytest_sessionstart(session):
    log_server = session.config.pluginmanager.get_plugin(
        "salt-saltfactories-log-server"
    )
    log_server.start()


@pytest.hookimpl(trylast=True)
def pytest_sessionfinish(session):
    log_server = session.config.pluginmanager.get_plugin(
        "salt-saltfactories-log-server"
    )
    log_server.stop()


@pytest.fixture(scope="session")
def log_server():
    """
    Just overriding the fixture
    """


# <---- PyTest Tweaks ------------------------------------------------------------------------------------------------


# ----- Test Setup -------------------------------------------------------------------------------------------------->
@pytest.hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
    """
    Fixtures injection based on markers or test skips based on CLI arguments
    """
    integration_utils_tests_path = str(CODE_DIR / "tests" / "integration" / "utils")
    if (
        str(item.fspath).startswith(integration_utils_tests_path)
        and PRE_PYTEST_SKIP_OR_NOT is True
    ):
        item._skipped_by_mark = True
        pytest.skip(PRE_PYTEST_SKIP_REASON)

    if saltfactories.utils.compat.has_unittest_attr(item, "__slow_test__"):
        if item.config.getoption("--run-slow") is False:
            item._skipped_by_mark = True
            pytest.skip("Slow tests are disabled!")

    requires_salt_modules_marker = item.get_closest_marker("requires_salt_modules")
    if requires_salt_modules_marker is not None:
        required_salt_modules = requires_salt_modules_marker.args
        if len(required_salt_modules) == 1 and isinstance(
            required_salt_modules[0], (list, tuple, set)
        ):
            required_salt_modules = required_salt_modules[0]
        required_salt_modules = set(required_salt_modules)
        not_available_modules = check_required_sminion_attributes(
            "functions", required_salt_modules
        )

        if not_available_modules:
            item._skipped_by_mark = True
            if len(not_available_modules) == 1:
                pytest.skip(
                    "Salt module '{}' is not available".format(*not_available_modules)
                )
            pytest.skip(
                "Salt modules not available: {}".format(
                    ", ".join(not_available_modules)
                )
            )

    requires_salt_states_marker = item.get_closest_marker("requires_salt_states")
    if requires_salt_states_marker is not None:
        required_salt_states = requires_salt_states_marker.args
        if len(required_salt_states) == 1 and isinstance(
            required_salt_states[0], (list, tuple, set)
        ):
            required_salt_states = required_salt_states[0]
        required_salt_states = set(required_salt_states)
        not_available_states = check_required_sminion_attributes(
            "states", required_salt_states
        )

        if not_available_states:
            item._skipped_by_mark = True
            if len(not_available_states) == 1:
                pytest.skip(
                    "Salt state module '{}' is not available".format(
                        *not_available_states
                    )
                )
            pytest.skip(
                "Salt state modules not available: {}".format(
                    ", ".join(not_available_states)
                )
            )

    if salt.utils.platform.is_windows():
        if not item.fspath.fnmatch(str(CODE_DIR / "tests" / "unit" / "*")):
            # Unit tests are whitelisted on windows by default, so, we're only
            # after all other tests
            windows_whitelisted_marker = item.get_closest_marker("windows_whitelisted")
            if windows_whitelisted_marker is None:
                item._skipped_by_mark = True
                pytest.skip("Test is not whitelisted for Windows")


# <---- Test Setup ---------------------------------------------------------------------------------------------------


# ----- Test Groups Selection --------------------------------------------------------------------------------------->
def get_group_size_and_start(total_items, total_groups, group_id):
    """
    Calculate group size and start index.
    """
    base_size = total_items // total_groups
    rem = total_items % total_groups
    start = base_size * (group_id - 1) + min(group_id - 1, rem)
    size = base_size + 1 if group_id <= rem else base_size
    return (start, size)
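
# Worked example for the arithmetic above (illustrative numbers only): splitting 10 collected
# items into 3 groups gives base_size=3 and rem=1, so the groups start at indices 0, 4 and 7
# with sizes 4, 3 and 3. The remainder is spread across the first ``rem`` groups, one extra
# item each.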


def get_group(items, total_groups, group_id):
    """
    Get the items from the passed in group based on group size.
    """
    if not 0 < group_id <= total_groups:
        raise ValueError("Invalid test-group argument")

    start, size = get_group_size_and_start(len(items), total_groups, group_id)
    selected = items[start : start + size]
    deselected = items[:start] + items[start + size :]
    assert len(selected) + len(deselected) == len(items)
    return selected, deselected


def groups_collection_modifyitems(config, items):
    group_count = config.getoption("test-group-count")
    group_id = config.getoption("test-group")

    if not group_count or not group_id:
        # We're not selecting tests using groups, don't do any filtering
        return

    total_items = len(items)

    tests_in_group, deselected = get_group(items, group_count, group_id)
    # Replace all items in the list
    items[:] = tests_in_group
    if deselected:
        config.hook.pytest_deselected(items=deselected)

    terminal_reporter = config.pluginmanager.get_plugin("terminalreporter")
    terminal_reporter.write(
        "Running test group #{} ({} tests)\n".format(group_id, len(items)), yellow=True,
    )


# <---- Test Groups Selection ----------------------------------------------------------------------------------------


# ----- Fixtures Overrides ------------------------------------------------------------------------------------------>
@pytest.fixture(scope="session")
def salt_factories_config(request):
    """
    Return a dictionary with the keyword arguments for SaltFactoriesManager
    """
    log_server = request.config.pluginmanager.get_plugin(
        "salt-saltfactories-log-server"
    )
    return {
        "executable": sys.executable,
        "code_dir": str(CODE_DIR),
        "inject_coverage": MAYBE_RUN_COVERAGE,
        "inject_sitecustomize": MAYBE_RUN_COVERAGE,
        "start_timeout": 120
        if (os.environ.get("JENKINS_URL") or os.environ.get("CI"))
        else 60,
        "log_server_host": log_server.log_host,
        "log_server_port": log_server.log_port,
        "log_server_level": log_server.log_level,
    }


# <---- Fixtures Overrides -------------------------------------------------------------------------------------------


# ----- From Filenames Test Selection ------------------------------------------------------------------------------->
def _match_to_test_file(match):
    parts = match.split(".")
    parts[-1] += ".py"
    return TESTS_DIR.joinpath(*parts).relative_to(CODE_DIR)
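
# Illustrative example (hypothetical entry, not necessarily present in filename_map.yml):
# a match string such as "integration.modules.test_pkg" resolves to the repo-relative path
# "tests/integration/modules/test_pkg.py".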


def from_filenames_collection_modifyitems(config, items):
    from_filenames = config.getoption("--from-filenames")
    if not from_filenames:
        # Don't do anything
        return

    test_categories_paths = (
        (CODE_DIR / "tests" / "integration").relative_to(CODE_DIR),
        (CODE_DIR / "tests" / "multimaster").relative_to(CODE_DIR),
        (CODE_DIR / "tests" / "unit").relative_to(CODE_DIR),
        (CODE_DIR / "tests" / "pytests" / "e2e").relative_to(CODE_DIR),
        (CODE_DIR / "tests" / "pytests" / "functional").relative_to(CODE_DIR),
        (CODE_DIR / "tests" / "pytests" / "integration").relative_to(CODE_DIR),
        (CODE_DIR / "tests" / "pytests" / "unit").relative_to(CODE_DIR),
    )

    test_module_paths = set()
    from_filenames_listing = set()
    for path in [pathlib.Path(path.strip()) for path in from_filenames.split(",")]:
        if path.is_absolute():
            # In this case, this path is considered to be a file containing a line separated list
            # of files to consider
            with salt.utils.files.fopen(str(path)) as rfh:
                for line in rfh:
                    line_path = pathlib.Path(line.strip())
                    if not line_path.exists():
                        continue
                    from_filenames_listing.add(line_path)
            continue
        from_filenames_listing.add(path)

    filename_map = yaml.deserialize(
        (CODE_DIR / "tests" / "filename_map.yml").read_text()
    )
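    # The keys of filename_map.yml are rules matched against the gathered source files and
    # the values are lists of dotted test-module names. Judging by the handling below, a rule
    # is either "*" (always selected), a regex (contains "|"), a glob (contains "*" or "\\"),
    # or a plain file path. A hypothetical entry could look like:
    #
    #   salt/modules/pkg.py:
    #     - integration.modules.test_pkg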
    # Let's add the match all rule
    for rule, matches in filename_map.items():
        if rule == "*":
            for match in matches:
                test_module_paths.add(_match_to_test_file(match))
            break

    # Let's now go through the list of files gathered
    for filename in from_filenames_listing:
        if str(filename).startswith("tests/"):
            # Tests in the listing don't require additional matching and will be added to the
            # list of tests to run
            test_module_paths.add(filename)
            continue
        if filename.name == "setup.py" or str(filename).startswith("salt/"):
            if filename.name == "__init__.py":
                # No direct matching
                continue
            # Now let's try a direct match between the passed file and possible test modules
            for test_categories_path in test_categories_paths:
                test_module_path = test_categories_path / "test_{}".format(
                    filename.name
                )
                if test_module_path.is_file():
                    test_module_paths.add(test_module_path)
                    continue
            # Do we have an entry in tests/filename_map.yml
            for rule, matches in filename_map.items():
                if rule == "*":
                    continue
                elif "|" in rule:
                    # This is regex
                    if re.match(rule, str(filename)):
                        for match in matches:
                            test_module_paths.add(_match_to_test_file(match))
                elif "*" in rule or "\\" in rule:
                    # Glob matching
                    for filerule in CODE_DIR.glob(rule):
                        if not filerule.exists():
                            continue
                        filerule = filerule.relative_to(CODE_DIR)
                        if filerule != filename:
                            continue
                        for match in matches:
                            test_module_paths.add(_match_to_test_file(match))
                else:
                    if str(filename) != rule:
                        continue
                    # Direct file paths as rules
                    filerule = pathlib.Path(rule)
                    if not filerule.exists():
                        continue
                    for match in matches:
                        test_module_paths.add(_match_to_test_file(match))
            continue
        else:
            log.debug("Don't know what to do with path %s", filename)

    selected = []
    deselected = []
    for item in items:
        itempath = pathlib.Path(str(item.fspath)).resolve().relative_to(CODE_DIR)
        if itempath in test_module_paths:
            selected.append(item)
        else:
            deselected.append(item)
    items[:] = selected
    if deselected:
        config.hook.pytest_deselected(items=deselected)


# <---- From Filenames Test Selection --------------------------------------------------------------------------------


# ----- Custom Grains Mark Evaluator -------------------------------------------------------------------------------->
class GrainsMarkEvaluator(MarkEvaluator):
    _cached_grains = None

    def _getglobals(self):
        item_globals = super()._getglobals()
        if GrainsMarkEvaluator._cached_grains is None:
            sminion = create_sminion()
            GrainsMarkEvaluator._cached_grains = sminion.opts["grains"].copy()
        item_globals["grains"] = GrainsMarkEvaluator._cached_grains.copy()
        return item_globals


# Patch PyTest's skipping MarkEvaluator to use our GrainsMarkEvaluator
_pytest.skipping.MarkEvaluator = GrainsMarkEvaluator
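
# With the patch above, string conditions evaluated by the skipping plugin can reference the
# cached grains dictionary. A hypothetical test (not part of this file) could then be marked
# with something like:
#   @pytest.mark.skipif('grains["os_family"] == "Windows"', reason="POSIX-only test")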
# <---- Custom Grains Mark Evaluator ---------------------------------------------------------------------------------


# ----- Custom Fixtures --------------------------------------------------------------------------------------------->
@pytest.fixture(scope="session")
def reap_stray_processes():
    # Run tests
    yield

    children = psutil.Process(os.getpid()).children(recursive=True)
    if not children:
        log.info("No stray processes found")
        return

    def on_terminate(proc):
        log.debug("Process %s terminated with exit code %s", proc, proc.returncode)

    if children:
        # Reverse the order, siblings first, parents after
        children.reverse()
        log.warning(
            "Test suite left %d stray processes running. Killing those processes:\n%s",
            len(children),
            pprint.pformat(children),
        )

        _, alive = psutil.wait_procs(children, timeout=3, callback=on_terminate)
        for child in alive:
            try:
                child.kill()
            except psutil.NoSuchProcess:
                continue

        _, alive = psutil.wait_procs(alive, timeout=3, callback=on_terminate)
        if alive:
            # Give up
            for child in alive:
                log.warning(
                    "Process %s survived SIGKILL, giving up:\n%s",
                    child,
                    pprint.pformat(child.as_dict()),
                )


@pytest.fixture(scope="session")
def grains(request):
    sminion = create_sminion()
    return sminion.opts["grains"].copy()


# <---- Custom Fixtures ----------------------------------------------------------------------------------------------