conftest.py

  1. """
  2. :codeauthor: Pedro Algarvio (pedro@algarvio.me)
  3. tests.conftest
  4. ~~~~~~~~~~~~~~
  5. Prepare py.test for our test suite
  6. """
  7. # pylint: disable=wrong-import-order,wrong-import-position,3rd-party-local-module-not-gated
  8. # pylint: disable=redefined-outer-name,invalid-name,3rd-party-module-not-gated
  9. import logging
  10. import os
  11. import pathlib
  12. import pprint
  13. import re
  14. import sys
  15. from functools import partial, wraps
  16. from unittest import TestCase # pylint: disable=blacklisted-module
  17. import _pytest.logging
  18. import _pytest.skipping
  19. import psutil
  20. import pytest
  21. import salt.config
  22. import salt.loader
  23. import salt.log.mixins
  24. import salt.log.setup
  25. import salt.utils.files
  26. import salt.utils.path
  27. import salt.utils.platform
  28. import salt.utils.win_functions
  29. import saltfactories.utils.compat
  30. from salt.serializers import yaml
  31. from tests.support.helpers import PRE_PYTEST_SKIP_OR_NOT, PRE_PYTEST_SKIP_REASON
  32. from tests.support.pytest.fixtures import * # pylint: disable=unused-wildcard-import
  33. from tests.support.pytest.helpers import * # pylint: disable=unused-wildcard-import
  34. from tests.support.runtests import RUNTIME_VARS
  35. from tests.support.saltfactories_compat import LogServer
  36. from tests.support.sminion import check_required_sminion_attributes, create_sminion
  37. TESTS_DIR = pathlib.Path(__file__).resolve().parent
  38. PYTESTS_DIR = TESTS_DIR / "pytests"
  39. CODE_DIR = TESTS_DIR.parent
  40. # Change to code checkout directory
  41. os.chdir(str(CODE_DIR))
  42. # Make sure the current directory is the first item in sys.path
  43. if str(CODE_DIR) in sys.path:
  44. sys.path.remove(str(CODE_DIR))
  45. sys.path.insert(0, str(CODE_DIR))
  46. # Coverage
  47. if "COVERAGE_PROCESS_START" in os.environ:
  48. MAYBE_RUN_COVERAGE = True
  49. COVERAGERC_FILE = os.environ["COVERAGE_PROCESS_START"]
  50. else:
  51. COVERAGERC_FILE = str(CODE_DIR / ".coveragerc")
  52. MAYBE_RUN_COVERAGE = (
  53. sys.argv[0].endswith("pytest.py") or "_COVERAGE_RCFILE" in os.environ
  54. )
  55. if MAYBE_RUN_COVERAGE:
  56. # Flag coverage to track suprocesses by pointing it to the right .coveragerc file
  57. os.environ["COVERAGE_PROCESS_START"] = str(COVERAGERC_FILE)
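
# Note: subprocess coverage tracking relies on coverage.py's documented startup hook,
# i.e. a sitecustomize.py module roughly like the sketch below. This is an illustrative
# assumption of what the "inject_sitecustomize" option (configured further down in
# salt_factories_config) provides, not code that runs here:
#
#   import coverage
#   coverage.process_startup()  # only starts coverage if COVERAGE_PROCESS_START is set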
# Define the pytest plugins we rely on
pytest_plugins = ["tempdir", "helpers_namespace"]

# Define where not to collect tests from
collect_ignore = ["setup.py"]


# Patch PyTest logging handlers
class LogCaptureHandler(
    salt.log.mixins.ExcInfoOnLogLevelFormatMixIn, _pytest.logging.LogCaptureHandler
):
    """
    Subclassing PyTest's LogCaptureHandler in order to add the
    exc_info_on_loglevel functionality and to effectively make it a NullHandler;
    it's only used to print log messages emitted during tests, which we
    have explicitly disabled in pytest.ini
    """


_pytest.logging.LogCaptureHandler = LogCaptureHandler


class LiveLoggingStreamHandler(
    salt.log.mixins.ExcInfoOnLogLevelFormatMixIn,
    _pytest.logging._LiveLoggingStreamHandler,
):
    """
    Subclassing PyTest's LiveLoggingStreamHandler in order to add the
    exc_info_on_loglevel functionality.
    """


_pytest.logging._LiveLoggingStreamHandler = LiveLoggingStreamHandler

# Reset logging root handlers
for handler in logging.root.handlers[:]:
    logging.root.removeHandler(handler)

# Reset the root logger to its default level (because salt changed it)
logging.root.setLevel(logging.WARNING)

log = logging.getLogger("salt.testsuite")


# ----- PyTest Tempdir Plugin Hooks --------------------------------------------------------------------------------->
def pytest_tempdir_basename():
    """
    Return the temporary directory basename for the salt test suite.
    """
    return "salt-tests-tmpdir"


# <---- PyTest Tempdir Plugin Hooks ----------------------------------------------------------------------------------


# ----- CLI Options Setup ------------------------------------------------------------------------------------------->
def pytest_addoption(parser):
    """
    Register argparse-style options and ini-style config values.
    """
    test_selection_group = parser.getgroup("Tests Selection")
    test_selection_group.addoption(
        "--from-filenames",
        default=None,
        help=(
            "Pass a comma-separated list of file paths, and any test module which corresponds to the "
            "specified file(s) will run. For example, if 'setup.py' was passed, then the corresponding "
            "test files defined in 'tests/filename_map.yml' would run. Absolute paths are assumed to be "
            "files containing relative paths, one per line. Providing the paths in a file can help get "
            "around shell character limits when the list of files is long."
        ),
    )
    # Add deprecated CLI flag until we completely switch to PyTest
    test_selection_group.addoption(
        "--names-file", default=None, help="Deprecated option"
    )
    test_selection_group.addoption(
        "--transport",
        default="zeromq",
        choices=("zeromq", "tcp"),
        help=(
            "Select which transport to run the integration tests with, zeromq or tcp. Default: zeromq"
        ),
    )
    test_selection_group.addoption(
        "--ssh",
        "--ssh-tests",
        dest="ssh",
        action="store_true",
        default=False,
        help="Run salt-ssh tests. These tests will spin up a temporary "
        "SSH server on your machine. In certain environments, this "
        "may be insecure! Default: False",
    )
    test_selection_group.addoption(
        "--proxy",
        "--proxy-tests",
        dest="proxy",
        action="store_true",
        default=False,
        help="Run proxy tests",
    )
    test_selection_group.addoption(
        "--run-slow", action="store_true", default=False, help="Run slow tests.",
    )

    output_options_group = parser.getgroup("Output Options")
    output_options_group.addoption(
        "--output-columns",
        default=80,
        type=int,
        help="Number of maximum columns to use on the output",
    )
    output_options_group.addoption(
        "--no-colors",
        "--no-colours",
        default=False,
        action="store_true",
        help="Disable colour printing.",
    )

    # ----- Test Groups --------------------------------------------------------------------------------------------->
    # This will allow running the tests in chunks
    test_selection_group.addoption(
        "--test-group-count",
        dest="test-group-count",
        type=int,
        help="The number of groups to split the tests into",
    )
    test_selection_group.addoption(
        "--test-group",
        dest="test-group",
        type=int,
        help="The group of tests that should be executed",
    )
    # <---- Test Groups ----------------------------------------------------------------------------------------------


# <---- CLI Options Setup --------------------------------------------------------------------------------------------
# ----- Register Markers -------------------------------------------------------------------------------------------->
@pytest.mark.trylast
def pytest_configure(config):
    """
    called after command line options have been parsed
    and all plugins and initial conftest files have been loaded.
    """
    for dirname in CODE_DIR.iterdir():
        if not dirname.is_dir():
            continue
        if dirname != TESTS_DIR:
            config.addinivalue_line("norecursedirs", str(CODE_DIR / dirname))

    # Expose the markers we use to pytest CLI
    config.addinivalue_line(
        "markers",
        "requires_salt_modules(*required_module_names): Skip if at least one module is not available.",
    )
    config.addinivalue_line(
        "markers",
        "requires_salt_states(*required_state_names): Skip if at least one state module is not available.",
    )
    config.addinivalue_line(
        "markers", "windows_whitelisted: Mark test as whitelisted to run under Windows"
    )
    # Make sure the test suite "knows" this is a pytest test run
    RUNTIME_VARS.PYTEST_SESSION = True

    # "Flag" the slowTest decorator if we're skipping slow tests or not
    os.environ["SLOW_TESTS"] = str(config.getoption("--run-slow"))

    # If PyTest has no logging configured, default to ERROR level
    levels = [logging.ERROR]
    logging_plugin = config.pluginmanager.get_plugin("logging-plugin")
    try:
        level = logging_plugin.log_cli_handler.level
        if level is not None:
            levels.append(level)
    except AttributeError:
        # PyTest CLI logging not configured
        pass
    try:
        level = logging_plugin.log_file_level
        if level is not None:
            levels.append(level)
    except AttributeError:
        # PyTest Log File logging not configured
        pass

    if logging.NOTSET in levels:
        # We don't want the NOTSET level on the levels
        levels.pop(levels.index(logging.NOTSET))

    log_level = logging.getLevelName(min(levels))
    log_server = LogServer(log_level=log_level)
    config.pluginmanager.register(log_server, "salt-saltfactories-log-server")


# <---- Register Markers ---------------------------------------------------------------------------------------------
# ----- PyTest Tweaks ----------------------------------------------------------------------------------------------->
def set_max_open_files_limits(min_soft=3072, min_hard=4096):

    # Get current limits
    if salt.utils.platform.is_windows():
        import win32file

        prev_hard = win32file._getmaxstdio()
        prev_soft = 512
    else:
        import resource

        prev_soft, prev_hard = resource.getrlimit(resource.RLIMIT_NOFILE)

    # Check minimum required limits
    set_limits = False
    if prev_soft < min_soft:
        soft = min_soft
        set_limits = True
    else:
        soft = prev_soft

    if prev_hard < min_hard:
        hard = min_hard
        set_limits = True
    else:
        hard = prev_hard

    # Increase limits
    if set_limits:
        log.debug(
            " * Max open files setting is too low (soft: %s, hard: %s) for running the tests. "
            "Trying to raise the limits to soft: %s, hard: %s",
            prev_soft,
            prev_hard,
            soft,
            hard,
        )
        try:
            if salt.utils.platform.is_windows():
                hard = 2048 if hard > 2048 else hard
                win32file._setmaxstdio(hard)
            else:
                resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
        except Exception as err:  # pylint: disable=broad-except
            log.error(
                "Failed to raise the max open files setting -> %s. Please issue the following command "
                "on your console: 'ulimit -n %s'",
                err,
                soft,
            )
            exit(1)
    return soft, hard
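
# For reference, the POSIX branch above boils down to the standard library calls shown
# in this illustrative interpreter session (the numbers vary per system):
#
#   >>> import resource
#   >>> resource.getrlimit(resource.RLIMIT_NOFILE)
#   (1024, 4096)
#   >>> resource.setrlimit(resource.RLIMIT_NOFILE, (3072, 4096))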
def pytest_report_header():
    soft, hard = set_max_open_files_limits()
    return "max open files; soft: {}; hard: {}".format(soft, hard)


def pytest_itemcollected(item):
    """We just collected a test item."""
    try:
        pathlib.Path(item.fspath.strpath).resolve().relative_to(PYTESTS_DIR)
        # Test is under tests/pytests
        if item.cls and issubclass(item.cls, TestCase):
            pytest.fail(
                "The tests under {0!r} MUST NOT use unittest's TestCase class or a subclass of it. "
                "Please move {1!r} outside of {0!r}".format(
                    str(PYTESTS_DIR.relative_to(CODE_DIR)), item.nodeid
                )
            )
    except ValueError:
        # Test is not under tests/pytests
        if not item.cls or (item.cls and not issubclass(item.cls, TestCase)):
            pytest.fail(
                "The test {!r} appears to be written for pytest but it's not under {!r}. "
                "Please move it there.".format(
                    item.nodeid, str(PYTESTS_DIR.relative_to(CODE_DIR))
                ),
                pytrace=False,
            )
@pytest.hookimpl(hookwrapper=True, trylast=True)
def pytest_collection_modifyitems(config, items):
    """
    called after collection has been performed, may filter or re-order
    the items in-place.

    :param _pytest.config.Config config: pytest config object
    :param List[_pytest.nodes.Item] items: list of item objects
    """
    # Let PyTest or other plugins handle the initial collection
    yield
    groups_collection_modifyitems(config, items)
    from_filenames_collection_modifyitems(config, items)

    log.warning("Modifying collected tests to keep track of fixture usage")
    for item in items:
        for fixture in item.fixturenames:
            if fixture not in item._fixtureinfo.name2fixturedefs:
                continue
            for fixturedef in item._fixtureinfo.name2fixturedefs[fixture]:
                if fixturedef.scope != "package":
                    continue
                try:
                    fixturedef.finish.__wrapped__
                except AttributeError:
                    original_func = fixturedef.finish

                    def wrapper(func, fixturedef):
                        @wraps(func)
                        def wrapped(self, request, nextitem=False):
                            try:
                                return self._finished
                            except AttributeError:
                                if nextitem:
                                    fpath = pathlib.Path(self.baseid).resolve()
                                    tpath = pathlib.Path(
                                        nextitem.fspath.strpath
                                    ).resolve()
                                    try:
                                        tpath.relative_to(fpath)
                                        # The test module is within the same package that the fixture is
                                        if (
                                            not request.session.shouldfail
                                            and not request.session.shouldstop
                                        ):
                                            log.debug(
                                                "The next test item is still under the fixture package path. "
                                                "Not terminating %s",
                                                self,
                                            )
                                            return
                                    except ValueError:
                                        pass
                                log.debug("Finish called on %s", self)
                                try:
                                    return func(request)
                                except BaseException as exc:  # pylint: disable=broad-except
                                    pytest.fail(
                                        "Failed to run finish() on {}: {}".format(
                                            fixturedef, exc
                                        ),
                                        pytrace=True,
                                    )
                                finally:
                                    self._finished = True

                        return partial(wrapped, fixturedef)

                    fixturedef.finish = wrapper(fixturedef.finish, fixturedef)
                    try:
                        fixturedef.finish.__wrapped__
                    except AttributeError:
                        fixturedef.finish.__wrapped__ = original_func
@pytest.hookimpl(trylast=True, hookwrapper=True)
def pytest_runtest_protocol(item, nextitem):
    """
    implements the runtest_setup/call/teardown protocol for
    the given test item, including capturing exceptions and calling
    reporting hooks.

    :arg item: test item for which the runtest protocol is performed.
    :arg nextitem: the scheduled-to-be-next test item (or None if this
                   is the end my friend). This argument is passed on to
                   :py:func:`pytest_runtest_teardown`.
    :return boolean: True if no further hook implementations should be invoked.

    Stops at first non-None result, see :ref:`firstresult`
    """
    request = item._request
    used_fixture_defs = []
    for fixture in item.fixturenames:
        if fixture not in item._fixtureinfo.name2fixturedefs:
            continue
        for fixturedef in reversed(item._fixtureinfo.name2fixturedefs[fixture]):
            if fixturedef.scope != "package":
                continue
            used_fixture_defs.append(fixturedef)
    try:
        # Run the test
        yield
    finally:
        for fixturedef in used_fixture_defs:
            fixturedef.finish(request, nextitem=nextitem)
    del request
    del used_fixture_defs


@pytest.hookimpl(tryfirst=True)
def pytest_sessionstart(session):
    log_server = session.config.pluginmanager.get_plugin(
        "salt-saltfactories-log-server"
    )
    log_server.start()


@pytest.hookimpl(trylast=True)
def pytest_sessionfinish(session):
    log_server = session.config.pluginmanager.get_plugin(
        "salt-saltfactories-log-server"
    )
    log_server.stop()


@pytest.fixture(scope="session")
def log_server():
    """
    Just overriding the fixture
    """


# <---- PyTest Tweaks ------------------------------------------------------------------------------------------------
# ----- Test Setup -------------------------------------------------------------------------------------------------->
@pytest.hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
    """
    Fixtures injection based on markers or test skips based on CLI arguments
    """
    integration_utils_tests_path = str(TESTS_DIR / "integration" / "utils")
    if (
        str(item.fspath).startswith(integration_utils_tests_path)
        and PRE_PYTEST_SKIP_OR_NOT is True
    ):
        item._skipped_by_mark = True
        pytest.skip(PRE_PYTEST_SKIP_REASON)

    if saltfactories.utils.compat.has_unittest_attr(item, "__slow_test__"):
        if item.config.getoption("--run-slow") is False:
            item._skipped_by_mark = True
            pytest.skip("Slow tests are disabled!")

    requires_salt_modules_marker = item.get_closest_marker("requires_salt_modules")
    if requires_salt_modules_marker is not None:
        required_salt_modules = requires_salt_modules_marker.args
        if len(required_salt_modules) == 1 and isinstance(
            required_salt_modules[0], (list, tuple, set)
        ):
            required_salt_modules = required_salt_modules[0]
        required_salt_modules = set(required_salt_modules)
        not_available_modules = check_required_sminion_attributes(
            "functions", required_salt_modules
        )

        if not_available_modules:
            item._skipped_by_mark = True
            if len(not_available_modules) == 1:
                pytest.skip(
                    "Salt module '{}' is not available".format(*not_available_modules)
                )
            pytest.skip(
                "Salt modules not available: {}".format(
                    ", ".join(not_available_modules)
                )
            )

    requires_salt_states_marker = item.get_closest_marker("requires_salt_states")
    if requires_salt_states_marker is not None:
        required_salt_states = requires_salt_states_marker.args
        if len(required_salt_states) == 1 and isinstance(
            required_salt_states[0], (list, tuple, set)
        ):
            required_salt_states = required_salt_states[0]
        required_salt_states = set(required_salt_states)
        not_available_states = check_required_sminion_attributes(
            "states", required_salt_states
        )

        if not_available_states:
            item._skipped_by_mark = True
            if len(not_available_states) == 1:
                pytest.skip(
                    "Salt state module '{}' is not available".format(
                        *not_available_states
                    )
                )
            pytest.skip(
                "Salt state modules not available: {}".format(
                    ", ".join(not_available_states)
                )
            )

    if salt.utils.platform.is_windows():
        unit_tests_paths = (
            str(TESTS_DIR / "unit"),
            str(PYTESTS_DIR / "unit"),
        )
        if not str(pathlib.Path(item.fspath).resolve()).startswith(unit_tests_paths):
            # Unit tests are whitelisted on Windows by default, so we only
            # need to check all other tests
            windows_whitelisted_marker = item.get_closest_marker("windows_whitelisted")
            if windows_whitelisted_marker is None:
                item._skipped_by_mark = True
                pytest.skip("Test is not whitelisted for Windows")


# <---- Test Setup ---------------------------------------------------------------------------------------------------
# ----- Test Groups Selection --------------------------------------------------------------------------------------->
def get_group_size_and_start(total_items, total_groups, group_id):
    """
    Calculate group size and start index.
    """
    base_size = total_items // total_groups
    rem = total_items % total_groups
    start = base_size * (group_id - 1) + min(group_id - 1, rem)
    size = base_size + 1 if group_id <= rem else base_size
    return (start, size)
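
# Worked example: splitting 10 collected tests into 3 groups yields sizes 4/3/3, i.e.
# get_group_size_and_start(10, 3, 1) == (0, 4), get_group_size_and_start(10, 3, 2) == (4, 3)
# and get_group_size_and_start(10, 3, 3) == (7, 3); the remainder is spread over the
# first groups.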
def get_group(items, total_groups, group_id):
    """
    Get the items from the passed in group based on group size.
    """
    if not 0 < group_id <= total_groups:
        raise ValueError("Invalid test-group argument")

    start, size = get_group_size_and_start(len(items), total_groups, group_id)
    selected = items[start : start + size]
    deselected = items[:start] + items[start + size :]
    assert len(selected) + len(deselected) == len(items)
    return selected, deselected


def groups_collection_modifyitems(config, items):
    group_count = config.getoption("test-group-count")
    group_id = config.getoption("test-group")

    if not group_count or not group_id:
        # We're not selecting tests using groups, don't do any filtering
        return

    total_items = len(items)

    tests_in_group, deselected = get_group(items, group_count, group_id)
    # Replace all items in the list
    items[:] = tests_in_group
    if deselected:
        config.hook.pytest_deselected(items=deselected)

    terminal_reporter = config.pluginmanager.get_plugin("terminalreporter")
    terminal_reporter.write(
        "Running test group #{} ({} tests)\n".format(group_id, len(items)), yellow=True,
    )


# <---- Test Groups Selection ----------------------------------------------------------------------------------------
# ----- Fixtures Overrides ------------------------------------------------------------------------------------------>
@pytest.fixture(scope="session")
def salt_factories_config(request):
    """
    Return a dictionary with the keyword arguments for SaltFactoriesManager
    """
    log_server = request.config.pluginmanager.get_plugin(
        "salt-saltfactories-log-server"
    )
    return {
        "executable": sys.executable,
        "code_dir": str(CODE_DIR),
        "inject_coverage": MAYBE_RUN_COVERAGE,
        "inject_sitecustomize": MAYBE_RUN_COVERAGE,
        "start_timeout": 120
        if (os.environ.get("JENKINS_URL") or os.environ.get("CI"))
        else 60,
        "log_server_host": log_server.log_host,
        "log_server_port": log_server.log_port,
        "log_server_level": log_server.log_level,
    }


# <---- Fixtures Overrides -------------------------------------------------------------------------------------------


# ----- From Filenames Test Selection ------------------------------------------------------------------------------->
def _match_to_test_file(match):
    parts = match.split(".")
    parts[-1] += ".py"
    return TESTS_DIR.joinpath(*parts).relative_to(CODE_DIR)
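
# Example: a filename_map.yml match entry such as "unit.modules.test_cmdmod"
# (a hypothetical entry) resolves to tests/unit/modules/test_cmdmod.py relative to the
# code checkout.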
def from_filenames_collection_modifyitems(config, items):
    from_filenames = config.getoption("--from-filenames")
    if not from_filenames:
        # Don't do anything
        return

    test_categories_paths = (
        (TESTS_DIR / "integration").relative_to(CODE_DIR),
        (TESTS_DIR / "multimaster").relative_to(CODE_DIR),
        (TESTS_DIR / "unit").relative_to(CODE_DIR),
        (PYTESTS_DIR / "e2e").relative_to(CODE_DIR),
        (PYTESTS_DIR / "functional").relative_to(CODE_DIR),
        (PYTESTS_DIR / "integration").relative_to(CODE_DIR),
        (PYTESTS_DIR / "unit").relative_to(CODE_DIR),
    )

    test_module_paths = set()
    from_filenames_listing = set()
    for path in [pathlib.Path(path.strip()) for path in from_filenames.split(",")]:
        if path.is_absolute():
            # In this case, this path is considered to be a file containing a line separated list
            # of files to consider
            with salt.utils.files.fopen(str(path)) as rfh:
                for line in rfh:
                    line_path = pathlib.Path(line.strip())
                    if not line_path.exists():
                        continue
                    from_filenames_listing.add(line_path)
            continue
        from_filenames_listing.add(path)

    filename_map = yaml.deserialize((TESTS_DIR / "filename_map.yml").read_text())
    # Let's add the match all rule
    for rule, matches in filename_map.items():
        if rule == "*":
            for match in matches:
                test_module_paths.add(_match_to_test_file(match))
            break

    # Let's now go through the list of files gathered
    for filename in from_filenames_listing:
        if str(filename).startswith("tests/"):
            # Tests in the listing don't require additional matching and will be added to the
            # list of tests to run
            test_module_paths.add(filename)
            continue
        if filename.name == "setup.py" or str(filename).startswith("salt/"):
            if filename.name == "__init__.py":
                # No direct matching
                continue
            # Now let's try a direct match between the passed file and possible test modules
            for test_categories_path in test_categories_paths:
                test_module_path = test_categories_path / "test_{}".format(
                    filename.name
                )
                if test_module_path.is_file():
                    test_module_paths.add(test_module_path)
                    continue
            # Do we have an entry in tests/filename_map.yml
            for rule, matches in filename_map.items():
                if rule == "*":
                    continue
                elif "|" in rule:
                    # This is regex
                    if re.match(rule, str(filename)):
                        for match in matches:
                            test_module_paths.add(_match_to_test_file(match))
                elif "*" in rule or "\\" in rule:
                    # Glob matching
                    for filerule in CODE_DIR.glob(rule):
                        if not filerule.exists():
                            continue
                        filerule = filerule.relative_to(CODE_DIR)
                        if filerule != filename:
                            continue
                        for match in matches:
                            test_module_paths.add(_match_to_test_file(match))
                else:
                    if str(filename) != rule:
                        continue
                    # Direct file paths as rules
                    filerule = pathlib.Path(rule)
                    if not filerule.exists():
                        continue
                    for match in matches:
                        test_module_paths.add(_match_to_test_file(match))
            continue
        else:
            log.debug("Don't know what to do with path %s", filename)

    selected = []
    deselected = []
    for item in items:
        itempath = pathlib.Path(str(item.fspath)).resolve().relative_to(CODE_DIR)
        if itempath in test_module_paths:
            selected.append(item)
        else:
            deselected.append(item)
    items[:] = selected
    if deselected:
        config.hook.pytest_deselected(items=deselected)


# <---- From Filenames Test Selection --------------------------------------------------------------------------------
# ----- Custom Fixtures --------------------------------------------------------------------------------------------->
@pytest.fixture(scope="session")
def reap_stray_processes():
    # Run tests
    yield

    children = psutil.Process(os.getpid()).children(recursive=True)
    if not children:
        log.info("No stray processes found")
        return

    def on_terminate(proc):
        log.debug("Process %s terminated with exit code %s", proc, proc.returncode)

    if children:
        # Reverse the order, siblings first, parents afterwards
        children.reverse()
        log.warning(
            "Test suite left %d stray processes running. Killing those processes:\n%s",
            len(children),
            pprint.pformat(children),
        )

        _, alive = psutil.wait_procs(children, timeout=3, callback=on_terminate)
        for child in alive:
            try:
                child.kill()
            except psutil.NoSuchProcess:
                continue

        _, alive = psutil.wait_procs(alive, timeout=3, callback=on_terminate)
        if alive:
            # Give up
            for child in alive:
                log.warning(
                    "Process %s survived SIGKILL, giving up:\n%s",
                    child,
                    pprint.pformat(child.as_dict()),
                )


@pytest.fixture(scope="session")
def grains(request):
    sminion = create_sminion()
    return sminion.opts["grains"].copy()


# <---- Custom Fixtures ----------------------------------------------------------------------------------------------