# -*- coding: utf-8 -*-
"""
    :codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
    :codeauthor: Joe Julian <me@joejulian.name>
"""

# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals

# Import Salt Libs
import salt.modules.glusterfs as glusterfs
from salt.exceptions import SaltInvocationError

# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
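

# Fixture data: canned XML results from gluster CLI transactions, grouped by
# the GlusterFS release (3.4 vs 3.7) that produced them.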
class GlusterResults(object):
    """ This class holds the xml results from gluster cli transactions """

    class v34(object):
        """ This is for version 3.4 results """

        class list_peers(object):
            """ results from "peer status" """

        class peer_probe(object):
            fail_cant_connect = fail_bad_hostname = "\n".join(
                [
                    '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
                    "<cliOutput>",
                    " <opRet>-1</opRet>",
                    " <opErrno>107</opErrno>",
                    " <opErrstr>Probe returned with unknown errno 107</opErrstr>",
                    "</cliOutput>",
                    "",
                ]
            )
            success_self = "\n".join(
                [
                    '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
                    " <cliOutput>",
                    " <opRet>0</opRet>",
                    " <opErrno>1</opErrno>",
                    " <opErrstr>(null)</opErrstr>",
                    " <output>success: on localhost not needed</output>",
                    "</cliOutput>",
                    "",
                ]
            )
            success_other = "\n".join(
                [
                    '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
                    " <cliOutput>",
                    " <opRet>0</opRet>",
                    " <opErrno>0</opErrno>",
                    " <opErrstr>(null)</opErrstr>",
                    " <output>success</output>",
                    "</cliOutput>",
                    "",
                ]
            )
            success_hostname_after_ip = success_other
            success_ip_after_hostname = success_other
            success_already_peer = {
                "ip": "\n".join(
                    [
                        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
                        " <cliOutput>",
                        " <opRet>0</opRet>",
                        " <opErrno>2</opErrno>",
                        " <opErrstr>(null)</opErrstr>",
                        " <output>success: host 10.0.0.2 port 24007 already in peer list</output>",
                        "</cliOutput>",
                        "",
                    ]
                ),
                "hostname": "\n".join(
                    [
                        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
                        " <cliOutput>",
                        " <opRet>0</opRet>",
                        " <opErrno>2</opErrno>",
                        " <opErrstr>(null)</opErrstr>",
                        " <output>success: host server2 port 24007 already in peer list</output>",
                        "</cliOutput>",
                        "",
                    ]
                ),
            }
            success_reverse_already_peer = {
                "ip": "\n".join(
                    [
                        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
                        " <cliOutput>",
                        " <opRet>0</opRet>",
                        " <opErrno>2</opErrno>",
                        " <opErrstr>(null)</opErrstr>",
                        " <output>success: host 10.0.0.1 port 24007 already in peer list</output>",
                        "</cliOutput>",
                        "",
                    ]
                ),
                "hostname": "\n".join(
                    [
                        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
                        " <cliOutput>",
                        " <opRet>0</opRet>",
                        " <opErrno>2</opErrno>",
                        " <opErrstr>(null)</opErrstr>",
                        " <output>success: host server1 port 24007 already in peer list</output>",
                        "</cliOutput>",
                        "",
                    ]
                ),
            }
            success_first_hostname_from_second_first_time = success_other
            success_first_hostname_from_second_second_time = success_reverse_already_peer[
                "hostname"
            ]
            success_first_ip_from_second_first_time = success_reverse_already_peer["ip"]
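
    # GlusterFS 3.7 fixtures: same probe scenarios as v34, but the CLI now
    # emits empty <opErrstr/> / <output/> elements instead of "(null)".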
    class v37(object):
        class peer_probe(object):
            fail_cant_connect = fail_bad_hostname = "\n".join(
                [
                    '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
                    "<cliOutput>",
                    " <opRet>-1</opRet>",
                    " <opErrno>107</opErrno>",
                    " <opErrstr>Probe returned with Transport endpoint is not connected</opErrstr>",
                    "</cliOutput>",
                    "",
                ]
            )
            success_self = "\n".join(
                [
                    '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
                    " <cliOutput>",
                    " <opRet>0</opRet>",
                    " <opErrno>1</opErrno>",
                    " <opErrstr/>",
                    " <output>Probe on localhost not needed</output>",
                    "</cliOutput>",
                    "",
                ]
            )
            success_other = "\n".join(
                [
                    '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
                    " <cliOutput>",
                    " <opRet>0</opRet>",
                    " <opErrno>0</opErrno>",
                    " <opErrstr/>",
                    " <output/>",
                    "</cliOutput>",
                    "",
                ]
            )
            success_hostname_after_ip = success_other
            success_ip_after_hostname = success_other
            success_already_peer = {
                "ip": "\n".join(
                    [
                        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
                        " <cliOutput>",
                        " <opRet>0</opRet>",
                        " <opErrno>2</opErrno>",
                        " <opErrstr/>",
                        " <output>Host 10.0.0.2 port 24007 already in peer list</output>",
                        "</cliOutput>",
                        "",
                    ]
                ),
                "hostname": "\n".join(
                    [
                        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
                        " <cliOutput>",
                        " <opRet>0</opRet>",
                        " <opErrno>2</opErrno>",
                        " <opErrstr/>",
                        " <output>Host server2 port 24007 already in peer list</output>",
                        "</cliOutput>",
                        "",
                    ]
                ),
            }
            success_reverse_already_peer = {
                "ip": "\n".join(
                    [
                        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
                        " <cliOutput>",
                        " <opRet>0</opRet>",
                        " <opErrno>2</opErrno>",
                        " <opErrstr/>",
                        " <output>Host 10.0.0.1 port 24007 already in peer list</output>",
                        "</cliOutput>",
                        "",
                    ]
                ),
                "hostname": "\n".join(
                    [
                        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
                        " <cliOutput>",
                        " <opRet>0</opRet>",
                        " <opErrno>2</opErrno>",
                        " <opErrstr/>",
                        " <output>Host server1 port 24007 already in peer list</output>",
                        "</cliOutput>",
                        "",
                    ]
                ),
            }
            success_first_hostname_from_second_first_time = success_reverse_already_peer[
                "hostname"
            ]
            success_first_ip_from_second_first_time = success_other
            success_first_ip_from_second_second_time = success_reverse_already_peer[
                "ip"
            ]
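

# Module-level XML fixtures; these are the ones the test cases below feed to
# the mocked cmd.run.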
xml_peer_present = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
  <opRet>0</opRet>
  <peer>
    <uuid>uuid1</uuid>
    <hostname>node02</hostname>
    <hostnames>
      <hostname>node02.domain.dom</hostname>
      <hostname>10.0.0.2</hostname>
    </hostnames>
  </peer>
</cliOutput>
"""

xml_volume_present = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
  <opRet>0</opRet>
  <volList>
    <volume>Newvolume1</volume>
    <volume>Newvolume2</volume>
  </volList>
</cliOutput>
"""

xml_volume_absent = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
  <opRet>0</opRet>
  <volList>
    <count>0</count>
  </volList>
</cliOutput>
"""

xml_volume_status = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
  <opRet>0</opRet>
  <volStatus>
    <volumes>
      <volume>
        <volName>myvol1</volName>
        <nodeCount>3</nodeCount>
        <node>
          <hostname>node01</hostname>
          <path>/tmp/foo</path>
          <peerid>830700d7-0684-497c-a12c-c02e365fb90b</peerid>
          <status>1</status>
          <port>49155</port>
          <ports>
            <tcp>49155</tcp>
            <rdma>N/A</rdma>
          </ports>
          <pid>2470</pid>
        </node>
        <node>
          <hostname>NFS Server</hostname>
          <path>localhost</path>
          <peerid>830700d7-0684-497c-a12c-c02e365fb90b</peerid>
          <status>0</status>
          <port>N/A</port>
          <ports>
            <tcp>N/A</tcp>
            <rdma>N/A</rdma>
          </ports>
          <pid>-1</pid>
        </node>
        <tasks/>
      </volume>
    </volumes>
  </volStatus>
</cliOutput>
"""

xml_volume_info_running = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
  <opRet>0</opRet>
  <volInfo>
    <volumes>
      <volume>
        <name>myvol1</name>
        <id>f03c2180-cf55-4f77-ae0b-3650f57c82a1</id>
        <status>1</status>
        <statusStr>Started</statusStr>
        <brickCount>1</brickCount>
        <distCount>1</distCount>
        <stripeCount>1</stripeCount>
        <replicaCount>1</replicaCount>
        <disperseCount>0</disperseCount>
        <redundancyCount>0</redundancyCount>
        <type>0</type>
        <typeStr>Distribute</typeStr>
        <transport>0</transport>
        <bricks>
          <brick uuid="830700d7-0684-497c-a12c-c02e365fb90b">node01:/tmp/foo<name>node01:/tmp/foo</name><hostUuid>830700d7-0684-497c-a12c-c02e365fb90b</hostUuid></brick>
        </bricks>
        <optCount>1</optCount>
        <options>
          <option>
            <name>performance.readdir-ahead</name>
            <value>on</value>
          </option>
        </options>
      </volume>
      <count>1</count>
    </volumes>
  </volInfo>
</cliOutput>
"""

xml_volume_info_stopped = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
  <opRet>0</opRet>
  <volInfo>
    <volumes>
      <volume>
        <name>myvol1</name>
        <status>1</status>
      </volume>
    </volumes>
  </volInfo>
</cliOutput>
"""

xml_peer_probe_success = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
  <opRet>0</opRet>
  <opErrno>0</opErrno>
  <opErrstr/>
  <output/>
</cliOutput>
"""

xml_peer_probe_already_member = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
  <opRet>0</opRet>
  <opErrno>2</opErrno>
  <opErrstr/>
  <output>Host salt port 24007 already in peer list</output>
</cliOutput>
"""

xml_peer_probe_localhost = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
  <opRet>0</opRet>
  <opErrno>1</opErrno>
  <opErrstr/>
  <output>Probe on localhost not needed</output>
</cliOutput>
"""

xml_peer_probe_fail_cant_connect = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
  <opRet>-1</opRet>
  <opErrno>107</opErrno>
  <opErrstr>Probe returned with Transport endpoint is not connected</opErrstr>
</cliOutput>
"""

xml_command_success = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
  <opRet>0</opRet>
</cliOutput>
"""

xml_command_fail = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
  <opRet>-1</opRet>
  <opErrno>0</opErrno>
  <opErrstr>Command Failed</opErrstr>
</cliOutput>
"""
xml_op_version_37 = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
  <opRet>0</opRet>
  <opErrno>0</opErrno>
  <opErrstr/>
  <volGetopts>
    <count>1</count>
    <Option>cluster.op-version</Option>
    <Value>30707</Value>
  </volGetopts>
</cliOutput>
"""

xml_op_version_312 = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
  <opRet>0</opRet>
  <opErrno>0</opErrno>
  <opErrstr/>
  <volGetopts>
    <count>1</count>
    <Opt>
      <Option>cluster.op-version</Option>
      <Value>30707</Value>
    </Opt>
  </volGetopts>
</cliOutput>
"""

xml_max_op_version = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
  <opRet>0</opRet>
  <opErrno>0</opErrno>
  <opErrstr/>
  <volGetopts>
    <count>1</count>
    <Opt>
      <Option>cluster.max-op-version</Option>
      <Value>31200</Value>
    </Opt>
  </volGetopts>
</cliOutput>
"""

xml_set_op_version_failure = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
  <opRet>-1</opRet>
  <opErrno>30800</opErrno>
  <opErrstr>Required op-version (30707) should not be equal or lower than current cluster op-version (30707).</opErrstr>
  <cliOp>volSet</cliOp>
  <output>Set volume unsuccessful</output>
</cliOutput>
"""

xml_set_op_version_success = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
  <opRet>0</opRet>
  <opErrno>0</opErrno>
  <opErrstr/>
  <cliOp>volSet</cliOp>
  <output>Set volume successful</output>
</cliOutput>
"""


class GlusterfsTestCase(TestCase, LoaderModuleMockMixin):
    """
    Test cases for salt.modules.glusterfs
    """
    def setup_loader_modules(self):
        return {glusterfs: {}}

    maxDiff = None

    # 'peer_status' function tests: 1

    def test_peer_status(self):
        """
        Test gluster peer status
        """
        mock_run = MagicMock(return_value=xml_peer_present)
        with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}):
            self.assertDictEqual(
                glusterfs.peer_status(),
                {"uuid1": {"hostnames": ["node02", "node02.domain.dom", "10.0.0.2"]}},
            )

        mock_run = MagicMock(return_value=xml_command_success)
        with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}):
            self.assertDictEqual(glusterfs.peer_status(), {})

    # 'peer' function tests: 1

    def test_peer(self):
        """
        Test if gluster peer call is successful.
        """
        mock_run = MagicMock()
        with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}):
            mock_run.return_value = xml_peer_probe_already_member
            self.assertTrue(glusterfs.peer("salt"))

            mock_run.return_value = xml_peer_probe_localhost
            self.assertTrue(glusterfs.peer("salt"))

            mock_run.return_value = xml_peer_probe_fail_cant_connect
            self.assertFalse(glusterfs.peer("salt"))

    # 'create_volume' function tests: 1

    def test_create_volume(self):
        """
        Test if it creates a glusterfs volume.
        """
        mock_run = MagicMock(return_value=xml_command_success)
        with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}):
            self.assertRaises(
                SaltInvocationError, glusterfs.create_volume, "newvolume", "host1:brick"
            )

            self.assertRaises(
                SaltInvocationError, glusterfs.create_volume, "newvolume", "host1/brick"
            )

            self.assertFalse(mock_run.called)

            mock_start_volume = MagicMock(return_value=True)
            with patch.object(glusterfs, "start_volume", mock_start_volume):
                # Create, do not start
                self.assertTrue(glusterfs.create_volume("newvolume", "host1:/brick"))
                self.assertFalse(mock_start_volume.called)

                # Create and start
                self.assertTrue(
                    glusterfs.create_volume("newvolume", "host1:/brick", start=True)
                )
                self.assertTrue(mock_start_volume.called)

                mock_start_volume.return_value = False
                # Create and fail start
                self.assertFalse(
                    glusterfs.create_volume("newvolume", "host1:/brick", start=True)
                )

            mock_run.return_value = xml_command_fail
            self.assertFalse(
                glusterfs.create_volume(
                    "newvolume", "host1:/brick", True, True, True, "tcp", True
                )
            )

    # 'list_volumes' function tests: 1

    def test_list_volumes(self):
        """
        Test if it lists configured volumes.
        """
        mock = MagicMock(return_value=xml_volume_absent)
        with patch.dict(glusterfs.__salt__, {"cmd.run": mock}):
            self.assertListEqual(glusterfs.list_volumes(), [])

        mock = MagicMock(return_value=xml_volume_present)
        with patch.dict(glusterfs.__salt__, {"cmd.run": mock}):
            self.assertListEqual(glusterfs.list_volumes(), ["Newvolume1", "Newvolume2"])

    # 'status' function tests: 1

    def test_status(self):
        """
        Test if it checks the status of a gluster volume.
        """
        mock_run = MagicMock(return_value=xml_command_fail)
        with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}):
            self.assertIsNone(glusterfs.status("myvol1"))

        res = {
            "bricks": {
                "node01:/tmp/foo": {
                    "host": "node01",
                    "hostname": "node01",
                    "online": True,
                    "path": "/tmp/foo",
                    "peerid": "830700d7-0684-497c-a12c-c02e365fb90b",
                    "pid": "2470",
                    "port": "49155",
                    "ports": {"rdma": "N/A", "tcp": "49155"},
                    "status": "1",
                }
            },
            "healers": {},
            "nfs": {
                "node01": {
                    "host": "NFS Server",
                    "hostname": "NFS Server",
                    "online": False,
                    "path": "localhost",
                    "peerid": "830700d7-0684-497c-a12c-c02e365fb90b",
                    "pid": "-1",
                    "port": "N/A",
                    "ports": {"rdma": "N/A", "tcp": "N/A"},
                    "status": "0",
                }
            },
        }
        mock = MagicMock(return_value=xml_volume_status)
        with patch.dict(glusterfs.__salt__, {"cmd.run": mock}):
            self.assertDictEqual(glusterfs.status("myvol1"), res)

    # 'info' function tests: 1

    def test_volume_info(self):
        """
        Test if it returns the volume info.
        """
        res = {
            "myvol1": {
                "brickCount": "1",
                "bricks": {
                    "brick1": {
                        "hostUuid": "830700d7-0684-497c-a12c-c02e365fb90b",
                        "path": "node01:/tmp/foo",
                        "uuid": "830700d7-0684-497c-a12c-c02e365fb90b",
                    }
                },
                "disperseCount": "0",
                "distCount": "1",
                "id": "f03c2180-cf55-4f77-ae0b-3650f57c82a1",
                "name": "myvol1",
                "optCount": "1",
                "options": {"performance.readdir-ahead": "on"},
                "redundancyCount": "0",
                "replicaCount": "1",
                "status": "1",
                "statusStr": "Started",
                "stripeCount": "1",
                "transport": "0",
                "type": "0",
                "typeStr": "Distribute",
            }
        }
        mock = MagicMock(return_value=xml_volume_info_running)
        with patch.dict(glusterfs.__salt__, {"cmd.run": mock}):
            self.assertDictEqual(glusterfs.info("myvol1"), res)

    # 'start_volume' function tests: 1

    def test_start_volume(self):
        """
        Test if it starts a gluster volume.
        """
        # Stopped volume
        mock_info = MagicMock(return_value={"Newvolume1": {"status": "0"}})
        with patch.object(glusterfs, "info", mock_info):
            mock_run = MagicMock(return_value=xml_command_success)
            with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}):
                self.assertEqual(glusterfs.start_volume("Newvolume1"), True)
                self.assertEqual(glusterfs.start_volume("nonExisting"), False)
            mock_run = MagicMock(return_value=xml_command_fail)
            with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}):
                self.assertEqual(glusterfs.start_volume("Newvolume1"), False)

        # Started volume
        mock_info = MagicMock(return_value={"Newvolume1": {"status": "1"}})
        with patch.object(glusterfs, "info", mock_info):
            mock_run = MagicMock(return_value=xml_command_success)
            with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}):
                self.assertEqual(glusterfs.start_volume("Newvolume1", force=True), True)
            mock_run = MagicMock(return_value=xml_command_fail)
            with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}):
                # cmd.run should not be called for already running volume:
                self.assertEqual(glusterfs.start_volume("Newvolume1"), True)
                # except when forcing:
                self.assertEqual(
                    glusterfs.start_volume("Newvolume1", force=True), False
                )

    # 'stop_volume' function tests: 1

    def test_stop_volume(self):
        """
        Test if it stops a gluster volume.
        """
        # Stopped volume
        mock_info = MagicMock(return_value={"Newvolume1": {"status": "0"}})
        with patch.object(glusterfs, "info", mock_info):
            mock_run = MagicMock(return_value=xml_command_success)
            with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}):
                self.assertEqual(glusterfs.stop_volume("Newvolume1"), True)
                self.assertEqual(glusterfs.stop_volume("nonExisting"), False)
            mock_run = MagicMock(return_value=xml_command_fail)
            with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}):
                # cmd.run should not be called for already stopped volume:
                self.assertEqual(glusterfs.stop_volume("Newvolume1"), True)

        # Started volume
        mock_info = MagicMock(return_value={"Newvolume1": {"status": "1"}})
        with patch.object(glusterfs, "info", mock_info):
            mock_run = MagicMock(return_value=xml_command_success)
            with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}):
                self.assertEqual(glusterfs.stop_volume("Newvolume1"), True)
                self.assertEqual(glusterfs.stop_volume("nonExisting"), False)
            mock_run = MagicMock(return_value=xml_command_fail)
            with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}):
                self.assertEqual(glusterfs.stop_volume("Newvolume1"), False)

    # 'delete_volume' function tests: 1

    def test_delete_volume(self):
        """
        Test if it deletes a gluster volume.
        """
        mock_info = MagicMock(return_value={"Newvolume1": {"status": "1"}})
        with patch.object(glusterfs, "info", mock_info):
            # volume doesn't exist
            self.assertFalse(glusterfs.delete_volume("Newvolume3"))

            mock_stop_volume = MagicMock(return_value=True)
            mock_run = MagicMock(return_value=xml_command_success)
            with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}):
                with patch.object(glusterfs, "stop_volume", mock_stop_volume):
                    # volume exists, should not be stopped, and is started
                    self.assertFalse(glusterfs.delete_volume("Newvolume1", False))
                    self.assertFalse(mock_run.called)
                    self.assertFalse(mock_stop_volume.called)

                    # volume exists, should be stopped, and is started
                    self.assertTrue(glusterfs.delete_volume("Newvolume1"))
                    self.assertTrue(mock_run.called)
                    self.assertTrue(mock_stop_volume.called)

        # volume exists and isn't started
        mock_info = MagicMock(return_value={"Newvolume1": {"status": "2"}})
        with patch.object(glusterfs, "info", mock_info):
            mock_run = MagicMock(return_value=xml_command_success)
            with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}):
                self.assertTrue(glusterfs.delete_volume("Newvolume1"))
                mock_run.return_value = xml_command_fail
                self.assertFalse(glusterfs.delete_volume("Newvolume1"))

    # 'add_volume_bricks' function tests: 1

    def test_add_volume_bricks(self):
        """
        Test if it adds brick(s) to an existing volume.
        """
        mock_info = MagicMock(
            return_value={
                "Newvolume1": {
                    "status": "1",
                    "bricks": {
                        "brick1": {"path": "host:/path1"},
                        "brick2": {"path": "host:/path2"},
                    },
                }
            }
        )
        with patch.object(glusterfs, "info", mock_info):
            mock_run = MagicMock(return_value=xml_command_success)
            with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}):
                # Volume does not exist
                self.assertFalse(glusterfs.add_volume_bricks("nonExisting", ["bricks"]))

                # Brick already exists
                self.assertTrue(
                    glusterfs.add_volume_bricks("Newvolume1", ["host:/path2"])
                )

                # Already existing brick as a string
                self.assertTrue(
                    glusterfs.add_volume_bricks("Newvolume1", "host:/path2")
                )
                self.assertFalse(mock_run.called)

                # A new brick:
                self.assertTrue(
                    glusterfs.add_volume_bricks("Newvolume1", ["host:/new1"])
                )
                self.assertTrue(mock_run.called)

                # Gluster call fails
                mock_run.return_value = xml_command_fail
                self.assertFalse(
                    glusterfs.add_volume_bricks("Newvolume1", ["new:/path"])
                )

    # 'get_op_version' function tests: 1

    def test_get_op_version(self):
        """
        Test retrieving the glusterfs op-version
        """
        # Test with xml output structure from v3.7
        mock_run = MagicMock(return_value=xml_op_version_37)
        with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}):
            self.assertEqual(glusterfs.get_op_version("test"), "30707")

        # Test with xml output structure from v3.12
        mock_run = MagicMock(return_value=xml_op_version_312)
        with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}):
            self.assertEqual(glusterfs.get_op_version("test"), "30707")

    # 'get_max_op_version' function tests: 1

    def test_get_max_op_version(self):
        """
        Test retrieving the glusterfs max-op-version.
        """
        mock_xml = MagicMock(return_value=xml_max_op_version)
        mock_version = MagicMock(return_value="glusterfs 3.9.1")

        with patch.dict(glusterfs.__salt__, {"cmd.run": mock_version}):
            self.assertFalse(glusterfs.get_max_op_version()[0])

        with patch.object(glusterfs, "_get_version", return_value=(3, 12, 0)):
            with patch.dict(glusterfs.__salt__, {"cmd.run": mock_xml}):
                self.assertEqual(glusterfs.get_max_op_version(), "31200")

    # 'set_op_version' function tests: 1

    def test_set_op_version(self):
        """
        Test setting the glusterfs op-version
        """
        mock_failure = MagicMock(return_value=xml_set_op_version_failure)
        mock_success = MagicMock(return_value=xml_set_op_version_success)

        with patch.dict(glusterfs.__salt__, {"cmd.run": mock_failure}):
            self.assertFalse(glusterfs.set_op_version(30707)[0])

        with patch.dict(glusterfs.__salt__, {"cmd.run": mock_success}):
            self.assertEqual(glusterfs.set_op_version(31200), "Set volume successful")