create-external-cluster-resources.py

  1. """
  2. Copyright 2020 The Rook Authors. All rights reserved.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. """
import errno
import sys
import json
import argparse
import re
import subprocess
import hmac
from hashlib import sha1 as sha
from os import linesep as LINESEP
from os import path
from email.utils import formatdate

import requests
from requests.auth import AuthBase
py3k = False
if sys.version_info.major >= 3:
    py3k = True
    import urllib.parse
    from ipaddress import ip_address, IPv4Address

# Python 2 has no ModuleNotFoundError; alias it to ImportError so that the
# 'except ModuleNotFoundError' clauses below work on both major versions
ModuleNotFoundError = ImportError
try:
    import rados
except ModuleNotFoundError as noModErr:
    print(f"Error: {noModErr}\nExiting the script...")
    sys.exit(1)

try:
    import rbd
except ModuleNotFoundError as noModErr:
    print(f"Error: {noModErr}\nExiting the script...")
    sys.exit(1)

try:
    # for 2.7.x
    from StringIO import StringIO
except ModuleNotFoundError:
    # for 3.x
    from io import StringIO

try:
    # for 2.7.x
    from urlparse import urlparse
except ModuleNotFoundError:
    # for 3.x
    from urllib.parse import urlparse

try:
    from base64 import encodestring
except ImportError:
    # 'encodestring' was removed in Python 3.9; 'encodebytes' is its replacement
    from base64 import encodebytes as encodestring
class ExecutionFailureException(Exception):
    pass
################################################
################## DummyRados ##################
################################################
# this is mainly for testing and could be used where 'rados' is not available
class DummyRados(object):
    def __init__(self):
        self.return_val = 0
        self.err_message = ""
        self.state = "connected"
        self.cmd_output_map = {}
        self.cmd_names = {}
        self._init_cmd_output_map()
        self.dummy_host_ip_map = {}

    def _init_cmd_output_map(self):
        json_file_name = "test-data/ceph-status-out"
        script_dir = path.abspath(path.dirname(__file__))
        ceph_status_str = ""
        with open(
            path.join(script_dir, json_file_name), mode="r", encoding="UTF-8"
        ) as json_file:
            ceph_status_str = json_file.read()
        self.cmd_names["fs ls"] = """{"format": "json", "prefix": "fs ls"}"""
        self.cmd_names[
            "quorum_status"
        ] = """{"format": "json", "prefix": "quorum_status"}"""
        self.cmd_names[
            "mgr services"
        ] = """{"format": "json", "prefix": "mgr services"}"""
        # all the commands and their output
        self.cmd_output_map[
            self.cmd_names["fs ls"]
        ] = """[{"name":"myfs","metadata_pool":"myfs-metadata","metadata_pool_id":2,"data_pool_ids":[3],"data_pools":["myfs-replicated"]}]"""
        self.cmd_output_map[
            self.cmd_names["quorum_status"]
        ] = """{"election_epoch":3,"quorum":[0],"quorum_names":["a"],"quorum_leader_name":"a","quorum_age":14385,"features":{"quorum_con":"4540138292836696063","quorum_mon":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"]},"monmap":{"epoch":1,"fsid":"af4e1673-0b72-402d-990a-22d2919d0f1c","modified":"2020-05-07T03:36:39.918035Z","created":"2020-05-07T03:36:39.918035Z","min_mon_release":15,"min_mon_release_name":"octopus","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"10.110.205.174:3300","nonce":0},{"type":"v1","addr":"10.110.205.174:6789","nonce":0}]},"addr":"10.110.205.174:6789/0","public_addr":"10.110.205.174:6789/0","priority":0,"weight":0}]}}"""
        self.cmd_output_map[
            self.cmd_names["mgr services"]
        ] = """{"dashboard":"https://ceph-dashboard:8443/","prometheus":"http://ceph-dashboard-db:9283/"}"""
        self.cmd_output_map[
            """{"caps": ["mon", "allow r, allow command quorum_status", "osd", "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}"""
        ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r, allow command quorum_status","osd":"allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"}}]"""
        self.cmd_output_map[
            """{"caps": ["mon", "profile rbd, allow command 'osd blocklist'", "osd", "profile rbd"], "entity": "client.csi-rbd-node", "format": "json", "prefix": "auth get-or-create"}"""
        ] = """[{"entity":"client.csi-rbd-node","key":"AQBOgrNeHbK1AxAAubYBeV8S1U/GPzq5SVeq6g==","caps":{"mon":"profile rbd, allow command 'osd blocklist'","osd":"profile rbd"}}]"""
        self.cmd_output_map[
            """{"caps": ["mon", "profile rbd, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "profile rbd"], "entity": "client.csi-rbd-provisioner", "format": "json", "prefix": "auth get-or-create"}"""
        ] = """[{"entity":"client.csi-rbd-provisioner","key":"AQBNgrNe1geyKxAA8ekViRdE+hss5OweYBkwNg==","caps":{"mgr":"allow rw","mon":"profile rbd, allow command 'osd blocklist'","osd":"profile rbd"}}]"""
        self.cmd_output_map[
            """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs *=*", "mds", "allow rw"], "entity": "client.csi-cephfs-node", "format": "json", "prefix": "auth get-or-create"}"""
        ] = """[{"entity":"client.csi-cephfs-node","key":"AQBOgrNeENunKxAAPCmgE7R6G8DcXnaJ1F32qg==","caps":{"mds":"allow rw","mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs *=*"}}]"""
        self.cmd_output_map[
            """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*"], "entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth get-or-create"}"""
        ] = """[{"entity":"client.csi-cephfs-provisioner","key":"AQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs metadata=*"}}]"""
        self.cmd_output_map[
            """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*"], "entity": "client.csi-cephfs-provisioner-openshift-storage", "format": "json", "prefix": "auth get-or-create"}"""
        ] = """[{"entity":"client.csi-cephfs-provisioner-openshift-storage","key":"BQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs metadata=*"}}]"""
        self.cmd_output_map[
            """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=myfs"], "entity": "client.csi-cephfs-provisioner-openshift-storage-myfs", "format": "json", "prefix": "auth get-or-create"}"""
        ] = """[{"entity":"client.csi-cephfs-provisioner-openshift-storage-myfs","key":"CQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs metadata=myfs"}}]"""
        self.cmd_output_map[
            """{"caps": ["mon", "allow r, allow command quorum_status, allow command version", "mgr", "allow command config", "osd", "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}"""
        ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]"""
        self.cmd_output_map[
            """{"format": "json", "prefix": "mgr services"}"""
        ] = """{"dashboard": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:7000/", "prometheus": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:9283/"}"""
        self.cmd_output_map[
            """{"entity": "client.healthchecker", "format": "json", "prefix": "auth get"}"""
        ] = """{"dashboard": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:7000/", "prometheus": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:9283/"}"""
        self.cmd_output_map[
            """{"entity": "client.healthchecker", "format": "json", "prefix": "auth get"}"""
        ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]"""
        self.cmd_output_map[
            """{"entity": "client.csi-cephfs-node", "format": "json", "prefix": "auth get"}"""
        ] = """[]"""
        self.cmd_output_map[
            """{"entity": "client.csi-rbd-node", "format": "json", "prefix": "auth get"}"""
        ] = """[]"""
        self.cmd_output_map[
            """{"entity": "client.csi-rbd-provisioner", "format": "json", "prefix": "auth get"}"""
        ] = """[]"""
        self.cmd_output_map[
            """{"entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth get"}"""
        ] = """[]"""
        self.cmd_output_map[
            """{"entity": "client.csi-cephfs-provisioner-openshift-storage", "format": "json", "prefix": "auth get"}"""
        ] = """[]"""
        self.cmd_output_map[
            """{"entity": "client.csi-cephfs-provisioner-openshift-storage-myfs", "format": "json", "prefix": "auth get"}"""
        ] = """[]"""
        self.cmd_output_map[
            """{"entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth get"}"""
        ] = """[{"entity":"client.csi-cephfs-provisioner","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r", "mgr":"allow rw", "osd":"allow rw tag cephfs metadata=*"}}]"""
        self.cmd_output_map[
            """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*"], "entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth caps"}"""
        ] = """[{"entity":"client.csi-cephfs-provisioner","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r, allow command 'osd blocklist'", "mgr":"allow rw", "osd":"allow rw tag cephfs metadata=*"}}]"""
        self.cmd_output_map['{"format": "json", "prefix": "status"}'] = ceph_status_str
    def shutdown(self):
        pass

    def get_fsid(self):
        return "af4e1673-0b72-402d-990a-22d2919d0f1c"

    def conf_read_file(self):
        pass

    def connect(self):
        pass

    def pool_exists(self, pool_name):
        return True

    def mon_command(self, cmd, out):
        json_cmd = json.loads(cmd)
        json_cmd_str = json.dumps(json_cmd, sort_keys=True)
        cmd_output = self.cmd_output_map[json_cmd_str]
        return self.return_val, cmd_output, str(self.err_message.encode("utf-8"))

    def _convert_hostname_to_ip(self, host_name):
        # escape the dots so that only dotted-quad addresses match,
        # not any four dot-separated tokens
        ip_reg_x = re.compile(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}")
        # if the provided host is directly an IP address, return the same
        if ip_reg_x.match(host_name):
            return host_name
        import random

        host_ip = self.dummy_host_ip_map.get(host_name, "")
        if not host_ip:
            host_ip = f"172.9.{random.randint(0, 254)}.{random.randint(0, 254)}"
            self.dummy_host_ip_map[host_name] = host_ip
        del random
        return host_ip

    @classmethod
    def Rados(cls, conffile=None):
        # 'cls' was missing from the original signature, which breaks
        # 'DummyRados.Rados(conffile=...)' calls
        return DummyRados()
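
# A hedged testing sketch (not part of this script): unit tests can swap the
# real driver for the dummy one by monkey-patching the module, e.g.:
#
#     rados.Rados = DummyRados.Rados
#     rj_obj = RadosJSON(["--rbd-data-pool-name=myfs-replicated"])
#
# DummyRados.mon_command() answers only the exact JSON commands pre-seeded in
# _init_cmd_output_map(), so any new mon command must be added there first.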
class S3Auth(AuthBase):
    """Attaches AWS Authentication to the given Request object."""

    service_base_url = "s3.amazonaws.com"

    def __init__(self, access_key, secret_key, service_url=None):
        if service_url:
            self.service_base_url = service_url
        self.access_key = str(access_key)
        self.secret_key = str(secret_key)

    def __call__(self, r):
        # Create date header if it is not created yet.
        if "date" not in r.headers and "x-amz-date" not in r.headers:
            r.headers["date"] = formatdate(timeval=None, localtime=False, usegmt=True)
        signature = self.get_signature(r)
        if py3k:
            signature = signature.decode("utf-8")
        r.headers["Authorization"] = f"AWS {self.access_key}:{signature}"
        return r

    def get_signature(self, r):
        canonical_string = self.get_canonical_string(r.url, r.headers, r.method)
        if py3k:
            key = self.secret_key.encode("utf-8")
            msg = canonical_string.encode("utf-8")
        else:
            key = self.secret_key
            msg = canonical_string
        h = hmac.new(key, msg, digestmod=sha)
        return encodestring(h.digest()).strip()

    def get_canonical_string(self, url, headers, method):
        parsedurl = urlparse(url)
        objectkey = parsedurl.path[1:]

        bucket = parsedurl.netloc[: -len(self.service_base_url)]
        if len(bucket) > 1:
            # remove last dot
            bucket = bucket[:-1]

        interesting_headers = {"content-md5": "", "content-type": "", "date": ""}
        for key in headers:
            lk = key.lower()
            try:
                lk = lk.decode("utf-8")
            except AttributeError:
                # header names are already str on Python 3
                pass
            if headers[key] and (
                lk in interesting_headers.keys() or lk.startswith("x-amz-")
            ):
                interesting_headers[lk] = headers[key].strip()

        # If x-amz-date is used it supersedes the date header.
        # (both branches of the original py2/py3 split did the same thing,
        # so the version check was unnecessary)
        if "x-amz-date" in interesting_headers:
            interesting_headers["date"] = ""

        buf = f"{method}\n"
        for key in sorted(interesting_headers.keys()):
            val = interesting_headers[key]
            if key.startswith("x-amz-"):
                buf += f"{key}:{val}\n"
            else:
                buf += f"{val}\n"

        # append the bucket if it exists
        if bucket != "":
            buf += f"/{bucket}"

        # add the objectkey. even if it doesn't exist, add the slash
        buf += f"/{objectkey}"

        return buf
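
# Illustration (values are made up): for a GET of object "dog.png" in bucket
# "mybucket" reachable as mybucket.s3.amazonaws.com, get_canonical_string()
# builds the AWS v2 "string to sign":
#
#     GET\n
#     <content-md5>\n<content-type>\n<date>\n
#     /mybucket/dog.png
#
# which get_signature() then HMAC-SHA1 signs with the secret key and
# base64-encodes for the Authorization header.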
class RadosJSON:
    EXTERNAL_USER_NAME = "client.healthchecker"
    EXTERNAL_RGW_ADMIN_OPS_USER_NAME = "rgw-admin-ops-user"
    EMPTY_OUTPUT_LIST = "Empty output list"
    DEFAULT_RGW_POOL_PREFIX = "default"
    DEFAULT_MONITORING_ENDPOINT_PORT = "9283"
    @classmethod
    def gen_arg_parser(cls, args_to_parse=None):
        argP = argparse.ArgumentParser()

        common_group = argP.add_argument_group("common")
        common_group.add_argument("--verbose", "-v", action="store_true", default=False)
        common_group.add_argument(
            "--ceph-conf", "-c", help="Provide a ceph conf file.", type=str
        )
        common_group.add_argument(
            "--keyring", "-k", help="Path to ceph keyring file.", type=str
        )
        common_group.add_argument(
            "--run-as-user",
            "-u",
            default="",
            type=str,
            help="Provides a user name to check the cluster's health status, must be prefixed by 'client.'",
        )
        common_group.add_argument(
            "--cluster-name", default="", help="Ceph cluster name"
        )
        common_group.add_argument(
            "--namespace",
            default="",
            help="Namespace where CephCluster is running",
        )
        common_group.add_argument(
            "--rgw-pool-prefix", default="", help="RGW Pool prefix"
        )
        common_group.add_argument(
            "--restricted-auth-permission",
            default=False,
            help="Restrict cephCSIKeyrings auth permissions to specific pools and cluster. "
            + "Mandatory flags that need to be set are --rbd-data-pool-name and --cluster-name. "
            + "The --cephfs-filesystem-name flag can also be passed to restrict a cephfs user to a particular cephfs filesystem. "
            + "Sample run: `python3 /etc/ceph/create-external-cluster-resources.py --cephfs-filesystem-name myfs --rbd-data-pool-name replicapool --cluster-name rookstorage --restricted-auth-permission true` "
            + "Note: restricting the csi-users per pool and per cluster requires creating new csi-users and new secrets for those csi-users, "
            + "so apply these secrets only to a new `Consumer cluster` deployment while using the same `Source cluster`.",
        )
        output_group = argP.add_argument_group("output")
        output_group.add_argument(
            "--format",
            "-t",
            choices=["json", "bash"],
            default="json",
            help="Provides the output format (json | bash)",
        )
        output_group.add_argument(
            "--output",
            "-o",
            default="",
            help="Output will be stored into the provided file",
        )
        output_group.add_argument(
            "--cephfs-filesystem-name",
            default="",
            help="Provides the name of the Ceph filesystem",
        )
        output_group.add_argument(
            "--cephfs-metadata-pool-name",
            default="",
            help="Provides the name of the cephfs metadata pool",
        )
        output_group.add_argument(
            "--cephfs-data-pool-name",
            default="",
            help="Provides the name of the cephfs data pool",
        )
        output_group.add_argument(
            "--rbd-data-pool-name",
            default="",
            required=False,
            help="Provides the name of the RBD datapool",
        )
        output_group.add_argument(
            "--alias-rbd-data-pool-name",
            default="",
            required=False,
            help="Provides an alias for the RBD data pool name, necessary if a special character such as a period or underscore is present in the pool name",
        )
        output_group.add_argument(
            "--rgw-endpoint",
            default="",
            required=False,
            help="RADOS Gateway endpoint (in `<IPv4>:<PORT>` or `<[IPv6]>:<PORT>` or `<FQDN>:<PORT>` format)",
        )
        output_group.add_argument(
            "--rgw-tls-cert-path",
            default="",
            required=False,
            help="RADOS Gateway endpoint TLS certificate",
        )
        output_group.add_argument(
            "--rgw-skip-tls",
            required=False,
            default=False,
            help="Ignore TLS certificate validation when a self-signed certificate is provided (NOT RECOMMENDED)",
        )
        output_group.add_argument(
            "--monitoring-endpoint",
            default="",
            required=False,
            help="Ceph Manager prometheus exporter endpoints (comma separated list of `<IPv4>`, `<[IPv6]>` or `<FQDN>` entries of active and standby mgrs)",
        )
        output_group.add_argument(
            "--monitoring-endpoint-port",
            default="",
            required=False,
            help="Ceph Manager prometheus exporter port",
        )
        output_group.add_argument(
            "--skip-monitoring-endpoint",
            default=False,
            action="store_true",
            help="Do not check for a monitoring endpoint for the Ceph cluster",
        )
        output_group.add_argument(
            "--rbd-metadata-ec-pool-name",
            default="",
            required=False,
            help="Provides the name of the erasure coded RBD metadata pool",
        )
        output_group.add_argument(
            "--dry-run",
            default=False,
            action="store_true",
            help="Dry run prints the executed commands without running them",
        )
        output_group.add_argument(
            "--rados-namespace",
            default="",
            required=False,
            help="Divides a pool into separate logical namespaces",
        )
        output_group.add_argument(
            "--subvolume-group",
            default="",
            required=False,
            help="Provides the name of the subvolume group",
        )
        output_group.add_argument(
            "--rgw-realm-name",
            default="",
            required=False,
            help="Provides the name of the rgw-realm",
        )
        output_group.add_argument(
            "--rgw-zone-name",
            default="",
            required=False,
            help="Provides the name of the rgw-zone",
        )
        output_group.add_argument(
            "--rgw-zonegroup-name",
            default="",
            required=False,
            help="Provides the name of the rgw-zonegroup",
        )
        upgrade_group = argP.add_argument_group("upgrade")
        upgrade_group.add_argument(
            "--upgrade",
            action="store_true",
            default=False,
            help="Upgrades the cephCSIKeyrings (for example: client.csi-cephfs-provisioner) with the new permissions needed for the new cluster version; older permissions will still be applied. "
            + "Sample run: `python3 /etc/ceph/create-external-cluster-resources.py --upgrade`, this will upgrade all the default (non-restricted) csi users. "
            + "For restricted users (for example: client.csi-cephfs-provisioner-openshift-storage-myfs), i.e. users created with the --restricted-auth-permission flag, "
            + "the mandatory flags '--rbd-data-pool-name', '--cluster-name' and '--run-as-user' must be passed while upgrading. "
            + "For cephfs users, if the --cephfs-filesystem-name flag was passed while creating the user, it is mandatory while upgrading as well. "
            + "Sample run: `python3 /etc/ceph/create-external-cluster-resources.py --upgrade --rbd-data-pool-name replicapool --cluster-name rookstorage --run-as-user client.csi-rbd-node-rookstorage-replicapool` "
            + "PS: an existing non-restricted user cannot be converted to a restricted user by upgrading. "
            + "The upgrade flag should only be used to append new permissions to users; it shouldn't be used to change a user's already-applied permissions, for example which pool a user has access to.",
        )

        if args_to_parse:
            assert (
                type(args_to_parse) == list
            ), "Argument to 'gen_arg_parser' should be a list"
        else:
            args_to_parse = sys.argv[1:]
        return argP.parse_args(args_to_parse)
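
    # A typical non-restricted invocation looks like this (flag values are
    # illustrative, not defaults):
    #   python3 create-external-cluster-resources.py \
    #       --rbd-data-pool-name replicapool \
    #       --cephfs-filesystem-name myfs \
    #       --rgw-endpoint 192.168.0.1:8080 \
    #       --format bash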
    def validate_rbd_metadata_ec_pool_name(self):
        if self._arg_parser.rbd_metadata_ec_pool_name:
            rbd_metadata_ec_pool_name = self._arg_parser.rbd_metadata_ec_pool_name
            rbd_pool_name = self._arg_parser.rbd_data_pool_name

            if rbd_pool_name == "":
                raise ExecutionFailureException(
                    "Flag '--rbd-data-pool-name' should not be empty"
                )
            if rbd_metadata_ec_pool_name == "":
                raise ExecutionFailureException(
                    "Flag '--rbd-metadata-ec-pool-name' should not be empty"
                )

            cmd_json = {"prefix": "osd dump", "format": "json"}
            ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
            if ret_val != 0 or len(json_out) == 0:
                raise ExecutionFailureException(
                    f"{cmd_json['prefix']} command failed.\n"
                    f"Error: {err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST}"
                )

            metadata_pool_exist, pool_exist = False, False
            for key in json_out["pools"]:
                # if erasure_code_profile is empty and the pool name matches,
                # it is a replicated pool
                if (
                    key["erasure_code_profile"] == ""
                    and key["pool_name"] == rbd_metadata_ec_pool_name
                ):
                    metadata_pool_exist = True
                # if erasure_code_profile is set and the pool name matches,
                # it is an erasure-coded pool
                if key["erasure_code_profile"] and key["pool_name"] == rbd_pool_name:
                    pool_exist = True

            if not metadata_pool_exist:
                raise ExecutionFailureException(
                    "Provided rbd_ec_metadata_pool name,"
                    f" {rbd_metadata_ec_pool_name}, does not exist"
                )
            if not pool_exist:
                raise ExecutionFailureException(
                    f"Provided rbd_data_pool name, {rbd_pool_name}, does not exist"
                )
            return rbd_metadata_ec_pool_name
    def dry_run(self, msg):
        if self._arg_parser.dry_run:
            print("Execute: " + "'" + msg + "'")

    def validate_rgw_endpoint_tls_cert(self):
        if self._arg_parser.rgw_tls_cert_path:
            with open(self._arg_parser.rgw_tls_cert_path, encoding="utf8") as f:
                contents = f.read()
                return contents.rstrip()
    def _check_conflicting_options(self):
        if not self._arg_parser.upgrade and not self._arg_parser.rbd_data_pool_name:
            raise ExecutionFailureException(
                "Either '--upgrade' or '--rbd-data-pool-name <pool_name>' should be specified"
            )

    def _invalid_endpoint(self, endpoint_str):
        # separate the port by splitting on the last ':' delimiter
        try:
            endpoint_str_ip, port = endpoint_str.rsplit(":", 1)
        except ValueError:
            raise ExecutionFailureException(f"Not a proper endpoint: {endpoint_str}")
        try:
            if endpoint_str_ip[0] == "[":
                endpoint_str_ip = endpoint_str_ip[1 : len(endpoint_str_ip) - 1]
            ip_type = (
                "IPv4" if type(ip_address(endpoint_str_ip)) is IPv4Address else "IPv6"
            )
        except ValueError:
            ip_type = "FQDN"
        if not port.isdigit():
            raise ExecutionFailureException(f"Port not valid: {port}")
        intPort = int(port)
        if intPort < 1 or intPort > 2**16 - 1:
            raise ExecutionFailureException(f"Out of range port number: {port}")
        return ip_type
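
    # Despite its name, _invalid_endpoint() returns the detected address type
    # on success (illustrative values):
    #   "10.0.0.5:8080"        -> "IPv4"
    #   "[2001:db8::1]:8080"   -> "IPv6"
    #   "rgw.example.com:8080" -> "FQDN"
    # and raises ExecutionFailureException for a malformed or out-of-range port.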
    def endpoint_dial(self, endpoint_str, ip_type, timeout=3, cert=None):
        # if the 'cluster' instance is a dummy one,
        # don't try to reach out to the endpoint
        if isinstance(self.cluster, DummyRados):
            return "", "", ""
        if ip_type == "IPv6":
            try:
                endpoint_str_ip, endpoint_str_port = endpoint_str.rsplit(":", 1)
            except ValueError:
                raise ExecutionFailureException(
                    f"Not a proper endpoint: {endpoint_str}"
                )
            if endpoint_str_ip[0] != "[":
                endpoint_str_ip = "[" + endpoint_str_ip + "]"
            endpoint_str = ":".join([endpoint_str_ip, endpoint_str_port])

        protocols = ["http", "https"]
        response_error = None
        for prefix in protocols:
            try:
                ep = f"{prefix}://{endpoint_str}"
                verify = None
                # If verify is set to a path to a directory,
                # the directory must have been processed using the c_rehash utility supplied with OpenSSL.
                if prefix == "https" and self._arg_parser.rgw_skip_tls:
                    verify = False
                    r = requests.head(ep, timeout=timeout, verify=False)
                elif prefix == "https" and cert:
                    verify = cert
                    r = requests.head(ep, timeout=timeout, verify=cert)
                else:
                    r = requests.head(ep, timeout=timeout)
                if r.status_code == 200:
                    return prefix, verify, ""
            except Exception as err:
                response_error = err
                continue
        sys.stderr.write(
            f"unable to connect to endpoint: {endpoint_str}, failed error: {response_error}"
        )
        return (
            "",
            "",
            ("-1"),
        )
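
    # endpoint_dial() probes the endpoint over plain HTTP first and then
    # HTTPS; on a 200 response it returns (protocol, verify-setting, ""), and
    # on total failure ("", "", "-1"), which callers below treat as fatal.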
    def __init__(self, arg_list=None):
        self.out_map = {}
        self._excluded_keys = set()
        self._arg_parser = self.gen_arg_parser(args_to_parse=arg_list)
        self._check_conflicting_options()
        self.run_as_user = self._arg_parser.run_as_user
        self.output_file = self._arg_parser.output
        self.ceph_conf = self._arg_parser.ceph_conf
        self.ceph_keyring = self._arg_parser.keyring
        self.MIN_USER_CAP_PERMISSIONS = {
            "mgr": "allow command config",
            "mon": "allow r, allow command quorum_status, allow command version",
            "osd": "allow rwx pool={0}.rgw.meta, "
            + "allow r pool=.rgw.root, "
            + "allow rw pool={0}.rgw.control, "
            + "allow rx pool={0}.rgw.log, "
            + "allow x pool={0}.rgw.buckets.index",
        }
        # if a user is not provided, use the default user
        if not self.run_as_user and not self._arg_parser.upgrade:
            self.run_as_user = self.EXTERNAL_USER_NAME
        if not self._arg_parser.rgw_pool_prefix and not self._arg_parser.upgrade:
            self._arg_parser.rgw_pool_prefix = self.DEFAULT_RGW_POOL_PREFIX
        if self.ceph_conf:
            kwargs = {}
            if self.ceph_keyring:
                kwargs["conf"] = {"keyring": self.ceph_keyring}
            self.cluster = rados.Rados(conffile=self.ceph_conf, **kwargs)
        else:
            self.cluster = rados.Rados()
            self.cluster.conf_read_file()
        self.cluster.connect()
    def shutdown(self):
        if self.cluster.state == "connected":
            self.cluster.shutdown()

    def get_fsid(self):
        if self._arg_parser.dry_run:
            return self.dry_run("ceph fsid")
        return str(self.cluster.get_fsid())

    def _common_cmd_json_gen(self, cmd_json):
        cmd = json.dumps(cmd_json, sort_keys=True)
        ret_val, cmd_out, err_msg = self.cluster.mon_command(cmd, b"")
        if self._arg_parser.verbose:
            print(f"Command Input: {cmd}")
            print(
                f"Return Val: {ret_val}\nCommand Output: {cmd_out}\n"
                f"Error Message: {err_msg}\n----------\n"
            )
        json_out = {}
        # if there is no error (i.e., ret_val is zero) and 'cmd_out' is not
        # empty, convert 'cmd_out' to a JSON output
        if ret_val == 0 and cmd_out:
            json_out = json.loads(cmd_out)
        return ret_val, json_out, err_msg
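
    # Every mon interaction funnels through _common_cmd_json_gen(); e.g.
    #   self._common_cmd_json_gen({"prefix": "fs ls", "format": "json"})
    # serializes the dict with sorted keys, sends it via librados
    # mon_command(), and returns (return-code, parsed-JSON, error-message).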
    def get_ceph_external_mon_data(self):
        cmd_json = {"prefix": "quorum_status", "format": "json"}
        if self._arg_parser.dry_run:
            return self.dry_run("ceph " + cmd_json["prefix"])
        ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
        # if there is an unsuccessful attempt,
        if ret_val != 0 or len(json_out) == 0:
            raise ExecutionFailureException(
                "'quorum_status' command failed.\n"
                f"Error: {err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST}"
            )
        q_leader_name = json_out["quorum_leader_name"]
        q_leader_details = {}
        q_leader_matching_list = [
            l for l in json_out["monmap"]["mons"] if l["name"] == q_leader_name
        ]
        if len(q_leader_matching_list) == 0:
            raise ExecutionFailureException("No matching 'mon' details found")
        q_leader_details = q_leader_matching_list[0]
        # get the address vector of the quorum-leader
        q_leader_addrvec = q_leader_details.get("public_addrs", {}).get("addrvec", [])
        # if the quorum-leader has only one address in the address-vector
        # and it is of type 'v2' (i.e., with <IP>:3300),
        # raise an exception to make the user aware that
        # they have to enable the 'v1' (i.e., with <IP>:6789) type as well
        if len(q_leader_addrvec) == 1 and q_leader_addrvec[0]["type"] == "v2":
            raise ExecutionFailureException(
                "Only the 'v2' address type is enabled; the 'v1' type should be enabled as well"
            )
        ip_port = str(q_leader_details["public_addr"].split("/")[0])
        return f"{str(q_leader_name)}={ip_port}"
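
    # Returns "<leader-mon-name>=<IP>:<port>"; with the quorum_status sample
    # seeded in DummyRados above that would be "a=10.110.205.174:6789".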
    def _convert_hostname_to_ip(self, host_name, port, ip_type):
        # if the 'cluster' instance is a dummy type,
        # call the dummy instance's "convert" method
        if not host_name:
            raise ExecutionFailureException("Empty hostname provided")
        if isinstance(self.cluster, DummyRados):
            return self.cluster._convert_hostname_to_ip(host_name)

        if ip_type == "FQDN":
            # check which ip type the FQDN should be converted to, IPv4 or IPv6;
            # the endpoint ip type will match the host ip type
            cmd_json = {"prefix": "orch host ls", "format": "json"}
            ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
            # if there is an unsuccessful attempt,
            if ret_val != 0 or len(json_out) == 0:
                raise ExecutionFailureException(
                    "'orch host ls' command failed.\n"
                    f"Error: {err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST}"
                )
            host_addr = json_out[0]["addr"]
            # append a sample port (:80), as _invalid_endpoint also verifies the port
            host_ip_type = self._invalid_endpoint(host_addr + ":80")
            import socket

            # example output [(<AddressFamily.AF_INET: 2>, <SocketKind.SOCK_STREAM: 1>, 6, '', ('93.184.216.34', 80)), ...]
            # we need to get 93.184.216.34, so it would be ip[0][4][0]
            if host_ip_type == "IPv6":
                ip = socket.getaddrinfo(
                    host_name, port, family=socket.AF_INET6, proto=socket.IPPROTO_TCP
                )
            elif host_ip_type == "IPv4":
                ip = socket.getaddrinfo(
                    host_name, port, family=socket.AF_INET, proto=socket.IPPROTO_TCP
                )
            del socket
            return ip[0][4][0]
        return host_name
    def get_active_and_standby_mgrs(self):
        if self._arg_parser.dry_run:
            return "", self.dry_run("ceph status")
        monitoring_endpoint_port = self._arg_parser.monitoring_endpoint_port
        monitoring_endpoint_ip_list = self._arg_parser.monitoring_endpoint
        standby_mgrs = []
        if not monitoring_endpoint_ip_list:
            cmd_json = {"prefix": "status", "format": "json"}
            ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
            # if there is an unsuccessful attempt,
            if ret_val != 0 or len(json_out) == 0:
                raise ExecutionFailureException(
                    "'status' command failed.\n"
                    f"Error: {err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST}"
                )
            monitoring_endpoint = (
                json_out.get("mgrmap", {}).get("services", {}).get("prometheus", "")
            )
            if not monitoring_endpoint:
                return "", ""
            # now check the stand-by mgr-s
            standby_arr = json_out.get("mgrmap", {}).get("standbys", [])
            for each_standby in standby_arr:
                if "name" in each_standby.keys():
                    standby_mgrs.append(each_standby["name"])
            try:
                parsed_endpoint = urlparse(monitoring_endpoint)
            except ValueError:
                raise ExecutionFailureException(
                    f"invalid endpoint: {monitoring_endpoint}"
                )
            monitoring_endpoint_ip_list = parsed_endpoint.hostname
            if not monitoring_endpoint_port:
                monitoring_endpoint_port = str(parsed_endpoint.port)

        # if the monitoring endpoint port is not set, use the default port
        if not monitoring_endpoint_port:
            monitoring_endpoint_port = self.DEFAULT_MONITORING_ENDPOINT_PORT

        # the user could give comma and space separated inputs (like --monitoring-endpoint="<ip1>, <ip2>")
        monitoring_endpoint_ip_list = monitoring_endpoint_ip_list.replace(",", " ")
        monitoring_endpoint_ip_list_split = monitoring_endpoint_ip_list.split()
        # if the monitoring-endpoint could not be found, raise an error
        if len(monitoring_endpoint_ip_list_split) == 0:
            raise ExecutionFailureException("No 'monitoring-endpoint' found")
        # the first ip is treated as the main monitoring-endpoint
        monitoring_endpoint_ip = monitoring_endpoint_ip_list_split[0]
        # the rest of the ip-s are added to the 'standby_mgrs' list
        standby_mgrs.extend(monitoring_endpoint_ip_list_split[1:])
        failed_ip = monitoring_endpoint_ip

        monitoring_endpoint = ":".join(
            [monitoring_endpoint_ip, monitoring_endpoint_port]
        )
        ip_type = self._invalid_endpoint(monitoring_endpoint)
        try:
            monitoring_endpoint_ip = self._convert_hostname_to_ip(
                monitoring_endpoint_ip, monitoring_endpoint_port, ip_type
            )
            # collect all the 'stand-by' mgr ips
            mgr_ips = []
            for each_standby_mgr in standby_mgrs:
                failed_ip = each_standby_mgr
                mgr_ips.append(
                    self._convert_hostname_to_ip(
                        each_standby_mgr, monitoring_endpoint_port, ip_type
                    )
                )
        except Exception:
            raise ExecutionFailureException(
                f"Conversion of host: {failed_ip} to IP failed. "
                "Please enter the IP addresses of all the ceph-mgrs with the '--monitoring-endpoint' flag"
            )
        _, _, err = self.endpoint_dial(monitoring_endpoint, ip_type)
        if err == "-1":
            raise ExecutionFailureException(err)
        # add the validated active mgr IP into the first index
        mgr_ips.insert(0, monitoring_endpoint_ip)
        all_mgr_ips_str = ",".join(mgr_ips)
        return all_mgr_ips_str, monitoring_endpoint_port
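
    # Returns a pair like ("10.0.0.1,10.0.0.2", "9283") (values illustrative):
    # a comma-separated string with the active mgr IP first followed by any
    # standby mgr IPs, plus the prometheus exporter port.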
    def check_user_exist(self, user):
        cmd_json = {"prefix": "auth get", "entity": f"{user}", "format": "json"}
        ret_val, json_out, _ = self._common_cmd_json_gen(cmd_json)
        if ret_val != 0 or len(json_out) == 0:
            return ""
        return str(json_out[0]["key"])
    def get_cephfs_provisioner_caps_and_entity(self):
        entity = "client.csi-cephfs-provisioner"
        caps = {
            "mon": "allow r, allow command 'osd blocklist'",
            "mgr": "allow rw",
            "osd": "allow rw tag cephfs metadata=*",
        }
        if self._arg_parser.restricted_auth_permission:
            cluster_name = self._arg_parser.cluster_name
            if cluster_name == "":
                raise ExecutionFailureException(
                    "cluster_name not found, please set the '--cluster-name' flag"
                )
            cephfs_filesystem = self._arg_parser.cephfs_filesystem_name
            if cephfs_filesystem == "":
                entity = f"{entity}-{cluster_name}"
            else:
                entity = f"{entity}-{cluster_name}-{cephfs_filesystem}"
                caps["osd"] = f"allow rw tag cephfs metadata={cephfs_filesystem}"
        return caps, entity

    def get_cephfs_node_caps_and_entity(self):
        entity = "client.csi-cephfs-node"
        caps = {
            "mon": "allow r, allow command 'osd blocklist'",
            "mgr": "allow rw",
            "osd": "allow rw tag cephfs *=*",
            "mds": "allow rw",
        }
        if self._arg_parser.restricted_auth_permission:
            cluster_name = self._arg_parser.cluster_name
            if cluster_name == "":
                raise ExecutionFailureException(
                    "cluster_name not found, please set the '--cluster-name' flag"
                )
            cephfs_filesystem = self._arg_parser.cephfs_filesystem_name
            if cephfs_filesystem == "":
                entity = f"{entity}-{cluster_name}"
            else:
                entity = f"{entity}-{cluster_name}-{cephfs_filesystem}"
                caps["osd"] = f"allow rw tag cephfs *={cephfs_filesystem}"
        return caps, entity
    def get_entity(self, entity, rbd_pool_name, alias_rbd_pool_name, cluster_name):
        if (
            rbd_pool_name.count(".") != 0
            or rbd_pool_name.count("_") != 0
            or alias_rbd_pool_name != ""
            # checking that alias_rbd_pool_name is not empty, as a special
            # character other than '.' or '_' may have been used
        ):
            if alias_rbd_pool_name == "":
                raise ExecutionFailureException(
                    "please set the '--alias-rbd-data-pool-name' flag as the rbd data pool name contains '.' or '_'"
                )
            if (
                alias_rbd_pool_name.count(".") != 0
                or alias_rbd_pool_name.count("_") != 0
            ):
                raise ExecutionFailureException(
                    "'--alias-rbd-data-pool-name' flag value should not contain '.' or '_'"
                )
            entity = f"{entity}-{cluster_name}-{alias_rbd_pool_name}"
        else:
            entity = f"{entity}-{cluster_name}-{rbd_pool_name}"
        return entity
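
    # Illustrative example: get_entity("client.csi-rbd-node", "replica.pool",
    # "replicapool", "rookstorage") yields
    # "client.csi-rbd-node-rookstorage-replicapool", substituting the alias
    # because '.' and '_' are not allowed in the generated entity suffix.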
    def get_rbd_provisioner_caps_and_entity(self):
        entity = "client.csi-rbd-provisioner"
        caps = {
            "mon": "profile rbd, allow command 'osd blocklist'",
            "mgr": "allow rw",
            "osd": "profile rbd",
        }
        if self._arg_parser.restricted_auth_permission:
            rbd_pool_name = self._arg_parser.rbd_data_pool_name
            alias_rbd_pool_name = self._arg_parser.alias_rbd_data_pool_name
            cluster_name = self._arg_parser.cluster_name
            if rbd_pool_name == "":
                raise ExecutionFailureException(
                    "mandatory flag not found, please set the '--rbd-data-pool-name' flag"
                )
            if cluster_name == "":
                raise ExecutionFailureException(
                    "mandatory flag not found, please set the '--cluster-name' flag"
                )
            entity = self.get_entity(
                entity, rbd_pool_name, alias_rbd_pool_name, cluster_name
            )
            caps["osd"] = f"profile rbd pool={rbd_pool_name}"
        return caps, entity

    def get_rbd_node_caps_and_entity(self):
        entity = "client.csi-rbd-node"
        caps = {
            "mon": "profile rbd, allow command 'osd blocklist'",
            "osd": "profile rbd",
        }
        if self._arg_parser.restricted_auth_permission:
            rbd_pool_name = self._arg_parser.rbd_data_pool_name
            alias_rbd_pool_name = self._arg_parser.alias_rbd_data_pool_name
            cluster_name = self._arg_parser.cluster_name
            if rbd_pool_name == "":
                raise ExecutionFailureException(
                    "mandatory flag not found, please set the '--rbd-data-pool-name' flag"
                )
            if cluster_name == "":
                raise ExecutionFailureException(
                    "mandatory flag not found, please set the '--cluster-name' flag"
                )
            entity = self.get_entity(
                entity, rbd_pool_name, alias_rbd_pool_name, cluster_name
            )
            caps["osd"] = f"profile rbd pool={rbd_pool_name}"
        return caps, entity
    def get_caps_and_entity(self, user_name):
        if "client.csi-cephfs-provisioner" in user_name:
            if "client.csi-cephfs-provisioner" != user_name:
                self._arg_parser.restricted_auth_permission = True
            return self.get_cephfs_provisioner_caps_and_entity()
        if "client.csi-cephfs-node" in user_name:
            if "client.csi-cephfs-node" != user_name:
                self._arg_parser.restricted_auth_permission = True
            return self.get_cephfs_node_caps_and_entity()
        if "client.csi-rbd-provisioner" in user_name:
            if "client.csi-rbd-provisioner" != user_name:
                self._arg_parser.restricted_auth_permission = True
            return self.get_rbd_provisioner_caps_and_entity()
        if "client.csi-rbd-node" in user_name:
            if "client.csi-rbd-node" != user_name:
                self._arg_parser.restricted_auth_permission = True
            return self.get_rbd_node_caps_and_entity()

        raise ExecutionFailureException(
            f"no user found with user_name: {user_name}, "
            "get_caps_and_entity command failed.\n"
        )
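
    # Note: a restricted user name such as
    # "client.csi-rbd-node-rookstorage-replicapool" is not equal to the base
    # name "client.csi-rbd-node" but does contain it, which is what switches
    # restricted_auth_permission on here during '--upgrade' runs.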
    def create_cephCSIKeyring_user(self, user):
        """
        command: ceph auth get-or-create client.csi-cephfs-provisioner mon 'allow r' mgr 'allow rw' osd 'allow rw tag cephfs metadata=*'
        """
        caps, entity = self.get_caps_and_entity(user)
        cmd_json = {
            "prefix": "auth get-or-create",
            "entity": entity,
            # flatten the caps dict into the alternating cap-type / cap-spec
            # list form that the mon command expects
            "caps": [cap for cap_list in list(caps.items()) for cap in cap_list],
            "format": "json",
        }
        if self._arg_parser.dry_run:
            return (
                self.dry_run(
                    "ceph "
                    + cmd_json["prefix"]
                    + " "
                    + cmd_json["entity"]
                    + " "
                    + " ".join(cmd_json["caps"])
                ),
                "",
            )
        # check whether the user already exists
        user_key = self.check_user_exist(entity)
        if user_key != "":
            # entity.split('.', 1)[1] strips the 'client.' prefix, renaming
            # e.g. 'client.csi-rbd-node' to 'csi-rbd-node'
            return user_key, f"{entity.split('.', 1)[1]}"

        ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
        # if there is an unsuccessful attempt,
        if ret_val != 0 or len(json_out) == 0:
            raise ExecutionFailureException(
                f"'auth get-or-create {user}' command failed.\n"
                f"Error: {err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST}"
            )
        # again strip the 'client.' prefix from the entity for the short name
        return str(json_out[0]["key"]), f"{entity.split('.', 1)[1]}"
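
    # For example, create_cephCSIKeyring_user("client.csi-rbd-node") with no
    # restrictions is equivalent to:
    #   ceph auth get-or-create client.csi-rbd-node \
    #       mon "profile rbd, allow command 'osd blocklist'" osd "profile rbd"
    # and returns the generated key together with the short name "csi-rbd-node".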
    def get_cephfs_data_pool_details(self):
        cmd_json = {"prefix": "fs ls", "format": "json"}
        if self._arg_parser.dry_run:
            return self.dry_run("ceph " + cmd_json["prefix"])
        ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
        # if there is an unsuccessful attempt, report an error
        if ret_val != 0:
            # if the fs and data_pool arguments are not set, silently return
            if (
                self._arg_parser.cephfs_filesystem_name == ""
                and self._arg_parser.cephfs_data_pool_name == ""
            ):
                return
            # if the user has provided either of the
            # '--cephfs-filesystem-name' or '--cephfs-data-pool-name' arguments,
            # raise an exception as we are unable to verify the args
            raise ExecutionFailureException(
                f"'fs ls' ceph call failed with error: {err_msg}"
            )

        matching_json_out = {}
        # if the '--cephfs-filesystem-name' argument is provided,
        # check whether the provided filesystem-name exists or not
        if self._arg_parser.cephfs_filesystem_name:
            # get the matching list
            matching_json_out_list = [
                matched
                for matched in json_out
                if str(matched["name"]) == self._arg_parser.cephfs_filesystem_name
            ]
            # unable to find a matching fs-name, raise an error
            if len(matching_json_out_list) == 0:
                raise ExecutionFailureException(
                    f"Filesystem provided, '{self._arg_parser.cephfs_filesystem_name}', "
                    f"is not found in the fs-list: {[str(x['name']) for x in json_out]}"
                )
            matching_json_out = matching_json_out_list[0]
        # if the cephfs filesystem name is not provided,
        # try to get a default fs name by doing the following
        else:
            # a. check whether exactly one filesystem is present
            if len(json_out) == 1:
                matching_json_out = json_out[0]
            # b. or else, check whether a data_pool name is provided
            elif self._arg_parser.cephfs_data_pool_name:
                # and if present, check whether there exists an fs which has the data_pool
                for eachJ in json_out:
                    if self._arg_parser.cephfs_data_pool_name in eachJ["data_pools"]:
                        matching_json_out = eachJ
                        break
                # if there is no matching fs, the provided data_pool name is invalid
                if not matching_json_out:
                    raise ExecutionFailureException(
                        f"Provided data_pool name, {self._arg_parser.cephfs_data_pool_name},"
                        " does not exist"
                    )
            # c. if nothing is set and we couldn't find a default,
            else:
                # just return silently
                return

        if matching_json_out:
            self._arg_parser.cephfs_filesystem_name = str(matching_json_out["name"])
            self._arg_parser.cephfs_metadata_pool_name = str(
                matching_json_out["metadata_pool"]
            )

        if isinstance(matching_json_out["data_pools"], list):
            # if the user has already provided a data-pool-name
            # through --cephfs-data-pool-name
            if self._arg_parser.cephfs_data_pool_name:
                # if the provided name is not in the list, raise an error
                if (
                    self._arg_parser.cephfs_data_pool_name
                    not in matching_json_out["data_pools"]
                ):
                    raise ExecutionFailureException(
                        f"Provided data-pool-name: '{self._arg_parser.cephfs_data_pool_name}', "
                        "is not found in the data-pools list: "
                        f"{[str(x) for x in matching_json_out['data_pools']]}"
                    )
            # if the data_pool name is not provided,
            # then try to find a default data pool name
            else:
                # if no data_pools exist, silently return
                if len(matching_json_out["data_pools"]) == 0:
                    return
                self._arg_parser.cephfs_data_pool_name = str(
                    matching_json_out["data_pools"][0]
                )
            # if more than one 'data_pools' exist,
            # warn the user that we are using the selected name
            if len(matching_json_out["data_pools"]) > 1:
                print(
                    "WARNING: Multiple data pools detected: "
                    f"{[str(x) for x in matching_json_out['data_pools']]}\n"
                    f"Using the data-pool: '{self._arg_parser.cephfs_data_pool_name}'\n"
                )
    def create_checkerKey(self):
        cmd_json = {
            "prefix": "auth get-or-create",
            "entity": self.run_as_user,
            "caps": [
                "mon",
                self.MIN_USER_CAP_PERMISSIONS["mon"],
                "mgr",
                self.MIN_USER_CAP_PERMISSIONS["mgr"],
                "osd",
                self.MIN_USER_CAP_PERMISSIONS["osd"].format(
                    self._arg_parser.rgw_pool_prefix
                ),
            ],
            "format": "json",
        }
        if self._arg_parser.dry_run:
            return self.dry_run(
                "ceph "
                + cmd_json["prefix"]
                + " "
                + cmd_json["entity"]
                + " "
                + " ".join(cmd_json["caps"])
            )
        ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
        # if there is an unsuccessful attempt,
        if ret_val != 0 or len(json_out) == 0:
            raise ExecutionFailureException(
                f"'auth get-or-create {self.run_as_user}' command failed\n"
                f"Error: {err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST}"
            )
        return str(json_out[0]["key"])
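
    # With the default rgw-pool-prefix ("default") this amounts to:
    #   ceph auth get-or-create client.healthchecker \
    #       mon 'allow r, allow command quorum_status, allow command version' \
    #       mgr 'allow command config' \
    #       osd 'allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, ...'
    # (osd caps abbreviated here; see MIN_USER_CAP_PERMISSIONS above)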
    def get_ceph_dashboard_link(self):
        cmd_json = {"prefix": "mgr services", "format": "json"}
        if self._arg_parser.dry_run:
            return self.dry_run("ceph " + cmd_json["prefix"])
        ret_val, json_out, _ = self._common_cmd_json_gen(cmd_json)
        # if there is an unsuccessful attempt,
        if ret_val != 0 or len(json_out) == 0:
            return None
        if "dashboard" not in json_out:
            return None
        return json_out["dashboard"]
    def create_rgw_admin_ops_user(self):
        cmd = [
            "radosgw-admin",
            "user",
            "create",
            "--uid",
            self.EXTERNAL_RGW_ADMIN_OPS_USER_NAME,
            "--display-name",
            "Rook RGW Admin Ops user",
            "--caps",
            "buckets=*;users=*;usage=read;metadata=read;zone=read",
            "--rgw-realm",
            self._arg_parser.rgw_realm_name,
            "--rgw-zonegroup",
            self._arg_parser.rgw_zonegroup_name,
            "--rgw-zone",
            self._arg_parser.rgw_zone_name,
        ]
        if self._arg_parser.dry_run:
            return self.dry_run("ceph " + " ".join(cmd))
        try:
            output = subprocess.check_output(cmd, stderr=subprocess.PIPE)
        except subprocess.CalledProcessError as execErr:
            # if the user already exists, we just query it
            if execErr.returncode == errno.EEXIST:
                cmd = [
                    "radosgw-admin",
                    "user",
                    "info",
                    "--uid",
                    self.EXTERNAL_RGW_ADMIN_OPS_USER_NAME,
                    "--rgw-realm",
                    self._arg_parser.rgw_realm_name,
                    "--rgw-zonegroup",
                    self._arg_parser.rgw_zonegroup_name,
                    "--rgw-zone",
                    self._arg_parser.rgw_zone_name,
                ]
                try:
                    output = subprocess.check_output(cmd, stderr=subprocess.PIPE)
                except subprocess.CalledProcessError as execErr:
                    err_msg = (
                        f"failed to execute command {cmd}. Output: {execErr.output}. "
                        f"Code: {execErr.returncode}. Error: {execErr.stderr}"
                    )
                    sys.stderr.write(err_msg)
                    return None, None, False, "-1"
            else:
                err_msg = (
                    f"failed to execute command {cmd}. Output: {execErr.output}. "
                    f"Code: {execErr.returncode}. Error: {execErr.stderr}"
                )
                sys.stderr.write(err_msg)
                return None, None, False, "-1"

        # on python2, don't check the ceph version for adding the `info=read` cap (rgw_validation)
        if sys.version_info.major < 3:
            jsonoutput = json.loads(output)
            return (
                jsonoutput["keys"][0]["access_key"],
                jsonoutput["keys"][0]["secret_key"],
                False,
                "",
            )

        # separately add info=read caps for rgw-endpoint ip validation
        info_cap_supported = True
        cmd = [
            "radosgw-admin",
            "caps",
            "add",
            "--uid",
            self.EXTERNAL_RGW_ADMIN_OPS_USER_NAME,
            "--caps",
            "info=read",
            "--rgw-realm",
            self._arg_parser.rgw_realm_name,
            "--rgw-zonegroup",
            self._arg_parser.rgw_zonegroup_name,
            "--rgw-zone",
            self._arg_parser.rgw_zone_name,
        ]
        try:
            output = subprocess.check_output(cmd, stderr=subprocess.PIPE)
        except subprocess.CalledProcessError as execErr:
            # the ceph version may not support adding the `info=read` cap (rgw_validation)
            if (
                "could not add caps: unable to add caps: info=read\n"
                in execErr.stderr.decode("utf-8")
                and execErr.returncode == 244
            ):
                info_cap_supported = False
            else:
                err_msg = (
                    f"failed to execute command {cmd}. Output: {execErr.output}. "
                    f"Code: {execErr.returncode}. Error: {execErr.stderr}"
                )
                sys.stderr.write(err_msg)
                return None, None, False, "-1"

        jsonoutput = json.loads(output)
        return (
            jsonoutput["keys"][0]["access_key"],
            jsonoutput["keys"][0]["secret_key"],
            info_cap_supported,
            "",
        )

    def validate_rbd_pool(self):
        if not self.cluster.pool_exists(self._arg_parser.rbd_data_pool_name):
            raise ExecutionFailureException(
                f"The provided pool, '{self._arg_parser.rbd_data_pool_name}', does not exist"
            )

    def validate_rados_namespace(self):
        rbd_pool_name = self._arg_parser.rbd_data_pool_name
        rados_namespace = self._arg_parser.rados_namespace
        if rados_namespace == "":
            return
        rbd_inst = rbd.RBD()
        ioctx = self.cluster.open_ioctx(rbd_pool_name)
        if not rbd_inst.namespace_exists(ioctx, rados_namespace):
            raise ExecutionFailureException(
                f"The provided rados namespace, '{rados_namespace}', "
                f"was not found in the pool '{rbd_pool_name}'"
            )

    def get_or_create_subvolume_group(self, subvolume_group, cephfs_filesystem_name):
        cmd = [
            "ceph",
            "fs",
            "subvolumegroup",
            "getpath",
            cephfs_filesystem_name,
            subvolume_group,
        ]
        try:
            _ = subprocess.check_output(cmd, stderr=subprocess.PIPE)
        except subprocess.CalledProcessError:
            # 'getpath' failed, so the group doesn't exist yet; create it
            cmd = [
                "ceph",
                "fs",
                "subvolumegroup",
                "create",
                cephfs_filesystem_name,
                subvolume_group,
            ]
            try:
                _ = subprocess.check_output(cmd, stderr=subprocess.PIPE)
            except subprocess.CalledProcessError:
                raise ExecutionFailureException(
                    f"subvolume group {subvolume_group} could not be created"
                )

    def pin_subvolume(
        self, subvolume_group, cephfs_filesystem_name, pin_type, pin_setting
    ):
        cmd = [
            "ceph",
            "fs",
            "subvolumegroup",
            "pin",
            cephfs_filesystem_name,
            subvolume_group,
            pin_type,
            pin_setting,
        ]
        try:
            _ = subprocess.check_output(cmd, stderr=subprocess.PIPE)
        except subprocess.CalledProcessError:
            raise ExecutionFailureException(
                f"subvolume group {subvolume_group} could not be pinned"
            )
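
    # NOTE: calls the RGW admin-ops 'info' API, signed with the admin-ops
    # user's S3 credentials, and returns (cluster_id, err) where err is "-1"
    # on any failure.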
    def get_rgw_fsid(self, base_url, verify):
        access_key = self.out_map["RGW_ADMIN_OPS_USER_ACCESS_KEY"]
        secret_key = self.out_map["RGW_ADMIN_OPS_USER_SECRET_KEY"]
        rgw_endpoint = self._arg_parser.rgw_endpoint
        base_url = base_url + "://" + rgw_endpoint + "/admin/info?"
        params = {"format": "json"}
        request_url = base_url + urllib.parse.urlencode(params)
        try:
            r = requests.get(
                request_url,
                auth=S3Auth(access_key, secret_key, rgw_endpoint),
                verify=verify,
            )
        except requests.exceptions.Timeout:
            sys.stderr.write(
                f"invalid endpoint: unable to call the admin-ops API at {rgw_endpoint}"
            )
            return "", "-1"
        r1 = r.json()
        if r1 is None or r1.get("info") is None:
            sys.stderr.write(
                f"The provided rgw endpoint, '{self._arg_parser.rgw_endpoint}', is invalid."
            )
            return (
                "",
                "-1",
            )
        return r1["info"]["storage_backends"][0]["cluster_id"], ""
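
    # NOTE: returns "" when the endpoint passes all checks and "-1" otherwise;
    # the caller treats "-1" as "do not publish RGW_ENDPOINT in the output".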
    def validate_rgw_endpoint(self, info_cap_supported):
        # if the 'cluster' instance is a dummy one,
        # don't try to reach out to the endpoint
        if isinstance(self.cluster, DummyRados):
            return
        rgw_endpoint = self._arg_parser.rgw_endpoint
        # the rgw endpoint is only validated when an ip address is passed
        ip_type = self._invalid_endpoint(rgw_endpoint)
        # check if the rgw endpoint is reachable
        cert = None
        if not self._arg_parser.rgw_skip_tls and self.validate_rgw_endpoint_tls_cert():
            cert = self._arg_parser.rgw_tls_cert_path
        base_url, verify, err = self.endpoint_dial(rgw_endpoint, ip_type, cert=cert)
        if err != "":
            return "-1"
        # check if the rgw endpoint belongs to the same cluster,
        # but only if the `info` cap is supported
        if info_cap_supported:
            fsid = self.get_fsid()
            rgw_fsid, err = self.get_rgw_fsid(base_url, verify)
            if err == "-1":
                return "-1"
            if fsid != rgw_fsid:
                sys.stderr.write(
                    f"The provided rgw endpoint, '{self._arg_parser.rgw_endpoint}', is invalid. "
                    "We validate it by calling the admin-ops API through the rgw-endpoint and checking "
                    f"that its cluster_id '{rgw_fsid}' equals the ceph cluster fsid '{fsid}'"
                )
                return "-1"
        # check that the rgw endpoint pools exist; only validate when
        # rgw_pool_prefix is passed explicitly, since the default-prefixed
        # pools are not created by us
        if self._arg_parser.rgw_pool_prefix != "default":
            rgw_pools_to_validate = [
                f"{self._arg_parser.rgw_pool_prefix}.rgw.meta",
                ".rgw.root",
                f"{self._arg_parser.rgw_pool_prefix}.rgw.control",
                f"{self._arg_parser.rgw_pool_prefix}.rgw.log",
            ]
            for _rgw_pool_to_validate in rgw_pools_to_validate:
                if not self.cluster.pool_exists(_rgw_pool_to_validate):
                    sys.stderr.write(
                        f"The provided pool, '{_rgw_pool_to_validate}', does not exist"
                    )
                    return "-1"
        return ""

    def validate_rgw_multisite(self, rgw_multisite_config_name, rgw_multisite_config):
        if rgw_multisite_config != "":
            cmd = [
                "radosgw-admin",
                rgw_multisite_config,
                "get",
                "--rgw-" + rgw_multisite_config,
                rgw_multisite_config_name,
            ]
            try:
                _ = subprocess.check_output(cmd, stderr=subprocess.PIPE)
            except subprocess.CalledProcessError as execErr:
                err_msg = (
                    f"failed to execute command {cmd}. Output: {execErr.output}. "
                    f"Code: {execErr.returncode}. Error: {execErr.stderr}"
                )
                sys.stderr.write(err_msg)
                return "-1"
        return ""
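
    # NOTE: idempotently populates self.out_map with every key that
    # gen_shell_out()/gen_json_out() render; validation and keyring creation
    # happen here as side effects.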
    def _gen_output_map(self):
        if self.out_map:
            return
        # always convert the cluster name to lowercase characters
        self._arg_parser.cluster_name = self._arg_parser.cluster_name.lower()
        self.validate_rbd_pool()
        self.validate_rados_namespace()
        self._excluded_keys.add("CLUSTER_NAME")
        self.get_cephfs_data_pool_details()
        self.out_map["NAMESPACE"] = self._arg_parser.namespace
        self.out_map["CLUSTER_NAME"] = self._arg_parser.cluster_name
        self.out_map["ROOK_EXTERNAL_FSID"] = self.get_fsid()
        self.out_map["ROOK_EXTERNAL_USERNAME"] = self.run_as_user
        self.out_map["ROOK_EXTERNAL_CEPH_MON_DATA"] = self.get_ceph_external_mon_data()
        self.out_map["ROOK_EXTERNAL_USER_SECRET"] = self.create_checkerKey()
        self.out_map["ROOK_EXTERNAL_DASHBOARD_LINK"] = self.get_ceph_dashboard_link()
        (
            self.out_map["CSI_RBD_NODE_SECRET"],
            self.out_map["CSI_RBD_NODE_SECRET_NAME"],
        ) = self.create_cephCSIKeyring_user("client.csi-rbd-node")
        (
            self.out_map["CSI_RBD_PROVISIONER_SECRET"],
            self.out_map["CSI_RBD_PROVISIONER_SECRET_NAME"],
        ) = self.create_cephCSIKeyring_user("client.csi-rbd-provisioner")
        self.out_map["CEPHFS_POOL_NAME"] = self._arg_parser.cephfs_data_pool_name
        self.out_map[
            "CEPHFS_METADATA_POOL_NAME"
        ] = self._arg_parser.cephfs_metadata_pool_name
        self.out_map["CEPHFS_FS_NAME"] = self._arg_parser.cephfs_filesystem_name
        self.out_map[
            "RESTRICTED_AUTH_PERMISSION"
        ] = self._arg_parser.restricted_auth_permission
        self.out_map["RADOS_NAMESPACE"] = self._arg_parser.rados_namespace
        self.out_map["SUBVOLUME_GROUP"] = self._arg_parser.subvolume_group
        self.out_map["CSI_CEPHFS_NODE_SECRET"] = ""
        self.out_map["CSI_CEPHFS_PROVISIONER_SECRET"] = ""
        # create the CephFS node and provisioner keyrings only when an MDS exists
        if self.out_map["CEPHFS_FS_NAME"] and self.out_map["CEPHFS_POOL_NAME"]:
            (
                self.out_map["CSI_CEPHFS_NODE_SECRET"],
                self.out_map["CSI_CEPHFS_NODE_SECRET_NAME"],
            ) = self.create_cephCSIKeyring_user("client.csi-cephfs-node")
            (
                self.out_map["CSI_CEPHFS_PROVISIONER_SECRET"],
                self.out_map["CSI_CEPHFS_PROVISIONER_SECRET_NAME"],
            ) = self.create_cephCSIKeyring_user("client.csi-cephfs-provisioner")
            # create the default "csi" subvolumegroup
            self.get_or_create_subvolume_group(
                "csi", self._arg_parser.cephfs_filesystem_name
            )
            # pin the default "csi" subvolumegroup
            self.pin_subvolume(
                "csi", self._arg_parser.cephfs_filesystem_name, "distributed", "1"
            )
            if self.out_map["SUBVOLUME_GROUP"]:
                self.get_or_create_subvolume_group(
                    self._arg_parser.subvolume_group,
                    self._arg_parser.cephfs_filesystem_name,
                )
                self.pin_subvolume(
                    self._arg_parser.subvolume_group,
                    self._arg_parser.cephfs_filesystem_name,
                    "distributed",
                    "1",
                )
        self.out_map["RGW_TLS_CERT"] = ""
        self.out_map["MONITORING_ENDPOINT"] = ""
        self.out_map["MONITORING_ENDPOINT_PORT"] = ""
        if not self._arg_parser.skip_monitoring_endpoint:
            (
                self.out_map["MONITORING_ENDPOINT"],
                self.out_map["MONITORING_ENDPOINT_PORT"],
            ) = self.get_active_and_standby_mgrs()
        self.out_map["RBD_POOL_NAME"] = self._arg_parser.rbd_data_pool_name
        self.out_map[
            "RBD_METADATA_EC_POOL_NAME"
        ] = self.validate_rbd_metadata_ec_pool_name()
        self.out_map["RGW_POOL_PREFIX"] = self._arg_parser.rgw_pool_prefix
        self.out_map["RGW_ENDPOINT"] = ""
        if self._arg_parser.rgw_endpoint:
            if self._arg_parser.dry_run:
                self.create_rgw_admin_ops_user()
            else:
                if (
                    self._arg_parser.rgw_realm_name != ""
                    and self._arg_parser.rgw_zonegroup_name != ""
                    and self._arg_parser.rgw_zone_name != ""
                ):
                    err = self.validate_rgw_multisite(
                        self._arg_parser.rgw_realm_name, "realm"
                    )
                    err = self.validate_rgw_multisite(
                        self._arg_parser.rgw_zonegroup_name, "zonegroup"
                    )
                    err = self.validate_rgw_multisite(
                        self._arg_parser.rgw_zone_name, "zone"
                    )
                # the multisite parameters must be given all together or not at all
                if (
                    self._arg_parser.rgw_realm_name == ""
                    and self._arg_parser.rgw_zonegroup_name == ""
                    and self._arg_parser.rgw_zone_name == ""
                ) or (
                    self._arg_parser.rgw_realm_name != ""
                    and self._arg_parser.rgw_zonegroup_name != ""
                    and self._arg_parser.rgw_zone_name != ""
                ):
                    (
                        self.out_map["RGW_ADMIN_OPS_USER_ACCESS_KEY"],
                        self.out_map["RGW_ADMIN_OPS_USER_SECRET_KEY"],
                        info_cap_supported,
                        err,
                    ) = self.create_rgw_admin_ops_user()
                    err = self.validate_rgw_endpoint(info_cap_supported)
                    if self._arg_parser.rgw_tls_cert_path:
                        self.out_map[
                            "RGW_TLS_CERT"
                        ] = self.validate_rgw_endpoint_tls_cert()
                    # if there is no error, set the RGW_ENDPOINT
                    if err != "-1":
                        self.out_map["RGW_ENDPOINT"] = self._arg_parser.rgw_endpoint
                else:
                    err = "Please provide all the RGW multisite parameters or none of them"
                    sys.stderr.write(err)
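
    # NOTE: renders out_map as shell export lines, one per non-empty,
    # non-excluded key, e.g.:
    #   export ROOK_EXTERNAL_FSID=<fsid>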
    def gen_shell_out(self):
        self._gen_output_map()
        shOutIO = StringIO()
        for k, v in self.out_map.items():
            if v and k not in self._excluded_keys:
                shOutIO.write(f"export {k}={v}{LINESEP}")
        shOut = shOutIO.getvalue()
        shOutIO.close()
        return shOut
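
    # NOTE: renders out_map as a JSON list of {name, kind, data} blobs
    # (ConfigMap/Secret/StorageClass/CephCluster descriptors), used when the
    # requested output format is 'json'.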
    def gen_json_out(self):
        self._gen_output_map()
        if self._arg_parser.dry_run:
            return ""
        json_out = [
            {
                "name": "rook-ceph-mon-endpoints",
                "kind": "ConfigMap",
                "data": {
                    "data": self.out_map["ROOK_EXTERNAL_CEPH_MON_DATA"],
                    "maxMonId": "0",
                    "mapping": "{}",
                },
            },
            {
                "name": "rook-ceph-mon",
                "kind": "Secret",
                "data": {
                    "admin-secret": "admin-secret",
                    "fsid": self.out_map["ROOK_EXTERNAL_FSID"],
                    "mon-secret": "mon-secret",
                },
            },
            {
                "name": "rook-ceph-operator-creds",
                "kind": "Secret",
                "data": {
                    "userID": self.out_map["ROOK_EXTERNAL_USERNAME"],
                    "userKey": self.out_map["ROOK_EXTERNAL_USER_SECRET"],
                },
            },
        ]
        # add the 'monitoring-endpoint' CephCluster entry only when both
        # 'MONITORING_ENDPOINT' and its port exist
        if (
            self.out_map["MONITORING_ENDPOINT"]
            and self.out_map["MONITORING_ENDPOINT_PORT"]
        ):
            json_out.append(
                {
                    "name": "monitoring-endpoint",
                    "kind": "CephCluster",
                    "data": {
                        "MonitoringEndpoint": self.out_map["MONITORING_ENDPOINT"],
                        "MonitoringPort": self.out_map["MONITORING_ENDPOINT_PORT"],
                    },
                }
            )
        # add the 'rook-csi-rbd-node' Secret only when 'CSI_RBD_NODE_SECRET' exists
        if (
            self.out_map["CSI_RBD_NODE_SECRET"]
            and self.out_map["CSI_RBD_NODE_SECRET_NAME"]
        ):
            json_out.append(
                {
                    "name": f"rook-{self.out_map['CSI_RBD_NODE_SECRET_NAME']}",
                    "kind": "Secret",
                    "data": {
                        "userID": self.out_map["CSI_RBD_NODE_SECRET_NAME"],
                        "userKey": self.out_map["CSI_RBD_NODE_SECRET"],
                    },
                }
            )
        # add the 'rook-csi-rbd-provisioner' Secret only when 'CSI_RBD_PROVISIONER_SECRET' exists
        if (
            self.out_map["CSI_RBD_PROVISIONER_SECRET"]
            and self.out_map["CSI_RBD_PROVISIONER_SECRET_NAME"]
        ):
            json_out.append(
                {
                    "name": f"rook-{self.out_map['CSI_RBD_PROVISIONER_SECRET_NAME']}",
                    "kind": "Secret",
                    "data": {
                        "userID": self.out_map["CSI_RBD_PROVISIONER_SECRET_NAME"],
                        "userKey": self.out_map["CSI_RBD_PROVISIONER_SECRET"],
                    },
                }
            )
        # add the 'rook-csi-cephfs-provisioner' Secret only when 'CSI_CEPHFS_PROVISIONER_SECRET' exists
        if (
            self.out_map["CSI_CEPHFS_PROVISIONER_SECRET"]
            and self.out_map["CSI_CEPHFS_PROVISIONER_SECRET_NAME"]
        ):
            json_out.append(
                {
                    "name": f"rook-{self.out_map['CSI_CEPHFS_PROVISIONER_SECRET_NAME']}",
                    "kind": "Secret",
                    "data": {
                        "adminID": self.out_map["CSI_CEPHFS_PROVISIONER_SECRET_NAME"],
                        "adminKey": self.out_map["CSI_CEPHFS_PROVISIONER_SECRET"],
                    },
                }
            )
        # add the 'rook-csi-cephfs-node' Secret only when 'CSI_CEPHFS_NODE_SECRET' exists
        if (
            self.out_map["CSI_CEPHFS_NODE_SECRET"]
            and self.out_map["CSI_CEPHFS_NODE_SECRET_NAME"]
        ):
            json_out.append(
                {
                    "name": f"rook-{self.out_map['CSI_CEPHFS_NODE_SECRET_NAME']}",
                    "kind": "Secret",
                    "data": {
                        "adminID": self.out_map["CSI_CEPHFS_NODE_SECRET_NAME"],
                        "adminKey": self.out_map["CSI_CEPHFS_NODE_SECRET"],
                    },
                }
            )
        # add the 'rook-ceph-dashboard-link' Secret only when 'ROOK_EXTERNAL_DASHBOARD_LINK' exists
        if self.out_map["ROOK_EXTERNAL_DASHBOARD_LINK"]:
            json_out.append(
                {
                    "name": "rook-ceph-dashboard-link",
                    "kind": "Secret",
                    "data": {
                        "userID": "ceph-dashboard-link",
                        "userKey": self.out_map["ROOK_EXTERNAL_DASHBOARD_LINK"],
                    },
                }
            )
        # for an erasure-coded setup the metadata pool goes into 'pool'
        # and the data pool into 'dataPool'
        if self.out_map["RBD_METADATA_EC_POOL_NAME"]:
            json_out.append(
                {
                    "name": "ceph-rbd",
                    "kind": "StorageClass",
                    "data": {
                        "dataPool": self.out_map["RBD_POOL_NAME"],
                        "pool": self.out_map["RBD_METADATA_EC_POOL_NAME"],
                        "csi.storage.k8s.io/provisioner-secret-name": f"rook-{self.out_map['CSI_RBD_PROVISIONER_SECRET_NAME']}",
                        "csi.storage.k8s.io/controller-expand-secret-name": f"rook-{self.out_map['CSI_RBD_PROVISIONER_SECRET_NAME']}",
                        "csi.storage.k8s.io/node-stage-secret-name": f"rook-{self.out_map['CSI_RBD_NODE_SECRET_NAME']}",
                    },
                }
            )
        else:
            json_out.append(
                {
                    "name": "ceph-rbd",
                    "kind": "StorageClass",
                    "data": {
                        "pool": self.out_map["RBD_POOL_NAME"],
                        "csi.storage.k8s.io/provisioner-secret-name": f"rook-{self.out_map['CSI_RBD_PROVISIONER_SECRET_NAME']}",
                        "csi.storage.k8s.io/controller-expand-secret-name": f"rook-{self.out_map['CSI_RBD_PROVISIONER_SECRET_NAME']}",
                        "csi.storage.k8s.io/node-stage-secret-name": f"rook-{self.out_map['CSI_RBD_NODE_SECRET_NAME']}",
                    },
                }
            )
        # add the 'cephfs' StorageClass only when 'CEPHFS_FS_NAME' exists
        if self.out_map["CEPHFS_FS_NAME"]:
            json_out.append(
                {
                    "name": "cephfs",
                    "kind": "StorageClass",
                    "data": {
                        "fsName": self.out_map["CEPHFS_FS_NAME"],
                        "pool": self.out_map["CEPHFS_POOL_NAME"],
                        "csi.storage.k8s.io/provisioner-secret-name": f"rook-{self.out_map['CSI_CEPHFS_PROVISIONER_SECRET_NAME']}",
                        "csi.storage.k8s.io/controller-expand-secret-name": f"rook-{self.out_map['CSI_CEPHFS_PROVISIONER_SECRET_NAME']}",
                        "csi.storage.k8s.io/node-stage-secret-name": f"rook-{self.out_map['CSI_CEPHFS_NODE_SECRET_NAME']}",
                    },
                }
            )
        # add the 'ceph-rgw' StorageClass and the 'rgw-admin-ops-user' Secret
        # only when 'RGW_ENDPOINT' exists
        if self.out_map["RGW_ENDPOINT"]:
            json_out.append(
                {
                    "name": "ceph-rgw",
                    "kind": "StorageClass",
                    "data": {
                        "endpoint": self.out_map["RGW_ENDPOINT"],
                        "poolPrefix": self.out_map["RGW_POOL_PREFIX"],
                    },
                }
            )
            json_out.append(
                {
                    "name": "rgw-admin-ops-user",
                    "kind": "Secret",
                    "data": {
                        "accessKey": self.out_map["RGW_ADMIN_OPS_USER_ACCESS_KEY"],
                        "secretKey": self.out_map["RGW_ADMIN_OPS_USER_SECRET_KEY"],
                    },
                }
            )
        # add the 'ceph-rgw-tls-cert' Secret only when 'RGW_TLS_CERT' exists
        if self.out_map["RGW_TLS_CERT"]:
            json_out.append(
                {
                    "name": "ceph-rgw-tls-cert",
                    "kind": "Secret",
                    "data": {
                        "cert": self.out_map["RGW_TLS_CERT"],
                    },
                }
            )
        return json.dumps(json_out) + LINESEP
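
    # NOTE: the upgrade path; re-applies the current minimal caps to the
    # well-known CSI users (plus the run-as-user entity, if given) via
    # 'ceph auth caps'.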
    def upgrade_users_permissions(self):
        users = [
            "client.csi-cephfs-node",
            "client.csi-cephfs-provisioner",
            "client.csi-rbd-node",
            "client.csi-rbd-provisioner",
        ]
        if self.run_as_user != "" and self.run_as_user not in users:
            users.append(self.run_as_user)
        for user in users:
            self.upgrade_user_permissions(user)

    def upgrade_user_permissions(self, user):
        # check whether the given user exists or not
        cmd_json = {"prefix": "auth get", "entity": f"{user}", "format": "json"}
        ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
        if ret_val != 0 or len(json_out) == 0:
            print(f"user {user} not found for upgrading.")
            return
        existing_caps = json_out[0]["caps"]
        new_cap, _ = self.get_caps_and_entity(user)
        cap_keys = ["mon", "mgr", "osd", "mds"]
        caps = []
        for eachCap in cap_keys:
            cur_cap_values = existing_caps.get(eachCap, "")
            new_cap_values = new_cap.get(eachCap, "")
            cur_cap_perm_list = [
                x.strip() for x in cur_cap_values.split(",") if x.strip()
            ]
            new_cap_perm_list = [
                x.strip() for x in new_cap_values.split(",") if x.strip()
            ]
            # append the new caps to the current ones, preserving their order
            cur_cap_perm_list.extend(new_cap_perm_list)
            # eliminate duplicates without using 'set':
            # 'set' would re-order the items and the order has to be kept
            new_cap_list = []
            for x in cur_cap_perm_list:
                if x not in new_cap_list:
                    new_cap_list.append(x)
            existing_caps[eachCap] = ", ".join(new_cap_list)
            if existing_caps[eachCap]:
                caps.append(eachCap)
                caps.append(existing_caps[eachCap])
        cmd_json = {
            "prefix": "auth caps",
            "entity": user,
            "caps": caps,
            "format": "json",
        }
        ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
        if ret_val != 0:
            raise ExecutionFailureException(
                f"'auth caps {user}' command failed.\n Error: {err_msg}"
            )
        print(f"Updated user {user} successfully.")
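
    # NOTE: entry point: in upgrade mode (self._arg_parser.upgrade) only the
    # user caps are updated and nothing is generated; otherwise the output is
    # printed in the requested format and also written to self.output_file
    # when one was given.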
    def main(self):
        generated_output = ""
        if self._arg_parser.upgrade:
            self.upgrade_users_permissions()
        elif self._arg_parser.format == "json":
            generated_output = self.gen_json_out()
        elif self._arg_parser.format == "bash":
            generated_output = self.gen_shell_out()
        else:
            raise ExecutionFailureException(
                f"Unsupported format: {self._arg_parser.format}"
            )
        print(generated_output)
        if self.output_file and generated_output:
            with open(self.output_file, mode="w", encoding="UTF-8") as fOut:
                fOut.write(generated_output)


################################################
##################### MAIN #####################
################################################
if __name__ == "__main__":
    rjObj = RadosJSON()
    try:
        rjObj.main()
    except ExecutionFailureException as err:
        print(f"Execution Failed: {err}")
        raise err
    except KeyError as kErr:
        print(f"KeyError: {kErr}")
    except OSError as osErr:
        print(f"Error while trying to output the data: {osErr}")
    finally:
        rjObj.shutdown()