def rsync_version(self):
    """Return the raw output of `rsync --version`.

    Raises ex.excError when rsync is not installed or the
    version query fails.
    """
    if which("rsync") is None:
        raise ex.excError("rsync not found")
    out, err, ret = justcall(['rsync', '--version'])
    if ret != 0:
        raise ex.excError("can not determine rsync capabilities")
    return out
def rotate_root_pw(self, pw):
    """Push the new root password *pw* to the collector.

    Raises ex.excError on transport failure or collector-side error.
    """
    result = self.collector.call('collector_update_root_pw', {'pw': pw})
    if result is None:
        raise ex.excError("xmlrpc unknown failure")
    if result['ret'] != 0:
        raise ex.excError(result['msg'])
def del_zvol(self, name=None, volume=None, **kwargs):
    """Delete the zvol *name* from *volume* via the REST API.

    Raises ex.excError when a mandatory key is missing or the
    delete request does not return HTTP 204.
    """
    # explicit checks instead of the fragile locals()[key] lookup;
    # same messages, same check order (name first, then volume)
    if name is None:
        raise ex.excError("'name' key is mandatory")
    if volume is None:
        raise ex.excError("'volume' key is mandatory")
    path = '/storage/volume/%s/zvols/%s' % (volume, name)
    response = self.delete(path)
    if response.status_code != 204:
        raise ex.excError("delete error: %s (%d)" % (path, response.status_code))
def collector_list_services(self):
    """Return the list of services known to the collector,
    filtered by the --filterset option.

    Raises ex.excError on transport failure or collector-side error.
    """
    result = self.collector.call('collector_list_services',
                                 {'fset': self.options.filterset})
    if result is None:
        raise ex.excError("xmlrpc unknown failure")
    if result['ret'] != 0:
        raise ex.excError(result['msg'])
    return result['data']
def get_endpoints(self):
    """ determine which head is the replication master and which is replication slave. """
    # already resolved on a previous call: nothing to do
    if self.local is not None and self.remote is not None:
        return
    # exactly one other head must be declared besides self.filer
    heads = list(set(self.filers.values()) - set([self.filer]))
    if len(heads) != 1:
        raise ex.excError("two heads need to be setup")
    self.local = Nexenta(self.filer, self.log)
    self.remote = Nexenta(heads[0], self.log)
    # the autosync 'zfs/to-host' property names the replication target:
    # when it equals self.filer, the local head is the slave side
    prop = 'zfs/to-host'
    try:
        localdown = False
        props = self.local.autosync_get_props(self.autosync)
        if prop in props and props[prop] == self.filer:
            self.slave = self.local
            self.master = self.remote
        else:
            self.slave = self.remote
            self.master = self.local
        return
    except ex.excError as e:
        if 'does not exist' in str(e):
            # autosync job unknown on the local head: verify the replicated
            # path at least exists there, and provisionally assume
            # local=slave until the remote head is consulted below
            path_props = self.local.get_props(self.path)
            if path_props is None:
                raise ex.excError(
                    "path '%s' not found on local head '%s'" % (self.path, self.filer))
            self.slave = self.local
            self.master = self.remote
        else:
            # local head is down
            localdown = True
    # second pass: ask the remote head; its verdict overrides the
    # provisional assignment made in the except branch above
    try:
        props = self.remote.autosync_get_props(self.autosync)
        if prop in props and props[prop] == self.filer:
            self.slave = self.local
            self.master = self.remote
        else:
            self.slave = self.remote
            self.master = self.local
        return
    except ex.excError as e:
        if 'does not exist' in str(e):
            path_props = self.remote.get_props(self.path)
            if path_props is None:
                raise ex.excError(
                    "path '%s' not found on remote head '%s'" % (self.path, self.filer))
            self.slave = self.remote
            self.master = self.local
        elif localdown:
            # neither head answered the autosync query
            raise ex.excError("both heads unreachable")
def get_iscsi_targettoextent(self, id=None, **kwargs):
    """Fetch an iscsi target-to-extent mapping by its numeric *id*.

    Raises ex.excError when id is missing or the mapping does not
    decode as json (not found).
    """
    if id is None:
        # fixed message typo: was "'id' in mandatory"
        raise ex.excError("'id' is mandatory")
    content = self.get('/services/iscsi/targettoextent/%d' % id)
    try:
        data = json.loads(content)
    except ValueError:
        raise ex.excError("targettoextent not found")
    return data
def decode_key(self, key):
    """Return the decrypted value of secret key *key*.

    Raises ex.excError when the key name is empty or the key has
    no stored value.
    """
    if not key:
        raise ex.excError("secret key name can not be empty")
    data = self.oget("data", key)
    if not data:
        raise ex.excError("secret %s key %s does not exist or has no value" % (self.path, key))
    # drop the optional "crypt:" marker before base64-decoding
    if data.startswith("crypt:"):
        data = data[6:]
    blob = base64.urlsafe_b64decode(data.encode("ascii"))
    return self.decrypt(blob)[2]
def add_disk(self, name=None, pool=None, size=None, lun=None, mappings=None, **kwargs):
    """Create a virtual volume in *pool*, optionally rename and map it,
    then return its identifiers and host:target mappings.

    Raises ex.excError when --pool/--size are missing or the array
    command fails.
    """
    if pool is None:
        raise ex.excError("--pool is mandatory")
    if size == 0 or size is None:
        raise ex.excError("--size is mandatory")
    pool_id = self.get_pool_by_name(pool)["poolID"]
    cmd = [
        "addvirtualvolume",
        "capacity=" + str(convert_size(size, _to="KB")),
        "capacitytype=KB",
        "poolid=" + str(pool_id),
    ]
    out, err, ret = self.cmd(cmd, xml=False, log=True)
    if ret != 0:
        raise ex.excError(err)
    data = self.parse(out)
    # the created lu descriptor; note `ret` is reused here for the lu dict
    ret = data[0]["ArrayGroup"][0]["Lu"][0]
    if name:
        self.rename_disk(devnum=ret["devNum"], name=name)
    if mappings:
        self.add_map(name=name, devnum=ret["devNum"], lun=lun, mappings=mappings)
    # re-read the lu to get its paths and final display name
    lun_data = self.get_lu_data(devnum=ret["displayName"])[0]
    self.push_diskinfo(lun_data, name, size)
    # rebuild the effective mappings from the lu paths (the `mappings`
    # argument is intentionally discarded and recomputed here)
    mappings = {}
    for path in lun_data["Path"]:
        domain = path["domainID"]
        port = path["portName"]
        # skip paths on domains/ports we have no portname info for
        if domain not in self.domain_portname:
            continue
        if port not in self.port_portname:
            continue
        for hba_id in self.domain_portname[domain]:
            for tgt_id in self.port_portname[port]:
                mappings[hba_id + ":" + tgt_id] = {
                    "hba_id": hba_id,
                    "tgt_id": tgt_id,
                    "lun": int(path["LUN"]),
                }
    results = {
        # last two dot-separated components of the objectID
        "disk_id": ".".join(lun_data["objectID"].split(".")[-2:]),
        "disk_devid": lun_data["displayName"],
        "mappings": mappings,
        "driver_data": {
            "lu": lun_data,
        },
    }
    return results
def collector_show_tags(self):
    """Return the tags attached to this object (or every tag when
    no object path is set), as reported by the collector.

    Raises ex.excError on transport failure or collector-side error.
    """
    opts = {'svcname': self.path} if self.path else {}
    result = self.collector.call('collector_show_tags', opts)
    if result is None:
        raise ex.excError("xmlrpc unknown failure")
    if result['ret'] != 0:
        raise ex.excError(result['msg'])
    return result['data']
def print_context_help(self, action, options):
    """
    Trigger a parser error, which displays the help message
    contextualized for the action prefix.
    """
    if options.parm_help:
        raise ex.excError(self.parser.format_help())
    # no --help flag: show the action digest instead of the full help
    usage = self.format_digest(action)
    raise ex.excError("%s\n%s" % (str(action), usage))
def sandboxkey(self):
    """Return the container sandboxkey, stripped of whitespace and quotes.

    Raises ex.excError when the key can not be obtained or is empty.
    """
    raw = self.container.container_sandboxkey()
    if raw is None:
        raise ex.excError("failed to get sandboxkey")
    key = str(raw).strip().replace("'", "")
    if key == "":
        raise ex.excError("sandboxkey is empty")
    return key
def get_nspid_docker(self):
    """Return the container pid as a cleaned string.

    Raises ex.excError when the pid can not be obtained or is "0".
    """
    raw = self.container.container_pid()
    if raw is None:
        raise ex.excError("failed to get nspid")
    nspid = str(raw).strip().replace("'", "")
    if nspid == "0":
        raise ex.excError("nspid is 0")
    return nspid
def _add_key(self, key, data):
    """Encrypt *data* and store it as secret key *key*.

    Raises ex.excError when the key name or the value is empty.
    """
    if not key:
        raise ex.excError("secret key name can not be empty")
    if not data:
        raise ex.excError("secret value can not be empty")
    ciphered = self.encrypt(data, cluster_name="join", encode=True)
    data = "crypt:" + base64.urlsafe_b64encode(ciphered).decode()
    self.set_multi(["data.%s=%s" % (key, data)])
    self.log.info("secret key '%s' added (%s)", key,
                  print_size(len(data), compact=True, unit="b"))
    # refresh if in use
    self.postinstall(key)
def collector_tag(self):
    """Attach the tag named by the --tag option to this object
    via the collector.

    Raises ex.excError on transport failure or collector-side error.
    """
    opts = {'tag_name': self.options.tag}
    if self.path:
        opts['svcname'] = self.path
    result = self.collector.call('collector_tag', opts)
    if result is None:
        raise ex.excError("xmlrpc unknown failure")
    if result['ret'] != 0:
        raise ex.excError(result['msg'])
def _collector_list_tags(self): opts = {'pattern': self.options.like} if self.path: opts['svcname'] = self.path d = self.collector.call('collector_list_tags', opts) if d is None: raise ex.excError("xmlrpc unknown failure") if d['ret'] != 0: raise ex.excError(d['msg']) return d['data']
def rcp(self, src, dst):
    """Copy host file *src* into the container rootfs at *dst*.

    Returns the (out, err, ret) tuple of the cp command.
    Raises ex.excError when the rootfs is unknown or cp fails.
    """
    rootfs = self.get_rootfs()
    if len(rootfs) == 0:
        raise ex.excError()
    cmd = ['cp', src, rootfs + dst]
    out, err, ret = justcall(cmd)
    if ret != 0:
        raise ex.excError("'%s' execution error:\n%s" % (' '.join(cmd), err))
    return out, err, ret
def container_import(self):
    """Import the srp container from its export file.

    Raises ex.excError when the export file is missing or the
    srp import command fails.
    """
    if not os.path.exists(self.export_file):
        raise ex.excError("%s does not exist" % self.export_file)
    cmd = [
        'srp', '-batch',
        '-import', '-xfile', self.export_file,
        'allow_sw_mismatch=yes', 'autostart=no',
    ]
    ret, out, err = self.vcall(cmd)
    if ret != 0:
        raise ex.excError()
def add_disk(self, name=None, size=None, storagepool=None, targets=None,
             mappings=None, compression=True, dedup=True, lun=None,
             hypermetrodomain=None, **kwargs):
    """Create a lun in *storagepool*, optionally map it to hosts, report
    it to the collector, and return its identifiers and mappings.

    Raises ex.excError when a mandatory key is missing or the created
    lun has no WWN. Collector update failures are downgraded to the
    "warnings" entry of the result.
    """
    # explicit checks instead of the fragile locals()[key] lookup;
    # same messages, same check order
    if name is None:
        raise ex.excError("'name' key is mandatory")
    if size is None:
        raise ex.excError("'size' key is mandatory")
    if storagepool is None:
        raise ex.excError("'storagepool' key is mandatory")
    # lun
    data = self.add_lun(name=name, size=size, storagepool=storagepool,
                        compression=compression, dedup=dedup)
    if "WWN" not in data:
        raise ex.excError("no WWN in data")
    # mappings
    if mappings:
        mappings = self.map_lun(name=name, mappings=mappings,
                                targets=targets, lun=lun, lun_data=data)
    # collector update is best-effort: report failures as warnings
    warnings = []
    try:
        self.add_diskinfo(data, size, storagepool)
    except Exception as exc:
        warnings.append(str(exc))
    disk_id = data["WWN"]
    results = {
        "driver_data": data,
        "disk_id": disk_id,
        "disk_devid": data["ID"],
        "mappings": sorted(self.list_mappings(naa=disk_id).values(),
                           key=lambda x: (x["hba_id"], x["tgt_id"], x["disk_id"])),
    }
    if warnings:
        results["warnings"] = warnings
    return results
def provisioner(self):
    """Create the md raid array and persist its uuid in the service config.

    Raises ex.excError when mdadm is missing, no member devices are
    configured, or the mdadm create command fails.
    """
    if which("mdadm") is None:
        raise ex.excError("mdadm is not installed")
    level = self.r.oget("level")
    devs = self.r.oget("devs")
    spares = self.r.oget('spares')
    chunk = self.r.oget("chunk")
    layout = self.r.oget("layout")
    if len(devs) == 0:
        raise ex.excError(
            "at least 2 devices must be set in the 'devs' provisioning parameter"
        )
    # long md names cause a buffer overflow in mdadm
    name = self.r.devname()
    cmd = [
        self.r.mdadm, '--create', name,
        '--force', '--quiet', '--metadata=default'
    ]
    # active member count excludes the spares
    cmd += ['-n', str(len(devs) - spares)]
    if level:
        cmd += ["-l", level]
    if spares:
        cmd += ["-x", str(spares)]
    if chunk:
        cmd += ["-c", str(convert_size(chunk, _to="k", _round=4))]
    if layout:
        cmd += ["-p", layout]
    cmd += devs
    self.r.log.info(" ".join(cmd))
    from subprocess import Popen, PIPE
    # feed "no" on stdin so any interactive mdadm confirmation prompt
    # (e.g. a member device already carrying a filesystem) is declined
    # instead of hanging — NOTE(review): this aborts creation in that case
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)
    out, err = proc.communicate(input=b'no\n')
    out, err = bdecode(out).strip(), bdecode(err).strip()
    self.r.log.info(out)
    if proc.returncode != 0:
        raise ex.excError(err)
    self.r.can_rollback = True
    # NOTE(review): `out` was already logged just above, so a non-empty
    # out is logged twice here
    if len(out) > 0:
        self.r.log.info(out)
    if len(err) > 0:
        self.r.log.error(err)
    # provisional uuid from the device basename, replaced below by the
    # real uuid read back from the created array
    self.r.uuid = os.path.basename(name)
    uuid = self.get_real_uuid(name)
    self.r.uuid = uuid
    # persist the uuid: cluster-wide when the resource is shared,
    # per-node otherwise
    if self.r.shared:
        self.r.log.info("set %s.uuid = %s", self.r.rid, uuid)
        self.r.svc._set(self.r.rid, "uuid", uuid)
    else:
        self.r.log.info("set %s.uuid@%s = %s", self.r.rid, rcEnv.nodename, uuid)
        self.r.svc._set(self.r.rid, "uuid@" + rcEnv.nodename, uuid)
    # the device tree changed: drop the cached view
    self.r.svc.node.unset_lazy("devtree")
def get_target_targetgroup(self, hba_id):
    """Return the last targetgroup id of the target whose
    port-address matches *hba_id*.

    Raises ex.excError when no target matches or the target
    belongs to no targetgroup.
    """
    addr = self.convert_hba_id(hba_id)
    params = {
        "full": 1,
        "filter": "port-address:eq:" + addr,
    }
    data = self.get("/targets", params=params)
    if len(data["targets"]) == 0:
        raise ex.excError("no target found with port-address=%s" % addr)
    tg_ids = data["targets"][0]["tg-id"]
    if len(tg_ids) == 0:
        raise ex.excError("target %s found in no targetgroup" % addr)
    return tg_ids[-1]
def validate_name(name):
    """Validate an object name against the reserved kind words and
    the rfc952 hostname grammar.

    Returns None on success; raises ex.excError on invalid names.
    """
    # strip scaler slice prefix
    name = re.sub(r"^[0-9]+\.", "", name)
    if name in rcEnv.kinds:
        raise ex.excError("invalid name '%s'. names must not clash with kinds"
                          " %s." % (name, ", ".join(rcEnv.kinds)))
    if not re.match(VALID_NAME_RFC952, name):
        raise ex.excError("invalid name '%s'. names must contain only dots, letters, "
                          "digits and hyphens, start with a letter and end with "
                          "a digit or letter (rfc 952)." % name)
def get_zonepath_from_zonecfg_export(self):
    """Parse the zonepath value out of the zone's zonecfg export file.

    Raises ex.excError when the export file or the `set zonepath`
    command is missing.
    """
    fpath = self.zone_cfg_path()
    if not os.path.exists(fpath):
        raise ex.excError("zone config export file %s not found. "
                          "unable to determine zonepath" % fpath)
    with open(fpath, "r") as cfg:
        lines = cfg.read().split("\n")
    for line in lines:
        if "set zonepath" in line:
            return line.split("=")[-1].strip()
    raise ex.excError("set zonepath command not found in %s" % fpath)
def _del_iscsi_targettoextent(self, id=None, **kwargs): try: data = self.get_iscsi_targettoextent(id) except Exception as exc: data = {"error": str(exc)} if id is None: raise ex.excError("'id' in mandatory") response = self.delete('/services/iscsi/targettoextent/%d' % id) if response.status_code != 204: raise ex.excError(str(response)) return data
def locklist(self, image):
    """Return the json-decoded `rbd lock list` output for *image*.

    Raises ex.excError when the command fails or its output is
    not valid json.
    """
    cmd = self.rbd_rcmd() + ["lock", "list", image, "--format", "json"]
    out, err, ret = justcall(cmd)
    if ret != 0:
        raise ex.excError("rbd lock list failed")
    try:
        return json.loads(out)
    except Exception as e:
        raise ex.excError(str(e))
def do_action(action, array_name=None, node=None, **kwargs):
    """Run *action* on the freenas array named *array_name* and print
    the json-formatted result, if any.

    Raises ex.excError when the array is unknown or the action is
    not implemented.
    """
    array = Freenass().get_freenas(array_name)
    if array is None:
        raise ex.excError("array %s not found" % array_name)
    array.node = node
    if not hasattr(array, action):
        raise ex.excError("not implemented")
    result = getattr(array, action)(**kwargs)
    if result is not None:
        print(json.dumps(result, indent=4))
def sync_date(self, n):
    """Return the datetime of the sync entry for node *n* in the bucket.

    Raises ex.excError when the key is not found or its date field
    can not be parsed.
    """
    key = self.sync_basename(n)
    # narrowed from bare `except:` — never swallow SystemExit/KeyboardInterrupt
    try:
        e = [d for d in self.ls() if d["key"] == key][0]
    except Exception:
        raise ex.excError("key %s not found in bucket" % key)
    try:
        _d = datetime.datetime.strptime(e["date"], "%Y-%m-%d %H:%M:%S")
    except Exception:
        raise ex.excError("undecodable date %s" % e["date"])
    return _d
def install_dst(self, node=None):
    """Install the sent snapshot as the writable destination subvolume
    on *node*. No-op when node is None.

    Raises ex.excError when the destination already exists or the
    snapshot command fails.
    """
    if node is None:
        return
    try:
        self.dst_btrfs[node].snapshot(self.dst_snap_sent, self.dst, readonly=False)
    except rcBtrfs.ExistError:
        self.log.error("%s should not exist on node %s", self.dst_snap_sent, node)
        raise ex.excError()
    except rcBtrfs.ExecError:
        self.log.error("failed to install snapshot %s on node %s"%(self.dst, node))
        raise ex.excError()
def on_add(self):
    """Resolve and cache the Hp3par array object matching self.array.

    Raises ex.excError when the array session can not be created or
    the array is not accessible.
    """
    try:
        arrays = rc.Hp3pars(objects=[self.array], log=self.log, node=self.svc.node)
    except Exception as e:
        raise ex.excError(str(e))
    matches = arrays.arrays
    if len(matches) == 1:
        self.array_obj = matches[0]
    if self.array_obj is None:
        raise ex.excError("array %s is not accessible" % self.array)
    self.array_obj.path = self.svc.path
def del_diskinfo(self, disk_id):
    """Delete the collector diskinfo record for *disk_id*.

    No-op when disk_id is empty or no collector node is set.
    Raises ex.excError on request failure or collector-reported error.
    """
    if disk_id in (None, "") or self.node is None:
        return
    try:
        result = self.node.collector_rest_delete("/disks/%s" % disk_id)
    except Exception as exc:
        raise ex.excError(str(exc))
    if "error" in result:
        raise ex.excError(result["error"])
    return result
def sync_swap(self):
    """Swap the snapmirror replication direction: resync from the old
    slave toward the old master, release the old relation, and delete
    the stale base snapshot on the old slave.

    Only allowed when the local relation is in state Broken-off.
    Raises ex.excError on any failing filer command.
    """
    master = self.master()
    slave = self.slave()
    s = self.snapmirror_status(self.local())
    # a resync in the reverse direction is only safe once the current
    # relation is broken off
    if s['state'] != "Broken-off":
        self.log.error(
            "can not swap: snapmirror is not in state Broken-off")
        raise ex.excError
    # reverse direction: old slave becomes the source
    src = slave + ':' + self.path_short
    dst = master + ':' + self.path_short
    (ret, buff, err) = self._cmd(['snapmirror', 'resync', '-f', '-S', src, dst], master, info=True)
    if ret != 0:
        raise ex.excError(err)
    # release the old relation on the former master
    (ret, buff, err) = self._cmd(['snapmirror', 'release', self.path_short, src], master, info=True)
    if ret != 0:
        raise ex.excError(err)
    (ret, buff, err) = self._cmd(['snapmirror', 'status', '-l', dst], slave, info=False)
    if ret != 0:
        raise ex.excError(err)
    # parse the long status output: once a "State: Broken-off" section
    # is seen, grab the "Base Snapshot:" line that follows it
    snap = ""
    state = ""
    for line in buff.split('\n'):
        l = line.split()
        if len(l) < 2:
            continue
        if l[0] == "State:":
            state = l[1]
        if state != "Broken-off":
            continue
        if l[0] == "Base" and l[1] == "Snapshot:":
            snap = l[-1]
            break
    if len(snap) == 0:
        self.log.error(
            "can not determine base snapshot name to remove on %s" % slave)
        raise ex.excError
    import time
    # give the filer a moment to settle before deleting the snapshot
    time.sleep(5)
    (ret, buff, err) = self._cmd(['snap', 'delete', self.path_short, snap], slave, info=True)
    if ret != 0:
        raise ex.excError(err)