def pool_status(self):
    """Return a status dict for the SRP backing this pool.

    Always contains type/name/head/capabilities; free/used/size (in KB)
    are added only when the SRP can be queried on the array.
    """
    from utilities.converters import convert_size
    data = {
        "type": self.type,
        "name": self.name,
        "head": "array://%s/%s" % (self.array_name, self.srp),
        "capabilities": self.capabilities,
    }
    try:
        matches = [d for d in self.array.get_srps() if d["name"] == self.srp]
        dg = matches[0]
    except Exception as exc:
        # on any lookup failure, report the static fields only
        print(exc, file=sys.stderr)
        return data
    # the array reports capacities in GiB; normalize to KB
    for key, field in (("free", "free_capacity_gigabytes"),
                       ("used", "used_capacity_gigabytes"),
                       ("size", "usable_capacity_gigabytes")):
        data[key] = convert_size(dg[field], default_unit="G", _to="KB")
    return data
def resize_zvol(self, name=None, naa=None, size=None, **kwargs):
    """Resize the zvol backing an iSCSI extent.

    size may be absolute ("10G") or relative ("+1G"). Raises ex.Error when
    a required parameter is missing or the extent/volume/zvol can not be
    resolved, or when the API answer is not valid JSON.
    """
    if size is None:
        raise ex.Error("'size' key is mandatory")
    if name is None and naa is None:
        raise ex.Error("'name' or 'naa' must be specified")
    extent = self.get_iscsi_extent(name=name, naa=naa)
    if extent is None:
        raise ex.Error("extent not found")
    volume = self.extent_volume(extent)
    if volume is None:
        raise ex.Error("volume not found")
    zvol = self.get_zvol(volume=volume, name=extent["name"])
    if zvol is None:
        raise ex.Error("zvol not found")
    if size.startswith("+"):
        # relative grow: add the increment to the current volsize
        delta = convert_size(size.lstrip("+"), _to="MiB")
        current = convert_size(int(zvol["volsize"]), _to="MiB")
        target = str(current + delta) + "MiB"
    else:
        target = str(convert_size(size, _to="MiB")) + "MiB"
    payload = {
        "volsize": target,
    }
    buff = self.put('/pool/dataset/id/%s' % quote_plus(zvol["id"]), payload)
    try:
        return json.loads(buff)
    except ValueError:
        raise ex.Error(buff)
def pool_status(self):
    """Return capacity info for the zpool head.

    Sizes are in KB; free/used/size stay at -1 when zpool can not be
    queried.
    """
    from utilities.converters import convert_size
    data = {
        "type": self.type,
        "name": self.name,
        "capabilities": self.capabilities,
        "free": -1,
        "used": -1,
        "size": -1,
        "head": self.zpool,
    }
    # -H drops headers, -p prints exact byte values; one property per line
    # in the requested order: size, alloc, free
    out, err, ret = justcall(["zpool", "get", "-H", "size,alloc,free", "-p", self.zpool])
    if ret != 0:
        return data
    lines = out.splitlines()
    for idx, key in enumerate(("size", "used", "free")):
        data[key] = convert_size(lines[idx].split()[2], default_unit="", _to="kb")
    return data
def resize_disk(self, id=None, name=None, naa=None, size=None, **kwargs):
    """Resize a LUN on the array.

    size may be absolute ("10G") or relative ("+1G"). The array expects
    CAPACITY in 512-byte sectors. Raises ex.Error on missing parameters,
    unresolved lun/storagepool, or an array-side error.
    """
    if size is None:
        raise ex.Error("'size' key is mandatory")
    if name is None and naa is None:
        raise ex.Error("'name' or 'naa' must be specified")
    lun_data = self.get_lun(oid=id, name=name, naa=naa)
    if lun_data is None:
        raise ex.Error("extent not found")
    storagepool = self.get_storagepool_by_id(lun_data["PARENTID"])
    if storagepool is None:
        raise ex.Error("storagepool not found")
    if size.startswith("+"):
        # relative grow: both terms must be 512-byte sector counts.
        incr = convert_size(size.lstrip("+"), _to="B") // 512
        # BUGFIX: ALLOCCAPACITY is already a sector count (the absolute
        # branch below converts bytes to sectors for the same CAPACITY
        # field); the previous code multiplied it by 512, mixing bytes
        # with sectors in the sum.
        current_size = int(lun_data["ALLOCCAPACITY"])
        size = current_size + incr
    else:
        size = convert_size(size, _to="B") // 512
    d = {
        "CAPACITY": size,
        "ID": lun_data["ID"],
    }
    data = self.put("/lun/expand", d)
    if data.get("error", {}).get("code"):
        raise ex.Error("expand_lun error: %s => %s" % ((lun_data["ID"], size), data.get("error")))
    return data["data"]
def fs_u_zfs():
    """Collect zfs filesystem usage.

    Returns a list of [now, nodename, fs_name, total_kb, used_pct] rows,
    or an empty list when the zfs command is absent or fails.
    """
    if not which(Env.syspaths.zfs):
        return []
    cmd = [Env.syspaths.zfs, 'list', '-o', 'name,used,avail,mountpoint', '-H']
    (out, err, ret) = justcall(cmd)
    if ret != 0:
        return []
    vals = []
    for line in out.split('\n'):
        l = line.split()
        if len(l) != 4:
            continue
        if "@" in l[0]:
            # do not report clone usage
            continue
        if "osvc_sync_" in l[0]:
            # do not report osvc sync snapshots fs usage
            continue
        used = convert_size(l[1], _to="KB")
        if l[2] == '0':
            # bare "0" carries no unit; normalize so convert_size accepts it
            l[2] = '0K'
        avail = convert_size(l[2], _to="KB")
        total = used + avail
        if total == 0:
            # BUGFIX: avoid ZeroDivisionError on datasets reporting
            # 0 used and 0 avail
            continue
        pct = used / total * 100
        vals.append([now, node.nodename, l[0], str(total), str(pct)])
    return vals
def resize_disk(self, volume=None, size=None, **kwargs):
    """Resize a volume; size may be absolute ("10G") or relative ("+1G").

    Returns the refreshed volume data after the resize.
    """
    if volume is None:
        raise ex.Error("--volume is mandatory")
    if volume == "":
        raise ex.Error("--volume can not be empty")
    if size == 0 or size is None:
        raise ex.Error("--size is mandatory")
    if size.startswith("+"):
        # relative grow: read the current size and add the increment
        incr = convert_size(size.lstrip("+"), _to="KB")
        current = int(self.get_volumes(volume=volume)["content"]["vol-size"])
        size = str(current + incr) + "K"
    payload = {
        "vol-size": str(convert_size(size, _to="MB")) + "M",
    }
    uri = "/volumes"
    params = {}
    # numeric ids go in the uri path, names go as a query parameter
    try:
        int(volume)
    except ValueError:
        params["name"] = volume
    else:
        uri += "/" + str(volume)
    self.put(uri, params=params, data=payload)
    return self.get_volumes(volume=volume)
def create_volume(self, name, namespace=None, size=None, access="rwo", fmt=True, nodes=None, shared=False):
    """Create and provision a volume object in this pool.

    Returns the volume object, whether it pre-existed or was just
    provisioned.
    """
    volume = factory("vol")(name=name, namespace=namespace, node=self.node)
    if volume.exists():
        self.log.info("volume %s already exists", name)
        return volume
    if nodes is None:
        nodes = ""
    self.log.info(
        "create volume %s (pool name: %s, pool type: %s, "
        "access: %s, size: %s, format: %s, nodes: %s, shared: %s)",
        volume.path, self.name, self.type, access, size, fmt, nodes,
        shared)
    self.configure_volume(volume, fmt=fmt, size=convert_size(size),
                          access=access, nodes=nodes, shared=shared)
    volume.action("provision", options={"wait": True, "time": "5m"})
    # BUGFIX: also return the volume on the creation path, consistent with
    # the early return above when the volume already exists.
    return volume
def add_zvol(self, name=None, size=None, volume=None, compression="inherit", dedup="off", sparse=False, **kwargs):
    """Create a VOLUME dataset (zvol) under <volume>/<name>.

    Raises ex.Error on missing mandatory keys or a non-JSON API answer.
    """
    for key, val in (("name", name), ("size", size), ("volume", volume)):
        if val is None:
            raise ex.Error("'%s' key is mandatory" % key)
    payload = {
        "name": "%s/%s" % (volume, name),
        "type": "VOLUME",
        "volsize": convert_size(size, _to="B"),
        "sparse": sparse,
        "deduplication": dedup.upper(),
    }
    # "inherit" means: do not set the property, let zfs inherit it
    if compression != "inherit":
        payload["compression"] = compression.upper()
    buff = self.post('/pool/dataset', payload)
    try:
        return json.loads(buff)
    except ValueError:
        raise ex.Error(buff)
def provisioner(self):
    """Provision the vxvm logical volume with vxassist.

    No-op when the volume already exists. Raises ex.Error when vxassist
    is missing, no size is configured, or the create command fails.
    """
    if not which('vxassist'):
        raise ex.Error("vxassist command not found")
    if self.has_it():
        self.log.info("skip vxvol provision: %s already exists" % self.fullname)
        return
    if not self.size:
        raise ex.Error("a size is required")
    size_parm = str(self.size).upper()
    size_parm = [str(convert_size(size_parm, _to="m")) + 'M']
    create_options = self.create_options or self.oget("create_options")
    # strip dev dir in case the alloc vxassist parameter was formatted
    # using sub_devs lazy references.
    # BUGFIX: build a new list instead of mutating in place — the original
    # rewrote self.create_options elements, leaking the stripped values
    # into shared resource state.
    create_options = [option.replace("/dev/vx/dsk/", "")
                      for option in create_options]
    # create the logical volume
    cmd = ['vxassist', '-g', self.vg, "make", self.name] + size_parm + create_options
    ret, out, err = self.vcall(cmd)
    if ret != 0:
        raise ex.Error(err)
    self.can_rollback = True
    self.svc.node.unset_lazy("devtree")
def _provisioner(self, name):
    """Create a gce disk through gcloud, unless already provisioned."""
    if name in self.get_disk_names():
        self.log.info("gce disk name %s already provisioned" % name)
        return
    cmd = [
        "gcloud", "compute", "disks", "create", "-q", name,
        "--size", str(convert_size(self.size, _to="MB")) + 'MB',
        "--zone", self.gce_zone,
    ]
    # optional resource attributes map 1:1 to gcloud flags
    for flag, value in (
            ("--description", self.description),
            ("--image", self.image),
            ("--source-snapshot", self.source_snapshot),
            ("--image-project", self.image_project),
            ("--type", self.disk_type)):
        if value:
            cmd += [flag, value]
    self.vcall(cmd)
def add_lun(self, name=None, size=None, storagepool=None, compression=True, dedup=True, **kwargs):
    """Create a thin LUN in a storagepool.

    Raises ex.Error on missing mandatory keys or an array-side error.
    """
    for key, val in (("name", name), ("size", size), ("storagepool", storagepool)):
        if val is None:
            raise ex.Error("'%s' key is mandatory" % key)
    payload = {
        "NAME": name,
        "PARENTID": self.get_storagepool_id(storagepool),
        "CAPACITY": convert_size(size, _to="B") // 512,  # 512-byte sectors
        "MSGRETURNTYPE": 1,  # sync
        "ALLOCTYPE": 1,  # thin
        "ENABLECOMPRESSION": compression,
        "ENABLESMARTDEDUP": dedup,
    }
    data = self.post("/lun", payload)
    if data.get("error", {}).get("code"):
        raise ex.Error("add lun error: %s => %s" % (payload, data.get("error")))
    return data["data"]
def _md_create_argv(self):
    """Build the mdadm --create argument vector from the resource config.

    Raises ex.Error when fewer than one non-spare device is configured or
    a device name is invalid.
    """
    devs = self.devs or self.oget("devs")
    spares = self.spares
    number_devs = len(devs) - (spares or 0)
    if number_devs < 1:
        raise ex.Error("at least 1 device must be set in the 'devs' provisioning")
    invalid_devname_message = self._invalid_devname()
    if invalid_devname_message:
        raise ex.Error(invalid_devname_message)
    argv = [self.mdadm, '--create', self.devname(), '--force', '--quiet',
            '--metadata=default', '-n', str(number_devs)]
    if self.level:
        argv += ["-l", self.level]
    if spares:
        argv += ["-x", str(spares)]
    if self.chunk:
        # mdadm wants the chunk in KiB, rounded to a multiple of 4
        argv += ["-c", str(convert_size(self.chunk, _to="k", _round=4))]
    if self.layout:
        argv += ["-p", self.layout]
    return argv + devs
def do_check(self):
    """Report per-dataset zfs usage for the checker framework.

    Returns a list of instance dicts (usage pct, free KB, size KB per
    dataset), or self.undef when zfs can not be queried.
    """
    cmd = [Env.syspaths.zfs, 'list', '-o', 'name,used,avail,mountpoint', '-H']
    (out, err, ret) = justcall(cmd)
    if ret != 0:
        return self.undef
    r = []
    for line in out.split('\n'):
        l = line.split()
        if len(l) != 4:
            continue
        if "@" in l[0]:
            # do not report clone usage
            continue
        if re.findall("/[0-9a-f]{64}", l[0]):
            # container id
            continue
        if "osvc_sync_" in l[0]:
            # do not report osvc sync snapshots fs usage
            continue
        used = convert_size(l[1], _to="KB")
        avail = convert_size(l[2], _to="KB")
        total = used + avail
        if total == 0:
            # BUGFIX: avoid ZeroDivisionError on datasets reporting
            # 0 used and 0 avail
            continue
        pct = round(used / total * 100)
        path = self.find_svc(l[0], l[3])
        r.append({
            "instance": l[0],
            "value": str(pct) + "%",
            "path": path,
        })
        r.append({
            "instance": l[0] + ".free",
            "value": str(avail),
            "path": path,
        })
        r.append({
            "instance": l[0] + ".size",
            "value": str(total),
            "path": path,
        })
    return r
def add_disk(self, name=None, pool=None, size=None, lun=None, mappings=None, **kwargs):
    """Create a virtual volume in a pool, optionally rename and map it,
    and return its identity and mapping information.

    Returns a dict with disk_id, disk_devid, the hba:tgt mappings and the
    raw lu data under driver_data. Raises ex.Error when pool or size is
    missing, or when the array CLI call fails.
    """
    if pool is None:
        raise ex.Error("--pool is mandatory")
    if size == 0 or size is None:
        raise ex.Error("--size is mandatory")
    pool_id = self.get_pool_by_name(pool)["poolID"]
    cmd = [
        "addvirtualvolume",
        "capacity=" + str(convert_size(size, _to="KB")),
        "capacitytype=KB",
        "poolid=" + str(pool_id),
    ]
    out, err, ret = self.cmd(cmd, xml=False, log=True)
    if ret != 0:
        raise ex.Error(err)
    data = self.parse(out)
    # the created lu is reported under the first array group of the answer
    ret = data[0]["ArrayGroup"][0]["Lu"][0]
    if name:
        self.rename_disk(devnum=ret["devNum"], name=name)
    if mappings:
        self.add_map(name=name, devnum=ret["devNum"], lun=lun, mappings=mappings)
    # re-read the lu to pick up the effective name and path information
    lun_data = self.get_lu_data(devnum=ret["displayName"])[0]
    self.push_diskinfo(lun_data, name, size)
    mappings = {}
    for path in lun_data["Path"]:
        domain = path["domainID"]
        port = path["portName"]
        # only report paths on known initiator domains and target ports
        if domain not in self.domain_portname:
            continue
        if port not in self.port_portname:
            continue
        for hba_id in self.domain_portname[domain]:
            for tgt_id in self.port_portname[port]:
                mappings[hba_id + ":" + tgt_id] = {
                    "hba_id": hba_id,
                    "tgt_id": tgt_id,
                    "lun": int(path["LUN"]),
                }
    results = {
        # disk_id: the last two dot-separated components of the objectID
        "disk_id": ".".join(lun_data["objectID"].split(".")[-2:]),
        "disk_devid": lun_data["displayName"],
        "mappings": mappings,
        "driver_data": {
            "lu": lun_data,
        },
    }
    return results
def get_hardware_mem(self):
    """Return physical memory banks as inventory dicts.

    Queried through WMI, Get-WmiObject -Class "win32_PhysicalMemory":

    instance of Win32_PhysicalMemory
    {
        Attributes = 0;
        BankLabel = "";
        Capacity = "2147483648";
        Caption = "Memoire physique";
        ConfiguredClockSpeed = 0;
        ConfiguredVoltage = 0;
        CreationClassName = "Win32_PhysicalMemory";
        Description = "Memoire physique";
        DeviceLocator = "DIMM 0";
        FormFactor = 8;
        Manufacturer = "QEMU";
        MaxVoltage = 0;
        MemoryType = 9;
        MinVoltage = 0;
        Name = "Memoire physique";
        SMBIOSMemoryType = 7;
        Tag = "Physical Memory 0";
        TypeDetail = 2;
    };
    """
    devs = []
    for bank in self.wmi.WIN32_PhysicalMemory():
        path = [bank.DeviceLocator]
        if len(bank.BankLabel) > 0:
            path.append(bank.BankLabel)
        desc = []
        if bank.Description is not None:
            desc.append(bank.Description)
        if bank.Manufacturer is not None:
            desc.append(bank.Manufacturer)
        # Capacity is in bytes; expose it as "<n>GB"
        cla = [str(convert_size(bank.Capacity, _to="GB")) + 'GB']
        devs.append({
            "type": "mem",
            "path": " ".join(path),
            "class": " ".join(cla),
            "description": " ".join(desc),
            "driver": "",
        })
    return devs
def pool_status(self):
    """Return df-based capacity info (KB) for the directory head.

    The head directory is created on first use. free/used/size/head are
    omitted when df fails.
    """
    from utilities.converters import convert_size
    if not os.path.exists(self.path):
        os.makedirs(self.path)
    data = {
        "name": self.name,
        "type": self.type,
        "capabilities": self.capabilities,
    }
    out, err, ret = justcall(["df", "-P", self.path])
    if ret != 0:
        return data
    # POSIX df: last line is "<fs> <size> <used> <avail> <pct> <mount>"
    fields = out.splitlines()[-1].split()
    data["free"] = convert_size(fields[3], default_unit="K", _to="k")
    data["used"] = convert_size(fields[2], default_unit="K", _to="k")
    data["size"] = convert_size(fields[1], default_unit="K", _to="k")
    data["head"] = self.path
    return data
def pool_status(self):
    """Return capacity info (KB) for the array storagepool head."""
    from utilities.converters import convert_size
    data = {
        "type": self.type,
        "name": self.name,
        "head": "array://%s/%s" % (self.array_name, self.storagepool),
        "capabilities": self.capabilities,
    }
    try:
        pool = self.array.get_storagepool(name=self.storagepool)
    except Exception as exc:
        # on query failure, report the static fields only
        print(exc, file=sys.stderr)
        return data
    # USERTOTALCAPACITY / USERFREECAPACITY are 512-byte sector counts
    size_kb = convert_size(int(pool["USERTOTALCAPACITY"]) * 512, _to="KB")
    free_kb = convert_size(int(pool["USERFREECAPACITY"]) * 512, _to="KB")
    data["size"] = size_kb
    data["free"] = free_kb
    data["used"] = size_kb - free_kb
    return data
def pool_status(self):
    """Return capacity info (KB) for the diskgroup head."""
    from utilities.converters import convert_size
    data = {
        "type": self.type,
        "name": self.name,
        "head": "array://%s/%s" % (self.array_name, self.diskgroup),
        "capabilities": self.capabilities,
    }
    try:
        candidates = [v for v in self.array.list_volume()
                      if v["name"] == self.diskgroup]
        dg = candidates[0]
    except Exception as exc:
        # on lookup failure, report the static fields only
        print(exc, file=sys.stderr)
        return data
    data["free"] = convert_size(dg["avail"], _to="KB")
    data["used"] = convert_size(dg["used"], _to="KB")
    data["size"] = convert_size(dg["avail"] + dg["used"], _to="KB")
    return data
def provisioner_one(self, image):
    """Create one rbd image, unless it already exists."""
    if self.exists(image):
        self.log.info("%s already provisioned" % image)
        return
    size_mb = convert_size(self.size, _to="m")
    cmd = self.rbd_rcmd() + ['create', '--size', str(size_mb), image]
    if self.image_format:
        cmd += ["--image-format", str(self.image_format)]
    ret, out, err = self.vcall(cmd)
    if ret != 0:
        raise ex.Error
    # the device tree changed: drop the cached view
    self.svc.node.unset_lazy("devtree")
def resize_disk(self, devnum=None, size=None, **kwargs):
    """Resize a logical unit; size may be absolute or relative ("+1G")."""
    if devnum is None:
        raise ex.Error("--devnum is mandatory")
    devnum = self.to_devnum(devnum)
    if size == 0 or size is None:
        raise ex.Error("--size is mandatory")
    if size.startswith("+"):
        # relative grow: add the increment to the current capacity
        incr = convert_size(size.lstrip("+"), _to="KB")
        current = int(self.get_logicalunit(devnum=devnum)["capacityInKB"])
        size = str(current + incr)
    else:
        size = str(convert_size(size, _to="KB"))
    cmd = [
        "modifyvirtualvolume",
        "capacity=" + size,
        "capacitytype=KB",
        "devnums=" + str(devnum),
    ]
    out, err, ret = self.cmd(cmd, xml=False, log=True)
    if ret != 0:
        raise ex.Error(err)
def provisioner(self):
    """Create the sparse file backing the loop device."""
    folder = os.path.dirname(self.loopfile)
    try:
        if not os.path.exists(folder):
            self.log.info("create directory %s" % folder)
            os.makedirs(folder)
        with open(self.loopfile, 'w') as f:
            self.log.info("create file %s, size %s" % (self.loopfile, self.size))
            # seek to size-1 (rounded to 512) then write a single byte,
            # producing a sparse file of the requested size
            f.seek(convert_size(self.size, _to='b', _round=512) - 1)
            f.write('\0')
    except Exception as e:
        raise ex.Error("failed to create %s: %s" % (self.loopfile, str(e)))
    # the device tree changed: drop the cached view
    self.svc.node.unset_lazy("devtree")
def add_disk(self, name=None, size=None, blocksize=None, tags=None, access=None, vaai_tp_alerts=None, small_io_alerts=None, unaligned_io_alerts=None, alignment_offset=None, mappings=None, **kwargs):
    """Create a volume, optionally map it, and return its identity.

    Returns a dict with disk_id (naa), disk_devid (index), the effective
    hba:tgt mappings and the raw volume/mappings under driver_data.
    Raises ex.Error when name or size is missing.
    """
    if name is None:
        raise ex.Error("--name is mandatory")
    if size == 0 or size is None:
        raise ex.Error("--size is mandatory")
    d = {
        "vol-name": name,
        "vol-size": str(convert_size(size, _to="MB"))+"M",
    }
    # optional volume attributes
    if blocksize is not None:
        d["lb-size"] = blocksize
    if small_io_alerts is not None:
        d["small-io-alerts"] = small_io_alerts
    if unaligned_io_alerts is not None:
        d["unaligned-io-alerts"] = unaligned_io_alerts
    if access is not None:
        d["vol-access"] = access
    if vaai_tp_alerts is not None:
        d["vaai-tp-alerts"] = vaai_tp_alerts
    if alignment_offset is not None:
        d["alignment-offset"] = alignment_offset
    self.post("/volumes", data=d)
    driver_data = {}
    mappings_data = {}
    # BUGFIX: always fetch the created volume data. The previous code only
    # fetched it when mappings were requested, then crashed with a KeyError
    # on driver_data["volume"] (and a NameError on mappings_data) below
    # when provisioning an unmapped volume.
    driver_data["volume"] = self.get_volumes(volume=name)["content"]
    if mappings:
        mappings_data = self.add_map(volume=name, mappings=mappings)
        driver_data["mappings"] = [val for val in mappings_data.values()]
    results = {
        "driver_data": driver_data,
        "disk_id": driver_data["volume"]["naa-name"],
        "disk_devid": driver_data["volume"]["index"],
        "mappings": {},
    }
    # expand each (initiator group, target group) pair into hba:tgt entries
    for ig, tg in list(mappings_data.keys()):
        if ig not in self.ig_portname:
            continue
        for hba_id in self.ig_portname[ig]:
            if tg not in self.tg_portname:
                continue
            for tgt_id in self.tg_portname[tg]:
                results["mappings"][hba_id+":"+tgt_id] = {
                    "hba_id": hba_id,
                    "tgt_id": tgt_id,
                    "lun": mappings_data[(ig, tg)]["lun"],
                }
    self.push_diskinfo(results, name, size)
    return results
def _get_mem_bytes_virsh(self):
    """Return the node memory size in MB (as a string) via virsh nodeinfo.

    Returns '0' when virsh fails or the field can not be parsed.
    """
    from utilities.converters import convert_size
    out, err, ret = justcall(['virsh', 'nodeinfo'])
    if ret != 0:
        return '0'
    for line in out.split('\n'):
        if 'Memory size' not in line:
            continue
        parts = line.split(":", 1)
        if len(parts) < 2:
            continue
        return str(convert_size(parts[-1], _to="MB"))
    return '0'
def add_iscsi_file_extent(self, name=None, size=None, volume=None, insecure_tpc=True, blocksize=512, **kwargs):
    """Create a file-backed iscsi extent under /mnt/<volume>/<name>.

    Raises ex.Error on missing mandatory keys.
    """
    for key, val in (("name", name), ("size", size), ("volume", volume)):
        if val is None:
            raise ex.Error("'%s' key is mandatory" % key)
    size_mb = convert_size(size, _to="MiB")
    payload = {
        "iscsi_target_extent_type": "File",
        "iscsi_target_extent_name": name,
        "iscsi_target_extent_insecure_tpc": insecure_tpc,
        "iscsi_target_extent_blocksize": blocksize,
        "iscsi_target_extent_filesize": str(size_mb) + "MB",
        "iscsi_target_extent_path": "/mnt/%s/%s" % (volume, name),
    }
    buff = self.post("/services/iscsi/extent", payload)
    return json.loads(buff)
def translate(self, name=None, size=None, fmt=True, shared=False):
    """Return the resource definitions for a tmpfs-backed volume.

    When fmt is False, delegate to translate_blk.
    """
    if not fmt:
        return self.translate_blk(name=name, size=size, shared=shared)
    # BUGFIX(cleanup): dropped the unused `path` local the original
    # computed with os.path.join and never read.
    size_opt = "size=%dm" % convert_size(size, _to="m")
    # append the size option to any pool-level mount options
    if self.mnt_opt:
        mnt_opt = ",".join((self.mnt_opt, size_opt))
    else:
        mnt_opt = size_opt
    return [{
        "rtype": "fs",
        "type": "tmpfs",
        "dev": "shmfs",
        "mnt": self.mount_point(name),
        "mnt_opt": mnt_opt,
    }]
def add_diskinfo(self, data, size=None, volume=None):
    """Push the new extent's disk information to the collector.

    No-op when no collector node is configured. Wraps any collector error
    in ex.Error.
    """
    if self.node is None:
        return
    try:
        result = self.node.collector_rest_post("/disks", {
            "disk_id": data["iscsi_target_extent_naa"].replace("0x", ""),
            "disk_devid": data["id"],
            "disk_name": data["iscsi_target_extent_name"],
            "disk_size": convert_size(size, _to="MB"),
            "disk_alloc": 0,
            "disk_arrayid": self.name,
            "disk_group": volume,
        })
    except Exception as exc:
        raise ex.Error(str(exc))
    # NOTE(review): this tests the extent data for "error" but raises the
    # collector result's message -- same pattern as the other drivers;
    # confirm the intent.
    if "error" in data:
        raise ex.Error(result["error"])
    return result
def add_diskinfo(self, data, size=None, storagepool=None):
    """Push the new lun's disk information to the collector.

    No-op when no collector node is configured. Wraps any collector error
    in ex.Error.
    """
    if self.node is None:
        return
    try:
        result = self.node.collector_rest_post("/disks", {
            "disk_id": data["WWN"],
            "disk_devid": data["ID"],
            "disk_name": data["NAME"],
            "disk_size": convert_size(size, _to="MB"),
            "disk_alloc": 0,
            "disk_arrayid": self.name,
            "disk_group": storagepool,
        })
    except Exception as exc:
        raise ex.Error(str(exc))
    # NOTE(review): tests the lun data for "error" but raises the
    # collector result's message -- same pattern as the other drivers;
    # confirm the intent.
    if "error" in data:
        raise ex.Error(result["error"])
    return result
def add_zvol(self, name=None, size=None, volume=None, compression="inherit", dedup="off", sparse=False, **kwargs):
    """Create a zvol under the given volume.

    Raises ex.Error on missing mandatory keys or a non-JSON API answer.
    """
    for key, val in (("name", name), ("size", size), ("volume", volume)):
        if val is None:
            raise ex.Error("'%s' key is mandatory" % key)
    size_mb = convert_size(size, _to="MiB")
    payload = {
        "name": name,
        "volsize": str(size_mb) + "MiB",
        "compression": compression,
        "sparse": sparse,
        "dedup": dedup,
    }
    buff = self.post('/storage/volume/%s/zvols/' % volume, payload)
    try:
        return json.loads(buff)
    except ValueError:
        raise ex.Error(buff)
def push_diskinfo(self, data, name, size):
    """Push the created lu's disk information to the collector.

    No-op when no collector node is configured. Wraps any collector error
    in ex.Error.
    """
    if self.node is None:
        return
    try:
        response = self.node.collector_rest_post("/disks", {
            "disk_id": self.serial + "." + str(data["devNum"]),
            "disk_devid": data["devNum"],
            "disk_name": str(name),
            "disk_size": convert_size(size, _to="MB"),
            "disk_alloc": 0,
            "disk_arrayid": self.name,
            "disk_group": self.get_pool_by_id(data["dpPoolID"]),
        })
    except Exception as exc:
        raise ex.Error(str(exc))
    # NOTE(review): tests the lu data for "error" but raises the
    # collector response's message -- same pattern as the other drivers;
    # confirm the intent.
    if "error" in data:
        raise ex.Error(response["error"])
    return response
def push_diskinfo(self, data, name, size):
    """Push the created volume's disk information to the collector.

    When the driver did not resolve a disk_id, derive one from the array
    name and the volume index. No-op when no collector node is configured.
    Wraps any collector error in ex.Error.
    """
    if self.node is None:
        return
    if data["disk_id"] in (None, ""):
        # BUGFIX: the results dict built by add_disk stores the volume
        # details under the "driver_data" key; the previous code read
        # "driver_info", raising KeyError whenever this fallback ran.
        data["disk_id"] = self.name+"."+str(data["driver_data"]["volume"]["index"])
    try:
        ret = self.node.collector_rest_post("/disks", {
            "disk_id": data["disk_id"],
            "disk_devid": data["disk_devid"],
            "disk_name": name,
            "disk_size": convert_size(size, _to="MB"),
            "disk_alloc": 0,
            "disk_arrayid": self.name,
            "disk_group": "default",
        })
    except Exception as exc:
        raise ex.Error(str(exc))
    if "error" in data:
        raise ex.Error(ret["error"])
    return ret