def update_free_size(hostvol, sizechange):
    """Update the free size in respective hosting Volume's stat file"""
    # Check for mount availability before updating the free size
    retry_errors(os.statvfs, [os.path.join(HOSTVOL_MOUNTDIR, hostvol)],
                 [ENOTCONN])

    stat_file_path = os.path.join(HOSTVOL_MOUNTDIR, hostvol, ".stat")
    tmp_file_path = stat_file_path + ".tmp"

    with statfile_lock:
        # Write the adjusted stats to a temp file first, then rename
        # over the real .stat file so readers never see a partial write.
        with open(tmp_file_path, "w") as stat_file_tmp, \
                open(stat_file_path) as stat_file:
            data = json.load(stat_file)
            data["free_size"] += sizechange
            stat_file_tmp.write(json.dumps(data))
            logging.debug(logf(
                "Updated .stat.tmp file",
                hostvol=hostvol,
                before=data["free_size"] - sizechange,
                after=data["free_size"]
            ))

        os.rename(tmp_file_path, stat_file_path)
        logging.debug(logf(
            "Renamed .stat.tmp to .stat file",
            hostvol=hostvol
        ))
def search_volume(volname):
    """Search for a Volume by name in all Hosting Volumes"""
    volhash = get_volname_hash(volname)
    subdir_path = get_volume_path(PV_TYPE_SUBVOL, volhash, volname)
    virtblock_path = get_volume_path(PV_TYPE_VIRTBLOCK, volhash, volname)

    for host_volume in get_pv_hosting_volumes():
        hvol = host_volume['name']
        mntdir = os.path.join(HOSTVOL_MOUNTDIR, hvol)
        mount_glusterfs(host_volume, mntdir)

        # Check for mount availability before checking the info file
        retry_errors(os.statvfs, [mntdir], [ENOTCONN])

        # Probe both candidate info files; the path itself encodes
        # whether this is a subdir or a virtual-block PV.
        for info_path in (subdir_path, virtblock_path):
            info_path_full = os.path.join(mntdir, "info", info_path + ".json")
            if "/%s/" % PV_TYPE_SUBVOL in info_path_full:
                voltype = PV_TYPE_SUBVOL
            else:
                voltype = PV_TYPE_VIRTBLOCK

            if not os.path.exists(info_path_full):
                continue

            with open(info_path_full) as info_file:
                data = json.load(info_file)

            return Volume(volname=volname,
                          voltype=voltype,
                          volhash=volhash,
                          hostvol=hvol,
                          size=data["size"])

    return None
def create_subdir_volume(hostvol_mnt, volname, size):
    """
    Create sub directory Volume.

    Creates the PV directory under the mounted hosting volume, writes
    the info (metadata) file, then polls statvfs of the new directory
    until its reported size matches the requested size within a 5%
    tolerance (quota applied) or ~6 tries elapse.
    """
    volhash = get_volname_hash(volname)
    volpath = get_volume_path(PV_TYPE_SUBVOL, volhash, volname)
    logging.debug(logf("Volume hash", volhash=volhash))

    # Check for mount availability before creating subdir volume
    retry_errors(os.statvfs, [hostvol_mnt], [ENOTCONN])

    # Create a subdir
    makedirs(os.path.join(hostvol_mnt, volpath))
    logging.debug(logf("Created PV directory", pvdir=volpath))

    # Write info file so that Brick's quotad sidecar
    # container picks it up.
    save_pv_metadata(hostvol_mnt, volpath, size)

    # Wait for quota set
    # TODO: Handle Timeout
    # ±5% window — statvfs sizes are block-rounded, so exact equality
    # with the requested size cannot be expected.
    pvsize_buffer = size * 0.05  # 5%
    pvsize_min = (size - pvsize_buffer)
    pvsize_max = (size + pvsize_buffer)
    logging.debug(
        logf(
            "Watching df of pv directory",
            pvdir=volpath,
            pvsize_buffer=pvsize_buffer,
        ))
    count = 0
    while True:
        count += 1
        pvstat = retry_errors(os.statvfs,
                              [os.path.join(hostvol_mnt, volpath)],
                              [ENOTCONN])
        # assumes quota enforcement makes f_blocks reflect the PV's
        # allotted size rather than the whole filesystem — TODO confirm
        volsize = pvstat.f_blocks * pvstat.f_bsize
        if pvsize_min < volsize < pvsize_max:
            logging.debug(
                logf("Matching df output, Quota set successful",
                     volsize=volsize,
                     num_tries=count))
            break

        if count >= 6:
            # Give up waiting but continue — quota may still land later
            logging.warning(
                logf("Waited for some time, Quota set failed, continuing.",
                     volsize=volsize,
                     num_tries=count))
            break

        time.sleep(1)

    return Volume(
        volname=volname,
        voltype=PV_TYPE_SUBVOL,
        volhash=volhash,
        hostvol=os.path.basename(hostvol_mnt),
        size=size,
        volpath=volpath,
    )
def create_virtblock_volume(hostvol_mnt, volname, size):
    """
    Create virtual block volume.

    Creates a backing file of the requested size (via truncate, so it
    is sparse), formats it with XFS and saves the PV metadata file.
    """
    volhash = get_volname_hash(volname)
    volpath = get_volume_path(PV_TYPE_VIRTBLOCK, volhash, volname)
    volpath_full = os.path.join(hostvol_mnt, volpath)
    logging.debug(logf(
        "Volume hash",
        volhash=volhash
    ))

    # Check for mount availability before creating virtblock volume
    retry_errors(os.statvfs, [hostvol_mnt], [ENOTCONN])

    # Create a file with required size
    makedirs(os.path.dirname(volpath_full))
    logging.debug(logf(
        "Created virtblock directory",
        path=os.path.dirname(volpath)
    ))

    # A leftover file from a previous attempt is moved aside (suffixed
    # with the current timestamp) instead of being overwritten.
    if os.path.exists(volpath_full):
        rand = time.time()
        logging.info(logf(
            "Getting 'Create request' on existing file, renaming.",
            path=volpath_full, random=rand
        ))
        os.rename(volpath_full, "%s.%s" % (volpath_full, rand))

    # Create the (empty) file, then extend it to the requested size;
    # truncate on a fresh file produces a sparse file.
    volpath_fd = os.open(volpath_full, os.O_CREAT | os.O_RDWR)
    os.close(volpath_fd)
    os.truncate(volpath_full, size)
    logging.debug(logf(
        "Truncated file to required size",
        path=volpath,
        size=size
    ))

    # TODO: Multiple FS support based on volume_capability mount option
    execute(MKFS_XFS_CMD, volpath_full)
    logging.debug(logf(
        "Created Filesystem",
        path=volpath,
        command=MKFS_XFS_CMD
    ))
    save_pv_metadata(hostvol_mnt, volpath, size)
    return Volume(
        volname=volname,
        voltype=PV_TYPE_VIRTBLOCK,
        volhash=volhash,
        hostvol=os.path.basename(hostvol_mnt),
        size=size,
        volpath=volpath,
    )
def update_free_size(hostvol, pvname, sizechange):
    """Update the free size in respective host volume's stats.db file"""
    mntdir = os.path.join(HOSTVOL_MOUNTDIR, hostvol)

    # Check for mount availability before updating the free size
    retry_errors(os.statvfs, [mntdir], [ENOTCONN])

    with statfile_lock:
        with SizeAccounting(hostvol, mntdir) as accounting:
            if sizechange <= 0:
                # Space consumed by a PV: record its (positive) size
                accounting.update_pv_record(pvname, -sizechange)
            else:
                # Space reclaimed: drop the PV's record entirely
                accounting.remove_pv_record(pvname)
def mount_and_select_hosting_volume(pv_hosting_volumes, required_size):
    """Mount each hosting volume to find available space"""
    for volume in pv_hosting_volumes:
        hvol = volume['name']
        mntdir = os.path.join(HOSTVOL_MOUNTDIR, hvol)
        mount_glusterfs(volume, mntdir)

        with statfile_lock:
            # Stat done before `os.path.exists` to prevent ignoring
            # file not exists even in case of ENOTCONN
            mntdir_stat = retry_errors(os.statvfs, [mntdir], [ENOTCONN])
            with SizeAccounting(hvol, mntdir) as acc:
                acc.update_summary(mntdir_stat.f_bavail *
                                   mntdir_stat.f_bsize)
                pv_stats = acc.get_stats()

            free_bytes = pv_stats["free_size_bytes"]
            reserved_size = free_bytes * RESERVED_SIZE_PERCENTAGE / 100

            logging.debug(
                logf("pv stats",
                     hostvol=hvol,
                     total_size_bytes=pv_stats["total_size_bytes"],
                     used_size_bytes=pv_stats["used_size_bytes"],
                     free_size_bytes=free_bytes,
                     number_of_pvs=pv_stats["number_of_pvs"],
                     required_size=required_size,
                     reserved_size=reserved_size))

            # First pool with enough headroom (beyond the reserve) wins
            if required_size < (free_bytes - reserved_size):
                return hvol

    return None
def is_hosting_volume_free(hostvol, requested_pvsize):
    """Check if host volume is free to expand or create (external)volume"""
    mntdir = os.path.join(HOSTVOL_MOUNTDIR, hostvol)

    with statfile_lock:
        # Stat done before `os.path.exists` to prevent ignoring
        # file not exists even in case of ENOTCONN
        mntdir_stat = retry_errors(os.statvfs, [mntdir], [ENOTCONN])
        with SizeAccounting(hostvol, mntdir) as acc:
            acc.update_summary(mntdir_stat.f_bavail * mntdir_stat.f_bsize)
            stats = acc.get_stats()

        free_bytes = stats["free_size_bytes"]
        reserved_size = free_bytes * RESERVED_SIZE_PERCENTAGE / 100

        logging.debug(
            logf("pv stats",
                 hostvol=hostvol,
                 total_size_bytes=stats["total_size_bytes"],
                 used_size_bytes=stats["used_size_bytes"],
                 free_size_bytes=free_bytes,
                 number_of_pvs=stats["number_of_pvs"],
                 required_size=requested_pvsize,
                 reserved_size=reserved_size))

        # Free only if the request fits under free space minus reserve
        return requested_pvsize < (free_bytes - reserved_size)
def volume_list(voltype=None):
    """List of Volumes"""
    volumes = []
    for volume in get_pv_hosting_volumes():
        mntdir = os.path.join(HOSTVOL_MOUNTDIR, volume['name'])
        mount_glusterfs(volume, mntdir)

        # Check for mount availability before listing the Volumes
        retry_errors(os.statvfs, [mntdir], [ENOTCONN])

        # voltype=None means collect both PV types
        for pv_type in (PV_TYPE_SUBVOL, PV_TYPE_VIRTBLOCK):
            if voltype in (None, pv_type):
                get_subdir_virtblock_vols(mntdir, volumes, pv_type)

    return volumes
def save_pv_metadata(hostvol_mnt, pvpath, pvsize):
    """Save PV metadata in info file"""
    # Create info dir if not exists
    info_file_path = os.path.join(hostvol_mnt, "info", pvpath)
    info_file_dir = os.path.dirname(info_file_path)

    retry_errors(makedirs, [info_file_dir], [ENOTCONN])
    logging.debug(
        logf("Created metadata directory", metadata_dir=info_file_dir))

    # Size and parent-dir prefix are what quotad/delete paths need later
    metadata = {
        "size": pvsize,
        "path_prefix": os.path.dirname(pvpath)
    }
    with open(info_file_path + ".json", "w") as info_file:
        info_file.write(json.dumps(metadata))

    logging.debug(logf(
        "Metadata saved",
        metadata_file=info_file_path,
    ))
def delete_archived_pvs(storage_name, archived_pvs):
    """
    Delete all archived PVCs listed in archived_pvs.

    For each archived PV: release its accounted size from the storage
    pool's stats.db, delete its info (metadata) file and delete the
    archived PVC data itself.

    BUG FIX: the original called shutil.rmtree on os.path.dirname(...)
    for both the info file and the PVC, which removed the whole shared
    path_prefix directory — wiping the info files and data of every
    other PV hashed into the same prefix. Only the targeted file or
    directory is removed now.
    """
    for pvname, values in archived_pvs.items():
        # Check for mount availablity before deleting info file & PVC
        mntdir = os.path.join(HOSTVOL_MOUNTDIR, storage_name)
        retry_errors(os.statvfs, [mntdir], [ENOTCONN])

        # Remove PV record from stats.db (records are keyed on the
        # original name, without the "archived-" prefix)
        update_free_size(storage_name, pvname.replace("archived-", ""),
                         values["size"])

        # Delete only this PV's info file, keeping the shared
        # path_prefix directory intact for other PVs.
        info_file_path = os.path.join(mntdir, "info",
                                      values["path_prefix"],
                                      pvname + ".json")
        try:
            os.remove(info_file_path)
        except FileNotFoundError:
            # Best-effort cleanup: already gone is fine
            pass

        # Delete the PVC itself: a subdir PV is a directory, a
        # virtblock PV is a regular file.
        pvc_path = os.path.join(mntdir, values["path_prefix"], pvname)
        if os.path.isdir(pvc_path):
            shutil.rmtree(pvc_path, ignore_errors=True)
        elif os.path.exists(pvc_path):
            os.remove(pvc_path)
def update_virtblock_volume(hostvol_mnt, volname, expansion_requested_pvsize):
    """
    Update virtual block volume.

    Extends the backing file of an existing virtblock PV to the
    requested expansion size and refreshes its metadata file.
    """
    volhash = get_volname_hash(volname)
    volpath = get_volume_path(PV_TYPE_VIRTBLOCK, volhash, volname)
    volpath_full = os.path.join(hostvol_mnt, volpath)
    logging.debug(logf("Volume hash", volhash=volhash))

    # Check for mount availability before updating virtblock volume
    retry_errors(os.statvfs, [hostvol_mnt], [ENOTCONN])

    # Update the file with required size
    makedirs(os.path.dirname(volpath_full))
    logging.debug(
        logf("Updated virtblock directory", path=os.path.dirname(volpath)))

    # Ensure the backing file exists, then extend it via the external
    # `truncate` command (unlike create, which uses os.truncate).
    volpath_fd = os.open(volpath_full, os.O_CREAT | os.O_RDWR)
    os.close(volpath_fd)
    execute("truncate", "-s", expansion_requested_pvsize, volpath_full)
    logging.debug(
        logf("Truncated file to required size",
             path=volpath,
             size=expansion_requested_pvsize))

    # TODO: Multiple FS support based on volume_capability mount option
    # NOTE(review): running mkfs on the backing file of an EXISTING
    # volume during expansion looks wrong — mkfs.xfs without -f refuses
    # on a formatted file, and reformatting would destroy data. Verify
    # whether expansion should use xfs_growfs instead.
    execute(MKFS_XFS_CMD, volpath_full)
    logging.debug(
        logf("Created Filesystem", path=volpath, command=MKFS_XFS_CMD))
    update_pv_metadata(hostvol_mnt, volpath, expansion_requested_pvsize)

    return Volume(
        volname=volname,
        voltype=PV_TYPE_VIRTBLOCK,
        volhash=volhash,
        hostvol=os.path.basename(hostvol_mnt),
        size=expansion_requested_pvsize,
        volpath=volpath,
    )
def update_pv_metadata(hostvol_mnt, pvpath, expansion_requested_pvsize):
    """Update PV metadata in info file"""
    # Create info dir if not exists
    info_file_path = os.path.join(hostvol_mnt, "info", pvpath)
    info_file_dir = os.path.dirname(info_file_path)

    retry_errors(makedirs, [info_file_dir], [ENOTCONN])
    logging.debug(
        logf("Updated metadata directory", metadata_dir=info_file_dir))

    json_path = info_file_path + ".json"

    # Load the existing PV metadata
    with open(json_path, "r") as info_file:
        data = json.load(info_file)

    # Apply the expanded size and refresh the path prefix
    data["size"] = expansion_requested_pvsize
    data["path_prefix"] = os.path.dirname(pvpath)

    # Persist the updated metadata
    with open(json_path, "w+") as info_file:
        info_file.write(json.dumps(data))

    logging.debug(logf("Metadata updated", metadata_file=info_file_path))
def mount_and_select_hosting_volume(pv_hosting_volumes, required_size):
    """Mount each hosting volume to find available space"""
    for volume in pv_hosting_volumes:
        hvol = volume['name']
        mntdir = os.path.join(HOSTVOL_MOUNTDIR, hvol)
        mount_glusterfs(volume, mntdir)
        stat_file_path = os.path.join(mntdir, ".stat")

        with statfile_lock:
            # Stat done before `os.path.exists` to prevent ignoring
            # file not exists even in case of ENOTCONN
            mntdir_stat = retry_errors(os.statvfs, [mntdir], [ENOTCONN])

            if os.path.exists(stat_file_path):
                with open(stat_file_path) as stat_file:
                    data = json.load(stat_file)
            else:
                # First use of this pool: seed the .stat file with the
                # currently available bytes as both total and free size
                available = mntdir_stat.f_bavail * mntdir_stat.f_bsize
                data = {
                    "size": available,
                    "free_size": available
                }
                with open(stat_file_path, "w") as stat_file:
                    stat_file.write(json.dumps(data))

            reserved_size = data["free_size"] * RESERVED_SIZE_PERCENTAGE / 100
            logging.debug(logf(
                "stat file content",
                hostvol=hvol,
                data=data,
                required_size=required_size,
                reserved_size=reserved_size
            ))
            # First pool with enough headroom (beyond the reserve) wins
            if required_size < (data["free_size"] - reserved_size):
                return hvol

    return None
def update_subdir_volume(hostvol_mnt, hostvoltype, volname,
                         expansion_requested_pvsize):
    """
    Update sub directory Volume.

    Refreshes the PV metadata with the expansion size, re-applies the
    simple-quota limit xattr, then polls statvfs of the PV directory
    until the size matches within a 5% tolerance or ~6 tries elapse.
    Returns None for 'External' hosting volumes (quota is not set here).
    """
    volhash = get_volname_hash(volname)
    volpath = get_volume_path(PV_TYPE_SUBVOL, volhash, volname)
    logging.debug(logf("Volume hash", volhash=volhash))

    # Check for mount availability before updating subdir volume
    retry_errors(os.statvfs, [hostvol_mnt], [ENOTCONN])

    # Create a subdir
    makedirs(os.path.join(hostvol_mnt, volpath))
    logging.debug(logf("Updated PV directory", pvdir=volpath))

    # Write info file so that Brick's quotad sidecar
    # container picks it up.
    update_pv_metadata(hostvol_mnt, volpath, expansion_requested_pvsize)

    # Wait for quota set
    # TODO: Handle Timeout
    # ±5% window — statvfs sizes are block-rounded
    pvsize_buffer = expansion_requested_pvsize * 0.05  # 5%
    pvsize_min = (expansion_requested_pvsize - pvsize_buffer)
    pvsize_max = (expansion_requested_pvsize + pvsize_buffer)
    logging.debug(
        logf(
            "Watching df of pv directory",
            pvdir=volpath,
            pvsize_buffer=pvsize_buffer,
        ))

    # Handle this case in calling function
    # NOTE(review): this early return happens AFTER makedirs and
    # update_pv_metadata, so External volumes still get their metadata
    # refreshed — presumably intentional; confirm with callers.
    if hostvoltype == 'External':
        return None

    # Re-apply the simple-quota limit xattr for the new size
    retry_errors(os.setxattr, [
        os.path.join(hostvol_mnt, volpath),
        "trusted.gfs.squota.limit",
        str(expansion_requested_pvsize).encode()
    ], [ENOTCONN])

    count = 0
    while True:
        count += 1
        pvstat = retry_errors(os.statvfs,
                              [os.path.join(hostvol_mnt, volpath)],
                              [ENOTCONN])
        # assumes quota enforcement makes f_blocks reflect the PV's
        # allotted size rather than the whole filesystem — TODO confirm
        volsize = pvstat.f_blocks * pvstat.f_bsize
        if pvsize_min < volsize < pvsize_max:
            logging.debug(
                logf("Matching df output, Quota update set successful",
                     volsize=volsize,
                     pvsize=expansion_requested_pvsize,
                     num_tries=count))
            break

        if count >= 6:
            # Give up waiting but continue — quota may still land later
            logging.warning(
                logf(
                    "Waited for some time, Quota update set failed, continuing.",
                    volsize=volsize,
                    pvsize=expansion_requested_pvsize,
                    num_tries=count))
            break

        time.sleep(1)

    return Volume(
        volname=volname,
        voltype=PV_TYPE_SUBVOL,
        volhash=volhash,
        hostvol=os.path.basename(hostvol_mnt),
        size=expansion_requested_pvsize,
        volpath=volpath,
    )
def delete_volume(volname):
    """
    Delete virtual block, sub directory volume, or External.

    Looks the volume up across all hosting volumes, removes its data
    (rmtree for subdir PVs, unlink for virtblock files), then removes
    the metadata info file and returns the accounted size to the pool.
    Returns False only when the volume is not found; deletion errors
    are logged and the function still returns True (best-effort).
    """
    vol = search_volume(volname)
    if vol is None:
        logging.warning(logf("Volume not found for delete",
                             volname=volname))
        return False

    logging.debug(
        logf("Volume found for delete",
             volname=vol.volname,
             voltype=vol.voltype,
             volhash=vol.volhash,
             hostvol=vol.hostvol))

    # Check for mount availability before deleting the volume
    retry_errors(os.statvfs, [os.path.join(HOSTVOL_MOUNTDIR, vol.hostvol)],
                 [ENOTCONN])

    volpath = os.path.join(HOSTVOL_MOUNTDIR, vol.hostvol, vol.volpath)
    try:
        if vol.voltype == PV_TYPE_SUBVOL:
            # Subdir PV is a directory tree
            shutil.rmtree(volpath)
        else:
            # Virtblock PV is a single backing file
            os.remove(volpath)
    except OSError as err:
        logging.info(
            logf(
                "Error while deleting volume",
                volpath=volpath,
                voltype=vol.voltype,
                error=err,
            ))

    # NOTE(review): this log runs even when the deletion above failed
    logging.debug(logf("Volume deleted", volpath=volpath,
                       voltype=vol.voltype))

    # Delete Metadata file
    info_file_path = os.path.join(HOSTVOL_MOUNTDIR, vol.hostvol, "info",
                                  vol.volpath + ".json")
    try:
        with open(info_file_path) as info_file:
            data = json.load(info_file)
            # We assume there would be a create before delete, but while
            # developing thats not true. There can be a delete request for
            # previously created pvc, which would be assigned to you once
            # you come up. We can't fail then.
            update_free_size(vol.hostvol, volname, data["size"])

        os.remove(info_file_path)
        logging.debug(
            logf("Removed volume metadata file",
                 path="info/" + vol.volpath + ".json",
                 hostvol=vol.hostvol))
    except OSError as err:
        logging.info(
            logf(
                "Error while removing the file",
                path="info/" + vol.volpath + ".json",
                hostvol=vol.hostvol,
                error=err,
            ))

    return True
def delete_volume(volname):
    """
    Delete virtual block, sub directory volume, or External.

    Honors the storage pool's pvReclaimPolicy (read from the pool's
    .info file, default "delete"): with "archive" the PV data and its
    info file are renamed with an "archived-" prefix and kept; with
    "delete" the data is removed, the metadata file deleted and the
    accounted size returned to the pool. Returns False only when the
    volume is not found; individual errors are logged, best-effort.
    """
    vol = search_volume(volname)
    if vol is None:
        logging.warning(logf("Volume not found for delete",
                             volname=volname))
        return False

    logging.debug(
        logf("Volume found for delete",
             volname=vol.volname,
             voltype=vol.voltype,
             volhash=vol.volhash,
             hostvol=vol.hostvol))

    # Check for mount availability before deleting the volume
    retry_errors(os.statvfs, [os.path.join(HOSTVOL_MOUNTDIR, vol.hostvol)],
                 [ENOTCONN])

    # Read the pool's reclaim policy from its .info file
    storage_filename = vol.hostvol + ".info"
    with open(os.path.join(VOLINFO_DIR, storage_filename)) as info_file:
        storage_data = json.load(info_file)
    pv_reclaim_policy = storage_data.get("pvReclaimPolicy", "delete")

    volpath = os.path.join(HOSTVOL_MOUNTDIR, vol.hostvol, vol.volpath)

    if pv_reclaim_policy == "archive":
        # Archive instead of delete: rename data and info file with an
        # "archived-" prefix; vol is mutated to reflect the new names.
        old_volname = vol.volname
        vol.volname = "archived-" + vol.volname
        path_prefix = os.path.dirname(vol.volpath)
        vol.volpath = os.path.join(path_prefix, vol.volname)

        # Rename directory & files that are to be archived
        try:
            # Brick/PVC
            os.rename(
                os.path.join(HOSTVOL_MOUNTDIR, vol.hostvol, path_prefix,
                             old_volname),
                os.path.join(HOSTVOL_MOUNTDIR, vol.hostvol, path_prefix,
                             vol.volname))

            # Info-File
            old_info_file_name = old_volname + ".json"
            info_file_name = vol.volname + ".json"
            os.rename(
                os.path.join(HOSTVOL_MOUNTDIR, vol.hostvol, "info",
                             path_prefix, old_info_file_name),
                os.path.join(HOSTVOL_MOUNTDIR, vol.hostvol, "info",
                             path_prefix, info_file_name))

            logging.info(
                logf("Volume archived",
                     old_volname=old_volname,
                     new_archived_volname=vol.volname,
                     volpath=vol.volpath))
        except OSError as err:
            logging.info(
                logf(
                    "Error while archiving volume",
                    volname=old_volname,
                    volpath=os.path.join(path_prefix, old_volname),
                    voltype=vol.voltype,
                    error=err,
                ))
        # Archived volumes keep their data and accounted size
        return True

    try:
        if vol.voltype == PV_TYPE_SUBVOL:
            # Subdir PV is a directory tree
            shutil.rmtree(volpath)
        else:
            # Virtblock PV is a single backing file
            os.remove(volpath)
    except OSError as err:
        logging.info(
            logf(
                "Error while deleting volume",
                volpath=volpath,
                voltype=vol.voltype,
                error=err,
            ))

    # NOTE(review): this log runs even when the deletion above failed
    logging.info(logf("Volume deleted", volpath=volpath,
                      voltype=vol.voltype))

    # Delete Metadata file
    info_file_path = os.path.join(HOSTVOL_MOUNTDIR, vol.hostvol, "info",
                                  vol.volpath + ".json")
    try:
        with open(info_file_path) as info_file:
            data = json.load(info_file)
            # We assume there would be a create before delete, but while
            # developing thats not true. There can be a delete request for
            # previously created pvc, which would be assigned to you once
            # you come up. We can't fail then.
            update_free_size(vol.hostvol, volname, data["size"])

        os.remove(info_file_path)
        logging.debug(
            logf("Removed volume metadata file",
                 path="info/" + vol.volpath + ".json",
                 hostvol=vol.hostvol))
    except OSError as err:
        logging.info(
            logf(
                "Error while removing the file",
                path="info/" + vol.volpath + ".json",
                hostvol=vol.hostvol,
                error=err,
            ))

    return True
def create_subdir_volume(hostvol_mnt, volname, size):
    """
    Create sub directory Volume.

    Creates the PV directory, writes its metadata file, applies the
    simple-quota xattrs (best-effort), then polls statvfs of the new
    directory until the size matches within a 5% tolerance (quota
    applied) or ~6 tries elapse.
    """
    volhash = get_volname_hash(volname)
    volpath = get_volume_path(PV_TYPE_SUBVOL, volhash, volname)
    logging.debug(logf("Volume hash", volhash=volhash))

    # Check for mount availability before creating subdir volume
    retry_errors(os.statvfs, [hostvol_mnt], [ENOTCONN])

    # Create a subdir
    makedirs(os.path.join(hostvol_mnt, volpath))
    logging.debug(logf("Created PV directory", pvdir=volpath))

    # Write info file so that Brick's quotad sidecar
    # container picks it up.
    save_pv_metadata(hostvol_mnt, volpath, size)

    # Wait for quota set
    # TODO: Handle Timeout
    # ±5% window — statvfs sizes are block-rounded
    pvsize_buffer = size * 0.05  # 5%
    pvsize_min = (size - pvsize_buffer)
    pvsize_max = (size + pvsize_buffer)
    logging.debug(
        logf(
            "Watching df of pv directory",
            pvdir=volpath,
            pvsize_buffer=pvsize_buffer,
        ))

    # Equivalent of:
    #setfattr -n trusted.glusterfs.namespace -v true
    #setfattr -n trusted.gfs.squota.limit -v size
    try:
        retry_errors(os.setxattr, [
            os.path.join(hostvol_mnt, volpath),
            "trusted.glusterfs.namespace", "true".encode()
        ], [ENOTCONN])
        retry_errors(os.setxattr, [
            os.path.join(hostvol_mnt, volpath),
            "trusted.gfs.squota.limit",
            str(size).encode()
        ], [ENOTCONN])
    # Quota set is best-effort: failure is logged, creation continues
    # noqa # pylint: disable=broad-except
    except Exception as err:
        logging.info(
            logf("Failed to set quota using simple-quota. Continuing",
                 error=err))

    count = 0
    while True:
        count += 1
        pvstat = retry_errors(os.statvfs,
                              [os.path.join(hostvol_mnt, volpath)],
                              [ENOTCONN])
        # assumes quota enforcement makes f_blocks reflect the PV's
        # allotted size rather than the whole filesystem — TODO confirm
        volsize = pvstat.f_blocks * pvstat.f_bsize
        if pvsize_min < volsize < pvsize_max:
            logging.debug(
                logf("Matching df output, Quota set successful",
                     volsize=volsize,
                     num_tries=count))
            break

        if count >= 6:
            # Give up waiting but continue — quota may still land later
            logging.warning(
                logf("Waited for some time, Quota set failed, continuing.",
                     volsize=volsize,
                     num_tries=count))
            break

        time.sleep(1)

    return Volume(
        volname=volname,
        voltype=PV_TYPE_SUBVOL,
        volhash=volhash,
        hostvol=os.path.basename(hostvol_mnt),
        size=size,
        volpath=volpath,
    )