def get_job(args):
    job_manager = JobManager()
    # Get status
    if args.t == 1:
        print(int(job_manager.is_done(args.id)))
    # Get output
    elif args.t == 2:
        print(job_manager.get_job_output(args.id))
    sys.exit(0)
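
# A minimal sketch (assumption, not part of the original module) of how get_job
# could be wired to an argparse entry point. The option names '-t' and '-id' are
# inferred from the attributes get_job reads (args.t, args.id); the helper name
# and the actual PetaSAN CLI wiring are hypothetical.
def _example_get_job_cli(argv=None):
    import argparse
    parser = argparse.ArgumentParser(description="Hypothetical job query CLI")
    parser.add_argument('-t', type=int, required=True,
                        help='1 = print whether the job is done, 2 = print the job output')
    parser.add_argument('-id', type=str, required=True, help='job id to query')
    # get_job() prints its result and then calls sys.exit(0)
    get_job(parser.parse_args(argv))
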
def get_full_disk_list(pid=None):
    __output_split_text = "##petasan##"
    disk_list = []
    ceph_disk_list = get_disk_list()
    ph_disk_list = disk_util.get_disk_list()
    osd_dict = None

    try:
        osd_dict = ceph_osd.ceph_osd_tree(configuration().get_node_info().name)
    except Exception as e:
        logger.error(e)

    missing_disk_list = []

    # Set osd id and usage on the physical disks that ceph knows about
    if ceph_disk_list and len(ceph_disk_list) > 0:
        for disk in ceph_disk_list:
            for ph_disk in ph_disk_list:
                if ph_disk.name == disk.name:
                    ph_disk.usage = disk.usage
                    ph_disk.osd_id = disk.osd_id
                    ph_disk.osd_uuid = disk.osd_uuid
                    ph_disk.linked_journal = disk.linked_journal
                    ph_disk.linked_osds = disk.linked_osds
                    ph_disk.linked_cache = disk.linked_cache
                    ph_disk.linked_cache_part_num = disk.linked_cache_part_num
                    ph_disk.vg_name = disk.vg_name
                    ph_disk.lv_name = disk.lv_name
                    ph_disk.linked_journal_part_num = disk.linked_journal_part_num
                    ph_disk.no_of_partitions = disk.no_of_partitions
                    ph_disk.no_available_partitions = disk.no_available_partitions
                    disk_list.append(ph_disk)
                    break
    else:
        disk_list.extend(ph_disk_list)

    # Attach SMART health results by disk name
    health_test = Smart().get_overall_health()
    for disk in disk_list:
        if disk.name in health_test:
            disk.smart_test = health_test[disk.name]

    # Get all running jobs
    job_manager = JobManager()
    job_list = job_manager.get_running_job_list()

    # Set disk osd status
    for node_disk in disk_list:
        # Set osd status [up, down]
        if node_disk.usage == DiskUsage.osd:
            status = None
            if osd_dict and node_disk.osd_id is not None:
                status = osd_dict.get(int(node_disk.osd_id), None)

            if str(ceph_osd.get_osd_id(node_disk.osd_uuid)) == "-1":
                node_disk.status = OsdStatus.no_status
                node_disk.usage = DiskUsage.mounted
                node_disk.osd_id = -1
            elif status is not None:
                node_disk.status = status
            else:
                node_disk.status = OsdStatus.no_status

        disk_name_parameter = "-disk_name {}".format(node_disk.name)
        disk_id_parameter = "-id {}".format(node_disk.osd_id)

        # Loop on the running job list
        for j in job_list:
            # Set osd status [adding, deleting]
            if j.type == JobType.ADDDISK and str(j.params).find(str(disk_name_parameter)) > -1:
                node_disk.status = OsdStatus.adding
            elif j.type == JobType.ADDJOURNAL and str(j.params).find(str(disk_name_parameter)) > -1:
                node_disk.status = OsdStatus.adding_journal
            elif j.type == JobType.ADDCACHE and str(j.params).find(str(disk_name_parameter)) > -1:
                node_disk.status = OsdStatus.adding_cache
            elif j.type == JobType.DELETEOSD and (str(j.params).find(str(disk_name_parameter)) > -1 or
                                                  str(j.params).find(str(disk_id_parameter)) > -1):
                node_disk.status = OsdStatus.deleting
            elif j.type == JobType.DELETEJOURNAL and str(j.params).find(str(disk_name_parameter)) > -1:
                node_disk.status = OsdStatus.deleting
            elif j.type == JobType.DELETECACHE and str(j.params).find(str(disk_name_parameter)) > -1:
                node_disk.status = OsdStatus.deleting
            # Check whether the job has completed and returned an error message
            elif pid and j.id == int(pid):
                job_output = job_manager.get_job_output(j)
                if job_output is None:
                    continue

                job_output = str(job_output).strip()
                if job_output != "":
                    # We expect our custom messages to appear after __output_split_text.
                    out_arr = job_output.split(__output_split_text)
                    if len(out_arr) > 1:
                        node_disk.error_message = out_arr[1]
                        job_manager.remove_job(j.id)

    if not osd_dict or len(osd_dict.items()) == 0:
        return disk_list

    # If an osd appears in the ceph tree but has no matching physical disk,
    # report it as a missing disk.
    for osd_id, osd_status in osd_dict.items():
        is_missing = True
        for disk in disk_list:
            if str(disk.osd_id) == str(osd_id):
                is_missing = False
                break
        if is_missing:
            disk = DiskInfo()
            disk.osd_id = osd_id
            disk.status = osd_status
            disk.usage = DiskUsage.osd
            missing_disk_list.append(disk)

    disk_list.extend(missing_disk_list)
    return disk_list
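
# A minimal sketch (assumption, not part of the original module) showing how the
# list returned by get_full_disk_list() might be consumed. The attribute names
# (name, usage, osd_id, status, error_message) are taken from the assignments in
# the function above; the helper name and output format are hypothetical.
def _example_print_disk_summary(pid=None):
    for disk in get_full_disk_list(pid):
        line = "disk={} usage={} osd_id={} status={}".format(
            getattr(disk, 'name', None), disk.usage, disk.osd_id, disk.status)
        # error_message is only set when a completed job reported an error
        if getattr(disk, 'error_message', None):
            line += " error={}".format(disk.error_message)
        print(line)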