def disk_list(delete_job_id=-1, disk_id="", pool=""):
    """Render the admin disk-list page.

    Loads all disk metadata plus the cluster fsid and pops any one-shot
    flash message ("err"/"success"/"warning") out of the session.

    Args:
        delete_job_id: id of an in-flight delete job to highlight (-1 = none).
        disk_id: optional disk id to pass through to the template.
        pool: optional pool name to pass through to the template.

    Returns:
        Rendered 'admin/disk/list.html' template.
    """
    mesg_err = ""
    mesg_success = ""
    mesg_warning = ""
    available_disk_list = []
    disk_status = None
    base_url = request.base_url
    cluster_fsid = ""
    try:
        manage_disk = ManageDisk()
        available_disk_list = manage_disk.get_disks_meta()
        cluster_fsid = ceph_disk.get_fsid(configuration().get_cluster_name())
        disk_status = DisplayDiskStatus
        # Only one flash message is shown at a time; pop() both reads and clears it.
        if "err" in session:
            mesg_err = session.pop("err")
        elif "success" in session:
            mesg_success = session.pop("success")
        elif "warning" in session:
            mesg_warning = session.pop("warning")
    except Exception as e:
        # Fix: previously the exception was silently discarded; log it so
        # page-load failures are diagnosable, then show a generic error.
        logger.error(e)
        mesg_err = "error in loading page"
    return render_template('admin/disk/list.html', diskList=available_disk_list,
                           diskStatus=disk_status, cluster_fsid=cluster_fsid,
                           err=mesg_err, base_url=base_url, disk_id=disk_id,
                           delete_job_id=delete_job_id, pool=pool,
                           success=mesg_success, warning=mesg_warning)
def stop_dest_disk(args):
    """Stop the destination disk identified by args.disk_id.

    Exits the process: 0 on success (or if the disk is not registered in
    Consul, i.e. already stopped), -1 on failure or unexpected exception.
    """
    disk_id = args.disk_id
    consul_api = ConsulAPI()
    try:
        # A missing Consul key means the disk is not running at all.
        kv = consul_api.find_disk(disk_id)
        if kv is None:
            print('Disk {} is already stopped.'.format(disk_id))
            sys.exit(0)
        manage_disk = ManageDisk()
        status = manage_disk.stop(disk_id)
        if status != Status.done:
            print('Error : Cannot stop disk , id = ' + disk_id)
            sys.exit(-1)
        sys.exit(0)
    except Exception as e:
        # NOTE: e.message is Python-2-only, matching the rest of this file.
        print("Error : Exception - {}".format(str(e.message)))
        sys.exit(-1)
def create_disk(disk_id):
    """Dev/test helper: create a hard-coded iSCSI disk and dump its metadata.

    Builds a DiskMeta with fixed name/size/credentials and two static iSCSI
    paths, registers it via CephAPI.add_disk, then reads the image metadata
    back and prints it. Python 2 code (`print disk` statement).

    Args:
        disk_id: id used for the new disk and to derive its IQN and image name.
    """
    ceph_api = CephAPI()
    manage_disk = ManageDisk()  # NOTE(review): created but never used here
    disk_meta = DiskMeta()
    disk_meta.disk_name = "sanatech" + str("sql1")
    disk_meta.size = 1  # size in GB — presumably; verify against add_disk
    disk_meta.password = "******"
    disk_meta.user = "******"
    # Two hard-coded test paths, one per iSCSI subnet/interface.
    path1 = Path()
    path2 = Path()
    path1.ip = "192.168.57.150"
    path1.subnet_mask = "255.255.255.0"
    path1.eth = "eth0"
    path2.ip = "192.168.58.150"
    path2.subnet_mask = "255.255.255.0"
    path2.eth = "eth1"
    disk_meta.paths.append(path1)
    disk_meta.paths.append(path2)
    disk_meta.id = disk_id
    # IQN is the configured base plus the disk id.
    disk_meta.iqn = app_conf.read_app_config().iqn_base + ":" + disk_meta.id
    status = ceph_api.add_disk(disk_meta)
    if status == ManageDiskStatus.done:
        logger.info("done , create disk")
        # Read the metadata just written to the rbd image and round-trip it
        # through DiskMeta to verify it was stored correctly.
        attr = ceph_api.read_image_metadata("image-" + disk_id)
        xx = attr.get(app_conf.get_image_meta_key())
        logger.info(xx)
        disk = DiskMeta()
        disk.load_json(xx)
        print("disk user is %s" % (disk.user))
        print disk
    elif status == ManageDiskStatus.disk_name_exists:
        print("disk is Exists")
    else:
        print("error create disk")
def clear_disk(args):
    """Wipe a PetaSAN disk's data while keeping its identity (9-stage script).

    Strategy: create an empty tmp rbd image, copy the old disk's PetaSAN
    metadata onto it, rename the old image to "deleted-<id>-<rand>", rename
    the tmp image to the old image name, and queue a background job to delete
    the old (renamed) image. Exits 0 on success, -1 on any failure.

    Args:
        args: parsed CLI args; only args.disk_id is used.
    """
    disk_id = args.disk_id
    image_name = "image-" + disk_id
    try:
        # Get which ceph user is using this function & get his keyring file path #
        # ---------------------------------------------------------------------- #
        ceph_auth = CephAuthenticator()
        config = configuration()
        cluster_name = config.get_cluster_name()
        # Get disk metadata :
        # -------------------
        ceph_api = CephAPI()
        disk_metadata = ceph_api.get_diskmeta(disk_id)
        # Get pool name :
        # ---------------
        pool_name = disk_metadata.pool
        data_pool = ""
        # Check if disk has been created on replicated pool or erasure pool :
        # -------------------------------------------------------------------
        if len(disk_metadata.data_pool) > 0:
            data_pool = disk_metadata.data_pool
        tmp_image_name = "tmp_disk_" + disk_metadata.id
        # (1.) Check if a previous tmp image for this disk is still existed :
        # ===================================================================
        # A leftover tmp image from a previously-interrupted run is removed
        # so the later create/rename stages cannot collide with it.
        images_list = ceph_api.get_all_images(pool_name)
        for image in images_list:
            if tmp_image_name in image:
                # Delete image #
                cmd = "rbd rm {}/{} {} --cluster {}".format(
                    pool_name, image, ceph_auth.get_authentication_string(),
                    cluster_name)
                if not call_cmd(cmd):
                    print(
                        "Error : clear_disk.py script : cannot remove tmp image ,\ncmd : "
                        + cmd)
                    sys.exit(-1)
        print(
            "Stage 1 :\n\tCheck if a previous tmp image for this disk is still existed > (Completed)"
        )
        logger.info(
            "Stage 1 :\n\tCheck if a previous tmp image for this disk is still existed > (Completed)"
        )
        # (2.) Stop old disk :
        # ====================
        # The disk is only stopped if Consul still has it registered; an
        # absent key means it is already stopped.
        consul_api = ConsulAPI()
        kv = consul_api.find_disk(disk_id)
        if kv is not None:
            manage_disk = ManageDisk()
            status = manage_disk.stop(disk_id)
            if status != Status.done:
                print('Error : Cannot stop disk , id = ' + disk_id)
                sys.exit(-1)
            print("Stage 2 :\n\tStop old disk > (Completed)")
            logger.info("Stage 2 :\n\tStop old disk > (Completed)")
            time.sleep(3)  # give the stop a moment to propagate before checking
            # (3.) Check if old disk is stopped or not :
            # ==========================================
            # For erasure-coded disks the watchers live on the data pool.
            if len(data_pool) > 0:
                pool_type = "erasure"
                _confirm_disk_stopped(data_pool, disk_id, pool_type)
            else:
                pool_type = "replicated"
                _confirm_disk_stopped(pool_name, disk_id, pool_type)
            print(
                "Stage 3 :\n\tConfirm that disk is completely stopped > (Completed)"
            )
            logger.info(
                "Stage 3 :\n\tConfirm that disk is completely stopped > (Completed)"
            )
        else:
            print("Stage 2 :\n\tStop old disk > (Completed)")
            logger.info("Stage 2 :\n\tStop old disk > (Completed)")
            print(
                "Stage 3 :\n\tConfirm that disk is completely stopped > (Completed)"
            )
            logger.info(
                "Stage 3 :\n\tConfirm that disk is completely stopped > (Completed)"
            )
            print('\tclear_disk.py script : disk {} is already stopped'.format(
                disk_id))
        # (4.) Create a tmp image (not PetaSAN image) :
        # =============================================
        # Generate a random value between 1 and 99999 #
        # The random suffix avoids name clashes if stage 1 missed anything.
        random_no = str(random.randint(1, 100000))
        tmp_image_name = tmp_image_name + "_" + str(random_no)
        image_size = disk_metadata.size * 1024  # disk size is in GB; rbd wants MB
        if len(data_pool) > 0:
            cmd = "rbd create {}/{} --size {} --data-pool {} {} --cluster {}".format(
                pool_name, tmp_image_name, image_size, data_pool,
                ceph_auth.get_authentication_string(), cluster_name)
        else:
            cmd = "rbd create {}/{} --size {} {} --cluster {}".format(
                pool_name, tmp_image_name, image_size,
                ceph_auth.get_authentication_string(), cluster_name)
        if not call_cmd(cmd):
            print(
                "Error : clear_disk.py script : cannot create new tmp image ,\ncmd : "
                + cmd)
            sys.exit(-1)
        print("Stage 4 :\n\tCreate a tmp image called ( " + tmp_image_name +
              " ) > (Completed)")
        logger.info("Stage 4 :\n\tCreate a tmp image called ( " +
                    tmp_image_name + " ) > (Completed)")
        # (5.) Run script to copy "old disk" metadata to new "tmp_disk" :
        # ===============================================================
        # Pipes the metadata "read" of the old image into the "write" of the
        # tmp image using the external disk-meta script.
        metadata_script_file = ConfigAPI().get_disk_meta_script_path()
        # Function : read_disks_metadata :
        parser_key_1 = "read"
        arg_1 = "--image"
        arg_2 = "--pool"
        # Function : set_disk_metadata :
        parser_key_2 = "write"
        arg_3 = "--file"  # NOTE(review): defined but unused in this function
        cmd = metadata_script_file + " " + parser_key_1 + " " + arg_1 + " " + image_name + " " + arg_2 + " " + pool_name +\
              " | " + metadata_script_file + " " + parser_key_2 + " " + arg_1 + " " + tmp_image_name + " " + arg_2 + " " + pool_name
        if not call_cmd(cmd):
            print(
                "Error : clear_disk.py script : cannot copy metadata from old disk to new tmp image ,\ncmd : "
                + cmd)
            sys.exit(-1)
        print(
            "Stage 5 :\n\tRun script to copy 'old disk' metadata to new 'tmp_disk' > (Completed)"
        )
        logger.info(
            "Stage 5 :\n\tRun script to copy 'old disk' metadata to new 'tmp_disk' > (Completed)"
        )
        time.sleep(3)
        # (6.) Remove metadata of old disk :
        # ===========================================================
        old_image_name = str(ceph_api.conf_api.get_image_name_prefix() +
                             disk_metadata.id)
        confirm = ceph_api.remove_disk_metadata(old_image_name,
                                                disk_metadata.pool)
        if not confirm:
            # Deliberately non-fatal: the old image is renamed and deleted
            # later anyway, so stale metadata on it is harmless.
            print(
                "Error : clear_disk.py script : cannot remove metadata of old disk"
            )
            # sys.exit(-1)
        print("Stage 6 :\n\tRemove metadata of old disk > (Completed)")
        logger.info("Stage 6 :\n\tRemove metadata of old disk > (Completed)")
        # (7.) Rename old disk image name with "deleted-" + disk_id + random_no:
        # ======================================================================
        new_image_name = "deleted-" + disk_metadata.id + "-" + random_no
        cmd = "rbd mv {}/{} {} {} --cluster {}".format(
            pool_name, image_name, new_image_name,
            ceph_auth.get_authentication_string(), cluster_name)
        if not call_cmd(cmd):
            print(
                "Error : clear_disk.py script : cannot rename old image from {} to {} ,\ncmd : {}"
                .format(image_name, new_image_name, cmd))
            sys.exit(-1)
        print("Stage 7 :\n\tRename old disk image name with ( " +
              new_image_name + " ) > (Completed)")
        logger.info("Stage 7 :\n\tRename old disk image name with ( " +
                    new_image_name + " ) > (Completed)")
        time.sleep(5)
        # (8.) Rename "tmp_disk" with old disk image name :
        # =================================================
        # The empty tmp image (carrying the copied metadata) now takes over
        # the old disk's identity.
        cmd = "rbd mv {}/{} {} {} --cluster {}".format(
            pool_name, tmp_image_name, image_name,
            ceph_auth.get_authentication_string(), cluster_name)
        if not call_cmd(cmd):
            print(
                "Error : clear_disk.py script : cannot rename \"tmp_disk\" from {} to {} ,\ncmd : {}"
                .format(tmp_image_name, image_name, cmd))
            sys.exit(-1)
        print(
            "Stage 8 :\n\tRename 'tmp_disk' with old disk image name > (Completed)"
        )
        logger.info(
            "Stage 8 :\n\tRename 'tmp_disk' with old disk image name > (Completed)"
        )
        time.sleep(5)
        # (9.) Queue an async job to delete the renamed old image.
        jm = JobManager()
        id = jm.add_job(JobType.DELETE_DISK, new_image_name + ' ' + pool_name)
        print("Stage 9 :\n\tStart a job to remove old disk image , job id = " +
              str(id))
        logger.info(
            "Stage 9 :\n\tStart a job to remove old disk image , job id = " +
            str(id))
        sys.exit(0)
    except PoolException as e:
        print("Error : PoolException , {}".format(e.message))
        logger.error("Clear Disk Error : PoolException , {}".format(e.message))
        sys.exit(-1)
    except DiskListException as e:
        print("Error : DiskListException , {}".format(e.message))
        logger.error("Clear Disk Error : DiskListException , {}".format(
            e.message))
        sys.exit(-1)
    except CephException as e:
        # NOTE(review): collapsed source makes nesting ambiguous here — the
        # print is assumed conditional on GENERAL_EXCEPTION while the log and
        # exit run unconditionally; confirm against upstream.
        if e.id == CephException.GENERAL_EXCEPTION:
            print("Error : CephException , {}".format(e.message))
        logger.error("Clear Disk Error : CephException , {}".format(e.message))
        sys.exit(-1)
    except MetadataException as e:
        print("Error : MetadataException , {}".format(e.message))
        logger.error("Clear Disk Error : MetadataException , {}".format(
            e.message))
        sys.exit(-1)
    except Exception as e:
        print("Error : Exception , {}".format(e.message))
        logger.error("Clear Disk Error : Exception , {}".format(e.message))
        sys.exit(-1)
# NOTE(review): the first statements below are the tail of a polling loop in a
# function whose `def`/`while` header is not visible in this chunk (the bare
# `return` and `wait_count -= 1` require an enclosing def/loop) — indentation
# here is reconstructed and must be confirmed against the full file.
        # Poll until no disks of the pool are still running, or until the
        # (unseen) wait counter is exhausted.
        running_pool_disks = _get_running_pool_disks(pool)
        logger.info('Stop pool disks count {}'.format(
            str(len(running_pool_disks))))
        if len(running_pool_disks) == 0:
            return
        wait_count -= 1


# ---- delete_pool script entry (Python 2) ----
# Usage: delete_pool <pool> --yes-i-really-really-mean-it
if len(sys.argv) != 3 or sys.argv[2] != '--yes-i-really-really-mean-it':
    print 'usage: delete_pool pool --yes-i-really-really-mean-it '
    sys.exit(1)
pool = sys.argv[1]
# step 1 stop disks
_stop_pool_disks(pool)
sleep(20)  # allow stops to settle cluster-wide before deleting anything
# step 2 delete disks in case of ec
# Erasure-coded pools: disks whose data pool is this pool must be deleted
# first, since they live partly outside the pool being removed.
pool_info = _get_pool_info_by_name(pool)
if pool_info.type == "erasure":
    meta_disk = ManageDisk().get_disks_meta()
    if len(meta_disk) > 0:
        for disk in meta_disk:
            if disk.data_pool == pool:
                ManageDisk().delete_disk(disk.id, disk.pool)
# step 3 delete pool
ceph_api = CephAPI()
ceph_api.delete_pool(pool)
def get_disk_info(disk_id): manage_disk = ManageDisk() i = manage_disk.get_disk(disk_id) print i.user, i.disk_name, i.ip, i.ip2, i.subnet1, i.subnet2, i.password, i.id, i.iqn, i.size
def deattach_disk(disk_id): ceph_manage = ManageDisk() ceph_manage.detach_disk(disk_id) disk = ceph_manage.get_disk(disk_id) print disk.id, disk.disk_name, disk.size, [p for p in disk.paths]
def delete(disk_id): manage_disk = ManageDisk() print manage_disk.delete_disk(disk_id)
def start_disk(id):
    """Start the disk with the given id via ManageDisk."""
    ManageDisk().start(id)
def stop_disk(id):
    """Stop the disk with the given id via ManageDisk."""
    ManageDisk().stop(id)
def stop_disk(id, pool):
    """Stop the disk; return True iff ManageDisk reports Status.done.

    Note: `pool` is accepted for interface compatibility but not used.
    """
    manager = ManageDisk()
    return manager.stop(id) == Status.done
def remove_pool(pool_name, pool_type):
    """Delete a pool, then redirect to the pools list page.

    Replicated pools that still host disks are only deleted if none of those
    disks uses another existing pool as its erasure data-pool; otherwise an
    error flash is set and no deletion happens.

    Args:
        pool_name (string): name of the pool to delete.
        pool_type (string): "erasure" or "replicated".

    Returns:
        Redirect to 'pool_controller.get_pools'.
    """
    if request.method == 'POST':
        # Fix: delete_job_id was previously unbound (NameError at the final
        # redirect) when pool_type matched neither branch below.
        delete_job_id = None
        try:
            manage_pool = ManagePools()
            pool_list = manage_pool.get_pools_info()
            pool_names = [p.name for p in pool_list]
            if pool_type == "erasure":
                delete_job_id = manage_pool.delete_pool(pool_name)
            elif pool_type == "replicated":
                ceph_api = CephAPI()
                pools = ceph_api.get_active_pools()
                if pool_name not in pools:
                    delete_job_id = manage_pool.delete_pool(pool_name)
                else:
                    meta_disk = ManageDisk().get_disks_meta_by_pool(pool_name)
                    if len(meta_disk) == 0:
                        delete_job_id = manage_pool.delete_pool(pool_name)
                    # Count disks whose erasure data-pool is still a live pool;
                    # deleting their metadata pool would orphan them.
                    has_data_pool = 0
                    for disk in meta_disk:
                        if disk.data_pool is not None and disk.data_pool != "":
                            if disk.data_pool in pool_names:
                                has_data_pool += 1
                    if has_data_pool > 0:
                        session['err'] = "error_deleting_mata_pool"
                        return redirect(url_for('pool_controller.get_pools'))
                    if len(meta_disk) > 0:
                        delete_job_id = manage_pool.delete_pool(pool_name)
            return redirect(url_for('pool_controller.get_pools',
                                    delete_job_id=delete_job_id,
                                    pool_name=pool_name))
        except CephException as e:
            if e.id == CephException.CONNECTION_TIMEOUT:
                session['err'] = "ui_admin_ceph_time_out"
                return redirect(url_for('pool_controller.get_pools'))
            elif e.id == CephException.GENERAL_EXCEPTION:
                session['err'] = "ui_admin_ceph_general_exception"
                return redirect(url_for('pool_controller.get_pools'))
            session['err'] = "ui_admin_ceph_general_exception"
            logger.error(e)
            return redirect(url_for('pool_controller.get_pools'))
        except Exception as e:
            session['err'] = "ui_admin_delete_pool_error"
            logger.error(e)
            return redirect(url_for('pool_controller.get_pools'))
def save_attach_disk(disk_id, pool):
    """POST handler: attach an existing rbd image as a PetaSAN iSCSI disk.

    Reads the attach form, builds a DiskMeta, calls ManageDisk.attach_disk
    and maps every ManageDiskStatus outcome to a flash message plus a
    redirect (307 preserves the POST when returning to the form).

    Args:
        disk_id: id of the disk/image to attach.
        pool: pool the image lives in.
    """
    if request.method == 'POST':
        try:
            failed_pools = int(request.form['failed_pools'])
            activePaths = int(request.form['ActivePaths'])
            # 3+ active paths forces automatic IP assignment.
            if activePaths >= 3:
                automatic_ip = "Yes"
            else:
                automatic_ip = request.form['orpUseFirstRange']
            # Refuse auto-IP attach while any pool is down.
            if automatic_ip == "Yes" and failed_pools > 0:
                session['err'] = "ui_admin_add_disk_while_pool_inactive"
                return redirect(
                    url_for('disk_controller.attach_disk',
                            disk_id=disk_id,
                            pool=pool), 307)
            disk = DiskMeta()
            disk.id = disk_id
            disk.size = int(request.form['diskSize'])
            disk.disk_name = request.form['diskName']
            # isAutomaticIp starts as the "Yes"/"No" form string and is later
            # overwritten with a bool — order of these assignments matters.
            if 'orpUseFirstRange' not in request.values:
                isAutomaticIp = "Yes"
            else:
                isAutomaticIp = request.form['orpUseFirstRange']
            activePathsCount = int(request.form['ActivePaths'])
            if activePathsCount >= 3:
                isAutomaticIp = "Yes"
            # Map the subnet selector (1/2/3) to the PathType enum.
            path_type = int(request.form['ISCSISubnet'])
            if path_type == 1:
                path_type = PathType.iscsi_subnet1
            elif path_type == 2:
                path_type = PathType.iscsi_subnet2
            elif path_type == 3:
                path_type = PathType.both
            manual_ips = []
            enable_rep = request.form['replication']
            if enable_rep == "yes":
                disk.is_replication_target = True
            # Manual IPs: collect one IP per active path (max 2 here).
            if isAutomaticIp != "Yes":
                manual_ips.append(request.form['path1'])
                isAutomaticIp = False
                if activePathsCount == 2:
                    manual_ips.append(request.form['path2'])
            else:
                isAutomaticIp = True
            disk.orpUseFirstRange = isAutomaticIp
            disk.ISCSISubnet = int(request.form['ISCSISubnet'])
            usedACL = request.form['orpACL']
            if usedACL == "Iqn":
                disk.acl = request.form['IqnVal']
            else:
                disk.acl = ""
            # CHAP authentication: explicit credentials or automatic.
            usedAutentication = request.form['orpAuth']
            if usedAutentication == "Yes":
                auth_auto = False
                disk.user = request.form['UserName']
                disk.password = request.form['Password']
            else:
                auth_auto = True
            disk.data_pool = request.form['data_pool']
            manage_config = ManageConfig()
            subnet1_info = manage_config.get_iscsi1_subnet()
            subnet2_info = manage_config.get_iscsi2_subnet()
            disk.subnet1 = subnet1_info.subnet_mask
            # call method to save object
            manageDisk = ManageDisk()
            status = manageDisk.attach_disk(disk, manual_ips, path_type,
                                            activePathsCount, auth_auto,
                                            isAutomaticIp, pool)
            # Map each attach outcome to a flash message + redirect target.
            if status == ManageDiskStatus.done:
                # session['success'] = "ui_admin_attach_disk_success"
                return redirect(url_for('disk_controller.disk_list'))
            elif status == ManageDiskStatus.done_metaNo:
                session[
                    'success'] = "ui_admin_attach_disk_attached_with_no_metadata"
                return redirect(url_for('disk_controller.disk_list'))
            elif status == ManageDiskStatus.error:
                session['err'] = "ui_admin_attach_disk_error"
                return redirect(
                    url_for('disk_controller.attach_disk',
                            disk_id=disk_id,
                            pool=pool), 307)
            elif status == ManageDiskStatus.disk_created_cant_start:
                session['warning'] = "ui_admin_attach_disk_attached_not_start"
                return redirect(url_for('disk_controller.disk_list'))
            elif status == ManageDiskStatus.data_missing:
                session['err'] = "ui_admin_manage_disk_data_missing"
                return redirect(
                    url_for('disk_controller.attach_disk',
                            disk_id=disk_id,
                            pool=pool), 307)
            elif status == ManageDiskStatus.disk_meta_cant_read:
                session[
                    'warning'] = "ui_admin_attach_disk_error_attached_not_read_metadata"
                return redirect(url_for('disk_controller.disk_list'))
            elif status == ManageDiskStatus.disk_exists:
                session['err'] = "ui_admin_manage_disk_exist"
                return redirect(
                    url_for('disk_controller.attach_disk',
                            disk_id=disk_id,
                            pool=pool), 307)
            elif status == ManageDiskStatus.disk_name_exists:
                session['err'] = "ui_admin_manage_disk_name_exist"
                return redirect(
                    url_for('disk_controller.attach_disk',
                            disk_id=disk_id,
                            pool=pool), 307)
            elif status == ManageDiskStatus.ip_out_of_range:
                session['err'] = "ui_admin_manage_disk_no_auto_ip"
                return redirect(
                    url_for('disk_controller.attach_disk',
                            disk_id=disk_id,
                            pool=pool), 307)
            elif status == ManageDiskStatus.wrong_subnet:
                session['err'] = "ui_admin_manage_disk_wrong_subnet"
                return redirect(
                    url_for('disk_controller.attach_disk',
                            disk_id=disk_id,
                            pool=pool), 307)
            elif status == ManageDiskStatus.wrong_data:
                session['err'] = "ui_admin_manage_disk_wrong_data"
                return redirect(
                    url_for('disk_controller.attach_disk',
                            disk_id=disk_id,
                            pool=pool), 307)
            elif status == ManageDiskStatus.used_already:
                session['err'] = "ui_admin_manage_disk_used_already"
                return redirect(
                    url_for('disk_controller.attach_disk',
                            disk_id=disk_id,
                            pool=pool), 307)
            elif status == ManageDiskStatus.disk_get__list_error:
                session['err'] = "ui_admin_manage_disk_disk_get_list_error"
                return redirect(
                    url_for('disk_controller.attach_disk',
                            disk_id=disk_id,
                            pool=pool), 307)
            elif status == ManageDiskStatus.is_busy:
                session['err'] = "ui_admin_attach_disk_is_busy"
                return redirect(
                    url_for('disk_controller.attach_disk',
                            disk_id=disk_id,
                            pool=pool), 307)
        except Exception as e:
            session['err'] = "ui_admin_attach_disk_error"
            logger.error(e)
            return redirect(
                url_for('disk_controller.attach_disk',
                        disk_id=disk_id,
                        pool=pool), 307)
def attach_disk(disk_id, pool):
    """Render the attach-disk form, showing any pending flash message.

    Each branch (err / warning / success / default) reloads the disk meta,
    fills the form and renders the same template with the corresponding
    message keyword. The default branch additionally warns when the image
    is not a PetaSAN-created image.

    Args:
        disk_id: id of the disk/image to attach.
        pool: pool the image lives in.
    """
    if request.method == 'GET' or request.method == 'POST':
        # Count pools that exist but are not active — the form disables
        # auto-IP attach while any pool is down.
        failed_pools = 0
        manage_pool = ManagePools()
        active_pools = manage_pool.get_active_pools()
        all_pools = manage_pool.get_pools_info()
        if len(all_pools) > len(active_pools):
            failed_pools = len(all_pools) - len(active_pools)
        manage_config = ManageConfig()
        subnet1_info = manage_config.get_iscsi1_subnet()
        subnet2_info = manage_config.get_iscsi2_subnet()
        # Selectable disk sizes in GB offered by the form.
        size_list_aval = [
            1, 2, 3, 4, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 150, 250,
            300, 400, 450, 500, 600, 700, 800, 900, 1024, 2048, 3072, 4096,
            5120, 10240, 20480, 30720, 51200, 102400
        ]
        result = ""
        # One-shot flash messages: read then pop from session.
        if list_err in session:
            result = session[list_err]
            session.pop(list_err)
            manage_disk = ManageDisk()
            disk = manage_disk.get_disk(disk_id, pool)
            form1 = add_disk_form()
            form1.id = disk.id
            form1.diskSize = disk.size
            form1.pool = pool
            form1.data_pool = disk.data_pool
            form1.diskName = disk.disk_name
            form1.is_replication_target = disk.is_replication_target
            return render_template('admin/disk/attach_disk.html',
                                   subnet1=subnet1_info,
                                   subnet2=subnet2_info,
                                   form=form1,
                                   failed_pools=failed_pools,
                                   size_list=size_list_aval,
                                   err=result)
        elif list_warning in session:
            result = session[list_warning]
            session.pop(list_warning)
            manage_disk = ManageDisk()
            disk = manage_disk.get_disk(disk_id, pool)
            form1 = add_disk_form()
            form1.id = disk.id
            form1.diskSize = disk.size
            form1.pool = pool
            form1.data_pool = disk.data_pool
            form1.diskName = disk.disk_name
            form1.is_replication_target = disk.is_replication_target
            return render_template('admin/disk/attach_disk.html',
                                   subnet1=subnet1_info,
                                   subnet2=subnet2_info,
                                   form=form1,
                                   failed_pools=failed_pools,
                                   size_list=size_list_aval,
                                   warning=result)
        elif list_success in session:
            result = session[list_success]
            session.pop(list_success)
            manage_disk = ManageDisk()
            disk = manage_disk.get_disk(disk_id, pool)
            form1 = add_disk_form()
            form1.id = disk.id
            form1.diskSize = disk.size
            form1.pool = pool
            form1.data_pool = disk.data_pool
            form1.diskName = disk.disk_name
            form1.is_replication_target = disk.is_replication_target
            return render_template('admin/disk/attach_disk.html',
                                   subnet1=subnet1_info,
                                   subnet2=subnet2_info,
                                   form=form1,
                                   failed_pools=failed_pools,
                                   size_list=size_list_aval,
                                   success=result)
        else:
            # No pending flash: warn if the image was not created by PetaSAN.
            is_not_petasan_warning = ''
            manage_disk = ManageDisk()
            disk = manage_disk.get_disk(disk_id, pool)
            form1 = add_disk_form()
            if disk.is_petasan_image is not None and disk.is_petasan_image == False:
                form1.diskName = disk.id
                is_not_petasan_warning = "ui_admin_attach_disk_warning_disk_is_not_petasan"
            form1.id = disk.id
            form1.diskSize = disk.size
            form1.pool = pool
            form1.data_pool = disk.data_pool
            form1.diskName = disk.disk_name
            form1.is_replication_target = disk.is_replication_target
            if is_not_petasan_warning != '':
                result = is_not_petasan_warning
            return render_template('admin/disk/attach_disk.html',
                                   subnet1=subnet1_info,
                                   subnet2=subnet2_info,
                                   form=form1,
                                   failed_pools=failed_pools,
                                   size_list=size_list_aval,
                                   warning=result)
def update_disk(disk_id, pool):
    """POST handler: save edits to an existing disk's settings.

    Builds a DiskMeta from the edit form (name, size, ACL, CHAP auth,
    data pool, replication flag), calls ManageDisk.edit_disk and maps each
    status to a flash message plus a redirect (307 preserves the POST when
    returning to the edit form).

    Args:
        disk_id: id of the disk being edited.
        pool: pool the disk belongs to.
    """
    if request.method == 'POST':
        try:
            disk = DiskMeta()
            disk.id = disk_id
            disk.disk_name = request.form['diskName']
            disk.size = int(request.form['diskSize'])
            auth_auto = True
            # ACL: only set when the IQN option was chosen.
            acl = request.form['orpACL']
            if acl == "Iqn":
                disk.acl = request.form['IqnVal']
            # if 'clientACL' in request.form:
            #     usedACL = request.form['clientACL']
            #     if usedACL == "Yes":
            #         disk.acl = request.form['IqnVal']
            #     else:
            #         disk.acl = ""
            # CHAP authentication: explicit credentials turn auto-auth off.
            if 'orpAuth' in request.form:
                used_autentication = request.form['orpAuth']
                if used_autentication == "Yes":
                    auth_auto = False
                    disk.user = request.form['UserName']
                    disk.password = request.form['Password']
            disk.data_pool = request.form['data_pool']
            enable_rep = request.form['replication']
            if enable_rep == "yes":
                disk.is_replication_target = True
            manage_disk = ManageDisk()
            status = manage_disk.edit_disk(disk, auth_auto, pool)
            # Map each edit outcome to a flash message + redirect target.
            if status == ManageDiskStatus.done:
                session['success'] = "ui_admin_edit_disk_success"
                return redirect(url_for('disk_controller.disk_list'))
            elif status == ManageDiskStatus.error:
                session['err'] = "ui_admin_edit_disk_error"
                return redirect(
                    url_for('disk_controller.edit_disk',
                            disk_id=disk_id,
                            pool=pool), 307)
            elif status == ManageDiskStatus.data_missing:
                session['err'] = "ui_admin_manage_disk_data_missing"
                return redirect(
                    url_for('disk_controller.edit_disk',
                            disk_id=disk_id,
                            pool=pool), 307)
            elif status == ManageDiskStatus.disk_exists:
                session['err'] = "ui_admin_manage_disk_exist"
                return redirect(
                    url_for('disk_controller.edit_disk',
                            disk_id=disk_id,
                            pool=pool), 307)
            elif status == ManageDiskStatus.disk_name_exists:
                session['err'] = "ui_admin_manage_disk_name_exist"
                return redirect(
                    url_for('disk_controller.edit_disk',
                            disk_id=disk_id,
                            pool=pool), 307)
            elif status == ManageDiskStatus.disk_get__list_error:
                session['err'] = "ui_admin_manage_disk_disk_get_list_error"
                return redirect(
                    url_for('disk_controller.edit_disk',
                            disk_id=disk_id,
                            pool=pool), 307)
        except Exception as e:
            session['err'] = "ui_admin_edit_disk_error"
            logger.error(e)
            return redirect(
                url_for('disk_controller.edit_disk',
                        disk_id=disk_id,
                        pool=pool), 307)
def edit_disk(disk_id, pool):
    """Render the edit-disk form, showing any pending flash message.

    Each flash branch (err / warning / success) reloads the disk meta and
    renders the template with the matching message keyword; the default
    branch additionally splits the disk's paths per iSCSI interface for
    display.

    Args:
        disk_id: id of the disk being edited.
        pool: pool the disk belongs to.
    """
    if request.method == 'GET' or request.method == 'POST':
        # Selectable disk sizes in GB offered by the form.
        size_list_aval = [
            1, 2, 3, 4, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 150, 250,
            300, 400, 450, 500, 600, 700, 800, 900, 1024, 2048, 3072, 4096,
            5120, 10240, 20480, 30720, 51200, 102400
        ]
        result = ""
        # One-shot flash messages: read then pop from session.
        if list_err in session:
            result = session[list_err]
            session.pop(list_err)
            manage_disk = ManageDisk()
            disk = manage_disk.get_disk(disk_id, pool)
            return render_template('admin/disk/edit_disk.html',
                                   disk=disk,
                                   size_list=size_list_aval,
                                   err=result)
        elif list_warning in session:
            result = session[list_warning]
            session.pop(list_warning)
            manage_disk = ManageDisk()
            disk = manage_disk.get_disk(disk_id, pool)
            return render_template('admin/disk/edit_disk.html',
                                   disk=disk,
                                   size_list=size_list_aval,
                                   warning=result)
        elif list_success in session:
            result = session[list_success]
            session.pop(list_success)
            manage_disk = ManageDisk()
            disk = manage_disk.get_disk(disk_id, pool)
            return render_template('admin/disk/edit_disk.html',
                                   disk=disk,
                                   size_list=size_list_aval,
                                   success=result)
        else:
            manage_disk = ManageDisk()
            disk = manage_disk.get_disk(disk_id, pool)
            paths = disk.get_paths()
            config = configuration()
            iscsi_1_eth_name = config.get_cluster_info().iscsi_1_eth_name
            iscsi_2_eth_name = config.get_cluster_info().iscsi_2_eth_name
            # Group path IPs by which cluster iSCSI interface they belong to.
            paths_iscsi_1 = []
            paths_iscsi_2 = []
            for path in paths:
                if path.eth == iscsi_1_eth_name:
                    paths_iscsi_1.append(path.ip)
                elif path.eth == iscsi_2_eth_name:
                    paths_iscsi_2.append(path.ip)
            return render_template('admin/disk/edit_disk.html',
                                   disk=disk,
                                   paths_iscsi_1=paths_iscsi_1,
                                   paths_iscsi_2=paths_iscsi_2,
                                   size_list=size_list_aval)