def modify_lv_name(self, lv_name, lv_name_config):
    """Rename logical volume *lv_name* to *lv_name_config*.

    If the LV is currently mounted it is unmounted first, renamed, and
    re-mounted at its previous mount point.  Success is detected by
    looking for lvrename's confirmation message in the command output.

    :param lv_name: current LV name.
    :param lv_name_config: new LV name.
    :return: {'modify_lv_name': 1} on success, 0 otherwise.
    """
    lv_vg = self.get_lv_vg(lv_name)['lv_vg']
    mount_dir = self.lv_vg_capacity_usedcapacity_dir(lv_name)['mount_dir']
    modify_lv_name = 0
    # lvrename prints exactly this line when the rename succeeds.
    info = 'Renamed "{0}" to "{1}" in volume group "{2}"'.\
        format(lv_name, lv_name_config, lv_vg)
    if mount_dir is None:
        # Not mounted: a plain rename is enough.
        cmd = 'lvrename /dev/{0}/{1} /dev/{0}/{2}'.\
            format(lv_vg, lv_name, lv_name_config)
    else:
        # Mounted: unmount, rename, then mount again at the old location.
        cmd = 'umount /dev/{0}/{1} && ' \
              'lvrename /dev/{0}/{1} /dev/{0}/{2} && ' \
              'mount /dev/{0}/{2} {3}'.\
            format(lv_vg, lv_name, lv_name_config, mount_dir)
    # The result handling below was previously duplicated verbatim in
    # both branches; run the command and check the confirmation once.
    result = ShellCmd.execute_shell_cmd(cmd)
    if result and result.find(info) != -1:
        modify_lv_name = 1
        self.update_lv_info(lv_name, lv_name_config)
    return {'modify_lv_name': modify_lv_name}
def delete_lv(self, lv_name):
    """Delete the logical volume backing *lv_name*.

    The raw name is normalized to ``volume_<lv_name>``.  The LV is
    unmounted (and its fstab entry removed) if currently mounted, then
    removed with ``lvremove -y``.

    :param lv_name: user-facing volume name (without the ``volume_`` prefix).
    :raises ARLvException: when the LV is the elasticsearch link target.
    :return: {'lv_name': <raw name>, 'lv_deleted': 1 on success else 0}.
    """
    lv_name = 'volume_{0}'.format(lv_name.strip())
    link_lv_name = self.get_elasticsearch_link_lv()['link_lv_name']
    if link_lv_name and link_lv_name == lv_name:
        # Refuse to delete the volume that /anyrobot/store links into.
        raise ARLvException(LvStatusPool.ID_LV_HAS_LINK_DATA_SOURCE)
    lv_vg_capacity_usedcapacity_dir = self.lv_vg_capacity_usedcapacity_dir(
        lv_name)
    vg = lv_vg_capacity_usedcapacity_dir['vg']
    mount_dir = lv_vg_capacity_usedcapacity_dir['mount_dir']
    if mount_dir:
        cmd = 'umount /dev/{0}/{1}'.format(vg, lv_name)
        ShellCmd.execute_shell_cmd(cmd)
        self.delete_mount_info_from_fstab(lv_name)
    lv_deleted = 0
    cmd = 'lvremove -y /dev/{0}/{1}'.format(vg, lv_name)
    result = ShellCmd.execute_shell_cmd(cmd)
    # Return synchronously.  (This note was previously a bare string
    # literal used as a comment, which allocates a useless constant.)
    if result and result.find('successfully removed') != -1:
        self.delete_lv_info(lv_name)
        lv_deleted = 1
    return {'lv_name': lv_name.split('_')[1], 'lv_deleted': lv_deleted}
def delete_pv_vg_by_vg(self, vir_drv_id):
    """Remove the volume group and its five PV partitions for *vir_drv_id*.

    Looks up the RAID's name, SCSI tag and /dev drive letter, then runs
    ``vgremove`` followed by ``pvremove`` on partitions 1-5 of the drive.
    """
    raid_name = self.get_raid_name_by_vd(vir_drv_id)['raid_name']
    raid_tag = self.get_raid_tag_by_vd(vir_drv_id)['raid_tag']
    # BUGFIX: get_raid_drive_letter returns {'drive_letter': '/dev/sdX'};
    # previously the whole dict was interpolated into the shell command.
    raid_drive_letter = self.get_raid_drive_letter(raid_tag)['drive_letter']
    vg = self.get_raid_vg_and_vgsize(raid_name)['vg']
    if vg:
        cmd = "vgremove -f %s && pvremove -f %s{1,2,3,4,5}" % \
            (vg, raid_drive_letter)
        ShellCmd.execute_shell_cmd(cmd)
def get_lv_vg(self, lv_name):
    """Return the volume-group name that *lv_name* belongs to.

    Parses ``lvdisplay`` output, where the ``VG Name`` line immediately
    follows the matching ``LV Name`` line.

    :raises ARLvException: when *lv_name* is not a known LV.
    :return: {'lv_vg': <vg name> or None when it cannot be determined}.
    """
    lv_names = ARLvManager.get_lv_names()
    if lv_name not in lv_names:
        raise ARLvException(LvStatusPool.ID_LV_NOT_FOUND)
    cmd = "lvdisplay | grep -E 'LV Name|VG Name'"
    results = ShellCmd.execute_shell_cmd(cmd)
    lv_vg = None
    if results:
        vals = [val.strip() for val in results.split('\n') if val]
        pattern = 'LV Name.*{0}'.format(lv_name)
        for index, val in enumerate(vals):
            if re.match(pattern, val):
                # BUGFIX: previously, when the pattern never matched, the
                # fallback index 0 returned an unrelated line as the VG
                # name.  Now lv_vg stays None unless a match is found.
                lv_vg = vals[index + 1].split(' ')[-1]
                break
    return {'lv_vg': lv_vg}
def create_raid(self, raid_name, raid_level, slot_numbers, stripsz):
    """Create a RAID virtual drive via MegaCli and record its name.

    Takes an ``lsscsi`` snapshot before creation so the new device's
    SCSI tag can be derived afterwards (by diffing snapshots in
    ``get_raid_tag``).

    :param raid_name: desired RAID name (must be unused).
    :param raid_level: MegaCli RAID level for ``-r``.
    :param slot_numbers: physical drive slots composing the array.
    :param stripsz: stripe size for ``strpsz``.
    :raises ARRaidException: when the name is taken, or on a MegaCli
        error exit code (via ``raid_raise_excep``).
    :return: {'vir_drv_id': <new VD id or None>, 'create': 1 on success}.
    """
    if raid_name in self.get_raid_names():
        raise ARRaidException(RaidStatusPool.ID_RAID_NAME_HAS_BEEN_USED)
    physdrvs = Pdraid.physdrvs_by_slot_numbers(slot_numbers)
    cmd = "-CfgLdAdd -r%s%s strpsz%s -a0" % (raid_level, physdrvs, stripsz)
    # Snapshot of attached SCSI devices BEFORE creating the VD.
    pre = ShellCmd.execute_shell_cmd('lsscsi')
    values = self._megacli.execute(cmd)
    # Last output line carries the exit code, e.g. "Exit Code: 0x00".
    exit_code = values[-1].split(':')[1]
    self.raid_raise_excep(exit_code)
    vir_drv_id = None
    create = 0
    if exit_code == '0x00':
        # Diff lsscsi snapshots to find the tag of the new device.
        raid_tag = ARRaidManager.get_raid_tag(pre)['raid_tag']
        for value in values:
            if value.find('Created VD') != -1:
                # e.g. "Adapter 0: Created VD 2" -> take the VD id.
                vir_drv_id = value.split(':')[1].split(' ')[2]
                self.save_raid_info(vir_drv_id, raid_tag)
                set_raid_name = self.set_raid_name(
                    vir_drv_id, raid_name)['set_raid_name']
                if set_raid_name == 1:
                    ARRaidManager.save_raid_id_name(vir_drv_id, raid_name)
                    create = 1
                # Only one VD is created per call; stop at the first match.
                break
    return {'vir_drv_id': vir_drv_id, 'create': create}
def start_anyrobot_store_service():
    """Start the anyrobot-store docker container.

    Docker echoes the container name back on success, which is what the
    output check looks for.

    :return: {'start_anyrobot_store': 1 on success, 0 otherwise}.
    """
    output = ShellCmd.execute_shell_cmd('docker start anyrobot-store')
    started = 1 if output and 'anyrobot-store' in output else 0
    return {'start_anyrobot_store': started}
def get_lv_names():
    """Return the names of all LVs whose name carries the 'volume_' prefix.

    :return: list of LV names; empty when none exist.
    """
    cmd = "lvs | grep volume_ | awk '{print $1}'"
    output = ShellCmd.execute_shell_cmd(cmd)
    if not output:
        return []
    return output.strip().split('\n')
def get_raid_lvs_by_vg(self, vg):
    """List the LV names that belong to volume group *vg*.

    :return: {'lvs': [<lv name>, ...]} (empty list when none found).
    """
    cmd = "lvs |grep %s |awk '{print $1}'" % vg
    output = ShellCmd.execute_shell_cmd(cmd)
    found = []
    if output:
        # Drop the empty strings produced by trailing newlines.
        found = list(filter(None, output.split('\n')))
    return {'lvs': found}
def expand_lv_capacity(self, lv_name, lv_capacity):
    """Resize *lv_name* to *lv_capacity* with ``lvresize``.

    :return: {'lv_name': lv_name, 'lv_expand': 1 on success else 0}.
    """
    vg = self.get_lv_vg(lv_name)['lv_vg']
    result = ShellCmd.execute_shell_cmd(
        "lvresize -L {0} /dev/{1}/{2}".format(lv_capacity, vg, lv_name))
    expanded = 1 if result and 'successfully resized' in result else 0
    return {'lv_name': lv_name, 'lv_expand': expanded}
def delete_mount_info_from_fstab(self, lv_name):
    """Remove the /etc/fstab entry for *lv_name* via ``sed -i``.

    :return: {'delete_mount_info_from_fstab': 1 when sed exits 0, else 0}.
    """
    lv_vg = self.get_lv_vg(lv_name)['lv_vg']
    # Raw string: '\/' is an invalid escape sequence in a plain string
    # literal (DeprecationWarning; SyntaxWarning on newer Pythons).
    # The resulting command text is byte-identical to before.
    cmd = r"sed -i '/\/dev\/mapper\/{0}-{1}/d' /etc/fstab".format(
        lv_vg, lv_name)
    proc = ShellCmd.execute_cmd(cmd)
    delete_mount_info_from_fstab = 0
    if not proc.returncode:
        delete_mount_info_from_fstab = 1
    return {'delete_mount_info_from_fstab': delete_mount_info_from_fstab}
def write_mount_info_to_fstab(self, lv_name, directory):
    """Append an ext3 mount entry for *lv_name* at *directory* to /etc/fstab.

    :return: {'write_mount_dir_to_fstab': 1 when echo exits 0, else 0}.
    """
    lv_vg = self.get_lv_vg(lv_name)['lv_vg']
    mount_info = "/dev/mapper/{0}-{1} {2} ext3 defaults 0 0".\
        format(lv_vg, lv_name, directory)
    proc = ShellCmd.execute_cmd('echo {0} >> /etc/fstab'.format(mount_info))
    written = 0 if proc.returncode else 1
    return {'write_mount_dir_to_fstab': written}
def mount_other_dir(self, lv_name, directory):
    """Re-mount *lv_name* onto *directory*, creating the directory if needed.

    :return: {'lv_mount_dir': 1 when the shell pipeline exits 0, else 0}.
    """
    vg = self.get_lv_vg(lv_name)['lv_vg']
    device = '/dev/{0}/{1}'.format(vg, lv_name)
    proc = ShellCmd.execute_cmd(
        'umount {0} && mkdir -p {1} && mount {0} {1}'.format(
            device, directory))
    mounted = 0 if proc.returncode else 1
    return {'lv_mount_dir': mounted}
def validate_create_is_done(self, lv_name):
    """Check whether the background mkfs/mkdir/mount pipeline has finished.

    Greps the process table for the exact command launched by
    ``lv_mount_dir``; no match means the formatting completed.

    :return: {'create_is_done': 1 when no such process exists, else 0}.
    """
    vg = self.get_lv_vg(lv_name)['lv_vg']
    cmd = "ps -ef | grep 'mkfs -t ext3 /dev/{0}/{1} && mkdir' | grep -v grep".\
        format(vg, lv_name)
    still_running = ShellCmd.execute_shell_cmd(cmd)
    return {'create_is_done': 0 if still_running else 1}
def get_vgsize_vgfree_about_lv(self, lv_name):
    """Return total and free size of the VG hosting *lv_name*.

    Reads columns 6 and 7 (VSize, VFree) from ``vgs`` output.

    :return: {'lv_vgsize': ..., 'lv_vgfree': ...} (None when unavailable).
    """
    vg = self.get_lv_vg(lv_name)['lv_vg']
    output = ShellCmd.execute_shell_cmd(
        "vgs | grep %s | awk '{print $6,$7}'" % vg)
    lv_vgsize, lv_vgfree = None, None
    if output:
        fields = output.split(' ')
        lv_vgsize = Raidlv.size_patter(fields[0].strip())
        lv_vgfree = Raidlv.size_patter(fields[1].strip())
    return {'lv_vgsize': lv_vgsize, 'lv_vgfree': lv_vgfree}
def get_elasticsearch_link_lv(self):
    """Find which LV (if any) the /anyrobot/store symlink points into.

    :return: {'link_lv_name': 'volume_...' or None when no link exists}.
    """
    link_lv_name = None
    store_dir = '/anyrobot/store'
    listing = ShellCmd.execute_shell_cmd('ls -l {0}'.format(store_dir))
    if listing:
        for line in listing.strip().split('\n'):
            matches = re.findall('volume_[a-zA-Z0-9]+', line)
            if matches:
                # Keep scanning: the last matching entry wins, as before.
                link_lv_name = matches[0]
    return {'link_lv_name': link_lv_name}
def create_lv(self, lv_name, lv_type, vir_drv_id, lv_capacity):
    """Create an LV on the RAID identified by *vir_drv_id* and mount it.

    Creates the backing VG first if the RAID has none, then runs
    ``lvcreate`` (backgrounded with ``&``), records the LV, formats and
    mounts it under ``/uservolume/<name>`` and persists the mount in
    /etc/fstab on full success.

    :param lv_name: user-facing volume name (``volume_`` prefix added here).
    :param lv_type: stored via ``save_lv_info``; semantics defined elsewhere.
    :param vir_drv_id: RAID virtual-drive id hosting the new LV.
    :param lv_capacity: requested size (``lvcreate -L`` format).
    :raises ARLvException: when the name is already in use (capacity
        validation may raise as well — see validate_create_lv_capacity).
    :return: {'lv_name': <raw name>, 'lv_created': 1 only when both the
        create and the mount succeeded}.
    """
    lv_name = 'volume_{0}'.format(lv_name.strip())
    lv_names = ARLvManager.get_lv_names()
    if lv_name in lv_names:
        raise ARLvException(LvStatusPool.ID_LV_NAME_HAS_BEEN_USED)
    self.validate_create_lv_capacity(vir_drv_id, lv_capacity)
    raid_name = ARRaidManager().get_raid_name_by_vd(
        vir_drv_id)['raid_name']
    raid_tag = ARRaidManager().get_raid_tag_by_vd(vir_drv_id)['raid_tag']
    raid_letter = ARRaidManager.get_raid_drive_letter(
        raid_tag)['drive_letter']
    vg_vgsize = ARRaidManager().get_raid_vg_and_vgsize(raid_name)
    vg = vg_vgsize['vg']
    lv_created = 0
    create = 0
    if vg is None:
        # No VG yet on this RAID: partition the drive and create one.
        vg_created = ARLvManager.create_raid_vg(raid_letter,
                                                raid_name)['vg_created']
        if vg_created == 0:
            return {'lv_name': lv_name.split('_')[1], 'lv_created': 0}
        # By convention the VG is named after the RAID.
        vg = raid_name
    vgfree = ARRaidManager().get_raid_vg_and_vgsize(raid_name)['vgfree']
    is_equal = ARLvManager.is_equal(lv_capacity, vgfree)['equal']
    '''采用同步的方式返回'''
    # (Note above: "return synchronously".)
    if is_equal:
        # Requested capacity equals the free space: take 100% of the VG
        # to avoid rounding failures in lvcreate.
        cmd = 'lvcreate -y -l 100%VG -n {0} {1} &'.format(lv_name, vg)
    else:
        cmd = 'lvcreate -L {0} -n {1} {2} &'.format(
            lv_capacity, lv_name, vg)
    result = ShellCmd.execute_shell_cmd(cmd)
    if result and result.find('created') != -1:
        create = 1
        self.save_lv_info(lv_name, lv_type)
    volume_dir = '/uservolume/{0}'.format(lv_name)
    # Format (ext3) and mount; this helper runs asynchronously.
    lv_mount_dir = self.lv_mount_dir(lv_name, volume_dir)['lv_mount_dir']
    if create == 1 and lv_mount_dir == 1:
        lv_created = 1
        self.write_mount_info_to_fstab(lv_name, volume_dir)
    return {'lv_name': lv_name.split('_')[1], 'lv_created': lv_created}
def get_raid_vg_and_vgsize(self, raid_name):
    """Return the VG name plus its total and free size for *raid_name*.

    The VG shares the RAID's name, so ``vgs`` output is grepped for it;
    columns 0, 5 and 6 of the first matching row are VG/VSize/VFree.

    :return: {'vg': ..., 'vgsize': ..., 'vgfree': ...} (all None when
        the VG does not exist yet).
    """
    output = ShellCmd.execute_shell_cmd('vgs | grep {0}'.format(raid_name))
    vg, vgsize, vgfree = None, None, None
    if output:
        first_line = output.split('\n')[0].strip()
        columns = [col for col in first_line.split(' ') if col]
        vg = columns[0]
        vgsize = Raidlv.size_patter(columns[5])
        vgfree = Raidlv.size_patter(columns[6])
    return {'vg': vg, 'vgsize': vgsize, 'vgfree': vgfree}
def get_lvsize_usedsize_by_cmd_lvs(self, lv_name):
    """Return the size of *lv_name* as reported by ``lvs`` (column 4).

    :raises ARLvException: when *lv_name* is not a known LV.
    :return: {'lvsize': <size or None>, 'usedsize': None} — used size is
        not available from ``lvs``.
    """
    lv_names = ARLvManager.get_lv_names()
    if lv_name not in lv_names:
        raise ARLvException(LvStatusPool.ID_LV_NOT_FOUND)
    # BUGFIX: match the LV name exactly in column 1.  The previous
    # `grep %s` also matched any LV merely containing the name (asking
    # for volume_a matched volume_ab) and returned the wrong size.
    cmd = "lvs | awk '$1 == \"%s\" {print $4}'" % lv_name
    result = ShellCmd.execute_shell_cmd(cmd)
    lvsize = None
    if result:
        vals = result.strip().split('\n')
        if vals:
            lvsize = Raidlv.size_patter(vals[0])
    return {'lvsize': lvsize, 'usedsize': None}
def expand_fs(self, lv_name):
    """Grow the filesystem to fill the (already resized) LV.

    Resizing the filesystem takes a while, so the command is launched
    asynchronously via ``execute_cmd``.

    :return: {'resize_2_fs': 1 when resize2fs was launched OK, else 0}.
    """
    vg = self.get_lv_vg(lv_name)['lv_vg']
    device = '/dev/{0}/{1}'.format(vg, lv_name)
    proc = ShellCmd.execute_cmd('resize2fs {0}'.format(device))
    return {'resize_2_fs': 0 if proc.returncode else 1}
def get_raid_drive_letter(raid_tag):
    """Map a SCSI *raid_tag* to its /dev block-device path via ``lsscsi``.

    :param raid_tag: SCSI address tag (e.g. from get_raid_tag) or None.
    :return: {'drive_letter': '/dev/sdX' or None when the tag is None,
        lsscsi produced no output, or no line matched}.
    """
    drive_letter = None
    if raid_tag is not None:
        results = ShellCmd.execute_shell_cmd('lsscsi')
        # BUGFIX: guard against a falsy result — calling .strip() on
        # None raised AttributeError before.
        if results:
            for result in results.strip().split('\n'):
                if result.find(raid_tag) != -1:
                    value = re.findall('/[a-z]+/[a-z]+', result)
                    if value:
                        drive_letter = value[0]
                        break
    return {'drive_letter': drive_letter}
def get_raid_tag(pre):
    """Derive the SCSI tag of a newly created RAID device.

    *pre* is the ``lsscsi`` output captured BEFORE the device was
    created; the function polls ``lsscsi`` until the output changes,
    then takes the symmetric difference of the two line sets — the one
    differing line belongs to the new device, and its first
    space-separated token is the SCSI tag.

    :param pre: earlier ``lsscsi`` output, or a falsy value to skip.
    :return: {'raid_tag': <tag> or None when *pre* was falsy}.
    """
    raid_tag = None
    now = None
    if pre:
        # NOTE(review): unbounded busy-wait with no sleep or retry
        # limit — if the device never appears this loop spins forever.
        # Consider a bounded retry with a short sleep.
        while 1:
            now = ShellCmd.execute_shell_cmd('lsscsi')
            if now and now != pre:
                break
        pre = pre.strip().split('\n')
        now = now.strip().split('\n')
        pre = [item.strip() for item in pre]
        now = [item.strip() for item in now]
        # Symmetric difference: exactly the line(s) present in only one
        # snapshot.  NOTE(review): if stripping makes the snapshots
        # equal after all, indexing [0] would raise IndexError.
        raid_tag = list(set(now) ^ set(pre))[0].split(' ')[0]
    return {'raid_tag': raid_tag}
def migrate_data_and_make_link(self, lv_name, data_source):
    """Move *data_source* onto the LV's mount dir and symlink it back.

    Migrating a large data set takes very long, so the shell command is
    launched asynchronously via ``execute_cmd``.

    :return: {'migrate_data_make_link': 1 when launched OK, else 0}.
    """
    mount_dir = self.lv_vg_capacity_usedcapacity_dir(lv_name)['mount_dir']
    migrate_data_make_link = 0
    if mount_dir:
        move_and_link = 'mv -f {0} {1}/ && ln -s {1}/elasticsearch {0}'.\
            format(data_source, mount_dir)
        proc = ShellCmd.execute_cmd(move_and_link)
        if not proc.returncode:
            migrate_data_make_link = 1
    return {'migrate_data_make_link': migrate_data_make_link}
def lv_mount_dir(self, lv_name, directory):
    """Format *lv_name* as ext3 and mount it at *directory*.

    Formatting is very time-consuming — the larger the LV, the longer
    it takes — so the command is launched asynchronously via
    ``execute_cmd``.

    :return: {'lv_mount_dir': 1 when the pipeline launched OK, else 0}.
    """
    vg = self.get_lv_vg(lv_name)['lv_vg']
    device = '/dev/{0}/{1}'.format(vg, lv_name)
    cmd = "mkfs -t ext3 {0} && " \
          "mkdir -p {1} && " \
          "mount {0} {1}".format(device, directory)
    proc = ShellCmd.execute_cmd(cmd)
    return {'lv_mount_dir': 0 if proc.returncode else 1}
def get_lv_status(self, lv_name):
    """Report the LV's status together with a RAID-rebuild flag.

    The LV status depends on the state of the RAID it lives on.  While
    the RAID is rebuilding we want to run as few MegaCli commands as
    possible to keep the endpoint fast, which is why the
    ``has_raid_recon`` check is performed here.

    :raises ARLvException: when *lv_name* is not a known LV.
    :return: {'lv_status': <coded status or None>, 'has_raid_recon': 0/1}.
    """
    if lv_name not in ARLvManager.get_lv_names():
        raise ARLvException(LvStatusPool.ID_LV_NOT_FOUND)
    # The VG carries the RAID's name by convention.
    raid_name = self.get_lv_vg(lv_name)['lv_vg']
    raid_id = ARRaidManager().get_raid_id_by_name(raid_name)['vir_drv_id']
    raid_status, has_raid_recon = None, 0
    if raid_id:
        if ARRaidManager().get_raid_recon_info(raid_id)['is_recon']:
            raid_status = ARRaidStatus.RECONSTRUCTING
            has_raid_recon = 1
        else:
            raid_status = ARRaidManager().get_raid_state_size_stripsz(
                raid_id)['raid_state']
    cmd = "lvdisplay |grep -E 'LV Name|LV Status'|grep -A1 %s |grep 'LV Status'" \
          " |awk '{print $3}'" % lv_name
    output = ShellCmd.execute_shell_cmd(cmd)
    lv_status = None
    if output:
        lines = output.strip().split('\n')
        if lines:
            if not raid_status or raid_status == ARRaidStatus.OFFLINE:
                # RAID unknown/offline: the LV cannot be usable.
                lv_status = ARLvStatus.code(ARLvStatus.NOT_AVAILABLE)
            else:
                lv_status = ARLvStatus.code(lines[0])
    return {'lv_status': lv_status, 'has_raid_recon': has_raid_recon}
def lv_vg_capacity_usedcapacity_dir(self, lv_name):
    """Collect capacity/usage/mount information for *lv_name* from ``df -h``.

    The VG is named after the RAID by convention, so the device mapper
    path is ``/dev/mapper/<vg>-<lv>``.

    :return: dict with lv_name, vg, capacities, used percent and mount
        dir; the measured fields stay None when the LV is not mounted.
    """
    vg = self.get_lv_vg(lv_name)['lv_vg']
    file_system = "/dev/mapper/{0}-{1}".format(vg, lv_name)
    output = ShellCmd.execute_shell_cmd(
        "df -h | grep {0}".format(file_system))
    lv_capacity = None
    lv_used_capacity = None
    lv_available_capacity = None
    lv_used_percent = None
    mount_dir = None
    if output:
        fields = [part.strip() for part in output.split(' ') if part]
        # A df row has exactly 6 columns:
        # filesystem size used avail use% mountpoint
        if len(fields) == 6:
            lv_capacity = ARLvManager.mount_of_lv_size_patter(fields[1])
            lv_used_capacity = ARLvManager.mount_of_lv_size_patter(fields[2])
            lv_available_capacity = ARLvManager.mount_of_lv_size_patter(
                fields[3])
            lv_used_percent = fields[4].split('%')[0]
            mount_dir = fields[-1]
    return {
        'lv_name': lv_name,
        'vg': None if not vg else vg,
        'lv_capacity': lv_capacity,
        'lv_used_capacity': lv_used_capacity,
        'lv_available_capacity': lv_available_capacity,
        'lv_used_percent': lv_used_percent,
        'mount_dir': mount_dir
    }
def create_raid_vg(raid_drive_letter, raid_name):
    """Partition the RAID device into five LVM PVs and create a VG.

    Builds a GPT label with five equal primary partitions, flags them
    for LVM, creates PVs on all of them and a VG named *raid_name* with
    4M extents.

    :param raid_drive_letter: device path, e.g. '/dev/sdb'.
    :param raid_name: name for the new VG (matches the RAID name).
    :return: {'raid_name': raid_name, 'vg_created': 1 on success else 0}.
    """
    cmd = "parted -s {0} mklabel gpt &&" \
          "parted -s {0} mkpart primary 0 20% &&" \
          "parted -s {0} mkpart primary 20% 40% &&" \
          "parted -s {0} mkpart primary 40% 60% &&" \
          "parted -s {0} mkpart primary 60% 80% &&" \
          "parted -s {0} mkpart primary 80% 100% &&" \
          "parted -s {0} toggle 1 lvm &&" \
          "parted -s {0} toggle 2 lvm &&" \
          "parted -s {0} toggle 3 lvm &&" \
          "parted -s {0} toggle 4 lvm &&" \
          "parted -s {0} toggle 5 lvm &&" \
          "partprobe {0}{2} &&" \
          "pvcreate -y {0}{2} &&" \
          "vgcreate -s 4M {1} {0}{2}".format(raid_drive_letter, raid_name,
                                             '{1,2,3,4,5}')
    vg_created = 0
    result = ShellCmd.execute_shell_cmd(cmd)
    # BUGFIX: guard against a falsy result — every sibling checks
    # `if result and ...`; calling .find on None raised AttributeError.
    if result and result.find('successfully created') != -1:
        vg_created = 1
    return {'raid_name': raid_name, 'vg_created': vg_created}