def test_lvmdata_same_as_hlm_model_numbering(self):
    """Swift LVM names derived from the input model use lvmN numbering.

    Covers two servers from the shared ``lvm_disk_model`` fixture: one
    with three swift LVs, and one exercising an LV with an empty ring
    list that still has a Swift consumer.
    """
    hlm_model = yaml.safe_load(lvm_disk_model)
    all_servers = hlm_model.get('global').get('all_servers')

    # First server: exactly three swift LVMs expected.
    found = [lv.swift_lvm_name
             for lv in LogicalVol.get_lvms(all_servers[0].get('disk_model'))]
    # Do not change this without also examining test_hlm_model.py
    for expected in set(['lvm0', 'lvm1', 'lvm2']):
        self.assertTrue(expected in found,
                        '%s missing from %s' % (expected, found))
        found.remove(expected)
    self.assertEqual(0, len(found), 'still have %s' % found)

    # Check we handle empty ring list (lvm2 has no rings), but has
    # Swift consumer
    found = [lv.swift_lvm_name
             for lv in LogicalVol.get_lvms(all_servers[1].get('disk_model'))]
    # Do not change this without also examining test_hlm_model.py
    for expected in set(['lvm0', 'lvm1', 'lvm2', 'lvm3']):
        self.assertTrue(expected in found,
                        '%s missing from %s' % (expected, found))
        found.remove(expected)
    self.assertEqual(0, len(found), 'still have %s' % found)
def test_lvmdata_same_as_hlm_model_numbering(self):
    """Swift LVM numbering from the disk model matches expectations.

    The second case checks we handle an empty ring list (lvm2 has no
    rings) when the LV still has a Swift consumer.
    """
    hlm_model = yaml.safe_load(lvm_disk_model)
    servers = hlm_model.get('global').get('all_servers')

    # Do not change these without also examining test_hlm_model.py
    cases = [
        (servers[0], ['lvm0', 'lvm1', 'lvm2']),
        (servers[1], ['lvm0', 'lvm1', 'lvm2', 'lvm3']),
    ]
    for server, dev_expected in cases:
        swift_devs = LogicalVol.get_lvms(server.get('disk_model'))
        name_numbers = [dev.swift_lvm_name for dev in swift_devs]
        for dev in set(dev_expected):
            self.assertTrue(dev in name_numbers,
                            '%s missing from %s' % (dev, name_numbers))
            name_numbers.remove(dev)
        # Anything left over is an unexpected extra LVM name.
        self.assertEqual(0, len(name_numbers),
                         'still have %s' % name_numbers)
def test_no_swift_consumers(self):
    """A volume group consumed only by the OS yields no swift LVMs."""
    disk_model = '''
volume_groups:
- consumer:
    name: os
  logical_volumes:
  - fstype: ext4
    mount: /
    name: root
    size: 40%
  - fstype: ext4
    mkfs_opts: -O large_file
    mount: /var/log
    name: log
    size: 20%
  - fstype: ext4
    mkfs_opts: -O large_file
    mount: /var/crash
    name: crash
    size: 10%
  - fstype: ext4
    mkfs_opts: -O large_file
    mount: /var/lib/elasticsearch
    name: elasticsearch
    size: 10%
  - fstype: ext4
    mkfs_opts: -O large_file
    mount: /var/lib/zookeeper
    name: zookeeper
    size: 5%
  name: ardana-vg
'''
    model = yaml.safe_load(disk_model)
    self.assertEqual(len(LogicalVol.get_lvms(model)), 0)
def test_no_swift_consumers(self):
    """No swift LVMs are reported when no LV has a swift consumer."""
    disk_model = '''
volume_groups:
- consumer:
    name: os
  logical_volumes:
  - fstype: ext4
    mount: /
    name: root
    size: 40%
  - fstype: ext4
    mkfs_opts: -O large_file
    mount: /var/log
    name: log
    size: 20%
  - fstype: ext4
    mkfs_opts: -O large_file
    mount: /var/crash
    name: crash
    size: 10%
  - fstype: ext4
    mkfs_opts: -O large_file
    mount: /var/lib/elasticsearch
    name: elasticsearch
    size: 10%
  - fstype: ext4
    mkfs_opts: -O large_file
    mount: /var/lib/zookeeper
    name: zookeeper
    size: 5%
  name: hlm-vg
'''
    swift_lvms = LogicalVol.get_lvms(yaml.safe_load(disk_model))
    self.assertEqual(len(swift_lvms), 0)
def test_valid_disk_model(self):
    """Swift LVs in a mixed-consumer VG are found and numbered in order."""
    disk_model = '''
volume_groups:
- consumer:
    name: os
  logical_volumes:
  - fstype: ext4
    mount: /
    name: root
    size: 40%
  - attrs:
      rings:
      - account
      - container
    consumer:
      name: swift
    name: swf1
    size: 5%
  - attrs:
      rings:
      - account
      - container
    consumer:
      name: swift
    name: swf2
    size: 5%
  - fstype: ext4
    mkfs_opts: -O large_file
    mount: /var/log
    name: log
    size: 20%
  - fstype: ext4
    mkfs_opts: -O large_file
    mount: /var/crash
    name: crash
    size: 10%
  - fstype: ext4
    mkfs_opts: -O large_file
    mount: /var/lib/elasticsearch
    name: elasticsearch
    size: 10%
  - fstype: ext4
    mkfs_opts: -O large_file
    mount: /var/lib/zookeeper
    name: zookeeper
    size: 5%
  name: ardana-vg
'''
    swift_lvms = LogicalVol.get_lvms(yaml.safe_load(disk_model))
    # (lv name, generated swift name) pairs, in model order.
    results = [(lv.lvm, lv.swift_lvm_name) for lv in swift_lvms]
    self.assertEqual(results, [('swf1', 'lvm0'), ('swf2', 'lvm1')])
def test_valid_disk_model(self):
    """Two swift-consumed LVs get lvm0/lvm1 names in declaration order."""
    disk_model = '''
volume_groups:
- consumer:
    name: os
  logical_volumes:
  - fstype: ext4
    mount: /
    name: root
    size: 40%
  - attrs:
      rings:
      - account
      - container
    consumer:
      name: swift
    name: swf1
    size: 5%
  - attrs:
      rings:
      - account
      - container
    consumer:
      name: swift
    name: swf2
    size: 5%
  - fstype: ext4
    mkfs_opts: -O large_file
    mount: /var/log
    name: log
    size: 20%
  - fstype: ext4
    mkfs_opts: -O large_file
    mount: /var/crash
    name: crash
    size: 10%
  - fstype: ext4
    mkfs_opts: -O large_file
    mount: /var/lib/elasticsearch
    name: elasticsearch
    size: 10%
  - fstype: ext4
    mkfs_opts: -O large_file
    mount: /var/lib/zookeeper
    name: zookeeper
    size: 5%
  name: hlm-vg
'''
    expected = [('swf1', 'lvm0'), ('swf2', 'lvm1')]
    actual = []
    for lv in LogicalVol.get_lvms(yaml.safe_load(disk_model)):
        actual.append((lv.lvm, lv.swift_lvm_name))
    self.assertEqual(actual, expected)
def test_lvmdata_same_as_ardana_model_numbering(self):
    """Swift LVM names from the model get the expected lvmN numbering.

    Bug fix: the original removed each matched name from
    ``dev_expected`` (the very list it was iterating as a set), so the
    final length assertion was vacuously true and extra, unexpected
    names in ``name_numbers`` were never detected.  Remove from the
    discovered list instead, mirroring the companion hlm-model test.
    """
    ardana_model = yaml.safe_load(lvm_disk_model)
    server0 = ardana_model.get('global').get('all_servers')[0]
    drive_model = server0.get('disk_model')
    swift_devs = LogicalVol.get_lvms(drive_model)
    name_numbers = [dev.swift_lvm_name for dev in swift_devs]
    # Do not change this without also examining test_ardana_model.py
    dev_expected = ['lvm0', 'lvm1', 'lvm2']
    for dev in set(dev_expected):
        self.assertTrue(dev in name_numbers,
                        '%s missing from %s' % (dev, name_numbers))
        name_numbers.remove(dev)
    # Anything left over is an unexpected extra LVM name.
    self.assertEqual(0, len(name_numbers),
                     'still have %s' % name_numbers)
def test_bad_consumer_syntax(self):
    """A malformed swift consumer entry is ignored, not matched."""
    disk_model = '''
volume_groups:
- consumer:
    name: os
  logical_volumes:
  - fstype: ext4
    mount: /
    name: root
    size: 40%
  - name: swf1
    consumer: swift     # should be object with name and attrs
    attrs:
      rings:
      - junk
  name: ardana-vg
'''
    self.assertEqual(len(LogicalVol.get_lvms(yaml.safe_load(disk_model))), 0)
def test_bad_consumer_syntax(self):
    """An LV whose consumer is a bare string (not a mapping) is skipped."""
    disk_model = '''
volume_groups:
- consumer:
    name: os
  logical_volumes:
  - fstype: ext4
    mount: /
    name: root
    size: 40%
  - name: swf1
    consumer: swift     # should be object with name and attrs
    attrs:
      rings:
      - junk
  name: hlm-vg
'''
    swift_lvms = LogicalVol.get_lvms(yaml.safe_load(disk_model))
    self.assertEqual(len(swift_lvms), 0)
def main():
    """Partition, label and mount the swift drives/LVs from the input model.

    Reads optional settings from the swift storage config file, loads the
    drive/LV list from disk_models.yml, then runs the stages selected on
    the command line (-a runs all of partition/label/mount) and records
    the mounted-device facts in an ansible fact file.  Must run as root;
    exits non-zero on configuration or validation errors.
    """
    # Make sure that the user is root
    if not os.geteuid() == 0:
        print("Script must be run as root")
        sys.exit(1)

    args = OptionParser()
    args.add_option("-v", "--verbose", dest="verbose", action="store_true",
                    help="Give a more verbose output")
    args.add_option("-a", "--all_actions", dest="all_actions",
                    action="store_true",
                    # Typo fix: was "the the drives"
                    help="Perform all actions on the drives")
    args.add_option("-p", "--partition", dest="partition",
                    action="store_true",
                    help="Only partition the drives")
    args.add_option("-l", "--label", dest="label", action="store_true",
                    help="Only label the drives")
    args.add_option("-m", "--mount", dest="mount", action="store_true",
                    help="Only mount the drives")
    args.add_option("-c", "--check", dest="check", action="store_true",
                    help="Check config data and hardware match")
    options, arguments = args.parse_args()

    # Get configuration data from /etc/swift/ardana_storage.conf,
    # falling back to a built-in default for each missing option.
    if os.path.isfile(conf_file):
        if options.verbose:
            print("Using config data from %s" % (conf_file))
        parser = ConfigParser.RawConfigParser()
        parser.read(conf_file)
        try:
            boot_label = parser.get("swift_config", "boot_label")
        except ConfigParser.NoOptionError:
            boot_label = "boot"
        try:
            fs = parser.get("swift_config", "file_system")
        except ConfigParser.NoOptionError:
            fs = "xfs"
        try:
            mount_dir = parser.get("swift_config", "mount_dir")
        except ConfigParser.NoOptionError:
            mount_dir = "/srv/node/"
        try:
            fact_file = parser.get("swift_config", "fact_file")
        except ConfigParser.NoOptionError:
            fact_file = "/etc/ansible/facts.d/swift_drive_info.fact"
    else:
        boot_label = "boot"
        fs = "xfs"
        mount_dir = "/srv/node/"
        fact_file = "/etc/ansible/facts.d/swift_drive_info.fact"
        if options.verbose:
            print("No %s - using default values" % (conf_file))
            print("*****************************")
            print("Boot Label = %s" % (boot_label))
            print("File System = %s" % (fs))
            print("Mount Directory = %s" % (mount_dir))
            print("File Prefix = %s" % (fact_file))
            print("Not using config processor drive entries")
            print("*****************************")

    # Get disk info from the disk_models.yml.  cp_input_list collects
    # (device path, swift device name) pairs for drives and LVs alike.
    if os.path.isfile(disk_model_file):
        cp_input_list = []
        try:
            swift_drives = Drive.load(disk_model_file)
        except SwiftlmInvalidConfig as exc:
            print("ERROR: %s" % (exc))
            sys.exit(1)
        for e_drive in swift_drives:
            cp_input_list.append((e_drive.device,
                                  e_drive.swift_device_name))
        try:
            swift_log_vols = LogicalVol.load(disk_model_file)
        except SwiftlmInvalidConfig as exc:
            print("ERROR: %s" % (exc))
            sys.exit(1)
        for e_vol in swift_log_vols:
            full_vol = "/dev/" + str(e_vol.lvg) + "/" + e_vol.lvm
            cp_input_list.append((full_vol, e_vol.swift_lvm_name))
    else:
        print("Cannot continue - %s not present" % (disk_model_file))
        sys.exit(1)

    # No need to continue if there are no swift drives/lvms
    if not cp_input_list:
        # Typo fix: was "devicess"
        print("No swift devices specified in input model for this node")
        sys.exit(0)

    my_hostname = socket.getfqdn()
    my_ip = get_my_ip(my_hostname)
    ip_label = get_ip_label(my_ip)
    if options.verbose:
        print("IP hex label = %s" % (ip_label))

    # Can skip validation if ONLY mounting devices.  Note that the
    # validation will still be performed if the "all_actions" option is
    # selected, which includes mounting the devices
    if not options.mount:
        data_conflict = check_data_matches(cp_input_list)
        if data_conflict:
            sys.exit(1)
        if options.check:
            sys.exit(0)

    parted_drives, raw_drives = find_parted_drives(cp_input_list)
    blank_drives, unparted_drives, disk_label_list, blank_partitions = \
        separate_labelled_devices(ip_label, parted_drives,
                                  supported_file_systems, raw_drives,
                                  boot_label)
    blank_volumes, disk_label_list = create_volume_fs(cp_input_list,
                                                      disk_label_list,
                                                      ip_label)

    if options.verbose:
        print("Current disk label list = %s" % (str(disk_label_list)))
        print("Unpartitioned disk list = %s" % (str(unparted_drives)))
        print("Unlabelled disk list = %s" % (str(blank_drives)))
        print("Unlabelled partition list = %s" % (str(blank_partitions)))
        print("Unlabelled volume list = %s" % (str(blank_volumes)))

    if options.all_actions or options.partition:
        if not unparted_drives:
            print("No drives that need to be partitioned")
        else:
            print("The following drives are not partitioned - %s"
                  % (str(unparted_drives)))
            blank_drives = format_drives(blank_drives, unparted_drives,
                                         fs, cp_input_list)

    if options.all_actions or options.label:
        if not blank_drives:
            print("No one-partition drives that need to be labelled")
        else:
            print("Label the following drives - %s" % (str(blank_drives)))
            disk_label_list = label_drives(blank_drives, ip_label, fs,
                                           disk_label_list, cp_input_list)
        if not blank_partitions:
            print("No multiple partition drives that need to be labelled")
        else:
            print("Label the following partitions - %s"
                  % (str(blank_partitions)))
            disk_label_list = label_partitions(blank_partitions, ip_label,
                                               fs, disk_label_list,
                                               cp_input_list)
        if not blank_volumes:
            print("No volumes that need to be labelled")
        else:
            print("Label the following volumes - %s" % (str(blank_volumes)))
            disk_label_list = label_volumes(blank_volumes, ip_label,
                                            disk_label_list, cp_input_list)

    if options.all_actions or options.mount:
        if not disk_label_list:
            print("No drives ready to be mounted")
        else:
            mount_devices(ip_label, disk_label_list, mount_dir,
                          cp_input_list)
        # NOTE(review): fact file is (re)written whenever the mount stage
        # runs, even if nothing was mounted — confirm against the
        # original indentation, which was lost.
        node_drive_info = generate_drive_info(my_ip, mount_dir,
                                              disk_label_list)
        write_to_info_file(node_drive_info, fact_file)
def main():
    """Partition, label and mount swift drives discovered on the node.

    Like the model-driven variant, but also scans /dev for disks matching
    a configurable grep pattern and validates the model against that
    hardware list before acting.  Must run as root; exits non-zero on
    configuration or validation errors.
    """
    # Make sure that the user is root
    if not os.geteuid() == 0:
        print("Script must be run as root")
        sys.exit(1)

    args = OptionParser()
    args.add_option("-v", "--verbose", dest="verbose", action="store_true",
                    help="Give a more verbose output")
    args.add_option("-a", "--all_actions", dest="all_actions",
                    action="store_true",
                    # Typo fix: was "the the drives"
                    help="Perform all actions on the drives")
    args.add_option("-p", "--partition", dest="partition",
                    action="store_true",
                    help="Only partition the drives")
    args.add_option("-l", "--label", dest="label", action="store_true",
                    help="Only label the drives")
    args.add_option("-m", "--mount", dest="mount", action="store_true",
                    help="Only mount the drives")
    args.add_option("-c", "--check", dest="check", action="store_true",
                    help="Check config data and hardware match")
    options, arguments = args.parse_args()

    # Get configuration data from /etc/swift/hlm_storage.conf, falling
    # back to a built-in default for each missing option.
    if os.path.isfile(conf_file):
        if options.verbose:
            print("Using config data from %s" % (conf_file))
        parser = ConfigParser.RawConfigParser()
        parser.read(conf_file)
        try:
            boot_label = parser.get("swift_config", "boot_label")
        except ConfigParser.NoOptionError:
            boot_label = "boot"
        try:
            disk_pattern = parser.get("swift_config", "disk_pattern")
        except ConfigParser.NoOptionError:
            # grep basic-regex (raw string avoids the invalid \+ escape)
            disk_pattern = r"/dev/sd[a-z]\+$"
        try:
            fs = parser.get("swift_config", "file_system")
        except ConfigParser.NoOptionError:
            fs = "xfs"
        try:
            mount_dir = parser.get("swift_config", "mount_dir")
        except ConfigParser.NoOptionError:
            mount_dir = "/srv/node/"
        try:
            fact_file = parser.get("swift_config", "fact_file")
        except ConfigParser.NoOptionError:
            fact_file = "/etc/ansible/facts.d/swift_drive_info.fact"
    else:
        boot_label = "boot"
        disk_pattern = r"/dev/sd[a-z]\+$"
        fs = "xfs"
        mount_dir = "/srv/node/"
        fact_file = "/etc/ansible/facts.d/swift_drive_info.fact"
        if options.verbose:
            print("No %s - using default values" % (conf_file))
            print("*****************************")
            print("Boot Label = %s" % (boot_label))
            print("Disk Pattern = %s" % (disk_pattern))
            print("File System = %s" % (fs))
            print("Mount Directory = %s" % (mount_dir))
            print("File Prefix = %s" % (fact_file))
            print("Not using config processor drive entries")
            print("*****************************")

    # Get disk info from the disk_models.yml.  cp_input_list collects
    # (device path, swift device name) pairs for drives and LVs alike.
    if os.path.isfile(disk_model_file):
        cp_input_list = []
        try:
            swift_drives = Drive.load(disk_model_file)
        except SwiftlmInvalidConfig as exc:
            print("ERROR: %s" % (exc))
            sys.exit(1)
        for e_drive in swift_drives:
            cp_input_list.append((e_drive.device,
                                  e_drive.swift_device_name))
        try:
            swift_log_vols = LogicalVol.load(disk_model_file)
        except SwiftlmInvalidConfig as exc:
            print("ERROR: %s" % (exc))
            sys.exit(1)
        for e_vol in swift_log_vols:
            full_vol = "/dev/" + str(e_vol.lvg) + "/" + e_vol.lvm
            cp_input_list.append((full_vol, e_vol.swift_lvm_name))
    else:
        print("Cannot continue - %s not present" % (disk_model_file))
        sys.exit(1)

    # No need to continue if there are no swift drives/lvms
    if not cp_input_list:
        # Typo fix: was "devicess"
        print("No swift devices specified in input model for this node")
        sys.exit(0)

    my_hostname = socket.getfqdn()
    my_ip = get_my_ip(my_hostname)
    ip_label = get_ip_label(my_ip)
    if options.verbose:
        print("IP hex label = %s" % (ip_label))

    # Discover the disks actually present on the node; disk_pattern is a
    # grep basic regular expression applied to the ls output.
    status, all_drives = commands.getstatusoutput(
        'ls /dev/sd* | grep "' + disk_pattern + '"')
    if status != 0:
        print("Error determining disks on the node")
        sys.exit(1)
    all_drives = all_drives.split('\n')

    # Validate the model against the hardware before touching anything.
    data_conflict = check_data_matches(cp_input_list, all_drives)
    if data_conflict:
        sys.exit(1)
    if options.check:
        sys.exit(0)

    parted_drives, raw_drives = find_parted_drives(all_drives)
    blank_drives, unparted_drives, disk_label_list, blank_partitions = \
        separate_labelled_devices(ip_label, parted_drives,
                                  supported_file_systems, raw_drives,
                                  boot_label)
    blank_volumes, disk_label_list = create_volume_fs(cp_input_list,
                                                      disk_label_list,
                                                      ip_label)

    if options.verbose:
        print("Current disk label list = %s" % (str(disk_label_list)))
        print("Unpartitioned disk list = %s" % (str(unparted_drives)))
        print("Unlabelled disk list = %s" % (str(blank_drives)))
        print("Unlabelled partition list = %s" % (str(blank_partitions)))
        print("Unlabelled volume list = %s" % (str(blank_volumes)))

    if options.all_actions or options.partition:
        if not unparted_drives:
            print("No drives that need to be partitioned")
        else:
            print("The following drives are not partitioned - %s"
                  % (str(unparted_drives)))
            blank_drives = format_drives(blank_drives, unparted_drives,
                                         fs, cp_input_list)

    if options.all_actions or options.label:
        if not blank_drives:
            print("No one-partition drives that need to be labelled")
        else:
            print("Label the following drives - %s" % (str(blank_drives)))
            disk_label_list = label_drives(blank_drives, ip_label, fs,
                                           disk_label_list, cp_input_list)
        if not blank_partitions:
            print("No multiple partition drives that need to be labelled")
        else:
            print("Label the following partitions - %s"
                  % (str(blank_partitions)))
            disk_label_list = label_partitions(blank_partitions, ip_label,
                                               fs, disk_label_list,
                                               cp_input_list)
        if not blank_volumes:
            print("No volumes that need to be labelled")
        else:
            print("Label the following volumes - %s" % (str(blank_volumes)))
            disk_label_list = label_volumes(blank_volumes, ip_label,
                                            disk_label_list, cp_input_list)

    if options.all_actions or options.mount:
        if not disk_label_list:
            print("No drives ready to be mounted")
        else:
            mount_devices(ip_label, disk_label_list, mount_dir,
                          cp_input_list)
        # NOTE(review): fact file is (re)written whenever the mount stage
        # runs, even if nothing was mounted — confirm against the
        # original indentation, which was lost.
        node_drive_info = generate_drive_info(my_ip, mount_dir,
                                              disk_label_list)
        write_to_info_file(node_drive_info, fact_file)