Example #1
    def clone(self, id=None, src=None, backup=None, size=None):
        """
        This runs a clone job outside of the storage api,
        which is useful for performance testing backup restores
        (Example: storage tools clone volume-clone
          --backup volume-backup --src volume-original)
        """
        # Set basic Logging
        logging.basicConfig()
        # Get the lunr logger
        log = logger.get_logger()
        # Output Debug level info
        log.logger.setLevel(logging.DEBUG)
        # Load the local storage configuration
        conf = LunrConfig.from_storage_conf()
        # Init the volume helper
        volume = VolumeHelper(conf)

        # Attempt to figure out the original volume size
        size = size or str(volume.get(src)['size'] / 1073741824)
        # Ensure the size carries the 'G' (gigabytes) suffix lvcreate expects
        if not size.endswith('G'):
            size = size + 'G'
        # Create a tag to apply to the lvm volume
        tag = encode_tag(source_volume_id=src, backup_id=backup)
        # Create the volume
        execute('lvcreate', volume.volume_group,
                name=id, size=size, addtag=tag)
        # Get info for the newly created volume
        new = volume.get(id)

        with self.timeit(new['size']):
            print("Starting Backup")
            # Restore volume from the backup
            volume.clone(new, src, backup)
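
A quick standalone sketch of the size fallback used above: the source volume's byte count is divided by 1073741824 (1 GiB) and the result is given the 'G' suffix that lvcreate expects. The helper name and sample values below are illustrative only.

GIB = 1024 ** 3  # 1073741824, the divisor used in clone() above

def normalize_size(requested=None, src_size_bytes=None):
    # Fall back to the source volume size, expressed in whole gigabytes,
    # then make sure the value carries the 'G' suffix.
    size = requested or str(src_size_bytes // GIB)
    if not size.endswith('G'):
        size = size + 'G'
    return size

print(normalize_size(src_size_bytes=12 * GIB))  # '12G'
print(normalize_size(requested='5'))            # '5G'
print(normalize_size(requested='5G'))           # '5G'
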
Example #2
    def __init__(self):
        # Give our sub command a name
        self._name = 'tools'
        # Create a volume helper with our local storage config
        self.volume = VolumeHelper(LunrConfig.from_storage_conf())
        # let the base class set up methods in our class
        SubCommand.__init__(self)
        self.total = defaultdict(float)
Example #3
File: base.py  Project: rackerlabs/lunr
    def __init__(self, conf):
        self.volumes = VolumeHelper(conf)
        self.exports = ExportHelper(conf)
        self.backups = BackupHelper(conf)
        self.cgroups = CgroupHelper(conf)
        self.api_server = conf.string('storage', 'api_server',
                                      "http://localhost:8080")
        self.api_retry = conf.int('storage', 'api_retry', 1)

        # name of node registration
        self.name = conf.string('storage', 'name', socket.gethostname())
        self.affinity_group = conf.string('storage', 'affinity_group', '')

        # management interface
        self.management_host = conf.string('server:main', 'host', '0.0.0.0')
        if self.management_host == '0.0.0.0':
            self.management_host = my_ip(self.api_server)
        self.management_port = conf.int('server:main', 'port', 8081)

        # storage interface
        self.storage_host = conf.string('storage', 'host', '127.0.0.1')
        self.storage_port = conf.int('storage', 'port', 3260)
        self.volume_type = conf.string('storage', 'volume_type', 'vtype')

        # cinder
        self.cinder_args = cinderclient.get_args(conf)
        self.rax_auth = conf.bool('cinder', 'rax_auth', True)
        if self.rax_auth:
            self.client = cinderclient.CinderClient(**self.cinder_args)
        self.cinder_host = conf.string('storage', 'cinder_host',
                                       self.management_host)
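
For reference, the sections, options, and built-in defaults the constructor above resolves, collected in one place. This is illustrative only, derived from the conf.string/conf.int/conf.bool calls shown; it is not a recommended configuration.

import socket

# (section, option) -> default used when the option is absent from the
# storage config read by the __init__ above.
DEFAULTS = {
    ('storage', 'api_server'): 'http://localhost:8080',
    ('storage', 'api_retry'): 1,
    ('storage', 'name'): socket.gethostname(),
    ('storage', 'affinity_group'): '',
    ('server:main', 'host'): '0.0.0.0',  # replaced with my_ip(api_server) if left at 0.0.0.0
    ('server:main', 'port'): 8081,
    ('storage', 'host'): '127.0.0.1',
    ('storage', 'port'): 3260,
    ('storage', 'volume_type'): 'vtype',
    ('cinder', 'rax_auth'): True,
    ('storage', 'cinder_host'): None,    # falls back to the resolved management_host
}
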
Example #4
    def backup(self, id=None, src=None, timestamp=None):
        """
        This runs a backup job outside of the storage api,
        which is useful for performance testing backups
        """
        # Set basic Logging
        logging.basicConfig()
        # Get the lunr logger
        log = logger.get_logger()
        # Output Debug level info
        log.logger.setLevel(logging.DEBUG)
        # Load the local storage configuration
        conf = LunrConfig.from_storage_conf()
        # If no time provided, use current time
        timestamp = timestamp or time()
        # Init our helpers
        volume = VolumeHelper(conf)
        backup = BackupHelper(conf)

        try:
            # Create the snapshot
            snapshot = volume.create_snapshot(src, id, timestamp)

            # For testing non-snapshot speeds
            #snapshot = volume.get(src)
            #snapshot['backup_id'] = id
            #snapshot['origin'] = src
            #snapshot['timestamp'] = 1338410885.0
            #del snapshot['volume']

            print("Created snap-shot: ", pprint(snapshot))

            with self.timeit(snapshot['size']):
                # Backup the snapshot
                print("Starting Backup")
                backup.save(snapshot, id)

        finally:
            # Delete the snapshot if it was created
            if 'snapshot' in locals():
                self._remove_volume(snapshot['path'])
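
The self.timeit(snapshot['size']) context manager used above is defined on the Tools class (shown in full in the next example); it measures wall-clock time around the block and prints throughput in MB/s. A minimal standalone sketch of the same pattern, with a sleep standing in for the real backup work:

from contextlib import contextmanager
from time import sleep, time

@contextmanager
def timeit(size):
    # Time the body of the `with` block and report throughput for `size` bytes
    before = time()
    yield
    secs = time() - before
    print("Elapsed: %s" % secs)
    print("Throughput: %0.2f MB/s" % ((int(size) / secs) / 1048576))

with timeit(64 * 1048576):  # pretend 64MB were transferred
    sleep(0.5)              # stand-in for backup.save(snapshot, id)
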
Example #5
class Tools(SubCommand):
    """
    A collection of misc Storage Node tools
    """

    def __init__(self):
        # Give our sub command a name
        self._name = 'tools'
        # Create a volume helper with our local storage config
        self.volume = VolumeHelper(LunrConfig.from_storage_conf())
        # let the base class set up methods in our class
        SubCommand.__init__(self)
        self.total = defaultdict(float)

    def dot(self):
        sys.stdout.write('.')
        sys.stdout.flush()

    def get_volume(self, id):
        """
        return volume information if the argument is an id or a path
        """
        # If the id is actually a path
        if exists(id):
            with open(id) as file:
                size = os.lseek(file.fileno(), 0, os.SEEK_END)
            return {'path': id, 'size': size}
        return self.volume.get(id)

    def _remove_volume(self, path):
        try:
            self.volume.remove(path)
        except Exception as e:
            print("Remove Failed: %s" % e)

    @opt('device',
         help="volume id or /path/to/block-device to randomize writes to")
    @opt('--percent', help="percent of the volume to randomize")
    @opt('--silent', help="run silent", action='store_const', const=True)
    def randomize(self, device=None, percent=100, silent=False):
        """
        Writes random data to the beginning of each 4MB block on a block
        device; this is useful when performance testing the backup process

        (Without any optional arguments, this randomizes the first 32k of
        each 4MB block on 100 percent of the device)
        """
        volume = self.get_volume(device)
        # The number of blocks in the volume
        blocks = int(volume['size'] / BLOCK_SIZE)
        # How many writes should be to the device
        # (based on the percentage requested)
        num_writes = int(blocks * percent * 0.01)
        # Build a list of offsets we write to
        offsets = sorted(random.sample(range(blocks), num_writes))
        total = 0

        if not silent:
            print('Writing urandom to %s bytes in %s' % (volume['size'],
                                                         volume['path']))

        with open(volume['path'], 'w') as file:
            for offset in offsets:
                if not silent:
                    self.dot()
                file.seek(offset * BLOCK_SIZE)
                # Create a random string 32k long then duplicate
                # the randomized string 128 times (32768 * 128 = 4MB)
                data = os.urandom(32768) * 128
                total += len(data)
                # write out the 4MB block of randomized data
                file.write(data)
        print("\nWrote: %s" % total)

    @opt('device', help="volume id or /path/to/block-device to read")
    @opt('--offset', help="the offset in blocks to start the read")
    @opt('--count', help="the number of blocks to read")
    @opt('--bs', help="size of the block to read (default: %s)" % BLOCK_SIZE)
    def read(self, device=None, offset=0, bs=None, count=1):
        """
        Using O_DIRECT, read from the specified block device to stdout
        (Without any optional arguments, this reads the first 4k from the device)
        """
        volume = self.get_volume(device)
        block_size = bs or BLOCK_SIZE

        offset = int(offset) * block_size
        count = int(count)
        print("Offset: ", offset)

        total = 0
        with directio.open(volume['path'], buffered=block_size) as file:
            file.seek(offset)
            for i in range(0, count):
                total += os.write(sys.stdout.fileno(), file.read(block_size))
        os.write(sys.stdout.fileno(), "\nRead: %d Bytes\n" % total)

    @opt('device', help="volume id or /path/to/block-device to read")
    @opt('--char',
         help="the character to write to the block device (default: 0)")
    @opt('--count',
         help="the number of blocks to write (default: size of device)")
    @opt('--bs', help="size of the block to write (default: %s)" % BLOCK_SIZE)
    def write(self, device=None, char=0, bs=None, count=None):
        """
        Using O_DIRECT, write a character in 4k chunks to a specified block
        device (Without any optional arguments, this writes NULLs to the
        entire device)
        """
        volume = self.get_volume(device)
        block_size = bs or BLOCK_SIZE

        # Calculate the number of blocks that are in the volume
        count = count or (volume['size'] / block_size)

        data = "".join([chr(int(char)) for i in range(0, block_size)])

        print("Writing: '%c'" % data[0])
        total = 0
        with directio.open(volume['path'], buffered=block_size) as file:
            for i in range(0, count):
                self.dot()
                total += file.write(data)
        print("\nWrote: ", total)
        return 0

    @contextmanager
    def timeit(self, size):
        before = time()
        yield
        secs = time() - before
        print("Elapsed: %s" % secs)
        print("Throughput: %0.2f MB/s" % ((int(size) / secs) / 1048576))

    @opt('id', help="backup id to identify the backup")
    @opt('--src', help="volume id to create the backup from", required=True)
    @opt('--timestamp', help="the timestamp used on the backup")
    def backup(self, id=None, src=None, timestamp=None):
        """
        This runs a backup job outside of the storage api,
        which is useful for performance testing backups
        """
        # Set basic Logging
        logging.basicConfig()
        # Get the lunr logger
        log = logger.get_logger()
        # Output Debug level info
        log.logger.setLevel(logging.DEBUG)
        # Load the local storage configuration
        conf = LunrConfig.from_storage_conf()
        # If no time provided, use current time
        timestamp = timestamp or time()
        # Init our helpers
        volume = VolumeHelper(conf)
        backup = BackupHelper(conf)

        try:
            # Create the snapshot
            snapshot = volume.create_snapshot(src, id, timestamp)

            # For testing non-snapshot speeds
            #snapshot = volume.get(src)
            #snapshot['backup_id'] = id
            #snapshot['origin'] = src
            #snapshot['timestamp'] = 1338410885.0
            #del snapshot['volume']

            print("Created snap-shot: ", pprint(snapshot))

            with self.timeit(snapshot['size']):
                # Backup the snapshot
                print("Starting Backup")
                backup.save(snapshot, id)

        finally:
            # Delete the snapshot if it was created
            if 'snapshot' in locals():
                self._remove_volume(snapshot['path'])

    @opt('id', help="volume id to identify the new volume")
    @opt('--src', help="volume id the backup was created for", required=True)
    @opt('--backup', help="backup id to create the clone from", required=True)
    @opt('--size', help="new volume size (default: src volume size)")
    def clone(self, id=None, src=None, backup=None, size=None):
        """
        This runs a clone job outside of the storage api,
        which is useful for performance testing backup restores
        (Example: storage tools clone volume-clone
          --backup volume-backup --src volume-original)
        """
        # Set basic Logging
        logging.basicConfig()
        # Get the lunr logger
        log = logger.get_logger()
        # Output Debug level info
        log.logger.setLevel(logging.DEBUG)
        # Load the local storage configuration
        conf = LunrConfig.from_storage_conf()
        # Init the volume helper
        volume = VolumeHelper(conf)

        # Attempt to figure out the original volume size
        size = size or str(volume.get(src)['size'] / 1073741824)
        # Ensure the size carries the 'G' (gigabytes) suffix lvcreate expects
        if not size.endswith('G'):
            size = size + 'G'
        # Create a tag to apply to the lvm volume
        tag = encode_tag(source_volume_id=src, backup_id=backup)
        # Create the volume
        execute('lvcreate', volume.volume_group,
                name=id, size=size, addtag=tag)
        # Get info for the newly created volume
        new = volume.get(id)

        with self.timeit(new['size']):
            print("Starting Backup")
            # Restore volume from the backup
            volume.clone(new, src, backup)
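
The randomize method above picks which 4MB blocks to touch with random.sample, so the requested percentage of the device is hit without repeating offsets. A small standalone sketch of that offset selection; the 4MB value for BLOCK_SIZE is an assumption here, since the module-level constant is not part of the listing.

import random

BLOCK_SIZE = 4 * 1024 * 1024      # assumed value of the BLOCK_SIZE constant
device_size = 100 * BLOCK_SIZE    # pretend the device holds 100 blocks
percent = 25

blocks = int(device_size / BLOCK_SIZE)
num_writes = int(blocks * percent * 0.01)
# Distinct block indices, sorted so the writes sweep the device in order
offsets = sorted(random.sample(range(blocks), num_writes))
print(num_writes, offsets[:5])
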
Example #6
    def setUp(self):
        IetTest.setUp(self)
        self.tempdir = mkdtemp()
        self.conf = self.config(self.tempdir)
        self.volume = VolumeHelper(self.conf)
        self.export = ExportHelper(self.conf)
Example #7
class TestClone(IetTest):

    def setUp(self):
        IetTest.setUp(self)
        self.tempdir = mkdtemp()
        self.conf = self.config(self.tempdir)
        self.volume = VolumeHelper(self.conf)
        self.export = ExportHelper(self.conf)

    def tearDown(self):
        try:
            # Remove any exports
            self.export.delete(self.volume2)
        except NotFound:
            pass

        shutil.rmtree(self.tempdir)
        IetTest.tearDown(self)

    @classmethod
    def setUpClass(cls):
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def test_create_clone(self):
        block_size = 32768

        # Create 2 Volumes
        self.volume1 = str(uuid4())
        self.volume2 = str(uuid4())
        self.volume.create(self.volume1)
        self.volume.create(self.volume2)

        # Write some stuff to volume1
        volume = self.volume.get(self.volume1)
        with directio.open(volume['path']) as file:
            size = directio.size(volume['path'])
            for i in xrange(0, size / block_size):
                # 32768 / 4 = 8192
                file.write('ZERG' * (block_size / 4))

        # Create an export for volume 2
        info = self.export.create(self.volume2)

        # Now clone Volume 1 to Volume 2
        clone = self.volume.create_clone(self.volume1, self.volume2,
                                         info['name'], '127.0.0.1',
                                         3260, lock=MockResourceLock())

        compare_block = 'ZERG' * (block_size / 4)
        # Ensure the stuff we wrote to Volume 1 is in Volume 2
        volume = self.volume.get(self.volume2)
        with directio.open(volume['path']) as file:
            size = directio.size(volume['path'])
            for i in xrange(0, size / block_size):
                block = file.read(block_size)
                self.assertTrue(block == compare_block)

        # Remove the export
        self.export.delete(self.volume2)

    def test_cinder_callback(self):
        block_size = 32768

        # Create 2 Volumes
        self.volume1 = str(uuid4())
        self.volume2 = str(uuid4())
        self.volume.create(self.volume1)
        self.volume.create(self.volume2)

        # Create an export for volume 2
        info = self.export.create(self.volume2)

        cinder = MockCinder()
        # Now clone Volume 1 to Volume 2
        clone = self.volume.create_clone(self.volume1, self.volume2,
                                         info['name'], '127.0.0.1',
                                         3260, lock=MockResourceLock(),
                                         cinder=cinder)

        # Assert cinder was called at least once
        self.assertTrue(cinder.called != 0)
        # Remove the export
        self.export.delete(self.volume2)
Example #8
File: test_backup.py  Project: audip/lunr
    def setUp(self):
        IetTest.setUp(self)
        self.tempdir = mkdtemp()
        self.conf = self.config(self.tempdir)
        self.volume = VolumeHelper(self.conf)
        self.backup = BackupHelper(self.conf)
Example #9
File: test_backup.py  Project: audip/lunr
class TestBackupHelper(IetTest):

    def setUp(self):
        IetTest.setUp(self)
        self.tempdir = mkdtemp()
        self.conf = self.config(self.tempdir)
        self.volume = VolumeHelper(self.conf)
        self.backup = BackupHelper(self.conf)

    def tearDown(self):
        backup_dir = self.conf.string('disk', 'path', None)
        # Remove the temp dir where backups are created
        shutil.rmtree(self.tempdir)
        IetTest.tearDown(self)

    @classmethod
    def setUpClass(cls):
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def test_create_snapshot(self):
        # Create a Volume
        volume_id = str(uuid4())
        self.volume.create(volume_id)
        # Create a snap-shot with a timestamp of 123456
        backup_id = str(uuid4())
        snapshot = self.volume.create_snapshot(volume_id, backup_id, '123456')

        # Assert snapshot values exist
        self.assertEquals(int(snapshot['timestamp']), 123456)
        self.assertEquals(snapshot['backup_id'], backup_id)
        self.assertEquals(snapshot['id'], backup_id)
        self.assertIn('size', snapshot)
        self.assertIn('path', snapshot)
        self.assertIn('origin', snapshot)
        self.assertTrue(path.exists(snapshot['path']))

        # Deleting the origin also removes the snapshot
        self.volume.remove(self.volume.get(volume_id)['path'])

    def test_delete_active_backup_origin_fails(self):
        # Create a Volume
        volume_id = str(uuid4())
        self.volume.create(volume_id)
        volume_id2 = str(uuid4())
        self.volume.create(volume_id2)
        # Create a snap-shot with a timestamp of 123456
        backup_id = str(uuid4())
        snapshot = self.volume.create_snapshot(volume_id, backup_id, '123456')
        # Ensure requests to delete the origin fail
        self.assertRaises(ServiceUnavailable, self.volume.delete, volume_id)
        # Should delete ok, no backup running
        self.volume.delete(volume_id2, lock=MockResourceLock())
        # Deleting the origin also removes the snapshot
        self.volume.remove(self.volume.get(volume_id)['path'])

    def test_delete_active_backup_origin_fails_is_isolated(self):
        first_vol_id = 'vol1'
        self.volume.create(first_vol_id)
        second_vol_id = 'vol11'  # contains 'vol1'
        self.volume.create(second_vol_id)
        backup_id = 'backup1'
        second_vol_snapshot = self.volume.create_snapshot(
            second_vol_id, backup_id)
        self.backup.create(second_vol_snapshot, 'backup1',
                           lock=MockResourceLock())
        # delete 'vol1' should not fail because of snapshot on 'vol11'
        self.volume.delete(first_vol_id, lock=MockResourceLock())
        # cleanup
        self.volume.delete(backup_id, lock=MockResourceLock())
        self.volume.delete(second_vol_id, lock=MockResourceLock())

    def test_snapshot_scrub(self):
        block_size = 32768
        # Create a Volume
        volume_id = str(uuid4())
        self.volume.create(volume_id)
        # Get the volume information
        volume = self.volume.get(volume_id)
        # Fill the volume with 'ZERG's
        with directio.open(volume['path']) as file:
            size = directio.size(volume['path'])
            for i in xrange(0, size / block_size):
                # 32768 / 4 = 8192
                file.write('ZERG' * (block_size / 4))

        # Create a snap-shot with a timestamp of 123456
        backup_id = str(uuid4())
        snapshot = self.volume.create_snapshot(volume_id, backup_id, '123456')

        # Now that the snapshot is made, simulate users making writes
        # to the origin during a normal backup. This should generate
        # exceptions in the cow
        with directio.open(volume['path']) as file:
            # Overwrite all the zergs.
            for i in xrange(0, size / block_size):
                file.write('A' * block_size)

        # Tell scrub we don't want it to remove the cow after scrubbing
        scrub = Scrub(LunrConfig())

        # Build the cow-zero
        (cow_name, cow_path) = scrub.get_writable_cow(snapshot, volume)

        with directio.open(cow_path) as file:
            size = directio.size(cow_path)
            for i in xrange(0, size / block_size):
                block = file.read(block_size)
                if 'ZERG' in block:
                    self.assert_(True)
                    break

        with directio.open(self._ramdisk) as file:
            size = directio.size(self._ramdisk)
            for i in xrange(0, size / block_size):
                block = file.read(block_size)
                if 'ZERG' in block:
                    self.assert_(True)
                    break

        # Scrub the cow of all exceptions
        scrub.scrub_cow(cow_path)
        scrub.remove_cow(cow_name)

        # Remove & scrub the volume. LVM removes snapshot itself.
        self.volume.remove_lvm_volume(volume)

        # Read full disk for hidden zergs.
        with directio.open(self._ramdisk) as file:
            size = directio.size(self._ramdisk)
            for i in xrange(0, size / block_size):
                block = file.read(block_size)
                if 'ZERG' in block:
                    self.fail("Found zergs on disk: %s" % self._ramdisk)

    def test_writable_cow_multiline_table(self):
        # Let's do some silly math
        size = directio.size(self._ramdisk)
        megs = size / 1024 / 1024
        megs = megs - megs % 4
        # 12 megs for a volume, 4 for lvm itself
        alloc = megs - 12 - 4
        vg = self.conf.string('volume', 'volume_group', None)
        # Reserve a 4m hole at the front, and 8m at the end
        execute('lvcreate', vg, size='4m', name='tmpvol')
        execute('lvcreate', vg, size='%sm' % alloc, name='wasted')
        execute('lvremove', '%s/tmpvol' % vg, force=None)
        foo = execute('pvs', self._ramdisk)
        foo = execute('vgs', vg)
        foo = execute('lvs', vg)
        volume_id = str(uuid4())
        self.volume.create(volume_id)
        volume = self.volume.get(volume_id)
        execute('lvremove', '%s/wasted' % vg, force=None)
        dmname = '%s-%s' % (re.sub('-', '--', vg),
                            re.sub('-', '--', volume_id))
        foo = execute('dmsetup', 'table', dmname)
        self.assert_('\n' in foo)
        backup_id = str(uuid4())
        snapshot = self.volume.create_snapshot(volume_id, backup_id, '123456')
        scrub = Scrub(LunrConfig())
        (cow_name, cow_path) = scrub.get_writable_cow(snapshot, volume)
        execute('dmsetup', 'remove', cow_name)
        self.assertTrue(True)

    def test_create_backup(self):
        # Create a Volume
        volume_id = str(uuid4())
        self.volume.create(volume_id)
        # Create a snap-shot with a timestamp of 123456
        backup_id = str(uuid4())
        snapshot = self.volume.create_snapshot(volume_id, backup_id, '123456')

        def callback():
            # Delete the snapshot after completion
            self.volume.delete(snapshot['id'])

        # Create the backup
        self.backup.create(snapshot, backup_id,
                           callback=callback,
                           lock=MockResourceLock())

        # Assert the backup exists in the dir and has the
        # same name as the volume
        backup_dir = self.conf.string('disk', 'path', None)
        self.assertTrue(path.exists(path.join(backup_dir, volume_id)))

        # Deleting the origin also removes the snapshot
        self.volume.remove(self.volume.get(volume_id)['path'])

    def test_restore_backup(self):
        # Create a Volume
        volume_id = str(uuid4())
        self.volume.create(volume_id)

        # Write ZERG to the volume
        volume = self.volume.get(volume_id)
        with directio.open(volume['path']) as file:
            size = directio.size(volume['path'])
            block_size = 32768
            for i in xrange(0, size / block_size):
                file.write('ZERG' * (block_size / 4))

        # Create a snap-shot with a timestamp of 123456
        backup_id = str(uuid4())
        snapshot = self.volume.create_snapshot(volume_id, backup_id, '123456')

        def callback():
            # Delete the snapshot after completion
            self.volume.delete(snapshot['id'])

        # Create the backup
        self.backup.create(snapshot, backup_id,
                           callback=callback,
                           lock=MockResourceLock())

        # Deleting the origin also removes the snapshot
        self.volume.remove(self.volume.get(volume_id)['path'])

        # Create a Restore Volume
        restore_volume_id = str(uuid4())
        self.volume.create(
            restore_volume_id, backup_source_volume_id=volume_id,
            backup_id=backup_id, lock=MockResourceLock())
        volume = self.volume.get(restore_volume_id)

        # Read the restored volume, it should contain ZERGS
        with directio.open(volume['path']) as file:
            size = directio.size(volume['path'])
            for i in xrange(0, size / block_size):
                block = file.read(block_size)
                if 'ZERG' not in block:
                    self.fail("zergs missing on disk: %s" % volume['path'])

    def test_overflow_snapshot(self):
        # Better to use conf, but helper is already created.
        self.volume.max_snapshot_bytes = 4 * 1024 * 1024
        volume_id = str(uuid4())
        self.volume.create(volume_id)
        volume = self.volume.get(volume_id)
        backup_id = str(uuid4())
        snapshot = self.volume.create_snapshot(volume_id, backup_id, '123456')

        def callback():
            self.volume.delete(snapshot['id'])
            self.fail("didnt get the proper error callback")

        def error_callback():
            self.volume.delete(snapshot['id'])
            error_callback.ran = True
        error_callback.ran = False
        # Overflow the snapshot! Only reserved 4m
        with directio.open(volume['path']) as file:
            size = directio.size(volume['path'])
            block_size = 32768
            for i in xrange(0, size / block_size):
                file.write('ZERG' * (block_size / 4))
        with open(snapshot['path']) as file:
            self.assertRaises(IOError, file.read, block_size)
        self.backup.create(
            snapshot, backup_id, callback=callback,
            error_callback=error_callback, lock=MockResourceLock())
        self.assertTrue(error_callback.ran)

        # Make sure scrubbing still happened correctly.
        with directio.open(volume['path']) as file:
            size = directio.size(volume['path'])
            block_size = 32768
            for i in xrange(0, size / block_size):
                file.write('\0' * block_size)

        # Read full disk for hidden zergs.
        with directio.open(self._ramdisk) as file:
            size = directio.size(self._ramdisk)
            for i in xrange(0, size / block_size):
                block = file.read(block_size)
                if 'ZERG' in block:
                    self.fail("Found zergs on disk: %s" % self._ramdisk)
Example #10
    def setUp(self):
        IetTest.setUp(self)
        self.tempdir = mkdtemp()
        self.conf = self.config(self.tempdir)
        self.volume = VolumeHelper(self.conf)