Example #1
 def __init__(self, conf):
     self.volume_group = conf.string('volume', 'volume_group',
                                     'lunr-volume')
     self.device_prefix = conf.string('volume', 'device_prefix', '/dev')
     self.run_dir = conf.string('storage', 'run_dir', conf.path('run'))
     self.convert_gbs = conf.int('glance', 'convert_gbs', 100)
     self.glance_mgmt_urls = conf.list('glance', 'glance_mgmt_urls', None)
     self.glance_base_multiplier = conf.float('glance',
                                              'base_convert_multiplier',
                                              2.0)
     self.glance_custom_multiplier = conf.float('glance',
                                                'custom_convert_multiplier',
                                                4.0)
     self.skip_fork = conf.bool('storage', 'skip_fork', False)
     self.scrub = Scrub(conf)
     self.conf = conf
     self.max_snapshot_bytes = conf.int('volume', 'max_snapshot_bytes',
                                        None)
     if self.max_snapshot_bytes:
         self.sector_size = conf.int('volume', 'sector_size', 512)
         max_bytes = (self.max_snapshot_bytes -
                      self.max_snapshot_bytes % self.sector_size)
         if max_bytes != self.max_snapshot_bytes:
             logger.info("Setting max_snapshot_size to %s" % max_bytes)
             self.max_snapshot_bytes = max_bytes
     self.has_old_mkfs = self.old_mkfs()
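For reference, each conf.<type>('section', 'option', default) call above reads one option from an INI-style section of the Lunr config, falling back to the default when the option is absent. A sketch of the sections this constructor expects (the section and option names come from the snippet; every value shown is only illustrative):

[volume]
volume_group = lunr-volume
device_prefix = /dev
max_snapshot_bytes = 17179869184
sector_size = 512

[glance]
convert_gbs = 100
glance_mgmt_urls = http://glance1.example.com:9292, http://glance2.example.com:9292
base_convert_multiplier = 2.0
custom_convert_multiplier = 4.0

[storage]
run_dir = /var/run/lunr
skip_fork = false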
Example #3
File: wsgi.py Project: audip/lunr
 def log_config(self, conf):
     s = StringIO()
     conf.write(s)
     s.seek(0)
     logger.info("LunrConfig:")
     for line in s:
         logger.info(line.rstrip())
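The snippet serializes the whole config into a StringIO and logs it line by line. It can be exercised on its own roughly as follows (a minimal sketch in which the standard-library ConfigParser and a basic logging setup stand in for LunrConfig and Lunr's logger):

import logging
from io import StringIO
from configparser import ConfigParser

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('lunr')

# stand-in for a parsed LunrConfig
conf = ConfigParser()
conf.read_dict({'volume': {'volume_group': 'lunr-volume'},
                'storage': {'skip_fork': 'false'}})

s = StringIO()
conf.write(s)            # dump the parsed config into the buffer
s.seek(0)
logger.info("LunrConfig:")
for line in s:           # one log record per rendered config line
    logger.info(line.rstrip())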
Example #4
 def _do_create(self,
                volume_id,
                size_str,
                tag,
                backup_source_volume_id=None):
     try:
         out = execute('lvcreate',
                       self.volume_group,
                       name=volume_id,
                       size=size_str,
                       addtag=tag)
     except ProcessError, e:
         if not e.errcode == 5 and 'already exists' not in e.err:
             raise
         # We ran out of space on the storage node!
         if "Insufficient free extents" in e.err \
                 or "insufficient free space" in e.err:
             logger.error(e.err)
             raise ServiceUnavailable("LVM reports insufficient "
                                      "free space on drive")
         # If we are requesting a restore, and the existing volume is this
         # same failed restore, it's not an error.
         if backup_source_volume_id and backup_source_volume_id == \
                 self.get(volume_id).get('backup_source_volume_id', False):
             logger.info("Restarting failed restore on '%s'" % volume_id)
         else:
             raise AlreadyExists("Unable to create a new volume named "
                                 "'%s' because one already exists." %
                                 volume_id)
Example #5
    def create(self, volume_id, size=None, backup_source_volume_id=None,
               backup_id=None, image_id=None, callback=None, lock=None,
               account=None, cinder=None, scrub_callback=None):

        op_start = time()

        size_str = self._get_size_str(size)
        tmp_vol = None

        if image_id:
            mgmt_glance = get_glance_conn(self.conf, tenant_id=account,
                                          glance_urls=self.glance_mgmt_urls)
            try:
                glance_start = time()
                image = mgmt_glance.head(image_id)
                logger.info('STAT: glance.head %r. Time: %r' %
                            (image_id, time() - glance_start))
                status = getattr(image, 'status', 'ACTIVE')
                if status.upper() != 'ACTIVE':
                    raise InvalidImage("Non-active image status: %s" % status)
                min_disk = getattr(image, 'min_disk', 0)
                if min_disk > 127:
                    raise InvalidImage("Image > 127GB: %s" % image_id)
                if min_disk:
                    multiplier = self._get_scratch_multiplier(image)
                    convert_gbs = int(min_disk * multiplier)
                else:
                    convert_gbs = self.convert_gbs
                tmp_vol = self.create_convert_scratch(image, convert_gbs)
            except GlanceError, e:
                logger.warning("Error fetching glance image: %s" % e)
                raise InvalidImage("Error fetching image: %s" % image_id)
Example #6
 def __init__(self, conf):
     self.volume_group = conf.string('volume', 'volume_group',
                                     'lunr-volume')
     self.device_prefix = conf.string('volume', 'device_prefix', '/dev')
     self.run_dir = conf.string('storage', 'run_dir', conf.path('run'))
     self.convert_gbs = conf.int('glance', 'convert_gbs', 100)
     self.glance_mgmt_urls = conf.list('glance', 'glance_mgmt_urls', None)
     self.glance_base_multiplier = conf.float('glance',
                                              'base_convert_multiplier',
                                              2.0)
     self.glance_custom_multiplier = conf.float(
         'glance', 'custom_convert_multiplier', 4.0)
     self.skip_fork = conf.bool('storage', 'skip_fork', False)
     self.scrub = Scrub(conf)
     self.conf = conf
     self.max_snapshot_bytes = conf.int('volume', 'max_snapshot_bytes',
                                        None)
     if self.max_snapshot_bytes:
         self.sector_size = conf.int('volume', 'sector_size', 512)
         max_bytes = (self.max_snapshot_bytes -
                      self.max_snapshot_bytes % self.sector_size)
         if max_bytes != self.max_snapshot_bytes:
             logger.info("Setting max_snapshot_size to %s" % max_bytes)
             self.max_snapshot_bytes = max_bytes
     self.has_old_mkfs = self.old_mkfs()
Example #7
    def create(self, volume_id, size=None, backup_source_volume_id=None,
               backup_id=None, image_id=None, callback=None, lock=None,
               account=None, cinder=None, scrub_callback=None):

        op_start = time()

        size_str = self._get_size_str(size)
        tmp_vol = None
        snet_glance = None

        if image_id:
            mgmt_glance = get_glance_conn(self.conf, tenant_id=account,
                                          glance_urls=self.glance_mgmt_urls)
            snet_glance = get_glance_conn(self.conf, tenant_id=account)
            try:
                glance_start = time()
                image = mgmt_glance.head(image_id)
                logger.info('STAT: glance.head %r. Time: %r' %
                            (image_id, time() - glance_start))
                status = getattr(image, 'status', 'ACTIVE')
                if status.upper() != 'ACTIVE':
                    raise InvalidImage("Non-active image status: %s" % status)
                min_disk = getattr(image, 'min_disk', 0)
                if min_disk > 127:
                    raise InvalidImage("Image > 127GB: %s" % image_id)
                if min_disk:
                    multiplier = self._get_scratch_multiplier(image)
                    convert_gbs = int(min_disk * multiplier)
                else:
                    convert_gbs = self.convert_gbs
                tmp_vol = self.create_convert_scratch(image, convert_gbs)
            except GlanceError, e:
                logger.warning("Error fetching glance image: %s" % e)
                raise InvalidImage("Error fetching image: %s" % image_id)
Example #8
    def get_coalesced_vhd(self, path):
        # Check for old style, image.vhd
        old_style = self.get_oldstyle_vhd(path)
        if old_style:
            return old_style

        op_start = time()

        chain = self.get_vhd_chain(path)
        if len(chain) == 0:
            raise ValueError('Invalid image. Bad vhd chain.')

        journal = os.path.join(path, 'vhdjournal')

        self.reparent_vhd_chain(chain)
        self.repair_vhd_chain(chain)

        while len(chain) > 1:
            child = chain.pop(0)
            parent = chain[0]
            child_size = execute('vhd-util', 'query', '-n', child, '-v',
                                 sudo=False)
            parent_size = execute('vhd-util', 'query', '-n', parent, '-v',
                                  sudo=False)
            if child_size != parent_size:
                execute('vhd-util', 'resize', '-n', parent,
                        '-s', child_size, '-j', journal, sudo=False)
            execute('vhd-util', 'coalesce', '-n', child, sudo=False)

        duration = time() - op_start
        logger.info('STAT: get_coalesced_vhd Time: %r.' % duration)
        return chain[0]
Example #10
 def write(self, param, value):
     try:
         with open(os.path.join(self.path, param), 'w') as f:
             f.write(value)
             logger.info("Writing cgroup: %s: %s" % (param, value))
     except IOError, e:
         msg = "Cgroup write error: %s/%s: %s" % (self.path, param, e)
         logger.error(msg)
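The helper simply writes a value into a cgroup control file under self.path and logs the attempt. A self-contained sketch of the same pattern (the cgroup path, parameter name, and throttle value below are illustrative, not taken from Lunr):

import logging
import os

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('cgroup')

def write_cgroup(path, param, value):
    try:
        with open(os.path.join(path, param), 'w') as f:
            f.write(value)
            logger.info("Writing cgroup: %s: %s" % (param, value))
    except IOError as e:
        logger.error("Cgroup write error: %s/%s: %s" % (path, param, e))

# e.g. cap read IOPS for block device 8:0 in a hypothetical blkio cgroup
write_cgroup('/sys/fs/cgroup/blkio/lunr', 'blkio.throttle.read_iops_device', '8:0 1000')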
Example #12
def delete_manifest(conn, volume_id, lock_file):
    fd = aquire_lock(lock_file)
    op_start = time()
    conn.delete_object(volume_id, 'manifest')
    duration = time() - op_start
    logger.info("STAT: delete_manifest for %r Duration: %r" % (volume_id,
                                                               duration))
    os.remove(lock_file)
    release_lock(lock_file)
Example #14
File: backup.py Project: audip/lunr
 def remove_container(self, volume):
     for attempt in range(0, 2):
         try:
             logger.info("Removing container '%s'" % volume['id'])
             conn = get_conn(self.conf)
             return conn.delete_container(volume['id'])
         except exc.ClientException, e:
             if e.http_status == 404:
                 return
             # Audit the backups, and try again
             self.audit(volume)
Example #15
def load_manifest(conn, volume_id, lock_file):
    fd = aquire_lock(lock_file)
    op_start = time()
    _headers, raw_json_string = conn.get_object(volume_id, 'manifest',
                                                newest=True)
    duration = time() - op_start
    logger.info("STAT: load_manifest for %r Duration: %r" % (volume_id,
                                                             duration))
    manifest = Manifest.loads(raw_json_string)
    os.write(fd, raw_json_string)
    return manifest
Example #17
 def _read_initial_cgroups(self):
     cgroups = defaultdict(dict)
     try:
         for line in open(self._updates_path(), 'r+'):
             try:
                 volume_id, name, throttle = line.split()
                 cgroups[name][volume_id] = throttle
             except ValueError:
                 pass
     except IOError, e:
         logger.info('Failed reading cgroup updates: %s' % e)
Example #19
def save_manifest(manifest, conn, volume_id, lock_file):
    fd = aquire_lock(lock_file)
    raw_json_string = manifest.dumps()
    os.ftruncate(fd, 0)
    os.write(fd, raw_json_string)
    op_start = time()
    conn.put_object(volume_id, 'manifest', raw_json_string)
    duration = time() - op_start
    logger.info("STAT: save_manifest for %r Duration: %r" % (volume_id,
                                                             duration))
    release_lock(lock_file)
Example #22
        def log_duration():
            duration = time() - op_start

            parts = ['STAT: Create Volume']
            if volume_id:
                parts.append('Volume_ID: %s' % (volume_id,))
            if backup_id:
                parts.append('Backup_ID: %s' % (backup_id,))
            if backup_source_volume_id:
                parts.append('Backup_Source_Volume_ID: %s' %
                             (backup_source_volume_id,))
            if image_id:
                parts.append('Image_ID: %s' % (image_id,))
            parts.append('Size: %s' % size)
            parts.append('Duration: %s' % duration)

            logger.info(' '.join(parts))
Example #23
    def rename(self, req, lock):
        try:
            logger.info("Renaming logical volume inprogress .")
            volume = self.helper.volumes.get(self.id)
        except NotFound:
            raise HTTPNotFound("Cannot rename non-existant volume '%s'" %
                               self.id)

        callback = None
        new_name = req.params.get('new_name')
        self.helper.volumes.rename(self.id,
                                   new_name,
                                   lock=lock,
                                   callback=callback)
        logger.info("Renaming logical volume done.")
        return Response(volume)
Example #24
        def log_duration():
            duration = time() - op_start

            parts = ['STAT: Create Volume']
            if volume_id:
                parts.append('Volume_ID: %s' % (volume_id, ))
            if backup_id:
                parts.append('Backup_ID: %s' % (backup_id, ))
            if backup_source_volume_id:
                parts.append('Backup_Source_Volume_ID: %s' %
                             (backup_source_volume_id, ))
            if image_id:
                parts.append('Image_ID: %s' % (image_id, ))
            parts.append('Size: %s' % size)
            parts.append('Duration: %s' % duration)

            logger.info(' '.join(parts))
Example #25
 def __init__(self, conf):
     self.ietd_config = conf.string("export", "ietd_config", "/etc/iet/ietd.conf")
     self.volume_group = conf.string("volume", "volume_group", "lunr-volume")
     self.iqn_prefix = conf.string("export", "iqn_prefix", "iqn.2010-11.com.rackspace")
     self.device_prefix = conf.string("export", "device_prefix", "/dev")
     self.proc_iet_volume = conf.string("export", "proc_iet_volume", "/proc/net/iet/volume")
     self.proc_iet_session = conf.string("export", "proc_iet_session", "/proc/net/iet/session")
     self.initiators_allow = conf.string("export", "initiators_allow", "/etc/iet/initiators.allow")
     self.default_allows = conf.string("export", "default_allows", "ALL")
     logger.info("Setting export default_allows: %s" % self.default_allows)
     subnets = conf.list("export", "allow_subnets", "0.0.0.0/0")
     logger.debug("Setting export allow_subnets: %s" % subnets)
     self.allow_subnets = []
     for subnet in subnets:
         if subnet:
             self.allow_subnets.append(netaddr.IPNetwork(subnet))
     self.run_dir = conf.string("storage", "run_dir", conf.path("run"))
Example #26
 def remove_lvm_snapshot(self, snapshot):
     try:
         op_start = time()
         volume = self.get(snapshot['origin'])
         logger.rename('lunr.storage.helper.volume.remove_lvm_snapshot')
         self.scrub.scrub_snapshot(snapshot, volume)
         self.remove(snapshot['path'])
         # TODO: Failure to scrub a snapshot is un-acceptable
         # If we catch an exception, we should mark the snapshot
         # Or make this as recoverable as possible
         duration = time() - op_start
         logger.info("STAT: remove_lvm_snapshot(%r) Time: %r" %
                     (volume['path'], duration))
     except Scrub, e:
         logger.exception(
             "scrub snapshot failed with '%r' after %r seconds" %
             (e, time() - op_start))
Example #27
 def remove_lvm_snapshot(self, snapshot):
     try:
         op_start = time()
         volume = self.get(snapshot['origin'])
         logger.rename('lunr.storage.helper.volume.remove_lvm_snapshot')
         self.scrub.scrub_snapshot(snapshot, volume)
         self.remove(snapshot['path'])
         # TODO: Failure to scrub a snapshot is un-acceptable
         # If we catch an exception, we should mark the snapshot
         # Or make this as recoverable as possible
         duration = time() - op_start
         logger.info("STAT: remove_lvm_snapshot(%r) Time: %r" %
                     (volume['path'],
                      duration))
     except Scrub, e:
         logger.exception(
             "scrub snapshot failed with '%r' after %r seconds" %
             (e, time() - op_start))
Example #28
 def remove_lvm_volume(self, volume):
     try:
         op_start = time()
         size = volume['size'] / 1024 / 1024 / 1024
         logger.rename('lunr.storage.helper.volume.remove_lvm_volume')
         setproctitle("lunr-remove: " + volume['id'])
         # Scrub the volume
         self.scrub.scrub_volume(volume['path'])
         # Remove the device
         self.remove(volume['path'])
         duration = time() - op_start
         logger.info(
             'STAT: remove_lvm_volume(%r) '
             'Size: %r GB Time: %r s Speed: %r MB/s' %
             (volume['path'], size, duration, size * 1024 / duration))
     except ProcessError, e:
         logger.exception(
             "delete volume failed with '%r' after %r seconds" %
             (e, time() - op_start))
Example #29
 def restore(self, dest_volume, backup_source_volume_id,
             backup_id, size, cinder):
     op_start = time()
     logger.rename('lunr.storage.helper.volume.restore')
     setproctitle("lunr-restore: " + dest_volume['id'])
     job_stats_path = self._stats_file(dest_volume['id'])
     worker = Worker(backup_source_volume_id, conf=self.conf,
                     stats_path=job_stats_path)
     try:
         worker.restore(backup_id, dest_volume['path'],
                        dest_volume['id'], cinder)
     finally:
         os.unlink(job_stats_path)
     self.update_tags(dest_volume, {})
     duration = time() - op_start
     logger.info('STAT: Restore %r from %r. '
                 'Size: %r GB Time: %r s Speed: %r MB/s' %
                 (dest_volume['id'], backup_id, size, duration,
                  size * 1024 / duration))
Example #30
 def untar_image(self, path, image):
     tarball = os.path.join(path, 'image')
     op_start = time()
     execute('tar', '-C', path, '-zxf', tarball, sudo=False)
     duration = time() - op_start
     mbytes = image.size / 1024 / 1024
     uncompressed = 0
     for f in os.listdir(path):
         # No fair counting twice.
         if f == 'image':
             continue
         fpath = os.path.join(path, f)
         if os.path.isfile(fpath):
             uncompressed += os.path.getsize(fpath)
     uncompressed = uncompressed / 1024 / 1024
     logger.info('STAT: tar %r. Compressed Size: %r MB '
                 'Uncompressed Size: %r MB '
                 'Time: %r Speed: %r' % (image.id, mbytes, uncompressed,
                                         duration, uncompressed / duration))
Example #31
    def copy_image(self, volume, image, glance, tmp_vol, scrub_callback):
        logger.rename('lunr.storage.helper.volume.copy_image')
        setproctitle("lunr-copy-image: " + volume['id'])
        copy_image_start = time()
        convert_dir = None
        try:
            if image.disk_format == 'raw':
                self.write_raw_image(glance, image, volume['path'])
                return

            convert_dir = self.prepare_tmp_vol(tmp_vol)

            if not os.path.exists(convert_dir):
                raise ValueError("Convert dir doesn't exist!")

            try:
                path = mkdtemp(dir=convert_dir)
                logger.info("Image convert tmp dir: %s" % path)
                image_file = os.path.join(path, 'image')
                self.write_raw_image(glance, image, image_file)

                if (image.disk_format == 'vhd' and
                        image.container_format == 'ovf'):
                    self.untar_image(path, image)
                    image_file = self.get_coalesced_vhd(path)

                op_start = time()
                out = execute('qemu-img', 'convert', '-O', 'raw', image_file,
                              volume['path'])
                duration = time() - op_start
                mbytes = os.path.getsize(image_file) / 1024 / 1024
                logger.info('STAT: image convert %r. Image Size: %r MB '
                            'Time: %r Speed: %r' %
                            (image.id, mbytes, duration, mbytes / duration))
            except Exception, e:
                logger.exception("Exception in image conversion")
                raise

        except Exception, e:
            # We have to clean this up no matter what happened.
            # Delete volume synchronously. Clean up db in callback.
            logger.exception('Unhandled exception in copy_image')
            self.remove_lvm_volume(volume)
Example #32
    def copy_image(self, volume, image, glance, tmp_vol, scrub_callback):
        logger.rename('lunr.storage.helper.volume.copy_image')
        setproctitle("lunr-copy-image: " + volume['id'])
        copy_image_start = time()
        convert_dir = None
        try:
            if image.disk_format == 'raw':
                self.write_raw_image(glance, image, volume['path'])
                return

            convert_dir = self.prepare_tmp_vol(tmp_vol)

            if not os.path.exists(convert_dir):
                raise ValueError("Convert dir doesn't exist!")

            try:
                path = mkdtemp(dir=convert_dir)
                logger.info("Image convert tmp dir: %s" % path)
                image_file = os.path.join(path, 'image')
                self.write_raw_image(glance, image, image_file)

                if (image.disk_format == 'vhd'
                        and image.container_format == 'ovf'):
                    self.untar_image(path, image)
                    image_file = self.get_coalesced_vhd(path)

                op_start = time()
                out = execute('qemu-img', 'convert', '-O', 'raw', image_file,
                              volume['path'])
                duration = time() - op_start
                mbytes = os.path.getsize(image_file) / 1024 / 1024
                logger.info('STAT: image convert %r. Image Size: %r MB '
                            'Time: %r Speed: %r' %
                            (image.id, mbytes, duration, mbytes / duration))
            except Exception, e:
                logger.exception("Exception in image conversion")
                raise

        except Exception, e:
            # We have to clean this up no matter what happened.
            # Delete volume synchronously. Clean up db in callback.
            logger.exception('Unhandled exception in copy_image')
            self.remove_lvm_volume(volume)
Example #34
 def remove_lvm_volume(self, volume):
     try:
         op_start = time()
         size = volume['size'] / 1024 / 1024 / 1024
         logger.rename('lunr.storage.helper.volume.remove_lvm_volume')
         setproctitle("lunr-remove: " + volume['id'])
         # Scrub the volume
         self.scrub.scrub_volume(volume['path'])
         # Remove the device
         self.remove(volume['path'])
         duration = time() - op_start
         logger.info('STAT: remove_lvm_volume(%r) '
                     'Size: %r GB Time: %r s Speed: %r MB/s' %
                     (volume['path'],
                      size, duration,  size * 1024 / duration))
     except ProcessError, e:
         logger.exception(
             "delete volume failed with '%r' after %r seconds" %
             (e, time() - op_start))
Example #35
 def untar_image(self, path, image):
     tarball = os.path.join(path, 'image')
     op_start = time()
     execute('tar', '-C', path, '-zxf', tarball, sudo=False)
     duration = time() - op_start
     mbytes = image.size / 1024 / 1024
     uncompressed = 0
     for f in os.listdir(path):
         # No fair counting twice.
         if f == 'image':
             continue
         fpath = os.path.join(path, f)
         if os.path.isfile(fpath):
             uncompressed += os.path.getsize(fpath)
     uncompressed = uncompressed / 1024 / 1024
     logger.info('STAT: tar %r. Compressed Size: %r MB '
                 'Uncompressed Size: %r MB '
                 'Time: %r Speed: %r' %
                 (image.id, mbytes, uncompressed,
                  duration, uncompressed / duration))
Example #36
    def create_clone(self, volume_id, clone_id, iqn, iscsi_ip, iscsi_port,
                     callback=None, lock=None, cinder=None):
        volume = self.get(volume_id)
        size = volume['size'] / 1024 / 1024 / 1024
        logger.info("Cloning source '%s' to volume '%s'" %
                    (volume_id, clone_id))
        snapshot_name = uuid.uuid4()
        snapshot = self.create_snapshot(volume_id, snapshot_name,
                                        clone_id=clone_id, type_='clone')
        logger.info("Snapshot to clone id: '%s'" % snapshot['id'])
        try:
            new_volume = ISCSIDevice(iqn, iscsi_ip, iscsi_port)
            new_volume.connect()
        except (ISCSILoginFailed, ISCSINotConnected):
            msg = "Unable to open iscsi connection to %s:%s - %s" % \
                  (iscsi_ip, iscsi_port, iqn)
            logger.error(msg)
            self.delete(snapshot['id'])
            raise ServiceUnavailable(msg)

        spawn(lock, self._copy_clone, snapshot, clone_id, size, new_volume,
              cinder, callback=callback, skip_fork=self.skip_fork)
Example #40
File: base.py Project: audip/lunr
def get_registration_exceptions(local_info, node_info):
    exceptions = {}
    for k, v in local_info.items():
        if 'hostname' in k and node_info[k] != v:
            try:
                node_value = socket.gethostbyname(node_info[k])
            except socket.error:
                # skip hostname translation on failure
                pass
        else:
            try:
                node_value = node_info[k]
            except KeyError, e:
                logger.error("During registration; missing '%s' key in api "
                             "server response" % k)
                continue
        if node_value != v:
            logger.warning("Invalid '%s' registered "
                           "as %r != %r" % (k, node_value, v))
            exceptions[k] = v
        else:
            logger.info("Verified '%s' registered as '%s'" % (k, v))
Example #41
File: export.py Project: audip/lunr
    def update(self, request):
        """
        POST /v1.0/{account_id}/volumes/{id}/export

        Update export info.
        Params: status, instance_id, and mountpoint.
        This also pulls the connected ip and initiator from the storage node.
        """
        try:
            volume = self.account_query(Volume).filter_by(id=self.id).one()
        except NoResultFound:
            raise HTTPNotFound(
                "Cannot update export for non-existent volume '%s'" % self.id)

        update_params, meta_params = filter_update_params(request, Export)

        try:
            node_export = self.node_request(volume.node, 'GET',
                                            '/volumes/%s/export' % volume.name)
        except NodeError, e:
            logger.info('Node error fetching export: %s' % volume.id)
            node_export = {}
Example #43
File: export.py Project: audip/lunr
 def __init__(self, conf):
     self.ietd_config = conf.string('export', 'ietd_config',
                                    '/etc/iet/ietd.conf')
     self.volume_group = conf.string('volume', 'volume_group',
                                     'lunr-volume')
     self.iqn_prefix = conf.string('export', 'iqn_prefix',
                                   'iqn.2010-11.com.rackspace')
     self.device_prefix = conf.string('export', 'device_prefix', '/dev')
     self.proc_iet_volume = conf.string('export', 'proc_iet_volume',
                                        '/proc/net/iet/volume')
     self.proc_iet_session = conf.string('export', 'proc_iet_session',
                                         '/proc/net/iet/session')
     self.initiators_allow = conf.string('export', 'initiators_allow',
                                         '/etc/iet/initiators.allow')
     self.default_allows = conf.string('export', 'default_allows', 'ALL')
     logger.info("Setting export default_allows: %s" % self.default_allows)
     subnets = conf.list('export', 'allow_subnets', '0.0.0.0/0')
     logger.debug("Setting export allow_subnets: %s" % subnets)
     self.allow_subnets = []
     for subnet in subnets:
         if subnet:
             self.allow_subnets.append(netaddr.IPNetwork(subnet))
     self.run_dir = conf.string('storage', 'run_dir', conf.path('run'))
Example #44
 def _do_create(self, volume_id, size_str, tag,
                backup_source_volume_id=None):
     try:
         out = execute('lvcreate', self.volume_group,
                       name=volume_id, size=size_str, addtag=tag)
     except ProcessError, e:
         if not e.errcode == 5 and 'already exists' not in e.err:
             raise
         # We ran out of space on the storage node!
         if "Insufficient free extents" in e.err \
                 or "insufficient free space" in e.err:
             logger.error(e.err)
             raise ServiceUnavailable("LVM reports insufficient "
                                      "free space on drive")
         # If we are requesting a restore, and the existing volume is this
         # same failed restore, it's not an error.
         if backup_source_volume_id and backup_source_volume_id == \
                 self.get(volume_id).get('backup_source_volume_id', False):
             logger.info("Restarting failed restore on '%s'" % volume_id)
         else:
             raise AlreadyExists("Unable to create a new volume named "
                                 "'%s' because one already exists." %
                                 volume_id)
Example #45
 def check_registration(self):
     try:
         node_info = self.api_status()
     except NotRegistered:
         logger.info("Registering new node '%s'" % self.name)
         self._register()
         return
     # check api node_info against local stats
     local_info = self._local_info()
     data = get_registration_exceptions(local_info, node_info)
     if not data:
         logger.info("Verfied registration, "
                     "node status is '%s'" % node_info['status'])
         return
     if node_info['status'] == 'ACTIVE':
         data['status'] = 'PENDING'
     logger.info("Node status is '%s', updating registration: %r" %
                 (node_info['status'], data))
     # racy if node just came out of maintenance?
     self._register(node_info['id'], data=data)
Example #46
File: base.py Project: audip/lunr
 def check_registration(self):
     try:
         node_info = self.api_status()
     except NotRegistered:
         logger.info("Registering new node '%s'" % self.name)
         self._register()
         return
     # check api node_info against local stats
     local_info = self._local_info()
     data = get_registration_exceptions(local_info, node_info)
     if not data:
         logger.info("Verfied registration, "
                     "node status is '%s'" % node_info['status'])
         return
     if node_info['status'] == 'ACTIVE':
         data['status'] = 'PENDING'
     logger.info("Node status is '%s', updating registration: %r" % (
         node_info['status'], data))
     # racy if node just came out of maintenance?
     self._register(node_info['id'], data=data)
Example #47
class BackupHelper(object):
    def __init__(self, conf):
        self.run_dir = conf.string('storage', 'run_dir', conf.path('run'))
        self.skip_fork = conf.bool('storage', 'skip_fork', False)
        self.conf = conf

    def _resource_file(self, id):
        return join(self.run_dir, 'volumes/%s/resource' % id)

    def _stats_file(self, id):
        return join(self.run_dir, 'volumes/%s/stats' % id)

    def _in_use(self, volume_id):
        resource_file = self._resource_file(volume_id)
        if not exists(resource_file):
            return False

        with ResourceFile(resource_file) as lock:
            # If the file is not in use
            return lock.used()

    def _is_a_backup_running(self, volume_id):
        used = self._in_use(volume_id)
        if not used:
            return False
        if 'stats' in used and 'volume_id' in used:
            return used
        return False

    def _backup_is_running(self, volume_id, backup_id):
        used = self._in_use(volume_id)
        if not used:
            return False
        if 'id' not in used or used['id'] != backup_id:
            return False
        return used

    def list(self, volume):
        """
        Find all manifest in local cache, and running backups
        """
        results = {}

        # Might be a backup running for this volume not yet in the manifest
        running = self._is_a_backup_running(volume['id'])
        if running:
            results.update({running['id']: 'RUNNING'})

        try:
            manifest_file = Worker.build_lock_path(self.run_dir, volume['id'])
            manifest = read_local_manifest(manifest_file)
        except ManifestEmptyError:
            return results

        for backup_id in manifest.backups:
            if self._backup_is_running(volume['id'], backup_id):
                job = self.get(volume, backup_id)
                job['ts'] = manifest.backups.get(backup_id)
                manifest.backups[backup_id] = job

        results.update(manifest.backups)
        return results

    def get(self, volume, backup_id):
        running = self._backup_is_running(volume['id'], backup_id)
        if not running:
            raise NotFound("no active backup running on '%s' called '%s'" %
                           (volume['id'], backup_id))
        stats_file = self._stats_file(volume['id'])
        with ResourceFile(stats_file) as lock:
            stats = lock.read()

        return {
            'lock': self._resource_file(volume['id']),
            'status': 'RUNNING',
            'stats': stats
        }

    def save(self, snapshot, backup_id, cinder):
        job_stats_path = self._stats_file(snapshot['origin'])
        logger.rename('lunr.storage.helper.backup.save')
        setproctitle("lunr-save: " + backup_id)
        size = snapshot['size'] / 1024 / 1024 / 1024

        try:
            op_start = time()
            worker = Worker(snapshot['origin'],
                            conf=self.conf,
                            stats_path=job_stats_path)
        except exc.ClientException, e:
            if e.http_status != 404:
                raise
            op_start = time()
            conn = get_conn(self.conf)
            conn.put_container(snapshot['origin'])
            logger.warning("failed to retrieve manifest;"
                           " first time backup for this volume?")
            # TODO: write the block_size on the manifest at create?
            block_count, remainder = divmod(snapshot['size'], BLOCK_SIZE)
            if remainder:
                block_count += 1
            # initial backup is the only time we need to worry about
            # creating a new manifest for the worker
            worker = Worker(snapshot['origin'],
                            conf=self.conf,
                            manifest=Manifest.blank(block_count),
                            stats_path=job_stats_path)
        try:
            worker.save(snapshot['path'],
                        backup_id,
                        timestamp=snapshot['timestamp'],
                        cinder=cinder)
        finally:
            os.unlink(job_stats_path)
        duration = time() - op_start
        logger.info('STAT: worker save for backup_id %r on %r. '
                    'Size: %r GB Time: %r s Speed: %r MB/s' %
                    (backup_id, snapshot['path'], size, duration,
                     size * 1024 / duration))
Example #48
            op_start = time()
            worker = Worker(volume["id"], self.conf)
        except exc.ClientException, e:
            # If the manifest doesn't exist, We consider the backup deleted.
            # If anything else happens, we bail.
            if e.http_status != 404:
                raise
            logger.warning("No manifest found pruning volume: %s" % volume["id"])
            return
        try:
            history = worker.delete(backup_id)
        except NotFound, e:
            logger.warning("backup_id: '%s' missing from manifest in prune" % backup_id)
            return
        duration = time() - op_start
        logger.info("STAT: pruning %r. Time: %r s" % (backup_id, duration))

    def delete(self, volume, backup_id, callback=None, lock=None):
        spawn(lock, self.prune, volume, backup_id, callback=callback, skip_fork=self.skip_fork)

    def audit(self, volume):
        logger.rename("lunr.storage.helper.backup.audit")
        setproctitle("lunr-audit: " + volume["id"])
        try:
            op_start = time()
            worker = Worker(volume["id"], self.conf)
        except exc.ClientException, e:
            if e.http_status != 404:
                raise
            op_start = time()
            conn = get_conn(self.conf)
Example #49
        except exc.ClientException, e:
            # If the manifest doesn't exist, We consider the backup deleted.
            # If anything else happens, we bail.
            if e.http_status != 404:
                raise
            logger.warning('No manifest found pruning volume: %s' %
                           volume['id'])
            return
        try:
            history = worker.delete(backup_id)
        except NotFound, e:
            logger.warning("backup_id: '%s' missing from manifest in prune" %
                           backup_id)
            return
        duration = time() - op_start
        logger.info('STAT: pruning %r. Time: %r s' % (backup_id, duration))

    def delete(self, volume, backup_id, callback=None, lock=None):
        spawn(lock,
              self.prune,
              volume,
              backup_id,
              callback=callback,
              skip_fork=self.skip_fork)

    def audit(self, volume):
        logger.rename('lunr.storage.helper.backup.audit')
        setproctitle("lunr-audit: " + volume['id'])
        try:
            op_start = time()
            worker = Worker(volume['id'], self.conf)
Example #50
                try:
                    chunks = glance.get(image.id)
                except GlanceError, e:
                    logger.warning("Error fetching glance image: %s" % e)
                    raise

                try:
                    for chunk in chunks:
                        f.write(chunk)
                    break
                # Glanceclient doesn't handle socket timeouts for chunk reads.
                except (GlanceError, socket.timeout) as e:
                    continue
        duration = time() - op_start
        mbytes = image.size / 1024 / 1024
        logger.info('STAT: glance.get %r. Size: %r MB Time: %r Speed: %r' %
                    (image.id, mbytes, duration, mbytes / duration))

    def get_oldstyle_vhd(self, path):
        old_style = os.path.join(path, 'image.vhd')
        if os.path.exists(old_style):
            return old_style
        return None

    def get_vhd_chain(self, path):
        i = 0
        chain = []
        while True:
            vhd = os.path.join(path, '%d.vhd' % i)
            if not os.path.exists(vhd):
                break
            chain.append(vhd)
Example #51
File: backup.py Project: audip/lunr
        except exc.ClientException, e:
            # If the manifest doesn't exist, We consider the backup deleted.
            # If anything else happens, we bail.
            if e.http_status != 404:
                raise
            logger.warning('No manifest found pruning volume: %s' %
                           volume['id'])
            return
        try:
            history = worker.delete(backup_id)
        except NotFound, e:
            logger.warning("backup_id: '%s' missing from manifest in prune" %
                           backup_id)
            return
        duration = time() - op_start
        logger.info('STAT: pruning %r. Time: %r s' % (backup_id, duration))

    def delete(self, volume, backup_id, callback=None, lock=None):
        spawn(lock, self.prune, volume, backup_id,
              callback=callback, skip_fork=self.skip_fork)

    def audit(self, volume):
        logger.rename('lunr.storage.helper.backup.audit')
        setproctitle("lunr-audit: " + volume['id'])
        try:
            op_start = time()
            worker = Worker(volume['id'], self.conf)
        except exc.ClientException, e:
            if e.http_status != 404:
                raise
            op_start = time()