Example #1
        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = timeutils.utcnow()
                    self.f(*self.args, **self.kw)
                    end = timeutils.utcnow()
                    if not self._running:
                        break
                    delay = interval - timeutils.delta_seconds(start, end)
                    if delay <= 0:
                        LOG.warn(_('task run outlasted interval by %s sec') %
                                 -delay)
                    greenthread.sleep(delay if delay > 0 else 0)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)
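
For reference, the loop above is the body of an oslo-style fixed-interval looping call (note the LoopingCallDone exit signal). A minimal, self-contained sketch of the same pattern using only the standard library (threading.Event stands in for the greenthread machinery; the names are illustrative, not the oslo API):

import datetime
import threading

def run_fixed_interval(f, interval, stop_event, initial_delay=0):
    """Call f() every `interval` seconds until stop_event is set."""
    if initial_delay:
        stop_event.wait(initial_delay)
    while not stop_event.is_set():
        start = datetime.datetime.utcnow()
        f()
        elapsed = (datetime.datetime.utcnow() - start).total_seconds()
        delay = interval - elapsed
        if delay <= 0:
            print('task run outlasted interval by %s sec' % -delay)
        # wait() returns early if stop_event is set during the sleep
        stop_event.wait(max(delay, 0))

# Usage sketch:
#   stop = threading.Event()
#   threading.Thread(target=run_fixed_interval, args=(job, 5.0, stop)).start()
#   ...
#   stop.set()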
Example #2
def convert_image(source, dest, out_format, bps_limit=None):
    """Convert image to other format."""
    start_time = timeutils.utcnow()
    cmd = ('qemu-img', 'convert', '-O', out_format, source, dest)
    cgcmd = volume_utils.setup_blkio_cgroup(source, dest, bps_limit)
    if cgcmd:
        cmd = tuple(cgcmd) + cmd
        cmd += ('-t', 'none')  # required to enable ratelimit by blkio cgroup
    utils.execute(*cmd, run_as_root=True)

    duration = timeutils.delta_seconds(start_time, timeutils.utcnow())

    # NOTE(jdg): use a default of 1, mostly for unit test, but in
    # some incredible event this is 0 (cirros image?) don't barf
    if duration < 1:
        duration = 1
    fsz_mb = os.stat(source).st_size / units.Mi
    mbps = (fsz_mb / duration)
    msg = ("Image conversion details: src %(src)s, size %(sz).2f MB, "
           "duration %(duration).2f sec, destination %(dest)s")
    LOG.debug(msg % {
        "src": source,
        "sz": fsz_mb,
        "duration": duration,
        "dest": dest
    })

    msg = _("Converted %(sz).2f MB image at %(mbps).2f MB/s")
    LOG.info(msg % {"sz": fsz_mb, "mbps": mbps})
Example #3
def convert_image(source, dest, out_format, bps_limit=None):
    """Convert image to other format."""
    start_time = timeutils.utcnow()
    # Always set -t none. First it is needed for cgroup io/limiting
    # and it is needed to ensure that all data hit the device before
    # it gets unmapped remotely from the host
    cmd = ('qemu-img', 'convert', '-t', 'none', '-O', out_format, source, dest)
    cgcmd = volume_utils.setup_blkio_cgroup(source, dest, bps_limit)
    if cgcmd:
        cmd = tuple(cgcmd) + cmd
    utils.execute(*cmd, run_as_root=True)

    duration = timeutils.delta_seconds(start_time, timeutils.utcnow())

    # NOTE(jdg): use a default of 1, mostly for unit test, but in
    # some incredible event this is 0 (cirros image?) don't barf
    if duration < 1:
        duration = 1
    fsz_mb = os.stat(source).st_size / units.Mi
    mbps = (fsz_mb / duration)
    msg = ("Image conversion details: src %(src)s, size %(sz).2f MB, "
           "duration %(duration).2f sec, destination %(dest)s")
    LOG.debug(msg % {
        "src": source,
        "sz": fsz_mb,
        "duration": duration,
        "dest": dest
    })

    msg = _("Converted %(sz).2f MB image at %(mbps).2f MB/s")
    LOG.info(msg % {"sz": fsz_mb, "mbps": mbps})
Example #4
def convert_image(source, dest, out_format, bps_limit=None):
    """Convert image to other format."""
    start_time = timeutils.utcnow()
    # Always set -t none. First it is needed for cgroup io/limiting
    # and it is needed to ensure that all data hit the device before
    # it gets unmapped remotely from the host
    cmd = ('qemu-img', 'convert',
           '-t', 'none',
           '-O', out_format, source, dest)
    cgcmd = volume_utils.setup_blkio_cgroup(source, dest, bps_limit)
    if cgcmd:
        cmd = tuple(cgcmd) + cmd
    utils.execute(*cmd, run_as_root=True)

    duration = timeutils.delta_seconds(start_time, timeutils.utcnow())

    # NOTE(jdg): use a default of 1, mostly for unit test, but in
    # some incredible event this is 0 (cirros image?) don't barf
    if duration < 1:
        duration = 1
    fsz_mb = os.stat(source).st_size / units.Mi
    mbps = (fsz_mb / duration)
    msg = ("Image conversion details: src %(src)s, size %(sz).2f MB, "
           "duration %(duration).2f sec, destination %(dest)s")
    LOG.debug(msg % {"src": source,
                     "sz": fsz_mb,
                     "duration": duration,
                     "dest": dest})

    msg = _("Converted %(sz).2f MB image at %(mbps).2f MB/s")
    LOG.info(msg % {"sz": fsz_mb, "mbps": mbps})
Example #5
    def run_periodic_tasks(self, context, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        idle_for = DEFAULT_INTERVAL
        for task_name, task in self._periodic_tasks:
            full_task_name = '.'.join([self.__class__.__name__, task_name])

            now = timeutils.utcnow()
            spacing = self._periodic_spacing[task_name]
            last_run = self._periodic_last_run[task_name]

            # If a periodic task is _nearly_ due, then we'll run it early
            if spacing is not None and last_run is not None:
                due = last_run + datetime.timedelta(seconds=spacing)
                if not timeutils.is_soon(due, 0.2):
                    idle_for = min(idle_for, timeutils.delta_seconds(now, due))
                    continue

            if spacing is not None:
                idle_for = min(idle_for, spacing)

            LOG.debug(_("Running periodic task %(full_task_name)s"), locals())
            self._periodic_last_run[task_name] = timeutils.utcnow()

            try:
                task(self, context)
            except Exception as e:
                if raise_on_error:
                    raise
                LOG.exception(_("Error during %(full_task_name)s: %(e)s"),
                              locals())
            time.sleep(0)

        return idle_for
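
The idle_for bookkeeping above tells the caller how long it may sleep before any task is due again. A stripped-down, runnable sketch of just that calculation (plain dicts stand in for the _periodic_* attributes; the 0.2-second "nearly due" window mirrors the tolerance passed to timeutils.is_soon):

import datetime

DEFAULT_INTERVAL = 60.0

def seconds_until_next_task(spacing, last_run, now=None):
    """spacing and last_run are dicts keyed by task name."""
    now = now or datetime.datetime.utcnow()
    idle_for = DEFAULT_INTERVAL
    for name, space in spacing.items():
        if space is None or last_run.get(name) is None:
            continue  # such tasks run on every pass
        due = last_run[name] + datetime.timedelta(seconds=space)
        remaining = (due - now).total_seconds()
        if remaining > 0.2:
            idle_for = min(idle_for, remaining)  # not even nearly due yet
        else:
            idle_for = min(idle_for, space)  # due now; next run is `space` away
    return idle_for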
Example #6
    def test_create_clone(self):
        self.stubs.Set(SolidFireDriver, '_issue_api_request',
                       self.fake_issue_api_request)
        self.stubs.Set(SolidFireDriver, '_get_model_info',
                       self.fake_get_model_info)
        testvol = {
            'project_id': 'testprjid',
            'name': 'testvol',
            'size': 1,
            'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
            'volume_type_id': None,
            'created_at': timeutils.utcnow()
        }

        testvol_b = {
            'project_id': 'testprjid',
            'name': 'testvol',
            'size': 1,
            'id': 'b831c4d1-d1f0-11e1-9b23-0800200c9a66',
            'volume_type_id': None,
            'created_at': timeutils.utcnow()
        }

        sfv = SolidFireDriver(configuration=self.configuration)
        sfv.create_cloned_volume(testvol_b, testvol)
Example #7
    def test_create_snapshot(self):
        self.stubs.Set(SolidFireDriver, "_issue_api_request", self.fake_issue_api_request)
        self.stubs.Set(SolidFireDriver, "_get_model_info", self.fake_get_model_info)
        testvol = {
            "project_id": "testprjid",
            "name": "testvol",
            "size": 1,
            "id": "a720b3c0-d1f0-11e1-9b23-0800200c9a66",
            "volume_type_id": None,
            "created_at": timeutils.utcnow(),
        }

        testsnap = {
            "project_id": "testprjid",
            "name": "testvol",
            "volume_size": 1,
            "id": "b831c4d1-d1f0-11e1-9b23-0800200c9a66",
            "volume_id": "a720b3c0-d1f0-11e1-9b23-0800200c9a66",
            "volume_type_id": None,
            "created_at": timeutils.utcnow(),
        }

        sfv = SolidFireDriver(configuration=self.configuration)
        model_update = sfv.create_volume(testvol)
        sfv.create_snapshot(testsnap)
Example #8
def convert_image(source, dest, out_format, bps_limit=None):
    """Convert image to other format."""
    start_time = timeutils.utcnow()
    cmd = ('qemu-img', 'convert', '-O', out_format, source, dest)
    cgcmd = volume_utils.setup_blkio_cgroup(source, dest, bps_limit)
    if cgcmd:
        cmd = tuple(cgcmd) + cmd
        cmd += ('-t', 'none')  # required to enable ratelimit by blkio cgroup
    utils.execute(*cmd, run_as_root=True)

    duration = timeutils.delta_seconds(start_time, timeutils.utcnow())

    # NOTE(jdg): use a default of 1, mostly for unit test, but in
    # some incredible event this is 0 (cirros image?) don't barf
    if duration < 1:
        duration = 1
    fsz_mb = os.stat(source).st_size / units.Mi
    mbps = (fsz_mb / duration)
    msg = ("Image conversion details: src %(src)s, size %(sz).2f MB, "
           "duration %(duration).2f sec, destination %(dest)s")
    LOG.debug(msg % {"src": source,
                     "sz": fsz_mb,
                     "duration": duration,
                     "dest": dest})

    msg = _("Converted %(sz).2f MB image at %(mbps).2f MB/s")
    LOG.info(msg % {"sz": fsz_mb, "mbps": mbps})
Example #9
def fetch(context, image_service, image_id, path, _user_id, _project_id):
    # TODO(vish): Improve context handling and add owner and auth data
    #             when it is added to glance.  Right now there is no
    #             auth checking in glance, so we assume that access was
    #             checked before we got here.
    start_time = timeutils.utcnow()
    with fileutils.remove_path_on_error(path):
        with open(path, "wb") as image_file:
            image_service.download(context, image_id, image_file)
    duration = timeutils.delta_seconds(start_time, timeutils.utcnow())

    # NOTE(jdg): use a default of 1, mostly for unit test, but in
    # some incredible event this is 0 (cirros image?) don't barf
    if duration < 1:
        duration = 1
    fsz_mb = os.stat(image_file.name).st_size / units.Mi
    mbps = (fsz_mb / duration)
    msg = ("Image fetch details: dest %(dest)s, size %(sz).2f MB, "
           "duration %(duration).2f sec")
    LOG.debug(msg % {
        "dest": image_file.name,
        "sz": fsz_mb,
        "duration": duration
    })
    msg = _("Image download %(sz).2f MB at %(mbps).2f MB/s")
    LOG.info(msg % {"sz": fsz_mb, "mbps": mbps})
Example #10
    def test_cast_to_volume_host_update_db_with_volume_id(self):
        host = 'fake_host1'
        method = 'fake_method'
        fake_kwargs = {'volume_id': 31337, 'extra_arg': 'meow'}
        queue = 'fake_queue'

        self.mox.StubOutWithMock(timeutils, 'utcnow')
        self.mox.StubOutWithMock(db, 'volume_update')
        self.mox.StubOutWithMock(rpc, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'cast')

        timeutils.utcnow().AndReturn('fake-now')
        db.volume_update(self.context, 31337, {
            'host': host,
            'scheduled_at': 'fake-now'
        })
        rpc.queue_get_for(self.context, FLAGS.volume_topic,
                          host).AndReturn(queue)
        rpc.cast(self.context, queue, {'method': method, 'args': fake_kwargs})

        self.mox.ReplayAll()
        driver.cast_to_volume_host(self.context,
                                   host,
                                   method,
                                   update_db=True,
                                   **fake_kwargs)
Example #11
def copy_volume(srcstr,
                deststr,
                size_in_m,
                blocksize,
                sync=False,
                execute=utils.execute,
                ionice=None):
    # Use O_DIRECT to avoid thrashing the system buffer cache
    extra_flags = []
    if check_for_odirect_support(srcstr, deststr, 'iflag=direct'):
        extra_flags.append('iflag=direct')

    if check_for_odirect_support(srcstr, deststr, 'oflag=direct'):
        extra_flags.append('oflag=direct')

    # If the volume is being unprovisioned then
    # request the data is persisted before returning,
    # so that it's not discarded from the cache.
    if sync and not extra_flags:
        extra_flags.append('conv=fdatasync')

    blocksize, count = _calculate_count(size_in_m, blocksize)

    cmd = [
        'dd',
        'if=%s' % srcstr,
        'of=%s' % deststr,
        'count=%d' % count,
        'bs=%s' % blocksize
    ]
    cmd.extend(extra_flags)

    if ionice is not None:
        cmd = ['ionice', ionice] + cmd

    cgcmd = setup_blkio_cgroup(srcstr, deststr, CONF.volume_copy_bps_limit)
    if cgcmd:
        cmd = cgcmd + cmd

    # Perform the copy
    start_time = timeutils.utcnow()
    execute(*cmd, run_as_root=True)
    duration = timeutils.delta_seconds(start_time, timeutils.utcnow())

    # NOTE(jdg): use a default of 1, mostly for unit test, but in
    # some incredible event this is 0 (cirros image?) don't barf
    if duration < 1:
        duration = 1
    mbps = (size_in_m / duration)
    mesg = ("Volume copy details: src %(src)s, dest %(dest)s, "
            "size %(sz).2f MB, duration %(duration).2f sec")
    LOG.debug(mesg % {
        "src": srcstr,
        "dest": deststr,
        "sz": size_in_m,
        "duration": duration
    })
    mesg = _("Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s")
    LOG.info(mesg % {'size_in_m': size_in_m, 'mbps': mbps})
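
For a hypothetical 1 GiB copy where both O_DIRECT probes succeed and ionice is requested, the command list assembled above comes out roughly as follows (paths, count and blocksize are illustrative, not the output of _calculate_count, and the cgroup prefix depends entirely on what setup_blkio_cgroup returns):

# cmd before the optional prefixes:
['dd', 'if=/dev/mapper/src', 'of=/dev/mapper/dst',
 'count=1024', 'bs=1M', 'iflag=direct', 'oflag=direct']

# with ionice='-c3' and a hypothetical cgroup prefix prepended:
['cgexec', '-g', 'blkio:cinder-volume-copy',
 'ionice', '-c3',
 'dd', 'if=/dev/mapper/src', 'of=/dev/mapper/dst',
 'count=1024', 'bs=1M', 'iflag=direct', 'oflag=direct']

Note that conv=fdatasync is skipped in this scenario because extra_flags is non-empty; the sync fallback only applies when neither O_DIRECT probe succeeds.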
Example #12
    def _cast_create_volume(self, context, request_spec, filter_properties):

        # NOTE(Rongze Zhu): It is a simple solution for bug 1008866
        # If snapshot_id is set, make the call create volume directly to
        # the volume host where the snapshot resides instead of passing it
        # through the scheduler. So snapshot can be copy to new volume.

        source_volid = request_spec["source_volid"]
        volume_id = request_spec["volume_id"]
        snapshot_id = request_spec["snapshot_id"]
        image_id = request_spec["image_id"]

        if snapshot_id and FLAGS.snapshot_same_host:
            snapshot_ref = self.db.snapshot_get(context, snapshot_id)
            source_volume_ref = self.db.volume_get(context, snapshot_ref["volume_id"])
            now = timeutils.utcnow()
            values = {"host": source_volume_ref["host"], "scheduled_at": now}
            volume_ref = self.db.volume_update(context, volume_id, values)

            # bypass scheduler and send request directly to volume
            self.volume_rpcapi.create_volume(
                context,
                volume_ref,
                volume_ref["host"],
                request_spec=request_spec,
                filter_properties=filter_properties,
                allow_reschedule=False,
                snapshot_id=snapshot_id,
                image_id=image_id,
            )
        elif source_volid:
            source_volume_ref = self.db.volume_get(context, source_volid)
            now = timeutils.utcnow()
            values = {"host": source_volume_ref["host"], "scheduled_at": now}
            volume_ref = self.db.volume_update(context, volume_id, values)

            # bypass scheduler and send request directly to volume
            self.volume_rpcapi.create_volume(
                context,
                volume_ref,
                volume_ref["host"],
                request_spec=request_spec,
                filter_properties=filter_properties,
                allow_reschedule=False,
                snapshot_id=snapshot_id,
                image_id=image_id,
                source_volid=source_volid,
            )
        else:
            self.scheduler_rpcapi.create_volume(
                context,
                FLAGS.volume_topic,
                volume_id,
                snapshot_id,
                image_id,
                request_spec=request_spec,
                filter_properties=filter_properties,
            )
Example #13
    def _cast_create_volume(self, context, request_spec, filter_properties):

        # NOTE(Rongze Zhu): It is a simple solution for bug 1008866
        # If snapshot_id is set, make the call create volume directly to
        # the volume host where the snapshot resides instead of passing it
        # through the scheduler. So snapshot can be copy to new volume.

        source_volid = request_spec['source_volid']
        volume_id = request_spec['volume_id']
        snapshot_id = request_spec['snapshot_id']
        image_id = request_spec['image_id']

        if snapshot_id and FLAGS.snapshot_same_host:
            snapshot_ref = self.db.snapshot_get(context, snapshot_id)
            source_volume_ref = self.db.volume_get(context,
                                                   snapshot_ref['volume_id'])
            now = timeutils.utcnow()
            values = {'host': source_volume_ref['host'], 'scheduled_at': now}
            volume_ref = self.db.volume_update(context, volume_id, values)

            # bypass scheduler and send request directly to volume
            self.volume_rpcapi.create_volume(
                context,
                volume_ref,
                volume_ref['host'],
                request_spec=request_spec,
                filter_properties=filter_properties,
                allow_reschedule=False,
                snapshot_id=snapshot_id,
                image_id=image_id)
        elif source_volid:
            source_volume_ref = self.db.volume_get(context,
                                                   source_volid)
            now = timeutils.utcnow()
            values = {'host': source_volume_ref['host'], 'scheduled_at': now}
            volume_ref = self.db.volume_update(context, volume_id, values)

            # bypass scheduler and send request directly to volume
            self.volume_rpcapi.create_volume(
                context,
                volume_ref,
                volume_ref['host'],
                request_spec=request_spec,
                filter_properties=filter_properties,
                allow_reschedule=False,
                snapshot_id=snapshot_id,
                image_id=image_id,
                source_volid=source_volid)
        else:
            self.scheduler_rpcapi.create_volume(
                context,
                FLAGS.volume_topic,
                volume_id,
                snapshot_id,
                image_id,
                request_spec=request_spec,
                filter_properties=filter_properties)
Example #14
    def test_volume_host_update_db(self):
        self.mox.StubOutWithMock(timeutils, 'utcnow')
        self.mox.StubOutWithMock(db, 'volume_update')

        timeutils.utcnow().AndReturn('fake-now')
        db.volume_update(self.context, 31337,
                         {'host': 'fake_host',
                          'scheduled_at': 'fake-now'})

        self.mox.ReplayAll()
        driver.volume_update_db(self.context, 31337, 'fake_host')
Example #15
def convert_image(source, dest, out_format, bps_limit=None, is_qcow_compress=False):
    """Convert image to other format."""

    cmd = ('qemu-img', 'convert',
           '-O', out_format, source, dest)

    if is_qcow_compress and out_format == 'qcow2':
        cmd = ('qemu-img', 'convert',
               '-c',
               '-O', out_format, source, dest)
    else:
        cmd = ('qemu-img', 'convert',
               '-O', out_format, source, dest)

    # Check whether O_DIRECT is supported and set '-t none' if it is
    # This is needed to ensure that all data hit the device before
    # it gets unmapped remotely from the host for some backends
    # Reference Bug: #1363016

    # NOTE(jdg): In the case of file devices qemu does the
    # flush properly and more efficiently than would be done
    # setting O_DIRECT, so check for that and skip the
    # setting for non BLK devs
    if (utils.is_blk_device(dest) and
            volume_utils.check_for_odirect_support(source,
                                                   dest,
                                                   'oflag=direct')):
        cmd = ('qemu-img', 'convert',
               '-t', 'none',
               '-O', out_format, source, dest)

    start_time = timeutils.utcnow()
    cgcmd = volume_utils.setup_blkio_cgroup(source, dest, bps_limit)
    if cgcmd:
        cmd = tuple(cgcmd) + cmd
    utils.execute(*cmd, run_as_root=True)

    duration = timeutils.delta_seconds(start_time, timeutils.utcnow())

    # NOTE(jdg): use a default of 1, mostly for unit test, but in
    # some incredible event this is 0 (cirros image?) don't barf
    if duration < 1:
        duration = 1
    fsz_mb = os.stat(source).st_size / units.Mi
    mbps = (fsz_mb / duration)
    msg = ("Image conversion details: src %(src)s, size %(sz).2f MB, "
           "duration %(duration).2f sec, destination %(dest)s")
    LOG.debug(msg % {"src": source,
                     "sz": fsz_mb,
                     "duration": duration,
                     "dest": dest})

    msg = _("Converted %(sz).2f MB image at %(mbps).2f MB/s")
    LOG.info(msg % {"sz": fsz_mb, "mbps": mbps})
Example #16
def convert_image(source,
                  dest,
                  out_format,
                  bps_limit=None,
                  is_qcow_compress=False):
    """Convert image to other format."""

    cmd = ('qemu-img', 'convert', '-O', out_format, source, dest)

    if is_qcow_compress and out_format == 'qcow2':
        cmd = ('qemu-img', 'convert', '-c', '-O', out_format, source, dest)
    else:
        cmd = ('qemu-img', 'convert', '-O', out_format, source, dest)

    # Check whether O_DIRECT is supported and set '-t none' if it is
    # This is needed to ensure that all data hit the device before
    # it gets unmapped remotely from the host for some backends
    # Reference Bug: #1363016

    # NOTE(jdg): In the case of file devices qemu does the
    # flush properly and more efficiently than would be done
    # setting O_DIRECT, so check for that and skip the
    # setting for non BLK devs
    if (utils.is_blk_device(dest) and volume_utils.check_for_odirect_support(
            source, dest, 'oflag=direct')):
        cmd = ('qemu-img', 'convert', '-t', 'none', '-O', out_format, source,
               dest)

    start_time = timeutils.utcnow()
    cgcmd = volume_utils.setup_blkio_cgroup(source, dest, bps_limit)
    if cgcmd:
        cmd = tuple(cgcmd) + cmd
    utils.execute(*cmd, run_as_root=True)

    duration = timeutils.delta_seconds(start_time, timeutils.utcnow())

    # NOTE(jdg): use a default of 1, mostly for unit test, but in
    # some incredible event this is 0 (cirros image?) don't barf
    if duration < 1:
        duration = 1
    fsz_mb = os.stat(source).st_size / units.Mi
    mbps = (fsz_mb / duration)
    msg = ("Image conversion details: src %(src)s, size %(sz).2f MB, "
           "duration %(duration).2f sec, destination %(dest)s")
    LOG.debug(msg % {
        "src": source,
        "sz": fsz_mb,
        "duration": duration,
        "dest": dest
    })

    msg = _("Converted %(sz).2f MB image at %(mbps).2f MB/s")
    LOG.info(msg % {"sz": fsz_mb, "mbps": mbps})
Example #17
 def multi_services(context, topic):
     return [{
         'availability_zone': "fake_az",
         'host': alt_host,
         'disabled': 0,
         'updated_at': timeutils.utcnow()
     }, {
         'availability_zone': "fake_az",
         'host': test_host,
         'disabled': 0,
         'updated_at': timeutils.utcnow()
     }]
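Example #18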
    def test_volume_host_update_db(self):
        self.mox.StubOutWithMock(timeutils, 'utcnow')
        self.mox.StubOutWithMock(db, 'volume_update')

        timeutils.utcnow().AndReturn('fake-now')
        db.volume_update(self.context, 31337, {
            'host': 'fake_host',
            'scheduled_at': 'fake-now'
        })

        self.mox.ReplayAll()
        driver.volume_update_db(self.context, 31337, 'fake_host')
Example #19
def copy_volume(srcstr, deststr, size_in_m, blocksize, sync=False,
                execute=utils.execute, ionice=None):
    # Use O_DIRECT to avoid thrashing the system buffer cache
    extra_flags = []
    # Check whether O_DIRECT is supported to iflag and oflag separately
    for flag in ['iflag=direct', 'oflag=direct']:
        try:
            execute('dd', 'count=0', 'if=%s' % srcstr, 'of=%s' % deststr,
                    flag, run_as_root=True)
            extra_flags.append(flag)
        except processutils.ProcessExecutionError:
            pass

    # If the volume is being unprovisioned then
    # request the data is persisted before returning,
    # so that it's not discarded from the cache.
    if sync and not extra_flags:
        extra_flags.append('conv=fdatasync')

    blocksize, count = _calculate_count(size_in_m, blocksize)

    cmd = ['dd', 'if=%s' % srcstr, 'of=%s' % deststr,
           'count=%d' % count, 'bs=%s' % blocksize]
    cmd.extend(extra_flags)

    if ionice is not None:
        cmd = ['ionice', ionice] + cmd

    cgcmd = setup_blkio_cgroup(srcstr, deststr, CONF.volume_copy_bps_limit)
    if cgcmd:
        cmd = cgcmd + cmd

    # Perform the copy
    start_time = timeutils.utcnow()
    execute(*cmd, run_as_root=True)
    duration = timeutils.delta_seconds(start_time, timeutils.utcnow())

    # NOTE(jdg): use a default of 1, mostly for unit test, but in
    # some incredible event this is 0 (cirros image?) don't barf
    if duration < 1:
        duration = 1
    mbps = (size_in_m / duration)
    mesg = ("Volume copy details: src %(src)s, dest %(dest)s, "
            "size %(sz).2f MB, duration %(duration).2f sec")
    LOG.debug(mesg % {"src": srcstr,
                      "dest": deststr,
                      "sz": size_in_m,
                      "duration": duration})
    mesg = _("Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s")
    LOG.info(mesg % {'size_in_m': size_in_m, 'mbps': mbps})
Example #20
def mock_host_manager_db_calls(mock_obj):
    services = [
        dict(id=1, host='host1', topic='volume', disabled=False,
             availability_zone='zone1', updated_at=timeutils.utcnow()),
        dict(id=2, host='host2', topic='volume', disabled=False,
             availability_zone='zone1', updated_at=timeutils.utcnow()),
        dict(id=3, host='host3', topic='volume', disabled=False,
             availability_zone='zone2', updated_at=timeutils.utcnow()),
        dict(id=4, host='host4', topic='volume', disabled=False,
             availability_zone='zone3', updated_at=timeutils.utcnow()),
        # service on host5 is disabled
        dict(id=5, host='host5', topic='volume', disabled=True,
             availability_zone='zone4', updated_at=timeutils.utcnow()),
    ]
    mock_obj.return_value = services
Example #21
 def execute(self, context, volume, volume_spec):
     volume_id = volume['id']
     new_status = self.status_translation.get(volume_spec.get('status'),
                                              'available')
     update = {
         'status': new_status,
         'launched_at': timeutils.utcnow(),
     }
     try:
         # TODO(harlowja): is it acceptable to only log if this fails??
         # or are there other side-effects that this will cause if the
         # status isn't updated correctly (aka it will likely be stuck in
         # 'building' if this fails)??
         volume_ref = self.db.volume_update(context, volume_id, update)
         # Now use the parent to notify.
         super(CreateVolumeOnFinishTask, self).execute(context, volume_ref)
     except exception.CinderException:
         LOG.exception(
             _("Failed updating volume %(volume_id)s with "
               "%(update)s") % {
                   'volume_id': volume_id,
                   'update': update
               })
     # Even if the update fails, the volume is ready.
     msg = _("Volume %(volume_name)s (%(volume_id)s): created successfully")
     LOG.info(msg % {
         'volume_name': volume_spec['volume_name'],
         'volume_id': volume_id,
     })
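Example #22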
 def disabled_service(context, topic):
     return [{
         'availability_zone': "fake_az",
         'host': 'test_host',
         'disabled': 1,
         'updated_at': timeutils.utcnow()
     }]
Example #23
def _list_hosts(req, service=None):
    """Returns a summary list of hosts."""
    curr_time = timeutils.utcnow()
    context = req.environ['cinder.context']
    services = db.service_get_all(context, False)
    zone = ''
    if 'zone' in req.GET:
        zone = req.GET['zone']
    if zone:
        services = [s for s in services if s['availability_zone'] == zone]
    hosts = []
    for host in services:
        delta = curr_time - (host['updated_at'] or host['created_at'])
        alive = abs(utils.total_seconds(delta)) <= CONF.service_down_time
        status = (alive and "available") or "unavailable"
        active = 'enabled'
        if host['disabled']:
            active = 'disabled'
        LOG.debug('status, active and update: %s, %s, %s' %
                  (status, active, host['updated_at']))
        hosts.append({
            'host_name': host['host'],
            'service': host['topic'],
            'zone': host['availability_zone'],
            'service-status': status,
            'service-state': active,
            'last-update': host['updated_at']
        })
    if service:
        hosts = [host for host in hosts if host["service"] == service]
    return hosts
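
The liveness test in the loop reduces to comparing the time since the service's last heartbeat with service_down_time. Isolated as a runnable helper (the 60-second default is an assumption for illustration, not Cinder's configured value):

import datetime

def service_is_alive(updated_at, created_at, service_down_time=60):
    delta = datetime.datetime.utcnow() - (updated_at or created_at)
    return abs(delta.total_seconds()) <= service_down_time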
Example #24
    def _create_volume_from_image(self, expected_status, raw=False,
                                  clone_error=False):
        """Try to clone a volume from an image, and check the status
        afterwards.

        NOTE: if clone_error is True we force the image type to raw otherwise
              clone_image is not called
        """
        def mock_clone_image(volume, image_location, image_id, image_meta):
            self.called.append('clone_image')
            if clone_error:
                raise exception.CinderException()
            else:
                return {'provider_location': None}, True

        # See tests.image.fake for image types.
        if raw:
            image_id = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
        else:
            image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'

        volume_id = 1

        # creating volume testdata
        db.volume_create(self.context,
                         {'id': volume_id,
                          'updated_at': timeutils.utcnow(),
                          'display_description': 'Test Desc',
                          'size': 20,
                          'status': 'creating',
                          'instance_uuid': None,
                          'host': 'dummy'})

        mpo = mock.patch.object
        with mpo(self.volume.driver, 'create_volume') as mock_create_volume:
            with mpo(self.volume.driver, 'clone_image', mock_clone_image):
                with mpo(create_volume.CreateVolumeFromSpecTask,
                         '_copy_image_to_volume') as mock_copy_image_to_volume:

                    try:
                        if not clone_error:
                            self.volume.create_volume(self.context,
                                                      volume_id,
                                                      image_id=image_id)
                        else:
                            self.assertRaises(exception.CinderException,
                                              self.volume.create_volume,
                                              self.context,
                                              volume_id,
                                              image_id=image_id)

                        volume = db.volume_get(self.context, volume_id)
                        self.assertEqual(volume['status'], expected_status)
                    finally:
                        # cleanup
                        db.volume_destroy(self.context, volume_id)

                    self.assertEqual(self.called, ['clone_image'])
                    mock_create_volume.assert_called()
                    mock_copy_image_to_volume.assert_called()
Example #25
 def get_latest_ssc():
     LOG.info(_('Running cluster latest ssc job for %(server)s'
                ' and vserver %(vs)s')
              % {'server': na_server, 'vs': vserver})
     ssc_vols = get_cluster_ssc(na_server, vserver)
     backend.refresh_ssc_vols(ssc_vols)
     backend.ssc_run_time = timeutils.utcnow()
Example #26
    def delete(self, context, volume, force=False):
        if context.is_admin and context.project_id != volume['project_id']:
            project_id = volume['project_id']
        else:
            project_id = context.project_id

        volume_id = volume['id']
        if not volume['host']:
            # NOTE(vish): scheduling failed, so delete it
            # Note(zhiteng): update volume quota reservation
            try:
                reserve_opts = {'volumes': -1, 'gigabytes': -volume['size']}
                QUOTAS.add_volume_type_opts(context, reserve_opts,
                                            volume['volume_type_id'])
                reservations = QUOTAS.reserve(context,
                                              project_id=project_id,
                                              **reserve_opts)
            except Exception:
                reservations = None
                LOG.exception(_("Failed to update quota for deleting volume"))
            self.db.volume_destroy(context.elevated(), volume_id)

            if reservations:
                QUOTAS.commit(context, reservations, project_id=project_id)
            return
        if not force and volume['status'] not in [
                "available", "error", "error_restoring", "error_extending"
        ]:
            msg = _("Volume status must be available or error, "
                    "but current status is: %s") % volume['status']
            raise exception.InvalidVolume(reason=msg)

        if volume['attach_status'] == "attached":
            # Volume is still attached, need to detach first
            raise exception.VolumeAttached(volume_id=volume_id)

        if volume['migration_status'] is not None:
            # Volume is migrating, wait until done
            msg = _("Volume cannot be deleted while migrating")
            raise exception.InvalidVolume(reason=msg)

        snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
        if len(snapshots):
            msg = _("Volume still has %d dependent snapshots") % len(snapshots)
            raise exception.InvalidVolume(reason=msg)

        # If the volume is encrypted, delete its encryption key from the key
        # manager. This operation makes volume deletion an irreversible process
        # because the volume cannot be decrypted without its key.
        encryption_key_id = volume.get('encryption_key_id', None)
        if encryption_key_id is not None:
            self.key_manager.delete_key(context, encryption_key_id)

        now = timeutils.utcnow()
        self.db.volume_update(context, volume_id, {
            'status': 'deleting',
            'terminated_at': now
        })

        self.volume_rpcapi.delete_volume(context, volume)
Example #27
    def test_create_volume_for_migration(self):
        def _fake_do_v_create(self, project_id, params):
            return project_id, params

        self.stubs.Set(SolidFireDriver, '_issue_api_request',
                       self.fake_issue_api_request)
        self.stubs.Set(SolidFireDriver, '_do_volume_create', _fake_do_v_create)

        testvol = {'project_id': 'testprjid',
                   'name': 'testvol',
                   'size': 1,
                   'id': 'b830b3c0-d1f0-11e1-9b23-1900200c9a77',
                   'volume_type_id': None,
                   'created_at': timeutils.utcnow(),
                   'migration_status': 'target:'
                                       'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}

        sfv = SolidFireDriver(configuration=self.configuration)
        proj_id, sf_vol_object = sfv.create_volume(testvol)
        self.assertEqual('a720b3c0-d1f0-11e1-9b23-0800200c9a66',
                         sf_vol_object['attributes']['uuid'])
        self.assertEqual('b830b3c0-d1f0-11e1-9b23-1900200c9a77',
                         sf_vol_object['attributes']['migration_uuid'])
        self.assertEqual('UUID-a720b3c0-d1f0-11e1-9b23-0800200c9a66',
                         sf_vol_object['name'])
Example #28
    def list_availability_zones(self, enable_cache=False):
        """Describe the known availability zones

        :retval list of dicts, each with a 'name' and 'available' key
        """
        refresh_cache = False
        if enable_cache:
            if self.availability_zones_last_fetched is None:
                refresh_cache = True
            else:
                cache_age = timeutils.delta_seconds(self.availability_zones_last_fetched, timeutils.utcnow())
                if cache_age >= CONF.az_cache_duration:
                    refresh_cache = True
        if refresh_cache or not enable_cache:
            topic = CONF.volume_topic
            ctxt = context.get_admin_context()
            services = self.db.service_get_all_by_topic(ctxt, topic)
            az_data = [(s["availability_zone"], s["disabled"]) for s in services]
            disabled_map = {}
            for (az_name, disabled) in az_data:
                tracked_disabled = disabled_map.get(az_name, True)
                disabled_map[az_name] = tracked_disabled and disabled
            azs = [{"name": name, "available": not disabled} for (name, disabled) in disabled_map.items()]
            if refresh_cache:
                now = timeutils.utcnow()
                self.availability_zones = azs
                self.availability_zones_last_fetched = now
                LOG.debug(
                    "Availability zone cache updated, next update will" " occur around %s",
                    now + datetime.timedelta(seconds=CONF.az_cache_duration),
                )
        else:
            azs = self.availability_zones
        return tuple(azs)
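
The disabled_map fold above marks a zone available as soon as any one of its services is enabled. The same reduction in isolation, runnable as-is:

def zone_availability(az_data):
    """az_data: iterable of (az_name, disabled) pairs, as built above."""
    disabled_map = {}
    for az_name, disabled in az_data:
        # A zone stays disabled only while every service seen in it is disabled.
        disabled_map[az_name] = disabled_map.get(az_name, True) and disabled
    return [{'name': name, 'available': not disabled}
            for name, disabled in disabled_map.items()]

# zone_availability([('az1', True), ('az1', False), ('az2', True)])
# -> [{'name': 'az1', 'available': True}, {'name': 'az2', 'available': False}]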
Example #29
    def test_initialize_connector_with_blocksizes(self):
        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
        testvol = {'project_id': 'testprjid',
                   'name': 'testvol',
                   'size': 1,
                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
                   'volume_type_id': None,
                   'provider_location': '10.10.7.1:3260 iqn.2010-01.com.'
                                        'solidfire:87hg.uuid-2cc06226-cc'
                                        '74-4cb7-bd55-14aed659a0cc.4060 0',
                   'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2'
                                    'c76370d66b 2FE0CQ8J196R',
                   'provider_geometry': '4096 4096',
                   'created_at': timeutils.utcnow()}

        sfv = SolidFireDriver(configuration=self.configuration)
        properties = sfv.initialize_connection(testvol, connector)
        self.assertEqual(properties['data']['physical_block_size'], '4096')
        self.assertEqual(properties['data']['logical_block_size'], '4096')
Example #30
    def delete(self, context, volume, force=False):
        volume_id = volume['id']
        if not volume['host']:
            # NOTE(vish): scheduling failed, so delete it
            # Note(zhiteng): update volume quota reservation
            try:
                reservations = QUOTAS.reserve(context, volumes=-1,
                                              gigabytes=-volume['size'])
            except Exception:
                reservations = None
                LOG.exception(_("Failed to update quota for deleting volume"))

            self.db.volume_destroy(context, volume_id)

            if reservations:
                QUOTAS.commit(context, reservations)
            return
        if not force and volume['status'] not in ["available", "error"]:
            msg = _("Volume status must be available or error")
            raise exception.InvalidVolume(reason=msg)

        snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
        if len(snapshots):
            msg = _("Volume still has %d dependent snapshots") % len(snapshots)
            raise exception.InvalidVolume(reason=msg)

        now = timeutils.utcnow()
        self.db.volume_update(context, volume_id, {'status': 'deleting',
                                                   'terminated_at': now})
        host = volume['host']
        rpc.cast(context,
                 rpc.queue_get_for(context, FLAGS.volume_topic, host),
                 {"method": "delete_volume",
                  "args": {"volume_id": volume_id}})
Example #31
    def test_retype(self):
        sfv = SolidFireDriver(configuration=self.configuration)
        self.stubs.Set(SolidFireDriver, '_issue_api_request',
                       self.fake_issue_api_request)
        type_ref = volume_types.create(self.ctxt, "type1", {
            "qos:minIOPS": "500",
            "qos:burstIOPS": "2000",
            "qos:maxIOPS": "1000"
        })
        diff = {
            'encryption': {},
            'qos_specs': {},
            'extra_specs': {
                'qos:burstIOPS': ('10000', u'2000'),
                'qos:minIOPS': ('1000', u'500'),
                'qos:maxIOPS': ('10000', u'1000')
            }
        }
        host = None
        testvol = {
            'project_id': 'testprjid',
            'name': 'test_volume',
            'size': 1,
            'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
            'created_at': timeutils.utcnow()
        }

        self.assertTrue(sfv.retype(self.ctxt, testvol, type_ref, diff, host))
Example #32
    def index(self, req):
        """
        Return a list of all running services. Filter by host & service name.
        """
        context = req.environ['cinder.context']
        authorize(context)
        now = timeutils.utcnow()
        services = db.service_get_all(context)

        host = ''
        if 'host' in req.GET:
            host = req.GET['host']
        service = ''
        if 'service' in req.GET:
            service = req.GET['service']
        if host:
            services = [s for s in services if s['host'] == host]
        if service:
            services = [s for s in services if s['binary'] == service]

        svcs = []
        for svc in services:
            delta = now - (svc['updated_at'] or svc['created_at'])
            alive = abs(utils.total_seconds(delta)) <= CONF.service_down_time
            art = (alive and "up") or "down"
            active = 'enabled'
            if svc['disabled']:
                active = 'disabled'
            svcs.append({"binary": svc['binary'], 'host': svc['host'],
                         'zone': svc['availability_zone'],
                         'status': active, 'state': art,
                         'updated_at': svc['updated_at']})
        return {'services': svcs}
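Example #33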
 def test_create_cloned_volume_snapfails(self, connmock):
     self.drv.conn = connmock.return_value
     # this operation is a 2 part process, snap, then clone.
     # This tests for the snap failing
     srcvol = {'id': 'cinderVolumeID'}
     dstvol = {'project_id': 'testproject',
               'name': 'cinderVolumeName',
               'size': 1,
               'id': 'cinderVolumeID-dst',
               'volume_type_id': None,
               'created_at': timeutils.utcnow()}
     cmd = {'VolumeUUID': 'FakeBasicVolID',
            'Name': 'mockedFakeUUID'}
     get_effect = [basic_vol_response, ]
     self.drv.conn.get.side_effect = get_effect
     self.drv.conn.post.side_effect = requests.exceptions.HTTPError
     with mock.patch('cinder.volume.drivers.fusionio.ioControl.uuid',
                     autospec=True) as uuidmock:
         uuidmock.uuid4.return_value = cmd['Name']
         self.assertRaises(requests.exceptions.HTTPError,
                           self.drv.create_cloned_volume,
                           dstvol, srcvol)
     expected = [mock.call.get('TierStore/Volumes/by-id/'),
                 mock.call.post('TierStore/Snapshots/by-id/', cmd), ]
     self.drv.conn.assert_has_calls(expected)
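Example #34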
 def test_terminate_connection_multiple_delete(self, connmock):
     self.drv.conn = connmock.return_value
     connector = {'initiator': 'fake:01'}
     testvol = {'project_id': 'testproject',
                'name': 'cinderVolumeName',
                'size': 1,
                'id': 'cinderVolumeID',
                'volume_type_id': None,
                'created_at': timeutils.utcnow(),
                'provider_auth': {}}
     cmd = {"AclGroupList": ["1"], }
     return2vol = copy.deepcopy(basic_vol_response)
     return2vol.append(copy.deepcopy(basic_vol_response[0]))
     get_effect = [basic_vol_response,
                   basic_acl_group_response,
                   basic_acl_group_response,
                   return2vol, ]
     self.drv.conn.get.side_effect = get_effect
     self.drv.terminate_connection(testvol, connector)
     expected = [mock.call.get('TierStore/Volumes/by-id/'),
                 mock.call.get('TierStore/ACLGroup/by-id/'),
                 mock.call.put('TierStore/Volumes/by-id/FakeBasicVolID',
                               cmd),
                 mock.call.get('TierStore/ACLGroup/by-id/'),
                 mock.call.get('TierStore/Volumes/by-id/'),
                 mock.call.delete('TierStore/ACLGroup/by-id/3')]
     self.drv.conn.assert_has_calls(expected)
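Example #35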
 def test_initialize_connection_no_usable_Networks_fail(self, connmock):
     self.drv.conn = connmock.return_value
     connector = {'initiator': 'fake:01'}
     testvol = {'project_id': 'testproject',
                'name': 'cinderVolumeName',
                'size': 1,
                'id': 'cinderVolumeID',
                'volume_type_id': None,
                'created_at': timeutils.utcnow(),
                'provider_auth': {}}
     cmd = {"GroupName": "fake:01",
            "InitiatorList": ["fake:01"]}
     cmd2 = {"AclGroupList": ["3"], }
     netResponse = copy.deepcopy(basic_net_response)
     netResponse[4]['OperationalState'] = "down"
     get_effect = [basic_vol_response,
                   basic_acl_group_response,
                   basic_vol_response,
                   netResponse, ]
     self.drv.conn.get.side_effect = get_effect
     self.assertRaises(exception.VolumeDriverException,
                       self.drv.initialize_connection, testvol,
                       connector)
     expected = [mock.call.get('TierStore/Volumes/by-id/'),
                 mock.call.post('TierStore/ACLGroup/by-id/', cmd),
                 mock.call.get('TierStore/ACLGroup/by-id/'),
                 mock.call.put('TierStore/Volumes/by-id/FakeBasicVolID',
                               cmd2),
                 mock.call.get('TierStore/Volumes/by-id/'),
                 mock.call.get('System/Network/by-id/'), ]
     self.drv.conn.assert_has_calls(expected)
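Example #36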
def volume_update_db(context, volume_id, host, replica=None):
    '''Set the host and set the scheduled_at field of a volume.

    If this is a replicated volume, create a new DB entry for the replica and
    set the links between them in replication_partner.

    :returns: A Volume with the updated fields set properly.
    '''
    now = timeutils.utcnow()
    values = {'host': host, 'scheduled_at': now}
    if replica:
        volume_ref = db.volume_get(context, volume_id)
        updates = {'scheduled_at': now, 'status': 'replica_creating'}
        for field in [
                'size', 'ec2_id', 'user_id', 'project_id', 'created_at',
                'updated_at', 'display_name', 'display_description'
        ]:
            updates[field] = volume_ref[field]
        replica.update(updates)
        replica_ref = db.volume_create(context, replica)
        replication = {
            'primary_id': volume_id,
            'secondary_id': replica_ref['id'],
            'status': 'starting'
        }
        db.replication_relationship_create(context, replication)

    return db.volume_update(context, volume_id, values)
Example #37
 def get_latest_ssc():
     LOG.info(_('Running cluster latest ssc job for %(server)s'
                ' and vserver %(vs)s')
              % {'server': na_server, 'vs': vserver})
     ssc_vols = get_cluster_ssc(na_server, vserver)
     backend.refresh_ssc_vols(ssc_vols)
     backend.ssc_run_time = timeutils.utcnow()
Example #38
    def delete(self, context, volume, force=False):
        if context.is_admin and context.project_id != volume["project_id"]:
            project_id = volume["project_id"]
        else:
            project_id = context.project_id

        volume_id = volume["id"]
        if not volume["host"]:
            # NOTE(vish): scheduling failed, so delete it
            # Note(zhiteng): update volume quota reservation
            try:
                reservations = QUOTAS.reserve(context, project_id=project_id, volumes=-1, gigabytes=-volume["size"])
            except Exception:
                reservations = None
                LOG.exception(_("Failed to update quota for deleting volume"))
            self.db.volume_destroy(context.elevated(), volume_id)

            if reservations:
                QUOTAS.commit(context, reservations, project_id=project_id)
            return
        if not force and volume["status"] not in ["available", "error", "error_restoring"]:
            msg = _("Volume status must be available or error")
            raise exception.InvalidVolume(reason=msg)

        snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
        if len(snapshots):
            msg = _("Volume still has %d dependent snapshots") % len(snapshots)
            raise exception.InvalidVolume(reason=msg)

        now = timeutils.utcnow()
        self.db.volume_update(context, volume_id, {"status": "deleting", "terminated_at": now})

        self.volume_rpcapi.delete_volume(context, volume)
Example #39
    def _cast_create_volume(self, context, request_spec, filter_properties):

        # NOTE(Rongze Zhu): It is a simple solution for bug 1008866
        # If snapshot_id is set, make the call create volume directly to
        # the volume host where the snapshot resides instead of passing it
        # through the scheduler. So snapshot can be copy to new volume.
        volume_id = request_spec['volume_id']
        snapshot_id = request_spec['snapshot_id']
        image_id = request_spec['image_id']

        if snapshot_id and FLAGS.snapshot_same_host:
            snapshot_ref = self.db.snapshot_get(context, snapshot_id)
            src_volume_ref = self.db.volume_get(context,
                                                snapshot_ref['volume_id'])
            now = timeutils.utcnow()
            values = {'host': src_volume_ref['host'], 'scheduled_at': now}
            volume_ref = self.db.volume_update(context, volume_id, values)

            # bypass scheduler and send request directly to volume
            self.volume_rpcapi.create_volume(context,
                                             volume_ref,
                                             volume_ref['host'],
                                             snapshot_id,
                                             image_id)
        else:
            self.scheduler_rpcapi.create_volume(
                context,
                FLAGS.volume_topic,
                volume_id,
                snapshot_id,
                image_id,
                request_spec=request_spec,
                filter_properties=filter_properties)
Example #40
    def _reschedule(self, context, request_spec, filter_properties,
                    volume_id, scheduler_method, method_args,
                    exc_info=None):
        """Attempt to re-schedule a volume operation."""

        retry = filter_properties.get('retry', None)
        if not retry:
            # no retry information, do not reschedule.
            LOG.debug(_("Retry info not present, will not reschedule"))
            return

        if not request_spec:
            LOG.debug(_("No request spec, will not reschedule"))
            return

        request_spec['volume_id'] = [volume_id]

        LOG.debug(_("volume %(volume_id)s: re-scheduling %(method)s "
                    "attempt %(num)d") %
                  {'volume_id': volume_id,
                   'method': scheduler_method.func_name,
                   'num': retry['num_attempts']})

        # reset the volume state:
        now = timeutils.utcnow()
        self.db.volume_update(context, volume_id,
                              {'status': 'creating',
                               'scheduled_at': now})

        if exc_info:
            # stringify to avoid circular ref problem in json serialization:
            retry['exc'] = traceback.format_exception(*exc_info)

        scheduler_method(context, *method_args)
        return True
Example #41
 def az_not_match(context, topic):
     return [{
         'availability_zone': "strange_az",
         'host': test_host,
         'disabled': 0,
         'updated_at': timeutils.utcnow()
     }]
Example #42
def volume_destroy(context, volume_id):
    session = get_session()
    with session.begin():
        session.query(models.Volume).\
                filter_by(id=volume_id).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.IscsiTarget).\
                filter_by(volume_id=volume_id).\
                update({'volume_id': None})
        session.query(models.VolumeMetadata).\
                filter_by(volume_id=volume_id).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})
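
The update({'updated_at': literal_column('updated_at')}) idiom assigns the column to itself, so the soft delete does not trip the model's onupdate timestamp hook. A self-contained SQLAlchemy sketch of the same idiom (the table and engine are made up for illustration):

import datetime
from sqlalchemy import (Boolean, Column, DateTime, Integer, MetaData, Table,
                        create_engine, literal_column)

metadata = MetaData()
volumes = Table('volumes', metadata,
                Column('id', Integer, primary_key=True),
                Column('deleted', Boolean, default=False),
                Column('deleted_at', DateTime),
                Column('updated_at', DateTime,
                       onupdate=datetime.datetime.utcnow))

engine = create_engine('sqlite://')
metadata.create_all(engine)
with engine.begin() as conn:
    conn.execute(volumes.insert(), {'id': 1})
    # Explicitly setting updated_at = updated_at bypasses the onupdate
    # hook for this statement, leaving the existing timestamp intact.
    conn.execute(volumes.update().where(volumes.c.id == 1).values(
        deleted=True,
        deleted_at=datetime.datetime.utcnow(),
        updated_at=literal_column('updated_at')))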
Example #43
 def _stub_service_get_all_by_topic(context, topic):
     return [{
         'availability_zone': "fake_az",
         'host': 'test_host',
         'disabled': 0,
         'updated_at': timeutils.utcnow()
     }]
Example #44
def volume_type_destroy(context, name):
    session = get_session()
    with session.begin():
        volume_type_ref = volume_type_get_by_name(context, name,
                                                  session=session)
        volume_type_id = volume_type_ref['id']
        session.query(models.VolumeTypes).\
                filter_by(id=volume_type_id).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.VolumeTypeExtraSpecs).\
                filter_by(volume_type_id=volume_type_id).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})
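Example #45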
 def test_create_cloned_volume_clonefails(self, connmock):
     self.drv.conn = connmock.return_value
     srcvol = {'id': 'cinderVolumeID'}
     dstvol = {'project_id': 'testproject',
               'name': 'cinderVolumeName',
               'size': 1,
               'id': 'cinderVolumeID-dst',
               'volume_type_id': None,
               'created_at': timeutils.utcnow()}
     get_effect = [basic_vol_response,
                   basic_snapshot_response[0], ]
     self.drv.conn.get.side_effect = get_effect
     # also mock _getSnapshotByName because of the random snapshotname.
     self.drv._get_snapshot_by_name = mock.MagicMock()
     self.drv._get_snapshot_by_name.return_value = \
         basic_snapshot_response[0]
     cmd = {'VolumeUUID': 'FakeBasicVolID',
            'Name': 'mockedFakeUUID'}
     cmd2 = {"ParentLayerId": "407115424bb9539c",
             "Name": "cinderVolumeID-dst",
             "PolicyUUID": "00000000-00000000-0000-000000000000"}
     self.drv.conn.put.side_effect = requests.exceptions.HTTPError
     with mock.patch('cinder.volume.drivers.fusionio.ioControl.uuid',
                     autospec=True) as uuidmock:
         uuidmock.uuid4.return_value = cmd['Name']
         self.assertRaises(requests.exceptions.HTTPError,
                           self.drv.create_cloned_volume,
                           dstvol, srcvol)
     expected = [mock.call.get('TierStore/Volumes/by-id/'),
                 mock.call.post('TierStore/Snapshots/by-id/', cmd),
                 mock.call.put(('TierStore/Snapshots/functions/' +
                                'CloneSnapshot'), cmd2),
                 mock.call.delete(('TierStore/Snapshots/by-id/' +
                                   cmd2['ParentLayerId'])), ]
     self.drv.conn.assert_has_calls(expected)
Example #46
    def _reschedule(self, context, request_spec, filter_properties,
                    volume_id, scheduler_method, method_args,
                    exc_info=None):
        """Attempt to re-schedule a volume operation."""

        retry = filter_properties.get('retry', None)
        if not retry:
            # no retry information, do not reschedule.
            LOG.debug(_("Retry info not present, will not reschedule"))
            return

        if not request_spec:
            LOG.debug(_("No request spec, will not reschedule"))
            return

        request_spec['volume_id'] = volume_id

        LOG.debug(_("volume %(volume_id)s: re-scheduling %(method)s "
                    "attempt %(num)d") %
                  {'volume_id': volume_id,
                   'method': scheduler_method.func_name,
                   'num': retry['num_attempts']})

        # reset the volume state:
        now = timeutils.utcnow()
        self.db.volume_update(context, volume_id,
                              {'status': 'creating',
                               'scheduled_at': now})

        if exc_info:
            # stringify to avoid circular ref problem in json serialization:
            retry['exc'] = traceback.format_exception(*exc_info)

        scheduler_method(context, *method_args)
        return True
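_reschedule only does work when the scheduler has left retry bookkeeping in filter_properties; without it the method returns None and the failure is final. A sketch of the shape this code reads (key names taken from the snippet above; anything beyond them is an assumption):

filter_properties = {
    'retry': {
        'num_attempts': 1,   # logged in the debug message above
        # 'exc' is filled in by _reschedule itself when exc_info is given
    },
}
# scheduler_method is expected to be something like
# self.scheduler_rpcapi.create_volume, with method_args matching it.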
Example #47
    def _pre_reschedule(self, context, volume_id):
        """Actions that happen before the rescheduling attempt occur here."""

        try:
            # Reset the volume state.
            #
            # NOTE(harlowja): this is awkward to be done here, shouldn't
            # this happen at the scheduler itself and not before it gets
            # sent to the scheduler? (since what happens if it never gets
            # there??). It's almost like we need a status of 'on-the-way-to
            # scheduler' in the future.
            update = {
                'status': 'creating',
                'scheduled_at': timeutils.utcnow(),
            }
            LOG.debug(
                _("Updating volume %(volume_id)s with %(update)s.") % {
                    'update': update,
                    'volume_id': volume_id
                })
            self.db.volume_update(context, volume_id, update)
        except exception.CinderException:
            # Don't let resetting the status cause the rescheduling to fail.
            LOG.exception(_("Volume %s: resetting 'creating' status failed."),
                          volume_id)
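The NOTE points at the real gap: between this status reset and the scheduler actually receiving the request, the volume is merely 'creating' with nothing in flight. In practice the two helpers compose inside the failure path of the create flow; a rough sketch of that shape, where _create_raw_volume and the exact call sites are assumptions about the surrounding code:

try:
    self._create_raw_volume(context, volume_ref)
except Exception:
    # Hypothetical composition; save_and_reraise_exception comes from
    # the era's openstack.common.excutils.
    with excutils.save_and_reraise_exception():
        self._pre_reschedule(context, volume_id)
        self._reschedule(context, request_spec, filter_properties,
                         volume_id,
                         self.scheduler_rpcapi.create_volume,
                         method_args, exc_info=sys.exc_info())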
Example #48
    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()

        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())

        if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
                os.environ.get('OS_STDOUT_CAPTURE') == '1'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
                os.environ.get('OS_STDERR_CAPTURE') == '1'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

        self.log_fixture = self.useFixture(fixtures.FakeLogger())

        conf_fixture.set_defaults(CONF)
        CONF([], default_config_files=[])

        # NOTE(vish): We need a better method for creating fixtures for tests
        #             now that we have some required db setup for the system
        #             to work properly.
        self.start = timeutils.utcnow()

        CONF.set_default('connection', 'sqlite://', 'database')
        CONF.set_default('sqlite_synchronous', False)

        global _DB_CACHE
        if not _DB_CACHE:
            _DB_CACHE = Database(session, migration,
                                 sql_connection=CONF.database.connection,
                                 sqlite_db=CONF.sqlite_db,
                                 sqlite_clean_db=CONF.sqlite_clean_db)
        self.useFixture(_DB_CACHE)

        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
        self.mox = mox.Mox()
        self.stubs = stubout.StubOutForTesting()
        self.addCleanup(CONF.reset)
        self.addCleanup(self.mox.UnsetStubs)
        self.addCleanup(self.stubs.UnsetAll)
        self.addCleanup(self.stubs.SmartUnsetAll)
        self.addCleanup(self.mox.VerifyAll)
        self.injected = []
        self._services = []

        CONF.set_override('fatal_exception_format_errors', True)
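Subclasses get all of this for free, so a typical test only asserts behavior; self.start in particular gives every test a timestamp to compare against. A minimal sketch, assuming this TestCase is the importable test base class:

class ExampleTimeTest(TestCase):
    def test_start_precedes_now(self):
        # self.start was recorded by the setUp above.
        self.assertTrue(self.start <= timeutils.utcnow())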
Example #49
    def execute(self, context, volume, volume_spec):
        volume_id = volume['id']
        new_status = self.status_translation.get(volume_spec.get('status'),
                                                 'available')
        update = {
            'status': new_status,
            'launched_at': timeutils.utcnow(),
        }
        try:
            # TODO(harlowja): is it acceptable to only log if this fails??
            # or are there other side-effects that this will cause if the
            # status isn't updated correctly (aka it will likely be stuck in
            # 'building' if this fails)??
            volume_ref = self.db.volume_update(context, volume_id, update)
            # Now use the parent to notify.
            super(CreateVolumeOnFinishTask, self).execute(context, volume_ref)
        except exception.CinderException:
            LOG.exception(_("Failed updating volume %(volume_id)s with "
                            "%(update)s") % {'volume_id': volume_id,
                                             'update': update})
        # Even if the update fails, the volume is ready.
        msg = _("Volume %(volume_name)s (%(volume_id)s): created successfully")
        LOG.info(msg % {
            'volume_name': volume_spec['volume_name'],
            'volume_id': volume_id,
        })
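status_translation lets a flow request a special final status while everything else lands on 'available'. A small sketch of the lookup behavior (the mapping's contents here are an assumption for illustration):

status_translation = {'migration_target_creating': 'migration_target'}

status_translation.get('migration_target_creating', 'available')
# -> 'migration_target'
status_translation.get(None, 'available')
# -> 'available' (missing or unknown status falls back)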
Example #50
    def _reschedule(
        self, context, request_spec, filter_properties, volume_id, scheduler_method, method_args, exc_info=None
    ):
        """Attempt to re-schedule a volume operation."""

        retry = filter_properties.get("retry", None)
        if not retry:
            # no retry information, do not reschedule.
            LOG.debug(_("Retry info not present, will not reschedule"))
            return

        if not request_spec:
            LOG.debug(_("No request spec, will not reschedule"))
            return

        request_spec["volume_id"] = volume_id

        LOG.debug(
            _("volume %(volume_id)s: re-scheduling %(method)s " "attempt %(num)d")
            % {"volume_id": volume_id, "method": scheduler_method.func_name, "num": retry["num_attempts"]}
        )

        # reset the volume state:
        now = timeutils.utcnow()
        self.db.volume_update(context, volume_id, {"status": "creating", "scheduled_at": now})

        if exc_info:
            # stringify to avoid circular ref problem in json serialization:
            retry["exc"] = traceback.format_exception(*exc_info)

        scheduler_method(context, *method_args)
        return True
Example #51
def _list_hosts(req, service=None):
    """Returns a summary list of hosts."""
    curr_time = timeutils.utcnow()
    context = req.environ['cinder.context']
    services = db.service_get_all(context, False)
    zone = ''
    if 'zone' in req.GET:
        zone = req.GET['zone']
    if zone:
        services = [s for s in services if s['availability_zone'] == zone]
    hosts = []
    for host in services:
        delta = curr_time - (host['updated_at'] or host['created_at'])
        alive = abs(utils.total_seconds(delta)) <= CONF.service_down_time
        status = (alive and "available") or "unavailable"
        active = 'enabled'
        if host['disabled']:
            active = 'disabled'
        LOG.debug('status, active and update: %s, %s, %s' %
                  (status, active, host['updated_at']))
        hosts.append({'host_name': host['host'],
                      'service': host['topic'],
                      'zone': host['availability_zone'],
                      'service-status': status,
                      'service-state': active,
                      'last-update': host['updated_at']})
    if service:
        hosts = [host for host in hosts
                 if host["service"] == service]
    return hosts
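The availability test is plain timestamp arithmetic: a host is "available" only when its last heartbeat (updated_at, or created_at if it never reported) is within CONF.service_down_time seconds of now. A worked sketch, assuming a 60-second threshold for illustration:

import datetime

service_down_time = 60  # assumed threshold, for illustration only
curr_time = timeutils.utcnow()
updated_at = curr_time - datetime.timedelta(seconds=90)

delta = curr_time - updated_at
alive = abs(utils.total_seconds(delta)) <= service_down_time
# 90 > 60, so alive is False and the host reports "unavailable"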
Example #52
    def _cast_create_volume(self, context, request_spec, filter_properties):
        source_volid = request_spec["source_volid"]
        source_replicaid = request_spec["source_replicaid"]
        volume_id = request_spec["volume_id"]
        snapshot_id = request_spec["snapshot_id"]
        image_id = request_spec["image_id"]
        group_id = request_spec["consistencygroup_id"]
        host = None

        if group_id:
            group = self.db.consistencygroup_get(context, group_id)
            if group:
                host = group.get("host", None)
        elif snapshot_id and CONF.snapshot_same_host:
            # NOTE(Rongze Zhu): A simple solution for bug 1008866.
            #
            # If snapshot_id is set, make the call create volume directly to
            # the volume host where the snapshot resides instead of passing it
            # through the scheduler. So snapshot can be copy to new volume.
            snapshot_ref = self.db.snapshot_get(context, snapshot_id)
            source_volume_ref = self.db.volume_get(context, snapshot_ref["volume_id"])
            host = source_volume_ref["host"]
        elif source_volid:
            source_volume_ref = self.db.volume_get(context, source_volid)
            host = source_volume_ref["host"]
        elif source_replicaid:
            source_volume_ref = self.db.volume_get(context, source_replicaid)
            host = source_volume_ref["host"]

        if not host:
            # Cast to the scheduler and let it handle whatever is needed
            # to select the target host for this volume.
            self.scheduler_rpcapi.create_volume(
                context,
                CONF.volume_topic,
                volume_id,
                snapshot_id=snapshot_id,
                image_id=image_id,
                request_spec=request_spec,
                filter_properties=filter_properties,
            )
        else:
            # Bypass the scheduler and send the request directly to the volume
            # manager.
            now = timeutils.utcnow()
            values = {"host": host, "scheduled_at": now}
            volume_ref = self.db.volume_update(context, volume_id, values)
            self.volume_rpcapi.create_volume(
                context,
                volume_ref,
                volume_ref["host"],
                request_spec,
                filter_properties,
                allow_reschedule=False,
                snapshot_id=snapshot_id,
                image_id=image_id,
                source_volid=source_volid,
                source_replicaid=source_replicaid,
                consistencygroup_id=group_id,
            )
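Read top to bottom, the method is a precedence chain for picking a host before falling back to the scheduler. A condensed sketch of that chain (a simplification of the elif cascade above, not code from cinder):

def _pick_host(group_host, snapshot_host, clone_host, replica_host):
    # First truthy host wins; None means "let the scheduler decide".
    # snapshot_host should only be passed when CONF.snapshot_same_host
    # is set, mirroring the elif guard above.
    for host in (group_host, snapshot_host, clone_host, replica_host):
        if host:
            return host
    return None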