Example #1
 def run_repeat_on_errors(self, cmd, **kwargs):
     retrier = retrying.Retry(
         max_attempts=CONF.migrate.retry,
         reraise_original_exception=True,
         timeout=0,
     )
     return retrier.run(self.run, cmd, **kwargs)
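Every snippet in this listing uses the project's retrying.Retry helper either by calling retrier.run(func, *args, **kwargs) or by applying the Retry instance as a decorator (@retry). For orientation only, here is a minimal, self-contained stand-in sketching the attempt-based part of that interface; it is not the project's implementation, and details such as the default values and treating timeout as a sleep interval between attempts are assumptions.

# Simplified stand-in for the interface used in these examples; NOT the
# project's retrying module. Defaults and sleep handling are assumptions.
import functools
import time


class MaxAttemptsReached(Exception):
    """All attempts failed and the original exception is not re-raised."""


class Retry(object):

    def __init__(self, max_attempts=3, timeout=1,
                 reraise_original_exception=False):
        self.max_attempts = max_attempts
        self.timeout = timeout  # seconds slept between attempts (assumed)
        self.reraise_original_exception = reraise_original_exception
        self.attempt = 0

    def run(self, func, *args, **kwargs):
        last_error = None
        for self.attempt in range(1, self.max_attempts + 1):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                last_error = e
                time.sleep(self.timeout)
        if self.reraise_original_exception and last_error is not None:
            raise last_error
        raise MaxAttemptsReached(last_error)

    def __call__(self, func):
        # Lets a Retry instance be used as a decorator, as in the tests below.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            return self.run(func, *args, **kwargs)
        return wrapper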
Example #2
 def _dst_images(self):
     dst_images = {}
     keystone = self.cloud.resources["identity"]
     LOG.info("Retrieving list of images from destination to make sure "
              "images are not migrated twice. May take awhile, please be "
              "patient.")
     for dst_image in self.get_image_list():
         LOG.debug("Working on destination image '%s (%s)'", dst_image.name,
                   dst_image.id)
         retryer = retrying.Retry(max_attempts=self.config.migrate.retry,
                                  reraise_original_exception=True)
         try:
             # Destination cloud sporadically fails with Unauthorized for
             # random images, thus this logic; see CF-385
             tenant_name = retryer.run(
                 keystone.try_get_tenant_name_by_id,
                 dst_image.owner,
                 default=self.cloud.cloud_config.cloud.tenant)
             image_key = (dst_image.name, tenant_name, dst_image.checksum,
                          dst_image.is_public)
             dst_images[image_key] = dst_image
         except keystone_exceptions.Unauthorized:
             LOG.warning("Authorization failed in destination keystone, "
                         "image '%s (%s)' may be migrated twice later!")
     return dst_images
Example #3
    def migrate_volume(self, src_volume):
        """Creates volume on destination and copies volume data from source"""
        LOG.info("Checking if volume '%s' already present in destination",
                 volume_name(src_volume))
        dst_cinder = self.dst_cloud.resources[utils.STORAGE_RESOURCE]

        dst_volume = dst_cinder.get_migrated_volume(src_volume['id'])
        volume_exists_in_destination = (dst_volume is not None
                                        and dst_volume.status
                                        in ['available', 'in-use'])

        if not volume_exists_in_destination:
            try:
                src_volume_object = self.src_cinder_backend.get_volume_object(
                    self.src_cloud, src_volume['id'])
                LOG.debug("Backing file for source volume: %s",
                          src_volume_object)

                dst_volume = self._create_volume(src_volume)

                # It takes time to create volume object
                timeout = self.cfg.migrate.storage_backend_timeout
                retryer = retrying.Retry(max_time=timeout)
                dst_volume_object = retryer.run(
                    self.dst_cinder_backend.get_volume_object, self.dst_cloud,
                    dst_volume.id)

                LOG.debug("Backing file for volume in destination: %s",
                          dst_volume_object)
                LOG.info("Starting volume copy from %s to %s",
                         src_volume_object, dst_volume_object)
                self.copy_volume_data(src_volume_object, dst_volume_object)
            except (plugins.base.VolumeObjectNotFoundError,
                    retrying.TimeoutExceeded,
                    exception.TenantNotPresentInDestination,
                    cinder_exceptions.OverLimit,
                    copy_mechanisms.CopyFailed) as e:
                LOG.warning("%(error)s, volume %(name)s will be skipped", {
                    'error': e.message,
                    'name': volume_name(src_volume)
                })

                if dst_volume is not None:
                    msg = ("Removing volume {name} from destination "
                           "since it didn't migrate properly".format(
                               name=volume_name(dst_volume)))
                    LOG.info(msg)
                    self.delete_volume(dst_volume)
            finally:
                if dst_volume is not None:
                    self.dst_cinder_backend.cleanup(self.dst_cloud,
                                                    dst_volume.id)
        else:
            LOG.info(
                "Volume '%s' is already present in destination cloud, "
                "skipping", src_volume['id'])

        return dst_volume
Example #4
    def test_raises_expected_exception(self, sleep_mock):
        retry = retrying.Retry(max_attempts=10, expected_exceptions=[KeyError])

        @retry
        def func():
            raise KeyError()

        self.assertRaises(KeyError, func)
        self.assertEqual(retry.attempt, 1)
        self.assertFalse(sleep_mock.called)
Example #5
    def test_raises_error_if_predicate_failed_after_timeout(self, sleep_mock):
        def func():
            return 0

        retry = retrying.Retry(max_time=100,
                               predicate_retval_as_arg=True,
                               predicate=lambda rv: rv == 1)

        self.assertRaises(retrying.TimeoutExceeded, retry.run, func)
        self.assertTrue(retry.total_time >= retry.max_time)
        self.assertTrue(sleep_mock.called)
Example #6
    def test_stops_if_retval_matches_predicate(self, sleep_mock):
        def func():
            return 0

        retry = retrying.Retry(max_attempts=5,
                               predicate_retval_as_arg=True,
                               predicate=lambda rv: rv == 0)

        retry.run(func)
        self.assertEqual(retry.attempt, 1)
        self.assertFalse(sleep_mock.called)
Example #7
    def test_raises_last_error_if_all_attempts_failed(self, sleep_mock):
        retry = retrying.Retry(max_attempts=5,
                               wait_exponential=False,
                               reraise_original_exception=True)

        @retry
        def func():
            raise ValueError()

        self.assertRaises(ValueError, func)
        self.assertEqual(retry.attempt, retry.max_attempts)
        self.assertEqual(sleep_mock.call_count, retry.max_attempts)
Example #8
    def test_raises_timeout_error_if_timedout(self, sleep_mock):
        retry = retrying.Retry(max_time=100,
                               wait_exponential=True,
                               reraise_original_exception=False)

        @retry
        def func():
            raise RuntimeError()

        self.assertRaises(retrying.TimeoutExceeded, func)
        self.assertTrue(retry.total_time >= retry.max_time)
        self.assertTrue(sleep_mock.called)
Example #9
    def test_retries_until_timed_out(self, sleep_mock):
        retry = retrying.Retry(max_time=100,
                               wait_exponential=True,
                               raise_error=False)

        @retry
        def func():
            raise RuntimeError()

        func()

        self.assertTrue(retry.total_time >= retry.max_time)
        self.assertTrue(sleep_mock.called)
Example #10
    def test_returns_object_returned_by_function(self, sleep_mock):
        expected_rv = 0

        def func():
            return expected_rv

        retry = retrying.Retry(max_attempts=10,
                               predicate_retval_as_arg=True,
                               predicate=lambda rv: rv == expected_rv)

        actual_rv = retry.run(func)
        self.assertEqual(1, retry.attempt)
        self.assertEqual(expected_rv, actual_rv)
        self.assertFalse(sleep_mock.called)
Example #11
    def create_volume_from_volume(self, volume, tenant_id):
        """Creates volume based on values from :param volume: and adds
        metadata in order to not copy already copied volumes

        :param volume: CF volume object (dict)

        :raises: retrying.TimeoutExceeded if volume did not become available
        in migrate.storage_backend_timeout time
        """

        glance = self.cloud.resources[utils.IMAGE_RESOURCE]
        compute = self.cloud.resources[utils.COMPUTE_RESOURCE]
        az_mapper = compute.attr_override

        metadata = volume.get('metadata', {})
        metadata[MIGRATED_VOLUMES_METADATA_KEY] = volume['id']

        image_id = None
        if volume['bootable']:
            image_metadata = volume['volume_image_metadata']
            dst_image = glance.get_matching_image(
                uuid=image_metadata['image_id'],
                size=image_metadata['size'],
                name=image_metadata['image_name'],
                checksum=image_metadata['checksum'])
            if dst_image:
                image_id = dst_image.id

        src_az = compute.get_availability_zone(volume['availability_zone'])

        created_volume = self.create_volume(
            size=volume['size'],
            project_id=tenant_id,
            display_name=volume['display_name'],
            display_description=volume['display_description'],
            availability_zone=src_az or az_mapper.get_attr(
                volume, 'availability_zone'),
            metadata=metadata,
            imageRef=image_id)

        timeout = self.config.migrate.storage_backend_timeout
        retryer = retrying.Retry(max_time=timeout,
                                 predicate=lambda v: v.status == 'available',
                                 predicate_retval_as_arg=True,
                                 retry_message="Volume is not available")

        retryer.run(self.get_volume_by_id, created_volume.id)

        return created_volume
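Example #11 combines max_time, predicate and predicate_retval_as_arg into a "poll until the resource reaches the desired state" loop. A rough, self-contained sketch of that polling pattern follows; wait_for, check, interval and the local TimeoutExceeded class are illustrative names, not the project's API.

# Hedged sketch of the predicate-driven polling pattern; simplified stand-in
# only, the real retrying module has more options.
import time


class TimeoutExceeded(Exception):
    """Raised when the predicate never passes within max_time."""


def wait_for(check, predicate, max_time, interval=1):
    """Call check() until predicate(retval) is true or max_time expires."""
    deadline = time.time() + max_time
    while True:
        retval = check()
        if predicate(retval):
            return retval
        if time.time() >= deadline:
            raise TimeoutExceeded(
                "condition still failing after %s seconds" % max_time)
        time.sleep(interval)


# Roughly equivalent to the retryer.run() call above:
# wait_for(lambda: self.get_volume_by_id(created_volume.id),
#          lambda v: v.status == 'available',
#          max_time=self.config.migrate.storage_backend_timeout)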
Example #12
    def test_retries_on_invalid_return_value(self, sleep_mock):
        bad_value = 10

        retry = retrying.Retry(max_attempts=5,
                               retry_on_return_value=True,
                               return_value=bad_value,
                               raise_error=False)

        @retry
        def func():
            return bad_value

        func()

        self.assertEqual(retry.attempt, retry.max_attempts)
        self.assertEqual(sleep_mock.call_count, retry.max_attempts)
Example #13
    def transfer(self, data):
        src_host = data['host_src']
        src_path = data['path_src']
        dst_host = data['host_dst']
        dst_path = data['path_dst']

        options = CONF.bbcp.options
        additional_options = []
        # -f: forces the copy by first unlinking the target file before
        # copying.
        # -p: preserve source mode, ownership, and dates.
        forced_options = ['-f', '-p']
        if CONF.migrate.copy_with_md5_verification:
            # -e: error check data for transmission errors using md5 checksum.
            forced_options.append('-e')
        for o in forced_options:
            if o not in options:
                additional_options.append(o)
        # -S: command to start bbcp on the source node.
        # -T: command to start bbcp on the target node.
        for o in ('-S', '-T'):
            if o not in options:
                additional_options.append(o + " '{bbcp_cmd}'")
        bbcp_cmd = "ssh {ssh_opts} %I -l %U %H bbcp".format(
            ssh_opts=ssh_util.default_ssh_options())
        options += ' ' + ' '.join(additional_options).format(bbcp_cmd=bbcp_cmd)
        cmd = ("{bbcp} {options} "
               "{src_user}@{src_host}:{src_path} "
               "{dst_user}@{dst_host}:{dst_path} "
               "2>&1")
        retrier = retrying.Retry(
            max_attempts=CONF.migrate.retry,
            timeout=0,
        )
        try:
            retrier.run(local.run, cmd.format(bbcp=CONF.bbcp.path,
                                              options=options,
                                              src_user=CONF.src.ssh_user,
                                              dst_user=CONF.dst.ssh_user,
                                              src_host=src_host,
                                              dst_host=dst_host,
                                              src_path=src_path,
                                              dst_path=dst_path),
                        capture_output=False)
        except retrying.MaxAttemptsReached:
            self.clean_dst(data)
            raise base.FileCopyError(**data)
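The try/except here shows a common shape: retry a shell command a fixed number of times and, once attempts are exhausted, clean up partial data on the destination before raising a domain-specific error. A simplified, self-contained sketch of that shape is below; copy_with_retries, cleanup and CopyError are hypothetical names, and subprocess stands in for the project's local.run helper.

# Hedged sketch of "retry, then clean up and translate the error";
# names here are illustrative and not part of the project.
import subprocess


class CopyError(Exception):
    """Raised when the copy command keeps failing after all retries."""


def copy_with_retries(cmd, cleanup, max_attempts=3):
    """Run cmd up to max_attempts times; if every attempt fails, remove
    whatever partial data was left behind and raise CopyError."""
    for attempt in range(1, max_attempts + 1):
        try:
            subprocess.check_call(cmd, shell=True)
            return
        except subprocess.CalledProcessError:
            if attempt == max_attempts:
                cleanup()
                raise CopyError("command failed after %d attempts: %s"
                                % (max_attempts, cmd))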
Example #14
    def get_client(self):
        """ Getting keystone client using authentication with admin auth token.

        :return: OpenStack Keystone Client instance
        """
        def func():
            auth_ref = self._get_client_by_creds().auth_ref
            return keystone_client.Client(auth_ref=auth_ref,
                                          endpoint=self.config.cloud.auth_url,
                                          cacert=self.config.cloud.cacert,
                                          insecure=self.config.cloud.insecure)

        retrier = retrying.Retry(
            max_attempts=cfglib.CONF.migrate.retry,
            expected_exceptions=[ks_exceptions.Unauthorized],
            reraise_original_exception=True)
        return retrier.run(func)
Example #15
    def test_retries_if_predicate_fails(self, sleep_mock):
        def always_fail():
            return False

        retry = retrying.Retry(max_attempts=5,
                               wait_exponential=True,
                               raise_error=False,
                               predicate=always_fail)

        @retry
        def func():
            pass

        func()

        self.assertTrue(retry.attempt >= retry.max_attempts)
        self.assertEqual(sleep_mock.call_count, retry.max_attempts)
Example #16
    def test_does_not_retry_if_predicate_succeeds(self, sleep_mock):
        def always_succeeds():
            return True

        retry = retrying.Retry(max_attempts=5,
                               wait_exponential=True,
                               raise_error=False,
                               predicate=always_succeeds)

        @retry
        def func():
            pass

        func()

        self.assertEqual(retry.attempt, 1)
        self.assertFalse(sleep_mock.called)