Example #1
def retry_upon_exception(exc, delay, max_delay, max_attempts):
    return tenacity.retry(reraise=True,
                          retry=tenacity.retry_if_exception_type(exc),
                          wait=tenacity.wait_exponential(
                                multiplier=delay, max=max_delay),
                          stop=tenacity.stop_after_attempt(max_attempts),
                          before=_log_before_retry, after=_log_after_retry)
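
A minimal usage sketch for the factory above (the decorated function and its arguments are illustrative; `_log_before_retry`/`_log_after_retry` are module-level helpers of the original project, not shown here):

@retry_upon_exception(IOError, delay=0.5, max_delay=10, max_attempts=5)
def read_status(path):
    # Retried with exponential backoff while IOError keeps being raised,
    # then re-raised (reraise=True) once the fifth attempt fails.
    with open(path) as f:
        return f.read()
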
    def call_func(self, func, **kwargs):
        """General method for calling any Monasca API function."""
        @tenacity.retry(
            wait=tenacity.wait_fixed(self._retry_interval),
            stop=tenacity.stop_after_attempt(self._max_retries),
            retry=(tenacity.retry_if_exception_type(MonascaServiceException) |
                   tenacity.retry_if_exception_type(MonascaException)))
        def _inner():
            try:
                return func(**kwargs)
            except (exc.http.InternalServerError,
                    exc.http.ServiceUnavailable,
                    exc.http.BadGateway,
                    exc.connection.ConnectionError) as e:
                LOG.exception(e)
                msg = '%s: %s' % (e.__class__.__name__, e)
                raise MonascaServiceException(msg)
            except exc.http.HttpError as e:
                LOG.exception(e)
                msg = '%s: %s' % (e.__class__.__name__, e)
                status_code = e.http_status
                if not isinstance(status_code, int):
                    status_code = 500
                if 400 <= status_code < 500:
                    raise MonascaInvalidParametersException(msg)
                else:
                    raise MonascaException(msg)
            except Exception as e:
                LOG.exception(e)
                msg = '%s: %s' % (e.__class__.__name__, e)
                raise MonascaException(msg)

        return _inner()
    def validate(self, max_retries=0):
        """Performs basic **connection** validation of a sqlalchemy engine."""

        def _retry_on_exception(exc):
            LOG.warning("Engine connection (validate) failed due to '%s'", exc)
            if isinstance(exc, sa_exc.OperationalError) and \
               _is_db_connection_error(six.text_type(exc.args[0])):
                # We may be able to fix this by retrying...
                return True
            if isinstance(exc, (sa_exc.TimeoutError,
                                sa_exc.ResourceClosedError,
                                sa_exc.DisconnectionError)):
                # We may be able to fix this by retrying...
                return True
            # Other failures we likely can't fix by retrying...
            return False

        @tenacity.retry(
            stop=tenacity.stop_after_attempt(max(0, int(max_retries))),
            wait=tenacity.wait_exponential(),
            reraise=True,
            retry=tenacity.retry_if_exception(_retry_on_exception)
        )
        def _try_connect(engine):
            # See if we can make a connection happen.
            #
            # NOTE(harlowja): note that even though we are connecting
            # once it does not mean that we will be able to connect in
            # the future, so this is more of a sanity test and is not
            # complete connection insurance.
            with contextlib.closing(engine.connect()):
                pass

        _try_connect(self._engine)
Example #4
def retry_on_db_error(func, retry=None):
    """Decorates the given function so that it retries on DB errors.

    Note that the decorator retries the function/method only on some
    of the DB errors that are considered to be worth retrying, like
    deadlocks and disconnections.

    :param func: Function to decorate.
    :param retry: a Retrying object
    :return: Decorated function.
    """
    if not retry:
        retry = tenacity.Retrying(
            retry=tenacity.retry_if_exception_type(_RETRY_ERRORS),
            stop=tenacity.stop_after_attempt(50),
            wait=tenacity.wait_incrementing(start=0, increment=0.1, max=2)
        )

    # The `assigned` arg should be empty as some of the default values are not
    # supported by simply initialized MagicMocks. The consequence may
    # be that the representation will contain the wrapper and not the
    # wrapped function.
    @functools.wraps(func, assigned=[])
    def decorate(*args, **kw):
        # Retrying library decorator might potentially run a decorated
        # function within a new thread so it's safer not to apply the
        # decorator directly to a target method/function because we can
        # lose an authentication context.
        # The solution is to create one more function and explicitly set
        # auth context before calling it (potentially in a new thread).
        auth_ctx = context.ctx() if context.has_ctx() else None

        return retry.call(_with_auth_context, auth_ctx, func, *args, **kw)

    return decorate
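
A hedged usage sketch for the decorator above: `_RETRY_ERRORS` is the project's tuple of retry-worthy DB exceptions, and the oslo.db exception types below are an assumption about what it typically contains; `save_record` and `_insert_into_db` are illustrative.

from oslo_db import exception as db_exc

# Assumed contents of the module-level tuple referenced by retry_on_db_error.
_RETRY_ERRORS = (db_exc.DBDeadlock, db_exc.DBConnectionError)

@retry_on_db_error
def save_record(values):
    # Retried up to 50 times with an incrementing wait on deadlock or
    # disconnection, per the default Retrying object built above.
    return _insert_into_db(values)  # hypothetical DB helper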
Example #5
    def fetch(self, url, verb='get', **kwargs):
        """return response or None in case of failure, try twice"""
        @retry(stop=stop_after_attempt(2), wait=wait_fixed(2))
        def _fetch(verb='get'):
            headers = {
                'Authorization': 'apiToken %s' % C.DEFAULT_SHIPPABLE_TOKEN
            }

            logging.info(u'%s %s' % (verb, url))
            http_method = getattr(requests, verb)
            resp = http_method(url, headers=headers, **kwargs)
            logging.info(u'shippable status code: %s' % resp.status_code)
            logging.info(u'shippable reason: %s' % resp.reason)

            if resp.status_code not in [200, 302, 400]:
                logging.error(u'RC: %s', resp.status_code)
                raise TryAgain

            return resp

        try:
            logging.debug(u'%s', url)
            return _fetch(verb=verb)
        except RetryError as e:
            logging.error(e)
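
The snippet above uses tenacity names without the module prefix; a sketch of the imports it assumes:

from tenacity import RetryError, TryAgain, retry, stop_after_attempt, wait_fixed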
Example #6
def upgrade():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([
        cfg.BoolOpt('skip-gnocchi-resource-types',
                    help='Skip gnocchi resource-types upgrade.',
                    default=False),
        cfg.IntOpt('retry',
                   min=0,
                   help='Number of times to retry on failure. '
                   'Default is to retry forever.'),
    ])

    service.prepare_service(conf=conf)
    if conf.skip_gnocchi_resource_types:
        LOG.info("Skipping Gnocchi resource types upgrade")
    else:
        LOG.debug("Upgrading Gnocchi resource types")
        from ceilometer import gnocchi_client
        from gnocchiclient import exceptions
        if conf.retry is None:
            stop = tenacity.stop_never
        else:
            stop = tenacity.stop_after_attempt(conf.retry)
        tenacity.Retrying(
            stop=stop,
            retry=tenacity.retry_if_exception_type((
                exceptions.ConnectionFailure,
                exceptions.UnknownConnectionError,
                exceptions.ConnectionTimeout,
                exceptions.SSLError,
            ))
        )(gnocchi_client.upgrade_resource_types, conf)
Example #7
def retry_random_upon_exception(exc, delay=0.5, max_delay=5,
                                max_attempts=DEFAULT_MAX_ATTEMPTS):
    return tenacity.retry(reraise=True,
                          retry=tenacity.retry_if_exception_type(exc),
                          wait=tenacity.wait_random_exponential(
                              multiplier=delay, max=max_delay),
                          stop=tenacity.stop_after_attempt(max_attempts),
                          before=_log_before_retry, after=_log_after_retry)
Example #8
def _safe_mongo_call(max_retries, retry_interval):
    return tenacity.retry(
        retry=tenacity.retry_if_exception_type(
            pymongo.errors.AutoReconnect),
        wait=tenacity.wait_fixed(retry_interval),
        stop=(tenacity.stop_after_attempt(max_retries) if max_retries >= 0
              else tenacity.stop_never)
    )
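
A usage sketch (the decorated helper and its arguments are illustrative, not part of the original module):

@_safe_mongo_call(max_retries=5, retry_interval=2)
def _bulk_insert(collection, documents):
    # Re-run on pymongo AutoReconnect, waiting 2 seconds between at most
    # 5 attempts as configured above.
    collection.insert_many(documents)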
Example #9
def retry_on_conflict(func):
    wrapper = tenacity.retry(
        stop=tenacity.stop_after_attempt(11),
        wait=tenacity.wait_random(max=0.002),
        retry=tenacity.retry_if_exception_type(exception.ConcurrentTransaction),
        reraise=True,
    )
    return wrapper(func)
Example #10
def decorator(func):
    @tenacity.retry(
        retry=tenacity.retry_if_exception(retry_on_retriable_kafka_error),
        wait=tenacity.wait_fixed(1),
        stop=tenacity.stop_after_attempt(retries),
        reraise=True
    )
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper
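
The predicate `retry_on_retriable_kafka_error` comes from the surrounding project; a hedged stand-in built on kafka-python's `retriable` error attribute might look like this:

from kafka.errors import KafkaError

def retry_on_retriable_kafka_error(exc):
    # Retry only for Kafka errors the client marks as retriable
    # (e.g. transient broker or network conditions).
    return isinstance(exc, KafkaError) and exc.retriable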
Example #11
def retry_upon_none_result(max_attempts, delay=0.5, max_delay=2, random=False):
    if random:
        wait_func = tenacity.wait_random_exponential(
            multiplier=delay, max=max_delay)
    else:
        wait_func = tenacity.wait_exponential(
            multiplier=delay, max=max_delay)
    return tenacity.retry(reraise=True,
                          retry=tenacity.retry_if_result(lambda x: x is None),
                          wait=wait_func,
                          stop=tenacity.stop_after_attempt(max_attempts),
                          before=_log_before_retry, after=_log_after_retry)
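
A usage sketch: `retry_if_result` drives the retry off the return value, so the wrapped call is repeated while it returns None (the client and resource below are illustrative):

@retry_upon_none_result(max_attempts=5)
def get_provisioning_status(client, resource_id):
    # Returns None until the backend reports a status; tenacity keeps
    # retrying with backoff and gives up after five attempts.
    return client.get(resource_id).get('status')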
Example #12
def get_connection_from_config(conf):
    retries = conf.database.max_retries

    @tenacity.retry(
        wait=tenacity.wait_fixed(conf.database.retry_interval),
        stop=(tenacity.stop_after_attempt(retries) if retries >= 0
              else tenacity.stop_never),
        reraise=True)
    def _inner():
        url = (getattr(conf.database, 'metering_connection') or
               conf.database.connection)
        return get_connection(conf, url)

    return _inner()
Example #13
def get_connection_from_config(conf, purpose='metering'):
    retries = conf.database.max_retries

    @tenacity.retry(
        wait=tenacity.wait_fixed(conf.database.retry_interval),
        stop=(tenacity.stop_after_attempt(retries) if retries >= 0
              else tenacity.stop_never),
        reraise=True)
    def _inner():
        namespace = 'ceilometer.%s.storage' % purpose
        url = (getattr(conf.database, '%s_connection' % purpose) or
               conf.database.connection)
        return get_connection(conf, url, namespace)

    return _inner()
Example #14
def get_connection_from_config(conf):
    retries = conf.database.max_retries

    @tenacity.retry(
        reraise=True,
        wait=tenacity.wait_fixed(conf.database.retry_interval),
        stop=(tenacity.stop_after_attempt(retries) if retries >= 0
              else tenacity.stop_never)
    )
    def _inner():
        url = (conf.database.connection or
               getattr(conf.database, 'event_connection', None))
        return get_connection(url, conf)

    return _inner()
Example #15
    def __init__(self, conf):
        super(GnocchiDispatcher, self).__init__(conf)
        self.conf = conf
        self.filter_service_activity = (
            conf.dispatcher_gnocchi.filter_service_activity)
        self._ks_client = keystone_client.get_client(conf)
        self.resources_definition = self._load_resources_definitions(conf)

        self.cache = None
        try:
            import oslo_cache
            oslo_cache.configure(self.conf)
            # NOTE(cdent): The default cache backend is a real but
            # noop backend. We don't want to use that here because
            # we want to avoid the cache pathways entirely if the
            # cache has not been configured explicitly.
            if self.conf.cache.enabled:
                cache_region = oslo_cache.create_region()
                self.cache = oslo_cache.configure_cache_region(
                    self.conf, cache_region)
                self.cache.key_mangler = cache_key_mangler
        except ImportError:
            pass
        except oslo_cache.exception.ConfigurationError as exc:
            LOG.warning(_LW('unable to configure oslo_cache: %s') % exc)

        self._gnocchi_project_id = None
        self._gnocchi_project_id_lock = threading.Lock()
        self._gnocchi_resource_lock = LockedDefaultDict(threading.Lock)

        self._gnocchi = gnocchi_client.get_gnocchiclient(conf)

        retries = conf.storage.max_retries

        @tenacity.retry(
            wait=tenacity.wait_fixed(conf.storage.retry_interval),
            stop=(tenacity.stop_after_attempt(retries) if retries >= 0
                  else tenacity.stop_never),
            reraise=True)
        def _get_connection():
            self._gnocchi.capabilities.list()

        try:
            _get_connection()
        except Exception:
            LOG.error(_LE('Failed to connect to Gnocchi.'))
            raise
Example #16
def get_connection_from_config(conf):
    retries = conf.database.max_retries
    url = conf.database.connection
    connection_scheme = urlparse.urlparse(url).scheme
    LOG.debug('looking for %(name)r driver in %(namespace)r',
              {'name': connection_scheme, 'namespace': _NAMESPACE})
    mgr = driver.DriverManager(_NAMESPACE, connection_scheme)

    @tenacity.retry(
        wait=tenacity.wait_fixed(conf.database.retry_interval),
        stop=tenacity.stop_after_attempt(retries if retries >= 0 else 5),
        reraise=True)
    def _get_connection():
        """Return an open connection to the database."""
        return mgr.driver(conf, url)

    return _get_connection()
Example #17
    def send_notification(self, target, ctxt, message, version, retry=None):
        if retry is None:
            retry = self._pika_engine.default_notification_retry_attempts

        def on_exception(ex):
            if isinstance(ex, (pika_drv_exc.ExchangeNotFoundException,
                               pika_drv_exc.RoutingException)):
                LOG.warning("Problem during sending notification. %s", ex)
                try:
                    self._declare_notification_queue_binding(target)
                except pika_drv_exc.ConnectionException as e:
                    LOG.warning("Problem during declaring notification queue "
                                "binding. %s", e)
                return True
            elif isinstance(ex, (pika_drv_exc.ConnectionException,
                                 pika_drv_exc.MessageRejectedException)):
                LOG.warning("Problem during sending notification. %s", ex)
                return True
            else:
                return False

        if retry:
            retrier = tenacity.retry(
                stop=(tenacity.stop_never if retry == -1 else
                      tenacity.stop_after_attempt(retry)),
                retry=tenacity.retry_if_exception(on_exception),
                wait=tenacity.wait_fixed(
                    self._pika_engine.notification_retry_delay
                )
            )
        else:
            retrier = None

        msg = pika_drv_msg.PikaOutgoingMessage(self._pika_engine, message,
                                               ctxt)
        return msg.send(
            exchange=(
                target.exchange or
                self._pika_engine.default_notification_exchange
            ),
            routing_key=target.topic,
            confirm=True,
            mandatory=True,
            persistent=self._pika_engine.notification_persistence,
            retrier=retrier
        )
    def send_request(self, socket, request):
        if hasattr(request, 'timeout'):
            _stop = tenacity.stop_after_delay(request.timeout)
        elif request.retry is not None and request.retry > 0:
            # no rpc_response_timeout option if notification
            _stop = tenacity.stop_after_attempt(request.retry)
        else:
            # well, now what?
            _stop = tenacity.stop_after_delay(60)

        @tenacity.retry(retry=tenacity.retry_if_exception_type(zmq.Again),
                        stop=_stop)
        def send_retrying():
            if request.msg_type in zmq_names.MULTISEND_TYPES:
                for _ in range(socket.connections_count()):
                    self.sender.send(socket, request)
            else:
                self.sender.send(socket, request)
        return send_retrying()
Example #19
def retry_upon_exception_exclude_error_codes(
    exc, excluded_errors, delay, max_delay, max_attempts):
    """Retry with the configured exponential delay, unless the exception error
    code is in the given list
    """
    def retry_if_not_error_codes(e):
        # return True only for BadRequests without error codes or with error
        # codes not in the exclude list
        if isinstance(e, exc):
            error_code = _get_bad_request_error_code(e)
            if error_code and error_code not in excluded_errors:
                return True
        return False

    return tenacity.retry(reraise=True,
                          retry=tenacity.retry_if_exception(
                                retry_if_not_error_codes),
                          wait=tenacity.wait_exponential(
                                multiplier=delay, max=max_delay),
                          stop=tenacity.stop_after_attempt(max_attempts),
                          before=_log_before_retry, after=_log_after_retry)
Example #20
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """

    # this callback is used to prevent an auto-migration from being generated
    # when there are no changes to the schema
    # reference: http://alembic.readthedocs.org/en/latest/cookbook.html
    def process_revision_directives(context, revision, directives):
        if getattr(config.cmd_opts, 'autogenerate', False):
            script = directives[0]
            if script.upgrade_ops.is_empty():
                directives[:] = []
                logger.info('No changes in schema detected.')

    engine = engine_from_config(config.get_section(config.config_ini_section),
                                prefix='sqlalchemy.',
                                poolclass=pool.NullPool)

    connection = tenacity.Retrying(
        stop=tenacity.stop_after_attempt(100),
        wait=tenacity.wait_random(min=2, max=5),
        before=tenacity.before_log(logging.getLogger("tenacity.retry"), logging.DEBUG),
        before_sleep=tenacity.before_sleep_log(logging.getLogger("tenacity.retry"), logging.INFO),
        after=tenacity.after_log(logging.getLogger("tenacity.retry"), logging.DEBUG)
        ).call(engine.connect)

    context.configure(connection=connection,
                      target_metadata=target_metadata,
                      process_revision_directives=process_revision_directives,
                      **current_app.extensions['migrate'].configure_args)

    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()
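
`Retrying.call()` is deprecated in newer tenacity releases in favour of calling the object directly; a hedged equivalent of the connection setup above:

connection = tenacity.Retrying(
    stop=tenacity.stop_after_attempt(100),
    wait=tenacity.wait_random(min=2, max=5),
)(engine.connect)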
Example #21
    def test_retry_on_conn_error(self, mocked_session):

        retry_args = dict(
            wait=tenacity.wait_none(),
            stop=tenacity.stop_after_attempt(7),
            retry=tenacity.retry_if_exception_type(
                requests.exceptions.ConnectionError)
        )

        def send_and_raise(request, **kwargs):
            raise requests.exceptions.ConnectionError

        mocked_session().send.side_effect = send_and_raise
        # The job failed for some reason
        with self.assertRaises(tenacity.RetryError):
            self.get_hook.run_with_advanced_retry(
                endpoint='v1/test',
                _retry_args=retry_args
            )
        self.assertEqual(
            self.get_hook._retry_obj.stop.max_attempt_number + 1,
            mocked_session.call_count
        )
Example #22
    return compile.parse1(formula_string, theories=theories)


def str2pol(policy_string, theories=None):
    return compile.parse(policy_string, theories=theories)


def pol2str(policy):
    return " ".join(str(x) for x in policy)


def form2str(formula):
    return str(formula)


@tenacity.retry(stop=tenacity.stop_after_attempt(1000),
                wait=tenacity.wait_fixed(0.1))
def retry_check_for_last_message(obj):
    if not hasattr(obj, "last_msg"):
        raise AttributeError("Missing 'last_msg' attribute")


@tenacity.retry(stop=tenacity.stop_after_attempt(1000),
                wait=tenacity.wait_fixed(0.1))
def retry_check_for_message_to_arrive(obj):
    if not hasattr(obj.msg, "body"):
        raise AttributeError("Missing 'body' attribute")


@tenacity.retry(stop=tenacity.stop_after_attempt(1000),
                wait=tenacity.wait_fixed(0.1))
Example #23
    def request(self,
                method,
                additional_headers=None,
                retry=True,
                timeout=None,
                auth=None,
                use_gzip_encoding=None,
                params=None,
                max_attempts=None,
                **kwargs):
        """
        Make an HTTP request by calling self._request with backoff retry.

        :param method: request method
        :type method: str
        :param additional_headers: additional headers to include in the request
        :type additional_headers: dict[str, str]
        :param retry: boolean indicating whether to retry if the request fails
        :type retry: boolean
        :param timeout: timeout in seconds, overrides default_timeout_secs
        :type timeout: float
        :param auth: auth scheme for the request
        :type auth: requests.auth.AuthBase
        :param use_gzip_encoding: boolean indicating whether to pass gzip
                                  encoding in the request headers or not
        :type use_gzip_encoding: boolean | None
        :param params: additional params to include in the request
        :type params: str | dict[str, T] | None
        :param max_attempts: maximum number of attempts to try for any request
        :type max_attempts: int
        :param kwargs: additional arguments to pass to requests.request
        :type kwargs: dict[str, T]
        :return: HTTP response
        :rtype: requests.Response
        """
        request = self._request

        if retry:
            if max_attempts is None:
                max_attempts = self.default_max_attempts

            # We retry only when it makes sense: either due to a network
            # partition (e.g. connection errors) or if the request failed
            # due to a server error such as 500s, timeouts, and so on.
            request = tenacity.retry(
                stop=tenacity.stop_after_attempt(max_attempts),
                wait=tenacity.wait_exponential(),
                retry=tenacity.retry_if_exception_type((
                    requests.exceptions.Timeout,
                    requests.exceptions.ConnectionError,
                    MesosServiceUnavailableException,
                    MesosInternalServerErrorException,
                )),
                reraise=True,
            )(request)

        try:
            return request(
                method=method,
                additional_headers=additional_headers,
                timeout=timeout,
                auth=auth,
                use_gzip_encoding=use_gzip_encoding,
                params=params,
                **kwargs
            )
        # If the request itself failed, an exception subclassed from
        # RequestException will be raised. Catch this and reraise as
        # MesosException since we want the caller to be able to catch
        # and handle this.
        except requests.exceptions.RequestException as err:
            raise MesosException('Request failed', err)
Example #24
class VolumeManager(volume_base.VolumeBase):
    '''Volume implementation of virtual machines via cinder.'''

    def __init__(self):
        super(VolumeManager, self).__init__()
        # Must initialize cinder api
        self._cinder_client = clients.CinderAuth.get_cinder_client(
            service_name=CONF.cinder.service_name,
            endpoint=CONF.cinder.endpoint,
            region=CONF.cinder.region_name,
            endpoint_type=CONF.cinder.endpoint_type,
            insecure=CONF.cinder.insecure,
            cacert=CONF.cinder.ca_certificates_file
        )
        self.manager = self._cinder_client.volumes

    @retry(reraise=True,
           stop=stop_after_attempt(CONF.cinder.volume_create_max_retries))
    def create_volume_from_image(self, image_id):
        """Create cinder volume

        :param image_id: ID of amphora image

        :return volume id
        """
        volume = self.manager.create(
            size=CONF.cinder.volume_size,
            volume_type=CONF.cinder.volume_type,
            availability_zone=CONF.cinder.availability_zone,
            imageRef=image_id)
        resource_status = self.manager.get(volume.id).status

        status = constants.CINDER_STATUS_AVAILABLE
        start = int(time.time())

        while resource_status != status:
            time.sleep(CONF.cinder.volume_create_retry_interval)
            instance_volume = self.manager.get(volume.id)
            resource_status = instance_volume.status
            if resource_status == constants.CINDER_STATUS_ERROR:
                LOG.error('Error creating %s', instance_volume.id)
                instance_volume.delete()
                raise cinder_exceptions.ResourceInErrorState(
                    obj=volume, fault_msg='Cannot create volume')
            if int(time.time()) - start >= CONF.cinder.volume_create_timeout:
                LOG.error('Timed out waiting to create cinder volume %s',
                          instance_volume.id)
                instance_volume.delete()
                raise cinder_exceptions.TimeoutException(
                    obj=volume, action=constants.CINDER_ACTION_CREATE_VOLUME)
        return volume.id

    def delete_volume(self, volume_id):
        """Get glance image from volume

        :param volume_id: ID of amphora boot volume

        :return image id
        """
        LOG.debug('Deleting cinder volume %s', volume_id)
        try:
            instance_volume = self.manager.get(volume_id)
            try:
                instance_volume.delete()
                LOG.debug("Deleted volume %s", volume_id)
            except Exception:
                LOG.exception("Error deleting cinder volume %s",
                              volume_id)
                raise exceptions.VolumeDeleteException()
        except cinder_exceptions.NotFound:
            LOG.warning("Volume %s not found: assuming already deleted",
                        volume_id)

    def get_image_from_volume(self, volume_id):
        """Get glance image from volume

        :param volume_id: ID of amphora boot volume

        :return image id
        """
        image_id = None
        LOG.debug('Get glance image for volume %s', volume_id)
        try:
            instance_volume = self.manager.get(volume_id)
        except cinder_exceptions.NotFound:
            LOG.exception("Volume %s not found", volume_id)
            raise exceptions.VolumeGetException()
        if hasattr(instance_volume, 'volume_image_metadata'):
            image_id = instance_volume.volume_image_metadata.get("image_id")
        else:
            LOG.error("Volume %s has no image metadata", volume_id)
            image_id = None
        return image_id
Example #25
    elif (ctxt.pull_from_fork and ctxt.pull["base"]["repo"]["private"]
          and not ctxt.pull["maintainer_can_modify"]):
        return check_api.Result(
            check_api.Conclusion.FAILURE,
            "Pull request can't be updated with latest base branch changes",
            "Mergify needs the permission to update the base branch of the pull request.\n"
            "GitHub does not allow a GitHub App to modify base branch for a private fork.\n"
            "You cannot `rebase` a pull request from a private fork.",
        )
    else:
        return None


@tenacity.retry(
    wait=tenacity.wait_exponential(multiplier=0.2),
    stop=tenacity.stop_after_attempt(5),
    retry=tenacity.retry_if_exception_type(BranchUpdateNeedRetry),
)
def _do_rebase(ctxt: context.Context, token: str) -> None:
    # NOTE(sileht):
    # $ curl https://api.github.com/repos/sileht/repotest/pulls/2 | jq .commits
    # 2
    # $ git clone https://[email protected]/sileht-tester/repotest \
    #           --depth=$((2 + 1)) -b sileht/testpr
    # $ cd repotest
    # $ git remote add upstream https://[email protected]/sileht/repotest.git
    # $ git log | grep Date | tail -1
    # Date:   Fri Mar 30 21:30:26 2018 (10 days ago)
    # $ git fetch upstream master --shallow-since="Fri Mar 30 21:30:26 2018"
    # $ git rebase upstream/master
    # $ git push origin sileht/testpr:sileht/testpr
Example #26
def launch_instance(instance_key, use_boot_volume=False, vm_name=None,
                    private_network_name=None, image_name=None,
                    flavor_name=None, external_network_name=None, meta=None,
                    userdata=None):
    """Launch an instance.

    :param instance_key: Key to collect associated config data with.
    :type instance_key: str
    :param use_boot_volume: Whether to boot guest from a shared volume.
    :type use_boot_volume: boolean
    :param vm_name: Name to give guest.
    :type vm_name: str
    :param private_network_name: Name of private network to attach guest to.
    :type private_network_name: str
    :param image_name: Image name to use with guest.
    :type image_name: str
    :param flavor_name: Flavor name to use with guest.
    :type flavor_name: str
    :param external_network_name: External network to create floating ip from
                                  for guest.
    :type external_network_name: str
    :param meta: A dict of arbitrary key/value metadata to store for this
                 server. Both keys and values must be <=255 characters.
    :type meta: dict
    :param userdata: Configuration to use upon launch, used by cloud-init.
    :type userdata: str
    :returns: the created instance
    :rtype: novaclient.Server
    """
    keystone_session = openstack_utils.get_overcloud_keystone_session()
    nova_client = openstack_utils.get_nova_session_client(keystone_session)
    neutron_client = openstack_utils.get_neutron_session_client(
        keystone_session)

    # Collect resource information.
    vm_name = vm_name or time.strftime("%Y%m%d%H%M%S")

    image_name = image_name or boot_tests[instance_key]['image_name']
    image = nova_client.glance.find_image(image_name)

    flavor_name = flavor_name or boot_tests[instance_key]['flavor_name']
    flavor = nova_client.flavors.find(name=flavor_name)

    private_network_name = private_network_name or "private"
    net = neutron_client.find_resource("network", private_network_name)
    nics = [{'net-id': net.get('id')}]

    meta = meta or {}
    external_network_name = external_network_name or "ext_net"

    if use_boot_volume:
        bdmv2 = [{
            'boot_index': '0',
            'uuid': image.id,
            'source_type': 'image',
            'volume_size': flavor.disk,
            'destination_type': 'volume',
            'delete_on_termination': True}]
        image = None
    else:
        bdmv2 = None

    # Launch instance.
    logging.info('Launching instance {}'.format(vm_name))
    instance = nova_client.servers.create(
        name=vm_name,
        image=image,
        block_device_mapping_v2=bdmv2,
        flavor=flavor,
        key_name=nova_utils.KEYPAIR_NAME,
        meta=meta,
        nics=nics,
        userdata=userdata)

    # Test Instance is ready.
    logging.info('Checking instance is active')
    openstack_utils.resource_reaches_status(
        nova_client.servers,
        instance.id,
        expected_status='ACTIVE',
        # NOTE(lourot): in some models this may sometimes take more than 15
        # minutes. See lp:1945991
        wait_iteration_max_time=120,
        stop_after_attempt=16)

    logging.info('Checking cloud init is complete')
    openstack_utils.cloud_init_complete(
        nova_client,
        instance.id,
        boot_tests[instance_key]['bootstring'])
    port = openstack_utils.get_ports_from_device_id(
        neutron_client,
        instance.id)[0]
    logging.info('Assigning floating ip.')
    ip = openstack_utils.create_floating_ip(
        neutron_client,
        external_network_name,
        port=port)['floating_ip_address']
    logging.info('Assigned floating IP {} to {}'.format(ip, vm_name))
    try:
        for attempt in Retrying(
                stop=stop_after_attempt(8),
                wait=wait_exponential(multiplier=1, min=2, max=60)):
            with attempt:
                try:
                    openstack_utils.ping_response(ip)
                except subprocess.CalledProcessError as e:
                    logging.error('Pinging {} failed with {}'
                                  .format(ip, e.returncode))
                    logging.error('stdout: {}'.format(e.stdout))
                    logging.error('stderr: {}'.format(e.stderr))
                    raise
    except RetryError:
        raise openstack_exceptions.NovaGuestNoPingResponse()

    # Check ssh'ing to instance.
    logging.info('Testing ssh access.')
    openstack_utils.ssh_test(
        username=boot_tests[instance_key]['username'],
        ip=ip,
        vm_name=vm_name,
        password=boot_tests[instance_key].get('password'),
        privkey=openstack_utils.get_private_key(nova_utils.KEYPAIR_NAME))
    return instance
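
The `for attempt in Retrying(...)` loop used above raises `tenacity.RetryError` once the stop condition is exhausted, which is why the ping block is wrapped in try/except; a minimal standalone sketch of that control flow (`check_host` is a hypothetical helper that raises on failure):

from tenacity import Retrying, RetryError, stop_after_attempt, wait_exponential

try:
    for attempt in Retrying(stop=stop_after_attempt(8),
                            wait=wait_exponential(multiplier=1, min=2, max=60)):
        with attempt:
            check_host('192.0.2.10')
except RetryError:
    raise RuntimeError('host did not become reachable')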
Example #27
class GithubGraphQLQuery(object):

    log = logging.getLogger(__name__)

    def __init__(self, token_getter):
        self.url = 'https://api.github.com/graphql'
        self.session = requests.session()
        # Refresh the rate limit data every 25 requests
        self.get_rate_limit_rate = 25
        self.query_count = 0
        # Set an initial value
        self.quota_remain = 5000
        self.token_getter = token_getter

    def get_token(self) -> str:
        return self.token_getter.get_token()[0]

    def get_headers(self) -> dict:
        headers = {
            'Authorization': 'token %s' % self.get_token(),
            'User-Agent': 'change-metrics/monocle',
        }
        self.log.debug('request headers: %s' % headers)
        return headers

    def get_rate_limit(self):
        ratelimit = self.getRateLimit()
        if ratelimit:
            self.quota_remain = ratelimit['remaining']
            self.resetat = utils.is8601_to_dt(ratelimit['resetAt'])
            self.log.info("Got rate limit data: remain %s resetat %s" %
                          (self.quota_remain, self.resetat))

    # https://developer.github.com/v3/guides/best-practices-for-integrators/#dealing-with-abuse-rate-limits
    def wait_for_call(self):
        if self.quota_remain <= 150:
            until_reset = self.resetat - utils.utcnow()
            self.log.info("Quota remain: %s/calls delay until "
                          "reset: %s/secs waiting ..." %
                          (self.quota_remain, until_reset.seconds))
            sleep(until_reset.seconds + 60)
            self.get_rate_limit()
        else:
            self.log.debug("Sleeping 1 sec to be a good citizen")
            sleep(1)

    def getRateLimit(self):
        qdata = '''{
          rateLimit {
            limit
            cost
            remaining
            resetAt
          }
        }'''
        data = self.query(qdata, skip_get_rate_limit=True)
        if data:
            try:
                return data['data']['rateLimit']
            except KeyError:
                self.log.error('No rate limit data: %s' % data)
                raise RequestException('No rate limit data: %s' % data)

    @retry(
        after=after_log(log, logging.INFO),
        wait=wait_fixed(10),
        stop=stop_after_attempt(30),
        retry=retry_if_exception_type(RequestException),
        reraise=True,
    )
    def query(self, qdata, skip_get_rate_limit=False):
        if not skip_get_rate_limit:
            if self.query_count % self.get_rate_limit_rate == 0:
                self.get_rate_limit()
            self.wait_for_call()
        data = {'query': qdata}
        try:
            r = self.session.post(url=self.url,
                                  json=data,
                                  headers=self.get_headers(),
                                  timeout=30.3)
        except (
                requests.exceptions.ConnectionError,
                requests.exceptions.ChunkedEncodingError,
        ):
            raise RequestException("Error connecting to the API")
        self.query_count += 1
        if 'retry-after' in r.headers:
            self.log.info('Got Retry-After: %s, sleeping...' %
                          r.headers['retry-after'])
            sleep(int(r.headers['retry-after']))
        if r.status_code != 200:
            self.log.error('No ok response code: %s' % r)
            raise RequestException("No ok response code: %s" % r.text)
        ret = r.json()
        if 'Bad credentials' == ret.get('message', ''):
            self.log.info('Query forbidden due to bad credentials')
            ret = {}
        if 'errors' in ret:
            self.log.error("Errors in response: %s" % ret)
            if (len(ret['errors']) >= 1 and 'message' in ret['errors'][0]
                    and 'timeout' in ret['errors'][0]['message']):
                raise RequestTimeout(ret['errors'][0]['message'])
            if len(ret['errors']) >= 1:
                if all('The additions count for this commit is unavailable'
                       in error['message'] for error in ret['errors']):
                    # These errors are not critical: the PR data is complete
                    # without the failing commit(s), so return the data to
                    # the caller and move on.
                    return ret
            is_forbidden = any(error.get('type') == 'FORBIDDEN'
                               for error in ret['errors'])
            if is_forbidden:
                # Do not raise so tenacity is not retriggered
                self.log.info('Query forbidden due to insufficient token ACLs')
                ret = {}
            else:
                raise RequestException("Errors in response: %s" %
                                       ret['errors'])
        return ret
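
As in the earlier examples, the decorator on `query` relies on names imported directly from tenacity; the imports this class assumes would look roughly like:

from tenacity import (after_log, retry, retry_if_exception_type,
                      stop_after_attempt, wait_fixed)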
Example #28
    @six.wraps(callable_)
    def wrapper(*a, **kw):
        loop = asyncio.get_event_loop()
        return loop.run_until_complete(callable_(*a, **kw))

    return wrapper


@retry
@asyncio.coroutine
def _retryable_coroutine(thing):
    yield from asyncio.sleep(0.00001)
    return thing.go()


@retry(stop=stop_after_attempt(2))
@asyncio.coroutine
def _retryable_coroutine_with_2_attempts(thing):
    yield from asyncio.sleep(0.00001)
    thing.go()
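
On Python 3.5+ the same retried coroutine is usually written with async/await; tenacity detects coroutine functions and retries them natively. A hedged sketch:

@retry(stop=stop_after_attempt(2))
async def _retryable_native_coroutine(thing):
    await asyncio.sleep(0.00001)
    return thing.go()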


class TestAsync(unittest.TestCase):
    @asynctest
    def test_retry(self):
        assert asyncio.iscoroutinefunction(_retryable_coroutine)
        thing = NoIOErrorAfterCount(5)
        yield from _retryable_coroutine(thing)
        assert thing.counter == thing.count

    @asynctest
Example #29
class NeutronNetworkingTest(unittest.TestCase):
    """Ensure that openstack instances have valid networking."""

    RESOURCE_PREFIX = 'zaza-neutrontests'

    @classmethod
    def setUpClass(cls):
        """Run class setup for running Neutron API Networking tests."""
        cls.keystone_session = (
            openstack_utils.get_overcloud_keystone_session())
        cls.nova_client = (openstack_utils.get_nova_session_client(
            cls.keystone_session))
        # NOTE(fnordahl): in the event of a test failure we do not want to run
        # tear down code as it will make debugging the problem virtually
        # impossible.  To alleviate this, each test method sets the
        # `run_tearDown` instance variable at the end, which lets us run
        # tear down only when there was no failure.
        cls.run_tearDown = False

    @classmethod
    def tearDown(cls):
        """Remove test resources."""
        if cls.run_tearDown:
            logging.info('Running teardown')
            for server in cls.nova_client.servers.list():
                if server.name.startswith(cls.RESOURCE_PREFIX):
                    openstack_utils.delete_resource(cls.nova_client.servers,
                                                    server.id,
                                                    msg="server")

    def test_instances_have_networking(self):
        """Validate North/South and East/West networking."""
        guest.launch_instance(glance_setup.LTS_IMAGE_NAME,
                              vm_name='{}-ins-1'.format(self.RESOURCE_PREFIX))
        guest.launch_instance(glance_setup.LTS_IMAGE_NAME,
                              vm_name='{}-ins-2'.format(self.RESOURCE_PREFIX))

        instance_1 = self.nova_client.servers.find(
            name='{}-ins-1'.format(self.RESOURCE_PREFIX))

        instance_2 = self.nova_client.servers.find(
            name='{}-ins-2'.format(self.RESOURCE_PREFIX))

        def verify(stdin, stdout, stderr):
            """Validate that the SSH command exited 0."""
            self.assertEqual(stdout.channel.recv_exit_status(), 0)

        # Verify network from 1 to 2
        self.validate_instance_can_reach_other(instance_1, instance_2, verify)

        # Verify network from 2 to 1
        self.validate_instance_can_reach_other(instance_2, instance_1, verify)

        # Validate tenant to external network routing
        self.validate_instance_can_reach_router(instance_1, verify)
        self.validate_instance_can_reach_router(instance_2, verify)

        # If we get here, it means the tests passed
        self.run_tearDown = True

    @tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, max=60),
                    reraise=True,
                    stop=tenacity.stop_after_attempt(8))
    def validate_instance_can_reach_other(self, instance_1, instance_2,
                                          verify):
        """
        Validate that an instance can reach another's fixed and floating IPs.

        :param instance_1: The instance to check networking from
        :type instance_1: nova_client.Server

        :param instance_2: The instance to check networking to
        :type instance_2: nova_client.Server
        """
        floating_1 = floating_ips_from_instance(instance_1)[0]
        floating_2 = floating_ips_from_instance(instance_2)[0]
        address_2 = fixed_ips_from_instance(instance_2)[0]

        username = guest.boot_tests['bionic']['username']
        password = guest.boot_tests['bionic'].get('password')
        privkey = openstack_utils.get_private_key(nova_utils.KEYPAIR_NAME)

        openstack_utils.ssh_command(username,
                                    floating_1,
                                    'instance-1',
                                    'ping -c 1 {}'.format(address_2),
                                    password=password,
                                    privkey=privkey,
                                    verify=verify)

        openstack_utils.ssh_command(username,
                                    floating_1,
                                    'instance-1',
                                    'ping -c 1 {}'.format(floating_2),
                                    password=password,
                                    privkey=privkey,
                                    verify=verify)

    @tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, max=60),
                    reraise=True,
                    stop=tenacity.stop_after_attempt(8))
    def validate_instance_can_reach_router(self, instance, verify):
        """
        Validate that an instance can reach its primary gateway.

        We assume that the router's IP is 192.168.0.1 as that is the
        network set up in neutron.setup.basic_overcloud_network, which is
        used in all Zaza Neutron validations.

        :param instance: The instance to check networking from
        :type instance: nova_client.Server
        """
        address = floating_ips_from_instance(instance)[0]

        username = guest.boot_tests['bionic']['username']
        password = guest.boot_tests['bionic'].get('password')
        privkey = openstack_utils.get_private_key(nova_utils.KEYPAIR_NAME)

        openstack_utils.ssh_command(username,
                                    address,
                                    'instance',
                                    'ping -c 1 192.168.0.1',
                                    password=password,
                                    privkey=privkey,
                                    verify=verify)
        pass
class PodLauncher(LoggingMixin):
    """Launches PODS"""
    def __init__(self,
                 kube_client: client.CoreV1Api = None,
                 in_cluster: bool = True,
                 cluster_context: Optional[str] = None,
                 extract_xcom: bool = False):
        """
        Creates the launcher.

        :param kube_client: kubernetes client
        :param in_cluster: whether we are in cluster
        :param cluster_context: context of the cluster
        :param extract_xcom: whether we should extract xcom
        """
        super().__init__()
        self._client = kube_client or get_kube_client(
            in_cluster=in_cluster, cluster_context=cluster_context)
        self._watch = watch.Watch()
        self.extract_xcom = extract_xcom

    def run_pod_async(self, pod: V1Pod, **kwargs):
        """Runs POD asynchronously"""
        pod_mutation_hook(pod)

        sanitized_pod = self._client.api_client.sanitize_for_serialization(pod)
        json_pod = json.dumps(sanitized_pod, indent=2)

        self.log.debug('Pod Creation Request: \n%s', json_pod)
        try:
            resp = self._client.create_namespaced_pod(
                body=sanitized_pod, namespace=pod.metadata.namespace, **kwargs)
            self.log.debug('Pod Creation Response: %s', resp)
        except Exception as e:
            self.log.exception(
                'Exception when attempting '
                'to create Namespaced Pod: %s', json_pod)
            raise e
        return resp

    def delete_pod(self, pod: V1Pod):
        """Deletes POD"""
        try:
            self._client.delete_namespaced_pod(pod.metadata.name,
                                               pod.metadata.namespace,
                                               body=client.V1DeleteOptions())
        except ApiException as e:
            # If the pod is already deleted
            if e.status != 404:
                raise

    def run_pod(self,
                pod: V1Pod,
                startup_timeout: int = 120,
                get_logs: bool = True) -> Tuple[State, Optional[str]]:
        """
        Launches the pod synchronously and waits for completion.

        :param pod:
        :param startup_timeout: Timeout for startup of the pod (if pod is pending for too long, fails task)
        :param get_logs:  whether to query k8s for logs
        :return:
        """
        resp = self.run_pod_async(pod)
        curr_time = dt.now()
        if resp.status.start_time is None:
            while self.pod_not_started(pod):
                delta = dt.now() - curr_time
                if delta.total_seconds() >= startup_timeout:
                    raise AirflowException("Pod took too long to start")
                time.sleep(1)
            self.log.debug('Pod not yet started')

        return self._monitor_pod(pod, get_logs)

    def _monitor_pod(self, pod: V1Pod,
                     get_logs: bool) -> Tuple[State, Optional[str]]:
        if get_logs:
            logs = self.read_pod_logs(pod)
            for line in logs:
                self.log.info(line)
        result = None
        if self.extract_xcom:
            while self.base_container_is_running(pod):
                self.log.info('Container %s has state %s', pod.metadata.name,
                              State.RUNNING)
                time.sleep(2)
            result = self._extract_xcom(pod)
            self.log.info(result)
            result = json.loads(result)
        while self.pod_is_running(pod):
            self.log.info('Pod %s has state %s', pod.metadata.name,
                          State.RUNNING)
            time.sleep(2)
        return self._task_status(self.read_pod(pod)), result

    def _task_status(self, event):
        self.log.info('Event: %s had an event of type %s', event.metadata.name,
                      event.status.phase)
        status = self.process_status(event.metadata.name, event.status.phase)
        return status

    def pod_not_started(self, pod: V1Pod):
        """Tests if pod has not started"""
        state = self._task_status(self.read_pod(pod))
        return state == State.QUEUED

    def pod_is_running(self, pod: V1Pod):
        """Tests if pod is running"""
        state = self._task_status(self.read_pod(pod))
        return state not in (State.SUCCESS, State.FAILED)

    def base_container_is_running(self, pod: V1Pod):
        """Tests if base container is running"""
        event = self.read_pod(pod)
        status = next(
            iter(
                filter(lambda s: s.name == 'base',
                       event.status.container_statuses)), None)
        if not status:
            return False
        return status.state.running is not None

    @tenacity.retry(stop=tenacity.stop_after_attempt(3),
                    wait=tenacity.wait_exponential(),
                    reraise=True)
    def read_pod_logs(self, pod: V1Pod):
        """Reads log from the POD"""
        try:
            return self._client.read_namespaced_pod_log(
                name=pod.metadata.name,
                namespace=pod.metadata.namespace,
                container='base',
                follow=True,
                tail_lines=10,
                _preload_content=False)
        except BaseHTTPError as e:
            raise AirflowException(
                'There was an error reading the kubernetes API: {}'.format(e))

    @tenacity.retry(stop=tenacity.stop_after_attempt(3),
                    wait=tenacity.wait_exponential(),
                    reraise=True)
    def read_pod_events(self, pod):
        """Reads events from the POD"""
        try:
            return self._client.list_namespaced_event(
                namespace=pod.metadata.namespace,
                field_selector="involvedObject.name={}".format(
                    pod.metadata.name))
        except BaseHTTPError as e:
            raise AirflowException(
                'There was an error reading the kubernetes API: {}'.format(e))

    @tenacity.retry(stop=tenacity.stop_after_attempt(3),
                    wait=tenacity.wait_exponential(),
                    reraise=True)
    def read_pod(self, pod: V1Pod):
        """Read POD information"""
        try:
            return self._client.read_namespaced_pod(pod.metadata.name,
                                                    pod.metadata.namespace)
        except BaseHTTPError as e:
            raise AirflowException(
                'There was an error reading the kubernetes API: {}'.format(e))

    def _extract_xcom(self, pod: V1Pod):
        resp = kubernetes_stream(self._client.connect_get_namespaced_pod_exec,
                                 pod.metadata.name,
                                 pod.metadata.namespace,
                                 container=PodDefaults.SIDECAR_CONTAINER_NAME,
                                 command=['/bin/sh'],
                                 stdin=True,
                                 stdout=True,
                                 stderr=True,
                                 tty=False,
                                 _preload_content=False)
        try:
            result = self._exec_pod_command(
                resp, 'cat {}/return.json'.format(PodDefaults.XCOM_MOUNT_PATH))
            self._exec_pod_command(resp, 'kill -s SIGINT 1')
        finally:
            resp.close()
        if result is None:
            raise AirflowException(
                'Failed to extract xcom from pod: {}'.format(
                    pod.metadata.name))
        return result

    def _exec_pod_command(self, resp, command):
        if resp.is_open():
            self.log.info('Running command... %s\n', command)
            resp.write_stdin(command + '\n')
            while resp.is_open():
                resp.update(timeout=1)
                if resp.peek_stdout():
                    return resp.read_stdout()
                if resp.peek_stderr():
                    self.log.info(resp.read_stderr())
                    break
        return None

    def process_status(self, job_id, status):
        """Process status information for the JOB"""
        status = status.lower()
        if status == PodStatus.PENDING:
            return State.QUEUED
        elif status == PodStatus.FAILED:
            self.log.info('Event with job id %s Failed', job_id)
            return State.FAILED
        elif status == PodStatus.SUCCEEDED:
            self.log.info('Event with job id %s Succeeded', job_id)
            return State.SUCCESS
        elif status == PodStatus.RUNNING:
            return State.RUNNING
        else:
            self.log.info('Event: Invalid state %s on job %s', status, job_id)
            return State.FAILED
Example #31
    try:
        return int(value)
    except (TypeError, ValueError):
        return default


def sfloat(value, default):
    if not isinstance(default, float):
        raise TypeError("Default value must be of float type")
    try:
        return float(value)
    except (TypeError, ValueError):
        return default


@retry(stop=stop_after_attempt(5), wait=wait_exponential(min=1, max=10))
async def get_walkoff_auth_header(session, token=None, timeout=5 * 60):
    url = config.API_URI.rstrip('/') + '/walkoff/api'
    logger.debug("Attempting to refresh WALKOFF JWT")
    if token is None:
        key = config.get_from_file(config.INTERNAL_KEY_PATH)
        async with session.post(url + "/auth/login", json={"username": config.WALKOFF_USERNAME,
                                                           "password": key}, timeout=timeout) as resp:
            resp_json = await resp.json()
            token = resp_json["refresh_token"]
            logger.debug("Successfully logged into WALKOFF")

    headers = {"Authorization": f"Bearer {token}"}
    async with session.post(url + "/auth/refresh", headers=headers, timeout=timeout) as resp:
        resp_json = await resp.json()
        access_token = resp_json["access_token"]
Example #32
    def test_manila_share(self):
        """Test that Manila + Ganesha shares can be accessed on two instances.

        1. create a share
        2. Spawn two servers
        3. mount it on both
        4. write a file on one
        5. read it on the other
        6. profit
        """
        # Create a share
        share = self.manila_client.shares.create(share_type='cephfsnfstype',
                                                 name='cephnfsshare1',
                                                 share_proto="nfs",
                                                 size=1)

        # Spawn Servers
        instance_1 = guest.launch_instance(glance_setup.LTS_IMAGE_NAME,
                                           vm_name='{}-ins-1'.format(
                                               self.RESOURCE_PREFIX),
                                           userdata=self.INSTANCE_USERDATA)
        instance_2 = guest.launch_instance(glance_setup.LTS_IMAGE_NAME,
                                           vm_name='{}-ins-2'.format(
                                               self.RESOURCE_PREFIX),
                                           userdata=self.INSTANCE_USERDATA)

        fip_1 = neutron_tests.floating_ips_from_instance(instance_1)[0]
        fip_2 = neutron_tests.floating_ips_from_instance(instance_2)[0]

        share.allow(access_type='ip', access=fip_1, access_level='rw')
        share.allow(access_type='ip', access=fip_2, access_level='rw')

        # Mount Share
        username = guest.boot_tests['bionic']['username']
        password = guest.boot_tests['bionic'].get('password')
        privkey = openstack_utils.get_private_key(nova_utils.KEYPAIR_NAME)

        # Write a file on instance_1
        def verify_setup(stdin, stdout, stderr):
            status = stdout.channel.recv_exit_status()
            self.assertEqual(status, 0)

        mount_path = share.export_locations[0]

        for attempt in Retrying(stop=stop_after_attempt(3),
                                wait=wait_exponential(multiplier=1,
                                                      min=2,
                                                      max=10)):
            with attempt:
                openstack_utils.ssh_command(
                    username,
                    fip_1,
                    'instance-1',
                    'sudo mkdir -p /mnt/ceph && '
                    'sudo mount -t nfs -o nfsvers=4.1,proto=tcp '
                    '{} /mnt/ceph && '
                    'echo "test" | sudo tee /mnt/ceph/test'.format(mount_path),
                    password=password,
                    privkey=privkey,
                    verify=verify_setup)

        for attempt in Retrying(stop=stop_after_attempt(3),
                                wait=wait_exponential(multiplier=1,
                                                      min=2,
                                                      max=10)):
            with attempt:
                # Setup that file on instance_2
                openstack_utils.ssh_command(
                    username,
                    fip_2,
                    'instance-2',
                    'sudo mkdir -p /mnt/ceph && '
                    'sudo /bin/mount -t nfs -o nfsvers=4.1,proto=tcp '
                    '{} /mnt/ceph'.format(mount_path),
                    password=password,
                    privkey=privkey,
                    verify=verify_setup)

        def verify(stdin, stdout, stderr):
            status = stdout.channel.recv_exit_status()
            self.assertEqual(status, 0)
            out = ""
            for line in iter(stdout.readline, ""):
                out += line
            self.assertEqual(out, "test\n")

        openstack_utils.ssh_command(
            username,
            fip_2,
            'instance-2',
            'sudo cat /mnt/ceph/test',
            password=password,
            privkey=privkey,
            verify=verify)
Beispiel #33
0
class Comms_master:
    """
    """
    def __init__(self, commsffl):
        """
        """
        #self.comms = Comms_master(commsffl)
        #self.context_master = context_master
        #self.task_name = task_name
        self.name = 'pycloudmessenger'
        #self.commsffl = ffl.Factory.aggregator(self.context_master, task_name=task_name)
        self.commsffl = commsffl
        workers = self.commsffl.get_participants()
        self.workers_ids = list(workers.keys())

    def send(self, message, destiny):

        try:
            with self.commsffl:
                # self.send_to maps between worker_id and pseudo_id
                self.commsffl.send(message, destiny)
        except Exception as err:
            print('\n')
            print('*' * 80)
            print('Pycloudmessenger ERROR at send: %s' % err)
            print('*' * 80)
            print('\n')
            raise

    def broadcast(self, message, receivers_list=None):
        # receivers_list is not used here; pycloudmessenger already knows all the recipients
        try:
            with self.commsffl:
                self.commsffl.send(message)
        except Exception as err:
            print('\n')
            print('*' * 80)
            print('Pycloudmessenger ERROR at broadcast: %s' % err)
            print('*' * 80)
            print('\n')
            raise

    def receive(self, timeout=1):
        try:
            with self.commsffl:
                packet = self.commsffl.receive(timeout)
            message = packet.content
            pseudo_id = packet.notification['participant']
            #sender_ = str(self.workers_addresses_cloud.index(pseudo_id))
            #sender = message['sender']
            #message.update({'pseudo_id': pseudo_id})
            #message.update({'sender_': sender})
            message.update({'sender': pseudo_id})
        except Exception as err:
            if 'pycloudmessenger.ffl.fflapi.TimedOutException' not in str(
                    type(err)):  # we skip the normal timeouts
                print('\n')
                print('*' * 80)
                print('Pycloudmessenger ERROR at receive: %s' % err)
                print('*' * 80)
                print('\n')
                raise
            # a timeout is the normal "no message yet" case, so return None
            message = None
        return message

    @tenacity.retry(stop=tenacity.stop_after_attempt(5),
                    wait=tenacity.wait_random(min=1, max=3))
    def receive_poms_123(self, timeout=10):
        with self.commsffl:
            packet = self.commsffl.receive(timeout)
        return packet
Beispiel #34
0
import math
import operator
import os.path
import random
import re
import shutil
import uuid
import time

import tenacity

from .lib import mkdir, jsonify, toiter, STRING_TYPES, sip, toabs, first

retry = tenacity.retry(
  reraise=True,
  stop=tenacity.stop_after_attempt(4),
  wait=tenacity.wait_random_exponential(0.5, 60.0),
)

@retry
def read_file(path, mode='rt', lock=False, block=False):
  f = open(path, mode)
  if lock:
    f = read_lock_file(f)
  data = f.read()
  f.close()
  return data

@retry
def write_file(
  path, file, mode='wt',
Beispiel #35
0
class PartitionCoordinator(object):
    """Workload partitioning coordinator.

    This class uses the `tooz` library to manage group membership.

    To ensure that the other agents know this agent is still alive,
    the `heartbeat` method should be called periodically.

    Coordination errors and reconnects are handled under the hood, so the
    service using the partition coordinator need not care whether the
    coordination backend is down. The `extract_my_subset` method will simply
    return an empty iterable in this case. (A minimal usage sketch follows the
    class below.)
    """
    def __init__(self, conf, my_id=None):
        self.conf = conf
        self.backend_url = self.conf.coordination.backend_url
        self._coordinator = None
        self._groups = set()
        self._my_id = my_id or \
            encodeutils.safe_encode(uuidutils.generate_uuid())

    def start(self):
        if self.backend_url:
            try:
                self._coordinator = tooz.coordination.get_coordinator(
                    self.backend_url, self._my_id)
                self._coordinator.start()
                LOG.info('Coordination backend started successfully.')
            except tooz.coordination.ToozError:
                LOG.exception('Error connecting to coordination backend.')

    def stop(self):
        if not self._coordinator:
            return

        for group in list(self._groups):
            self.leave_group(group)

        try:
            self._coordinator.stop()
        except tooz.coordination.ToozError:
            LOG.exception('Error connecting to coordination backend.')
        finally:
            self._coordinator = None

    def is_active(self):
        return self._coordinator is not None

    def heartbeat(self):
        if self._coordinator:
            if not self._coordinator.is_started:
                # re-connect
                self.start()
            try:
                self._coordinator.heartbeat()
            except tooz.coordination.ToozError:
                LOG.exception('Error sending a heartbeat to coordination '
                              'backend.')

    def join_group(self, group_id):
        if (not self._coordinator or not self._coordinator.is_started
                or not group_id):
            return

        @tenacity.retry(wait=tenacity.wait_exponential(
            multiplier=self.conf.coordination.retry_backoff,
            max=self.conf.coordination.max_retry_interval),
                        retry=tenacity.retry_if_exception_type(
                            ErrorJoiningPartitioningGroup))
        def _inner():
            try:
                join_req = self._coordinator.join_group(group_id)
                join_req.get()
                LOG.info('Joined partitioning group %s', group_id)
            except tooz.coordination.MemberAlreadyExist:
                return
            except tooz.coordination.GroupNotCreated:
                create_grp_req = self._coordinator.create_group(group_id)
                try:
                    create_grp_req.get()
                except tooz.coordination.GroupAlreadyExist:
                    pass
                raise ErrorJoiningPartitioningGroup()
            except tooz.coordination.ToozError:
                LOG.exception(
                    'Error joining partitioning group %s,'
                    ' re-trying', group_id)
                raise ErrorJoiningPartitioningGroup()
            self._groups.add(group_id)

        return _inner()

    def leave_group(self, group_id):
        if group_id not in self._groups:
            return
        if self._coordinator:
            self._coordinator.leave_group(group_id)
            self._groups.remove(group_id)
            LOG.info('Left partitioning group %s', group_id)

    def _get_members(self, group_id):
        if not self._coordinator:
            return [self._my_id]

        while True:
            get_members_req = self._coordinator.get_members(group_id)
            try:
                return get_members_req.get()
            except tooz.coordination.GroupNotCreated:
                self.join_group(group_id)

    @tenacity.retry(
        wait=tenacity.wait_random(max=2),
        stop=tenacity.stop_after_attempt(5),
        retry=tenacity.retry_if_exception_type(MemberNotInGroupError),
        reraise=True)
    def extract_my_subset(self, group_id, universal_set):
        """Filters an iterable, returning only objects assigned to this agent.

        We have a list of objects and get a list of active group members from
        `tooz`. We then hash all the objects into buckets and return only
        the ones that hashed into *our* bucket.
        """
        if not group_id or not self.is_active():
            return universal_set

        if group_id not in self._groups:
            self.join_group(group_id)

        try:
            members = self._get_members(group_id)
            LOG.debug('Members of group: %s, Me: %s', members, self._my_id)
            if self._my_id not in members:
                LOG.warning('Cannot extract tasks because agent failed to '
                            'join group properly. Rejoining group.')
                self.join_group(group_id)
                members = self._get_members(group_id)
                if self._my_id not in members:
                    raise MemberNotInGroupError(group_id, members, self._my_id)
                LOG.debug('Members of group: %s, Me: %s', members, self._my_id)
            hr = HashRing(members)
            LOG.debug('Universal set: %s', universal_set)
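            # Consistent hashing: every agent computes the same object-to-member
            # mapping, so the universal set is split into disjoint per-agent subsets.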
            my_subset = [
                v for v in universal_set if hr.get_node(str(v)) == self._my_id
            ]
            LOG.debug('My subset: %s', my_subset)
            return my_subset
        except tooz.coordination.ToozError:
            LOG.exception('Error getting group membership info from '
                          'coordination backend.')
            return []
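
# A minimal usage sketch of the coordinator above, assuming a periodic driver loop
# (`conf`, `resources`, `process()` and the sleep interval are placeholders, not part
# of the original example):
#
#     coordinator = PartitionCoordinator(conf)
#     coordinator.start()
#     coordinator.join_group(b'my-group')
#     while True:
#         coordinator.heartbeat()
#         for item in coordinator.extract_my_subset(b'my-group', resources):
#             process(item)
#         time.sleep(10)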
Beispiel #36
0
class ExchangeInterface():
    """Interface for performing queries against exchange API's
    """

    def __init__(self, exchange_config):
        """Initializes ExchangeInterface class

        Args:
            exchange_config (dict): A dictionary containing configuration for the exchanges.
        """

        self.logger = structlog.get_logger()
        self.exchanges = {}

        # Loads the exchanges using ccxt.
        for exchange in exchange_config:
            if exchange_config[exchange]['required']['enabled']:
                new_exchange = getattr(ccxt, exchange)({
                    "enableRateLimit": True
                })

                # sets up api permissions for user if given
                if new_exchange:
                    if 'key' in exchange_config[exchange]['optional']:
                        new_exchange.apiKey = exchange_config[exchange]['optional']['key']

                    if 'secret' in exchange_config[exchange]['optional']:
                        new_exchange.secret = exchange_config[exchange]['optional'][
                            'secret']

                    if 'username' in exchange_config[exchange]['optional']:
                        new_exchange.username = exchange_config[exchange]['optional'][
                            'username']

                    if 'password' in exchange_config[exchange]['optional']:
                        new_exchange.password = exchange_config[exchange]['optional'][
                            'password']

                    self.exchanges[new_exchange.id] = new_exchange
                else:
                    self.logger.warn("Unable to load exchange %s", new_exchange)


    def override_exchange_config(self):
        """Enables all exchanges regardless of user configuration. Useful for the UI layer.
        """

        for exchange in ccxt.exchanges:
            self.exchanges[exchange] = getattr(ccxt, exchange)({
                "enableRateLimit": True
            })


    @retry(retry=retry_if_exception_type(ccxt.NetworkError), stop=stop_after_attempt(3))
    def get_historical_data(self, market_pair, exchange, time_unit, start_date=None, max_days=100):
        """Get historical OHLCV for a symbol pair

        Args:
            market_pair (str): Contains the symbol pair to operate on i.e. BURST/BTC
            exchange (str): Contains the exchange to fetch the historical data from.
            time_unit (str): A string specifying the ccxt time unit i.e. 5m or 1d.
            start_date (int, optional): Timestamp in milliseconds.
            max_days (int, optional): Defaults to 100. Maximum number of days to fetch data for
                if start date is not specified.

        Returns:
            list: Contains a list of lists which contain timestamp, open, high, low, close, volume.
        """

        if not start_date:
            max_days_date = datetime.now() - timedelta(days=max_days)
            start_date = int(max_days_date.replace(tzinfo=timezone.utc).timestamp() * 1000)

        historical_data = self.exchanges[exchange].fetch_ohlcv(
            market_pair,
            timeframe=time_unit,
            since=start_date
        )

        time.sleep(self.exchanges[exchange].rateLimit / 1000)
        return historical_data


    @retry(retry=retry_if_exception_type(ccxt.NetworkError), stop=stop_after_attempt(3))
    def get_account_markets(self, exchange):
        """Get the symbol pairs listed within a users account.

        Args:
            exchange (str): Contains the exchange to fetch the data from.

        Returns:
            dict: A dictionary containing market data for the symbol pairs.
        """

        account_markets = {}
        account_markets.update(self.exchanges[exchange].fetch_balance())
        time.sleep(self.exchanges[exchange].rateLimit / 1000)
        return account_markets


    @retry(retry=retry_if_exception_type(ccxt.NetworkError), stop=stop_after_attempt(3))
    def get_markets_for_exchange(self, exchange):
        """Get market data for all symbol pairs listed on the given exchange.

        Args:
            exchange (str): Contains the exchange to fetch the data from

        Returns:
            dict: A dictionary containing market data for all symbol pairs.
        """

        exchange_markets = self.exchanges[exchange].load_markets()

        return exchange_markets


    @retry(retry=retry_if_exception_type(ccxt.NetworkError), stop=stop_after_attempt(3))
    def get_exchange_markets(self):
        """Get market data for all symbol pairs listed on all configured exchanges.

        Returns:
            dict: A dictionary containing market data for all symbol pairs.
        """

        exchange_markets = {}
        for exchange in self.exchanges:
            exchange_markets[exchange] = self.exchanges[exchange].load_markets()
            time.sleep(self.exchanges[exchange].rateLimit / 1000)
        return exchange_markets


    @retry(retry=retry_if_exception_type(ccxt.NetworkError), stop=stop_after_attempt(3))
    def get_symbol_markets(self, market_pairs):
        """Get market data for specific symbols on all configured exchanges.

        Args:
            market_pairs (list): The symbol pairs you want to retrieve market data for.

        Returns:
            dict: A dictionary containing market data for requested symbol pairs.
        """

        symbol_markets = {}
        for exchange in self.exchanges:
            self.exchanges[exchange].load_markets()
            symbol_markets[exchange] = {}

            for market_pair in market_pairs:
                symbol_markets[exchange][market_pair] = self.exchanges[exchange].markets[
                    market_pair]
            time.sleep(self.exchanges[exchange].rateLimit / 1000)
        return symbol_markets


    @retry(retry=retry_if_exception_type(ccxt.NetworkError), stop=stop_after_attempt(3))
    def get_order_book(self, market_pair, exchange):
        """Retrieve the order information for a particular symbol pair.

        Args:
            market_pair (str): Contains the symbol pair to operate on i.e. BURST/BTC
            exchange (str): Contains the exchange to fetch the data from.

        Returns:
            dict: A dictionary containing bid, ask and other order information on a pair.
        """

        return self.exchanges[exchange].fetch_order_book(market_pair)


    @retry(retry=retry_if_exception_type(ccxt.NetworkError), stop=stop_after_attempt(3))
    def get_open_orders(self):
        """Get the users currently open orders on all configured exchanges.

        Returns:
            dict: A dictionary containing open order information.
        """

        open_orders = {}
        for exchange in self.exchanges:
            open_orders[exchange] = self.exchanges[exchange].fetch_open_orders()
            time.sleep(self.exchanges[exchange].rateLimit / 1000)
        return open_orders


    @retry(retry=retry_if_exception_type(ccxt.NetworkError), stop=stop_after_attempt(3))
    def cancel_order(self, exchange, order_id):
        """Cancels an open order on a particular exchange.

        Args:
            exchange (str): Contains the exchange to cancel the order on.
            order_id (str): The order id you want to cancel.
        """

        self.exchanges[exchange].cancel_order(order_id)
        time.sleep(self.exchanges[exchange].rateLimit / 1000)


    @retry(retry=retry_if_exception_type(ccxt.NetworkError), stop=stop_after_attempt(3))
    def get_quote_symbols(self, exchange):
        """Get a list of quote symbols on an exchange.

        Args:
            exchange (str): Contains the exchange to fetch the data from.

        Returns:
            list: List of quote symbols on an exchange.
        """

        quote_symbols = []
        for market_pair in self.exchanges[exchange].markets:
            _, quote_symbol = market_pair.split('/')
            if quote_symbol not in quote_symbols:
                quote_symbols.append(quote_symbol)

        return quote_symbols


    @retry(retry=retry_if_exception_type(ccxt.NetworkError), stop=stop_after_attempt(3))
    def get_btc_value(self, exchange, base_symbol, volume):

        btc_value = 0
        market_pair = base_symbol + "/BTC"

        order_book = self.get_order_book(market_pair, exchange)
        bid = order_book['bids'][0][0] if order_book['bids'] else None
        if bid:
            btc_value = bid * volume

        return btc_value
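
# A minimal usage sketch of ExchangeInterface (the exchange name, symbol pair and
# config values are placeholders and would normally come from the app configuration):
#
#     config = {'binance': {'required': {'enabled': True}, 'optional': {}}}
#     interface = ExchangeInterface(config)
#     ohlcv = interface.get_historical_data('BTC/USDT', 'binance', '1d', max_days=30)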
Beispiel #37
0
class URLFile:
    _tlocal = threading.local()

    def __init__(self, url, debug=False, cache=None):
        self._url = url
        self._pos = 0
        self._length = None
        self._local_file = None
        self._debug = debug
        #  True by default, false if FILEREADER_CACHE is defined, but can be overwritten by the cache input
        self._force_download = not int(os.environ.get("FILEREADER_CACHE", "0"))
        if cache is not None:
            self._force_download = not cache

        try:
            self._curl = self._tlocal.curl
        except AttributeError:
            self._curl = self._tlocal.curl = pycurl.Curl()
        mkdirs_exists_ok(CACHE_DIR)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if self._local_file is not None:
            os.remove(self._local_file.name)
            self._local_file.close()
            self._local_file = None

    @retry(wait=wait_random_exponential(multiplier=1, max=5),
           stop=stop_after_attempt(3),
           reraise=True)
    def get_length_online(self):
        c = self._curl
        c.reset()
        c.setopt(pycurl.NOSIGNAL, 1)
        c.setopt(pycurl.TIMEOUT_MS, 500000)
        c.setopt(pycurl.FOLLOWLOCATION, True)
        c.setopt(pycurl.URL, self._url)
        c.setopt(c.NOBODY, 1)
        c.perform()
        length = int(c.getinfo(c.CONTENT_LENGTH_DOWNLOAD))
        c.reset()
        return length

    def get_length(self):
        if self._length is not None:
            return self._length
        file_length_path = os.path.join(CACHE_DIR,
                                        hash_256(self._url) + "_length")
        if os.path.exists(file_length_path) and not self._force_download:
            with open(file_length_path) as file_length:
                content = file_length.read()
                self._length = int(content)
                return self._length

        self._length = self.get_length_online()
        if not self._force_download:
            with atomic_write_in_dir(file_length_path,
                                     mode="w") as file_length:
                file_length.write(str(self._length))
        return self._length

    def read(self, ll=None):
        if self._force_download:
            return self.read_aux(ll=ll)

        file_begin = self._pos
        file_end = self._pos + ll if ll is not None else self.get_length()
        #  We have to align with the chunks we store. Position is the beginning of the latest chunk that starts before or at our file position.
        position = (file_begin // CHUNK_SIZE) * CHUNK_SIZE
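        # e.g. (illustrative values only) with CHUNK_SIZE = 1024 and file_begin = 2500,
        # position = (2500 // 1024) * 1024 = 2048, the start of the chunk holding byte 2500.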
        response = b""
        while True:
            self._pos = position
            chunk_number = self._pos // CHUNK_SIZE
            file_name = hash_256(self._url) + "_" + str(chunk_number)
            full_path = os.path.join(CACHE_DIR, str(file_name))
            data = None
            #  If we don't have a file, download it
            if not os.path.exists(full_path):
                data = self.read_aux(ll=CHUNK_SIZE)
                with atomic_write_in_dir(full_path,
                                         mode="wb") as new_cached_file:
                    new_cached_file.write(data)
            else:
                with open(full_path, "rb") as cached_file:
                    data = cached_file.read()

            response += data[max(0, file_begin -
                                 position):min(CHUNK_SIZE, file_end -
                                               position)]

            position += CHUNK_SIZE
            if position >= file_end:
                self._pos = file_end
                return response

    @retry(wait=wait_random_exponential(multiplier=1, max=5),
           stop=stop_after_attempt(3),
           reraise=True)
    def read_aux(self, ll=None):
        download_range = False
        headers = ["Connection: keep-alive"]
        if self._pos != 0 or ll is not None:
            if ll is None:
                end = self.get_length() - 1
            else:
                end = min(self._pos + ll, self.get_length()) - 1
            if self._pos >= end:
                return b""
            headers.append(f"Range: bytes={self._pos}-{end}")
            download_range = True

        dats = BytesIO()
        c = self._curl
        c.setopt(pycurl.URL, self._url)
        c.setopt(pycurl.WRITEDATA, dats)
        c.setopt(pycurl.NOSIGNAL, 1)
        c.setopt(pycurl.TIMEOUT_MS, 500000)
        c.setopt(pycurl.HTTPHEADER, headers)
        c.setopt(pycurl.FOLLOWLOCATION, True)

        if self._debug:
            print("downloading", self._url)

            def header(x):
                if b'MISS' in x:
                    print(x.strip())

            c.setopt(pycurl.HEADERFUNCTION, header)

            def test(debug_type, debug_msg):
                print("  debug(%d): %s" % (debug_type, debug_msg.strip()))

            c.setopt(pycurl.VERBOSE, 1)
            c.setopt(pycurl.DEBUGFUNCTION, test)
            t1 = time.time()

        c.perform()

        if self._debug:
            t2 = time.time()
            if t2 - t1 > 0.1:
                print(f"get {self._url} {headers!r} {t2 - t1:.f} slow")

        response_code = c.getinfo(pycurl.RESPONSE_CODE)
        if response_code == 416:  # Requested Range Not Satisfiable
            raise Exception(
                f"Error, range out of bounds {response_code} {headers} ({self._url}): {repr(dats.getvalue())[:500]}"
            )
        if download_range and response_code != 206:  # Partial Content
            raise Exception(
                f"Error, requested range but got unexpected response {response_code} {headers} ({self._url}): {repr(dats.getvalue())[:500]}"
            )
        if (not download_range) and response_code != 200:  # OK
            raise Exception(
                f"Error {response_code} {headers} ({self._url}): {repr(dats.getvalue())[:500]}"
            )

        ret = dats.getvalue()
        self._pos += len(ret)
        return ret

    def seek(self, pos):
        self._pos = pos

    @property
    def name(self):
        """Returns a local path to file with the URLFile's contents.

       This can be used to interface with modules that require local files.
    """
        if self._local_file is None:
            _, ext = os.path.splitext(urllib.parse.urlparse(self._url).path)
            local_fd, local_path = tempfile.mkstemp(suffix=ext)
            try:
                os.write(local_fd, self.read())
                local_file = open(local_path, "rb")
            except Exception:
                os.remove(local_path)
                raise
            finally:
                os.close(local_fd)

            self._local_file = local_file
            self.read = self._local_file.read
            self.seek = self._local_file.seek

        return self._local_file.name
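
# A minimal usage sketch of URLFile (the URL is a placeholder; reads are cached in
# CHUNK_SIZE pieces under CACHE_DIR unless caching is disabled):
#
#     with URLFile('https://example.org/some/segment.bin') as f:
#         header = f.read(1024)
#         f.seek(0)
#         everything = f.read()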
Beispiel #38
0
class PodManager(LoggingMixin):
    """
    Helper class for creating, monitoring, and otherwise interacting with Kubernetes pods
    for use with the KubernetesPodOperator
    """

    def __init__(
        self,
        kube_client: client.CoreV1Api = None,
        in_cluster: bool = True,
        cluster_context: Optional[str] = None,
    ):
        """
        Creates the launcher.

        :param kube_client: kubernetes client
        :param in_cluster: whether we are in cluster
        :param cluster_context: context of the cluster
        """
        super().__init__()
        self._client = kube_client or get_kube_client(in_cluster=in_cluster, cluster_context=cluster_context)
        self._watch = watch.Watch()

    def run_pod_async(self, pod: V1Pod, **kwargs) -> V1Pod:
        """Runs POD asynchronously"""
        sanitized_pod = self._client.api_client.sanitize_for_serialization(pod)
        json_pod = json.dumps(sanitized_pod, indent=2)

        self.log.debug('Pod Creation Request: \n%s', json_pod)
        try:
            resp = self._client.create_namespaced_pod(
                body=sanitized_pod, namespace=pod.metadata.namespace, **kwargs
            )
            self.log.debug('Pod Creation Response: %s', resp)
        except Exception as e:
            self.log.exception(
                'Exception when attempting to create Namespaced Pod: %s', str(json_pod).replace("\n", " ")
            )
            raise e
        return resp

    def delete_pod(self, pod: V1Pod) -> None:
        """Deletes POD"""
        try:
            self._client.delete_namespaced_pod(
                pod.metadata.name, pod.metadata.namespace, body=client.V1DeleteOptions()
            )
        except ApiException as e:
            # If the pod is already deleted
            if e.status != 404:
                raise

    @tenacity.retry(
        stop=tenacity.stop_after_attempt(3),
        wait=tenacity.wait_random_exponential(),
        reraise=True,
        retry=tenacity.retry_if_exception(should_retry_start_pod),
    )
    def create_pod(self, pod: V1Pod) -> V1Pod:
        """Launches the pod asynchronously."""
        return self.run_pod_async(pod)

    def await_pod_start(self, pod: V1Pod, startup_timeout: int = 120) -> None:
        """
        Waits for the pod to reach a phase other than ``Pending``

        :param pod: the pod to watch
        :param startup_timeout: Timeout (in seconds) for startup of the pod
            (if the pod is pending for too long, the task fails)
        :return:
        """
        curr_time = datetime.now()
        while True:
            remote_pod = self.read_pod(pod)
            if remote_pod.status.phase != PodPhase.PENDING:
                break
            self.log.warning("Pod not yet started: %s", pod.metadata.name)
            delta = datetime.now() - curr_time
            if delta.total_seconds() >= startup_timeout:
                msg = (
                    f"Pod took longer than {startup_timeout} seconds to start. "
                    "Check the pod events in kubernetes to determine why."
                )
                raise PodLaunchFailedException(msg)
            time.sleep(1)

    def follow_container_logs(self, pod: V1Pod, container_name: str) -> PodLoggingStatus:
        warnings.warn(
            "Method `follow_container_logs` is deprecated.  Use `fetch_container_logs` instead"
            "with option `follow=True`.",
            DeprecationWarning,
        )
        return self.fetch_container_logs(pod=pod, container_name=container_name, follow=True)

    def fetch_container_logs(
        self, pod: V1Pod, container_name: str, *, follow=False, since_time: Optional[DateTime] = None
    ) -> PodLoggingStatus:
        """
        Follows the logs of container and streams to airflow logging.
        Returns when container exits.
        """

        def consume_logs(*, since_time: Optional[DateTime] = None, follow: bool = True) -> Optional[DateTime]:
            """
            Tries to follow container logs until the container completes.
            For a long-running container, the log read may occasionally be interrupted;
            such errors are suppressed.

            Returns the last timestamp observed in logs.
            """
            timestamp = None
            try:
                logs = self.read_pod_logs(
                    pod=pod,
                    container_name=container_name,
                    timestamps=True,
                    since_seconds=(
                        math.ceil((pendulum.now() - since_time).total_seconds()) if since_time else None
                    ),
                    follow=follow,
                )
                for raw_line in logs:
                    line = raw_line.decode('utf-8', errors="backslashreplace")
                    timestamp, message = self.parse_log_line(line)
                    self.log.info(message)
            except BaseHTTPError as e:
                self.log.warning(
                    "Reading of logs interrupted with error %r; will retry. "
                    "Set log level to DEBUG for traceback.",
                    e,
                )
                self.log.debug(
                    "Traceback for interrupted logs read for pod %r",
                    pod.metadata.name,
                    exc_info=True,
                )
            return timestamp or since_time

        # note: `read_pod_logs` follows the logs, so we shouldn't necessarily *need* to
        # loop as we do here. But in a long-running process we might temporarily lose connectivity.
        # So the looping logic is there to let us resume following the logs.
        last_log_time = since_time
        while True:
            last_log_time = consume_logs(since_time=last_log_time, follow=follow)
            if not self.container_is_running(pod, container_name=container_name):
                return PodLoggingStatus(running=False, last_log_time=last_log_time)
            if not follow:
                return PodLoggingStatus(running=True, last_log_time=last_log_time)
            else:
                self.log.warning(
                    'Pod %s log read interrupted but container %s still running',
                    pod.metadata.name,
                    container_name,
                )
                time.sleep(1)

    def await_container_completion(self, pod: V1Pod, container_name: str) -> None:
        while not self.container_is_running(pod=pod, container_name=container_name):
            time.sleep(1)

    def await_pod_completion(self, pod: V1Pod) -> V1Pod:
        """
        Monitors a pod and returns the final state

        :param pod: pod spec that will be monitored
        :return: the remote pod (``V1Pod``) once it reaches a terminal phase
        """
        while True:
            remote_pod = self.read_pod(pod)
            if remote_pod.status.phase in PodPhase.terminal_states:
                break
            self.log.info('Pod %s has phase %s', pod.metadata.name, remote_pod.status.phase)
            time.sleep(2)
        return remote_pod

    def parse_log_line(self, line: str) -> Tuple[Optional[DateTime], str]:
        """
        Parse a K8s log line and return its timestamp and message

        :param line: k8s log line
        :return: timestamp and log message
        :rtype: Tuple[Optional[DateTime], str]
        """
        split_at = line.find(' ')
        if split_at == -1:
            self.log.error(
                "Error parsing timestamp (no timestamp in message %r). "
                "Will continue execution but won't update timestamp",
                line,
            )
            return None, line
        timestamp = line[:split_at]
        message = line[split_at + 1 :].rstrip()
        try:
            last_log_time = cast(DateTime, pendulum.parse(timestamp))
        except ParserError:
            self.log.error("Error parsing timestamp. Will continue execution but won't update timestamp")
            return None, line
        return last_log_time, message

    def container_is_running(self, pod: V1Pod, container_name: str) -> bool:
        """Reads pod and checks if container is running"""
        remote_pod = self.read_pod(pod)
        return container_is_running(pod=remote_pod, container_name=container_name)

    @tenacity.retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_exponential(), reraise=True)
    def read_pod_logs(
        self,
        pod: V1Pod,
        container_name: str,
        tail_lines: Optional[int] = None,
        timestamps: bool = False,
        since_seconds: Optional[int] = None,
        follow=True,
    ) -> Iterable[bytes]:
        """Reads log from the POD"""
        additional_kwargs = {}
        if since_seconds:
            additional_kwargs['since_seconds'] = since_seconds

        if tail_lines:
            additional_kwargs['tail_lines'] = tail_lines

        try:
            return self._client.read_namespaced_pod_log(
                name=pod.metadata.name,
                namespace=pod.metadata.namespace,
                container=container_name,
                follow=follow,
                timestamps=timestamps,
                _preload_content=False,
                **additional_kwargs,
            )
        except BaseHTTPError:
            self.log.exception('There was an error reading the kubernetes API.')
            raise

    @tenacity.retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_exponential(), reraise=True)
    def read_pod_events(self, pod: V1Pod) -> "CoreV1EventList":
        """Reads events from the POD"""
        try:
            return self._client.list_namespaced_event(
                namespace=pod.metadata.namespace, field_selector=f"involvedObject.name={pod.metadata.name}"
            )
        except BaseHTTPError as e:
            raise AirflowException(f'There was an error reading the kubernetes API: {e}')

    @tenacity.retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_exponential(), reraise=True)
    def read_pod(self, pod: V1Pod) -> V1Pod:
        """Read POD information"""
        try:
            return self._client.read_namespaced_pod(pod.metadata.name, pod.metadata.namespace)
        except BaseHTTPError as e:
            raise AirflowException(f'There was an error reading the kubernetes API: {e}')

    def extract_xcom(self, pod: V1Pod) -> str:
        """Retrieves XCom value and kills xcom sidecar container"""
        with closing(
            kubernetes_stream(
                self._client.connect_get_namespaced_pod_exec,
                pod.metadata.name,
                pod.metadata.namespace,
                container=PodDefaults.SIDECAR_CONTAINER_NAME,
                command=['/bin/sh'],
                stdin=True,
                stdout=True,
                stderr=True,
                tty=False,
                _preload_content=False,
            )
        ) as resp:
            result = self._exec_pod_command(resp, f'cat {PodDefaults.XCOM_MOUNT_PATH}/return.json')
            self._exec_pod_command(resp, 'kill -s SIGINT 1')
        if result is None:
            raise AirflowException(f'Failed to extract xcom from pod: {pod.metadata.name}')
        return result

    def _exec_pod_command(self, resp, command: str) -> Optional[str]:
        res = None
        if resp.is_open():
            self.log.info('Running command... %s\n', command)
            resp.write_stdin(command + '\n')
            while resp.is_open():
                resp.update(timeout=1)
                while resp.peek_stdout():
                    res = res + resp.read_stdout() if res else resp.read_stdout()
                error_res = None
                while resp.peek_stderr():
                    error_res = error_res + resp.read_stderr() if error_res else resp.read_stderr()
                if error_res:
                    self.log.info("stderr from command: %s", error_res)
                    break
                if res:
                    return res
        return res
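
# A minimal usage sketch of PodManager (the pod spec `pod` and the container name are
# placeholders; building the V1Pod itself is out of scope for this example):
#
#     pod_manager = PodManager(kube_client=client.CoreV1Api())
#     remote_pod = pod_manager.create_pod(pod)
#     pod_manager.await_pod_start(remote_pod, startup_timeout=120)
#     pod_manager.fetch_container_logs(remote_pod, container_name='base', follow=True)
#     final_pod = pod_manager.await_pod_completion(remote_pod)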
Beispiel #39
0
    def send(self, target, ctxt, message, wait_for_reply=None, timeout=None,
             retry=None):
        with timeutils.StopWatch(duration=timeout) as stopwatch:
            if retry is None:
                retry = self._pika_engine.default_rpc_retry_attempts

            exchange = self._pika_engine.get_rpc_exchange_name(
                target.exchange
            )

            def on_exception(ex):
                if isinstance(ex, pika_drv_exc.ExchangeNotFoundException):
                    # it is desirable to create the exchange because if we send
                    # to an exchange which does not exist, we get a ChannelClosed
                    # exception and need to reconnect
                    try:
                        self._declare_rpc_exchange(exchange, stopwatch)
                    except pika_drv_exc.ConnectionException as e:
                        LOG.warning("Problem during declaring exchange. %s", e)
                    return True
                elif isinstance(ex, (pika_drv_exc.ConnectionException,
                                     exceptions.MessageDeliveryFailure)):
                    LOG.warning("Problem during message sending. %s", ex)
                    return True
                else:
                    return False

            if retry:
                retrier = tenacity.retry(
                    stop=(tenacity.stop_never if retry == -1 else
                          tenacity.stop_after_attempt(retry)),
                    retry=tenacity.retry_if_exception(on_exception),
                    wait=tenacity.wait_fixed(self._pika_engine.rpc_retry_delay)
                )
            else:
                retrier = None

            if target.fanout:
                return self.cast_all_workers(
                    exchange, target.topic, ctxt, message, stopwatch, retrier
                )

            routing_key = self._pika_engine.get_rpc_queue_name(
                target.topic, target.server, retrier is None
            )

            msg = pika_drv_msg.RpcPikaOutgoingMessage(self._pika_engine,
                                                      message, ctxt)
            try:
                reply = msg.send(
                    exchange=exchange,
                    routing_key=routing_key,
                    reply_listener=(
                        self._reply_listener if wait_for_reply else None
                    ),
                    stopwatch=stopwatch,
                    retrier=retrier
                )
            except pika_drv_exc.ExchangeNotFoundException as ex:
                try:
                    self._declare_rpc_exchange(exchange, stopwatch)
                except pika_drv_exc.ConnectionException as e:
                    LOG.warning("Problem during declaring exchange. %s", e)
                raise ex

            if reply is not None:
                if reply.failure is not None:
                    raise reply.failure

                return reply.result
Beispiel #40
0
    return connection


def is_disconnection_exception(e):
    if not libvirt:
        return False
    return (isinstance(e, libvirt.libvirtError)
            and e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
                                       libvirt.VIR_ERR_INTERNAL_ERROR)
            and e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
                                         libvirt.VIR_FROM_RPC))


retry_on_disconnect = tenacity.retry(
    retry=tenacity.retry_if_exception(is_disconnection_exception),
    stop=tenacity.stop_after_attempt(2))
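
# Hypothetical illustration (not part of the original module): the decorator above can
# wrap any libvirt call so that a dropped RPC connection is retried once before failing.
#
#     @retry_on_disconnect
#     def lookup_instance(conn, instance_name):
#         return conn.lookupByName(instance_name)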


def raise_nodata_if_unsupported(method):
    def inner(in_self, instance, *args, **kwargs):
        try:
            return method(in_self, instance, *args, **kwargs)
        except libvirt.libvirtError as e:
            # NOTE(sileht): At this point libvirt connection errors
            # have been re-raised as tenacity.RetryError()
            msg = _('Failed to inspect instance %(instance_uuid)s stats, '
                    'can not get info from libvirt: %(error)s') % {
                        "instance_uuid": instance.id,
                        "error": e}
            raise virt_inspector.NoDataException(msg)
    return inner
Beispiel #41
0
def retry_on_stale_data_error(func):
    wrapper = tenacity.retry(
        stop=tenacity.stop_after_attempt(3),
        retry=tenacity.retry_if_exception_type(exc.StaleDataError),
        reraise=True)
    return wrapper(func)
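
# Hypothetical usage of the decorator above (the function and its arguments are
# placeholders): a DB-writing helper retried up to three times on StaleDataError.
#
#     @retry_on_stale_data_error
#     def bump_revision(context, obj):
#         obj.revision_number += 1
#         context.session.flush()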
Beispiel #42
0
# Note, we yield the options via list_opts to enable generation of the
# sample heat.conf, but we don't register these options directly via
# cfg.CONF.register*, it's done via ks_loading.register_auth_conf_options
# Note, only auth_type = v3password is expected to work, example config:
# [trustee]
# auth_type = password
# auth_url = http://192.168.1.2:35357
# username = heat
# password = password
# user_domain_id = default
PASSWORD_PLUGIN = 'password'  # nosec Bandit B105
TRUSTEE_CONF_GROUP = 'trustee'
ks_loading.register_auth_conf_options(cfg.CONF, TRUSTEE_CONF_GROUP)

retry_on_connection_timeout = tenacity.retry(
    stop=tenacity.stop_after_attempt(cfg.CONF.client_retry_limit + 1),
    wait=tenacity.wait_random(max=2),
    retry=tenacity.retry_if_exception_type(
        (ksa_exceptions.ConnectFailure, ksa_exceptions.DiscoveryFailure)),
    reraise=True)


def list_opts():
    trustee_opts = ks_loading.get_auth_common_conf_options()
    trustee_opts.extend(
        ks_loading.get_auth_plugin_conf_options(PASSWORD_PLUGIN))
    yield TRUSTEE_CONF_GROUP, trustee_opts


def _moved_attr(new_name):
    def getter(self):
Beispiel #43
0
    async def generate_game(self):
        with tempfile.TemporaryDirectory() as tmp:
            settings_file_path = os.path.join(tmp, "settings.json")
            self.hash = ''.join(
                random.choices(string.ascii_letters + string.digits, k=12))

            self.settings['outputpath'] = tmp
            self.settings['outputname'] = self.hash
            self.settings['create_rom'] = True
            self.settings['create_spoiler'] = True
            self.settings['calc_playthrough'] = False
            self.settings['rom'] = os.environ.get('ALTTP_ROM')
            self.settings['enemizercli'] = os.path.join(
                os.environ.get('ENEMIZER_HOME'), 'EnemizerCLI.Core')

            # set some defaults we do NOT want to change ever
            self.settings['count'] = 1
            self.settings['multi'] = 1
            self.settings['names'] = ""
            self.settings['race'] = not self.spoilers

            with open(settings_file_path, "w") as f:
                json.dump(self.settings, f)

            attempts = 0
            try:
                async for attempt in AsyncRetrying(
                        stop=stop_after_attempt(10),
                        retry=retry_if_exception_type(Exception)):
                    with attempt:
                        attempts += 1
                        proc = await asyncio.create_subprocess_exec(
                            'python3',
                            'DungeonRandomizer.py',
                            '--settingsfile',
                            settings_file_path,
                            stdout=asyncio.subprocess.PIPE,
                            stderr=asyncio.subprocess.PIPE,
                            cwd=os.environ.get('DOOR_RANDO_HOME'))

                        stdout, stderr = await proc.communicate()
                        logging.info(stdout.decode())
                        if proc.returncode > 0:
                            raise Exception(
                                f'Exception while generating game: {stderr.decode()}'
                            )

            except RetryError as e:
                raise e.last_attempt.exception() from e

            self.attempts = attempts

            self.patch_name = "DR_" + self.settings['outputname'] + ".bps"
            self.rom_name = "DR_" + self.settings['outputname'] + ".sfc"
            self.spoiler_name = "DR_" + self.settings[
                'outputname'] + "_Spoiler.txt"

            rom_path = os.path.join(tmp, self.rom_name)
            patch_path = os.path.join(tmp, self.patch_name)
            spoiler_path = os.path.join(tmp, self.spoiler_name)

            proc = await asyncio.create_subprocess_exec(
                os.path.join('utils', 'flips'),
                '--create',
                '--bps-delta',
                os.environ.get("ALTTP_ROM"),
                rom_path,
                patch_path,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE)

            stdout, stderr = await proc.communicate()
            logging.info(stdout.decode())
            if proc.returncode > 0:
                raise Exception(
                    f'Exception while creating patch: {stderr.decode()}')

            async with aiofiles.open(patch_path, "rb") as f:
                patchfile = await f.read()

            session = aioboto3.Session()
            async with session.client('s3') as s3:
                await s3.put_object(
                    Bucket=os.environ.get('SAHASRAHBOT_BUCKET'),
                    Key=f"patch/{self.patch_name}",
                    Body=patchfile,
                    ACL='public-read')

            async with aiofiles.open(spoiler_path, "rb") as f:
                self.spoilerfile = await f.read()

            async with session.client('s3') as s3:
                await s3.put_object(
                    Bucket=os.environ.get('SAHASRAHBOT_BUCKET'),
                    Key=f"spoiler/{self.spoiler_name}",
                    Body=gzip.compress(self.spoilerfile),
                    ACL='public-read' if self.spoilers else 'private',
                    ContentEncoding='gzip',
                    ContentDisposition='attachment')
Beispiel #44
0
class GitRepo(object):
    """
        Class for working with repositories
        (a minimal usage sketch follows the class below)
    """
    def __init__(self, root_repo_dir, repo_name, branch, url, commit_id=None):
        """
        :param root_repo_dir: Directory where repositories will clone
        :param repo_name: Name of repository
        :param branch: Branch of repository
        :param commit_id: Commit ID
        """

        self.name = repo_name
        self.branch = branch
        self.url = url
        self.commit_id = commit_id
        self.local_repo_dir = root_repo_dir / repo_name
        self.repo = None

        self.log = logging.getLogger()

    def prepare_repo(self):
        """
        Prepares the repository for the build.
        Includes cloning and updating the repo to the remote state.

        :return: None
        """
        self.log.info('-' * 50)
        self.log.info("Getting repo " + self.name)

        self.clone()
        self.repo = git.Repo(str(self.local_repo_dir))
        self.hard_reset()
        self.clean()
        self.checkout("master", silent=True)
        self.fetch()
        self.hard_reset('FETCH_HEAD')

    @retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=60))
    def clone(self):
        """
        Clone repo

        :return: None
        """

        # checking correctness of git repository
        # if dir is not repository, it will be removed
        if self.local_repo_dir.exists():
            try:
                git.Repo(str(self.local_repo_dir))
            except git.InvalidGitRepositoryError:
                self.log.info('Remove broken repo %s', self.local_repo_dir)
                remove_directory(self.local_repo_dir)

        if not self.local_repo_dir.exists():
            self.log.info("Clone repo " + self.name)
            git.Git().clone(self.url, str(self.local_repo_dir))

    @retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=60))
    def fetch(self):
        """
        Fetch repo

        :return: None
        """

        self.log.info("Fetch repo %s to %s", self.name, self.branch)
        self.repo.remotes.origin.fetch(self.branch)

    @retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=60))
    def hard_reset(self, reset_to=None):
        """
        Hard reset repo

        :param reset_to: Commit ID or branch. If None - hard reset to HEAD
        :return: None
        """

        self.log.info("Hard reset repo " + self.name)
        if reset_to:
            self.repo.git.reset('--hard', reset_to)
        else:
            self.repo.git.reset('--hard')

    @retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=60))
    def checkout(self, branch=None, silent=False):
        """
        Checkout to certain state

        :param branch: Branch of repo. If None - checkout to commit ID from class variable commit_id

        :param silent: Skip logging the commit time
               (set to True only if commit_id does not exist yet)
        :type silent: Boolean

        :return: None
        """

        checkout_to = branch if branch else self.commit_id
        self.log.info("Checkout repo %s to %s", self.name, checkout_to)
        self.repo.git.checkout(checkout_to, force=True)

        if not silent:
            # error raises after checkout to master if we try
            # to get time of triggered commit_id before fetching repo
            # (commit does not exist in local repository yet)
            committed_date = self.repo.commit(self.commit_id).committed_date
            self.log.info("Committed date: %s",
                          datetime.fromtimestamp(committed_date))

    @retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=60))
    def clean(self):
        """
        Clean repo

        :return: None
        """

        self.log.info("Clean repo " + self.name)
        self.repo.git.clean('-xdf')

    @retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=60))
    def pull(self):
        """
        Pull repo
        :return: None
        """

        self.log.info("Pull repo " + self.name)
        self.repo.git.pull()

    def revert_commit_by_time(self, commit_time):
        """
        Sets the commit by time.
        The newest commit whose date is <= the given time
        is stored in the class variable commit_id.

        :param commit_time: datetime
        :return: None
        """

        self.commit_id = str(
            next(
                self.repo.iter_commits(rev='master',
                                       until=commit_time,
                                       max_count=1)))

    def get_time(self, commit_id=None):
        """
        Get datetime of commit

        :param commit_id: Commit ID
        :return: datetime
        """

        commit = commit_id if commit_id else self.commit_id
        return self.repo.commit(commit).committed_date
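
# A minimal usage sketch of GitRepo (directory, name and URL are placeholders;
# root_repo_dir is assumed to be a pathlib.Path, since it is joined with "/" in __init__):
#
#     repo = GitRepo(pathlib.Path('/tmp/repos'), 'component', 'master',
#                    'https://example.org/component.git', commit_id='0123abcd')
#     repo.prepare_repo()
#     repo.checkout()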
Beispiel #45
0
class GoogleAPIResource(Resource):

    # Names of the get method of the root resource
    get_method = "get"
    required_resource_data = ['name']

    # jmespath expression for getting labels
    resource_labels_path = "resource.labels"

    # Other properties of a resource we might need to perform evaluations, such as iam policy
    resource_components = {}

    # If a resource is not in a ready state, we can't update it. If we retrieve
    # it, and the state changes, updates will be rejected because the ETAG will
    # have changed. If a resource defines readiness criteria, the get() call
    # will wait until the resource is in a ready state to return
    #
    # Key/Value to check to see if a resource is ready
    readiness_key = None
    readiness_value = None
    readiness_terminal_values = []
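    # For example (hypothetical values, not defined by this base class), a subclass
    # for a resource with a lifecycle state field might set:
    #   readiness_key = 'state'
    #   readiness_value = 'READY'
    #   readiness_terminal_values = ['FAILED', 'DELETING']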

    def __init__(self, client_kwargs=None, **resource_data):

        if client_kwargs is None:
            client_kwargs = {}

        # Set some defaults
        self._service = None
        self._resource_metadata = None
        self._full_resource_name = None

        # Load and validate additional resource data
        self._resource_data = resource_data
        self._validate_resource_data()

        # Store the client kwargs to pass to any new clients
        self._client_kwargs = client_kwargs

        self._ancestry = None

    def _validate_resource_data(self):
        ''' Verify we have all the required data for this resource '''
        if not all(arg in self._resource_data
                   for arg in self.required_resource_data):

            raise ResourceException(
                'Missing data required for resource creation. Expected data: {}; Got: {}'
                .format(','.join(self.required_resource_data),
                        ','.join(self._resource_data.keys())))

    @staticmethod
    def _extract_cai_name_data(name):
        ''' Attempt to get identifiable information out of a Cloud Asset Inventory-formatted resource_name '''
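        # For example, a hypothetical CAI name such as
        #   //container.googleapis.com/projects/my-proj/zones/us-central1-a/clusters/c1/nodePools/np1
        # would yield {'project_id': 'my-proj', 'location': 'us-central1-a',
        #              'cluster': 'c1', 'name': 'np1'} with the patterns below.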

        # Most resources need only a subset of these fields to query the google apis
        fields = {
            'project_id': r'/projects/([^\/]+)/',
            'location': r'/(?:locations|regions|zones)/([^\/]+)/',
            'name': r'([^\/]+)$',

            # Less-common resource data
            #  AppEngine
            'app': r'/apps/([^\/]+)/',
            'service': r'/services/([^\/]+)/',
            'version': r'/versions/([^\/]+)/',

            #  NodePools
            'cluster': r'/clusters/([^\/]+)/',

            # ServiceAccounts
            'service_account': r'serviceAccounts/([^\/]+)/',
        }

        resource_data = {}

        # Extract available resource data from resource name
        for field_name in fields:
            m = re.search(fields[field_name], name)
            if m:
                resource_data[field_name] = m.group(1)

        return resource_data

    @classmethod
    def subclass_by_type(cls, resource_type):
        mapper = {
            res_cls.resource_type: res_cls
            for res_cls in cls.__subclasses__()
        }

        try:
            return mapper[resource_type]
        except KeyError:
            raise ResourceException(
                'Unrecognized resource type: {}'.format(resource_type))

    @classmethod
    def from_resource_data(cls,
                           *,
                           resource_type,
                           client_kwargs=None,
                           **resource_data):
        if client_kwargs is None:
            client_kwargs = {}
        res_cls = cls.subclass_by_type(resource_type)
        return res_cls(client_kwargs=client_kwargs, **resource_data)

    @staticmethod
    def from_cai_data(resource_name,
                      resource_type,
                      project_id=None,
                      client_kwargs=None):
        ''' Attempt to return the appropriate resource using Cloud Asset Inventory-formatted resource info '''

        if client_kwargs is None:
            client_kwargs = {}

        res_cls = GoogleAPIResource.subclass_by_type(resource_type)

        resource_data = GoogleAPIResource._extract_cai_name_data(resource_name)

        # If the project_id was passed and it wasn't found in the resource name, add it
        if project_id and 'project_id' not in resource_data:
            resource_data['project_id'] = project_id

        return res_cls(client_kwargs=client_kwargs, **resource_data)

    def to_dict(self):
        details = self._resource_data.copy()
        details.update({
            'resource_type': self.resource_type,
        })

        try:
            details['full_resource_name'] = self.full_resource_name()
        except Exception:
            details['full_resource_name'] = None

        return details

    def type(self):
        return self.resource_type

    def full_resource_name(self):
        if self._full_resource_name is None:
            self.gen_full_resource_name()

        return self._full_resource_name

    # Google's documentation describes what it calls a 'full resource name'
    # for resources. None of the APIs seem to implement it (except Cloud
    # Asset Inventory). This attempts to generate it from the HTTP request
    # URL built by the discovery-based API client.
    #
    # If we inject it into the resource, we can use it in policy evaluation
    # to simplify the structure of our policies.
    def gen_full_resource_name(self):

        method = getattr(self.service, self.get_method)
        uri = method(**self._get_request_args()).uri

        uri_parsed = urlparse(uri)
        domain = uri_parsed.netloc
        path_segments = uri_parsed.path[1:].split('/')

        # CAI uses cloudsql.googleapis.com in its full_resource_name, so we
        # need to detect all bad incarnations and replace them
        bad_sql_names = ['sql', 'sqladmin']

        # First we need the name of the api
        if domain.startswith("www."):
            # we need to get the api name from the path
            api_name = path_segments.pop(0)
            api_name = 'cloudsql' if api_name in bad_sql_names else api_name
        else:
            # the api name is the first segment of the domain
            api_name = domain.split('.')[0]

            # the sql api is now returning sqladmin.googleapis.com/sql/<ver>
            # and the CAI docs state the FRN for sql instances should start
            # with //cloudsql.googleapis.com/ so let's replace all odd sql ones
            # and rely on the code below to catch duplicates
            if path_segments[0] in bad_sql_names:
                path_segments[0] = 'cloudsql'
            api_name = 'cloudsql' if api_name in bad_sql_names else api_name

            # occasionally the compute api baseUrl is returned as
            # compute.googleapis.com/compute, in which case we need to remove
            # the duplicated api reference
            # also addresses the sql issue mentioned above
            if api_name == path_segments[0]:
                path_segments.pop(0)

        # Remove the version from the path
        path_segments.pop(0)

        # Remove method from the last path segment
        if ":" in path_segments[-1]:
            path_segments[-1] = path_segments[-1].split(":")[0]

        # Annoying resource-specific fixes

        # The URL for buckets is `/b/` instead of `/buckets/`; initially this fixed that.
        # Unfortunately, CAI omits the collection name rather than following
        # Google's API design doc, so we just remove the collection name.
        #
        # https://cloud.google.com/apis/design/resource_names
        # See: https://issuetracker.google.com/issues/131586763
        #
        if api_name == 'storage' and path_segments[0] == 'b':
            path_segments.pop(0)
            # Replace with this if they fix CAI:
            # path_segments[0] = "buckets"

        if api_name == 'bigtableadmin':
            api_name = 'bigtable'

        resource_path = "/".join(path_segments)

        self._full_resource_name = "//{}.googleapis.com/{}".format(
            api_name, resource_path)

    def _get_component(self, component):
        method_name = self.resource_components[component]

        # Many components take the same request signature, but allow for custom request
        # args if needed. Fall back to default args if the expected function doesn't exist
        if hasattr(self, f'_get_{component}_request_args'):
            req_arg_method = getattr(self, f'_get_{component}_request_args')
        else:
            req_arg_method = getattr(self, '_get_request_args')

        method = getattr(self.service, method_name)

        component_metadata = method(**req_arg_method()).execute()
        return component_metadata

    def get(self, refresh=True):

        if not refresh and self._resource_metadata:
            return self._resource_metadata

        method = getattr(self.service, self.get_method)

        # If the resource has readiness criteria, wait for it
        if self.readiness_key and self.readiness_value:
            waiter = Waiter(method, **self._get_request_args())
            asset = waiter.wait(self.readiness_key,
                                self.readiness_value,
                                terminal_values=self.readiness_terminal_values,
                                interval=10,
                                retries=90)
        else:
            asset = method(**self._get_request_args()).execute()

        resp = {
            'type': self.type(),
            'name': self.full_resource_name(),
        }

        resp['resource'] = asset

        for c in self.resource_components:
            resp[c] = self._get_component(c)

        self._resource_metadata = resp
        return self._resource_metadata

    # Determine what remediation steps to take, allow for future remediation specifications
    def remediate(self, remediation):
        # Check the remediation spec version; only v2beta1/v2 are currently supported
        remediation_spec = remediation.get('_remediation_spec', "")

        if remediation_spec in ['v2beta1', 'v2']:
            required_keys = ['method', 'params']

            for step in remediation.get('steps', []):
                if not all(k in step for k in required_keys):
                    raise InvalidRemediationSpecStep()

                method_name = step.get('method')
                params = step.get('params')
                self._call_method(method_name, params)
        else:
            raise UnsupportedRemediationSpec(
                "The specified remediation spec is not supported")

    @tenacity.retry(retry=tenacity.retry_if_exception(is_retryable_exception),
                    wait=tenacity.wait_random_exponential(multiplier=5,
                                                          max=20),
                    stop=tenacity.stop_after_attempt(15))
    def _call_method(self, method_name, params):
        ''' Call the requested method on the resource '''
        method = getattr(self.service, method_name)
        return method(**params).execute()

    @property
    def ancestry(self):
        if self._ancestry:
            return self._ancestry

        # attempt to fill in the resource's ancestry
        # if the target project has the cloudresourcemanager api disabled, this will fail
        # if the resource_data doesn't include the project_id (ex: with storage buckets) this will also fail
        try:
            resource_manager_projects = build_subresource(
                'cloudresourcemanager.projects', 'v1', **self._client_kwargs)

            resp = resource_manager_projects.getAncestry(
                projectId=self.project_id).execute()

            # Reformat getAncestry response to be a list of resource names
            self._ancestry = [
                f"//cloudresourcemanager.googleapis.com/{ancestor['resourceId']['type']}s/{ancestor['resourceId']['id']}"
                for ancestor in resp.get('ancestor')
            ]

        except Exception:
            # This call is best-effort. Any failures should be caught
            pass

        return self._ancestry

    @property
    def organization(self):
        ancestry = self.ancestry
        if not ancestry:
            return None

        return next((ancestor for ancestor in ancestry if ancestor.startswith(
            '//cloudresourcemanager.googleapis.com/organizations/')), None)

    @property
    def client_kwargs(self):
        return self._client_kwargs

    @client_kwargs.setter
    def client_kwargs(self, client_kwargs):

        # Invalidate service/parent because client_kwargs changed
        self._service = None

        self._client_kwargs = client_kwargs

    @property
    def service(self):
        if self._service is None:

            full_resource_path = "{}.{}".format(self.service_name,
                                                self.resource_path)

            self._service = build_subresource(full_resource_path, self.version,
                                              **self._client_kwargs)
        return self._service

    @property
    def labels(self):
        labels = jmespath.search(self.resource_labels_path,
                                 self.get(refresh=False)) or {}
        if not isinstance(labels, dict):
            raise TypeError("Unexpected label format")

        return labels

    @property
    def project_id(self):
        return self._resource_data.get('project_id')
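
# Minimal usage sketch (not part of the class above). The asset name, the
# resource_type string, and the credentials object are hypothetical; whether a
# subclass is registered for that type depends on code outside this excerpt:
#
#   resource = GoogleAPIResource.from_cai_data(
#       resource_name='//cloudsql.googleapis.com/projects/my-proj/instances/my-db',
#       resource_type='sqladmin.googleapis.com/Instance',
#       client_kwargs={'credentials': credentials})
#   metadata = resource.get()
#   print(resource.full_resource_name(), metadata['resource'])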
Beispiel #46
0
import logging
import time
from typing import Any, List

from googleapiclient.errors import HttpError
from tenacity import TryAgain, retry, retry_if_exception_type, stop_after_attempt, wait_exponential

from dispatch.decorators import apply, counter, timer
from dispatch.plugins.bases import ParticipantGroupPlugin
from dispatch.plugins.dispatch_google import groups as google_group_plugin
from dispatch.plugins.dispatch_google.common import get_service
from dispatch.plugins.dispatch_google.config import GOOGLE_USER_OVERRIDE, GOOGLE_DOMAIN

log = logging.getLogger(__name__)


@retry(
    stop=stop_after_attempt(3),
    retry=retry_if_exception_type(TryAgain),
    wait=wait_exponential(multiplier=1, min=2, max=5),
)
def make_call(client: Any,
              func: Any,
              delay: int = None,
              propagate_errors: bool = False,
              **kwargs):
    """Make an google client api call."""
    try:
        data = getattr(client, func)(**kwargs).execute()

        if delay:
            time.sleep(delay)
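
# Standalone sketch of the TryAgain pattern used by make_call above. It relies
# only on the imports already shown; the counter and function name are
# illustrative, not part of the plugin.
_attempts = {"n": 0}


@retry(
    stop=stop_after_attempt(3),
    retry=retry_if_exception_type(TryAgain),
    wait=wait_exponential(multiplier=1, min=2, max=5),
)
def eventually_succeeds():
    """Raise TryAgain twice, then return; tenacity performs the retries."""
    _attempts["n"] += 1
    if _attempts["n"] < 3:
        raise TryAgain
    return "ready"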
Beispiel #47
0
class Buda(ExchangeClient):
    def __init__(self, public_key=None, secret_key=None):
        self.name = 'Buda'
        # if not read_only and (public_key is None or secret_key is None):
        #     public_key = input('Enter your public key: ')
        #     secret_key = input('Enter your private key: ')
        self.base_uri = 'https://www.buda.com/api'
        self.api_type = 'REST'
        if public_key and secret_key:
            self.auth = BudaHMACAuth(public_key, secret_key)
        self.timeout = 5
        super().__init__(
            read_only=True if not (public_key and secret_key) else False)

    @retry(stop=stop_after_attempt(number_of_attempts), wait=wait_fixed(0.2))
    def get_book(self, pair):
        # try:
        response = requests.get(
            f"{self.base_uri}/v2/markets/{pair.ticker}/order_book",
            timeout=self.timeout)
        book = response.json()['order_book']
        # except JSONDecodeError:
        #     print(response)

        asks = [{'amount': x[1], 'price': x[0]} for x in book['asks']]
        bids = [{'amount': x[1], 'price': x[0]} for x in book['bids']]
        return {ASK: asks, BID: bids}

    @retry(stop=stop_after_attempt(number_of_attempts))
    def get_active_orders(self, pair):
        result = {ASK: [], BID: []}
        try:
            response = requests.get(
                f"{self.base_uri}/v2/markets/{pair.ticker}/orders",
                auth=self.auth,
                timeout=self.timeout).json()
            orders = response['orders']
        except KeyError:
            # Unexpected response shape; log it and re-raise so the retry
            # decorator can try again instead of failing on an unbound name.
            print(response)
            raise
        for order in orders:
            if order['state'] == 'canceled':
                continue
            side = ASK if order['type'].lower() == 'ask' else BID
            result[side].append(
                Order(order['limit'][0],
                      side,
                      order['amount'][0],
                      order_id=order['id'],
                      pair=pair))
        return result

    @retry(retry=retry_if_exception(is_not_local_exception),
           stop=stop_after_attempt(number_of_attempts))
    def cancel_order(self, order):
        requests.put(f"{self.base_uri}/v2/orders/{order.order_id}",
                     auth=self.auth,
                     json={
                         'state': 'canceling'
                     },
                     timeout=self.timeout).json()

    @retry(retry=retry_if_exception(is_not_local_exception),
           stop=stop_after_attempt(number_of_attempts))
    def get_balance(self, currency):
        try:
            response = requests.get(f"{self.base_uri}/v2/balances",
                                    auth=self.auth,
                                    timeout=self.timeout)
            balances = response.json()['balances']
        except KeyError:
            # Unexpected response shape; log it and re-raise so the retry
            # decorator can try again instead of failing on an unbound name.
            print(response.content)
            raise
        try:
            search_result = next(
                item for item in balances
                if item["id"].lower() == currency.symbol.lower())
        except StopIteration:
            raise currency_doesnt_exist
        return [
            search_result['available_amount'][0],
            search_result['frozen_amount'][0]
        ]

    @retry(retry=retry_if_exception(is_not_local_exception),
           stop=stop_after_attempt(number_of_attempts))
    def create_order(self, pair, amount, side, limit_price=None):
        body = {}
        body['price_type'] = 'LIMIT' if limit_price else 'MARKET'
        body['amount'] = truncate(amount, 5)
        if limit_price:
            body['limit'] = limit_price
        body['type'] = 'Ask' if side is ASK else 'Bid'
        response = requests.post(
            f"{self.base_uri}/v2/markets/{pair.ticker}/orders",
            json=body,
            auth=self.auth,
            timeout=self.timeout).json()

    def subscribe(self, pair):
        self._register_pair_and_currencies(pair)
        if self.api_type == 'REST':
            self.__start_threads__(pair)

    def unsubscribe(self, pair):
        pass

    @retry(retry=retry_if_exception(is_not_local_exception),
           stop=stop_after_attempt(number_of_attempts))
    def get_list_of_currencies_and_pairs(self, auto_register=False):
        response = requests.get(f"{self.base_uri}/v2/markets",
                                timeout=self.timeout).json()
        pairs = response['markets']
        list_of_currencies = set([])
        list_of_pairs = []
        for pair in pairs:
            try:
                base_curr = quote_curr = None
                for curr in list_of_currencies:
                    if pair['base_currency'] == curr.symbol:
                        base_curr = curr
                    if pair['quote_currency'] == curr.symbol:
                        quote_curr = curr
                if not base_curr:
                    base_curr = base.exchange.Currency(
                        name=pair['base_currency'],
                        symbol=pair['base_currency'],
                        exchange_client=self)
                if not quote_curr:
                    quote_curr = base.exchange.Currency(
                        name=pair['quote_currency'],
                        symbol=pair['quote_currency'],
                        exchange_client=self)
            except currency_doesnt_exist:
                continue
            pair = base.exchange.Pair(
                ticker=pair['id'],
                quote=quote_curr,
                base=base_curr,
                minimum_step=pair['minimum_order_amount'][0],
                exchange_client=self)
            list_of_pairs.append(pair)
            list_of_currencies.add(quote_curr)
            list_of_currencies.add(base_curr)
        if auto_register is True:
            for pair in list_of_pairs:
                self._register_pair_and_currencies(pair)
        return list(list_of_currencies), list_of_pairs

    def get_history(self, pair):
        response = requests.get(f"{self.base_uri}/v1/trade/{pair.ticker}/",
                                headers=self.headers,
                                timeout=self.timeout).json()
        print(response)
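
# Standalone sketch of the predicate-based retry used throughout the Buda
# client: retry only when the failure looks like a transient network error,
# never on local bugs such as KeyError. The original is_not_local_exception
# helper is not shown in this excerpt, so the toy predicate below is an
# assumption for illustration only.
import requests
from tenacity import retry, retry_if_exception, stop_after_attempt


def _looks_transient(exc):
    return isinstance(exc, (requests.exceptions.ConnectionError,
                            requests.exceptions.Timeout))


@retry(retry=retry_if_exception(_looks_transient), stop=stop_after_attempt(3))
def fetch_markets(base_uri='https://www.buda.com/api'):
    """Fetch the public market list, retrying on transient network errors."""
    return requests.get(f"{base_uri}/v2/markets", timeout=5).json()['markets']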
Beispiel #48
0
class ControllerWorker(base_taskflow.BaseTaskFlowEngine):

    def __init__(self):

        self._amphora_flows = amphora_flows.AmphoraFlows()
        self._health_monitor_flows = health_monitor_flows.HealthMonitorFlows()
        self._lb_flows = load_balancer_flows.LoadBalancerFlows()
        self._listener_flows = listener_flows.ListenerFlows()
        self._member_flows = member_flows.MemberFlows()
        self._pool_flows = pool_flows.PoolFlows()
        self._l7policy_flows = l7policy_flows.L7PolicyFlows()
        self._l7rule_flows = l7rule_flows.L7RuleFlows()

        self._amphora_repo = repo.AmphoraRepository()
        self._amphora_health_repo = repo.AmphoraHealthRepository()
        self._health_mon_repo = repo.HealthMonitorRepository()
        self._lb_repo = repo.LoadBalancerRepository()
        self._listener_repo = repo.ListenerRepository()
        self._member_repo = repo.MemberRepository()
        self._pool_repo = repo.PoolRepository()
        self._l7policy_repo = repo.L7PolicyRepository()
        self._l7rule_repo = repo.L7RuleRepository()
        self._flavor_repo = repo.FlavorRepository()

        super(ControllerWorker, self).__init__()

    @tenacity.retry(
        retry=(
            tenacity.retry_if_result(_is_provisioning_status_pending_update) |
            tenacity.retry_if_exception_type()),
        wait=tenacity.wait_incrementing(
            RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def _get_db_obj_until_pending_update(self, repo, id):

        return repo.get(db_apis.get_session(), id=id)

    def create_amphora(self):
        """Creates an Amphora.

        This is used to create a spare amphora.

        :returns: amphora_id
        """
        try:
            create_amp_tf = self._taskflow_load(
                self._amphora_flows.get_create_amphora_flow(),
                store={constants.BUILD_TYPE_PRIORITY:
                       constants.LB_CREATE_SPARES_POOL_PRIORITY,
                       constants.FLAVOR: None}
            )
            with tf_logging.DynamicLoggingListener(create_amp_tf, log=LOG):
                create_amp_tf.run()

            return create_amp_tf.storage.fetch('amphora')
        except Exception as e:
            LOG.error('Failed to create an amphora due to: {}'.format(str(e)))

    def delete_amphora(self, amphora_id):
        """Deletes an existing Amphora.

        :param amphora_id: ID of the amphora to delete
        :returns: None
        :raises AmphoraNotFound: The referenced Amphora was not found
        """
        amphora = self._amphora_repo.get(db_apis.get_session(),
                                         id=amphora_id)
        delete_amp_tf = self._taskflow_load(self._amphora_flows.
                                            get_delete_amphora_flow(),
                                            store={constants.AMPHORA: amphora})
        with tf_logging.DynamicLoggingListener(delete_amp_tf,
                                               log=LOG):
            delete_amp_tf.run()

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(
            RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_health_monitor(self, health_monitor_id):
        """Creates a health monitor.

        :param health_monitor_id: ID of the health monitor to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        health_mon = self._health_mon_repo.get(db_apis.get_session(),
                                               id=health_monitor_id)
        if not health_mon:
            LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
                        '60 seconds.', 'health_monitor', health_monitor_id)
            raise db_exceptions.NoResultFound

        pool = health_mon.pool
        listeners = pool.listeners
        pool.health_monitor = health_mon
        load_balancer = pool.load_balancer

        create_hm_tf = self._taskflow_load(
            self._health_monitor_flows.get_create_health_monitor_flow(),
            store={constants.HEALTH_MON: health_mon,
                   constants.POOL: pool,
                   constants.LISTENERS: listeners,
                   constants.LOADBALANCER: load_balancer})
        with tf_logging.DynamicLoggingListener(create_hm_tf,
                                               log=LOG):
            create_hm_tf.run()

    def delete_health_monitor(self, health_monitor_id):
        """Deletes a health monitor.

        :param health_monitor_id: ID of the health monitor to delete
        :returns: None
        :raises HMNotFound: The referenced health monitor was not found
        """
        health_mon = self._health_mon_repo.get(db_apis.get_session(),
                                               id=health_monitor_id)

        pool = health_mon.pool
        listeners = pool.listeners
        load_balancer = pool.load_balancer

        delete_hm_tf = self._taskflow_load(
            self._health_monitor_flows.get_delete_health_monitor_flow(),
            store={constants.HEALTH_MON: health_mon,
                   constants.POOL: pool,
                   constants.LISTENERS: listeners,
                   constants.LOADBALANCER: load_balancer})
        with tf_logging.DynamicLoggingListener(delete_hm_tf,
                                               log=LOG):
            delete_hm_tf.run()

    def update_health_monitor(self, health_monitor_id, health_monitor_updates):
        """Updates a health monitor.

        :param health_monitor_id: ID of the health monitor to update
        :param health_monitor_updates: Dict containing updated health monitor
        :returns: None
        :raises HMNotFound: The referenced health monitor was not found
        """
        health_mon = None
        try:
            health_mon = self._get_db_obj_until_pending_update(
                self._health_mon_repo, health_monitor_id)
        except tenacity.RetryError as e:
            LOG.warning('Health monitor did not go into %s in 60 seconds. '
                        'This is either due to an in-progress Octavia upgrade '
                        'or an overloaded and failing database. Assuming '
                        'an upgrade is in progress and continuing.',
                        constants.PENDING_UPDATE)
            health_mon = e.last_attempt.result()

        pool = health_mon.pool
        listeners = pool.listeners
        pool.health_monitor = health_mon
        load_balancer = pool.load_balancer

        update_hm_tf = self._taskflow_load(
            self._health_monitor_flows.get_update_health_monitor_flow(),
            store={constants.HEALTH_MON: health_mon,
                   constants.POOL: pool,
                   constants.LISTENERS: listeners,
                   constants.LOADBALANCER: load_balancer,
                   constants.UPDATE_DICT: health_monitor_updates})
        with tf_logging.DynamicLoggingListener(update_hm_tf,
                                               log=LOG):
            update_hm_tf.run()

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(
            RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_listener(self, listener_id):
        """Creates a listener.

        :param listener_id: ID of the listener to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        listener = self._listener_repo.get(db_apis.get_session(),
                                           id=listener_id)
        if not listener:
            LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
                        '60 seconds.', 'listener', listener_id)
            raise db_exceptions.NoResultFound

        load_balancer = listener.load_balancer

        create_listener_tf = self._taskflow_load(self._listener_flows.
                                                 get_create_listener_flow(),
                                                 store={constants.LOADBALANCER:
                                                        load_balancer,
                                                        constants.LISTENERS:
                                                            [listener]})
        with tf_logging.DynamicLoggingListener(create_listener_tf,
                                               log=LOG):
            create_listener_tf.run()

    def delete_listener(self, listener_id):
        """Deletes a listener.

        :param listener_id: ID of the listener to delete
        :returns: None
        :raises ListenerNotFound: The referenced listener was not found
        """
        listener = self._listener_repo.get(db_apis.get_session(),
                                           id=listener_id)
        load_balancer = listener.load_balancer

        delete_listener_tf = self._taskflow_load(
            self._listener_flows.get_delete_listener_flow(),
            store={constants.LOADBALANCER: load_balancer,
                   constants.LISTENER: listener})
        with tf_logging.DynamicLoggingListener(delete_listener_tf,
                                               log=LOG):
            delete_listener_tf.run()

    def update_listener(self, listener_id, listener_updates):
        """Updates a listener.

        :param listener_id: ID of the listener to update
        :param listener_updates: Dict containing updated listener attributes
        :returns: None
        :raises ListenerNotFound: The referenced listener was not found
        """
        listener = None
        try:
            listener = self._get_db_obj_until_pending_update(
                self._listener_repo, listener_id)
        except tenacity.RetryError as e:
            LOG.warning('Listener did not go into %s in 60 seconds. '
                        'This is either due to an in-progress Octavia upgrade '
                        'or an overloaded and failing database. Assuming '
                        'an upgrade is in progress and continuing.',
                        constants.PENDING_UPDATE)
            listener = e.last_attempt.result()

        load_balancer = listener.load_balancer

        update_listener_tf = self._taskflow_load(self._listener_flows.
                                                 get_update_listener_flow(),
                                                 store={constants.LISTENER:
                                                        listener,
                                                        constants.LOADBALANCER:
                                                            load_balancer,
                                                        constants.UPDATE_DICT:
                                                            listener_updates,
                                                        constants.LISTENERS:
                                                            [listener]})
        with tf_logging.DynamicLoggingListener(update_listener_tf, log=LOG):
            update_listener_tf.run()

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(
            RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_load_balancer(self, load_balancer_id, flavor=None):
        """Creates a load balancer by allocating Amphorae.

        First tries to allocate an existing Amphora in READY state.
        If none are available it will attempt to build one specifically
        for this load balancer.

        :param load_balancer_id: ID of the load balancer to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        lb = self._lb_repo.get(db_apis.get_session(), id=load_balancer_id)
        if not lb:
            LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
                        '60 seconds.', 'load_balancer', load_balancer_id)
            raise db_exceptions.NoResultFound

        # TODO(johnsom) convert this to octavia_lib constant flavor
        # once octavia is transitioned to use octavia_lib
        store = {constants.LOADBALANCER_ID: load_balancer_id,
                 constants.BUILD_TYPE_PRIORITY:
                 constants.LB_CREATE_NORMAL_PRIORITY,
                 constants.FLAVOR: flavor}

        topology = lb.topology

        store[constants.UPDATE_DICT] = {
            constants.TOPOLOGY: topology
        }

        create_lb_flow = self._lb_flows.get_create_load_balancer_flow(
            topology=topology, listeners=lb.listeners)

        create_lb_tf = self._taskflow_load(create_lb_flow, store=store)
        with tf_logging.DynamicLoggingListener(create_lb_tf, log=LOG):
            create_lb_tf.run()

    def delete_load_balancer(self, load_balancer_id, cascade=False):
        """Deletes a load balancer by de-allocating Amphorae.

        :param load_balancer_id: ID of the load balancer to delete
        :returns: None
        :raises LBNotFound: The referenced load balancer was not found
        """
        lb = self._lb_repo.get(db_apis.get_session(),
                               id=load_balancer_id)

        if cascade:
            (flow,
             store) = self._lb_flows.get_cascade_delete_load_balancer_flow(lb)
        else:
            (flow, store) = self._lb_flows.get_delete_load_balancer_flow(lb)
        store.update({constants.LOADBALANCER: lb,
                      constants.SERVER_GROUP_ID: lb.server_group_id})
        delete_lb_tf = self._taskflow_load(flow, store=store)

        with tf_logging.DynamicLoggingListener(delete_lb_tf,
                                               log=LOG):
            delete_lb_tf.run()

    def update_load_balancer(self, load_balancer_id, load_balancer_updates):
        """Updates a load balancer.

        :param load_balancer_id: ID of the load balancer to update
        :param load_balancer_updates: Dict containing updated load balancer
        :returns: None
        :raises LBNotFound: The referenced load balancer was not found
        """
        lb = None
        try:
            lb = self._get_db_obj_until_pending_update(
                self._lb_repo, load_balancer_id)
        except tenacity.RetryError as e:
            LOG.warning('Load balancer did not go into %s in 60 seconds. '
                        'This is either due to an in-progress Octavia upgrade '
                        'or an overloaded and failing database. Assuming '
                        'an upgrade is in progress and continuing.',
                        constants.PENDING_UPDATE)
            lb = e.last_attempt.result()

        listeners, _ = self._listener_repo.get_all(
            db_apis.get_session(),
            load_balancer_id=load_balancer_id)

        update_lb_tf = self._taskflow_load(
            self._lb_flows.get_update_load_balancer_flow(),
            store={constants.LOADBALANCER: lb,
                   constants.LISTENERS: listeners,
                   constants.UPDATE_DICT: load_balancer_updates})

        with tf_logging.DynamicLoggingListener(update_lb_tf,
                                               log=LOG):
            update_lb_tf.run()

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(
            RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_member(self, member_id):
        """Creates a pool member.

        :param member_id: ID of the member to create
        :returns: None
        :raises NoSuitablePool: Unable to find the node pool
        """
        member = self._member_repo.get(db_apis.get_session(),
                                       id=member_id)
        if not member:
            LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
                        '60 seconds.', 'member', member_id)
            raise db_exceptions.NoResultFound

        pool = member.pool
        listeners = pool.listeners
        load_balancer = pool.load_balancer

        create_member_tf = self._taskflow_load(self._member_flows.
                                               get_create_member_flow(),
                                               store={constants.MEMBER: member,
                                                      constants.LISTENERS:
                                                          listeners,
                                                      constants.LOADBALANCER:
                                                          load_balancer,
                                                      constants.POOL: pool})
        with tf_logging.DynamicLoggingListener(create_member_tf,
                                               log=LOG):
            create_member_tf.run()

    def delete_member(self, member_id):
        """Deletes a pool member.

        :param member_id: ID of the member to delete
        :returns: None
        :raises MemberNotFound: The referenced member was not found
        """
        member = self._member_repo.get(db_apis.get_session(),
                                       id=member_id)
        pool = member.pool
        listeners = pool.listeners
        load_balancer = pool.load_balancer

        delete_member_tf = self._taskflow_load(
            self._member_flows.get_delete_member_flow(),
            store={constants.MEMBER: member, constants.LISTENERS: listeners,
                   constants.LOADBALANCER: load_balancer, constants.POOL: pool}
        )
        with tf_logging.DynamicLoggingListener(delete_member_tf,
                                               log=LOG):
            delete_member_tf.run()

    def batch_update_members(self, old_member_ids, new_member_ids,
                             updated_members):
        old_members = [self._member_repo.get(db_apis.get_session(), id=mid)
                       for mid in old_member_ids]
        new_members = [self._member_repo.get(db_apis.get_session(), id=mid)
                       for mid in new_member_ids]
        updated_members = [
            (self._member_repo.get(db_apis.get_session(), id=m.get('id')), m)
            for m in updated_members]
        if old_members:
            pool = old_members[0].pool
        elif new_members:
            pool = new_members[0].pool
        else:
            pool = updated_members[0][0].pool
        listeners = pool.listeners
        load_balancer = pool.load_balancer

        batch_update_members_tf = self._taskflow_load(
            self._member_flows.get_batch_update_members_flow(
                old_members, new_members, updated_members),
            store={constants.LISTENERS: listeners,
                   constants.LOADBALANCER: load_balancer,
                   constants.POOL: pool})
        with tf_logging.DynamicLoggingListener(batch_update_members_tf,
                                               log=LOG):
            batch_update_members_tf.run()

    def update_member(self, member_id, member_updates):
        """Updates a pool member.

        :param member_id: ID of the member to update
        :param member_updates: Dict containing updated member attributes
        :returns: None
        :raises MemberNotFound: The referenced member was not found
        """
        member = None
        try:
            member = self._get_db_obj_until_pending_update(
                self._member_repo, member_id)
        except tenacity.RetryError as e:
            LOG.warning('Member did not go into %s in 60 seconds. '
                        'This is either due to an in-progress Octavia upgrade '
                        'or an overloaded and failing database. Assuming '
                        'an upgrade is in progress and continuing.',
                        constants.PENDING_UPDATE)
            member = e.last_attempt.result()

        pool = member.pool
        listeners = pool.listeners
        load_balancer = pool.load_balancer

        update_member_tf = self._taskflow_load(self._member_flows.
                                               get_update_member_flow(),
                                               store={constants.MEMBER: member,
                                                      constants.LISTENERS:
                                                          listeners,
                                                      constants.LOADBALANCER:
                                                          load_balancer,
                                                      constants.POOL:
                                                          pool,
                                                      constants.UPDATE_DICT:
                                                          member_updates})
        with tf_logging.DynamicLoggingListener(update_member_tf,
                                               log=LOG):
            update_member_tf.run()

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(
            RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_pool(self, pool_id):
        """Creates a node pool.

        :param pool_id: ID of the pool to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        pool = self._pool_repo.get(db_apis.get_session(),
                                   id=pool_id)
        if not pool:
            LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
                        '60 seconds.', 'pool', pool_id)
            raise db_exceptions.NoResultFound

        listeners = pool.listeners
        load_balancer = pool.load_balancer

        create_pool_tf = self._taskflow_load(self._pool_flows.
                                             get_create_pool_flow(),
                                             store={constants.POOL: pool,
                                                    constants.LISTENERS:
                                                        listeners,
                                                    constants.LOADBALANCER:
                                                        load_balancer})
        with tf_logging.DynamicLoggingListener(create_pool_tf,
                                               log=LOG):
            create_pool_tf.run()

    def delete_pool(self, pool_id):
        """Deletes a node pool.

        :param pool_id: ID of the pool to delete
        :returns: None
        :raises PoolNotFound: The referenced pool was not found
        """
        pool = self._pool_repo.get(db_apis.get_session(),
                                   id=pool_id)

        load_balancer = pool.load_balancer
        listeners = pool.listeners

        delete_pool_tf = self._taskflow_load(
            self._pool_flows.get_delete_pool_flow(),
            store={constants.POOL: pool, constants.LISTENERS: listeners,
                   constants.LOADBALANCER: load_balancer})
        with tf_logging.DynamicLoggingListener(delete_pool_tf,
                                               log=LOG):
            delete_pool_tf.run()

    def update_pool(self, pool_id, pool_updates):
        """Updates a node pool.

        :param pool_id: ID of the pool to update
        :param pool_updates: Dict containing updated pool attributes
        :returns: None
        :raises PoolNotFound: The referenced pool was not found
        """
        pool = None
        try:
            pool = self._get_db_obj_until_pending_update(
                self._pool_repo, pool_id)
        except tenacity.RetryError as e:
            LOG.warning('Pool did not go into %s in 60 seconds. '
                        'This is either due to an in-progress Octavia upgrade '
                        'or an overloaded and failing database. Assuming '
                        'an upgrade is in progress and continuing.',
                        constants.PENDING_UPDATE)
            pool = e.last_attempt.result()

        listeners = pool.listeners
        load_balancer = pool.load_balancer

        update_pool_tf = self._taskflow_load(self._pool_flows.
                                             get_update_pool_flow(),
                                             store={constants.POOL: pool,
                                                    constants.LISTENERS:
                                                        listeners,
                                                    constants.LOADBALANCER:
                                                        load_balancer,
                                                    constants.UPDATE_DICT:
                                                        pool_updates})
        with tf_logging.DynamicLoggingListener(update_pool_tf,
                                               log=LOG):
            update_pool_tf.run()

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(
            RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_l7policy(self, l7policy_id):
        """Creates an L7 Policy.

        :param l7policy_id: ID of the l7policy to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        l7policy = self._l7policy_repo.get(db_apis.get_session(),
                                           id=l7policy_id)
        if not l7policy:
            LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
                        '60 seconds.', 'l7policy', l7policy_id)
            raise db_exceptions.NoResultFound

        listeners = [l7policy.listener]
        load_balancer = l7policy.listener.load_balancer

        create_l7policy_tf = self._taskflow_load(
            self._l7policy_flows.get_create_l7policy_flow(),
            store={constants.L7POLICY: l7policy,
                   constants.LISTENERS: listeners,
                   constants.LOADBALANCER: load_balancer})
        with tf_logging.DynamicLoggingListener(create_l7policy_tf,
                                               log=LOG):
            create_l7policy_tf.run()

    def delete_l7policy(self, l7policy_id):
        """Deletes an L7 policy.

        :param l7policy_id: ID of the l7policy to delete
        :returns: None
        :raises L7PolicyNotFound: The referenced l7policy was not found
        """
        l7policy = self._l7policy_repo.get(db_apis.get_session(),
                                           id=l7policy_id)

        load_balancer = l7policy.listener.load_balancer
        listeners = [l7policy.listener]

        delete_l7policy_tf = self._taskflow_load(
            self._l7policy_flows.get_delete_l7policy_flow(),
            store={constants.L7POLICY: l7policy,
                   constants.LISTENERS: listeners,
                   constants.LOADBALANCER: load_balancer})
        with tf_logging.DynamicLoggingListener(delete_l7policy_tf,
                                               log=LOG):
            delete_l7policy_tf.run()

    def update_l7policy(self, l7policy_id, l7policy_updates):
        """Updates an L7 policy.

        :param l7policy_id: ID of the l7policy to update
        :param l7policy_updates: Dict containing updated l7policy attributes
        :returns: None
        :raises L7PolicyNotFound: The referenced l7policy was not found
        """
        l7policy = None
        try:
            l7policy = self._get_db_obj_until_pending_update(
                self._l7policy_repo, l7policy_id)
        except tenacity.RetryError as e:
            LOG.warning('L7 policy did not go into %s in 60 seconds. '
                        'This is either due to an in-progress Octavia upgrade '
                        'or an overloaded and failing database. Assuming '
                        'an upgrade is in progress and continuing.',
                        constants.PENDING_UPDATE)
            l7policy = e.last_attempt.result()

        listeners = [l7policy.listener]
        load_balancer = l7policy.listener.load_balancer

        update_l7policy_tf = self._taskflow_load(
            self._l7policy_flows.get_update_l7policy_flow(),
            store={constants.L7POLICY: l7policy,
                   constants.LISTENERS: listeners,
                   constants.LOADBALANCER: load_balancer,
                   constants.UPDATE_DICT: l7policy_updates})
        with tf_logging.DynamicLoggingListener(update_l7policy_tf,
                                               log=LOG):
            update_l7policy_tf.run()

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(
            RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_l7rule(self, l7rule_id):
        """Creates an L7 Rule.

        :param l7rule_id: ID of the l7rule to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        l7rule = self._l7rule_repo.get(db_apis.get_session(),
                                       id=l7rule_id)
        if not l7rule:
            LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
                        '60 seconds.', 'l7rule', l7rule_id)
            raise db_exceptions.NoResultFound

        l7policy = l7rule.l7policy
        listeners = [l7policy.listener]
        load_balancer = l7policy.listener.load_balancer

        create_l7rule_tf = self._taskflow_load(
            self._l7rule_flows.get_create_l7rule_flow(),
            store={constants.L7RULE: l7rule,
                   constants.L7POLICY: l7policy,
                   constants.LISTENERS: listeners,
                   constants.LOADBALANCER: load_balancer})
        with tf_logging.DynamicLoggingListener(create_l7rule_tf,
                                               log=LOG):
            create_l7rule_tf.run()

    def delete_l7rule(self, l7rule_id):
        """Deletes an L7 rule.

        :param l7rule_id: ID of the l7rule to delete
        :returns: None
        :raises L7RuleNotFound: The referenced l7rule was not found
        """
        l7rule = self._l7rule_repo.get(db_apis.get_session(),
                                       id=l7rule_id)
        l7policy = l7rule.l7policy
        load_balancer = l7policy.listener.load_balancer
        listeners = [l7policy.listener]

        delete_l7rule_tf = self._taskflow_load(
            self._l7rule_flows.get_delete_l7rule_flow(),
            store={constants.L7RULE: l7rule,
                   constants.L7POLICY: l7policy,
                   constants.LISTENERS: listeners,
                   constants.LOADBALANCER: load_balancer})
        with tf_logging.DynamicLoggingListener(delete_l7rule_tf,
                                               log=LOG):
            delete_l7rule_tf.run()

    def update_l7rule(self, l7rule_id, l7rule_updates):
        """Updates an L7 rule.

        :param l7rule_id: ID of the l7rule to update
        :param l7rule_updates: Dict containing updated l7rule attributes
        :returns: None
        :raises L7RuleNotFound: The referenced l7rule was not found
        """
        l7rule = None
        try:
            l7rule = self._get_db_obj_until_pending_update(
                self._l7rule_repo, l7rule_id)
        except tenacity.RetryError as e:
            LOG.warning('L7 rule did not go into %s in 60 seconds. '
                        'This is either due to an in-progress Octavia upgrade '
                        'or an overloaded and failing database. Assuming '
                        'an upgrade is in progress and continuing.',
                        constants.PENDING_UPDATE)
            l7rule = e.last_attempt.result()

        l7policy = l7rule.l7policy
        listeners = [l7policy.listener]
        load_balancer = l7policy.listener.load_balancer

        update_l7rule_tf = self._taskflow_load(
            self._l7rule_flows.get_update_l7rule_flow(),
            store={constants.L7RULE: l7rule,
                   constants.L7POLICY: l7policy,
                   constants.LISTENERS: listeners,
                   constants.LOADBALANCER: load_balancer,
                   constants.UPDATE_DICT: l7rule_updates})
        with tf_logging.DynamicLoggingListener(update_l7rule_tf,
                                               log=LOG):
            update_l7rule_tf.run()

    def _perform_amphora_failover(self, amp, priority):
        """Internal method to perform failover operations for an amphora.

        :param amp: The amphora to failover
        :param priority: The create priority
        :returns: None
        """

        stored_params = {constants.FAILED_AMPHORA: amp,
                         constants.LOADBALANCER_ID: amp.load_balancer_id,
                         constants.BUILD_TYPE_PRIORITY: priority, }

        if amp.status == constants.DELETED:
            LOG.warning('Amphora %s is marked DELETED in the database but '
                        'was submitted for failover. Deleting it from the '
                        'amphora health table to exclude it from health '
                        'checks and skipping the failover.', amp.id)
            self._amphora_health_repo.delete(db_apis.get_session(),
                                             amphora_id=amp.id)
            return

        if (CONF.house_keeping.spare_amphora_pool_size == 0) and (
                CONF.nova.enable_anti_affinity is False):
            LOG.warning("Failing over amphora with no spares pool may "
                        "cause delays in failover times while a new "
                        "amphora instance boots.")

        # if we run with anti-affinity we need to set the server group
        # as well
        lb = self._amphora_repo.get_lb_for_amphora(
            db_apis.get_session(), amp.id)
        if CONF.nova.enable_anti_affinity and lb:
            stored_params[constants.SERVER_GROUP_ID] = lb.server_group_id
        if lb and lb.flavor_id:
            stored_params[constants.FLAVOR] = (
                self._flavor_repo.get_flavor_metadata_dict(
                    db_apis.get_session(), lb.flavor_id))
        else:
            stored_params[constants.FLAVOR] = {}

        failover_amphora_tf = self._taskflow_load(
            self._amphora_flows.get_failover_flow(
                role=amp.role, load_balancer=lb),
            store=stored_params)

        with tf_logging.DynamicLoggingListener(failover_amphora_tf, log=LOG):
            failover_amphora_tf.run()

    def failover_amphora(self, amphora_id):
        """Perform failover operations for an amphora.

        :param amphora_id: ID for amphora to failover
        :returns: None
        :raises AmphoraNotFound: The referenced amphora was not found
        """
        try:
            amp = self._amphora_repo.get(db_apis.get_session(),
                                         id=amphora_id)
            if not amp:
                LOG.warning("Could not fetch Amphora %s from DB, ignoring "
                            "failover request.", amphora_id)
                return
            self._perform_amphora_failover(
                amp, constants.LB_CREATE_FAILOVER_PRIORITY)
            if amp.load_balancer_id:
                LOG.info("Mark ACTIVE in DB for load balancer id: %s",
                         amp.load_balancer_id)
                self._lb_repo.update(
                    db_apis.get_session(), amp.load_balancer_id,
                    provisioning_status=constants.ACTIVE)
        except Exception as e:
            try:
                self._lb_repo.update(
                    db_apis.get_session(), amp.load_balancer_id,
                    provisioning_status=constants.ERROR)
            except Exception:
                LOG.error("Unable to revert LB status to ERROR.")
            with excutils.save_and_reraise_exception():
                LOG.error("Failover exception: %s", e)

    def failover_loadbalancer(self, load_balancer_id):
        """Perform failover operations for a load balancer.

        :param load_balancer_id: ID for load balancer to failover
        :returns: None
        :raises LBNotFound: The referenced load balancer was not found
        """

        # Note: This expects that the load balancer is already in
        #       provisioning_status=PENDING_UPDATE state
        try:
            lb = self._lb_repo.get(db_apis.get_session(),
                                   id=load_balancer_id)

            # Exclude amphora already deleted
            amps = [a for a in lb.amphorae if a.status != constants.DELETED]
            for amp in amps:
                # failover amphora in backup role
                # Note: this amp may not currently be the backup
                # TODO(johnsom) Change this to query the amp state
                #               once the amp API supports it.
                if amp.role == constants.ROLE_BACKUP:
                    self._perform_amphora_failover(
                        amp, constants.LB_CREATE_ADMIN_FAILOVER_PRIORITY)

            for amp in amps:
                # failover everything else
                if amp.role != constants.ROLE_BACKUP:
                    self._perform_amphora_failover(
                        amp, constants.LB_CREATE_ADMIN_FAILOVER_PRIORITY)

            self._lb_repo.update(
                db_apis.get_session(), load_balancer_id,
                provisioning_status=constants.ACTIVE)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error("LB %(lbid)s failover exception: %(exc)s",
                          {'lbid': load_balancer_id, 'exc': e})
                self._lb_repo.update(
                    db_apis.get_session(), load_balancer_id,
                    provisioning_status=constants.ERROR)

    def amphora_cert_rotation(self, amphora_id):
        """Perform cert rotation for an amphora.

        :param amphora_id: ID for amphora to rotate
        :returns: None
        :raises AmphoraNotFound: The referenced amphora was not found
        """

        amp = self._amphora_repo.get(db_apis.get_session(),
                                     id=amphora_id)
        LOG.info("Start amphora cert rotation, amphora's id is: %s", amp.id)

        certrotation_amphora_tf = self._taskflow_load(
            self._amphora_flows.cert_rotate_amphora_flow(),
            store={constants.AMPHORA: amp,
                   constants.AMPHORA_ID: amp.id})

        with tf_logging.DynamicLoggingListener(certrotation_amphora_tf,
                                               log=LOG):
            certrotation_amphora_tf.run()

    def update_amphora_agent_config(self, amphora_id):
        """Update the amphora agent configuration.

        Note: This will update the amphora agent configuration file and
              update the running configuration for mutable configuration
              items.

        :param amphora_id: ID of the amphora to update.
        :returns: None
        """
        LOG.info("Start amphora agent configuration update, amphora's id "
                 "is: %s", amphora_id)
        amp = self._amphora_repo.get(db_apis.get_session(), id=amphora_id)
        lb = self._amphora_repo.get_lb_for_amphora(db_apis.get_session(),
                                                   amphora_id)
        flavor = {}
        if lb.flavor_id:
            flavor = self._flavor_repo.get_flavor_metadata_dict(
                db_apis.get_session(), lb.flavor_id)

        update_amphora_tf = self._taskflow_load(
            self._amphora_flows.update_amphora_config_flow(),
            store={constants.AMPHORA: amp,
                   constants.FLAVOR: flavor})

        with tf_logging.DynamicLoggingListener(update_amphora_tf,
                                               log=LOG):
            update_amphora_tf.run()
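A minimal, hedged sketch of the oslo.utils pattern the failover handlers above rely on: save_and_reraise_exception() lets the handler log and roll back state (for example, marking the resource ERROR) while the original exception still propagates to the caller. The resource name and helper below are illustrative, not Octavia code.

import logging

from oslo_utils import excutils

LOG = logging.getLogger(__name__)


def mark_error(resource_id):
    # Placeholder for a repository update such as provisioning_status=ERROR.
    LOG.info("Marking %s as ERROR", resource_id)


def failover(resource_id):
    try:
        raise RuntimeError("simulated failover failure")
    except Exception as e:
        # Log and revert status; the context manager re-raises the
        # original exception once the block exits.
        with excutils.save_and_reraise_exception():
            LOG.error("Failover exception for %s: %s", resource_id, e)
            mark_error(resource_id)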
import logging

from tenacity import after_log, before_log, retry, stop_after_attempt, wait_fixed

from app.db.session import SessionLocal

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

max_tries = 60 * 5  # 5 minutes
wait_seconds = 1


@retry(
    stop=stop_after_attempt(max_tries),
    wait=wait_fixed(wait_seconds),
    before=before_log(logger, logging.INFO),
    after=after_log(logger, logging.WARN),
)
def init() -> None:
    try:
        db = SessionLocal()
        # Try to create session to check if DB is awake
        db.execute("SELECT 1")
    except Exception as e:
        logger.error(e)
        raise e


def main() -> None:
    logger.info("Initializing service")
Beispiel #50
0
class FaultyHttpMock:
    @retry(retry=retry_if_exception_type(SnapshotHttpException),
           stop=stop_after_attempt(1))
    async def get_orderbook_snapshot(self, product_id, http_timeout):
        raise SnapshotHttpException
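Worth noting: stop_after_attempt(1) permits exactly one attempt, so this mock never actually retries, and because reraise is not set the failure surfaces as tenacity.RetryError. A tiny synchronous sketch of that behaviour; the exception class here is a stand-in, not the project's:

from tenacity import RetryError, retry, retry_if_exception_type, stop_after_attempt


class SnapshotError(Exception):
    """Illustrative stand-in for SnapshotHttpException."""


@retry(retry=retry_if_exception_type(SnapshotError),
       stop=stop_after_attempt(1))
def get_snapshot():
    raise SnapshotError("snapshot unavailable")


try:
    get_snapshot()
except RetryError as err:
    # Only one attempt was allowed; the original exception is wrapped.
    print("gave up:", err.last_attempt.exception())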
Beispiel #51
0
class FileUploader:
    UPLOAD_RETRY_COUNT = 5

    def __init__(self, file_retriever, upload_path, dbx):
        self._dbx = dbx
        self._upload_path = upload_path
        self._file_retriever = file_retriever
        self._buffer = io.BytesIO()
        self._total_bytes = 0
        self._session = None

    @property
    def total_bytes(self):
        return self._total_bytes

    @retry(stop=stop_after_attempt(UPLOAD_RETRY_COUNT),
           retry=retry_if_exception_type(requests.exceptions.ReadTimeout),
           after=after_log(logging.getLogger(), logging.WARNING))
    def upload(self):
        self._file_retriever.read(self._buffer.write)
        bytes = self._buffer.getvalue()
        assert self._total_bytes + len(bytes) == self._file_retriever.size

        self._dbx.files_upload(bytes,
                               path=self._upload_path,
                               mode=WriteMode('overwrite'),
                               client_modified=self._file_retriever.modified)
        self._total_bytes += len(bytes)

    def upload_in_chunks(self, chunk_size):
        self._file_retriever.read(lambda data: self._on_read(data, chunk_size))
        assert self._session is not None

        bytes = self._buffer.getvalue()
        assert self._total_bytes + len(bytes) == self._file_retriever.size

        self._upload_session(bytes, finish=True)

        self._total_bytes += len(bytes)
        logging.info(
            'chunk %s bytes uploaded. upload finished, total: %s bytes' %
            (len(bytes), self._total_bytes))

    def _on_read(self, data, chunk_size):
        self._buffer.write(data)

        if self._buffer.tell() > chunk_size:
            bytes = self._buffer.getvalue()

            self._upload_session(bytes, finish=False)

            self._total_bytes += len(bytes)
            logging.info('chunk %s bytes uploaded, total: %s bytes' %
                         (len(bytes), self._total_bytes))
            self._buffer.truncate(0)
            self._buffer.seek(0)

    @retry(stop=stop_after_attempt(UPLOAD_RETRY_COUNT),
           retry=retry_if_exception_type(requests.exceptions.ReadTimeout),
           after=after_log(logging.getLogger(), logging.WARNING))
    def _upload_session(self, bytes, finish):
        if self._session is None:
            start_result = self._dbx.files_upload_session_start(bytes)
            self._session = start_result.session_id
        elif finish:
            cursor = UploadSessionCursor(self._session, self._total_bytes)
            info = CommitInfo(path=self._upload_path,
                              mode=WriteMode('overwrite'),
                              client_modified=self._file_retriever.modified)
            self._dbx.files_upload_session_finish(bytes, cursor, info)
        else:
            cursor = UploadSessionCursor(self._session, self._total_bytes)
            self._dbx.files_upload_session_append_v2(bytes, cursor)
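The uploader above retries only on requests.exceptions.ReadTimeout and logs each failed attempt via after_log. A stripped-down, hedged sketch of that retry policy around a generic HTTP upload; the URL parameter and helper name are illustrative:

import logging

import requests
from tenacity import after_log, retry, retry_if_exception_type, stop_after_attempt

UPLOAD_RETRY_COUNT = 5
logger = logging.getLogger(__name__)


@retry(stop=stop_after_attempt(UPLOAD_RETRY_COUNT),
       retry=retry_if_exception_type(requests.exceptions.ReadTimeout),
       after=after_log(logger, logging.WARNING))
def upload_bytes(url, payload, timeout=5):
    # Only read timeouts are retried; other errors propagate immediately.
    response = requests.put(url, data=payload, timeout=timeout)
    response.raise_for_status()
    return response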
    def reply(self, reply=None, failure=None):
        """Send back reply to the RPC client
        :param reply: Dictionary, reply. In case of exception should be None
        :param failure: Tuple, should be a sys.exc_info() tuple.
            Should be None if RPC request was successfully processed.

        :return RpcReplyPikaIncomingMessage, message with reply
        """

        if self.reply_q is None:
            return

        reply_outgoing_message = RpcReplyPikaOutgoingMessage(
            self._pika_engine, self.msg_id, reply=reply, failure_info=failure,
            content_type=self._content_type,
        )

        def on_exception(ex):
            if isinstance(ex, pika_drv_exc.ConnectionException):
                LOG.warning(
                    "Connectivity related problem during reply sending. %s",
                    ex
                )
                return True
            else:
                return False

        if self._pika_engine.rpc_reply_retry_attempts:
            retrier = tenacity.retry(
                stop=(
                    tenacity.stop_never
                    if self._pika_engine.rpc_reply_retry_attempts == -1 else
                    tenacity.stop_after_attempt(
                        self._pika_engine.rpc_reply_retry_attempts
                    )
                ),
                retry=tenacity.retry_if_exception(on_exception),
                wait=tenacity.wait_fixed(
                    self._pika_engine.rpc_reply_retry_delay
                )
            )
        else:
            retrier = None

        try:
            timeout = (None if self.expiration_time is None else
                       max(self.expiration_time - time.time(), 0))
            with timeutils.StopWatch(duration=timeout) as stopwatch:
                reply_outgoing_message.send(
                    reply_q=self.reply_q,
                    stopwatch=stopwatch,
                    retrier=retrier
                )
            LOG.debug(
                "Message [id:'%s'] replied to '%s'.", self.msg_id, self.reply_q
            )
        except Exception:
            LOG.exception(
                "Message [id:'%s'] wasn't replied to : %s", self.msg_id,
                self.reply_q
            )
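The reply path above shows that tenacity.retry(...) is just a decorator object: it can be built once from configuration (including stop_never for unlimited attempts) and applied later to whichever callable needs it, or skipped entirely. A hedged, self-contained sketch of that pattern:

import tenacity


def build_retrier(attempts, delay):
    # attempts == -1 mirrors the "retry forever" case above.
    return tenacity.retry(
        stop=(tenacity.stop_never if attempts == -1
              else tenacity.stop_after_attempt(attempts)),
        retry=tenacity.retry_if_exception_type(ConnectionError),
        wait=tenacity.wait_fixed(delay),
    )


def send_reply(payload):
    print("sending", payload)


retrier = build_retrier(attempts=3, delay=0.1)
# The retrier is an ordinary decorator, so it can wrap the call lazily.
retrier(send_reply)("hello")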
Beispiel #53
0
"""Encapsulate glance-simplestreams-sync testing."""
import json
import logging
import requests
import tenacity

import zaza.model as zaza_model
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities.openstack as openstack_utils


@tenacity.retry(
    retry=tenacity.retry_if_result(lambda images: len(images) < 4),
    wait=tenacity.wait_fixed(6),  # interval between retries
    stop=tenacity.stop_after_attempt(100))  # retry times
def retry_image_sync(glance_client):
    """Wait for image sync with retry."""
    # convert generator to list
    return list(glance_client.images.list())


@tenacity.retry(retry=tenacity.retry_if_exception_type(
    json.decoder.JSONDecodeError),
                wait=tenacity.wait_fixed(10),
                reraise=True,
                stop=tenacity.stop_after_attempt(10))
def get_product_streams(url):
    """Get product streams json data with retry."""
    # There is a race between the images being available in glance and any
    # metadata being written. Use tenacity to avoid this race.
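retry_if_result retries on the return value rather than on exceptions: the wrapped call is repeated while the predicate holds (here, fewer than four images). An illustrative, self-contained sketch of the same polling idea:

import tenacity

expected = ["img-1", "img-2", "img-3", "img-4"]
synced = []


@tenacity.retry(
    retry=tenacity.retry_if_result(lambda images: len(images) < 4),
    wait=tenacity.wait_fixed(0.1),
    stop=tenacity.stop_after_attempt(100))
def list_images():
    # Simulate images appearing one by one; tenacity keeps calling this
    # until the returned list satisfies the predicate (or the stop fires).
    if len(synced) < len(expected):
        synced.append(expected[len(synced)])
    return list(synced)


print(list_images())  # ['img-1', 'img-2', 'img-3', 'img-4']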
Beispiel #54
0
class FunctionVersionsController(rest.RestController):
    _custom_actions = {
        'scale_up': ['POST'],
        'scale_down': ['POST'],
        'detach': ['POST'],
    }

    def __init__(self, *args, **kwargs):
        self.type = 'function_version'
        self.storage_provider = storage_base.load_storage_provider(CONF)
        self.engine_client = rpc.get_engine_client()

        super(FunctionVersionsController, self).__init__(*args, **kwargs)

    @tenacity.retry(wait=tenacity.wait_fixed(1),
                    stop=tenacity.stop_after_attempt(30),
                    reraise=True,
                    retry=tenacity.retry_if_exception_type(
                        exc.EtcdLockException))
    def _create_function_version(self, project_id, function_id, **kwargs):
        with etcd_util.get_function_version_lock(function_id) as lock:
            if not lock.is_acquired():
                raise exc.EtcdLockException(
                    "Etcd: failed to acquire version lock for function %s." %
                    function_id)

            with db_api.transaction():
                # Get latest function package md5 and version number
                func_db = db_api.get_function(function_id, insecure=False)
                if func_db.code['source'] != constants.PACKAGE_FUNCTION:
                    raise exc.NotAllowedException(
                        "Function versioning only allowed for %s type "
                        "function." % constants.PACKAGE_FUNCTION)

                l_md5 = func_db.code['md5sum']
                l_version = func_db.latest_version

                if len(func_db.versions) >= constants.MAX_VERSION_NUMBER:
                    raise exc.NotAllowedException(
                        'Can not exceed maximum number(%s) of versions' %
                        constants.MAX_VERSION_NUMBER)

                # Check if the latest package changed since last version
                changed = self.storage_provider.changed_since(
                    project_id, function_id, l_md5, l_version)
                if not changed:
                    raise exc.NotAllowedException(
                        'Function package not changed since the latest '
                        'version %s.' % l_version)

                LOG.info("Creating %s, function_id: %s, old_version: %d",
                         self.type, function_id, l_version)

                # Create new version and copy package.
                self.storage_provider.copy(project_id, function_id, l_md5,
                                           l_version)
                version = db_api.increase_function_version(
                    function_id, l_version, **kwargs)
                func_db.latest_version = l_version + 1

            LOG.info("New version %d for function %s created.", l_version + 1,
                     function_id)
            return version

    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(resources.FunctionVersion,
                         types.uuid,
                         body=resources.FunctionVersion,
                         status_code=201)
    def post(self, function_id, body):
        """Create a new version for the function.

        Only allow to create version for package type function.

        The supported body params:
            - description: Optional. The description of the new version.
        """
        ctx = context.get_ctx()
        acl.enforce('function_version:create', ctx)

        params = body.to_dict()
        values = {
            'description': params.get('description'),
        }

        # Try to create a new function version within lock and db transaction
        try:
            version = self._create_function_version(ctx.project_id,
                                                    function_id, **values)
        except exc.EtcdLockException as e:
            LOG.exception(str(e))
            # Reraise a generic exception as the end users should not know
            # the underlying details.
            raise exc.QinlingException('Internal server error.')

        return resources.FunctionVersion.from_db_obj(version)

    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(resources.FunctionVersions, types.uuid)
    def get_all(self, function_id):
        """Get all the versions of the given function.

        Admin user can get all versions for the normal user's function.
        """
        acl.enforce('function_version:get_all', context.get_ctx())
        LOG.info("Getting versions for function %s.", function_id)

        # Getting function and versions needs to happen in a db transaction
        with db_api.transaction():
            func_db = db_api.get_function(function_id)
            db_versions = func_db.versions

        versions = [
            resources.FunctionVersion.from_db_obj(v) for v in db_versions
        ]

        return resources.FunctionVersions(function_versions=versions)

    @rest_utils.wrap_pecan_controller_exception
    @pecan.expose()
    @pecan.expose('json')
    def get(self, function_id, version):
        """Get function version or download function version package.

        This method can support HTTP request using either
        'Accept:application/json' or no 'Accept' header.
        """
        ctx = context.get_ctx()
        acl.enforce('function_version:get', ctx)

        download = strutils.bool_from_string(
            pecan.request.GET.get('download', False))
        version = int(version)

        version_db = db_api.get_function_version(function_id, version)

        if not download:
            LOG.info("Getting version %s for function %s.", version,
                     function_id)
            pecan.override_template('json')
            return resources.FunctionVersion.from_db_obj(version_db).to_dict()

        LOG.info("Downloading version %s for function %s.", version,
                 function_id)

        f = self.storage_provider.retrieve(version_db.project_id,
                                           function_id,
                                           None,
                                           version=version)

        if isinstance(f, collections.Iterable):
            pecan.response.app_iter = f
        else:
            pecan.response.app_iter = FileIter(f)
        pecan.response.headers['Content-Type'] = 'application/zip'
        pecan.response.headers['Content-Disposition'] = (
            'attachment; filename="%s_%s"' % (function_id, version))

    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(None, types.uuid, int, status_code=204)
    def delete(self, function_id, version):
        """Delete a specific function version.

        - The version should not be in use by any job
        - The version should not be in use by any webhook
        - An admin user cannot delete a normal user's version
        """
        ctx = context.get_ctx()
        acl.enforce('function_version:delete', ctx)
        LOG.info("Deleting version %s of function %s.", version, function_id)

        with db_api.transaction():
            version_db = db_api.get_function_version(function_id,
                                                     version,
                                                     insecure=False)
            latest_version = version_db.function.latest_version

            version_jobs = db_api.get_jobs(
                function_id=version_db.function_id,
                function_version=version_db.version_number,
                status={'nin': ['done', 'cancelled']})
            if len(version_jobs) > 0:
                raise exc.NotAllowedException(
                    'The function version is still associated with running '
                    'job(s).')

            version_webhook = db_api.get_webhooks(
                function_id=version_db.function_id,
                function_version=version_db.version_number,
            )
            if len(version_webhook) > 0:
                raise exc.NotAllowedException(
                    'The function version is still associated with webhook.')

            filters = rest_utils.get_filters(
                function_id=version_db.function_id,
                function_version=version_db.version_number)
            version_aliases = db_api.get_function_aliases(**filters)
            if len(version_aliases) > 0:
                raise exc.NotAllowedException(
                    'The function version is still associated with alias.')

            # Delete resources for function version
            self.engine_client.delete_function(function_id, version=version)
            etcd_util.delete_function(function_id, version=version)

            self.storage_provider.delete(ctx.project_id,
                                         function_id,
                                         None,
                                         version=version)

            db_api.delete_function_version(function_id, version)

            if latest_version == version:
                version_db.function.latest_version = latest_version - 1

        LOG.info("Version %s of function %s deleted.", version, function_id)

    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(None,
                         types.uuid,
                         int,
                         body=resources.ScaleInfo,
                         status_code=202)
    def scale_up(self, function_id, version, scale):
        """Scale up the workers for function version execution.

        This is admin only operation. The load monitoring of execution
        depends on the monitoring solution of underlying orchestrator.
        """
        acl.enforce('function_version:scale_up', context.get_ctx())

        func_db = db_api.get_function(function_id)

        # If version=0, it's equivalent to /functions/<function-id>/scale_up
        if version > 0:
            db_api.get_function_version(function_id, version)

        params = scale.to_dict()

        LOG.info('Starting to scale up function %s(version %s), params: %s',
                 function_id, version, params)

        self.engine_client.scaleup_function(function_id,
                                            runtime_id=func_db.runtime_id,
                                            version=version,
                                            count=params['count'])

    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(None,
                         types.uuid,
                         int,
                         body=resources.ScaleInfo,
                         status_code=202)
    def scale_down(self, function_id, version, scale):
        """Scale down the workers for function version execution.

        This is admin only operation. The load monitoring of execution
        depends on the monitoring solution of underlying orchestrator.
        """
        acl.enforce('function_version:scale_down', context.get_ctx())

        db_api.get_function(function_id)
        params = scale.to_dict()

        # If version=0, it's equivalent to /functions/<function-id>/scale_down
        if version > 0:
            db_api.get_function_version(function_id, version)

        workers = etcd_util.get_workers(function_id, version=version)
        if len(workers) <= 1:
            LOG.info('No need to scale down function %s(version %s)',
                     function_id, version)
            return

        LOG.info('Starting to scale down function %s(version %s), params: %s',
                 function_id, version, params)
        self.engine_client.scaledown_function(function_id,
                                              version=version,
                                              count=params['count'])

    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(None, types.uuid, int, status_code=202)
    def detach(self, function_id, version):
        """Detach the function version from its underlying workers.

        This is admin only operation, which gives admin user a safe way to
        clean up the underlying resources allocated for the function version.
        """
        acl.enforce('function_version:detach', context.get_ctx())

        db_api.get_function(function_id)
        # If version=0, it's equivalent to /functions/<function-id>/detach
        if version > 0:
            db_api.get_function_version(function_id, version)

        LOG.info('Starting to detach function %s(version %s)', function_id,
                 version)

        # Delete allocated resources in orchestrator and etcd keys.
        self.engine_client.delete_function(function_id, version=version)
        etcd_util.delete_function(function_id, version=version)
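The version-creation path above treats etcd lock contention as retryable: raising EtcdLockException inside the decorated method makes tenacity wait and try again, and reraise=True ensures the last real exception (not RetryError) escapes once all attempts are spent. A minimal sketch with a stand-in exception:

import tenacity


class LockContended(Exception):
    """Illustrative stand-in for EtcdLockException."""


attempts = {"count": 0}


@tenacity.retry(wait=tenacity.wait_fixed(0.1),
                stop=tenacity.stop_after_attempt(30),
                reraise=True,
                retry=tenacity.retry_if_exception_type(LockContended))
def create_version():
    attempts["count"] += 1
    if attempts["count"] < 3:
        # Lock still held elsewhere: raise the retryable exception so
        # tenacity waits and tries again.
        raise LockContended("version lock not acquired")
    return "version created on attempt %d" % attempts["count"]


print(create_version())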
Beispiel #55
0
    render_message_template,
)
from dispatch.plugins.bases import EmailPlugin
from dispatch.plugins.dispatch_google import gmail as google_gmail_plugin
from dispatch.plugins.dispatch_google.common import get_service
from dispatch.plugins.dispatch_google.config import (
    GOOGLE_USER_OVERRIDE,
    GOOGLE_SERVICE_ACCOUNT_DELEGATED_ACCOUNT,
)

from .filters import env

log = logging.getLogger(__name__)


@retry(stop=stop_after_attempt(3))
def send_message(service, message):
    """Sends an email message."""
    return service.users().messages().send(userId="me", body=message).execute()


def create_html_message(recipient: str, subject: str, body: str) -> Dict:
    """Creates a message for an email."""
    message = MIMEText(body, "html")

    if GOOGLE_USER_OVERRIDE:
        recipient = GOOGLE_USER_OVERRIDE
        log.warning("GOOGLE_USER_OVERIDE set. Using override.")

    message["to"] = recipient
    message["from"] = GOOGLE_SERVICE_ACCOUNT_DELEGATED_ACCOUNT
Beispiel #56
0
class InstaLooter(object):
    """A brutal Instagram looter that raids without API tokens.
    """

    @CachedClassProperty
    @classmethod
    def _cachefs(cls):
        """~fs.base.FS: the cache filesystem.
        """
        url = "usercache://{}:{}:{}".format(__appname__, __author__, __version__)
        return fs.open_fs(url, create=True)

    @CachedClassProperty
    @classmethod
    def _user_agents(cls):
        """~fake_useragent.UserAgent: a collection of fake user-agents.
        """
        filename = 'fake_useragent_{}.json'.format(fake_useragent.VERSION)
        return fake_useragent.UserAgent(
            path=cls._cachefs.getsyspath(filename),
            safe_attrs=['__name__', '__objclass__'])

    # str: The name of the cookie file in the cache filesystem
    _COOKIE_FILE = "cookies.txt"

    @classmethod
    def _init_session(cls, session=None):
        # type: (Optional[Session]) -> Session
        """Initialise the given session and load class cookies to its jar.

        Arguments:
            session (~requests.Session, optional): a `requests`
                session, or `None` to create a new one.

        Returns:
            ~requests.Session: an initialised session instance.

        """
        session = session or Session()
        # Load cookies
        session.cookies = LWPCookieJar(
            cls._cachefs.getsyspath(cls._COOKIE_FILE))
        try:
            typing.cast(FileCookieJar, session.cookies).load()
        except IOError:
            pass
        typing.cast(FileCookieJar, session.cookies).clear_expired_cookies()
        return session

    @classmethod
    def _login(cls, username, password, session=None):
        # type: (str, str, Optional[Session]) -> None
        """Login with provided credentials and session.

        Arguments:
            username (str): the username to log in with.
            password (str): the password to log in with.
            session (~requests.Session, optional): the session to use,
                or `None` to create a new session.

        Note:
            Code taken from LevPasha/instabot.py

        """
        session = cls._init_session(session)
        headers = copy.deepcopy(session.headers)
        homepage = "https://www.instagram.com/"
        login_url = "https://www.instagram.com/accounts/login/ajax/"
        data = {'username': username, 'password': password}

        try:
            session.headers.update({
                'Accept-Encoding': 'gzip, deflate',
                'Accept-Language': 'en-US,en;q=0.8',
                'Connection': 'keep-alive',
                'Content-Length': '0',
                'Host': 'www.instagram.com',
                'Origin': 'https://www.instagram.com',
                'Referer': 'https://www.instagram.com',
                'User-Agent': cls._user_agents.firefox,
                'X-Instagram-AJAX': '1',
                'X-Requested-With': 'XMLHttpRequest'
            })

            with session.get(homepage) as res:
                token = get_shared_data(res.text)['config']['csrf_token']
                session.headers.update({'X-CSRFToken': token})

            time.sleep(5 * random.random())  # nosec
            with session.post(login_url, data, allow_redirects=True) as login:
                token = next(c.value for c in login.cookies if c.name == 'csrftoken')
                session.headers.update({'X-CSRFToken': token})
                if not login.ok:
                    raise SystemError("Login error: check your connection")
                data = json.loads(login.text)
                if not data.get('authenticated', False):
                    raise ValueError('Login error: check your login data')

            time.sleep(5 * random.random())  # nosec
            with session.get(homepage) as res:
                if res.text.find(username) == -1:
                    raise ValueError('Login error: check your login data')
                try:
                    typing.cast(FileCookieJar, session.cookies).save()
                except IOError:
                    pass

        finally:
            session.headers = headers

    @classmethod
    def _logout(cls, session=None):
        # type: (Optional[Session]) -> None
        """Log out from current session.

        Also deletes the eventual cookie file left in the cache directory,
        to prevent new connections from using the old session ID.

        Arguments:
            session (~requests.Session): the session to use, or `None`
                to create a new session.

        Note:
            Code taken from LevPasha/instabot.py

        """
        session = cls._init_session(session)
        sessionid = cls._sessionid(session)
        if sessionid is not None:
            url = "https://www.instagram.com/accounts/logout/"
            session.post(url, data={"csrfmiddlewaretoken": sessionid})

        if cls._cachefs.exists(cls._COOKIE_FILE):
            cls._cachefs.remove(cls._COOKIE_FILE)

    @classmethod
    def _logged_in(cls, session=None):
        # type: (Optional[Session]) -> bool
        """Check if there is an open Instagram session.

        Arguments:
            session (~requests.Session): the session to use, or `None`
                to create a new session.

        Returns:
            bool: `True` if there's an active session, `False` otherwise.

        """
        return cls._sessionid(session) is not None

    @classmethod
    def _sessionid(cls, session=None):
        # type: (Optional[Session]) -> Optional[Text]
        """Get the ID of the currently opened Instagram session.

        Arguments:
            session (~requests.Session): the session to use, or `None`
                to create a new session.

        Returns:
            str or None: the session ID, if any, or `None`.

        """
        _session = cls._init_session(session)
        _cookies = typing.cast(FileCookieJar, _session.cookies)
        return next((ck.value for ck in _cookies
                     if ck.domain == ".instagram.com"
                     and ck.name == "ds_user_id"
                     and ck.path == "/"), None)

    def __init__(self,
                 add_metadata=False,    # type: bool
                 get_videos=False,      # type: bool
                 videos_only=False,     # type: bool
                 jobs=16,               # type: int
                 template="{id}",       # type: Text
                 dump_json=False,       # type: bool
                 dump_only=False,       # type: bool
                 extended_dump=False,   # type: bool
                 session=None           # type: Optional[Session]
                 ):
        # type: (...) -> None
        """Create a new looter instance.

        Arguments:
            add_metadata (bool): Add date and comment metadata to
                the downloaded pictures.
            get_videos (bool): Also get the videos from the given target.
            videos_only (bool): Only download videos (implies
                ``get_videos=True``).
            jobs (int): the number of parallel threads to use to
                download media (12 or more is advised to have a true parallel
                download of media files).
            template (str): a filename format, in Python new-style-formatting
                format. See the :ref:`Template` page of the documentation
                for available keys.
            dump_json (bool): Save each resource metadata to a
                JSON file next to the actual image/video.
            dump_only (bool): Only save metadata and discard the actual
                resource.
            extended_dump (bool): Attempt to fetch as much metadata as
                possible, at the cost of more time. Set to `True` if, for
                instance, you always want the top comments to be downloaded
                in the dump.
            session (~requests.Session or None): a `requests` session,
                or `None` to create a new one.

        """
        self.add_metadata = add_metadata
        self.get_videos = get_videos or videos_only
        self.videos_only = videos_only
        self.jobs = jobs
        self.namegen = NameGenerator(template)
        self.dump_only = dump_only
        self.dump_json = dump_json or dump_only
        self.extended_dump = extended_dump
        self.session = self._init_session(session)
        atexit.register(self.session.close)

        # Set a fake User-Agent
        if self.session.headers['User-Agent'].startswith('python-requests'):
            self.session.headers['User-Agent'] = self._user_agents.firefox

        # Get CSRFToken and RHX
        with self.session.get('https://www.instagram.com/') as res:
            token = get_shared_data(res.text)['config']['csrf_token']
            self.session.headers['X-CSRFToken'] = token
            self.rhx = get_shared_data(res.text)['rhx_gis']

    @abc.abstractmethod
    def pages(self):
        # type: () -> Iterator[Dict[Text, Any]]
        """Obtain an iterator over Instagram post pages.

        Returns:
            PageIterator: an iterator over the Instagram post pages.

        """
        return NotImplemented

    def _medias(self,
                pages_iterator,     # type: Iterable[Dict[Text, Any]]
                timeframe=None      # type: Optional[_Timeframe]
                ):
        # type: (...) -> Iterator[Dict[Text, Any]]
        """Obtain an iterator over the medias of the given pages iterator.

        Arguments:
            pages_iterator (Iterator): an iterator over the Instagram
                pages, returned by `InstaLooter.pages`

        Returns:
            MediasIterator: an iterator over the medias in every page.

        """
        if timeframe is not None:
            return TimedMediasIterator(pages_iterator, timeframe)
        return MediasIterator(pages_iterator)

    def medias(self, timeframe=None):
        # type: (Optional[_Timeframe]) -> Iterator[Dict[Text, Any]]
        """Obtain an iterator over the Instagram medias.

        Wraps the iterator returned by `InstaLooter.pages` to seamlessly
        iterate over the medias of all the pages.

        Returns:
            MediasIterator: an iterator over the medias in every page.

        """
        return self._medias(self.pages(), timeframe)

    @tenacity.retry(stop=tenacity.stop_after_attempt(10),
                    wait=tenacity.wait_exponential(multiplier=1, max=10))
    def get_post_info(self, code):
        # type: (str) -> dict
        """Get media information from a given post code.

        Arguments:
            code (str): the code of the post (can be obtained either
                from the ``shortcode`` attribute of media dictionaries, or
                from a post URL: ``https://www.instagram.com/p/<code>/``)

        Returns:
            dict: a media dictionary, in the format used by Instagram.

        """
        url = "https://www.instagram.com/p/{}/".format(code)
        with self.session.get(url) as res:
            data = get_shared_data(res.text)
            return data['entry_data']['PostPage'][0]['graphql']['shortcode_media']

    def download_pictures(self,
                          destination,       # type: Union[str, fs.base.FS]
                          media_count=None,  # type: Optional[int]
                          timeframe=None,    # type: Optional[_Timeframe]
                          new_only=False,    # type: bool
                          pgpbar_cls=None,   # type: Optional[Type[ProgressBar]]
                          dlpbar_cls=None    # type: Optional[Type[ProgressBar]]
                          ):
        # type: (...) -> int
        """Download all the pictures to the provided destination.

        Actually a shortcut for `.download` with ``condition`` set
        to accept only images.

        """
        return self.download(
            destination,
            condition=lambda media: not media["is_video"],
            media_count=media_count,
            timeframe=timeframe,
            new_only=new_only,
            pgpbar_cls=pgpbar_cls,
            dlpbar_cls=dlpbar_cls,
        )

    def download_videos(self,
                        destination,       # type: Union[str, fs.base.FS]
                        media_count=None,  # type: Optional[int]
                        timeframe=None,    # type: Optional[_Timeframe]
                        new_only=False,    # type: bool
                        pgpbar_cls=None,   # type: Optional[Type[ProgressBar]]
                        dlpbar_cls=None,   # type: Optional[Type[ProgressBar]]
                        ):
        # type: (...) -> int
        """Download all videos to the provided destination.

        Actually a shortcut for `.download` with ``condition`` set
        to accept only videos.

        """
        return self.download(
            destination,
            condition=lambda media: media["is_video"],
            media_count=media_count,
            timeframe=timeframe,
            new_only=new_only,
            pgpbar_cls=pgpbar_cls,
            dlpbar_cls=dlpbar_cls,
        )

    def download(self,
                 destination,           # type: Union[str, fs.base.FS]
                 condition=None,        # type: Optional[Callable[[dict], bool]]
                 media_count=None,      # type: Optional[int]
                 timeframe=None,        # type: Optional[_Timeframe]
                 new_only=False,        # type: bool
                 pgpbar_cls=None,       # type: Optional[Type[ProgressBar]]
                 dlpbar_cls=None,       # type: Optional[Type[ProgressBar]]
                 ):
        # type: (...) -> int
        """Download all medias passing ``condition`` to destination.

        Arguments:
            destination (~fs.base.FS or str): the filesystem where to
                store the downloaded files, as a filesystem instance or
                FS URL.
            condition (function): the condition to filter the
                medias with. If `None` is given, a function is created using
                the ``get_videos`` and ``videos_only`` passed at object
                initialisation.
            media_count (int or None): the maximum number of medias
                to download. Leave to ``None`` to download everything from
                the target. *Note that more files can be downloaded, since
                a post with multiple images/videos is considered to be a
                single media*.
            timeframe (tuple or None): a tuple of two `~datetime.datetime`
                objects to enforce a time frame (the first item must be
                more recent). Leave to `None` to ignore times.
            new_only (bool): stop media discovery when already
                downloaded medias are encountered.
            pgpbar_cls (type or None): an optional `~.pbar.ProgressBar`
                subclass to use to display page scraping progress.
            dlpbar_cls (type or None): an optional `~.pbar.ProgressBar`
                subclass to use to display file download progress.

        Returns:
            int: the number of queued medias.

            May not be equal to the number of downloaded medias if some
            errors occurred during background download.

        """
        # Open the destination filesystem
        destination, close_destination = self._init_destfs(destination)

        # Create an iterator over the pages with an optional progress bar
        pages_iterator = self.pages()   # type: Iterable[Dict[Text, Any]]
        pages_iterator = pgpbar = self._init_pbar(pages_iterator, pgpbar_cls)

        # Create an iterator over the medias
        medias_iterator = self._medias(iter(pages_iterator), timeframe)

        # Create the media download bar from a dummy iterator
        dlpbar = self._init_pbar(
            six.moves.range(length_hint(medias_iterator)), dlpbar_cls)

        # Start a group of workers
        workers, queue = self._init_workers(
            dlpbar if dlpbar_cls is not None else None, destination)

        # Make sure exiting the main thread will shutdown workers
        atexit.register(self._shutdown_workers, workers)

        # Queue all medias
        medias_queued = self._fill_media_queue(
            queue, destination, medias_iterator, media_count,
            new_only, condition)

        # Once queuing the medias is finished, finish the page progress bar
        # and set a new maximum on the download progress bar.
        if pgpbar_cls is not None:
            pgpbar.finish()                         # type: ignore
        if dlpbar_cls is not None:
            dlpbar.set_maximum(medias_queued)       # type: ignore

        # If no medias were queued, issue a warning
        # TODO: refine warning depending on download parameters
        if medias_queued == 0:
            warnings.warn("No medias found.")

        # Add poison pills to the queue and wait for workers to finish
        self._poison_workers(workers, queue)
        self._join_workers(workers, queue)

        # Once downloading is finished, finish the download progress bar
        # and close the destination if needed.
        if dlpbar_cls is not None:
            dlpbar.finish()                        # type: ignore
        if close_destination:
            destination.close()

        return medias_queued

    def login(self, username, password):
        # type: (str, str) -> None
        """Log the instance in using the given credentials.

        Arguments:
            username (str): the username to log in with.
            password (str): the password to log in with.

        """
        self._login(username, password, session=self.session)

    def logout(self):
        # type: () -> None
        """Log the instance out from the currently opened session.
        """
        self._logout(session=self.session)

    def logged_in(self):
        # type: () -> bool
        """Check if there's an open Instagram session.
        """
        return self._logged_in(self.session)

    def _init_pbar(self,
                   it,             # type: Iterable[_T]
                   pbar_cls=None,  # type: Optional[Type[ProgressBar]]
                   ):
        # type: (...) -> Iterable[_T]
        """Wrap an iterable within a `ProgressBar`.

        Arguments:
            it (~collections.Iterable): an iterable to wrap.
            pbar_cls (type or None): an optional `ProgressBar` subclass
                to use, or `None` to avoid using a progress bar.

        Returns:
            ~collections.Iterable: the wrapped iterable.

        """
        if pbar_cls is not None:
            if not issubclass(pbar_cls, ProgressBar):
                raise TypeError("pbar must implement the ProgressBar interface !")
            maximum = length_hint(it)
            it = pbar = pbar_cls(it)
            pbar.set_maximum(maximum)
            pbar.set_lock(threading.RLock())
        return it

    def _init_destfs(self, destination, create=True):
        # type: (Union[str, fs.base.FS], bool) -> Tuple[fs.base.FS, bool]
        """Open a filesystem either from a FS URL or filesystem instance.

        Arguments:
            destination (~fs.base.FS or str): the destination filesystem
                to open, as a filesystem instance or FS URL.
            create (bool): whether or not to create a new
                filesystem if it does not exist.

        Returns:
            (~fs.base.FS, bool): the open FS, and whether to close it.

        """
        close_destination = False
        if isinstance(destination, six.binary_type):
            destination = destination.decode('utf-8')
        if isinstance(destination, six.text_type):
            destination = fs.open_fs(destination, create=create)
            close_destination = True
        if not isinstance(destination, fs.base.FS):
            raise TypeError("<destination> must be a FS URL or FS instance.")
        return destination, close_destination

    def _fill_media_queue(self,
                          queue,            # type: Queue
                          destination,      # type: fs.base.FS
                          medias_iter,      # type: Iterable[Any]
                          media_count=None,  # type: Optional[int]
                          new_only=False,   # type: bool
                          condition=None,   # type: Optional[Callable[[dict], bool]]
                          ):
        # type: (...) -> int
        """Fill the download queue with medias from the provided iterator.

        Arguments:
            queue (~queue.Queue): the download queue to fill.
            destination (~fs.base.FS): the filesystem where to download
                the files.
            medias_iter (~collections.Iterable): an iterable over the
                Instagram medias to download.
            media_count (int or None): the maximum number of new medias to
                download, or ``None`` to download all discoverable medias.
            new_only (bool): stop media discovery when a media that was
                already downloaded is encountered.
            condition (function or None): the condition to filter the medias
                with. If `None` is given, a function is created using the
                ``get_videos`` and ``videos_only`` passed at object
                initialisation.

        Returns:
            int: the number of queued medias.

            May not be equal to the number of downloaded medias if some
            errors occurred during downloads.

        """
        # Create a condition from parameters if needed
        if condition is not None:
            _condition = condition       # type: Callable[[dict], bool]
        else:
            if self.videos_only:
                def _condition(media): return media['is_video']
            elif not self.get_videos:
                def _condition(media): return not media['is_video']
            else:
                def _condition(media): return True

        # Queue all media filling the condition
        medias_queued = 0
        for media in six.moves.filter(_condition, medias_iter):

            # Check if the whole post info is required
            if self.namegen.needs_extended(media) or media["__typename"] != "GraphImage":
                media = self.get_post_info(media['shortcode'])

            # Check that sidecar children fit the condition
            if media['__typename'] == "GraphSidecar":
                # Check that each node fits the condition
                for sidecar in media['edge_sidecar_to_children']['edges'][:]:
                    if not _condition(sidecar['node']):
                        media['edge_sidecar_to_children']['edges'].remove(sidecar)

                # Check that the nodelist is not depleted
                if not media['edge_sidecar_to_children']['edges']:
                    continue

            # Check that the file does not exist
            # FIXME: not working well with sidecar
            if new_only and destination.exists(self.namegen.file(media)):
                break

            # Put the medias in the queue
            queue.put(media)
            medias_queued += 1

            if media_count is not None and medias_queued >= media_count:
                break

        return medias_queued

    # WORKERS UTILS

    def _init_workers(self,
                      pbar,         # type: Union[ProgressBar, Iterable, None]
                      destination,  # type: fs.base.FS
                      ):
        # type: (...) -> Tuple[List[InstaDownloader], Queue]

        workers = []        # type: List[InstaDownloader]
        queue = Queue()     # type: Queue

        for _ in six.moves.range(self.jobs):
            worker = InstaDownloader(
                queue=queue,
                destination=destination,
                namegen=self.namegen,
                add_metadata=self.add_metadata,
                dump_json=self.dump_json,
                dump_only=self.dump_only,
                pbar=pbar,
                session=self.session)
            worker.start()
            workers.append(worker)

        return workers, queue

    def _poison_workers(self, workers, queue):
        # type: (List[InstaDownloader], Queue) -> None
        for worker in workers:
            queue.put(None)

    def _join_workers(self, workers, queue):
        # type: (List[InstaDownloader], Queue) -> None
        if any(w.is_alive() for w in workers):
            for worker in workers:
                worker.join()

    def _shutdown_workers(self, workers):
        # type: (List[InstaDownloader]) -> None
        for worker in workers:
            worker.terminate()
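One detail worth calling out from get_post_info above: tenacity.retry expects its strategies as keyword arguments (stop=..., wait=...); passed positionally they bind to unrelated parameters of the decorator. A small sketch of the keyword form with an exponential backoff capped at a maximum wait; the flaky function is illustrative:

import tenacity

attempts = {"count": 0}


@tenacity.retry(stop=tenacity.stop_after_attempt(10),
                wait=tenacity.wait_exponential(multiplier=0.1, max=1))
def fetch_post_info(code):
    attempts["count"] += 1
    if attempts["count"] < 4:
        # Transient failure: tenacity backs off exponentially, capped at 1 second.
        raise IOError("temporary fetch failure for %s" % code)
    return {"shortcode": code, "attempts": attempts["count"]}


print(fetch_post_info("abc123"))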
Beispiel #57
0
class ServiceCoordinator(object):
    """Service coordinator.

    This class uses the `tooz` library to manage group membership.

    To ensure that the other agents know this agent is still alive,
    the `heartbeat` method should be called periodically.
    """
    def __init__(self, my_id=None):
        self._coordinator = None
        self._my_id = six.b(my_id or utils.get_process_identifier())
        self._started = False

    def start(self):
        backend_url = cfg.CONF.coordination.backend_url

        if backend_url:
            try:
                self._coordinator = tooz.coordination.get_coordinator(
                    backend_url, self._my_id)

                self._coordinator.start(start_heart=True)
                self._started = True

                LOG.info('Coordination backend started successfully.')
            except tooz.coordination.ToozError as e:
                self._started = False

                LOG.exception(
                    'Error connecting to coordination backend. '
                    '%s', six.text_type(e))

    def stop(self):
        if not self.is_active():
            return

        try:
            self._coordinator.stop()
        except tooz.coordination.ToozError:
            LOG.warning('Error connecting to coordination backend.')
        finally:
            self._coordinator = None
            self._started = False

    def is_active(self):
        return self._coordinator and self._started

    @tenacity.retry(stop=tenacity.stop_after_attempt(5))
    def join_group(self, group_id):
        if not self.is_active() or not group_id:
            return

        try:
            join_req = self._coordinator.join_group(six.b(group_id))
            join_req.get()

            LOG.info('Joined service group:%s, member:%s', group_id,
                     self._my_id)

            return
        except tooz.coordination.MemberAlreadyExist:
            return
        except tooz.coordination.GroupNotCreated as e:
            create_grp_req = self._coordinator.create_group(six.b(group_id))

            try:
                create_grp_req.get()
            except tooz.coordination.GroupAlreadyExist:
                pass

            # Re-raise exception to join group again.
            raise e

    def leave_group(self, group_id):
        if self.is_active():
            self._coordinator.leave_group(six.b(group_id))

            LOG.info('Left service group:%s, member:%s', group_id, self._my_id)

    def get_members(self, group_id):
        """Gets members of coordination group.

        A ToozError exception must be handled when this function is invoked;
        we leave the handling decision to the invoker.
        """
        if not self.is_active():
            return []

        get_members_req = self._coordinator.get_members(six.b(group_id))

        try:
            members = get_members_req.get()

            LOG.debug('Members of group %s: %s', group_id, members)

            return members
        except tooz.coordination.GroupNotCreated:
            LOG.warning('Group %s does not exist.', group_id)

            return []
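join_group above uses a corrective-action-then-re-raise pattern: when the group does not exist it creates it and re-raises, so the @tenacity.retry decorator simply runs the join again (up to five attempts). A hedged, self-contained sketch of the same idea with stand-in types:

import tenacity


class GroupNotCreated(Exception):
    """Illustrative stand-in for tooz.coordination.GroupNotCreated."""


groups = set()


@tenacity.retry(stop=tenacity.stop_after_attempt(5))
def join_group(group_id):
    try:
        if group_id not in groups:
            raise GroupNotCreated(group_id)
        return "joined %s" % group_id
    except GroupNotCreated:
        # Create the missing group, then re-raise so the decorator
        # retries the join against the freshly created group.
        groups.add(group_id)
        raise


print(join_group("workers"))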
Beispiel #58
0
from tenacity import (
    before_sleep_log,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_fixed,
)

from ESSArch_Core.fixity.checksum import calculate_checksum

MB = 1024 * 1024
DEFAULT_BLOCK_SIZE = 10 * MB

logger = logging.getLogger('essarch.storage.copy')


@retry(retry=retry_if_exception_type(RequestException), reraise=True, stop=stop_after_attempt(5),
       wait=wait_fixed(60), before_sleep=before_sleep_log(logger, logging.DEBUG))
def copy_chunk_remotely(src, dst, offset, file_size, requests_session, upload_id=None, block_size=DEFAULT_BLOCK_SIZE):
    filename = os.path.basename(src)

    with open(src, 'rb') as srcf:
        srcf.seek(offset)
        chunk = srcf.read(block_size)

    start = offset
    end = offset + block_size - 1

    if end > file_size:
        end = file_size - 1

    HTTP_CONTENT_RANGE = 'bytes %s-%s/%s' % (start, end, file_size)