Example 1
    def __init__(self, config: dict, logger, exchange):
        self.conf = config
        self.logger = logger
        self._exchange = exchange

        cfg = config["bot"]

        self._purchase_size_percentage = cfg["purchase_size_percentage"]
        self._update_interval = cfg["update_interval"]
        self._symbol = cfg["symbol"]

        self.util = Utils(self._exchange, self.logger)

        # tenacity's retry_if_exception_type takes exception classes;
        # retry_if_exception takes a predicate called with the exception.
        self._aretry = tenacity.AsyncRetrying(
            wait=tenacity.wait_random(0, 2),
            retry=(tenacity.retry_if_exception_type(ccxt.DDoSProtection)
                   | tenacity.retry_if_exception_type(ccxt.RequestTimeout)))

        self._markets = {}
        if self._symbol:
            self._markets[self._symbol] = {
                "curr_sold": 0,
                "curr_bought": 0,
                "buys": 0,
                "sells": 0
            }
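How this AsyncRetrying instance is consumed is not shown here. A minimal sketch, assuming tenacity 8.x (where AsyncRetrying objects are awaitable callables), ccxt's async API, and a hypothetical _fetch_ticker helper:

    async def _fetch_ticker(self):
        # Awaiting the retryer runs the coroutine under the policy built in
        # __init__: retry on DDoSProtection/RequestTimeout, 0-2s random wait.
        return await self._aretry(self._exchange.fetch_ticker, self._symbol)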
Example 2
    def __init__(
        self,
        databricks_conn_id: str = default_conn_name,
        timeout_seconds: int = 180,
        retry_limit: int = 3,
        retry_delay: float = 1.0,
        retry_args: Optional[Dict[Any, Any]] = None,
    ) -> None:
        super().__init__()
        self.databricks_conn_id = databricks_conn_id
        self.timeout_seconds = timeout_seconds
        if retry_limit < 1:
            raise ValueError('Retry limit must be greater than or equal to 1')
        self.retry_limit = retry_limit
        self.retry_delay = retry_delay
        self.aad_tokens: Dict[str, dict] = {}
        self.aad_timeout_seconds = 10

        def my_after_func(retry_state):
            self._log_request_error(retry_state.attempt_number,
                                    retry_state.outcome)

        if retry_args:
            self.retry_args = copy.copy(retry_args)
            self.retry_args['retry'] = retry_if_exception(
                self._retryable_error)
            self.retry_args['after'] = my_after_func
        else:
            self.retry_args = dict(
                stop=stop_after_attempt(self.retry_limit),
                wait=wait_exponential(min=self.retry_delay,
                                      max=(2**retry_limit)),
                retry=retry_if_exception(self._retryable_error),
                after=my_after_func,
            )
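How self.retry_args is consumed falls outside this snippet. A minimal sketch of the consuming side, using tenacity's attempt-iteration protocol; _call_endpoint is a hypothetical helper:

    def _do_api_call(self, endpoint_info, json):
        # Sketch: each attempt body runs under the retry/wait/stop/after
        # policy assembled in __init__.
        for attempt in tenacity.Retrying(**self.retry_args):
            with attempt:
                return self._call_endpoint(endpoint_info, json)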
Example 3
	def __init__(self, logger=None, config=None, db=None):
		self._logger = logger

		if config:
			self._rsi_timeframe = config["rsi_timeframe"]
			self._interval = config["update_interval"]
			self._over_bought = config["over_bought"]
			self._rsi_period = config["rsi_period"]
			self._free_fall = config["free_fall"]
			self._over_sold = config["over_sold"]
			self._mooning = config["mooning"]

		self._db = db

		self._exchange_market_prices = {}
		self._significant_markets = set()

		self._aretry = tenacity.AsyncRetrying(
			wait=tenacity.wait_random(0, 3),
			retry=(
				tenacity.retry_if_exception_type(ccxt.DDoSProtection) |
				tenacity.retry_if_exception_type(ccxt.RequestTimeout) |
				tenacity.retry_if_exception_type(aiohttp.ServerDisconnectedError)))
Example 4
    def __init__(self, exchange: ccxt.Exchange, logger):
        self._exchange = exchange
        self._logger = logger

        self._aretry = tenacity.AsyncRetrying(
            wait=tenacity.wait_random(0, 2),
            retry=(tenacity.retry_if_exception_type(ccxt.DDoSProtection)
                   | tenacity.retry_if_exception_type(ccxt.RequestTimeout)))

        self.purchase_strategies = {}
Example 5
class GoogleAPIResource(Resource):

    # Names of the get and update methods. Most are the same but override in
    # the Resource if necessary
    resource_property = ""
    get_method = "get"
    update_method = "update"

    def __init__(self, resource_data, **kwargs):
        full_resource_path = "{}.{}".format(self.service_name,
                                            self.resource_path)

        self.service = build_subresource(full_resource_path, self.version,
                                         **kwargs)

        self.resource_data = resource_data

    @staticmethod
    def factory(resource_data, **kargs):
        resource_type_map = {
            'bigquery.datasets': GcpBigqueryDataset,
            'compute.instances': GcpComputeInstance,
            'sqladmin.instances': GcpSqlInstance,
            'storage.buckets': GcpStorageBucket,
            'storage.buckets.iam': GcpStorageBucketIamPolicy
        }

        resource_type = resource_data.get('resource_type')
        if not resource_type:
            assert 0, 'Unrecognized resource'

        if resource_type not in resource_type_map:
            assert 0, 'Unrecognized resource'

        cls = resource_type_map.get(resource_type)
        return cls(resource_data, **kargs)

    def type(self):
        type_components = ["gcp", self.service_name, self.resource_path]

        # Things like IAM policy are not separate resources, but rather
        # properties of a resource. We may want to evaluate policy on these
        # properties, so we represent them as resources and need to distinguish
        # them in the resource type.
        if self.resource_property:
            type_components.append(self.resource_property)

        return ".".join(type_components)

    def get(self):
        method = getattr(self.service, self.get_method)
        return method(**self._get_request_args()).execute()

    @tenacity.retry(retry=tenacity.retry_if_exception(is_retryable_exception),
                    wait=tenacity.wait_random_exponential(multiplier=1,
                                                          max=10),
                    stop=tenacity.stop_after_attempt(10))
    def update(self, body):
        method = getattr(self.service, self.update_method)
        return method(**self._update_request_args(body)).execute()
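A hedged usage sketch of the factory dispatch; the keys beyond resource_type and the printed value are illustrative assumptions, not from the source:

    bucket = GoogleAPIResource.factory(
        {'resource_type': 'storage.buckets', 'bucket_name': 'example-bucket'})
    print(bucket.type())  # e.g. "gcp.storage.buckets"
    bucket.update(bucket.get())  # update() retries per the decorator above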
Example 6
 def decorated_function(*args, **kwargs) -> _RT:
     return tenacity.retry(
         retry=tenacity.retry_if_exception(is_retriable),
         stop=tenacity.stop.stop_after_attempt(_RETRY_UTILS_MAX_RETRIES),
         wait=tenacity.wait.wait_exponential(max=_RETRY_UTILS_MAX_RETRIES),
         after=tenacity.after.after_log(_LOGGER, logging.DEBUG),
         reraise=True)(function)(*args, **kwargs)
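Only the inner function survives in this fragment; is_retriable, _RETRY_UTILS_MAX_RETRIES, _LOGGER and _RT belong to its original module. A plausible reconstruction of the enclosing decorator, with assumed placeholder definitions:

import functools
import logging
from typing import Any, Callable, TypeVar

import tenacity

_RT = TypeVar('_RT')
_RETRY_UTILS_MAX_RETRIES = 3            # assumed module constant
_LOGGER = logging.getLogger(__name__)   # assumed module logger

def is_retriable(exc: BaseException) -> bool:
    # Assumed predicate; the original module supplies its own.
    return isinstance(exc, ConnectionError)

def retriable(function: Callable[..., _RT]) -> Callable[..., _RT]:
    """Hypothetical enclosing decorator for decorated_function above."""
    @functools.wraps(function)
    def decorated_function(*args: Any, **kwargs: Any) -> _RT:
        # Wrap the target with tenacity on each call, then invoke it.
        return tenacity.retry(
            retry=tenacity.retry_if_exception(is_retriable),
            stop=tenacity.stop.stop_after_attempt(_RETRY_UTILS_MAX_RETRIES),
            wait=tenacity.wait.wait_exponential(max=_RETRY_UTILS_MAX_RETRIES),
            after=tenacity.after.after_log(_LOGGER, logging.DEBUG),
            reraise=True)(function)(*args, **kwargs)
    return decorated_function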
Example 7
    def validate(self, max_retries=0):
        """Performs basic **connection** validation of a sqlalchemy engine."""
        def _retry_on_exception(exc):
            LOG.warning("Engine connection (validate) failed due to '%s'", exc)
            if isinstance(exc, sa_exc.OperationalError) and \
               _is_db_connection_error(six.text_type(exc.args[0])):
                # We may be able to fix this by retrying...
                return True
            if isinstance(exc,
                          (sa_exc.TimeoutError, sa_exc.ResourceClosedError,
                           sa_exc.DisconnectionError)):
                # We may be able to fix this by retrying...
                return True
            # Other failures we likely can't fix by retrying...
            return False

        @tenacity.retry(stop=tenacity.stop_after_attempt(
            max(0, int(max_retries))),
                        wait=tenacity.wait_exponential(),
                        reraise=True,
                        retry=tenacity.retry_if_exception(_retry_on_exception))
        def _try_connect(engine):
            # See if we can make a connection happen.
            #
            # NOTE(harlowja): note that even though we are connecting
            # once it does not mean that we will be able to connect in
            # the future, so this is more of a sanity test and is not
            # complete connection insurance.
            with contextlib.closing(engine.connect()):
                pass

        _try_connect(self._engine)
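Note the stop condition: with the default max_retries=0, stop_after_attempt(max(0, int(max_retries))) is already satisfied after the first failed attempt, so validate() makes exactly one connection attempt, and reraise=True surfaces the original exception rather than tenacity's RetryError.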
Example 8
 def decorator_f(self, *args, **kwargs):
     retry_args = getattr(self, 'retry_args', None)
     if retry_args is None:
         return fun(self, *args, **kwargs)
     multiplier = retry_args.get('multiplier', 1)
     min_limit = retry_args.get('min', 1)
     max_limit = retry_args.get('max', 1)
     stop_after_delay = retry_args.get('stop_after_delay', 10)
     tenacity_logger = tenacity.before_log(
         self.log, logging.DEBUG) if self.log else None
     default_kwargs = {
         'wait':
         tenacity.wait_exponential(multiplier=multiplier,
                                   max=max_limit,
                                   min=min_limit),
         'retry':
         tenacity.retry_if_exception(should_retry),
         'stop':
         tenacity.stop_after_delay(stop_after_delay),
         'before':
         tenacity_logger,
         'after':
         tenacity_logger,
     }
     return tenacity.retry(**default_kwargs)(fun)(self, *args,
                                                  **kwargs)
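Note the call-time wrapping in decorator_f: when the instance carries no retry_args, fun runs once, undecorated; otherwise a fresh tenacity policy is assembled from the instance's settings on every invocation, so retry behaviour can change between calls.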
Example 10
class OpenPolicyAgent(Engine):
    def __init__(self, opa_base_url):
        self.opa_base_url = opa_base_url

    @tenacity.retry(
        retry=tenacity.retry_if_exception(is_retryable_exception),
        wait=tenacity.wait_random_exponential(multiplier=1, max=10),
        stop=tenacity.stop_after_attempt(5),
    )
    def _opa_request(self, path, method="GET", data=None):
        url = "{}/{}".format(self.opa_base_url, path)
        headers = {"Content-type": "application/json"}
        req = request.Request(url,
                              data=json.dumps(data).encode("utf-8"),
                              method=method,
                              headers=headers)

        with request.urlopen(req) as resp:
            decoded_resp = resp.read().decode("utf-8")
            deserialized_resp = json.loads(decoded_resp)
            if "result" not in deserialized_resp:
                err = "Endpoint {} not found on the OPA server.".format(url)
                raise NoSuchEndpoint(err)

            return deserialized_resp["result"]

    # Perform an evaluation on a given resource
    def evaluate(self, resource):
        input = {
            "input": resource.get(),
        }

        evals = self._opa_request("rpe/evaluate", method="POST", data=input)

        return [
            Evaluation(engine=self, resource=resource, **ev) for ev in evals
        ]

    def policies(self):
        """
        Returns:
            A list of all configured policies, optionally filtered by a resource_type
        """
        policies = self._opa_request("rpe/policies")

        return [Policy(engine=self, **p) for p in policies]

    def remediate(self, resource, policy_id):
        rem_path = "rpe/policy/{}/remediate".format(policy_id)
        input = {"input": resource.get()}
        remediation = self._opa_request(rem_path, method="POST", data=input)

        if remediation:
            resource.remediate(remediation)
        else:
            raise NoPossibleRemediation(
                "Remediation is not supported for this resource/policy")
Example 11
def _do_it_with_persistence(func, args, config):
    """Exec func with retries based on provided cli flags

    :param: func: function to attempt to execute
    :param: args: argparser generated cli arguments
    :param: config: configparser object of vaultlocker config
    """
    @tenacity.retry(
        wait=tenacity.wait_fixed(1),
        reraise=True,
        stop=(tenacity.stop_after_delay(args.retry)
              if args.retry > 0 else tenacity.stop_after_attempt(1)),
        retry=(tenacity.retry_if_exception_type(
                   hvac.exceptions.VaultNotInitialized)
               | tenacity.retry_if_exception_type(hvac.exceptions.VaultDown)))
    def _do_it():
        client = _vault_client(config)
        func(args, client, config)

    _do_it()
Example 12
 def _execute(self, *args, **kwargs):  # pylint: disable=signature-differs
     # Workaround TD bug: throttled operations are reported as internal.
     # Ref b/175345578
     retryer = tenacity.Retrying(
         retry=tenacity.retry_if_exception(self._operation_internal_error),
         wait=tenacity.wait_fixed(10),
         stop=tenacity.stop_after_delay(5 * 60),
         before_sleep=tenacity.before_sleep_log(logger, logging.DEBUG),
         reraise=True)
     retryer(super()._execute, *args, **kwargs)
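tenacity.Retrying instances are callable, so retryer(super()._execute, *args, **kwargs) runs the parent implementation under the retry policy without permanently decorating it.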
Example 13
 def decorator(func):
     @tenacity.retry(
         retry=tenacity.retry_if_exception(retry_on_retriable_kafka_error),
         wait=tenacity.wait_fixed(1),
         stop=tenacity.stop_after_attempt(retries),
         reraise=True
     )
     def wrapper(*args, **kwargs):
         return func(*args, **kwargs)
     return wrapper
Example 15
 def get_columns(self, connection, table_name, schema=None, **kw):
     raw_connection = self._raw_connection(connection)
     schema = schema if schema else raw_connection.schema_name
     query = """
             SELECT
               table_schema,
               table_name,
               column_name,
               data_type,
               is_nullable,
               column_default,
               ordinal_position,
               comment
             FROM information_schema.columns
             WHERE table_schema = '{schema}'
             AND table_name = '{table}'
             """.format(
         schema=schema, table=table_name
     )
     retry_config = raw_connection.retry_config
     retry = tenacity.Retrying(
         retry=retry_if_exception(
             lambda exc: self._retry_if_data_catalog_exception(
                 exc, schema, table_name
             )
         ),
         stop=stop_after_attempt(retry_config.attempt),
         wait=wait_exponential(
             multiplier=retry_config.multiplier,
             max=retry_config.max_delay,
             exp_base=retry_config.exponential_base,
         ),
         reraise=True,
     )
     try:
         return [
             {
                 "name": row.column_name,
                 "type": _TYPE_MAPPINGS.get(
                     self._get_column_type(row.data_type), NULLTYPE
                 ),
                 "nullable": True if row.is_nullable == "YES" else False,
                 "default": row.column_default
                 if not self._is_nan(row.column_default)
                 else None,
                 "ordinal_position": row.ordinal_position,
                 "comment": row.comment,
             }
             for row in retry(connection.execute, query).fetchall()
         ]
     except OperationalError as e:
         if not self._retry_if_data_catalog_exception(e, schema, table_name):
             raise_from(NoSuchTableError(table_name), e)
         else:
             raise e
Example 16
    def reply(self, reply=None, failure=None):
        """Send back reply to the RPC client
        :param reply: Dictionary, reply. In case of exception should be None
        :param failure: Tuple, should be a sys.exc_info() tuple.
            Should be None if RPC request was successfully processed.

        :return RpcReplyPikaIncomingMessage, message with reply
        """

        if self.reply_q is None:
            return

        reply_outgoing_message = RpcReplyPikaOutgoingMessage(
            self._pika_engine,
            self.msg_id,
            reply=reply,
            failure_info=failure,
            content_type=self._content_type,
        )

        def on_exception(ex):
            if isinstance(ex, pika_drv_exc.ConnectionException):
                LOG.warning(
                    "Connectivity related problem during reply sending. %s",
                    ex)
                return True
            else:
                return False

        if self._pika_engine.rpc_reply_retry_attempts:
            retrier = tenacity.retry(
                stop=(tenacity.stop_never
                      if self._pika_engine.rpc_reply_retry_attempts == -1 else
                      tenacity.stop_after_attempt(
                          self._pika_engine.rpc_reply_retry_attempts)),
                retry=tenacity.retry_if_exception(on_exception),
                wait=tenacity.wait_fixed(
                    self._pika_engine.rpc_reply_retry_delay))
        else:
            retrier = None

        try:
            timeout = (None if self.expiration_time is None else max(
                self.expiration_time - time.time(), 0))
            with timeutils.StopWatch(duration=timeout) as stopwatch:
                reply_outgoing_message.send(reply_q=self.reply_q,
                                            stopwatch=stopwatch,
                                            retrier=retrier)
            LOG.debug("Message [id:'%s'] replied to '%s'.", self.msg_id,
                      self.reply_q)
        except Exception:
            LOG.exception("Message [id:'%s'] wasn't replied to : %s",
                          self.msg_id, self.reply_q)
Example 17
def retry_api_call(func, config, logger=None, *args, **kwargs):
    retry = tenacity.Retrying(
        retry=retry_if_exception(lambda e: getattr(e, 'response', {}).get(
            'Error', {}).get('Code', None) in config.exceptions
                                 if e else False),
        stop=stop_after_attempt(config.attempt),
        wait=wait_exponential(multiplier=config.multiplier,
                              max=config.max_delay,
                              exp_base=config.exponential_base),
        after=after_log(logger, logger.level) if logger else None,
        reraise=True)
    return retry(func, *args, **kwargs)
Example 18
def retry_s3(
        operation,
        bucket,
        key,
        size=None,
        limit=None,
        *,
        etag,
        version_id,
        s3_client
):
    """retry head or get operation to S3 with; stop before we run out of time.
    retry is necessary since, due to eventual consistency, we may not
    always get the required version of the object.
    """
    logger_ = get_quilt_logger()

    if operation == "head":
        function_ = s3_client.head_object
    elif operation == "get":
        function_ = s3_client.get_object
    else:
        raise ValueError(f"unexpected operation: {operation}")
    # Keyword arguments to function_
    arguments = {
        "Bucket": bucket,
        "Key": key
    }
    if operation == 'get' and size and limit:
        # can only request range if file is not empty
        arguments['Range'] = f"bytes=0-{min(size, limit) - 1}"
    if version_id:
        arguments['VersionId'] = version_id
    elif etag:
        arguments['IfMatch'] = etag

    logger_.debug("Entering @retry: %s, %s", operation, arguments)

    @retry(
        # debug
        reraise=True,
        stop=stop_after_attempt(MAX_RETRY),
        wait=wait_exponential(multiplier=2, min=4, max=10),
        retry=(retry_if_exception(should_retry_exception))
    )
    def call():
        """local function so we can set stop_after_delay dynamically"""
        # TODO: remove all this, stop_after_delay is not dynamically loaded anymore
        return function_(**arguments)

    return call()
Example 19
    def decorator(f):
        def should_retry(e):
            connect_error = isinstance(e, exception.IBMCConnectionError)
            if connect_error:
                LOG.info(
                    _('Failed to connect to iBMC, will retry now. '
                      'Max retry times is %(retry_times)d.'),
                    {'retry_times': CONF.ibmc.connection_attempts})
            return connect_error

        @tenacity.retry(
            retry=tenacity.retry_if_exception(should_retry),
            stop=tenacity.stop_after_attempt(CONF.ibmc.connection_attempts),
            wait=tenacity.wait_fixed(CONF.ibmc.connection_retry_interval),
            reraise=True)
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            # NOTE(dtantsur): this code could be written simpler, but then unit
            # testing decorated functions is pretty hard, as we usually pass a
            # Mock object instead of TaskManager there.
            if len(args) > 1:
                is_task_mgr = isinstance(args[1], task_manager.TaskManager)
                task = args[1] if is_task_mgr else args[0]
            else:
                task = args[0]

            node = task.node

            try:
                return f(*args, **kwargs)
            except ibmc_error.IBMCConnectionError as e:
                error = (_('Failed to connect to iBMC for node %(node)s, '
                           'Error: %(error)s') % {
                               'node': node.uuid,
                               'error': e
                           })
                LOG.error(error)
                raise exception.IBMCConnectionError(node=node.uuid,
                                                    error=error)
            except ibmc_error.IBMCClientError as e:
                error = (_('Failed to %(action)s for node %(node)s, '
                           'Error %(error)s') % {
                               'node': node.uuid,
                               'action': action,
                               'error': e
                           })
                LOG.error(error)
                raise exception.IBMCError(node=node.uuid, error=error)

        return wrapper
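action is a free variable in this fragment: decorator(f) is itself the inner layer of a parametrized factory, presumably something like def handle_ibmc_errors(action): ... return decorator, so that action can name the failed operation in the IBMCError message.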
Example 20
    def _http_request_with_retry(self, *args, **kwargs):
        def is_retryable(error):
            now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            print('{0} {1}'.format(now, repr(error)))

            def is_retryable_status_code(error):
                if not isinstance(error, requests.HTTPError):
                    return False
                if error.response.status_code == 409:
                    return True
                else:
                    return not (400 <= error.response.status_code <= 499)

            return is_retryable_status_code(error) or isinstance(
                error, (requests.ConnectionError, requests.ReadTimeout))

        if 'retry' in kwargs:
            retry = kwargs['retry'] | retry_if_exception(is_retryable)
            del kwargs['retry']
        else:
            retry = retry_if_exception(is_retryable)
        if 'before' in kwargs:
            before = kwargs['before']
            del kwargs['before']
        else:
            before = None

        kwargs['timeout'] = self.individual_request_timeout
        return self._http_request.retry_with(
            retry=retry,
            before=before,
            wait=wait_exponential(multiplier=self.retry_multiplier,
                                  max=self.retry_max_interval),
            stop=(stop_after_delay(self.retry_timeout)
                  | stop_after_attempt(self.retry_max_tries)),
        )(self, *args, **kwargs)
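tenacity attaches retry_with() only to functions it has already wrapped, so _http_request must itself carry a retry decorator. A minimal sketch of the assumed base method (the placeholder policy, session attribute and body are hypothetical):

    @retry(stop=stop_after_attempt(1))  # placeholder; overridden via retry_with
    def _http_request(self, method, path, **kwargs):
        # retry_with() above copies this wrapper with per-call retry/before/
        # wait/stop arguments.
        return self._session.request(method, path, **kwargs)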
Example 21
 def get_columns(self, connection, table_name, schema=None, **kw):
     schema = schema if schema else connection.connection.schema_name
     query = """
             SELECT
               table_schema,
               table_name,
               column_name,
               data_type,
               is_nullable,
               column_default,
               ordinal_position,
               comment
             FROM information_schema.columns
             WHERE table_schema = '{schema}'
             AND table_name = '{table}'
             """.format(schema=schema, table=table_name)
     retry = tenacity.Retrying(
         retry=retry_if_exception(
             lambda exc: self._retry_if_data_catalog_exception(
                 exc, schema, table_name)),
         stop=stop_after_attempt(connection.connection.retry_attempt),
         wait=wait_exponential(
             multiplier=connection.connection.retry_multiplier,
             max=connection.connection.retry_max_delay,
             exp_base=connection.connection.retry_exponential_base),
         reraise=True)
     try:
         return [{
             'name':
             row.column_name,
             'type':
             _TYPE_MAPPINGS.get(self._get_column_type(row.data_type),
                                NULLTYPE),
             'nullable':
             True if row.is_nullable == 'YES' else False,
             'default':
             row.column_default,
             'ordinal_position':
             row.ordinal_position,
             'comment':
             row.comment,
         } for row in retry(connection.execute, query).fetchall()]
     except OperationalError as e:
         if not self._retry_if_data_catalog_exception(
                 e, schema, table_name):
             raise_from(NoSuchTableError(table_name), e)
         else:
             raise e
Example 22
class PoloniexBaseAPI(metaclass=ABCMeta):
    @abstractmethod
    def __init__(
        self,
        *,
        requests_per_second: int = REQUESTS_PER_SECOND,
    ) -> None:
        self._rate_limiter = RateLimiter(requests_per_second, 1, 1)
        self._requests_per_second = requests_per_second
        self._session = Session()

    @tenacity.retry(
        retry=tenacity.retry_if_exception(_retry_poloniex_error),
        wait=tenacity.wait_exponential() + tenacity.wait_random(0, 1),
        stop=tenacity.stop_after_attempt(4) | tenacity.stop_after_delay(8),
        reraise=True,
    )
    def request(
        self,
        *args,
        **kwargs,
    ) -> Optional[Union[Dict[str, Any], List[Dict[str, Any]]]]:
        cls_name = type(self).__name__
        while self._rate_limiter.check(cls_name) is False:
            sleep(1 / self._requests_per_second)

        request = Request(*args, **kwargs)
        prepared = self._session.prepare_request(request)

        response = self._session.send(prepared)

        status = response.status_code
        if status >= 200 and status < 300:
            data = None
            try:
                data = response.json()
            except ValueError:
                logger.exception('Error decoding response JSON')
            return data
        elif status >= 400 and status < 500:
            raise PoloniexRequestError(response)
        elif status >= 500:
            raise PoloniexServerError(response)
        else:
            # We shouldn't ever get 1xx responses (Poloniex doesn't send them)
            # or 3xx responses (requests follows redirects); return None to
            # make mypy happy
            return None
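Two composition details worth noting in the decorator above: tenacity wait strategies support +, so the exponential backoff gains up to one second of random jitter, and stop strategies support |, so the call gives up at whichever of four attempts or eight seconds comes first.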
Example 23
    def send_notification(self, target, ctxt, message, version, retry=None):
        if retry is None:
            retry = self._pika_engine.default_notification_retry_attempts

        def on_exception(ex):
            if isinstance(ex, (pika_drv_exc.ExchangeNotFoundException,
                               pika_drv_exc.RoutingException)):
                LOG.warning("Problem during sending notification. %s", ex)
                try:
                    self._declare_notification_queue_binding(target)
                except pika_drv_exc.ConnectionException as e:
                    LOG.warning("Problem during declaring notification queue "
                                "binding. %s", e)
                return True
            elif isinstance(ex, (pika_drv_exc.ConnectionException,
                                 pika_drv_exc.MessageRejectedException)):
                LOG.warning("Problem during sending notification. %s", ex)
                return True
            else:
                return False

        if retry:
            retrier = tenacity.retry(
                stop=(tenacity.stop_never if retry == -1 else
                      tenacity.stop_after_attempt(retry)),
                retry=tenacity.retry_if_exception(on_exception),
                wait=tenacity.wait_fixed(
                    self._pika_engine.notification_retry_delay
                )
            )
        else:
            retrier = None

        msg = pika_drv_msg.PikaOutgoingMessage(self._pika_engine, message,
                                               ctxt)
        return msg.send(
            exchange=(
                target.exchange or
                self._pika_engine.default_notification_exchange
            ),
            routing_key=target.topic,
            confirm=True,
            mandatory=True,
            persistent=self._pika_engine.notification_persistence,
            retrier=retrier
        )
Example 24
def tf_serving_retry_policy(name):
    return {
        "wait":
        wait_random_exponential(
            multiplier=1,
            min=4,
            max=20,
        ),
        "stop":
        stop_after_attempt(
            int(os.environ.get("MODELKIT_TF_SERVING_ATTEMPTS", 10))),
        "retry":
        retry_if_exception(retriable_error),
        "after":
        log_after_retry(name),
        "reraise":
        True,
    }
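The dict mirrors tenacity.retry's keyword arguments, so it is presumably splatted straight into the decorator. A hedged usage sketch, assuming retry is from-imported like the other tenacity names, with an illustrative model name and function:

@retry(**tf_serving_retry_policy("tf-serving"))
def predict(request):
    ...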
Example 25
def bigquery_retry():

    def is_bigquery_rate_limit_exception(e: Exception) -> bool:
        return '403 Exceeded rate limits:' in str(e)

    # Example raised when we exceed BigQuery API rate limits:

    # sqlalchemy.exc.DatabaseError: (google.cloud.bigquery.dbapi.exceptions.DatabaseError)
    # 403 Exceeded rate limits: too many table update operations for this table. For more
    # information, see https://cloud.google.com/bigquery/troubleshooting-errors
    return tenacity.retry(wait=tenacity.wait_random_exponential(multiplier=1, max=60),
                          stop=tenacity.stop_after_attempt(5),
                          before_sleep=tenacity.before_sleep_log(logger, logging.WARNING),
                          retry=(tenacity.retry_if_exception_type(DatabaseError) &
                                 # Wish the specific error were tied to the type or a field...
                                 #
                                 # https://google-cloud-python.readthedocs.io/en/0.32.0/_modules/google/cloud/bigquery/dbapi/exceptions.html#DatabaseError
                                 tenacity.retry_if_exception(is_bigquery_rate_limit_exception)),
                          reraise=True)
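The retry predicate here is a conjunction: & requires both conditions, so a retry happens only when the exception is a DatabaseError and its message carries the rate-limit marker; on tenacity retry conditions, & builds retry_all and | builds retry_any.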
Example 26
def retry_upon_exception_exclude_error_codes(exc, excluded_errors, delay,
                                             max_delay, max_attempts):
    """Retry with the configured exponential delay, unless the exception error
    code is in the given list
    """
    def retry_if_not_error_codes(e):
        # return True only for BadRequests without error codes or with error
        # codes not in the exclude list
        if isinstance(e, exc):
            error_code = _get_bad_request_error_code(e)
            if error_code and error_code not in excluded_errors:
                return True
        return False

    return tenacity.retry(
        reraise=True,
        retry=tenacity.retry_if_exception(retry_if_not_error_codes),
        wait=tenacity.wait_exponential(multiplier=delay, max=max_delay),
        stop=tenacity.stop_after_attempt(max_attempts))
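A hedged usage sketch of the returned decorator; the exception class, error code and function are illustrative only:

@retry_upon_exception_exclude_error_codes(
    BadRequest, excluded_errors=(60513,), delay=0.5, max_delay=10,
    max_attempts=5)
def update_logical_port(port_id, body):
    ...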
Example 27
def retry_api_call(func: Callable[..., Any],
                   config: RetryConfig,
                   logger: logging.Logger = None,
                   *args,
                   **kwargs) -> Any:
    retry = tenacity.Retrying(
        retry=retry_if_exception(lambda e: getattr(e, "response", {}).get(
            "Error", {}).get("Code", None) in config.exceptions
                                 if e else False),
        stop=stop_after_attempt(config.attempt),
        wait=wait_exponential(
            multiplier=config.multiplier,
            max=config.max_delay,
            exp_base=config.exponential_base,
        ),
        after=after_log(logger, logger.level) if logger else None,
        reraise=True,
    )
    return retry(func, *args, **kwargs)
Example 29
def retry_api_call(func,
                   exceptions=('ThrottlingException',
                               'TooManyRequestsException'),
                   attempt=5,
                   multiplier=1,
                   max_delay=1800,
                   exp_base=2,
                   logger=None,
                   *args,
                   **kwargs):
    retry = tenacity.Retrying(
        retry=retry_if_exception(lambda e: getattr(e, 'response', {}).get(
            'Error', {}).get('Code', None) in exceptions if e else False),
        stop=stop_after_attempt(attempt),
        wait=wait_exponential(multiplier=multiplier,
                              max=max_delay,
                              exp_base=exp_base),
        after=after_log(logger, logger.level) if logger else None,
        reraise=True)
    return retry(func, *args, **kwargs)
Example 30
def google_sheets_retry():
    # Example raised when we exceed Google Sheets API rate limits:
    #
    # googleapiclient.errors.HttpError:
    # <HttpError 429 when requesting
    # https://sheets.googleapis.com/v4/spreadsheets/...:clear?alt=json returned "Quota
    # exceeded for quota group 'WriteGroup' and limit 'Write requests per user per 100
    # seconds' of service 'sheets.googleapis.com' for consumer 'project_number:...'.">
    #
    # https://tenacity.readthedocs.io/en/latest/
    #
    # 'max' below is in seconds
    return tenacity.retry(wait=tenacity.wait_random_exponential(multiplier=2, max=120),
                          stop=tenacity.stop_after_attempt(10),
                          before_sleep=tenacity.before_sleep_log(logger, logging.WARNING),
                          retry=(tenacity.retry_if_exception_type(HttpError) &
                                 # Wish the http code were exported as a type of the exception:
                                 #
                                 # https://github.com/googleapis/google-api-python-client/blob/master/googleapiclient/errors.py
                                 tenacity.retry_if_exception(lambda e: 'Quota exceeded' in str(e))),
                          reraise=True)
Example 31
def retry_upon_exception_exclude_error_codes(
    exc, excluded_errors, delay, max_delay, max_attempts):
    """Retry with the configured exponential delay, unless the exception error
    code is in the given list
    """
    def retry_if_not_error_codes(e):
        # return True only for BadRequests without error codes or with error
        # codes not in the exclude list
        if isinstance(e, exc):
            error_code = _get_bad_request_error_code(e)
            if error_code and error_code not in excluded_errors:
                return True
        return False

    return tenacity.retry(reraise=True,
                          retry=tenacity.retry_if_exception(
                                retry_if_not_error_codes),
                          wait=tenacity.wait_exponential(
                                multiplier=delay, max=max_delay),
                          stop=tenacity.stop_after_attempt(max_attempts),
                          before=_log_before_retry, after=_log_after_retry)
Example 32
class SampleAssistant(object):
    """Sample Assistant that supports conversations and device actions.

    Args:
      device_model_id: identifier of the device model.
      device_id: identifier of the registered device instance.
      conversation_stream(ConversationStream): audio stream
        for recording query and playing back assistant answer.
      channel: authorized gRPC channel for connection to the
        Google Assistant API.
      deadline_sec: gRPC deadline in seconds for Google Assistant API call.
      device_handler: callback for device actions.
    """

    def __init__(self, language_code, device_model_id, device_id,
                 conversation_stream,
                 channel, deadline_sec, device_handler):
        self.language_code = language_code
        self.device_model_id = device_model_id
        self.device_id = device_id
        self.conversation_stream = conversation_stream

        # Opaque blob provided in AssistResponse that,
        # when provided in a follow-up AssistRequest,
        # gives the Assistant a context marker within the current state
        # of the multi-Assist()-RPC "conversation".
        # This value, along with MicrophoneMode, supports a more natural
        # "conversation" with the Assistant.
        self.conversation_state = None

        # Create Google Assistant API gRPC client.
        self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(
            channel
        )
        self.deadline = deadline_sec

        self.device_handler = device_handler

    def __enter__(self):
        return self

    def __exit__(self, etype, e, traceback):
        if e:
            return False
        self.conversation_stream.close()

    def is_grpc_error_unavailable(e):
        is_grpc_error = isinstance(e, grpc.RpcError)
        if is_grpc_error and (e.code() == grpc.StatusCode.UNAVAILABLE):
            logging.error('grpc unavailable error: %s', e)
            return True
        return False

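    # NOTE: is_grpc_error_unavailable is referenced below at class-body time,
    # where it is still a plain function (no self involved), which matches the
    # predicate signature retry_if_exception expects.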
    @retry(reraise=True, stop=stop_after_attempt(3),
           retry=retry_if_exception(is_grpc_error_unavailable))
    def assist(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []

        self.conversation_stream.start_recording()
        logging.info('Recording audio request.')

        def iter_log_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected.')
                logging.info('Stopping recording.')
                self.conversation_stream.stop_recording()
            if resp.speech_results:
                logging.info('Transcript of user request: "%s".',
                             ' '.join(r.transcript
                                      for r in resp.speech_results))
            if len(resp.audio_out.audio_data) > 0:
                if not self.conversation_stream.playing:
                    self.conversation_stream.start_playback()
                    logging.info('Playing assistant response.')
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json
                )
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
        return continue_conversation

    def gen_assist_requests(self):
        """Yields: AssistRequest messages to send to the API."""

        dialog_state_in = embedded_assistant_pb2.DialogStateIn(
                language_code=self.language_code,
                conversation_state=b''
            )
        if self.conversation_state:
            logging.debug('Sending conversation state.')
            dialog_state_in.conversation_state = self.conversation_state
        config = embedded_assistant_pb2.AssistConfig(
            audio_in_config=embedded_assistant_pb2.AudioInConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversation_stream.sample_rate,
            ),
            audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversation_stream.sample_rate,
                volume_percentage=self.conversation_stream.volume_percentage,
            ),
            dialog_state_in=dialog_state_in,
            device_config=embedded_assistant_pb2.DeviceConfig(
                device_id=self.device_id,
                device_model_id=self.device_model_id,
            )
        )
        # The first AssistRequest must contain the AssistConfig
        # and no audio data.
        yield embedded_assistant_pb2.AssistRequest(config=config)
        for data in self.conversation_stream:
            # Subsequent requests need audio data, but not config.
            yield embedded_assistant_pb2.AssistRequest(audio_in=data)
Example 33
class GoogleAPIResource(Resource):

    # Names of the get and update methods. Most are the same but override in
    # the Resource if necessary
    resource_property = None
    get_method = "get"
    update_method = "update"

    # If a resource is not in a ready state, we can't update it. If we retrieve
    # it, and the state changes, updates will be rejected because the ETAG will
    # have changed. If a resource defines readiness criteria, the get() call
    # will wait until the resource is in a ready state to return
    #
    # Key/Value to check to see if a resource is ready
    readiness_key = None
    readiness_value = None

    def __init__(self, resource_data, **kwargs):
        full_resource_path = "{}.{}".format(self.service_name,
                                            self.resource_path)

        self.service = build_subresource(full_resource_path, self.version,
                                         **kwargs)

        # If we are a property of a resource, also get the resource we're
        # associated with
        if self.is_property():
            # Build resource data for the parent
            parent_data = resource_data.copy()
            parent_type = resource_data['resource_type'].rsplit('.', 1)[0]
            parent_data['resource_type'] = parent_type

            self.parent_resource = GoogleAPIResource.factory(
                parent_data, **kwargs)

        self.resource_data = resource_data

    def is_property(self):
        return self.resource_property is not None

    @staticmethod
    def factory(resource_data, **kargs):
        resource_type_map = {
            'bigquery.datasets': GcpBigqueryDataset,
            'compute.instances': GcpComputeInstance,
            'cloudresourcemanager.projects': GcpProject,
            'cloudresourcemanager.projects.iam': GcpProjectIam,
            'pubsub.projects.subscriptions': GcpPubsubSubscription,
            'pubsub.projects.subscriptions.iam': GcpPubsubSubscriptionIam,
            'pubsub.projects.topics': GcpPubsubTopic,
            'pubsub.projects.topics.iam': GcpPubsubTopicIam,
            'sqladmin.instances': GcpSqlInstance,
            'storage.buckets': GcpStorageBucket,
            'storage.buckets.iam': GcpStorageBucketIamPolicy
        }

        resource_type = resource_data.get('resource_type')
        if not resource_type:
            assert 0, 'Unrecognized resource'

        if resource_type not in resource_type_map:
            assert 0, 'Unrecognized resource'

        cls = resource_type_map.get(resource_type)
        return cls(resource_data, **kargs)

    def type(self):
        type_components = ["gcp", self.service_name, self.resource_path]

        # Things like IAM policy are not separate resources, but rather
        # properties of a resource. We may want to evaluate policy on these
        # properties, so we represent them as resources and need to distinguish
        # them in the resource type.
        if self.is_property():
            type_components.append(self.resource_property)

        return ".".join(type_components)

    # Google's documentation describes what it calls a 'full resource name' for
    # resources. None of the APIs seem to implement it (except Cloud Asset
    # Inventory). This attempts to generate it from the discovery-based api
    # client's generated http request url.
    #
    # If we inject it into the resource, we can use it in policy evaluation to
    # simplify the structure of our policies
    def full_resource_name(self):

        # If this is a resource property, return the resource's frn instead
        if self.is_property():
            return self.parent_resource.full_resource_name()

        method = getattr(self.service, self.get_method)
        uri = method(**self._get_request_args()).uri

        uri_parsed = urlparse(uri)
        domain = uri_parsed.netloc
        path_segments = uri_parsed.path[1:].split('/')

        # First we need the name of the api
        if domain.startswith("www."):
            # we need to get the api name from the path
            api_name = path_segments.pop(0)
        else:
            # the api name is the first segment of the domain
            api_name = domain.split('.')[0]

            # occasionally the compute api baseUrl is returned as
            # compute.googleapis.com/compute, in which case we need to remove
            # the duplicated api reference
            if api_name == path_segments[0]:
                path_segments.pop(0)

        # Remove the version from the path
        path_segments.pop(0)

        # Remove method from the last path segment
        if ":" in path_segments[-1]:
            path_segments[-1] = path_segments[-1].split(":")[0]

        # Annoying resource-specific fixes
        if api_name == 'storage' and path_segments[0] == 'b':
            path_segments[0] = "buckets"

        resource_path = "/".join(path_segments)

        return "//{}.googleapis.com/{}".format(api_name, resource_path)

    def get(self):
        method = getattr(self.service, self.get_method)

        # If the resource has readiness criteria, wait for it
        if self.readiness_key and self.readiness_value:
            waiter = Waiter(method, **self._get_request_args())
            asset = waiter.wait(self.readiness_key,
                                self.readiness_value,
                                interval=7,
                                retries=60)
        else:
            asset = method(**self._get_request_args()).execute()

        asset['_full_resource_name'] = self.full_resource_name()

        # if this asset is a property, inject its parent
        if self.is_property():
            parent = self.parent_resource.get()
            asset['_resource'] = parent
        return asset

    @tenacity.retry(retry=tenacity.retry_if_exception(is_retryable_exception),
                    wait=tenacity.wait_random_exponential(multiplier=1,
                                                          max=10),
                    stop=tenacity.stop_after_attempt(10))
    def update(self, body):

        # remove injected data before attempting update
        for key in list(body):
            if key.startswith('_'):
                del body[key]

        method = getattr(self.service, self.update_method)
        return method(**self._update_request_args(body)).execute()
Example 34
class NovaClientPlugin(client_plugin.ClientPlugin):

    deferred_server_statuses = [
        'BUILD', 'HARD_REBOOT', 'PASSWORD', 'REBOOT', 'RESCUE', 'RESIZE',
        'REVERT_RESIZE', 'SHUTOFF', 'SUSPENDED', 'VERIFY_RESIZE'
    ]

    exceptions_module = exceptions

    NOVA_API_VERSION = '2.1'

    validate_versions = [V2_2, V2_8, V2_10, V2_15, V2_26, V2_37
                         ] = ['2.2', '2.8', '2.10', '2.15', '2.26', '2.37']

    supported_versions = [NOVA_API_VERSION] + validate_versions

    service_types = [COMPUTE] = ['compute']

    def _get_service_name(self):
        return self.COMPUTE

    def _create(self, version=None):
        if not version:
            # TODO(prazumovsky): remove all unexpected calls from tests and
            # add default_version after that.
            version = self.NOVA_API_VERSION
        endpoint_type = self._get_client_option(CLIENT_NAME, 'endpoint_type')
        extensions = nc.discover_extensions(version)

        args = {
            'session':
            self.context.keystone_session,
            'extensions':
            extensions,
            'endpoint_type':
            endpoint_type,
            'service_type':
            self.COMPUTE,
            'region_name':
            self._get_region_name(),
            'http_log_debug':
            self._get_client_option(CLIENT_NAME, 'http_log_debug')
        }

        client = nc.Client(version, **args)
        # NOTE: check for microversion availability
        if version in self.validate_versions:
            try:
                client.versions.get_current()
            except exceptions.NotAcceptable:
                raise exception.InvalidServiceVersion(service=self.COMPUTE,
                                                      version=version)

        return client

    def is_not_found(self, ex):
        return isinstance(ex, exceptions.NotFound)

    def is_over_limit(self, ex):
        return isinstance(ex, exceptions.OverLimit)

    def is_bad_request(self, ex):
        return isinstance(ex, exceptions.BadRequest)

    def is_conflict(self, ex):
        return isinstance(ex, exceptions.Conflict)

    def is_unprocessable_entity(self, ex):
        http_status = (getattr(ex, 'http_status', None)
                       or getattr(ex, 'code', None))
        return (isinstance(ex, exceptions.ClientException)
                and http_status == 422)

    @tenacity.retry(stop=tenacity.stop_after_attempt(
        max(cfg.CONF.client_retry_limit + 1, 0)),
                    retry=tenacity.retry_if_exception(
                        client_plugin.retry_if_connection_err),
                    reraise=True)
    def get_server(self, server):
        """Return fresh server object.

        Substitutes Nova's NotFound for Heat's EntityNotFound,
        to be returned to user as HTTP error.
        """
        try:
            return self.client().servers.get(server)
        except exceptions.NotFound:
            raise exception.EntityNotFound(entity='Server', name=server)

    def fetch_server(self, server_id):
        """Fetch fresh server object from Nova.

        Log warnings and return None for non-critical API errors.
        Use this method in various ``check_*_complete`` resource methods,
        where intermittent errors can be tolerated.
        """
        server = None
        try:
            server = self.client().servers.get(server_id)
        except exceptions.OverLimit as exc:
            LOG.warning(
                "Received an OverLimit response when "
                "fetching server (%(id)s) : %(exception)s", {
                    'id': server_id,
                    'exception': exc
                })
        except exceptions.ClientException as exc:
            if ((getattr(exc, 'http_status', getattr(exc, 'code', None))
                 in (500, 503))):
                LOG.warning(
                    "Received the following exception when "
                    "fetching server (%(id)s) : %(exception)s", {
                        'id': server_id,
                        'exception': exc
                    })
            else:
                raise
        return server

    def refresh_server(self, server):
        """Refresh server's attributes.

        Also log warnings for non-critical API errors.
        """
        try:
            server.get()
        except exceptions.OverLimit as exc:
            LOG.warning(
                "Server %(name)s (%(id)s) received an OverLimit "
                "response during server.get(): %(exception)s", {
                    'name': server.name,
                    'id': server.id,
                    'exception': exc
                })
        except exceptions.ClientException as exc:
            if ((getattr(exc, 'http_status', getattr(exc, 'code', None))
                 in (500, 503))):
                LOG.warning(
                    'Server "%(name)s" (%(id)s) received the '
                    'following exception during server.get(): '
                    '%(exception)s', {
                        'name': server.name,
                        'id': server.id,
                        'exception': exc
                    })
            else:
                raise

    def get_ip(self, server, net_type, ip_version):
        """Return the server's IP of the given type and version."""
        if net_type in server.addresses:
            for ip in server.addresses[net_type]:
                if ip['version'] == ip_version:
                    return ip['addr']

    def get_status(self, server):
        """Return the server's status.

        :param server: server object
        :returns: status as a string
        """
        # Some clouds append an extra (STATUS) string to the status; strip it
        return server.status.split('(')[0]

    def _check_active(self, server, res_name='Server'):
        """Check server status.

        Accepts both server IDs and server objects.
        Returns True if the server is ACTIVE,
        raises an exception if it is in an ERROR or other status unknown
        to Heat, and returns False otherwise.

        :param res_name: name of the resource to use in the exception message

        """
        # not checking with is_uuid_like as most tests use strings e.g. '1234'
        if isinstance(server, six.string_types):
            server = self.fetch_server(server)
            if server is None:
                return False
            else:
                status = self.get_status(server)
        else:
            status = self.get_status(server)
            if status != 'ACTIVE':
                self.refresh_server(server)
                status = self.get_status(server)

        if status in self.deferred_server_statuses:
            return False
        elif status == 'ACTIVE':
            return True
        elif status == 'ERROR':
            fault = getattr(server, 'fault', {})
            raise exception.ResourceInError(
                resource_status=status,
                status_reason=_("Message: %(message)s, Code: %(code)s") % {
                    'message': fault.get('message', _('Unknown')),
                    'code': fault.get('code', _('Unknown'))
                })
        else:
            raise exception.ResourceUnknownStatus(
                resource_status=server.status,
                result=_('%s is not active') % res_name)

    def find_flavor_by_name_or_id(self, flavor):
        """Find the specified flavor by name or id.

        :param flavor: the name or id of the flavor to find
        :returns: the id of :flavor:
        """
        return self._find_flavor_id(self.context.tenant_id, flavor)

    @os_client.MEMOIZE_FINDER
    def _find_flavor_id(self, tenant_id, flavor):
        # tenant id in the signature is used for the memoization key,
        # that would differentiate similar resource names across tenants.
        return self.get_flavor(flavor).id

    def get_flavor(self, flavor_identifier):
        """Get the flavor object for the specified flavor name or id.

        :param flavor_identifier: the name or id of the flavor to find
        :returns: the flavor object matching :flavor_identifier:
        """
        try:
            flavor = self.client().flavors.get(flavor_identifier)
        except exceptions.NotFound:
            flavor = self.client().flavors.find(name=flavor_identifier)

        return flavor

    def get_host(self, host_name):
        """Get the host id specified by name.

        :param host_name: the name of host to find
        :returns: the list of match hosts
        :raises: exception.EntityNotFound
        """

        host_list = self.client().hosts.list()
        for host in host_list:
            if host.host_name == host_name and host.service == self.COMPUTE:
                return host

        raise exception.EntityNotFound(entity='Host', name=host_name)

    def get_keypair(self, key_name):
        """Get the public key specified by :key_name:

        :param key_name: the name of the key to look for
        :returns: the keypair (name, public_key) for :key_name:
        :raises: exception.EntityNotFound
        """
        try:
            return self.client().keypairs.get(key_name)
        except exceptions.NotFound:
            raise exception.EntityNotFound(entity='Key', name=key_name)

    def build_userdata(self,
                       metadata,
                       userdata=None,
                       instance_user=None,
                       user_data_format='HEAT_CFNTOOLS'):
        """Build multipart data blob for CloudInit.

        Data blob includes user-supplied Metadata, user data, and the required
        Heat in-instance configuration.

        :param metadata: user-supplied metadata to include in the blob
        :type metadata: dict
        :param userdata: user data string
        :type userdata: str or None
        :param instance_user: the user to create on the server
        :type instance_user: string
        :param user_data_format: Format of user data to return
        :type user_data_format: string
        :returns: multipart mime as a string
        """

        if user_data_format == 'RAW':
            return userdata

        is_cfntools = user_data_format == 'HEAT_CFNTOOLS'
        is_software_config = user_data_format == 'SOFTWARE_CONFIG'

        def make_subpart(content, filename, subtype=None):
            if subtype is None:
                subtype = os.path.splitext(filename)[0]
            if content is None:
                content = ''
            try:
                content.encode('us-ascii')
                charset = 'us-ascii'
            except UnicodeEncodeError:
                charset = 'utf-8'
            msg = (text.MIMEText(content, _subtype=subtype, _charset=charset)
                   if subtype else text.MIMEText(content, _charset=charset))

            msg.add_header('Content-Disposition',
                           'attachment',
                           filename=filename)
            return msg

        def read_cloudinit_file(fn):
            return pkgutil.get_data('heat',
                                    'cloudinit/%s' % fn).decode('utf-8')

        if instance_user:
            config_custom_user = 'user: %s' % instance_user
            # FIXME(shadower): compatibility workaround for cloud-init 0.6.3.
            # We can drop this once we stop supporting 0.6.3 (which ships
            # with Ubuntu 12.04 LTS).
            #
            # See bug https://bugs.launchpad.net/heat/+bug/1257410
            boothook_custom_user = r"""useradd -m %s
echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
""" % (instance_user, instance_user)
        else:
            config_custom_user = ''
            boothook_custom_user = ''

        cloudinit_config = string.Template(
            read_cloudinit_file('config')).safe_substitute(
                add_custom_user=config_custom_user)
        cloudinit_boothook = string.Template(
            read_cloudinit_file('boothook.sh')).safe_substitute(
                add_custom_user=boothook_custom_user)

        attachments = [(cloudinit_config, 'cloud-config'),
                       (cloudinit_boothook, 'boothook.sh', 'cloud-boothook'),
                       (read_cloudinit_file('part_handler.py'),
                        'part-handler.py')]
        if is_cfntools:
            attachments.append((userdata, 'cfn-userdata', 'x-cfninitdata'))
        elif is_software_config:
            # attempt to parse userdata as a multipart message, and if it
            # is, add each part as an attachment
            userdata_parts = None
            try:
                userdata_parts = email.message_from_string(userdata)
            except Exception:
                pass
            if userdata_parts and userdata_parts.is_multipart():
                for part in userdata_parts.get_payload():
                    attachments.append(
                        (part.get_payload(), part.get_filename(),
                         part.get_content_subtype()))
            else:
                attachments.append((userdata, ''))

        if is_cfntools:
            attachments.append((read_cloudinit_file('loguserdata.py'),
                                'loguserdata.py', 'x-shellscript'))

        if metadata:
            attachments.append(
                (jsonutils.dumps(metadata), 'cfn-init-data', 'x-cfninitdata'))

        heat_client_plugin = self.context.clients.client_plugin('heat')
        watch_url = cfg.CONF.heat_watch_server_url
        if not watch_url:
            watch_url = heat_client_plugin.get_watch_server_url()

        attachments.append((watch_url, 'cfn-watch-server', 'x-cfninitdata'))

        if is_cfntools:
            cfn_md_url = heat_client_plugin.get_cfn_metadata_server_url()
            attachments.append(
                (cfn_md_url, 'cfn-metadata-server', 'x-cfninitdata'))

            # Create a boto config which the cfntools on the host use to
            # know where the cfn and cloudwatch APIs can be accessed
            cfn_url = urlparse.urlparse(cfn_md_url)
            cw_url = urlparse.urlparse(watch_url)
            is_secure = cfg.CONF.instance_connection_is_secure
            vcerts = cfg.CONF.instance_connection_https_validate_certificates
            boto_cfg = "\n".join([
                "[Boto]", "debug = 0",
                "is_secure = %s" % is_secure,
                "https_validate_certificates = %s" % vcerts,
                "cfn_region_name = heat",
                "cfn_region_endpoint = %s" % cfn_url.hostname,
                "cloudwatch_region_name = heat",
                "cloudwatch_region_endpoint = %s" % cw_url.hostname
            ])
            attachments.append((boto_cfg, 'cfn-boto-cfg', 'x-cfninitdata'))

        subparts = [make_subpart(*args) for args in attachments]
        mime_blob = multipart.MIMEMultipart(_subparts=subparts)

        return mime_blob.as_string()
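
    # Usage sketch (illustrative values, not taken from this file): the
    # returned string is what gets passed to Nova as the server's user data:
    #
    #     blob = self.build_userdata({'cfn': {}}, userdata='#!/bin/sh',
    #                                user_data_format='HEAT_CFNTOOLS')
    #     # blob is a multipart MIME string, ready for servers.create(...)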

    def check_delete_server_complete(self, server_id):
        """Wait for server to disappear from Nova."""
        try:
            server = self.fetch_server(server_id)
        except Exception as exc:
            self.ignore_not_found(exc)
            return True
        if not server:
            return False
        task_state_in_nova = getattr(server, 'OS-EXT-STS:task_state', None)
        # the server's status won't change until the delete task has completed
        if task_state_in_nova == 'deleting':
            return False

        status = self.get_status(server)
        if status in ("DELETED", "SOFT_DELETED"):
            return True
        if status == 'ERROR':
            fault = getattr(server, 'fault', {})
            message = fault.get('message', 'Unknown')
            code = fault.get('code')
            errmsg = _("Server %(name)s delete failed: (%(code)s) "
                       "%(message)s") % dict(
                           name=server.name, code=code, message=message)
            raise exception.ResourceInError(resource_status=status,
                                            status_reason=errmsg)
        return False

    def rename(self, server, name):
        """Update the name for a server."""
        server.update(name)

    def resize(self, server_id, flavor_id):
        """Resize the server."""
        server = self.fetch_server(server_id)
        if server:
            server.resize(flavor_id)
            return True
        else:
            return False

    def check_resize(self, server_id, flavor):
        """Verify that a resizing server is properly resized.

        If that's the case, confirm the resize, if not raise an error.
        """
        server = self.fetch_server(server_id)
        # The resize operation is asynchronous, so it may not have started
        # yet when we check the server status (the server may still be
        # ACTIVE rather than RESIZE).
        if not server or server.status in ('RESIZE', 'ACTIVE'):
            return False
        if server.status == 'VERIFY_RESIZE':
            return True
        else:
            raise exception.Error(
                _("Resizing to '%(flavor)s' failed, status '%(status)s'") %
                dict(flavor=flavor, status=server.status))

    def verify_resize(self, server_id):
        server = self.fetch_server(server_id)
        if not server:
            return False
        status = self.get_status(server)
        if status == 'VERIFY_RESIZE':
            server.confirm_resize()
            return True
        else:
            msg = _("Could not confirm resize of server %s") % server_id
            raise exception.ResourceUnknownStatus(result=msg,
                                                  resource_status=status)

    def check_verify_resize(self, server_id):
        server = self.fetch_server(server_id)
        if not server:
            return False
        status = self.get_status(server)
        if status == 'ACTIVE':
            return True
        if status == 'VERIFY_RESIZE':
            return False
        else:
            msg = _("Confirm resize for server %s failed") % server_id
            raise exception.ResourceUnknownStatus(result=msg,
                                                  resource_status=status)

    def rebuild(self,
                server_id,
                image_id,
                password=None,
                preserve_ephemeral=False,
                meta=None,
                files=None):
        """Rebuild the server and call check_rebuild to verify."""
        server = self.fetch_server(server_id)
        if server:
            server.rebuild(image_id,
                           password=password,
                           preserve_ephemeral=preserve_ephemeral,
                           meta=meta,
                           files=files)
            return True
        else:
            return False

    def check_rebuild(self, server_id):
        """Verify that a rebuilding server is rebuilt.

        Raise error if it ends up in an ERROR state.
        """
        server = self.fetch_server(server_id)
        if server is None or server.status == 'REBUILD':
            return False
        if server.status == 'ERROR':
            raise exception.Error(
                _("Rebuilding server failed, status '%s'") % server.status)
        else:
            return True

    def meta_serialize(self, metadata):
        """Serialize non-string metadata values before sending them to Nova."""
        if not isinstance(metadata, collections.Mapping):
            raise exception.StackValidationFailed(
                message=_("nova server metadata needs to be a Map."))

        return dict((key, (value if isinstance(value, six.string_types
                                               ) else jsonutils.dumps(value)))
                    for (key, value) in metadata.items())
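
    # For example (illustrative): {'a': 'b', 'n': 1, 'm': {'k': 2}} comes
    # back as {'a': 'b', 'n': '1', 'm': '{"k": 2}'}; only non-string
    # values are JSON-encoded.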

    def meta_update(self, server, metadata):
        """Delete/Add the metadata in nova as needed."""
        metadata = self.meta_serialize(metadata)
        current_md = server.metadata
        to_del = sorted(set(current_md) - set(metadata))
        client = self.client()
        if len(to_del) > 0:
            client.servers.delete_meta(server, to_del)

        client.servers.set_meta(server, metadata)

    def server_to_ipaddress(self, server):
        """Return the server's IP address, fetching it from Nova."""
        try:
            server = self.client().servers.get(server)
        except exceptions.NotFound as ex:
            LOG.warning('Instance (%(server)s) not found: %(ex)s', {
                'server': server,
                'ex': ex
            })
        else:
            for n in sorted(server.networks, reverse=True):
                if len(server.networks[n]) > 0:
                    return server.networks[n][0]

    @tenacity.retry(stop=tenacity.stop_after_attempt(
        max(cfg.CONF.client_retry_limit + 1, 0)),
                    retry=tenacity.retry_if_exception(
                        client_plugin.retry_if_connection_err),
                    reraise=True)
    def absolute_limits(self):
        """Return the absolute limits as a dictionary."""
        limits = self.client().limits.get()
        return dict([(limit.name, limit.value)
                     for limit in list(limits.absolute)])

    def get_console_urls(self, server):
        """Return dict-like structure of server's console urls.

        The actual console url is lazily resolved on access.
        """
        nc = self.client
        mks_version = self.V2_8

        class ConsoleUrls(collections.Mapping):
            def __init__(self, server):
                self.console_method = server.get_console_url
                self.support_console_types = [
                    'novnc', 'xvpvnc', 'spice-html5', 'rdp-html5', 'serial',
                    'webmks'
                ]

            def __getitem__(self, key):
                try:
                    if key not in self.support_console_types:
                        raise exceptions.UnsupportedConsoleType(key)
                    if key == 'webmks':
                        data = nc(mks_version).servers.get_console_url(
                            server, key)
                    else:
                        data = self.console_method(key)
                    console_data = data.get('remote_console',
                                            data.get('console'))
                    url = console_data['url']
                except exceptions.UnsupportedConsoleType as ex:
                    url = ex.message
                except Exception as e:
                    url = _('Cannot get console url: %s') % six.text_type(e)

                return url

            def __len__(self):
                return len(self.support_console_types)

            def __iter__(self):
                return (key for key in self.support_console_types)

        return ConsoleUrls(server)

    def attach_volume(self, server_id, volume_id, device):
        try:
            va = self.client().volumes.create_server_volume(
                server_id=server_id, volume_id=volume_id, device=device)
        except Exception as ex:
            if self.is_client_exception(ex):
                raise exception.Error(
                    _("Failed to attach volume %(vol)s to server %(srv)s "
                      "- %(err)s") % {
                          'vol': volume_id,
                          'srv': server_id,
                          'err': ex
                      })
            else:
                raise
        return va.id

    def detach_volume(self, server_id, attach_id):
        # detach the volume using volume_attachment
        try:
            self.client().volumes.delete_server_volume(server_id, attach_id)
        except Exception as ex:
            if not (self.is_not_found(ex) or self.is_bad_request(ex)):
                raise exception.Error(
                    _("Could not detach attachment %(att)s "
                      "from server %(srv)s.") % {
                          'srv': server_id,
                          'att': attach_id
                      })

    def check_detach_volume_complete(self, server_id, attach_id):
        """Check that nova server lost attachment.

        This check is needed so a volume can be reattached immediately
        when a stack is updated: there may be a delay between Cinder
        marking the volume as 'available' and Nova removing the attachment
        from its own objects, so we confirm that Nova already knows the
        volume is detached.
        """
        try:
            self.client().volumes.get_server_volume(server_id, attach_id)
        except Exception as ex:
            self.ignore_not_found(ex)
            LOG.info("Volume %(vol)s is detached from server %(srv)s", {
                'vol': attach_id,
                'srv': server_id
            })
            return True
        else:
            LOG.debug("Server %(srv)s still has attachment %(att)s.", {
                'att': attach_id,
                'srv': server_id
            })
            return False

    def interface_detach(self, server_id, port_id):
        with self.ignore_not_found:
            server = self.fetch_server(server_id)
            if server:
                server.interface_detach(port_id)
                return True

    def interface_attach(self, server_id, port_id=None, net_id=None, fip=None):
        server = self.fetch_server(server_id)
        if server:
            server.interface_attach(port_id, net_id, fip)
            return True
        else:
            return False

    @tenacity.retry(stop=tenacity.stop_after_attempt(
        cfg.CONF.max_interface_check_attempts),
                    wait=tenacity.wait_fixed(0.5),
                    retry=tenacity.retry_if_result(
                        client_plugin.retry_if_result_is_false))
    def check_interface_detach(self, server_id, port_id):
        with self.ignore_not_found:
            server = self.fetch_server(server_id)
            if server:
                interfaces = server.interface_list()
                for iface in interfaces:
                    if iface.port_id == port_id:
                        return False
        return True

    @tenacity.retry(stop=tenacity.stop_after_attempt(
        cfg.CONF.max_interface_check_attempts),
                    wait=tenacity.wait_fixed(0.5),
                    retry=tenacity.retry_if_result(
                        client_plugin.retry_if_result_is_false))
    def check_interface_attach(self, server_id, port_id):
        if not port_id:
            return True

        server = self.fetch_server(server_id)
        if server:
            interfaces = server.interface_list()
            for iface in interfaces:
                if iface.port_id == port_id:
                    return True
        return False

    @os_client.MEMOIZE_EXTENSIONS
    def _list_extensions(self):
        extensions = self.client().list_extensions.show_all()
        return set(extension.alias for extension in extensions)

    def has_extension(self, alias):
        """Check if specific extension is present."""
        return alias in self._list_extensions()
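
The client plugin above leans on two tenacity idioms: retry_if_exception
with a predicate for transient connection errors, and retry_if_result for
polling until a check returns True. A minimal, self-contained sketch of
both (all names here are illustrative, not part of the plugin):

import tenacity


class TransientError(Exception):
    """Stand-in for the connection errors retry_if_connection_err matches."""


@tenacity.retry(stop=tenacity.stop_after_attempt(3),
                retry=tenacity.retry_if_exception(
                    lambda e: isinstance(e, TransientError)),
                reraise=True)  # surface the last error, not RetryError
def fetch_limits():
    raise TransientError()  # retried twice more, then re-raised


calls = {'n': 0}


@tenacity.retry(stop=tenacity.stop_after_attempt(10),
                wait=tenacity.wait_fixed(0.5),
                retry=tenacity.retry_if_result(lambda done: done is False))
def poll_detach():
    calls['n'] += 1
    return calls['n'] >= 3  # False, False, then True: retries stop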
Example no. 35
0
File: s3.py Project: luo-zn/gnocchi
    conn = boto3.client(
        's3',
        endpoint_url=conf.s3_endpoint_url,
        region_name=conf.s3_region_name,
        aws_access_key_id=conf.s3_access_key_id,
        aws_secret_access_key=conf.s3_secret_access_key,
        config=boto_config.Config(
            max_pool_connections=conf.s3_max_pool_connections))
    return conn, conf.s3_region_name, conf.s3_bucket_prefix


# NOTE(jd) OperationAborted might be raised if several workers try to
# create the bucket for the first time at the same time
@tenacity.retry(
    stop=tenacity.stop_after_attempt(10),
    wait=tenacity.wait_fixed(0.5),
    retry=tenacity.retry_if_exception(retry_if_operationaborted)
)
def create_bucket(conn, name, region_name):
    if region_name:
        kwargs = dict(CreateBucketConfiguration={
            "LocationConstraint": region_name,
        })
    else:
        kwargs = {}
    return conn.create_bucket(Bucket=name, **kwargs)
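
# The retry_if_operationaborted predicate used above is not part of this
# excerpt. A plausible sketch, assuming botocore surfaces the S3
# "OperationAborted" error as a ClientError (an assumption, not code
# copied from the project):
#
#     import botocore.exceptions
#
#     def retry_if_operationaborted(exception):
#         return (isinstance(exception, botocore.exceptions.ClientError)
#                 and exception.response['Error'].get('Code')
#                 == "OperationAborted")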


def bulk_delete(conn, bucket, objects):
    # NOTE(jd) The maximum number of objects that can be deleted at once is 1000
    # TODO(jd) Parallelize?
    deleted = 0
Example no. 36
0
    def reply(self, reply=None, failure=None):
        """Send back reply to the RPC client
        :param reply: Dictionary, reply. In case of exception should be None
        :param failure: Tuple, should be a sys.exc_info() tuple.
            Should be None if RPC request was successfully processed.

        :return RpcReplyPikaIncomingMessage, message with reply
        """

        if self.reply_q is None:
            return

        reply_outgoing_message = RpcReplyPikaOutgoingMessage(
            self._pika_engine, self.msg_id, reply=reply, failure_info=failure,
            content_type=self._content_type,
        )

        def on_exception(ex):
            if isinstance(ex, pika_drv_exc.ConnectionException):
                LOG.warning(
                    "Connectivity related problem during reply sending. %s",
                    ex
                )
                return True
            else:
                return False

        if self._pika_engine.rpc_reply_retry_attempts:
            retrier = tenacity.retry(
                stop=(
                    tenacity.stop_never
                    if self._pika_engine.rpc_reply_retry_attempts == -1 else
                    tenacity.stop_after_attempt(
                        self._pika_engine.rpc_reply_retry_attempts
                    )
                ),
                retry=tenacity.retry_if_exception(on_exception),
                wait=tenacity.wait_fixed(
                    self._pika_engine.rpc_reply_retry_delay
                )
            )
        else:
            retrier = None

        try:
            timeout = (None if self.expiration_time is None else
                       max(self.expiration_time - time.time(), 0))
            with timeutils.StopWatch(duration=timeout) as stopwatch:
                reply_outgoing_message.send(
                    reply_q=self.reply_q,
                    stopwatch=stopwatch,
                    retrier=retrier
                )
            LOG.debug(
                "Message [id:'%s'] replied to '%s'.", self.msg_id, self.reply_q
            )
        except Exception:
            LOG.exception(
                "Message [id:'%s'] wasn't replied to : %s", self.msg_id,
                self.reply_q
            )
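
The stop clause above encodes a small convention: an
rpc_reply_retry_attempts value of -1 selects tenacity.stop_never, i.e.
retry forever, while any positive value bounds the attempt count. Reduced
to a standalone helper (a sketch; make_retrier is an assumed name, not
oslo.messaging API):

import tenacity


def make_retrier(attempts, delay_s, predicate):
    """Build a tenacity retrier; attempts == -1 means retry forever."""
    return tenacity.retry(
        stop=(tenacity.stop_never if attempts == -1
              else tenacity.stop_after_attempt(attempts)),
        retry=tenacity.retry_if_exception(predicate),
        wait=tenacity.wait_fixed(delay_s))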
Example no. 37
0
    def send(self, target, ctxt, message, wait_for_reply=None, timeout=None,
             retry=None):
        with timeutils.StopWatch(duration=timeout) as stopwatch:
            if retry is None:
                retry = self._pika_engine.default_rpc_retry_attempts

            exchange = self._pika_engine.get_rpc_exchange_name(
                target.exchange
            )

            def on_exception(ex):
                if isinstance(ex, pika_drv_exc.ExchangeNotFoundException):
                    # it is desirable to create the exchange, because if
                    # we send to an exchange which does not exist we get a
                    # ChannelClosed exception and have to reconnect
                    try:
                        self._declare_rpc_exchange(exchange, stopwatch)
                    except pika_drv_exc.ConnectionException as e:
                        LOG.warning("Problem during declaring exchange. %s", e)
                    return True
                elif isinstance(ex, (pika_drv_exc.ConnectionException,
                                     exceptions.MessageDeliveryFailure)):
                    LOG.warning("Problem during message sending. %s", ex)
                    return True
                else:
                    return False

            if retry:
                retrier = tenacity.retry(
                    stop=(tenacity.stop_never if retry == -1 else
                          tenacity.stop_after_attempt(retry)),
                    retry=tenacity.retry_if_exception(on_exception),
                    wait=tenacity.wait_fixed(self._pika_engine.rpc_retry_delay)
                )
            else:
                retrier = None

            if target.fanout:
                return self.cast_all_workers(
                    exchange, target.topic, ctxt, message, stopwatch, retrier
                )

            routing_key = self._pika_engine.get_rpc_queue_name(
                target.topic, target.server, retrier is None
            )

            msg = pika_drv_msg.RpcPikaOutgoingMessage(self._pika_engine,
                                                      message, ctxt)
            try:
                reply = msg.send(
                    exchange=exchange,
                    routing_key=routing_key,
                    reply_listener=(
                        self._reply_listener if wait_for_reply else None
                    ),
                    stopwatch=stopwatch,
                    retrier=retrier
                )
            except pika_drv_exc.ExchangeNotFoundException as ex:
                try:
                    self._declare_rpc_exchange(exchange, stopwatch)
                except pika_drv_exc.ConnectionException as e:
                    LOG.warning("Problem during declaring exchange. %s", e)
                raise ex

            if reply is not None:
                if reply.failure is not None:
                    raise reply.failure

                return reply.result
Example no. 38
0
    for rule in current_rules:
        if '-i %s' % vif in rule and '--among-src' in rule:
            ebtables(['-D', chain] + rule.split())


def _delete_mac_spoofing_protection(vifs, current_rules):
    # delete the jump rule and then delete the whole chain
    jumps = [vif for vif in vifs
             if _mac_vif_jump_present(vif, current_rules)]
    for vif in jumps:
        ebtables(['-D', 'FORWARD', '-i', vif, '-j',
                  _mac_chain_name(vif)])
    for vif in vifs:
        chain = _mac_chain_name(vif)
        if chain_exists(chain, current_rules):
            ebtables(['-X', chain])


# Used to scope ebtables commands in testing
NAMESPACE = None


@tenacity.retry(
    wait=tenacity.wait_exponential(multiplier=0.01),
    retry=tenacity.retry_if_exception(lambda e: e.returncode == 255),
    reraise=True
)
def ebtables(comm):
    execute = ip_lib.IPWrapper(NAMESPACE).netns.execute
    return execute(['ebtables', '--concurrent'] + comm, run_as_root=True)
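
The lambda above retries only when the command exits with status 255,
which ebtables appears to use when its lock is held by a concurrent
caller. A self-contained equivalent using subprocess instead of neutron's
ip_lib wrapper (a sketch under that assumption):

import subprocess

import tenacity


@tenacity.retry(
    wait=tenacity.wait_exponential(multiplier=0.01),
    retry=tenacity.retry_if_exception(
        lambda e: isinstance(e, subprocess.CalledProcessError)
        and e.returncode == 255),
    reraise=True)
def ebtables_checked(args):
    # check=True raises CalledProcessError, which carries returncode
    return subprocess.run(['ebtables', '--concurrent'] + args, check=True)

Guarding the predicate with isinstance keeps it from raising
AttributeError on exceptions that have no returncode, a hazard the bare
lambda in the snippet accepts.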
Example no. 39
0
        setattr(klass, '_libvirt_connection', connection)
    return connection


def is_disconnection_exception(e):
    if not libvirt:
        return False
    return (isinstance(e, libvirt.libvirtError)
            and e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
                                       libvirt.VIR_ERR_INTERNAL_ERROR)
            and e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
                                         libvirt.VIR_FROM_RPC))


retry_on_disconnect = tenacity.retry(
    retry=tenacity.retry_if_exception(is_disconnection_exception),
    stop=tenacity.stop_after_attempt(2))
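
# Usage sketch (hypothetical method, not from this file): retry_on_disconnect
# is a ready-made decorator, so it can wrap any callable that talks to
# libvirt and retry it once after a dropped connection:
#
#     @retry_on_disconnect
#     def inspect_instance(self, instance):
#         return self.connection.lookupByUUIDString(instance.id).info()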


def raise_nodata_if_unsupported(method):
    def inner(in_self, instance, *args, **kwargs):
        try:
            return method(in_self, instance, *args, **kwargs)
        except libvirt.libvirtError as e:
            # NOTE(sileht): At this point libvirt connection errors
            # have been re-raised as tenacity.RetryError()
            msg = _('Failed to inspect instance %(instance_uuid)s stats, '
                    'can not get info from libvirt: %(error)s') % {
                        "instance_uuid": instance.id,
                        "error": e}
            raise virt_inspector.NoDataException(msg)