Code Example #1
File: shippable_api.py  Project: jctanner/ansibullbot
    def fetch(self, url, verb='get', **kwargs):
        """return response or None in case of failure, try twice"""
        @retry(stop=stop_after_attempt(2), wait=wait_fixed(2))
        def _fetch(verb='get'):
            headers = {
                'Authorization': 'apiToken %s' % C.DEFAULT_SHIPPABLE_TOKEN
            }

            logging.info(u'%s %s' % (verb, url))
            http_method = getattr(requests, verb)
            resp = http_method(url, headers=headers, **kwargs)
            logging.info(u'shippable status code: %s' % resp.status_code)
            logging.info(u'shippable reason: %s' % resp.reason)

            if resp.status_code not in [200, 302, 400]:
                logging.error(u'RC: %s', resp.status_code)
                raise TryAgain

            return resp

        try:
            logging.debug(u'%s', url)
            return _fetch(verb=verb)
        except RetryError as e:
            logging.error(e)
Code Example #2
    def call_func(self, func, **kwargs):
        """General method for calling any Monasca API function."""
        @tenacity.retry(
            wait=tenacity.wait_fixed(self._retry_interval),
            stop=tenacity.stop_after_attempt(self._max_retries),
            retry=(tenacity.retry_if_exception_type(MonascaServiceException) |
                   tenacity.retry_if_exception_type(MonascaException)))
        def _inner():
            try:
                return func(**kwargs)
            except (exc.http.InternalServerError,
                    exc.http.ServiceUnavailable,
                    exc.http.BadGateway,
                    exc.connection.ConnectionError) as e:
                LOG.exception(e)
                msg = '%s: %s' % (e.__class__.__name__, e)
                raise MonascaServiceException(msg)
            except exc.http.HttpError as e:
                LOG.exception(e)
                msg = '%s: %s' % (e.__class__.__name__, e)
                status_code = e.http_status
                if not isinstance(status_code, int):
                    status_code = 500
                if 400 <= status_code < 500:
                    raise MonascaInvalidParametersException(msg)
                else:
                    raise MonascaException(msg)
            except Exception as e:
                LOG.exception(e)
                msg = '%s: %s' % (e.__class__.__name__, e)
                raise MonascaException(msg)

        return _inner()
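
The decorator is rebuilt inside call_func so that self._retry_interval and self._max_retries are read at call time. A roughly equivalent sketch using a tenacity.Retrying object directly (a hypothetical helper, not part of the Monasca source, and it omits the exception-translation logic of _inner above):

import tenacity

def call_with_retry(func, retry_interval, max_retries, **kwargs):
    # Retrying objects are callable: func(**kwargs) is re-invoked on every attempt.
    retryer = tenacity.Retrying(
        wait=tenacity.wait_fixed(retry_interval),
        stop=tenacity.stop_after_attempt(max_retries),
        retry=tenacity.retry_if_exception_type(
            (MonascaServiceException, MonascaException)))
    return retryer(func, **kwargs)
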
Code Example #3
File: coordination.py  Project: openstack/tooz
 def _beat_forever_until_stopped(self):
     """Inner beating loop."""
     retry = tenacity.Retrying(
         wait=tenacity.wait_fixed(1),
         before_sleep=tenacity.before_sleep_log(LOG, logging.warning),
     )
     while not self._dead.is_set():
         with timeutils.StopWatch() as w:
             wait_until_next_beat = retry(self._driver.heartbeat)
         ran_for = w.elapsed()
         has_to_sleep_for = wait_until_next_beat - ran_for
         if has_to_sleep_for < 0:
             LOG.warning(
                 "Heartbeating took too long to execute (it ran for"
                 " %0.2f seconds which is %0.2f seconds longer than"
                 " the next heartbeat idle time). This may cause"
                 " timeouts (in locks, leadership, ...) to"
                 " happen (which will not end well).", ran_for,
                 ran_for - wait_until_next_beat)
         self._beats += 1
         # NOTE(harlowja): use the event object for waiting and
         # not a sleep function since doing that will allow this code
         # to terminate early if stopped via the stop() method vs
         # having to wait until the sleep function returns.
         # NOTE(jd): Wait for only the half time of what we should.
         # This is a measure of safety, better be too soon than too late.
         self._dead.wait(has_to_sleep_for / 2.0)
Code Example #4
File: utils.py  Project: openstack/panko
def _safe_mongo_call(max_retries, retry_interval):
    return tenacity.retry(
        retry=tenacity.retry_if_exception_type(
            pymongo.errors.AutoReconnect),
        wait=tenacity.wait_fixed(retry_interval),
        stop=(tenacity.stop_after_attempt(max_retries) if max_retries >= 0
              else tenacity.stop_never)
    )
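
A hedged usage sketch of the factory above (the event-insertion function and the pymongo collection are hypothetical, not part of the panko source):

@_safe_mongo_call(max_retries=3, retry_interval=2)
def insert_event(collection, event):
    # Retried automatically while pymongo raises AutoReconnect.
    return collection.insert_one(event)
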
Code Example #5
 def decorator(func):
     @tenacity.retry(
         retry=tenacity.retry_if_exception(retry_on_retriable_kafka_error),
         wait=tenacity.wait_fixed(1),
         stop=tenacity.stop_after_attempt(retries),
         reraise=True
     )
     def wrapper(*args, **kwargs):
         return func(*args, **kwargs)
     return wrapper
Code Example #6
File: __init__.py  Project: cloudbase/ceilometer
def get_connection_from_config(conf):
    retries = conf.database.max_retries

    @tenacity.retry(
        wait=tenacity.wait_fixed(conf.database.retry_interval),
        stop=(tenacity.stop_after_attempt(retries) if retries >= 0
              else tenacity.stop_never),
        reraise=True)
    def _inner():
        url = (getattr(conf.database, 'metering_connection') or
               conf.database.connection)
        return get_connection(conf, url)

    return _inner()
Code Example #7
File: __init__.py  Project: andymcc/ceilometer
def get_connection_from_config(conf, purpose='metering'):
    retries = conf.database.max_retries

    @tenacity.retry(
        wait=tenacity.wait_fixed(conf.database.retry_interval),
        stop=(tenacity.stop_after_attempt(retries) if retries >= 0
              else tenacity.stop_never),
        reraise=True)
    def _inner():
        namespace = 'ceilometer.%s.storage' % purpose
        url = (getattr(conf.database, '%s_connection' % purpose) or
               conf.database.connection)
        return get_connection(conf, url, namespace)

    return _inner()
Code Example #8
File: __init__.py  Project: openstack/panko
def get_connection_from_config(conf):
    retries = conf.database.max_retries

    @tenacity.retry(
        reraise=True,
        wait=tenacity.wait_fixed(conf.database.retry_interval),
        stop=(tenacity.stop_after_attempt(retries) if retries >= 0
              else tenacity.stop_never)
    )
    def _inner():
        url = (conf.database.connection or
               getattr(conf.database, 'event_connection', None))
        return get_connection(url, conf)

    return _inner()
Code Example #9
File: base.py  Project: GrovoLearning/mesos
def wait_for_task(master, name, state, delay=1):
    """
    Wait for a task with a certain name to be in a given state.
    """
    @retry(wait=wait_fixed(0.2), stop=stop_after_delay(delay))
    def _wait_for_task():
        tasks = http.get_json(master.addr, "tasks")["tasks"]
        for task in tasks:
            if task["name"] == name and task["state"] == state:
                return task
        raise Exception()

    try:
        return _wait_for_task()
    except Exception:
        raise CLIException("Timeout waiting for task expired")
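
This example bounds retries by elapsed time (stop_after_delay) rather than by attempt count. tenacity also allows combining the two with |, as in this minimal sketch (the polling body is hypothetical):

from tenacity import retry, stop_after_attempt, stop_after_delay, wait_fixed

@retry(wait=wait_fixed(0.2),
       stop=(stop_after_attempt(5) | stop_after_delay(10)))
def poll_until_ready():
    # Stops after 5 attempts or 10 seconds, whichever comes first.
    raise Exception("not ready yet")
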
Code Example #10
    def __init__(self, conf):
        super(GnocchiDispatcher, self).__init__(conf)
        self.conf = conf
        self.filter_service_activity = (
            conf.dispatcher_gnocchi.filter_service_activity)
        self._ks_client = keystone_client.get_client(conf)
        self.resources_definition = self._load_resources_definitions(conf)

        self.cache = None
        try:
            import oslo_cache
            oslo_cache.configure(self.conf)
            # NOTE(cdent): The default cache backend is a real but
            # noop backend. We don't want to use that here because
            # we want to avoid the cache pathways entirely if the
            # cache has not been configured explicitly.
            if self.conf.cache.enabled:
                cache_region = oslo_cache.create_region()
                self.cache = oslo_cache.configure_cache_region(
                    self.conf, cache_region)
                self.cache.key_mangler = cache_key_mangler
        except ImportError:
            pass
        except oslo_cache.exception.ConfigurationError as exc:
            LOG.warning(_LW('unable to configure oslo_cache: %s') % exc)

        self._gnocchi_project_id = None
        self._gnocchi_project_id_lock = threading.Lock()
        self._gnocchi_resource_lock = LockedDefaultDict(threading.Lock)

        self._gnocchi = gnocchi_client.get_gnocchiclient(conf)

        retries = conf.storage.max_retries

        @tenacity.retry(
            wait=tenacity.wait_fixed(conf.storage.retry_interval),
            stop=(tenacity.stop_after_attempt(retries) if retries >= 0
                  else tenacity.stop_never),
            reraise=True)
        def _get_connection():
            self._gnocchi.capabilities.list()

        try:
            _get_connection()
        except Exception:
            LOG.error(_LE('Failed to connect to Gnocchi.'))
            raise
Code Example #11
File: impl_pika.py  Project: ozamiatin/oslo.messaging
    def send_notification(self, target, ctxt, message, version, retry=None):
        if retry is None:
            retry = self._pika_engine.default_notification_retry_attempts

        def on_exception(ex):
            if isinstance(ex, (pika_drv_exc.ExchangeNotFoundException,
                               pika_drv_exc.RoutingException)):
                LOG.warning("Problem during sending notification. %s", ex)
                try:
                    self._declare_notification_queue_binding(target)
                except pika_drv_exc.ConnectionException as e:
                    LOG.warning("Problem during declaring notification queue "
                                "binding. %s", e)
                return True
            elif isinstance(ex, (pika_drv_exc.ConnectionException,
                                 pika_drv_exc.MessageRejectedException)):
                LOG.warning("Problem during sending notification. %s", ex)
                return True
            else:
                return False

        if retry:
            retrier = tenacity.retry(
                stop=(tenacity.stop_never if retry == -1 else
                      tenacity.stop_after_attempt(retry)),
                retry=tenacity.retry_if_exception(on_exception),
                wait=tenacity.wait_fixed(
                    self._pika_engine.notification_retry_delay
                )
            )
        else:
            retrier = None

        msg = pika_drv_msg.PikaOutgoingMessage(self._pika_engine, message,
                                               ctxt)
        return msg.send(
            exchange=(
                target.exchange or
                self._pika_engine.default_notification_exchange
            ),
            routing_key=target.topic,
            confirm=True,
            mandatory=True,
            persistent=self._pika_engine.notification_persistence,
            retrier=retrier
        )
Code Example #12
File: __init__.py  Project: openstack/aodh
def get_connection_from_config(conf):
    retries = conf.database.max_retries
    url = conf.database.connection
    connection_scheme = urlparse.urlparse(url).scheme
    LOG.debug('looking for %(name)r driver in %(namespace)r',
              {'name': connection_scheme, 'namespace': _NAMESPACE})
    mgr = driver.DriverManager(_NAMESPACE, connection_scheme)

    @tenacity.retry(
        wait=tenacity.wait_fixed(conf.database.retry_interval),
        stop=tenacity.stop_after_attempt(retries if retries >= 0 else 5),
        reraise=True)
    def _get_connection():
        """Return an open connection to the database."""
        return mgr.driver(conf, url)

    return _get_connection()
Code Example #13

def check_headers_available(headers):
    product = '854239'
    country_iso = '490'
    res = fetch_tariff_excel_response(product, country_iso, headers)
    df = pd.read_excel(BytesIO(res.content))
    try:
        assert len(df) > 0
        return True
    except Exception:
        return False


@retry(stop=stop_after_attempt(10),
       wait=wait_fixed(10) + wait_exponential(multiplier=1, max=20))
def reload_available_headers():
    headers = get_new_headers()
    headers_is_available = check_headers_available(headers)
    if headers_is_available:
        return headers
    else:
        raise RuntimeError('fetched headers are not usable')


def get_decent_headers():
    try:
        with open(CACHED_HEADERS, 'r') as f:
            headers = json.load(f)
    except (json.JSONDecodeError, FileNotFoundError) as e:
        headers = reload_available_headers()
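
The decorator on reload_available_headers above sums two wait strategies, which tenacity supports via +. A minimal standalone sketch of the same idea (the numbers are illustrative only):

import tenacity

@tenacity.retry(stop=tenacity.stop_after_attempt(5),
                wait=tenacity.wait_fixed(2) +
                     tenacity.wait_exponential(multiplier=1, max=10))
def flaky_call():
    # Each retry waits a fixed 2 seconds plus an exponential back-off
    # capped at 10 seconds.
    raise RuntimeError("still failing")
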
Code Example #14
class Checker:
    def __init__(self, chain_info, producer):
        self.chain_info = chain_info
        self.wrong_chain_id = False
        self.producer_info = producer
        self.org_name = self.producer_info['owner']
        self.bp_json = None
        self.patroneos = 0
        self.status = 0
        self.errors = []
        self.oks = []
        self.warnings = []
        self.endpoint_errors = {}
        self.endpoint_oks = {}
        self.healthy_api_endpoints = []
        self.healthy_p2p_endpoints = []
        self.healthy_history_endpoints = []
        self.healthy_hyperion_endpoints = []
        self.healthy_atomic_endpoints = []
        self.healthy_ipfs_endpoints = []
        self.ipfs_errors = []
        self.nodes = []
        self.endpoints = []

    @retry(stop=stop_after_attempt(2), wait=wait_fixed(2), reraise=True)
    def get_producer_chainsjson_path(self, url, chain_id, timeout):
        time.sleep(DELAY)
        try:
            chains_json_content = requests.get(url, timeout=timeout).json()
            return chains_json_content['chains'][chain_id]
        except Exception as e:
            print('Error getting chains.json from {}: {}'.format(url, e))
            return None

    @retry(stop=stop_after_attempt(2), wait=wait_fixed(2), reraise=True)
    def get_bpjson(self, timeout):
        time.sleep(DELAY)
        has_ssl_endpoints = False
        has_p2p_endpoints = False
        has_api_endpoints = False

        #Check if network is defined in chains.json
        chains_json_path = self.get_producer_chainsjson_path(
            self.producer_info['chains_json_url'], self.chain_info['chain_id'],
            timeout)

        if chains_json_path:
            self.producer_info['bp_json_url'] = urljoin(
                self.producer_info['url'], chains_json_path)

        try:
            headers = {
                'User-Agent':
                'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
            }
            response = requests.get(self.producer_info['bp_json_url'],
                                    headers=headers,
                                    timeout=timeout)

            if response.status_code != 200:
                msg = ('Error getting bp.json: {} - {}'.format(
                    response.status_code,
                    (response.text[:75] +
                     '..') if len(response.text) > 75 else response.text))
                print(msg)
                self.errors.append(msg)
                self.status = 2
                return

            self.bp_json = response.json()

            if 'github_user' not in self.bp_json['org']:
                msg = 'github_user missing in bp.json'
                print(msg)
                self.warnings.append(msg)
            else:
                msg = 'github_user present in bp.json'
                self.oks.append(msg)

            nodes = self.bp_json['nodes']
            for index, node in enumerate(nodes):
                if 'node_type' not in node:
                    msg = 'node_type not present for node {}'.format(index + 1)
                    print(msg)
                    self.errors.append(msg)
                    self.status = 2
                    continue
                node_type = node['node_type']
                if type(node_type) is str:
                    node_type = [node_type]
                    node['node_type'] = node_type

                if 'features' not in node and 'query' in node_type:
                    msg = 'features not present for node {} of type query'.format(
                        index + 1)
                    print(msg)
                    self.errors.append(msg)
                    self.status = 2
                    continue

                if 'api_endpoint' in node:
                    self.endpoint_errors[node['api_endpoint']] = []
                    self.endpoint_oks[node['api_endpoint']] = []
                    self.endpoints.append(node['api_endpoint'])
                if 'ssl_endpoint' in node:
                    has_ssl_endpoints = True
                    self.endpoint_errors[node['ssl_endpoint']] = []
                    self.endpoint_oks[node['ssl_endpoint']] = []
                    self.endpoints.append(node['ssl_endpoint'])
                if 'p2p_endpoint' in node:
                    has_p2p_endpoints = True
                    self.endpoint_errors[node['p2p_endpoint']] = []
                    self.endpoint_oks[node['p2p_endpoint']] = []
                    self.endpoints.append(node['p2p_endpoint'])

                if 'features' in node:
                    if 'chain-api' in node['features']:
                        has_api_endpoints = True
                if 'query' in node_type or 'seed' in node_type:
                    self.nodes.append(node)
            self.endpoints = list(set(self.endpoints))
            self.org_name = self.bp_json['org']['candidate_name']

            if not has_ssl_endpoints:
                msg = 'No SSL api nodes defined (ssl_endpoint)'
                self.errors.append(msg)
                print(msg)

            if not has_p2p_endpoints:
                self.status = 2
                msg = 'No P2P nodes defined (p2p_endpoint)'
                self.errors.append(msg)
                print(msg)

            if not has_api_endpoints:
                msg = 'No chain api nodes defined'
                self.errors.append(msg)
                print(msg)
                self.status = 2

        except requests.exceptions.SSLError as e:
            self.status = 2
            msg = 'Error getting {} bp.json ({}): Certificate error'.format(
                self.producer_info['owner'], self.producer_info['bp_json_url'])
            print(msg)
            self.errors.append(msg)

        except Exception as e:
            self.status = 2
            msg = 'Error getting {} bp.json ({}): {} {}'.format(
                self.producer_info['owner'], self.producer_info['bp_json_url'],
                e, type(e))

            print('exception', e)
            print(msg)
            self.errors.append(msg)

    @retry(stop=stop_after_attempt(2), wait=wait_fixed(2))
    def check_p2p(self, url, timeout):
        time.sleep(DELAY)
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(timeout)
            host, port = url.split(':')
            result = sock.connect_ex((host, int(port)))
            if result != 0:
                self.status = 2
                self.endpoint_errors[url].append(
                    'Error connecting to {}'.format(url))
                print('Error connecting to {}'.format(url))
                return
        except ValueError as e:
            self.status = 2
            self.endpoint_errors[url].append(
                'Invalid p2p host:port value {}'.format(url))
            print('Invalid p2p host:port value {}'.format(url))
            return
        except Exception as e:
            self.status = 2
            self.endpoint_errors[url].append(
                'Error connecting to {}: {}'.format(url, e))
            print('Error connecting to {}: {} {}'.format(url, type(e), e))
            return

        self.healthy_p2p_endpoints.append(url)
        msg = 'P2P node {} is responding'.format(url)
        print(msg)
        self.endpoint_oks[url].append(msg)

    @retry(stop=stop_after_attempt(2), wait=wait_fixed(2), reraise=True)
    def check_api(self, url, chain_id, timeout):
        time.sleep(DELAY)
        errors_found = False
        try:
            api_url = '{}/v1/chain/get_info'.format(url.rstrip('/'))
            response = requests.get(api_url, timeout=timeout)
            if response.status_code != 200:
                self.status = 2
                msg = 'Error connecting to {}: {}'.format(
                    api_url, 'Response error: {}'.format(response.status_code))
                self.endpoint_errors[url].append(msg)
                print(msg)
                return

            #Check for appropriate CORS headers
            allow_origin = response.headers.get('access-control-allow-origin')
            if not allow_origin or allow_origin != '*':
                self.status = 2
                msg = 'Invalid value for CORS header access-control-allow-origin or header not present'
                self.endpoint_errors[url].append(msg)
                print(msg)
            else:
                msg = 'CORS headers properly configured'
                print(msg)
                self.endpoint_oks[url].append(msg)

            info = response.json()

            if info['chain_id'] != chain_id:
                self.wrong_chain_id = True
                self.status = 2
                msg = 'Wrong chain id'
                self.endpoint_errors[url].append(msg)
                print(msg)
                errors_found = True
                return

            head_block_time = info['head_block_time']
            head_block_time_dt = datetime.datetime.strptime(
                head_block_time, "%Y-%m-%dT%H:%M:%S.%f")
            now = datetime.datetime.utcnow()
            secs_diff = int((now - head_block_time_dt).total_seconds())

            if secs_diff > 300:
                self.status = 2
                msg = 'Last block synced {} ago'.format(
                    humanize.naturaldelta(secs_diff))
                self.endpoint_errors[url].append(msg)
                print(msg)
                errors_found = True

        except requests.exceptions.SSLError as e:
            self.status = 2
            msg = 'Error connecting to {}: {}'.format(url, 'Certificate error')
            self.endpoint_errors[url].append(msg)
            print(msg)
            errors_found = True

        except requests.exceptions.Timeout as e:
            self.status = 2
            msg = 'Error connecting to {}: {}'.format(url,
                                                      'Connection timed out')
            self.endpoint_errors[url].append(msg)
            print(msg)
            errors_found = True

        except Exception as e:
            self.status = 2
            msg = 'Error connecting to {}: {} {}'.format(
                url, type(e), e)
            self.endpoint_errors[url].append(msg)
            print(msg)
            errors_found = True

        if not errors_found:
            self.healthy_api_endpoints.append(url)
            msg = 'API node {} is responding correctly'.format(url)
            print(msg)
            self.endpoint_oks[url].append(msg)

    @retry(stop=stop_after_attempt(1), wait=wait_fixed(2), reraise=True)
    def check_history(self, url, timeout):
        time.sleep(DELAY)
        try:
            history_url = f'{url.rstrip("/")}/v1/history/get_actions'
            result = requests.post(
                history_url, json={"account_name": "eosio"},
                timeout=timeout).json()
            if 'actions' not in result:
                print('No actions in response')
                return

            if len(result['actions']) == 0:
                print('0 actions returned for eosio')
                return

        except Exception as e:
            msg = 'Error testing v1 history from {}: {}'.format(url, e)
            print(msg)
            self.endpoint_errors[url].append(msg)
            self.status = 2
            return

        self.healthy_history_endpoints.append(url)
        msg = 'History ok for {}'.format(url)
        self.endpoint_oks[url].append(msg)
        print(msg)

    @retry(stop=stop_after_attempt(1), wait=wait_fixed(2), reraise=True)
    def check_account_query(self, url, timeout):
        time.sleep(DELAY)
        try:
            account = "ledgerwiseio"
            api_url = '{}/v1/chain/get_accounts_by_authorizers'.format(
                url.rstrip('/'))
            response = requests.post(api_url,
                                     json={
                                         'json': True,
                                         'accounts': [
                                             account,
                                         ]
                                     },
                                     timeout=timeout)
            if response.status_code != 200:
                print(response.content)
                self.status = 2
                msg = 'Error getting authorizers for account {} from {}: {}'.format(
                    account, api_url,
                    'Response error: {}'.format(response.status_code))
                self.endpoint_errors[url].append(msg)
                print(msg)
                return

            else:
                msg = 'Get authorizers from account is ok on {}'.format(url)
                self.endpoint_oks[url].append(msg)
                print(msg)

        except Exception as e:
            msg = 'Error getting authorizers from {}: {}'.format(url, e)
            print(msg)
            return

    @retry(stop=stop_after_attempt(1), wait=wait_fixed(2), reraise=True)
    def check_hyperion(self, url, timeout):
        time.sleep(DELAY)
        errors_found = False
        try:
            #Check last hyperion indexed action
            history_url = '{}/v2/history/get_actions?limit=1'.format(
                url.rstrip('/'))
            response = requests.get(history_url, timeout=timeout)
            if response.status_code != 200:
                print('No hyperion found ({})'.format(response.status_code))
                self.endpoint_errors[url].append(
                    f'Error {response.status_code} testing hyperion')
                self.status = 2
                errors_found = True
                return

            json = response.json()
            last_action_date = dateutil.parser.parse(
                json['actions'][0]['@timestamp']).replace(tzinfo=None)
            diff_secs = (datetime.datetime.utcnow() -
                         last_action_date).total_seconds()
            if diff_secs > 600:
                msg = 'Hyperion Last action {} ago'.format(
                    humanize.naturaldelta(diff_secs))
                print(msg)
                self.endpoint_errors[url].append(msg)
                self.status = 2
                errors_found = True

            #Check hyperion service health
            health_url = '{}/v2/health'.format(url.rstrip('/'))
            response = requests.get(health_url, timeout=timeout)
            if response.status_code != 200:
                msg = 'Error {} trying to check hyperion health endpoint'.format(
                    response.status_code)
                print(msg)
                self.endpoint_errors[url].append(msg)
                self.status = 2
                errors_found = True
                return

            json = response.json()
            for item in json['health']:
                if item['status'] != 'OK':
                    msg = 'Hyperion service {} has status {}'.format(
                        item['service'], item['status'])
                    print(msg)
                    self.endpoint_errors[url].append(msg)
                    self.status = 2
                    errors_found = True

                if item['service'] == 'Elasticsearch':
                    if 'total_indexed_blocks' in item[
                            'service_data'] and 'last_indexed_block' in item[
                                'service_data']:
                        last_indexed_block = item['service_data'][
                            'last_indexed_block']
                        total_indexed_blocks = item['service_data'][
                            'total_indexed_blocks']
                        if last_indexed_block != total_indexed_blocks:
                            msg = 'Hyperion Elasticsearch last_indexed_block ({}) is different than total_indexed_blocks ({})'.format(
                                last_indexed_block, total_indexed_blocks)
                            print(msg)
                            self.endpoint_errors[url].append(msg)
                            self.status = 2
                            errors_found = True

        except Exception as e:
            msg = 'Error getting hyperion history from {}: {}'.format(url, e)
            print(msg)
            self.endpoint_errors[url].append(msg)
            self.status = 2
            errors_found = True
            return

        if not errors_found:
            self.healthy_hyperion_endpoints.append(url)
            msg = 'Hyperion history ok for {}'.format(url)
            self.endpoint_oks[url].append(msg)
            print(msg)

    @retry(stop=stop_after_attempt(1), wait=wait_fixed(2), reraise=True)
    def check_atomic(self, url, timeout):
        time.sleep(DELAY)
        errors_found = False
        try:

            #Check atomic service health
            health_url = '{}/health'.format(url.rstrip('/'))
            response = requests.get(health_url, timeout=timeout)
            if response.status_code != 200:
                msg = 'Error {} trying to check atomic health endpoint'.format(
                    response.status_code)
                print(msg)
                self.endpoint_errors[url].append(msg)
                self.status = 2
                errors_found = True
                return

            json = response.json()
            for item in json['data']:
                if 'status' in item:
                    if item['status'] != 'OK':
                        msg = 'Atomic service {} has status {}'.format(
                            item['service'], item['status'])
                        print(msg)
                        self.endpoint_errors[url].append(msg)
                        self.status = 2
                        errors_found = True

            head_block = json['data']['chain']['head_block']
            last_indexed_block = 0
            for reader in json['data']['postgres']['readers']:
                last_indexed_block = max(last_indexed_block,
                                         int(reader['block_num']))
            if abs(last_indexed_block - head_block) > 100:
                msg = 'Atomic API last_indexed_block is behind head_block'
                print(msg)
                self.endpoint_errors[url].append(msg)
                self.status = 2
                errors_found = True

        except Exception as e:
            msg = 'Error getting atomic data from {}: {}'.format(url, e)
            print(msg)
            self.endpoint_errors[url].append(msg)
            self.status = 2
            errors_found = True
            return

        if not errors_found:
            self.healthy_atomic_endpoints.append(url)
            msg = 'Atomic API ok for {}'.format(url)
            self.endpoint_oks[url].append(msg)
            print(msg)

    @retry(stop=stop_after_attempt(1), wait=wait_fixed(2), reraise=True)
    def check_patroneos(self, url, timeout):
        time.sleep(DELAY)
        try:
            url = url.rstrip('/')
            headers = {
                'Content-type': 'application/json',
                'Accept': 'text/plain'
            }
            r = requests.post('{}/v1/chain/get_account'.format(url),
                              data='{"account_name": "ledgerwiseio"}',
                              headers=headers,
                              timeout=timeout)
            r_json = r.json()

            if 'message' not in r_json:
                print('Response doesn\'t look like patroneos (no message)')
                return
            else:
                if r_json['message'] != 'INVALID_JSON':
                    print(
                        'Response doesn\'t look like patroneos  (message not INVALID_JSON)'
                    )
                    return

        except Exception as e:
            print('Error verifying patroneos from {}: {}'.format(url, e))
            return

        self.patroneos = 1
        msg = 'Patroneos ok for {}'.format(url)
        self.endpoint_oks[url].append(msg)
        print(msg)

    @retry(stop=stop_after_attempt(1), wait=wait_fixed(3), reraise=True)
    def check_ipfs(self, url, timeout):
        time.sleep(DELAY)
        try:
            api_url = f'{url}/ipfs/QmWnfdZkwWJxabDUbimrtaweYF8u9TaESDBM8xvRxxbQxv'
            response = requests.get(api_url, timeout=timeout)
            if response.status_code != 200:
                self.status = 2
                msg = 'Error getting ipfs image from {}: {}'.format(
                    api_url, 'Response error: {}'.format(response.status_code))

                self.ipfs_errors.append(msg)
                print(msg)
                return

            else:
                msg = 'IPFS is ok on {}'.format(url)
                self.endpoint_oks[url].append(msg)
                self.healthy_ipfs_endpoints.append(url)
                print(msg)

        except Exception as e:
            msg = 'Error getting ipfs image from {}: {}'.format(url, e)
            print(msg)
            return

    def run_checks(self):
        self.get_bpjson(timeout=self.chain_info['timeout'])
        if self.nodes:
            for node in self.bp_json['nodes']:
                if 'query' in node['node_type'] and 'features' in node:
                    #Check API
                    if 'chain-api' in node['features']:
                        if 'api_endpoint' in node:
                            self.check_api(node['api_endpoint'],
                                           self.chain_info['chain_id'],
                                           self.chain_info['timeout'])
                            if self.wrong_chain_id:
                                return
                            self.check_patroneos(node['api_endpoint'],
                                                 self.chain_info['timeout'])
                        if 'ssl_endpoint' in node:
                            self.check_api(node['ssl_endpoint'],
                                           self.chain_info['chain_id'],
                                           self.chain_info['timeout'])
                            if self.wrong_chain_id:
                                return
                            self.check_patroneos(node['ssl_endpoint'],
                                                 self.chain_info['timeout'])
                    #Check Account Query
                    if 'account-query' in node['features']:
                        if 'api_endpoint' in node:
                            self.check_account_query(
                                node['api_endpoint'],
                                self.chain_info['timeout'])
                        if 'ssl_endpoint' in node:
                            self.check_account_query(
                                node['ssl_endpoint'],
                                self.chain_info['timeout'])

                    #Check History V1
                    if 'history-v1' in node['features']:
                        if 'api_endpoint' in node:
                            self.check_history(node['api_endpoint'],
                                               self.chain_info['timeout'])
                        if 'ssl_endpoint' in node:
                            self.check_history(node['ssl_endpoint'],
                                               self.chain_info['timeout'])

                    #Check Hyperion
                    if 'hyperion-v2' in node['features']:
                        if 'api_endpoint' in node:
                            self.check_hyperion(node['api_endpoint'],
                                                self.chain_info['timeout'])
                        if 'ssl_endpoint' in node:
                            self.check_hyperion(node['ssl_endpoint'],
                                                self.chain_info['timeout'])

                    #Check Atomic Assets API
                    if 'atomic-assets-api' in node['features']:
                        if 'api_endpoint' in node:
                            self.check_atomic(node['api_endpoint'],
                                              self.chain_info['timeout'])
                        if 'ssl_endpoint' in node:
                            self.check_atomic(node['ssl_endpoint'],
                                              self.chain_info['timeout'])

                    #Check IPFS
                    if 'ipfs' in node['features']:
                        if 'api_endpoint' in node:
                            self.check_ipfs(node['api_endpoint'],
                                            self.chain_info['timeout'])
                        if 'ssl_endpoint' in node:
                            self.check_ipfs(node['ssl_endpoint'],
                                            self.chain_info['timeout'])

                if 'seed' in node['node_type']:
                    #Check P2P
                    if 'p2p_endpoint' in node:
                        self.check_p2p(node['p2p_endpoint'],
                                       self.chain_info['timeout'])
Code Example #15
File: db_wrapper.py  Project: blockvigil/moneyvigil
class DBCallsWrapper(object):
    @tenacity.retry(stop=tenacity.stop_after_attempt(3),
                    wait=tenacity.wait_fixed(3),
                    reraise=True,
                    retry=tenacity.retry_if_exception_type(
                        sqlalchemy.exc.OperationalError),
                    before_sleep=tenacity.before_sleep_log(
                        logger, logging.DEBUG))
    def query_user_by_(self, session_obj, *args, **kwargs):
        return session_obj.query(MoneyVigilUser).filter_by(**kwargs).first()

    @tenacity.retry(stop=tenacity.stop_after_attempt(3),
                    wait=tenacity.wait_fixed(3),
                    reraise=True,
                    retry=tenacity.retry_if_exception_type(
                        sqlalchemy.exc.OperationalError),
                    before_sleep=tenacity.before_sleep_log(
                        logger, logging.DEBUG))
    def query_group_by_(self, session_obj, *args, **kwargs):
        return session_obj.query(MoneyVigilGroup).filter_by(**kwargs).first()

    @tenacity.retry(stop=tenacity.stop_after_attempt(3),
                    wait=tenacity.wait_fixed(3),
                    reraise=True,
                    retry=tenacity.retry_if_exception_type(
                        sqlalchemy.exc.OperationalError),
                    before_sleep=tenacity.before_sleep_log(
                        logger, logging.DEBUG))
    def query_all_users(self, session_obj):
        return session_obj.query(MoneyVigilUser).all()

    @tenacity.retry(stop=tenacity.stop_after_attempt(3),
                    wait=tenacity.wait_fixed(3),
                    reraise=True,
                    retry=tenacity.retry_if_exception_type(
                        sqlalchemy.exc.OperationalError),
                    before_sleep=tenacity.before_sleep_log(
                        logger, logging.DEBUG))
    def query_all_groups(self, session_obj):
        return session_obj.query(MoneyVigilGroup).all()

    @tenacity.retry(stop=tenacity.stop_after_attempt(3),
                    wait=tenacity.wait_fixed(3),
                    reraise=True,
                    retry=tenacity.retry_if_exception_type(
                        sqlalchemy.exc.OperationalError),
                    before_sleep=tenacity.before_sleep_log(
                        logger, logging.DEBUG))
    def query_unsubscribetoken_by_(self, session_obj, *args, **kwargs):
        return session_obj.query(MoneyVigilUnsubscribeTokens).filter_by(
            **kwargs).first()

    @tenacity.retry(stop=tenacity.stop_after_attempt(3),
                    wait=tenacity.wait_fixed(3),
                    reraise=True,
                    retry=tenacity.retry_if_exception_type(
                        sqlalchemy.exc.OperationalError),
                    before_sleep=tenacity.before_sleep_log(
                        logger, logging.DEBUG))
    def query_bill_by_(self, session_obj, *args, **kwargs):
        return session_obj.query(MoneyVigilBill).filter_by(**kwargs).first()

    @tenacity.retry(stop=tenacity.stop_after_attempt(3),
                    wait=tenacity.wait_fixed(3),
                    reraise=True,
                    retry=tenacity.retry_if_exception_type(
                        sqlalchemy.exc.OperationalError),
                    before_sleep=tenacity.before_sleep_log(
                        logger, logging.DEBUG))
    def query_invites_by_all(self, session_obj, *args, **kwargs):
        return session_obj.query(MoneyVigilInvites).filter_by(**kwargs).all()

    @tenacity.retry(stop=tenacity.stop_after_attempt(3),
                    wait=tenacity.wait_fixed(3),
                    reraise=True,
                    retry=tenacity.retry_if_exception_type(
                        sqlalchemy.exc.OperationalError),
                    before_sleep=tenacity.before_sleep_log(
                        logger, logging.DEBUG))
    def query_entity_by_(self, session_obj, *args, **kwargs):
        return session_obj.query(MoneyVigilCorporateEntity).filter_by(
            **kwargs).first()

    def query_role_by_(self, session_obj, *args, **kwargs):
        return session_obj.query(MoneyVigilCorporateEntityRole).filter_by(
            **kwargs).first()

    def query_roles_by_(self, session_obj, *args, **kwargs):
        return session_obj.query(MoneyVigilCorporateEntityRole).filter_by(
            **kwargs).all()

    def query_permission_by_(self, session_obj, *args, **kwargs):
        return session_obj.query(
            MoneyVigilCorporateEntityPermission).filter_by(**kwargs).first()
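
Every method above repeats the same decorator verbatim. One possible refactor (a sketch that reuses sqlalchemy, logger and MoneyVigilUser from the example above) is to build the decorator once and apply it to each query method:

retry_on_db_error = tenacity.retry(
    stop=tenacity.stop_after_attempt(3),
    wait=tenacity.wait_fixed(3),
    reraise=True,
    retry=tenacity.retry_if_exception_type(sqlalchemy.exc.OperationalError),
    before_sleep=tenacity.before_sleep_log(logger, logging.DEBUG))

class DBCallsWrapper(object):
    @retry_on_db_error
    def query_user_by_(self, session_obj, *args, **kwargs):
        return session_obj.query(MoneyVigilUser).filter_by(**kwargs).first()
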
Code Example #16
File: deq.py  Project: vishalforcode/birdhouse
key_for_checking_last_telemetry_date = "pm25"


def main():
    # Note -- when adding an item here make sure the device has been defined on the server!
    get_data_for_station(
        2, "DEQ (SEL)"
    )  # 2  => SE Lafayette (SEL)               [latitude:45.496640911, longitude:-122.60287735]
    get_data_for_station(
        64, "DEQ (PCH)"
    )  # 64 => Portland Cully Helensview (PCH)  [latitude:45.562203,    longitude:-122.575624]


# This fails a lot, so we'll try tenacity
@retry(stop=stop_after_attempt(7), wait=wait_fixed(10), reraise=True)
def get_data(station_id, from_ts, to_ts):
    result = deq_tools.get_data(station_id, from_ts, to_ts)
    return result


def get_data_for_station(station_id, device_name):
    print_if_ssh("Retrieving data for %s..." % device_name)

    device = tbapi.get_device_by_name(device_name)
    device_token = tbapi.get_device_token(tbapi.get_id(device))

    start = time.time()

    # Date range for the data we're requesting from DEQ
    from_ts = get_from_ts(
Code Example #17
File: serverctl.py  Project: wjswlal/seafile-server
class ServerCtl(object):
    def __init__(self,
                 datadir,
                 db='sqlite3',
                 seaf_server_bin='seaf-server',
                 ccnet_server_bin='ccnet-server'):
        self.db = db
        self.datadir = datadir
        self.central_conf_dir = join(datadir, 'conf')
        self.seafile_conf_dir = join(datadir, 'seafile-data')
        self.ccnet_conf_dir = join(datadir, 'ccnet')

        self.log_dir = join(datadir, 'logs')
        mkdirs(self.log_dir)
        self.ccnet_log = join(self.log_dir, 'ccnet.log')
        self.seafile_log = join(self.log_dir, 'seafile.log')

        self.ccnet_server_bin = ccnet_server_bin
        self.seaf_server_bin = seaf_server_bin

        self.ccnet_proc = None
        self.seafile_proc = None

    def setup(self):
        if self.db == 'mysql':
            create_mysql_dbs()

        self.init_ccnet()
        self.init_seafile()

    def init_ccnet(self):
        cmd = [
            'ccnet-init',
            '-F',
            self.central_conf_dir,
            '-c',
            self.ccnet_conf_dir,
            '--name',
            'test',
            '--host',
            'test.seafile.com',
        ]
        shell(cmd)
        if self.db == 'mysql':
            self.add_ccnet_db_conf()
        else:
            self.add_ccnet_sqlite_db_conf()

    def add_ccnet_sqlite_db_conf(self):
        ccnet_conf = join(self.central_conf_dir, 'ccnet.conf')
        ccnet_db_conf = '''\
[Database]
CREATE_TABLES = true
'''
        with open(ccnet_conf, 'a+') as fp:
            fp.write('\n')
            fp.write(ccnet_db_conf)

    def add_ccnet_db_conf(self):
        ccnet_conf = join(self.central_conf_dir, 'ccnet.conf')
        ccnet_db_conf = '''\
[Database]
ENGINE = mysql
HOST = 127.0.0.1
PORT = 3306
USER = seafile
PASSWD = seafile
DB = ccnet
CONNECTION_CHARSET = utf8
CREATE_TABLES = true
'''
        with open(ccnet_conf, 'a+') as fp:
            fp.write('\n')
            fp.write(ccnet_db_conf)

    def init_seafile(self):
        cmd = [
            'seaf-server-init',
            '--central-config-dir',
            self.central_conf_dir,
            '--seafile-dir',
            self.seafile_conf_dir,
            '--fileserver-port',
            '8082',
        ]

        shell(cmd)
        if self.db == 'mysql':
            self.add_seafile_db_conf()
        else:
            self.add_seafile_sqlite_db_conf()

    def add_seafile_sqlite_db_conf(self):
        seafile_conf = join(self.central_conf_dir, 'seafile.conf')
        seafile_db_conf = '''\
[database]
create_tables = true
'''
        with open(seafile_conf, 'a+') as fp:
            fp.write('\n')
            fp.write(seafile_db_conf)

    def add_seafile_db_conf(self):
        seafile_conf = join(self.central_conf_dir, 'seafile.conf')
        seafile_db_conf = '''\
[database]
type = mysql
host = 127.0.0.1
port = 3306
user = seafile
password = seafile
db_name = seafile
connection_charset = utf8
create_tables = true
'''
        with open(seafile_conf, 'a+') as fp:
            fp.write('\n')
            fp.write(seafile_db_conf)

    @contextmanager
    def run(self):
        try:
            self.start()
            yield self
        except:
            self.print_logs()
            raise
        finally:
            self.stop()

    def print_logs(self):
        for logfile in self.ccnet_log, self.seafile_log:
            if exists(logfile):
                shell('cat {0}'.format(logfile))

    @retry(wait=wait_fixed(1), stop=stop_after_attempt(10))
    def wait_ccnet_ready(self):
        if not exists(join(self.ccnet_conf_dir, 'ccnet.sock')):
            raise TryAgain

    def start(self):
        logger.info('Starting ccnet server')
        self.start_ccnet()
        self.wait_ccnet_ready()
        logger.info('Starting seafile server')
        self.start_seafile()

    def start_ccnet(self):
        cmd = [
            self.ccnet_server_bin,
            "-F",
            self.central_conf_dir,
            "-c",
            self.ccnet_conf_dir,
            "-f",
            self.ccnet_log,
        ]
        self.ccnet_proc = shell(cmd, wait=False)

    def start_seafile(self):
        cmd = [
            self.seaf_server_bin,
            "-F",
            self.central_conf_dir,
            "-c",
            self.ccnet_conf_dir,
            "-d",
            self.seafile_conf_dir,
            "-l",
            self.seafile_log,
        ]
        self.seafile_proc = shell(cmd, wait=False)

    def stop(self):
        if self.ccnet_proc:
            logger.info('Stopping ccnet server')
            self.ccnet_proc.terminate()
        if self.seafile_proc:
            logger.info('Stopping seafile server')
            self.seafile_proc.terminate()

    def get_seaserv_envs(self):
        envs = dict(os.environ)
        envs.update({
            'SEAFILE_CENTRAL_CONF_DIR': self.central_conf_dir,
            'CCNET_CONF_DIR': self.ccnet_conf_dir,
            'SEAFILE_CONF_DIR': self.seafile_conf_dir,
        })
        return envs
Code Example #18
File: Sap.py  Project: ArnoldSouza/SapRefresh
Created on 3/8/2021
Author: Arnold Souza
Email: [email protected]
"""
from tenacity import retry, wait_fixed, before_sleep_log, stop_after_attempt

from sapRefresh.Core.Cripto import secret_decode
from sapRefresh.Core.Time import timeit

import logging
from sapRefresh.Core.base_logger import get_logger
from sapRefresh import LOG_PATH
logger, LOG_FILEPATH = get_logger(__name__, LOG_PATH)


@retry(reraise=True, wait=wait_fixed(10), before_sleep=before_sleep_log(logger, logging.DEBUG), stop=stop_after_attempt(3))
@timeit
def sap_logon(xl_Instance, source, client, user, password):
    """API method to trigger a logon to a system for a specified data source"""
    result = xl_Instance.Application.Run("SAPLogon", source, client, secret_decode(user), secret_decode(password))
    if result == 1:
        print('\nSuccessfully logged in SAP AfO')
    else:
        raise ConnectionError("Couldn't login in SAP AfO")
    return result


@timeit
def sap_refresh(xl_Instance):
    """
    Do there initial refresh of data in the workbook.
Code Example #19
@retry(wait=wait_fixed(2))
def writeLayer():
    p.write_block_flp(blockNum, layer)
Code Example #20
class RKEKubernetesAnsibleAppConfigurer(AnsibleAppConfigurer):
    """Add CloudMan2 specific vars to playbook."""
    def _cb_provider_id_to_kube_provider_id(self, provider_id):
        CB_CLOUD_TO_KUBE_CLOUD_MAP = {
            'aws': 'aws',
            'openstack': 'openstack',
            'azure': 'azure',
            'gcp': 'gce'
        }
        return CB_CLOUD_TO_KUBE_CLOUD_MAP.get(provider_id)

    @tenacity.retry(stop=tenacity.stop_after_attempt(2),
                    wait=tenacity.wait_fixed(10),
                    reraise=True,
                    after=lambda *args: log.debug(
                        "Node not registered yet, checking again..."))
    def has_reached_desired_state(self, provider_config):
        # Newly added node should now be registered with the cluster
        kube_client = KubeClient()
        node_ip = provider_config.get('host_config', {}).get('private_ip')
        k8s_node = kube_client.nodes.find(address=node_ip)
        if k8s_node and k8s_node[0]:
            return True
        else:
            raise NodeNotRegistered(
                f"New node with ip: {node_ip} has still not registered with k8s cluster"
            )

    @tenacity.retry(stop=tenacity.stop_after_attempt(2),
                    wait=tenacity.wait_fixed(10),
                    reraise=True,
                    after=lambda *args, **kwargs: log.debug(
                        "Node not registered, rerunning playbook..."))
    def configure(self, app_config, provider_config):
        playbook_vars = {
            'kube_cloud_provider':
            self._cb_provider_id_to_kube_provider_id(
                provider_config.get('cloud_provider').PROVIDER_ID),
            'cluster_hostname':
            app_config.get('config_kube_rke', {}).get('rke_cluster_id'),
            'rke_registration_server':
            app_config.get('config_kube_rke',
                           {}).get('rke_registration_server'),
            'rke_registration_token':
            app_config.get('config_kube_rke', {}).get('rke_registration_token')
        }
        result = super().configure(app_config,
                                   provider_config,
                                   playbook_vars=playbook_vars)
        if self.has_reached_desired_state(provider_config):
            kube_client = KubeClient()
            node_ip = provider_config.get('host_config', {}).get('private_ip')
            k8s_node = kube_client.nodes.find(address=node_ip)[0]
            labels = {
                'usegalaxy.org/cm_node_name':
                app_config.get('deployment_config', {}).get('name', '')
            }
            autoscaling_group = app_config.get('config_cloudman',
                                               {}).get('autoscaling_group', '')
            if autoscaling_group:
                labels[
                    'usegalaxy.org/cm_autoscaling_group'] = autoscaling_group
            kube_client.nodes.set_label(k8s_node, labels)
            return result
        else:
            raise NodeNotRegistered(f"Node has not been added to the cluster")
Code Example #21
 def test_fixed_sleep(self):
     r = Retrying(wait=tenacity.wait_fixed(1))
     self.assertEqual(1, r.wait(12, 6546))
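
The two-argument call r.wait(12, 6546) reflects the older tenacity API, where wait strategies received the attempt number and elapsed delay directly. In recent releases they are called with a RetryCallState instead; a rough equivalent under that assumption:

import tenacity

r = tenacity.Retrying(wait=tenacity.wait_fixed(1))
state = tenacity.RetryCallState(retry_object=r, fn=None, args=(), kwargs={})
assert r.wait(state) == 1
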
Code Example #22
class CapturingHandler(logging.Handler):
    """Captures log records for inspection."""

    def __init__(self, *args, **kwargs):
        super(CapturingHandler, self).__init__(*args, **kwargs)
        self.records = []

    def emit(self, record):
        self.records.append(record)


def current_time_ms():
    return int(round(time.time() * 1000))


@retry(wait=tenacity.wait_fixed(0.05),
       retry=tenacity.retry_if_result(lambda result: result is None))
def _retryable_test_with_wait(thing):
    return thing.go()


@retry(stop=tenacity.stop_after_attempt(3),
       retry=tenacity.retry_if_result(lambda result: result is None))
def _retryable_test_with_stop(thing):
    return thing.go()


@retry(retry=tenacity.retry_if_exception_type(IOError))
def _retryable_test_with_exception_type_io(thing):
    return thing.go()
Code Example #23
File: test_workflow.py  Project: timfeirg/lain-cli

@pytest.mark.run(after='test_override_release_name')
@pytest.mark.usefixtures('dummy')
def test_lain_job_in_non_lain_app_directory():
    ensure_absent([CHART_DIR_NAME])
    command = ('which', 'lain')
    run(lain, args=['job', '--wait', '--force', *command])
    _, job_name = run_under_click_context(make_job_name,
                                          args=(command, ),
                                          obj={'appname': 'lain'})
    logs_res = kubectl('logs', f'-ljob-name={job_name}', capture_output=True)
    assert ensure_str(logs_res.stdout).strip() == '/usr/local/bin/lain'


@retry(reraise=True, wait=wait_fixed(3), stop=stop_after_attempt(6))
def url_get_json(url, **kwargs):
    sleep(4)
    res = requests.get(url, **kwargs)
    res.raise_for_status()
    return res.json()


def get_dummy_pod_names():
    res = kubectl(
        'get',
        'po',
        f'-lapp.kubernetes.io/name={DUMMY_APPNAME}',
        capture_output=True,
    )
    pods = ensure_str(res.stdout).splitlines()
Code Example #24
from servicelib import openapi
from servicelib.application_keys import APP_CONFIG_KEY
from servicelib.openapi import create_openapi_specs
from servicelib.rest_middlewares import append_rest_middlewares
from tenacity import before_sleep_log, retry, stop_after_attempt, wait_fixed

from . import rest_handlers
from .rest_config import APP_OPENAPI_SPECS_KEY, CONFIG_SECTION_NAME

logger = logging.getLogger(__name__)


RETRY_WAIT_SECS = 2
RETRY_COUNT = 10

@retry( wait=wait_fixed(RETRY_WAIT_SECS),
        stop=stop_after_attempt(RETRY_COUNT),
        before_sleep=before_sleep_log(logger, logging.INFO) )
async def get_specs(location):
    specs = await create_openapi_specs(location)
    return specs

def create_routes(specs):
    base_path = openapi.get_base_path(specs)

    logger.debug("creating %s ", __name__)
    routes = []
    path, handle = '/', rest_handlers.check_health
    operation_id = specs.paths[path].operations['get'].operation_id
    routes.append( web.get(base_path+path, handle, name=operation_id) )
Code Example #25
    async def search(self, request, context, metadata):
        start = default_timer()
        processor_response = None
        cache_hit = True
        page_size = request.page_size or 5
        schemas = tuple(sorted([schema for schema in request.schemas]))
        user_id = metadata['user-id']

        if (
            (user_id, request.language, schemas, request.query) not in self.query_cache
            or len(self.query_cache[(user_id, request.language, schemas, request.query)].scored_documents) == 0
        ):
            cache_hit = False
            query = despace_full(request.query)
            processor_response = self.query_processor.process(query, request.language)
            processor_response = await self.processed_response_hook(processor_response, context)

            with suppress(RetryError):
                async for attempt in AsyncRetrying(
                    retry=retry_if_exception_type(NeedRetryError),
                    wait=wait_fixed(10),
                    stop=stop_after_attempt(3)
                ):
                    with attempt:
                        requests = []
                        for schema in schemas:
                            requests.append(
                                self.summa_client.search(
                                    schema=schema,
                                    query=processor_response['query'],
                                    page=0,
                                    page_size=self.page_sizes[schema],
                                    request_id=metadata['request-id'],
                                )
                            )
                        search_response = self.merge_search_responses(await asyncio.gather(*requests))
                        search_response = await self.post_search_hook(
                            search_response,
                            processor_response=processor_response,
                            request=request,
                            context=context,
                            metadata=metadata,
                            retry_state=attempt.retry_state
                        )

            rescored_documents = await self.rescorer.rescore(
                scored_documents=search_response['scored_documents'],
                query=query,
                session_id=metadata['session-id'],
                language=request.language,
            )
            search_response['scored_documents'] = rescored_documents
            search_response_pb = self.cast_search_response(search_response)
            self.query_cache[(user_id, request.language, schemas, request.query)] = search_response_pb

        logging.getLogger('query').info({
            'action': 'request',
            'cache_hit': cache_hit,
            'duration': default_timer() - start,
            'mode': 'search',
            'page': request.page,
            'page_size': page_size,
            'processed_query': processor_response['query'] if processor_response else None,
            'query': request.query,
            'query_class': processor_response['class'].value if processor_response else None,
            'request_id': metadata['request-id'],
            'schemas': schemas,
            'session_id': metadata['session-id'],
            'user_id': user_id,
        })

        scored_documents = self.query_cache[(user_id, request.language, schemas, request.query)].scored_documents
        left_offset = request.page * page_size
        right_offset = left_offset + page_size
        has_next = len(scored_documents) > right_offset

        search_response_pb = SearchResponsePb(
            scored_documents=scored_documents[left_offset:right_offset],
            has_next=has_next,
        )

        return search_response_pb
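
The retry loop above uses tenacity's AsyncRetrying iterator rather than a decorator. An isolated sketch of the same idiom (NeedRetryError and flaky_backend below are stand-ins, not the project's classes): the `with attempt:` body is re-run whenever it raises NeedRetryError, and suppress(RetryError) lets the caller fall back to a previous value when every attempt fails.

import asyncio
from contextlib import suppress

from tenacity import (AsyncRetrying, RetryError, retry_if_exception_type,
                      stop_after_attempt, wait_fixed)

class NeedRetryError(Exception):
    pass

async def flaky_backend(state):
    state["calls"] += 1
    if state["calls"] < 3:
        raise NeedRetryError
    return "fresh result"

async def fetch_with_fallback():
    state = {"calls": 0}
    result = "stale result"  # fallback if every attempt fails
    with suppress(RetryError):
        async for attempt in AsyncRetrying(
            retry=retry_if_exception_type(NeedRetryError),
            wait=wait_fixed(0),
            stop=stop_after_attempt(3),
        ):
            with attempt:
                result = await flaky_backend(state)
    return result

print(asyncio.run(fetch_with_fallback()))  # prints "fresh result"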
Code example #26
import random


import tenacity


def do_something():
    if random.randint(0, 1) == 0:
        print('Failure')
        raise RuntimeError
    print('Success')


@tenacity.retry(wait=tenacity.wait_fixed(1), retry=tenacity.retry_if_exception_type(RuntimeError))
def do_something_and_retry():
    do_something()


do_something_and_retry()
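
The decorator above has no stop condition, so do_something_and_retry keeps retrying until a call finally succeeds. A bounded variant (a sketch reusing the same do_something) gives up after five attempts and re-raises the last RuntimeError:

@tenacity.retry(wait=tenacity.wait_fixed(1),
                stop=tenacity.stop_after_attempt(5),
                retry=tenacity.retry_if_exception_type(RuntimeError),
                reraise=True)
def do_something_with_a_limit():
    do_something()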
Code example #27
File: s3.py Project: luo-zn/gnocchi
    conn = boto3.client(
        's3',
        endpoint_url=conf.s3_endpoint_url,
        region_name=conf.s3_region_name,
        aws_access_key_id=conf.s3_access_key_id,
        aws_secret_access_key=conf.s3_secret_access_key,
        config=boto_config.Config(
            max_pool_connections=conf.s3_max_pool_connections))
    return conn, conf.s3_region_name, conf.s3_bucket_prefix


# NOTE(jd) OperationAborted might be raised if we try to create the bucket
# for the first time at the same time
@tenacity.retry(
    stop=tenacity.stop_after_attempt(10),
    wait=tenacity.wait_fixed(0.5),
    retry=tenacity.retry_if_exception(retry_if_operationaborted)
)
def create_bucket(conn, name, region_name):
    if region_name:
        kwargs = dict(CreateBucketConfiguration={
            "LocationConstraint": region_name,
        })
    else:
        kwargs = {}
    return conn.create_bucket(Bucket=name, **kwargs)


def bulk_delete(conn, bucket, objects):
    # NOTE(jd) The maximum object to delete at once is 1000
    # TODO(jd) Parallelize?
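
Since the S3 DeleteObjects API accepts at most 1000 keys per request, a chunked implementation could look like the following sketch (an assumption, not gnocchi's actual code):

def bulk_delete_sketch(conn, bucket, objects, chunk_size=1000):
    # Split the key list into chunks of at most 1000, the S3 per-request limit.
    for i in range(0, len(objects), chunk_size):
        chunk = objects[i:i + chunk_size]
        conn.delete_objects(
            Bucket=bucket,
            Delete={"Objects": [{"Key": key} for key in chunk],
                    "Quiet": True},
        )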
Code example #28
class RKEKubernetesApp(BaseVMAppPlugin):
    """
    RKE Kubernetes Appliance.
    """
    @staticmethod
    def validate_app_config(provider, name, cloud_config, app_config):
        rke_config = get_required_val(
            app_config, "config_kube_rke", "RKE configuration data"
            " must be provided. config_kube_rke entry not found in"
            " app_config.")
        assert 'rke_cluster_id' in rke_config
        assert 'rke_registration_server' in rke_config
        assert 'rke_registration_token' in rke_config
        return app_config

    def deploy(self, name, task, app_config, provider_config, **kwargs):
        """
        Handle the app launch process and wait for http.

        Pass boolean ``check_http`` as a ``False`` kwarg if you don't
        want this method to perform the app http check and prefer to handle
        it in the child class.
        """
        result = super().deploy(name, task, app_config, provider_config)
        return result

    @tenacity.retry(stop=tenacity.stop_after_attempt(2),
                    wait=tenacity.wait_fixed(10),
                    reraise=True,
                    after=lambda *args: log.debug(
                        "Node not deleted yet, checking again..."))
    def check_node_no_longer_exists(self, node_name):
        # The deleted node should no longer be registered with the cluster
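        # Raising NodeNotDeleted below is what drives the retry decorator;
        # with reraise=True the same exception (rather than RetryError)
        # reaches the caller if the node is still present after the last
        # attempt.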
        kube_client = KubeClient()
        k8s_node = kube_client.nodes.find(
            labels={'usegalaxy.org/cm_node_name': node_name})
        if not k8s_node:
            return True
        else:
            raise NodeNotDeleted(
                f"Deleted node with name: {node_name} still attached to the cluster."
            )

    @tenacity.retry(
        stop=tenacity.stop_after_attempt(2),
        wait=tenacity.wait_fixed(10),
        reraise=True,
        after=lambda *args: log.debug("Node not removed, retrying......"))
    def delete(self, provider, deployment):
        """
        Delete resource(s) associated with the supplied deployment.

        This is a blocking call that will wait until the instance is marked
        as deleted or disappears from the provider.

        *Note* that this method will delete resource(s) associated with
        the deployment - this is an un-recoverable action.
        """
        deployment_name = deployment.get('name')
        try:
            kube_client = KubeClient()
            k8s_node = kube_client.nodes.find(
                labels={'usegalaxy.org/cm_node_name': deployment_name})
            if k8s_node:
                k8s_node = k8s_node[0]
                try:
                    # stop new jobs being scheduled on this node
                    print(f"Cordoning node: {deployment_name}")
                    kube_client.nodes.cordon(k8s_node)
                    # let existing jobs finish
                    print(
                        f"Waiting for jobs to finish on node: {deployment_name}"
                    )
                    kube_client.nodes.wait_till_jobs_complete(k8s_node)
                    # drain remaining pods
                    print(f"Draining node: {deployment_name}")
                    kube_client.nodes.drain(k8s_node, timeout=120)
                finally:
                    # delete the k8s node
                    print(f"Deleting k8s node: {deployment_name}")
                    kube_client.nodes.delete(k8s_node)
        finally:
            # delete the VM
            result = super().delete(provider, deployment)
        if self.check_node_no_longer_exists(deployment_name):
            return result
        else:
            raise NodeNotDeleted(f"Node has not been removed from the cluster")

    def _get_configurer(self, app_config):
        # CloudMan2 can only be configured with ansible
        return RKEKubernetesAnsibleAppConfigurer()

    def _provision_host(self, name, task, app_config, provider_config):
        provider = provider_config.get('cloud_provider')
        clust_name = app_config.get('config_cloudman', {}).get('cluster_name')

        handler_class = get_iam_handler_for(provider.PROVIDER_ID)
        if handler_class:
            provider = provider_config.get('cloud_provider')
            handler = handler_class(provider, clust_name, app_config)
            provider_config['extra_provider_args'] = \
                handler.create_iam_policy()
        result = super()._provision_host(name, task, app_config,
                                         provider_config)
        return result
Code example #29
class TestNetmikoSwitch(NetmikoSwitchTestBase):
    @mock.patch('networking_generic_switch.devices.netmiko_devices.'
                'NetmikoSwitch.send_commands_to_device')
    def test_add_network(self, m_sctd):
        self.switch.add_network(22, '0ae071f5-5be9-43e4-80ea-e41fefe85b21')
        m_sctd.assert_called_with(None)

    @mock.patch('networking_generic_switch.devices.netmiko_devices.'
                'NetmikoSwitch.send_commands_to_device')
    def test_del_network(self, m_sctd):
        self.switch.del_network(22)
        m_sctd.assert_called_with(None)

    @mock.patch('networking_generic_switch.devices.netmiko_devices.'
                'NetmikoSwitch.send_commands_to_device')
    def test_plug_port_to_network(self, m_sctd):
        self.switch.plug_port_to_network(2222, 22)
        m_sctd.assert_called_with(None)

    def test__format_commands(self):
        self.switch._format_commands(netmiko_devices.NetmikoSwitch.ADD_NETWORK,
                                     segmentation_id=22,
                                     network_id=22)

    @mock.patch.object(netmiko_devices.tenacity,
                       'wait_fixed',
                       return_value=tenacity.wait_fixed(0.01))
    @mock.patch.object(netmiko_devices.tenacity,
                       'stop_after_delay',
                       return_value=tenacity.stop_after_delay(0.1))
    @mock.patch.object(netmiko, 'ConnectHandler')
    def test__get_connection_connect_fail(self, m_conn_handler, m_stop,
                                          m_wait):
        m_conn = mock.MagicMock()
        m_conn_handler.side_effect = [paramiko.SSHException, m_conn]
        with self.switch._get_connection() as conn:
            self.assertEqual(conn, m_conn)
        m_stop.assert_called_once_with(60)
        m_wait.assert_called_once_with(10)

    @mock.patch.object(netmiko_devices.tenacity,
                       'wait_fixed',
                       return_value=tenacity.wait_fixed(0.01))
    @mock.patch.object(netmiko_devices.tenacity,
                       'stop_after_delay',
                       return_value=tenacity.stop_after_delay(0.1))
    @mock.patch.object(netmiko, 'ConnectHandler')
    def test__get_connection_timeout(self, m_conn_handler, m_stop, m_wait):
        switch = self._make_switch_device({
            'ngs_ssh_connect_timeout': '1',
            'ngs_ssh_connect_interval': '1'
        })
        m_conn_handler.side_effect = (paramiko.SSHException)

        def get_connection():
            with switch._get_connection():
                self.fail()

        self.assertRaises(exc.GenericSwitchNetmikoConnectError, get_connection)
        m_stop.assert_called_once_with(1)
        m_wait.assert_called_once_with(1)

    @mock.patch.object(netmiko_devices.tenacity,
                       'wait_fixed',
                       return_value=tenacity.wait_fixed(0.01))
    @mock.patch.object(netmiko_devices.tenacity,
                       'stop_after_delay',
                       return_value=tenacity.stop_after_delay(0.1))
    @mock.patch.object(netmiko, 'ConnectHandler')
    def test__get_connection_caller_failure(self, m_conn_handler, m_stop,
                                            m_wait):
        m_conn = mock.MagicMock()
        m_conn_handler.return_value = m_conn

        class FakeError(Exception):
            pass

        def get_connection():
            with self.switch._get_connection():
                raise FakeError()

        self.assertRaises(FakeError, get_connection)
        m_conn.__exit__.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY)

    @mock.patch.object(netmiko_devices.NetmikoSwitch, '_get_connection')
    def test_send_commands_to_device_empty(self, gc_mock):
        connect_mock = mock.MagicMock()
        gc_mock.return_value.__enter__.return_value = connect_mock
        self.assertIsNone(self.switch.send_commands_to_device([]))
        self.assertFalse(connect_mock.send_config_set.called)
        self.assertFalse(connect_mock.send_command.called)

    @mock.patch.object(netmiko_devices.NetmikoSwitch, '_get_connection')
    def test_send_commands_to_device(self, gc_mock):
        connect_mock = mock.MagicMock(SAVE_CONFIGURATION=None)
        gc_mock.return_value.__enter__.return_value = connect_mock
        self.switch.send_commands_to_device(['spam ham aaaa'])
        gc_mock.assert_called_once_with()
        connect_mock.send_config_set.assert_called_once_with(
            config_commands=['spam ham aaaa'])
        self.assertFalse(connect_mock.send_command.called)

    @mock.patch.object(netmiko_devices.NetmikoSwitch, '_get_connection')
    def test_send_commands_to_device_save_configuration(self, gc_mock):
        connect_mock = mock.MagicMock(SAVE_CONFIGURATION='save me')
        gc_mock.return_value.__enter__.return_value = connect_mock
        self.switch.send_commands_to_device(['spam ham aaaa'])
        connect_mock.send_config_set.assert_called_once_with(
            config_commands=['spam ham aaaa'])
        connect_mock.send_command.assert_called_once_with('save me')

    @mock.patch.object(netmiko_devices.ngs_lock, 'PoolLock', autospec=True)
    @mock.patch.object(netmiko_devices.netmiko, 'ConnectHandler')
    @mock.patch.object(coordination, 'get_coordinator', autospec=True)
    def test_switch_send_commands_with_coordinator(self, get_coord_mock,
                                                   nm_mock, lock_mock):
        self.cfg.config(acquire_timeout=120,
                        backend_url='mysql://localhost',
                        group='ngs_coordination')
        self.cfg.config(host='viking')
        coord = mock.Mock()
        get_coord_mock.return_value = coord

        switch = self._make_switch_device(extra_cfg={'ngs_max_connections': 2})
        self.assertEqual(coord, switch.locker)
        get_coord_mock.assert_called_once_with('mysql://localhost',
                                               'ngs-viking'.encode('ascii'))

        connect_mock = mock.MagicMock(SAVE_CONFIGURATION=None)
        connect_mock.__enter__.return_value = connect_mock
        nm_mock.return_value = connect_mock
        lock_mock.return_value.__enter__.return_value = lock_mock
        switch.send_commands_to_device(['spam ham'])

        lock_mock.assert_called_once_with(coord,
                                          locks_pool_size=2,
                                          locks_prefix='host',
                                          timeout=120)
        lock_mock.return_value.__exit__.assert_called_once()
        lock_mock.return_value.__enter__.assert_called_once()
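
A compact, self-contained sketch of the speed-up trick these tests rely on (Flaky and poke_with_retry below are assumptions, not networking-generic-switch code): the code under test builds its wait policy by calling tenacity.wait_fixed() at runtime, so patching that factory lets the test retry without sleeping while still asserting which interval was requested.

import tenacity
from unittest import mock

class Flaky:
    """Toy target: raises IOError twice, then returns the call count."""
    def __init__(self):
        self.calls = 0

    def poke(self):
        self.calls += 1
        if self.calls < 3:
            raise IOError("not yet")
        return self.calls

def poke_with_retry(flaky):
    # The wait policy is built at call time, which is what makes it patchable.
    retryer = tenacity.Retrying(wait=tenacity.wait_fixed(10),
                                stop=tenacity.stop_after_attempt(5))
    return retryer(flaky.poke)

with mock.patch.object(tenacity, 'wait_fixed',
                       return_value=tenacity.wait_fixed(0)) as m_wait:
    assert poke_with_retry(Flaky()) == 3   # retries happen without sleeping
    m_wait.assert_called_once_with(10)     # the configured interval was requested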
Code example #30
class SkydbTable(object):
	"""
	- The main goals with this class will be to implement basic database functions such as add_rows,
	edit_rows, fetchone, fetchall
	"""

	def __init__(self, table_name:str, columns:list, seed:str, column_split:list=[], verbose=0):
		"""
		Args:
			table_name(str): This is the name of the table and will also act as key in the 
			skydb registry.

			columns(list): This parameter will name all the columns of the table. In general I 
			plan on setting each row as multiple key -> value pairs with key being the
			table_name:column_name:row_index and the value will be data stored at that (row i.e. index, column)
			place.

			seed(str): This is an important parameter. The seed will be used to generate the same
			public and private key pairs. If the seed is lost then access to the data entries in the
			registry will also be lost.

			column_split(list): If you are making a single column hold all the values in the row separated by
			';', column_split will hold the column names for each of the single values
		"""
		self.table_name = table_name
		self.seed = seed
		self.columns = columns
		self.column_split = column_split

		self.logger = logging.getLogger(__name__)
		self.logger.addHandler(logging.NullHandler())
		self.logger.setLevel(logging.DEBUG)

		if verbose:
			formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
			ch = logging.StreamHandler(sys.stdout)
			ch.setFormatter(formatter)
			self.logger.addHandler(ch)

		# Initialize the Registry
		self._pk, self._sk = genKeyPairFromSeed(self.seed)
		self.registry = RegistryEntry(self._pk, self._sk, verbose=verbose)
		self.logger.debug("Initialized Table")

		# The index will be checked for and if there was no such table before then the index will be zero
		self.index, self._index_revision = self.get_index()

	@staticmethod
	def check_table(table_name:str, seed:str):
		"""
			Given a table_name and seed, check if that table already exists in the Skydb.
		"""
		pk, sk = genKeyPairFromSeed(seed)
		registry = RegistryEntry(pk, sk)
		try:	
			index, revision = registry.get_entry(f"INDEX:{table_name}", timeout=5)
			return int(index), revision
		except Timeout as T:
			return None


	def calibrate_index(self):
		self.logger.debug("Inside calibrate_index function")
		index, revision = self.registry.get_entry(f"INDEX:{self.table_name}", timeout=5)
		self._index_revision = revision
		self.index = int(index)
		self.logger.debug("Calibrated Index to: "+str(index))
	
	
	def get_index(self) -> tuple:
		"""
		- Check if the table existed before; if so, retrieve its (index, revision) pair and return it,
		else initialize the index to 0 and return (0, 1). If a Timeout exception is raised then the
		required data is not available at the moment.
		"""
		self.logger.debug("Inside get index function")
		try:
			index, revision = self.registry.get_entry(f"INDEX:{self.table_name}", timeout=5)
			return int(index), revision
		except Timeout as T:
			self.logger.debug("Initializing the index...")
			self.registry.set_entry(data_key=f"INDEX:{self.table_name}", data=f"{0}", revision=1)
			return (0,1)

	@retry(wait=wait_fixed(3), retry=retry_if_exception_type(ReadTimeoutError))
	def add_row(self, row:dict) -> int:
		"""
		Args:
			row(dict): this dictionary must have all the keys that have been passed as columns 
			while initializing this object.
		Returns:
			latest_index(int): This value represents the index of the added row

		"""
		# Check for invalid column names
		for k in row.keys():
			if k not in self.columns:
				raise ValueError("An invalid column has been passed.")

		# Check if all the columns are filled or not
		for k in self.columns:
			if k not in list(row.keys()):
				raise ValueError(f"Column {k} is empty")

		
		self.logger.debug("Adding row: ")
		self.logger.debug(row)
		# Add data to the registry one by one
		for key in row.keys():
			self.registry.set_entry(
					data_key=f"{self.table_name}:{key}:{self.index}",
					data=f"{row[key]}",
					revision=1
				)

		self.index += 1
		self.registry.set_entry(f"INDEX:{self.table_name}",f"{self.index}", self._index_revision+1)
		self._index_revision += 1

		return self.index - 1

	@retry(wait=wait_fixed(3), retry=retry_if_exception_type(ReadTimeoutError))
	def update_row(self, row_index:int, data:dict):
		"""
		Args:
			row_index(int): The index of the row that you want to update.
			data(dict): The data that you want to update with.
		"""

		self.calibrate_index()
		if row_index >= self.index or row_index < 0:
			raise ValueError(f"row_index={row_index} is invalid. It should in the range of 0-{self.index}")

		# Check for invalid column names
		for k in data.keys():
			if k not in self.columns:
				raise ValueError("An invalid column has been passed.")

		self.logger.debug("Updating row at index: "+str(row_index))
		self.logger.debug(data)
		for k in data.keys():
			old_data, revision = self.registry.get_entry(
					data_key=f"{self.table_name}:{k}:{row_index}",
				)
			self.registry.set_entry(
					data_key=f"{self.table_name}:{k}:{row_index}",
					data=f"{data[k]}",
					revision=revision+1,
				)


	@retry(wait=wait_fixed(3), retry=retry_if_exception_type(ReadTimeoutError))
	def fetch_row(self, row_index:int) -> dict:
		"""
		Args:
			row_index(int): The index of the row that you want to fetch
		"""
		self.calibrate_index()
		if row_index >= self.index or row_index < 0:
			raise ValueError(f"row_index={row_index} is invalid. It should in the range of 0-{self.index}")

		self.logger.debug("Fetching row at index: "+str(row_index))
		row = {}
		for c in self.columns:
			data, revision = self.registry.get_entry(data_key=f"{self.table_name}:{c}:{row_index}")
			row[c] = data

		return row

	@retry(wait=wait_fixed(3), retry=retry_if_exception_type(ReadTimeoutError))
	def _fetch(self, condition:dict, n_rows:int, work_index:int, n_skip:int, condition_func):
		""" 
		This function is meant to be run as a thread. Will check through each column of the row and see if it 
		matches the condition
		Args:
			condition(dict): The column values that we need to match
			n_rows(int): The max rows that we need to fetch
			work_index(int): The current working index of the thread
			n_skip(int): The no.of rows to skip for the current thread to get its next work_index

		"""
			
		keys_satisfy = False
		while True:
			if work_index < 0 or work_index >= self.index or len(self.fetched_rows) >= n_rows:
				"""
					- If the thread is on an index which is more than the no. of rows in the table or an
					index which is less than zero.
					- If we have reached the max no.of rows that we needed to fetch
				"""
				break

			""" For each of the given condition, check if the row at work_index matches the condition """
			for k in condition.keys():
				data, revision = self.registry.get_entry(
								data_key=f"{self.table_name}:{k}:{work_index}"
							)
				if condition_func(condition, k, data, self.column_split): # The value at the column matches the condition
					keys_satisfy = True
					break
				else:
					keys_satisfy = False


			if keys_satisfy:
				""" The condition match """
				self.fetch_lock.acquire()
				if len(self.fetched_rows) < n_rows:
					self.fetched_rows[work_index] = self.fetch_row(row_index=work_index)
				self.fetch_lock.release()	
			work_index += n_skip


	@retry(wait=wait_fixed(3), retry=retry_if_exception_type(ReadTimeoutError))
	def fetch(self, condition:dict, start_index:int, n_rows:int=2, num_workers:int=1, condition_func=None) -> dict:
		"""
		- This function will fetch a row or a bunch of rows that satisfy the condition. The condition can be something like
		{'c1':'data 1', 'c2':'JeJa'}. The rows with value 'data 1' at column c1 and value 'JeJa' at column c2
		will be matched and returned.

		- This function searches the rows in descending order, for example if the start_index=28, the function
		will search for rows that match the condition from row 28 all the way to row 0, until the no.of rows 
		matched are equal to n_rows.

		Args:
			Date: 18th Nov 2020
			condition(dict): This variable is basically the values that will be in the row 
			that you want to fetch

			start_index(int): The index from where the searching should start.

			n_rows(int): This variable specifies the no.of rows that I need to fetch at max in this fetch_operation.

			num_workers(int): This value represents the number of threads that will be assigned to search 
			for the rows

			condition_func: A function which takes condition, k, target_value and columns as arguments. You can use this 
			function along with the conditions so that a row matches that condition.

		"""
		self.calibrate_index()
		# Make sure the condition is not empty
		assert len(condition) > 0, "The condition should not be empty"

		# Make sure that the start_index is not greater latest record and not less than zero
		assert start_index in range(0, self.index),\
						f"The start_index:{start_index} is invalid. It should in the range [0,{self.index})."

		# Check if the keys are valid column names
		for k in condition.keys():
			assert (k in self.columns or k in self.column_split), f"Invalid column name: {k}"


		self.fetch_lock = threading.Lock()
		self.fetched_rows = {}
		self.logger.debug("In function fetch. Recieved arguments: ")
		self.logger.debug(condition)
		self.logger.debug("Start Index: "+str(start_index)+" n_rows: "+str(n_rows)+" num_workers: "+str(num_workers))
		if condition_func is None:
			condition_func = _equal
		# We will be searching the registry from latest index to zero. That means searching will
		# take place in descending order, that's why there is `start_index-i` below
		threads = [threading.Thread(target=self._fetch, args=(condition, n_rows, start_index-i, -num_workers, condition_func))\
				for i in range(num_workers)]

		for t in threads:
			t.start()

		for t in threads:
			t.join()

		return self.fetched_rows
Code example #31
        new_row = [''] * row_length
        new_row[0] = flow_date.strftime('%Y-%m-%d')
        for account in flows:
            account_position = account_positions[account]
            logging.info('setting account {} at position {}'.format(account, account_position))
            new_row[account_positions[account] + 1] = float(flows[account])

        logging.info('inserting new row: {}'.format(str(new_row)))
        sheet.insert_rows(row=1, number=1, values=[new_row])

    else:
        logging.info('Google flows sheet already up to date (latest update: {}, last flow: {})'.format(last_date, flow_date))


@tenacity.retry(wait=tenacity.wait_fixed(1), stop=tenacity.stop_after_attempt(5))
def main(args):
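    # No retry= predicate is given, so any exception raised in this function
    # triggers up to five attempts one second apart; if they all fail, the
    # error surfaces wrapped in tenacity.RetryError.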
    full_config_path = os.path.abspath(args.config)
    logging.info('using config file "{}"'.format(full_config_path))
    with open(full_config_path, 'r') as config_file:
        config = json.load(config_file)

    full_flex_path = os.path.abspath(args.file_ibrokers_flex)
    logging.info('using InteractiveBrokers flex file "{}"'.format(full_flex_path))
    with open(full_flex_path, 'r') as ibrokers_response_file:
        ibrokers_response = ibrokers_response_file.read()
        secrets_file_path = os.path.abspath(args.file_secret)
        logging.info('using secrets file "{}"'.format(secrets_file_path))
        with open(secrets_file_path) as json_data:
            secrets_content = json.load(json_data)
            google_credential = secrets_content['google.credential']
Code example #32
    return logs_exchange, instrumentation_exchange


@pytest.fixture(scope="function")
async def rabbit_queue(
    rabbit_channel: aio_pika.Channel,
    rabbit_exchange: Tuple[aio_pika.Exchange, aio_pika.Exchange],
) -> aio_pika.Queue:
    (logs_exchange, instrumentation_exchange) = rabbit_exchange
    # declare queue
    queue = await rabbit_channel.declare_queue(exclusive=True)
    assert queue
    # Binding queue to exchange
    await queue.bind(logs_exchange)
    await queue.bind(instrumentation_exchange)
    yield queue


# HELPERS --


@tenacity.retry(
    wait=tenacity.wait_fixed(5),
    stop=tenacity.stop_after_attempt(60),
    before_sleep=tenacity.before_sleep_log(log, logging.INFO),
    reraise=True,
)
async def wait_till_rabbit_responsive(url: str) -> None:
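    # Opening and immediately closing a connection is enough to prove the
    # broker accepts connections; the decorator retries every 5 seconds for
    # up to 60 attempts, logging before each sleep, and reraises the last
    # connection error if RabbitMQ never comes up.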
    connection = await aio_pika.connect(url)
    await connection.close()
Code example #33
class AzureClient(object):
    """
    Azure client is the wrapper on top of azure python sdk
    """
    def __init__(self, config):
        self._config = config
        self.subscription_id = str(config.get('azure_subscription_id'))
        self._credentials = ServicePrincipalCredentials(
            client_id=config.get('azure_client_id'),
            secret=config.get('azure_secret'),
            tenant=config.get('azure_tenant')
        )

        self._access_token = config.get('azure_access_token')
        self._resource_client = None
        self._storage_client = None
        self._network_management_client = None
        self._subscription_client = None
        self._compute_client = None
        self._access_key_result = None
        self._block_blob_service = None
        self._table_service = None
        self._storage_account = None

        log.debug("azure subscription : %s", self.subscription_id)

    @property
    @tenacity.retry(stop=tenacity.stop_after_attempt(5), reraise=True)
    def access_key_result(self):
        if not self._access_key_result:
            storage_account = self.storage_account

            if self.get_storage_account(storage_account).\
                    provisioning_state.value != 'Succeeded':
                log.debug(
                    "Storage account %s is not in Succeeded state yet. ",
                    storage_account)
                raise WaitStateException(
                    "Waited too long for storage account: {0} to "
                    "become ready.".format(
                        storage_account,
                        self.get_storage_account(storage_account).
                        provisioning_state))

            self._access_key_result = self.storage_client.storage_accounts. \
                list_keys(self.resource_group, storage_account)
        return self._access_key_result

    @property
    def resource_group(self):
        return self._config.get('azure_resource_group')

    @property
    def storage_account(self):
        return self._config.get('azure_storage_account')

    @property
    def region_name(self):
        return self._config.get('azure_region_name')

    @property
    def public_key_storage_table_name(self):
        return self._config.get('azure_public_key_storage_table_name')

    @property
    def storage_client(self):
        if not self._storage_client:
            self._storage_client = \
                StorageManagementClient(self._credentials,
                                        self.subscription_id)
        return self._storage_client

    @property
    def subscription_client(self):
        if not self._subscription_client:
            self._subscription_client = SubscriptionClient(self._credentials)
        return self._subscription_client

    @property
    def resource_client(self):
        if not self._resource_client:
            self._resource_client = \
                ResourceManagementClient(self._credentials,
                                         self.subscription_id)
        return self._resource_client

    @property
    def compute_client(self):
        if not self._compute_client:
            self._compute_client = \
                ComputeManagementClient(self._credentials,
                                        self.subscription_id)
        return self._compute_client

    @property
    def network_management_client(self):
        if not self._network_management_client:
            self._network_management_client = NetworkManagementClient(
                self._credentials, self.subscription_id)
        return self._network_management_client

    @property
    def blob_service(self):
        self._get_or_create_storage_account()
        if not self._block_blob_service:
            if self._access_token:
                token_credential = TokenCredential(self._access_token)
                self._block_blob_service = BlockBlobService(
                    account_name=self.storage_account,
                    token_credential=token_credential)
            else:
                self._block_blob_service = BlockBlobService(
                    account_name=self.storage_account,
                    account_key=self.access_key_result.keys[0].value)
        return self._block_blob_service

    @property
    def table_service(self):
        self._get_or_create_storage_account()
        if not self._table_service:
            self._table_service = TableService(
                self.storage_account,
                self.access_key_result.keys[0].value)
        if not self._table_service. \
                exists(table_name=self.public_key_storage_table_name):
            self._table_service.create_table(
                self.public_key_storage_table_name)
        return self._table_service

    def get_resource_group(self, name):
        return self.resource_client.resource_groups.get(name)

    def create_resource_group(self, name, parameters):
        return self.resource_client.resource_groups. \
            create_or_update(name, parameters)

    def get_storage_account(self, storage_account):
        return self.storage_client.storage_accounts. \
            get_properties(self.resource_group, storage_account)

    def create_storage_account(self, name, params):
        return self.storage_client.storage_accounts. \
            create(self.resource_group, name.lower(), params).result()

    # Create a storage account. To prevent a race condition, try
    # to get or create at least twice
    @tenacity.retry(stop=tenacity.stop_after_attempt(2),
                    retry=tenacity.retry_if_exception_type(CloudError),
                    reraise=True)
    def _get_or_create_storage_account(self):
        if self._storage_account:
            return self._storage_account
        else:
            try:
                self._storage_account = \
                    self.get_storage_account(self.storage_account)
            except CloudError as cloud_error:
                if cloud_error.error.error == "ResourceNotFound":
                    storage_account_params = {
                        'sku': {
                            'name': 'Standard_LRS'
                        },
                        'kind': 'storage',
                        'location': self.region_name,
                    }
                    try:
                        self._storage_account = \
                            self.create_storage_account(self.storage_account,
                                                        storage_account_params)
                    except CloudError as cloud_error2:  # pragma: no cover
                        if cloud_error2.error.error == "AuthorizationFailed":
                            mess = 'The following error was returned by ' \
                                   'Azure:\n%s\n\nThis is likely because the' \
                                   ' Role associated with the provided ' \
                                   'credentials does not allow for Storage ' \
                                   'Account creation.\nA Storage Account is ' \
                                   'necessary in order to perform the ' \
                                   'desired operation. You must either ' \
                                   'provide an existing Storage Account name' \
                                   ' as part of the configuration, or ' \
                                   'elevate the associated Role.\nFor more ' \
                                   'information on roles, see: https://docs.' \
                                   'microsoft.com/en-us/azure/role-based-' \
                                   'access-control/overview\n' % cloud_error2
                            raise ProviderConnectionException(mess)

                        elif cloud_error2.error.error == \
                                "StorageAccountAlreadyTaken":
                            mess = 'The following error was ' \
                                   'returned by Azure:\n%s\n\n' \
                                   'Note that Storage Account names must be ' \
                                   'unique across Azure (not just in your ' \
                                   'subscription).\nFor more information ' \
                                   'see https://docs.microsoft.com/en-us/' \
                                   'azure/azure-resource-manager/resource-' \
                                   'manager-storage-account-name-errors\n' \
                                   % cloud_error2
                            raise InvalidLabelException(mess)
                        else:
                            raise cloud_error2
                else:
                    raise cloud_error

    def list_locations(self):
        return self.subscription_client.subscriptions. \
            list_locations(self.subscription_id)

    def list_vm_firewall(self):
        return self.network_management_client.network_security_groups. \
            list(self.resource_group)

    def create_vm_firewall(self, name, parameters):
        return self.network_management_client.network_security_groups. \
            create_or_update(self.resource_group, name,
                             parameters).result()

    def update_vm_firewall_tags(self, fw_id, tags):
        url_params = azure_helpers.parse_url(VM_FIREWALL_RESOURCE_ID,
                                             fw_id)
        name = url_params.get(VM_FIREWALL_NAME)
        return self.network_management_client.network_security_groups. \
            create_or_update(self.resource_group, name,
                             {'tags': tags,
                              'location': self.region_name}).result()

    def get_vm_firewall(self, fw_id):
        url_params = azure_helpers.parse_url(VM_FIREWALL_RESOURCE_ID,
                                             fw_id)
        fw_name = url_params.get(VM_FIREWALL_NAME)
        return self.network_management_client.network_security_groups. \
            get(self.resource_group, fw_name)

    def delete_vm_firewall(self, fw_id):
        url_params = azure_helpers.parse_url(VM_FIREWALL_RESOURCE_ID,
                                             fw_id)
        name = url_params.get(VM_FIREWALL_NAME)
        self.network_management_client \
            .network_security_groups.delete(self.resource_group, name).wait()

    def create_vm_firewall_rule(self, fw_id,
                                rule_name, parameters):
        url_params = azure_helpers.parse_url(VM_FIREWALL_RESOURCE_ID,
                                             fw_id)
        vm_firewall_name = url_params.get(VM_FIREWALL_NAME)
        return self.network_management_client.security_rules. \
            create_or_update(self.resource_group, vm_firewall_name,
                             rule_name, parameters).result()

    def delete_vm_firewall_rule(self, fw_rule_id, vm_firewall):
        url_params = azure_helpers.parse_url(VM_FIREWALL_RULE_RESOURCE_ID,
                                             fw_rule_id)
        name = url_params.get(VM_FIREWALL_RULE_NAME)
        return self.network_management_client.security_rules. \
            delete(self.resource_group, vm_firewall, name).result()

    def list_containers(self, prefix=None, limit=None, marker=None):
        results = self.blob_service.list_containers(prefix=prefix,
                                                    num_results=limit,
                                                    marker=marker)
        return (results.items, results.next_marker)

    def create_container(self, container_name):
        try:
            self.blob_service.create_container(container_name,
                                               fail_on_exist=True)
        except AzureConflictHttpError as cloud_error:
            if cloud_error.error_code == "ContainerAlreadyExists":
                msg = "The given Bucket name '%s' already exists. Please " \
                      "use the `get` or `find` method to get a reference to " \
                      "an existing Bucket, or specify a new Bucket name to " \
                      "create.\nNote that in Azure, Buckets are contained " \
                      "in Storage Accounts." % container_name
                raise DuplicateResourceException(msg)

        return self.blob_service.get_container_properties(container_name)

    def get_container(self, container_name):
        return self.blob_service.get_container_properties(container_name)

    def delete_container(self, container_name):
        self.blob_service.delete_container(container_name)

    def list_blobs(self, container_name, prefix=None):
        return self.blob_service.list_blobs(container_name, prefix=prefix)

    def get_blob(self, container_name, blob_name):
        return self.blob_service.get_blob_properties(container_name, blob_name)

    def create_blob_from_text(self, container_name, blob_name, text):
        self.blob_service.create_blob_from_text(container_name,
                                                blob_name, text)

    def create_blob_from_file(self, container_name, blob_name, file_path):
        self.blob_service.create_blob_from_path(container_name,
                                                blob_name, file_path)

    def delete_blob(self, container_name, blob_name):
        self.blob_service.delete_blob(container_name, blob_name)

    def get_blob_url(self, container_name, blob_name, expiry_time):
        expiry_date = datetime.datetime.utcnow() + datetime.timedelta(
            seconds=expiry_time)
        sas = self.blob_service.generate_blob_shared_access_signature(
            container_name, blob_name, permission=BlobPermissions.READ,
            expiry=expiry_date)
        return self.blob_service.make_blob_url(container_name, blob_name,
                                               sas_token=sas)

    def get_blob_content(self, container_name, blob_name):
        out_stream = BytesIO()
        self.blob_service.get_blob_to_stream(container_name,
                                             blob_name, out_stream)
        return out_stream

    def create_empty_disk(self, disk_name, params):
        return self.compute_client.disks.create_or_update(
            self.resource_group,
            disk_name,
            params
        ).result()

    def create_snapshot_disk(self, disk_name, params):
        return self.compute_client.disks.create_or_update(
            self.resource_group,
            disk_name,
            params
        ).result()

    def get_disk(self, disk_id):
        url_params = azure_helpers.parse_url(VOLUME_RESOURCE_ID,
                                             disk_id)
        disk_name = url_params.get(VOLUME_NAME)
        return self.compute_client.disks.get(self.resource_group, disk_name)

    def list_disks(self):
        return self.compute_client.disks. \
            list_by_resource_group(self.resource_group)

    def delete_disk(self, disk_id):
        url_params = azure_helpers.parse_url(VOLUME_RESOURCE_ID,
                                             disk_id)
        disk_name = url_params.get(VOLUME_NAME)
        self.compute_client.disks.delete(self.resource_group, disk_name).wait()

    def update_disk_tags(self, disk_id, tags):
        url_params = azure_helpers.parse_url(VOLUME_RESOURCE_ID,
                                             disk_id)
        disk_name = url_params.get(VOLUME_NAME)
        return self.compute_client.disks.update(
            self.resource_group,
            disk_name,
            {'tags': tags},
            raw=True
        )

    def list_snapshots(self):
        return self.compute_client.snapshots. \
            list_by_resource_group(self.resource_group)

    def get_snapshot(self, snapshot_id):
        url_params = azure_helpers.parse_url(SNAPSHOT_RESOURCE_ID,
                                             snapshot_id)
        snapshot_name = url_params.get(SNAPSHOT_NAME)
        return self.compute_client.snapshots.get(self.resource_group,
                                                 snapshot_name)

    def create_snapshot(self, snapshot_name, params):
        return self.compute_client.snapshots.create_or_update(
            self.resource_group,
            snapshot_name,
            params
        ).result()

    def delete_snapshot(self, snapshot_id):
        url_params = azure_helpers.parse_url(SNAPSHOT_RESOURCE_ID,
                                             snapshot_id)
        snapshot_name = url_params.get(SNAPSHOT_NAME)
        self.compute_client.snapshots.delete(self.resource_group,
                                             snapshot_name).wait()

    def update_snapshot_tags(self, snapshot_id, tags):
        url_params = azure_helpers.parse_url(SNAPSHOT_RESOURCE_ID,
                                             snapshot_id)
        snapshot_name = url_params.get(SNAPSHOT_NAME)
        return self.compute_client.snapshots.update(
            self.resource_group,
            snapshot_name,
            {'tags': tags},
            raw=True
        )

    def is_gallery_image(self, image_id):
        url_params = azure_helpers.parse_url(IMAGE_RESOURCE_ID,
                                             image_id)
        # If it is a gallery image, it will always have an offer
        return 'offer' in url_params

    def create_image(self, name, params):
        return self.compute_client.images. \
            create_or_update(self.resource_group, name,
                             params).result()

    def delete_image(self, image_id):
        url_params = azure_helpers.parse_url(IMAGE_RESOURCE_ID,
                                             image_id)
        if not self.is_gallery_image(image_id):
            name = url_params.get(IMAGE_NAME)
            self.compute_client.images.delete(self.resource_group, name).wait()

    def list_images(self):
        azure_images = list(self.compute_client.images.
                            list_by_resource_group(self.resource_group))
        return azure_images

    def list_gallery_refs(self):
        return gallery_image_references

    def get_image(self, image_id):
        url_params = azure_helpers.parse_url(IMAGE_RESOURCE_ID,
                                             image_id)
        if self.is_gallery_image(image_id):
            return GalleryImageReference(publisher=url_params['publisher'],
                                         offer=url_params['offer'],
                                         sku=url_params['sku'],
                                         version=url_params['version'])
        else:
            name = url_params.get(IMAGE_NAME)
            return self.compute_client.images.get(self.resource_group, name)

    def update_image_tags(self, image_id, tags):
        url_params = azure_helpers.parse_url(IMAGE_RESOURCE_ID,
                                             image_id)
        if self.is_gallery_image(image_id):
            return True
        else:
            name = url_params.get(IMAGE_NAME)
            return self.compute_client.images. \
                create_or_update(self.resource_group, name,
                                 {
                                     'tags': tags,
                                     'location': self.region_name
                                 }).result()

    def list_vm_types(self):
        return self.compute_client.virtual_machine_sizes. \
            list(self.region_name)

    def list_networks(self):
        return self.network_management_client.virtual_networks.list(
            self.resource_group)

    def get_network(self, network_id):
        url_params = azure_helpers.parse_url(NETWORK_RESOURCE_ID,
                                             network_id)
        network_name = url_params.get(NETWORK_NAME)
        return self.network_management_client.virtual_networks.get(
            self.resource_group, network_name)

    def create_network(self, name, params):
        return self.network_management_client.virtual_networks. \
            create_or_update(self.resource_group,
                             name,
                             parameters=params).result()

    def delete_network(self, network_id):
        url_params = azure_helpers.parse_url(NETWORK_RESOURCE_ID, network_id)
        network_name = url_params.get(NETWORK_NAME)
        return self.network_management_client.virtual_networks. \
            delete(self.resource_group, network_name).wait()

    def update_network_tags(self, network_id, tags):
        url_params = azure_helpers.parse_url(NETWORK_RESOURCE_ID, network_id)
        network_name = url_params.get(NETWORK_NAME)
        return self.network_management_client.virtual_networks. \
            create_or_update(self.resource_group,
                             network_name, tags).result()

    def get_network_id_for_subnet(self, subnet_id):
        url_params = azure_helpers.parse_url(SUBNET_RESOURCE_ID, subnet_id)
        network_id = NETWORK_RESOURCE_ID[0]
        for key, val in url_params.items():
            network_id = network_id.replace("{" + key + "}", val)
        return network_id

    def list_subnets(self, network_id):
        url_params = azure_helpers.parse_url(NETWORK_RESOURCE_ID, network_id)
        network_name = url_params.get(NETWORK_NAME)
        return self.network_management_client.subnets. \
            list(self.resource_group, network_name)

    def get_subnet(self, subnet_id):
        url_params = azure_helpers.parse_url(SUBNET_RESOURCE_ID,
                                             subnet_id)
        network_name = url_params.get(NETWORK_NAME)
        subnet_name = url_params.get(SUBNET_NAME)
        return self.network_management_client.subnets. \
            get(self.resource_group, network_name, subnet_name)

    def create_subnet(self, network_id, subnet_name, params):
        url_params = azure_helpers.parse_url(NETWORK_RESOURCE_ID, network_id)
        network_name = url_params.get(NETWORK_NAME)
        result_create = self.network_management_client \
            .subnets.create_or_update(
                self.resource_group,
                network_name,
                subnet_name,
                params
            )
        subnet_info = result_create.result()

        return subnet_info

    def __if_subnet_in_use(e):
        # return True if the CloudError exception is due to subnet being in use
        if isinstance(e, CloudError):
            if e.error.error == "InUseSubnetCannotBeDeleted":
                return True
        return False

    @tenacity.retry(stop=tenacity.stop_after_attempt(5),
                    retry=tenacity.retry_if_exception(__if_subnet_in_use),
                    wait=tenacity.wait_fixed(5),
                    reraise=True)
    def delete_subnet(self, subnet_id):
        url_params = azure_helpers.parse_url(SUBNET_RESOURCE_ID,
                                             subnet_id)
        network_name = url_params.get(NETWORK_NAME)
        subnet_name = url_params.get(SUBNET_NAME)

        try:
            result_delete = self.network_management_client \
                .subnets.delete(
                    self.resource_group,
                    network_name,
                    subnet_name
                )
            result_delete.wait()
        except CloudError as cloud_error:
            log.exception(cloud_error.message)
            raise cloud_error
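
    # Note on the decorator above: tenacity.retry_if_exception applies the
    # __if_subnet_in_use predicate to the raised exception, so only a
    # CloudError with code "InUseSubnetCannotBeDeleted" is retried (up to
    # five attempts, five seconds apart); any other exception propagates
    # immediately, and reraise=True re-raises the original error once the
    # attempts are exhausted.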

    def create_floating_ip(self, public_ip_name, public_ip_parameters):
        return self.network_management_client.public_ip_addresses. \
            create_or_update(self.resource_group,
                             public_ip_name,
                             public_ip_parameters).result()

    def get_floating_ip(self, public_ip_id):
        url_params = azure_helpers.parse_url(PUBLIC_IP_RESOURCE_ID,
                                             public_ip_id)
        public_ip_name = url_params.get(PUBLIC_IP_NAME)
        return self.network_management_client. \
            public_ip_addresses.get(self.resource_group, public_ip_name)

    def delete_floating_ip(self, public_ip_id):
        url_params = azure_helpers.parse_url(PUBLIC_IP_RESOURCE_ID,
                                             public_ip_id)
        public_ip_name = url_params.get(PUBLIC_IP_NAME)
        self.network_management_client. \
            public_ip_addresses.delete(self.resource_group,
                                       public_ip_name).wait()

    def update_fip_tags(self, fip_id, tags):
        url_params = azure_helpers.parse_url(PUBLIC_IP_RESOURCE_ID,
                                             fip_id)
        fip_name = url_params.get(PUBLIC_IP_NAME)
        self.network_management_client.public_ip_addresses. \
            create_or_update(self.resource_group,
                             fip_name, tags).result()

    def list_floating_ips(self):
        return self.network_management_client.public_ip_addresses.list(
            self.resource_group)

    def list_vm(self):
        return self.compute_client.virtual_machines.list(
            self.resource_group
        )

    def restart_vm(self, vm_id):
        url_params = azure_helpers.parse_url(VM_RESOURCE_ID,
                                             vm_id)
        vm_name = url_params.get(VM_NAME)
        return self.compute_client.virtual_machines.restart(
            self.resource_group, vm_name).wait()

    def delete_vm(self, vm_id):
        url_params = azure_helpers.parse_url(VM_RESOURCE_ID,
                                             vm_id)
        vm_name = url_params.get(VM_NAME)
        return self.compute_client.virtual_machines.delete(
            self.resource_group, vm_name).wait()

    def get_vm(self, vm_id):
        url_params = azure_helpers.parse_url(VM_RESOURCE_ID,
                                             vm_id)
        vm_name = url_params.get(VM_NAME)
        return self.compute_client.virtual_machines.get(
            self.resource_group,
            vm_name,
            expand='instanceView'
        )

    def create_vm(self, vm_name, params):
        return self.compute_client.virtual_machines. \
            create_or_update(self.resource_group,
                             vm_name, params).result()

    def update_vm(self, vm_id, params):
        url_params = azure_helpers.parse_url(VM_RESOURCE_ID,
                                             vm_id)
        vm_name = url_params.get(VM_NAME)
        return self.compute_client.virtual_machines. \
            create_or_update(self.resource_group,
                             vm_name, params, raw=True)

    def deallocate_vm(self, vm_id):
        url_params = azure_helpers.parse_url(VM_RESOURCE_ID,
                                             vm_id)
        vm_name = url_params.get(VM_NAME)
        self.compute_client. \
            virtual_machines.deallocate(self.resource_group,
                                        vm_name).wait()

    def generalize_vm(self, vm_id):
        url_params = azure_helpers.parse_url(VM_RESOURCE_ID,
                                             vm_id)
        vm_name = url_params.get(VM_NAME)
        self.compute_client.virtual_machines. \
            generalize(self.resource_group, vm_name)

    def start_vm(self, vm_id):
        url_params = azure_helpers.parse_url(VM_RESOURCE_ID,
                                             vm_id)
        vm_name = url_params.get(VM_NAME)
        self.compute_client.virtual_machines. \
            start(self.resource_group,
                  vm_name).wait()

    def update_vm_tags(self, vm_id, tags):
        url_params = azure_helpers.parse_url(VM_RESOURCE_ID,
                                             vm_id)
        vm_name = url_params.get(VM_NAME)
        self.compute_client.virtual_machines. \
            create_or_update(self.resource_group,
                             vm_name, tags).result()

    def delete_nic(self, nic_id):
        nic_params = azure_helpers.\
            parse_url(NETWORK_INTERFACE_RESOURCE_ID, nic_id)
        nic_name = nic_params.get(NETWORK_INTERFACE_NAME)
        self.network_management_client. \
            network_interfaces.delete(self.resource_group,
                                      nic_name).wait()

    def get_nic(self, nic_id):
        nic_params = azure_helpers.\
            parse_url(NETWORK_INTERFACE_RESOURCE_ID, nic_id)
        nic_name = nic_params.get(NETWORK_INTERFACE_NAME)
        return self.network_management_client. \
            network_interfaces.get(self.resource_group, nic_name)

    def update_nic(self, nic_id, params):
        nic_params = azure_helpers.\
            parse_url(NETWORK_INTERFACE_RESOURCE_ID, nic_id)
        nic_name = nic_params.get(NETWORK_INTERFACE_NAME)
        async_nic_creation = self.network_management_client. \
            network_interfaces.create_or_update(
                self.resource_group,
                nic_name,
                params
            )
        nic_info = async_nic_creation.result()
        return nic_info

    def create_nic(self, nic_name, params):
        return self.network_management_client. \
            network_interfaces.create_or_update(
                self.resource_group,
                nic_name,
                params
            ).result()

    def create_public_key(self, entity):
        return self.table_service. \
            insert_or_replace_entity(self.public_key_storage_table_name,
                                     entity)

    def get_public_key(self, name):
        entities = self.table_service. \
            query_entities(self.public_key_storage_table_name,
                           "Name eq '{0}'".format(name), num_results=1)

        return entities.items[0] if len(entities.items) > 0 else None

    def delete_public_key(self, entity):
        self.table_service.delete_entity(self.public_key_storage_table_name,
                                         entity.PartitionKey, entity.RowKey)

    def list_public_keys(self, partition_key, limit=None, marker=None):
        entities = self.table_service. \
            query_entities(self.public_key_storage_table_name,
                           "PartitionKey eq '{0}'".format(partition_key),
                           marker=marker, num_results=limit)
        return (entities.items, entities.next_marker)

    def delete_route_table(self, route_table_name):
        self.network_management_client. \
            route_tables.delete(self.resource_group, route_table_name
                                ).wait()

    def attach_subnet_to_route_table(self, subnet_id, route_table_id):
        url_params = azure_helpers.parse_url(SUBNET_RESOURCE_ID,
                                             subnet_id)
        network_name = url_params.get(NETWORK_NAME)
        subnet_name = url_params.get(SUBNET_NAME)

        subnet_info = self.network_management_client.subnets.get(
            self.resource_group,
            network_name,
            subnet_name
        )
        if subnet_info:
            subnet_info.route_table = {
                'id': route_table_id
            }

            result_create = self.network_management_client. \
                subnets.create_or_update(
                 self.resource_group,
                 network_name,
                 subnet_name,
                 subnet_info)
            subnet_info = result_create.result()

        return subnet_info

    def detach_subnet_to_route_table(self, subnet_id, route_table_id):
        url_params = azure_helpers.parse_url(SUBNET_RESOURCE_ID,
                                             subnet_id)
        network_name = url_params.get(NETWORK_NAME)
        subnet_name = url_params.get(SUBNET_NAME)

        subnet_info = self.network_management_client.subnets.get(
            self.resource_group,
            network_name,
            subnet_name
        )

        if subnet_info and subnet_info.route_table.id == route_table_id:
            subnet_info.route_table = None

            result_create = self.network_management_client. \
                subnets.create_or_update(
                 self.resource_group,
                 network_name,
                 subnet_name,
                 subnet_info)
            subnet_info = result_create.result()

        return subnet_info

    def list_route_tables(self):
        return self.network_management_client. \
            route_tables.list(self.resource_group)

    def get_route_table(self, router_id):
        url_params = azure_helpers.parse_url(ROUTER_RESOURCE_ID,
                                             router_id)
        router_name = url_params.get(ROUTER_NAME)
        return self.network_management_client. \
            route_tables.get(self.resource_group, router_name)

    def create_route_table(self, route_table_name, params):
        return self.network_management_client. \
            route_tables.create_or_update(
             self.resource_group,
             route_table_name, params).result()

    def update_route_table_tags(self, route_table_name, tags):
        self.network_management_client.route_tables. \
            create_or_update(self.resource_group,
                             route_table_name, tags).result()
コード例 #34
            print("App Group {} selected! ✅".format(selected_group["name"]))
            break

    return selected_group["name"], filtered_apps


@add_line_after
def get_subdomain(domain):
    while True:
        subdomain = click.prompt("Enter subdomain").strip()
        if is_valid_subdomain(subdomain) and is_subdomain_available(subdomain):
            print("Site Domain: {}.{}".format(subdomain, domain))
            return subdomain


@retry(stop=stop_after_attempt(2), wait=wait_fixed(5))
def upload_backup_file(file_type, file_path):
    return session.post(files_url,
                        data={},
                        files={
                            "file": open(file_path, "rb"),
                            "is_private": 1,
                            "folder": "Home",
                            "method": "press.api.site.upload_backup",
                            "type": file_type
                        })


@add_line_after
def upload_backup(local_site):
    # take backup
コード例 #35
ファイル: migrator.py プロジェクト: ysalimi/ansibullbot
class IssueMigrator(object):
    def __init__(self, token):
        self.token = token
        self.postcount = 0
        self.s = requests.Session()
        self.migration_map = {}

    def get_headers(self):
        headers = {'Authorization': 'token %s' % self.token}
        return headers

    @retry(stop=stop_after_attempt(5), wait=wait_fixed(60 * 5))
    def __post(self, url, headers, payload):

        logging.info('POSTCOUNT: {}'.format(self.postcount))

        if self.postcount > 18:
            logging.info('sleep 2m')
            time.sleep(2 * 60)
            self.postcount = 0

        logging.info(headers)
        rr = self.s.post(url, headers=headers, data=json.dumps(payload))

        if isinstance(rr.json(), dict) and 'documentation_url' in rr.json():
            logging.error(rr.json())
            raise Exception

        self.postcount += 1
        return rr

    def migrate(self, issueurl, destrepo):

        # https://developer.github.com/v3/issues/#create-an-issue
        self.s = requests.Session()

        # split the source into relevant data
        parts = urlparse(issueurl).path.split('/')
        src_repo = '/'.join(parts[1:3])
        src_number = int(parts[-1])
        src_api_url = 'https://api.github.com/repos/{}/issues/{}'.format(
            src_repo, src_number)

        # get the api data
        src_rr = self.s.get(src_api_url, headers=self.get_headers())
        src_data = src_rr.json()

        # get the labels
        labels = sorted([x['name'] for x in src_data['labels']])
        vlabel = [x for x in labels if x.startswith('affects_')]
        if len(vlabel) >= 1:
            vlabel = vlabel[0]
        else:
            vlabel = None

        # get the comments
        src_comment_url = src_api_url + '/comments'
        src_comment_rr = self.s.get(src_comment_url,
                                    headers=self.get_headers())
        src_comment_data = src_comment_rr.json()

        # paginate for more comments
        if len(src_comment_data
               ) != src_data['comments'] or src_comment_rr.links:
            while 'next' in src_comment_rr.links:
                src_comment_rr = self.s.get(
                    src_comment_rr.links['next']['url'],
                    headers=self.get_headers())
                src_comment_data += src_comment_rr.json()

        if not self.migration_map.get(issueurl, {}).get('new'):

            # create the post url
            new_post_url = 'https://api.github.com/repos/{}/issues'.format(
                destrepo)

            # create the payload
            newbody = 'From @{} on {}\r\n'.format(src_data['user']['login'],
                                                  src_data['created_at'])
            newbody += src_data['body'] + '\r\n'
            newbody += 'Copied from original issue: {}#{}\r\n'.format(
                src_repo, src_number)

            payload = {'title': src_data['title'], 'body': newbody}

            # create the new issue
            logging.info('copy {} to {}'.format(issueurl, destrepo))
            new_rr = self.__post(new_post_url, self.get_headers(), payload)

            new_data = new_rr.json()
            #new_api_issue = new_rr.json()['url']
            new_html_issue = new_rr.json()['html_url']

            self.migration_map[issueurl] = {}
            self.migration_map[issueurl]['new'] = new_html_issue
            self.migration_map[issueurl]['comments'] = []

        else:

            # need to fetch the api data for the new issue
            newurl = self.migration_map.get(issueurl, {}).get('new')
            parts = urlparse(newurl).path.split('/')
            new_repo = '/'.join(parts[1:3])
            new_number = int(parts[-1])
            new_api_url = 'https://api.github.com/repos/{}/issues/{}'.format(
                new_repo, new_number)
            new_rr = self.s.get(new_api_url, headers=self.get_headers())
            new_data = new_rr.json()
            new_html_issue = new_rr.json()['html_url']

        # add the version label if known
        if vlabel:
            clabels = [x['name'] for x in new_data['labels']]
            if vlabel not in clabels:
                label_url = new_data['labels_url']
                label_url = label_url.split('{')[0]
                payload = [vlabel]
                logging.info('adding {} label to {}'.format(
                    vlabel, new_html_issue))
                new_rr = self.__post(label_url, self.get_headers(), payload)

        # add the comments
        totalc = len(src_comment_data)
        new_comments_url = new_data['comments_url']
        for idc, comment in enumerate(src_comment_data):

            if '<!-- boilerplate: repomerge -->' in comment['body']:
                logging.info('skip comment {} of {} -- {}'.format(
                    idc, totalc, new_html_issue))
                continue

            if '<!--- boilerplate: issue_renotify_maintainer --->' in comment[
                    'body']:
                logging.info('skip comment {} of {} -- {}'.format(
                    idc, totalc, new_html_issue))
                continue

            if idc in self.migration_map[issueurl]['comments']:
                logging.info('skip comment {} of {} -- {}'.format(
                    idc, totalc, new_html_issue))
                continue

            newbody = 'From @{} on {}\r\n\r\n'.format(comment['user']['login'],
                                                      comment['created_at'])
            newbody += comment['body']
            payload = {'body': newbody}
            logging.info('copy comment {} of {} to {}'.format(
                idc, totalc, new_html_issue))
            self.__post(new_comments_url, self.get_headers(), payload)
            self.migration_map[issueurl]['comments'].append(idc)

        # add note about migration in old issue
        comment = 'This issue was migrated to {}\r\n'.format(new_html_issue)
        payload = {'body': comment}
        curl = src_api_url + '/comments'
        logging.info('note migration in {}'.format(issueurl))
        self.__post(curl, self.get_headers(), payload)

        # close the old issue
        payload = {'state': 'closed'}
        logging.info('close {}'.format(issueurl))
        closure_rr = self.s.patch(src_api_url,
                                  headers=self.get_headers(),
                                  data=json.dumps(payload))
        if closure_rr.status_code != 200:
            logging.info('closing {} failed'.format(issueurl))

        self.s.close()
コード例 #36
    os.environ["REGISTRY_PW"] = ""

    yield url
    # restore environs
    os.environ = old
    # remove registry
    if not keep_docker_up:
        container.stop()
        container.remove(force=True)

        while docker_client.containers.list(filters={"name": container.name}):
            time.sleep(1)


@tenacity.retry(
    wait=tenacity.wait_fixed(2),
    stop=tenacity.stop_after_delay(20),
    before_sleep=tenacity.before_sleep_log(log, logging.INFO),
    reraise=True,
)
def wait_till_registry_is_responsive(url: str) -> bool:
    docker_client = docker.from_env()
    docker_client.login(registry=url, username="******")
    return True


# ********************************************************* Services ***************************************
def _pull_push_service(pull_key: str, tag: str, new_registry: str,
                       node_meta_schema: Dict) -> Dict[str, str]:
    client = docker.from_env()
    # pull image from original location
コード例 #37
class Aws:
    """The AWS class handles all interactions with AWS."""
    def __init__(self):
        self.red = RedisHandler().redis_sync()
        self.redis_key = config.get("aws.iamroles_redis_key", "IAM_ROLE_CACHE")
        self.dynamo = IAMRoleDynamoHandler()

    @retry(
        stop_max_attempt_number=3,
        wait_exponential_multiplier=1000,
        wait_exponential_max=1000,
    )
    def _add_role_to_redis(self, role_entry: dict):
        """Add the role to redis with a retry.

        :param role_entry:
        :return:
        """
        self.red.hset(self.redis_key, role_entry["arn"],
                      json.dumps(role_entry))

    @retry(
        stop_max_attempt_number=3,
        wait_exponential_multiplier=1000,
        wait_exponential_max=1000,
    )
    def _fetch_role_from_redis(self, role_arn: str):
        """Fetch the role from redis with a retry.

        :param role_arn:
        :return:
        """
        return self.red.hget(self.redis_key, role_arn)

    @retry(
        stop_max_attempt_number=3,
        wait_exponential_multiplier=1000,
        wait_exponential_max=1000,
    )
    def _invoke_lambda(self, client: object, function_name: str,
                       payload: bytes):
        """Invoke the lambda function for creating the user-roles."""
        return client.invoke(
            FunctionName=function_name,
            InvocationType="RequestResponse",
            Payload=payload,
        )

    async def _cloudaux_to_aws(self, role):
        """Convert the cloudaux get_role into the get_account_authorization_details equivalent."""
        # Pop out the fields that are not required:
        # Arn and RoleName will be popped off later:
        unrequired_fields = ["_version", "MaxSessionDuration"]

        for uf in unrequired_fields:
            role.pop(uf, None)

        # Fix the Managed Policies:
        role["AttachedManagedPolicies"] = list(
            map(
                lambda x: {
                    "PolicyName": x["name"],
                    "PolicyArn": x["arn"]
                },
                role.get("ManagedPolicies", []),
            ))
        role.pop("ManagedPolicies", None)

        # Fix the tags:
        if isinstance(role.get("Tags", {}), dict):
            role["Tags"] = list(
                map(
                    lambda key: {
                        "Key": key,
                        "Value": role["Tags"][key]
                    },
                    role.get("Tags", {}),
                ))

        # Note: the instance profile list is verbose -- not transforming it (outside of renaming the field)!
        role["InstanceProfileList"] = role.pop("InstanceProfiles", [])

        # Inline Policies:
        role["RolePolicyList"] = list(
            map(
                lambda name: {
                    "PolicyName": name,
                    "PolicyDocument": role["InlinePolicies"][name],
                },
                role.get("InlinePolicies", {}),
            ))
        role.pop("InlinePolicies", None)

        return role

    async def fetch_iam_role(self,
                             account_id: str,
                             role_arn: str,
                             force_refresh: bool = False) -> dict:
        """Fetch the IAM Role template from Redis and/or Dynamo.

        :param account_id:
        :param role_arn:
        :return:
        """
        log_data: dict = {
            "function":
            f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "role_arn": role_arn,
            "account_id": account_id,
            "force_refresh": force_refresh,
        }

        result: dict = {}

        if not force_refresh:
            # First check redis:
            result: str = await sync_to_async(self._fetch_role_from_redis
                                              )(role_arn)

            if result:
                result: dict = json.loads(result)

                # If this item is less than an hour old, then return it from Redis.
                if result["ttl"] > int(
                    (datetime.utcnow() - timedelta(hours=1)).timestamp()):
                    log_data[
                        "message"] = "Role is in Redis -- returning cached role."
                    log.debug(log_data)
                    stats.count(
                        "aws.fetch_iam_role.in_redis",
                        tags={
                            "account_id": account_id,
                            "role_arn": role_arn
                        },
                    )
                    result["policy"] = json.loads(result["policy"])
                    return result

            # If not in Redis or it's older than an hour, proceed to DynamoDB:
            result = await sync_to_async(self.dynamo.fetch_iam_role
                                         )(role_arn, account_id)

        # If it's NOT in dynamo, or if we're forcing a refresh, we need to reach out to AWS and fetch:
        if force_refresh or not result.get("Item"):
            if force_refresh:
                log_data[
                    "message"] = "Force refresh is enabled. Going out to AWS."
                stats.count(
                    "aws.fetch_iam_role.force_refresh",
                    tags={
                        "account_id": account_id,
                        "role_arn": role_arn
                    },
                )
            else:
                log_data[
                    "message"] = "Role is missing in DDB. Going out to AWS."
                stats.count(
                    "aws.fetch_iam_role.missing_dynamo",
                    tags={
                        "account_id": account_id,
                        "role_arn": role_arn
                    },
                )
            log.debug(log_data)
            try:
                tasks = []
                role_name = role_arn.split("/")[-1]
                # Instantiate a cached CloudAux client
                client = await sync_to_async(boto3_cached_conn)(
                    "iam",
                    account_number=account_id,
                    assume_role=config.get("policies.role_name"),
                )
                conn = {
                    "account_number": account_id,
                    "assume_role": config.get("policies.role_name"),
                    "region": config.region,
                }

                role_details = asyncio.ensure_future(
                    sync_to_async(client.get_role)(RoleName=role_name))
                tasks.append(role_details)

                all_tasks = [
                    get_role_managed_policies,
                    get_role_inline_policies,
                    list_role_tags,
                ]

                for t in all_tasks:
                    tasks.append(
                        asyncio.ensure_future(
                            sync_to_async(t)({
                                "RoleName": role_name
                            }, **conn)))

                responses = asyncio.gather(*tasks)
                result = await responses
                role = result[0]["Role"]
                role["ManagedPolicies"] = result[1]
                role["InlinePolicies"] = result[2]
                role["Tags"] = result[3]

            except ClientError as ce:
                if ce.response["Error"]["Code"] == "NoSuchEntity":
                    # The role does not exist:
                    log_data["message"] = "Role does not exist in AWS."
                    log.error(log_data)
                    stats.count(
                        "aws.fetch_iam_role.missing_in_aws",
                        tags={
                            "account_id": account_id,
                            "role_arn": role_arn
                        },
                    )
                    return None

                else:
                    log_data["message"] = f"Some other error: {ce.response}"
                    log.error(log_data)
                    stats.count(
                        "aws.fetch_iam_role.aws_connection_problem",
                        tags={
                            "account_id": account_id,
                            "role_arn": role_arn
                        },
                    )
                    raise

            # Format the role for DynamoDB and Redis:
            await self._cloudaux_to_aws(role)
            result = {
                "arn":
                role.get("Arn"),
                "name":
                role.pop("RoleName"),
                "resourceId":
                role.pop("RoleId"),
                "accountId":
                account_id,
                "ttl":
                int((datetime.utcnow() + timedelta(hours=36)).timestamp()),
                "policy":
                self.dynamo.convert_role_to_json(role),
                "templated":
                self.red.hget(
                    config.get("templated_roles.redis_key",
                               "TEMPLATED_ROLES_v2"),
                    role.get("Arn").lower(),
                ),
            }

            # Sync with DDB:
            await sync_to_async(self.dynamo.sync_iam_role_for_account)(result)
            log_data["message"] = "Role fetched from AWS, and synced with DDB."
            stats.count(
                "aws.fetch_iam_role.fetched_from_aws",
                tags={
                    "account_id": account_id,
                    "role_arn": role_arn
                },
            )

        else:
            log_data["message"] = "Role fetched from DDB."
            stats.count(
                "aws.fetch_iam_role.in_dynamo",
                tags={
                    "account_id": account_id,
                    "role_arn": role_arn
                },
            )

            # Fix the TTL:
            result["Item"]["ttl"] = int(result["Item"]["ttl"])
            result = result["Item"]

        # Update the redis cache:
        stats.count(
            "aws.fetch_iam_role.in_dynamo",
            tags={
                "account_id": account_id,
                "role_arn": role_arn
            },
        )
        await sync_to_async(self._add_role_to_redis)(result)

        log_data["message"] += " Updated Redis."
        log.debug(log_data)

        result["policy"] = json.loads(result["policy"])
        return result

    async def call_user_lambda(self,
                               role: str,
                               user_email: str,
                               account_id: str,
                               user_role_name: str = "user") -> str:
        """Call out to the lambda function to provision the per-user role for the account."""
        # Get the template's name based on the account and user role name:
        accounts = await get_account_id_to_name_mapping()
        account_name = accounts[account_id]
        role_to_fetch = (
            f"arn:aws:iam::{account_id}:role/{account_name}_{user_role_name}")

        # Fetch the role
        role_details = await self.fetch_iam_role(account_id, role_to_fetch)

        # If we did not receive any role details, raise an exception:
        if not role_details:
            raise NoRoleTemplateException(f"Unable to locate {role_to_fetch}")

        # Prepare the payload for the lambda and send it out:
        payload = json.dumps({
            "user_role_short_name":
            role.split("role/")[1],
            "user_email":
            user_email,
            "account_number":
            account_id,
            "primary_policies":
            role_details["policy"].get("RolePolicyList", []),
            "managed_policy_arns":
            role_details["policy"].get("AttachedManagedPolicies", []),
        }).encode()

        client = boto3.client("lambda", region_name=config.region)

        lambda_result = await sync_to_async(self._invoke_lambda)(
            client,
            config.get("lambda_role_creator.function_name", "UserRoleCreator"),
            payload,
        )
        lambda_result = json.loads(lambda_result["Payload"].read().decode())

        if not lambda_result.get("success", False):
            raise UserRoleLambdaException(
                f"Received invalid response: {lambda_result}")

        return f'arn:aws:iam::{lambda_result["account_number"]}:role/{lambda_result["role_name"]}'

    @tenacity.retry(
        wait=tenacity.wait_fixed(2),
        stop=tenacity.stop_after_attempt(5),
        retry=tenacity.retry_if_exception_type(UserRoleNotAssumableYet),
    )
    async def get_credentials(
        self,
        user: str,
        role: str,
        enforce_ip_restrictions: bool = True,
        user_role: bool = False,
        account_id: str = None,
        custom_ip_restrictions: list = None,
    ) -> dict:
        """Get Credentials will return the list of temporary credentials from AWS."""
        log_data = {
            "function":
            f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "user": user,
            "role": role,
            "enforce_ip_restrictions": enforce_ip_restrictions,
            "custom_ip_restrictions": custom_ip_restrictions,
            "message": "Generating credentials",
        }
        session = boto3.Session()
        client = session.client(
            "sts",
            region_name=config.region,
            endpoint_url=f"https://sts.{config.region}.amazonaws.com",
        )

        ip_restrictions = config.get("aws.ip_restrictions")
        stats.count("aws.get_credentials", tags={"role": role, "user": user})

        # If this is a dynamic request, then we need to fetch the role details, call out to the lambda
        # wait for it to complete, assume the role, and then return the assumed credentials back.
        if user_role:
            stats.count("aws.call_user_lambda",
                        tags={
                            "role": role,
                            "user": user
                        })
            try:
                role = await self.call_user_lambda(role, user, account_id)
            except Exception as e:
                raise e

        try:
            if enforce_ip_restrictions and ip_restrictions:
                policy = json.dumps(
                    dict(
                        Version="2012-10-17",
                        Statement=[
                            dict(
                                Effect="Deny",
                                Action="*",
                                Resource="*",
                                Condition=dict(NotIpAddress={
                                    "aws:SourceIP": ip_restrictions
                                }),
                            ),
                            dict(Effect="Allow", Action="*", Resource="*"),
                        ],
                    ))

                credentials = await sync_to_async(client.assume_role)(
                    RoleArn=role,
                    RoleSessionName=user.lower(),
                    Policy=policy,
                    DurationSeconds=config.get("aws.session_duration", 3600),
                )
                credentials["Credentials"]["Expiration"] = int(
                    credentials["Credentials"]["Expiration"].timestamp())
                return credentials
            if custom_ip_restrictions:
                policy = json.dumps(
                    dict(
                        Version="2012-10-17",
                        Statement=[
                            dict(
                                Effect="Deny",
                                Action="*",
                                Resource="*",
                                Condition=dict(
                                    NotIpAddress={
                                        "aws:SourceIP": custom_ip_restrictions
                                    }),
                            ),
                            dict(Effect="Allow", Action="*", Resource="*"),
                        ],
                    ))

                credentials = await sync_to_async(client.assume_role)(
                    RoleArn=role,
                    RoleSessionName=user.lower(),
                    Policy=policy,
                    DurationSeconds=config.get("aws.session_duration", 3600),
                )
                credentials["Credentials"]["Expiration"] = int(
                    credentials["Credentials"]["Expiration"].timestamp())
                return credentials

            credentials = await sync_to_async(client.assume_role)(
                RoleArn=role,
                RoleSessionName=user.lower(),
                DurationSeconds=config.get("aws.session_duration", 3600),
            )
            credentials["Credentials"]["Expiration"] = int(
                credentials["Credentials"]["Expiration"].timestamp())
            log.debug(log_data)
            return credentials
        except ClientError as e:
            # TODO(ccastrapel): Determine if user role was really just created, or if this is an older role.
            if user_role:
                raise UserRoleNotAssumableYet(e.response["Error"])
            raise

    async def generate_url(
        self,
        user: str,
        role: str,
        region: str = "us-east-1",
        user_role: bool = False,
        account_id: str = None,
    ) -> str:
        """Generate URL will get temporary credentials and craft a URL with those credentials."""
        function = (
            f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
        )
        log_data = {
            "function": function,
            "user": user,
            "role": role,
            "message": "Generating authenticated AWS console URL",
        }
        log.debug(log_data)
        credentials = await self.get_credentials(
            user,
            role,
            user_role=user_role,
            account_id=account_id,
            enforce_ip_restrictions=False,
        )

        credentials_d = {
            "sessionId": credentials.get("Credentials", {}).get("AccessKeyId"),
            "sessionKey": credentials.get("Credentials",
                                          {}).get("SecretAccessKey"),
            "sessionToken": credentials.get("Credentials",
                                            {}).get("SessionToken"),
        }

        req_params = {
            "Action": "getSigninToken",
            "Session": bleach.clean(json.dumps(credentials_d)),
            "DurationSeconds": config.get("aws.session_duration", 3600),
        }

        http_client = AsyncHTTPClient(force_instance=True)

        url_with_params: str = url_concat(
            config.get("aws.federation_url",
                       "https://signin.aws.amazon.com/federation"),
            req_params,
        )
        r = await http_client.fetch(url_with_params,
                                    ssl_options=ssl.SSLContext())
        token = json.loads(r.body)

        login_req_params = {
            "Action":
            "login",
            "Issuer":
            config.get("aws.issuer"),
            "Destination": ("{}".format(
                config.get(
                    "aws.console_url",
                    "https://{}.console.aws.amazon.com").format(region))),
            "SigninToken":
            bleach.clean(token.get("SigninToken")),
            "SessionDuration":
            config.get("aws.session_duration", 3600),
        }

        r2 = requests_sync.Request(
            "GET",
            config.get("aws.federation_url",
                       "https://signin.aws.amazon.com/federation"),
            params=login_req_params,
        )
        url = r2.prepare().url
        return url

    async def sns_publisher_group_requests(self, user, group, justification,
                                           request_id, bg_check_passed):
        raise NotImplementedError()

    async def sns_publish_policy_requests(self, request, request_uri):
        raise NotImplementedError()

    async def send_communications_policy_change_request(
            self, request, send_sns=False):
        """
        Optionally send a notification when there's a new policy change request

        :param request:
        :param send_sns:
        :return:
        """
        log_data: dict = {
            "function":
            f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "message": "Function is not configured.",
        }
        log.warning(log_data)
        return

    async def send_communications_new_policy_request(self, extended_request,
                                                     admin_approved,
                                                     approval_probe_approved):
        """
        Optionally send a notification when there's a new policy change request

        :param approval_probe_approved:
        :param admin_approved:
        :param extended_request:
        :return:
        """
        log_data: dict = {
            "function":
            f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "message": "Function is not configured.",
        }
        log.warning(log_data)
        return

    @staticmethod
    def handle_detected_role(role):
        pass

    async def should_auto_approve_policy(self, events, user, user_groups):
        return False

    async def should_auto_approve_policy_v2(self, extended_request, user,
                                            user_groups):
        return {"approved": False}
コード例 #38

@pytest.fixture(scope='session')
def docker_services(docker_services):
    """Waits until the app in Docker becomes responsive.
    Overrides the fixture from pytest-docker.
    """
    waiter_port = docker_services.port_for('waiter', 8080)
    waiter_url = f'http://localhost:{waiter_port}'
    _wait_for_compose(waiter_url)
    return docker_services


@tenacity.retry(
    stop=tenacity.stop_after_delay(10),
    wait=tenacity.wait_fixed(0.1),
)
def _wait_for_compose(app_url: str):
    response = requests.get(app_url)
    response.raise_for_status()


@pytest.fixture(scope='session')
def app_url(docker_services):
    app_port = docker_services.port_for('api', 8080)
    return f'http://localhost:{app_port}'


def test_names_from_greetings_get_saved(app_url, docker_services):
    names = ['Wieńczysław', 'Spycigniew', 'Perystaltyka']
    for name in names:
コード例 #39
ファイル: initial_data.py プロジェクト: Rev-AMP/backend
from tenacity import after_log, before_log, retry, stop_after_attempt, wait_fixed

from app.db.init_db import init_db
from app.db.session import SessionLocal

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

max_tries = 60 * 5  # 5 minutes
wait_seconds = 1


@retry(
    stop=stop_after_attempt(max_tries),
    wait=wait_fixed(wait_seconds),
    before=before_log(logger, logging.INFO),
    after=after_log(logger, logging.WARN),
)
def init() -> None:
    try:
        db = SessionLocal()
        # Try to create session to check if DB is awake
        db.execute("SELECT 1")
        init_db(db)
    except Exception as e:
        logger.error(e)
        raise e


def main() -> None:
コード例 #40
import random


import tenacity


def do_something():
    if random.randint(0, 1) == 0:
        print('Failure')
        raise RuntimeError
    print('Success')


@tenacity.retry(wait=tenacity.wait_fixed(10) + tenacity.wait_random(0, 3))
def do_something_and_retry():
    do_something()


do_something_and_retry()
コード例 #41
ファイル: helpers.py プロジェクト: msyesyan/seldon-core
class MicroserviceWrapper:
    def __init__(self, app_location, envs={}, grpc=False, tracing=False):
        self.app_location = app_location
        self.env_vars = self._env_vars(envs, grpc)
        self.cmd = self._get_cmd(tracing)

    def _env_vars(self, envs, grpc):
        env_vars = dict(os.environ)
        env_vars.update(envs)
        env_vars.update(
            {
                "PYTHONUNBUFFERED": "x",
                "PYTHONPATH": self.app_location,
                "APP_HOST": "127.0.0.1",
                "PREDICTIVE_UNIT_SERVICE_PORT": "5000",
                "PREDICTIVE_UNIT_METRICS_SERVICE_PORT": "6005",
                "PREDICTIVE_UNIT_METRICS_ENDPOINT": "/metrics-endpoint",
            }
        )

        s2i_env_file = os.path.join(self.app_location, ".s2i", "environment")
        with open(s2i_env_file) as fh:
            for line in fh.readlines():
                line = line.strip()
                if line:
                    key, value = line.split("=", 1)
                    key, value = key.strip(), value.strip()
                    if key and value:
                        env_vars[key] = value

        if grpc:
            env_vars["API_TYPE"] = "GRPC"

        return env_vars

    def _get_cmd(self, tracing):
        cmd = (
            "seldon-core-microservice",
            self.env_vars["MODEL_NAME"],
            self.env_vars["API_TYPE"],
            "--service-type",
            self.env_vars["SERVICE_TYPE"],
            "--persistence",
            self.env_vars["PERSISTENCE"],
        )

        if tracing:
            cmd += ("--tracing",)

        return cmd

    def __enter__(self):
        try:
            logging.info(f"starting: {' '.join(self.cmd)}")
            self.p = Popen(
                self.cmd, cwd=self.app_location, env=self.env_vars, preexec_fn=os.setsid
            )

            time.sleep(1)
            self._wait_until_ready()

            return self.p
        except Exception:
            logging.error("microservice failed to start")
            raise RuntimeError("Server did not bind to 127.0.0.1:5000")

    @retry(wait=wait_fixed(4), stop=stop_after_attempt(10))
    def _wait_until_ready(self):
        logging.debug("=== trying again")
        s1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        r1 = s1.connect_ex(("127.0.0.1", 5000))
        s2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        r2 = s2.connect_ex(("127.0.0.1", 6005))
        if r1 != 0 or r2 != 0:
            raise EOFError("Server not ready yet")

        logging.info("microservice ready")

    def _get_return_code(self):
        self.p.poll()
        return self.p.returncode

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.p:
            group_id = os.getpgid(self.p.pid)
            # Kill the entire process groups (including subprocesses of self.p)
            os.killpg(group_id, signal.SIGKILL)
コード例 #42
ファイル: helper.py プロジェクト: ramineni/my_congress
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
#

import os

import tenacity


@tenacity.retry(stop=tenacity.stop_after_attempt(20),
                wait=tenacity.wait_fixed(1))
def retry_check_function_return_value(f, expected_value, error_msg=None):
    """Check if function f returns expected value."""
    if not error_msg:
        error_msg = 'Expected value "%s" not found' % expected_value
    r = f()
    if r != expected_value:
        raise Exception(error_msg)


def retry_on_exception(f):
    """Decorator to retry on an exception."""
    def wrapper():
        try:
            return f()
        except Exception:
コード例 #43
    def reply(self, reply=None, failure=None):
        """Send back reply to the RPC client
        :param reply: Dictionary, reply. In case of exception should be None
        :param failure: Tuple, should be a sys.exc_info() tuple.
            Should be None if RPC request was successfully processed.

        :return RpcReplyPikaIncomingMessage, message with reply
        """

        if self.reply_q is None:
            return

        reply_outgoing_message = RpcReplyPikaOutgoingMessage(
            self._pika_engine, self.msg_id, reply=reply, failure_info=failure,
            content_type=self._content_type,
        )

        def on_exception(ex):
            if isinstance(ex, pika_drv_exc.ConnectionException):
                LOG.warning(
                    "Connectivity related problem during reply sending. %s",
                    ex
                )
                return True
            else:
                return False

        if self._pika_engine.rpc_reply_retry_attempts:
            retrier = tenacity.retry(
                stop=(
                    tenacity.stop_never
                    if self._pika_engine.rpc_reply_retry_attempts == -1 else
                    tenacity.stop_after_attempt(
                        self._pika_engine.rpc_reply_retry_attempts
                    )
                ),
                retry=tenacity.retry_if_exception(on_exception),
                wait=tenacity.wait_fixed(
                    self._pika_engine.rpc_reply_retry_delay
                )
            )
        else:
            retrier = None

        try:
            timeout = (None if self.expiration_time is None else
                       max(self.expiration_time - time.time(), 0))
            with timeutils.StopWatch(duration=timeout) as stopwatch:
                reply_outgoing_message.send(
                    reply_q=self.reply_q,
                    stopwatch=stopwatch,
                    retrier=retrier
                )
            LOG.debug(
                "Message [id:'%s'] replied to '%s'.", self.msg_id, self.reply_q
            )
        except Exception:
            LOG.exception(
                "Message [id:'%s'] wasn't replied to : %s", self.msg_id,
                self.reply_q
            )
コード例 #44
import random


import tenacity


def do_something():
    if random.randint(0, 1) == 0:
        print('Failure')
        raise RuntimeError
    print('Success')


@tenacity.retry(wait=tenacity.wait_fixed(1))
def do_something_and_retry():
    do_something()


do_something_and_retry()
コード例 #45
ファイル: login.py プロジェクト: cn595980161/qcc_test
def retry_if_result_none(result):
    print('result:', result)
    return result is None


def return_last_value(retry_state):
    """return the result of the last call attempt"""
    return retry_state.outcome.result()


@retry(retry=(retry_if_result(retry_if_result_none)
              | retry_if_exception_type()),
       retry_error_callback=return_last_value,
       stop=stop_after_attempt(5),
       wait=wait_fixed(1))
async def mouse_slide(page=None):
    # await asyncio.sleep(3)
    print('Start sliding')
    try:

        await page.waitForSelector(".nc_iconfont.btn_slide")
        await page.hover('.nc_iconfont.btn_slide')
        await page.mouse.down()

        await page.mouse.move(2000, 0, {'delay': random.randint(1000, 2000)})
        await page.mouse.up()
        await page.screenshot(
            {'path': './screenshot/headless-slide-result.png'})
    except Exception as e:
        print(e, '     :slide login False')
コード例 #46
ファイル: impl_pika.py プロジェクト: ozamiatin/oslo.messaging
    def send(self, target, ctxt, message, wait_for_reply=None, timeout=None,
             retry=None):
        with timeutils.StopWatch(duration=timeout) as stopwatch:
            if retry is None:
                retry = self._pika_engine.default_rpc_retry_attempts

            exchange = self._pika_engine.get_rpc_exchange_name(
                target.exchange
            )

            def on_exception(ex):
                if isinstance(ex, pika_drv_exc.ExchangeNotFoundException):
                    # it is desirable to create the exchange because if we
                    # send to an exchange which does not exist, we get a
                    # ChannelClosed exception and need to reconnect
                    try:
                        self._declare_rpc_exchange(exchange, stopwatch)
                    except pika_drv_exc.ConnectionException as e:
                        LOG.warning("Problem during declaring exchange. %s", e)
                    return True
                elif isinstance(ex, (pika_drv_exc.ConnectionException,
                                     exceptions.MessageDeliveryFailure)):
                    LOG.warning("Problem during message sending. %s", ex)
                    return True
                else:
                    return False

            if retry:
                retrier = tenacity.retry(
                    stop=(tenacity.stop_never if retry == -1 else
                          tenacity.stop_after_attempt(retry)),
                    retry=tenacity.retry_if_exception(on_exception),
                    wait=tenacity.wait_fixed(self._pika_engine.rpc_retry_delay)
                )
            else:
                retrier = None

            if target.fanout:
                return self.cast_all_workers(
                    exchange, target.topic, ctxt, message, stopwatch, retrier
                )

            routing_key = self._pika_engine.get_rpc_queue_name(
                target.topic, target.server, retrier is None
            )

            msg = pika_drv_msg.RpcPikaOutgoingMessage(self._pika_engine,
                                                      message, ctxt)
            try:
                reply = msg.send(
                    exchange=exchange,
                    routing_key=routing_key,
                    reply_listener=(
                        self._reply_listener if wait_for_reply else None
                    ),
                    stopwatch=stopwatch,
                    retrier=retrier
                )
            except pika_drv_exc.ExchangeNotFoundException as ex:
                try:
                    self._declare_rpc_exchange(exchange, stopwatch)
                except pika_drv_exc.ConnectionException as e:
                    LOG.warning("Problem during declaring exchange. %s", e)
                raise ex

            if reply is not None:
                if reply.failure is not None:
                    raise reply.failure

                return reply.result
コード例 #47
ファイル: helpers.py プロジェクト: aberrier/kurby
            selected_animes_with_score_by_id[anime.id] = (score, anime)
    return list(x[1] for x in sorted(
        selected_animes_with_score_by_id.values(),
        key=lambda x: x[0],
        reverse=True,
    ))[:limit]


@retry(
    reraise=True,
    before_sleep=before_sleep_log(logger, logging.ERROR),
    retry=retry_if_exception_type(httpcore.TimeoutException)
    | retry_if_exception_type(httpx.NetworkError)
    | retry_if_exception_type(httpx.TransportError)
    | retry_if_exception_type(httpx.HTTPStatusError),
    wait=wait_fixed(2) + wait_random(1, 10),
)
def download_source(source: AnimeSource, filepath: Path):
    with get_auth_client() as new_client:
        url = urljoin(TWIST_CDN_URL, source.source)
        with new_client.stream(
                "GET",
                url,
                headers={
                    **dict(new_client.headers), "referer": TWIST_URL
                },
        ) as response:
            if response.status_code == HTTPStatus.NOT_FOUND:
                raise MissingEpisodeError(response=response)
            response.raise_for_status()
            total = int(response.headers.get("Content-Length"))
コード例 #48
        logging.info("Execute notify-bootstrapped action after cold boot on "
                     "the leader node ...")
        zaza.model.run_action_on_leader(self.application,
                                        "notify-bootstrapped",
                                        action_params={})
        logging.info("Wait for application states ...")
        for unit in zaza.model.get_units(self.application):
            zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
        test_config = lifecycle_utils.get_charm_config(fatal=False)
        zaza.model.wait_for_application_states(
            states=test_config.get("target_deploy_status", {}))


@tenacity.retry(
    retry=tenacity.retry_if_result(lambda is_new: is_new is False),
    wait=tenacity.wait_fixed(5),  # interval between retries
    stop=tenacity.stop_after_attempt(10))  # retry times
def retry_is_new_crm_master(test, old_crm_master):
    """Check new crm master with retries.

    Return True if a new crm master is detected; retry up to 10 times if False.
    """
    new_crm_master = test.get_crm_master()
    if new_crm_master and new_crm_master != old_crm_master:
        logging.info(
            "New crm_master unit detected on {}".format(new_crm_master))
        return True
    return False


class PerconaClusterScaleTests(PerconaClusterBaseTest):
コード例 #49
ファイル: helper.py プロジェクト: openstack/congress

def str2pol(policy_string, theories=None):
    return compile.parse(policy_string, theories=theories)


def pol2str(policy):
    return " ".join(str(x) for x in policy)


def form2str(formula):
    return str(formula)


@tenacity.retry(stop=tenacity.stop_after_attempt(1000),
                wait=tenacity.wait_fixed(0.1))
def retry_check_for_last_message(obj):
    if not hasattr(obj, "last_msg"):
        raise AttributeError("Missing 'last_msg' attribute")


@tenacity.retry(stop=tenacity.stop_after_attempt(1000),
                wait=tenacity.wait_fixed(0.1))
def retry_check_for_message_to_arrive(obj):
    if not hasattr(obj.msg, "body"):
        raise AttributeError("Missing 'body' attribute")


@tenacity.retry(stop=tenacity.stop_after_attempt(1000),
                wait=tenacity.wait_fixed(0.1))
def retry_check_for_message_data(obj, data):
コード例 #50
class Market:
    """MicroTE is a futures trading based market design for transactive energy as part of TREX

    The market mechanism here works more like standard futures contracts,
    where the delivery time interval is submitted along with the bid or ask.

    Bids and asks are organized by source type;
    the addition of delivery time requires that the bids and asks be further organized by time slot.

    Bids/asks are accepted for any time slot starting from one step into the future to infinity.
    The minimum close slot is determined by 'close_steps', where a close_steps of 2 is 1 step into the future.
    The minimum close time slot is the last delivery slot that will accept bids/asks.

    """
    def __init__(self, market_id, **kwargs):
        self.server_online = False
        self.run = True
        # Initialize timing intervals and definitions
        self.__status = {
            'active_participants': 0,
            'round_metered': 0,
            'round_matched': False,
            'round_settled': [],
            'round_settle_delivered': []
        }
        self.__timing = {
            'mode': 'sim',
            'timezone': kwargs['timezone'],
            'current_round': (0, 60),
            'duration': 60,
            'last_round': (0, 0),
            'close_steps':
            kwargs['close_steps'] if 'close_steps' in kwargs else 2
            # close steps = 2 for 1 step-ahead market agent debugging

            # Ideally close_steps should be 16 for a 15-step ahead market.
            # bids and asks are settled 15 steps ahead of delivery time
            # settle takes 1 step after bid/ask submission
        }

        self.__db = {}
        self.save_transactions = True
        self.market_id = market_id
        self.__client = kwargs['sio_client']
        self.__server_ts = 0

        self.__clients = {}
        self.__participants = {}

        self.__grid = Grid(**kwargs['grid_params'])

        self.__open = {}
        self.__settled = {}
        self.__transactions = []
        self.__transaction_last_record_time = 0
        self.transactions_count = 0

    def __time(self):
        """Return time based on time convention

        Market timing operates in two modes: real-time, and simulation.
        In real-time mode, the market has control of timing, and
        in simulation mode, the simulation controller has control
        Because of this, the way time propagates through the system is slightly different between modes

        In real-time mode, master time is acquired from the system clock of the market
        In simulation mode, master time is the last time tuple that was received from the simulation controller
        """
        if self.__timing['mode'] == 'rt':
            return calendar.timegm(time.gmtime())
        if self.__timing['mode'] == 'sim':
            return self.__server_ts

    def mode_switch(self, mode):
        """Switch timing modes between real-time mode and simulation mode

        """
        self.__timing['mode'] = mode

    async def open_db(self, db_string, table_name):
        if not self.save_transactions:
            return

        self.__db['path'] = db_string
        self.__db['table_name'] = table_name

        if 'table' not in self.__db or self.__db['table'] is None:
            table_name = self.__db.pop('table_name') + '_market'
            await db_utils.create_table(db_string=db_string,
                                        table_type='market2',
                                        table_name=table_name)
            self.__db['table'] = db_utils.get_table(db_string, table_name)

    async def register(self):
        """Function that attempts to register Market client with socket.io server in the market namespace

        """
        async def register_cb(success):
            if success:
                self.server_online = True

        client_data = {'type': ('market', 'MicroTE'), 'id': self.market_id}
        await self.__client.emit('register',
                                 client_data,
                                 namespace='/market',
                                 callback=register_cb)

    async def participant_connected(self, client_data):
        if client_data['id'] not in self.__participants:
            self.__participants[client_data['id']] = {
                'sid': client_data['sid'],
                'online': True,
                'meter': {}
            }
        else:
            # if previously registered participant returned, update with new session ID and toggle online status
            self.__participants[client_data['id']].update({
                'sid':
                client_data['sid'],
                'online':
                True
            })
        self.__clients[client_data['sid']] = client_data['id']
        self.__status['active_participants'] = min(
            self.__status['active_participants'] + 1, len(self.__participants))
        return self.market_id, client_data['sid']

    async def participant_disconnected(self, participant_id):
        # if a registered participant disconnects for any reason, switch online status to off
        self.__participants[participant_id].update({'online': False})
        self.__clients.pop(self.__participants[participant_id]['sid'], None)
        self.__status['active_participants'] -= 1

    async def __classify_source(self, source):
        return await source_classifier.classify(source)

    # Initialize variables for new time step
    def __reset_status(self):
        self.__status['round_metered'] = 0
        self.__status['round_matched'] = False
        self.__status['round_settled'].clear()
        self.__status['round_settle_delivered'].clear()

    async def __start_round(self, duration):
        """
        Message all participants the start of the current round, as well as the duration

        Because having somewhat synchronized timing is key to proper market operation, the start round message mostly
        contains useful time intervals. Additional info that is deemed useful can be included,
        such as grid prices that change with time.
        As always, it is advised to keep the message length minimal to maximize performance and to conserve bandwidth.

        Participants can take the times in this message and determine clock differences and communication delays.
        This will be necessary in real-time mode to ensure actions are received by the market before the start of the next round,
        as the market does not wait in real-time mode.
        """
        start_time = self.__time()
        self.__reset_status()
        market_info = {
            str(self.__timing['current_round']): {
                'grid': {
                    'buy_price': self.__grid.buy_price(),
                    'sell_price': self.__grid.sell_price()
                }
            },
            str(self.__timing['next_settle']): {
                'grid': {
                    'buy_price': self.__grid.buy_price(),
                    'sell_price': self.__grid.sell_price()
                }
            },
        }

        start_msg = {
            'time': start_time,
            'duration': duration,
            'timezone': self.__timing['timezone'],
            'last_round': self.__timing['last_round'],
            'current_round': self.__timing['current_round'],
            'last_settle': self.__timing['last_settle'],
            'next_settle': self.__timing['next_settle'],
            'market_info': market_info,
        }
        await self.__client.emit('start_round', start_msg, namespace='/market')

    async def submit_bid(self, message: dict):
        """Processes bids sent from the participants

        If the action from a participant is valid, then an entry will be made on the market for matching.
        In all cases, a confirmation message will be sent back to the sender indicating success or failure.
        The handling of the confirmation message is up to the participant.

        If the message is valid, an open record will be made in the time delivery slot.
        The record is a dictionary containing the following:

        - 'uuid'
        - 'participant_id'
        - 'session_id'
        - 'price'
        - 'time_submission'
        - 'quantity'
        - 'lock'

        Note: as of April 1, 2020, 'lock' is not being used in simulation mode.
        Deprecation in general is under consideration

        Parameters
        ----------
        message : dict
            Message should be a dictionary containing the following:
            - 'participant_id'
            - 'session_id'
            - 'quantity' (quantity in Wh)
            - 'price' (price in $/kWh)
            - 'time_delivery'

        Returns
        -------
        confirmation
            returns the participant session id and confirmation message for SIO server callback

            - For all invalid entries, the confirmation message is a dictionary with 'uuid' as the key and None as the value
            - For all valid entries, the confirmation message will be a dictionary containing the following:

                - 'uuid'
                - 'time_submission'
                - 'price'
                - 'quantity'
                - 'time_delivery'

        """

        # entry validity check step 1: quantity must be positive
        if message['quantity'] <= 0:
            # raise Exception('quantity must be a positive integer')
            return message['session_id'], {'uuid': None}

        # if entry is valid, then update entry with market specific info
        # convert kwh price to token price

        entry = {
            'uuid': cuid(),
            'participant_id': message['participant_id'],
            'session_id': message['session_id'],
            'price': message['price'],
            'time_submission': self.__time(),
            'quantity': message['quantity'],
            'lock': False
        }

        # create a new time slot container if the time slot doesn't exist
        time_delivery = tuple(message['time_delivery'])
        if time_delivery not in self.__open:
            self.__open[time_delivery] = {'bid': []}

        # if the time slot exists but no entry container exists, create it
        if 'bid' not in self.__open[time_delivery]:
            self.__open[time_delivery]['bid'] = []

        # add open entry
        self.__open[time_delivery]['bid'].append(entry)

        reply = {
            'uuid': entry['uuid'],
            'time_submission': entry['time_submission'],
            'price': entry['price'],
            'quantity': entry['quantity'],
            'time_delivery': time_delivery
        }

        return message['session_id'], reply

    async def submit_ask(self, message: dict):
        """Processes bids/asks sent from the participants

        If action from participants are valid, then an entry will be made on the market for matching.
        In all cases, a confirmation message will be sent back to the sender indicating success or failure.
        The handling of the confirmation message is up to the participant.

        If the message and entry_type are valid, an open record will be made in the time delivery slot for source type.
        the record is a dictionary containing the following:

        - 'uuid'
        - 'participant_id'
        - 'session_id'
        - 'source'
        - 'price'
        - 'time_submission'
        - 'quantity'
        - 'lock'

        Note: as of April 1, 2020, 'lock' is not being used in simulation mode.
        Deprecation in general is under consideration

        Parameters
        ----------
        message : dict
            Message should be a dictionary containing the following:

            - 'participant_id'
            - 'session_id'
            - 'quantity' (quantity in Wh)
            - 'price' (price in $/kWh)
            - 'source' (such as 'solar', 'bess', 'wind', etc. Must be classifiable)
            - 'time_delivery'

        Returns
        -------
        confirmation
            returns the participant session id and confirmation message for SIO server callback

            - For all invalid entries, the confirmation message is a dictionary with 'uuid' as the key and None as the value
            - For all valid entries, the confirmation message will be a dictionary containing the following:

                - 'uuid'
                - 'time_submission'
                - 'source'
                - 'price'
                - 'quantity'
                - 'time_delivery'

        """

        # if entry_type not in {'bid', 'ask'}:
        #     # raise Exception('invalid action')
        #     return message['session_id'], {'uuid': None}

        # entry validity check step 1: quantity must be positive
        if message['quantity'] <= 0:
            # raise Exception('quantity must be a positive integer')
            return message['session_id'], {'uuid': None}

        # entry validity check step 2: source must be classifiable
        source_type = await self.__classify_source(message['source'])
        if not source_type:
            # raise Exception('source cannot be classified')
            return message['session_id'], {'uuid': None}

        # if entry is valid, then update entry with market specific info
        # convert kwh price to token price

        entry = {
            'uuid': cuid(),
            'participant_id': message['participant_id'],
            'session_id': message['session_id'],
            'source': message['source'],
            'price': message['price'],
            'time_submission': self.__time(),
            'quantity': message['quantity'],
            'lock': False
        }

        # create a new time slot container if the time slot doesn't exist
        time_delivery = tuple(message['time_delivery'])
        if time_delivery not in self.__open:
            self.__open[time_delivery] = {'ask': []}

        # if the time slot exists but no entry container exists, create it
        if 'ask' not in self.__open[time_delivery]:
            self.__open[time_delivery]['ask'] = []

        # add open entry
        self.__open[time_delivery]['ask'].append(entry)

        reply = {
            'uuid': entry['uuid'],
            'time_submission': entry['time_submission'],
            'source': entry['source'],
            'price': entry['price'],
            'quantity': entry['quantity'],
            'time_delivery': time_delivery
        }

        return message['session_id'], reply

    async def __match(self, time_delivery):
        """Matches bids with asks for a single source type in a time slot

        THe matching and settlement process closely resemble double auctions.
        For all bids/asks for a source in the delivery time slots, highest bids are matched with lowest asks
        and settled pairwise. Quantities can be partially settled. Unsettled quantities are discarded. Participants are only obligated to buy/sell quantities settled for the delivery period.

        Parameters
        ----------
        time_delivery : tuple
            Tuple containing the start and end timestamps in UNIX timestamp format indicating the interval for energy to be delivered.

        Notes
        -----
        Presently, the seller is settled at the ask price and the buyer at the bid price of the matched pair.
        In the near future, dedicated, more sophisticated functions for determining the settlement price will be implemented.

        """

        if time_delivery not in self.__open:
            return

        if 'ask' not in self.__open[time_delivery]:
            return

        if 'bid' not in self.__open[time_delivery]:
            return

        # remove zero-quantity bid and ask entries
        # sort bids by decreasing price and asks by increasing price
        self.__open[time_delivery]['ask'][:] = \
            sorted([ask for ask in self.__open[time_delivery]['ask'] if ask['quantity'] > 0],
                   key=itemgetter('price'), reverse=False)
        self.__open[time_delivery]['bid'][:] = \
            sorted([bid for bid in self.__open[time_delivery]['bid'] if bid['quantity'] > 0],
                   key=itemgetter('price'), reverse=True)

        bids = self.__open[time_delivery]['bid']
        asks = self.__open[time_delivery]['ask']

        for bid, ask in itertools.product(bids, asks):
            if ask['price'] > bid['price']:
                continue

            if bid['participant_id'] == ask['participant_id']:
                continue

            # if bid['source'] != ask['source']:
            #     continue

            if bid['lock'] or ask['lock']:
                continue

            if bid['quantity'] <= 0 or ask['quantity'] <= 0:
                continue

            if bid['participant_id'] not in self.__participants:
                bid['lock'] = True
                continue

            if ask['participant_id'] not in self.__participants:
                ask['lock'] = True
                continue

            # Settle highest price bids with lowest price asks
            await self.__settle(bid, ask, time_delivery)

    async def __settle(self,
                       bid: dict,
                       ask: dict,
                       time_delivery: tuple,
                       settlement_method=None,
                       locking=False):
        """Performs settlement for bid/ask pairs found during the matching process.

        If bid/ask are valid, the bid/ask quantities are adjusted, a commitment record is created, and a settlement confirmation is sent to both participants.

        Parameters
        ----------
        bid: dict
            bid entry to be settled. Should be a reference to the open bid

        ask: dict
            ask entry to be settled. Should be a reference to the open ask

        time_delivery : tuple
            Tuple containing the start and end timestamps in UNIX timestamp format.

        locking: bool
            Optional locking mode, which locks the bid and ask until a callback is received after the settlement
            confirmation is sent. The default value is False.

            Currently, locking should be disabled in simulation mode, as waiting for the callback causes some
            settlements to be incomplete, likely due to a flaw in the implementation or a poor understanding of
            how callbacks affect the sequence of events executed in async mode.

        Notes
        -----
        It is possible to settle directly with the grid, although this feature is currently not used by the agents and is under consideration to be deprecated.


        """

        # grid is not allowed to interact through market
        if ask['source'] == 'grid':
            return

        # only proceed to settle if settlement quantity is positive
        quantity = min(bid['quantity'], ask['quantity'])
        if quantity <= 0:
            return

        if locking:
            # lock the bid and ask until confirmations are received
            ask['lock'] = True
            bid['lock'] = True

        commit_id = cuid()
        settlement_time = self.__timing['current_round'][1]
        settlement_price_sell = ask['price']
        settlement_price_buy = bid['price']
        record = {
            'quantity': quantity,
            'seller_id': ask['participant_id'],
            'buyer_id': bid['participant_id'],
            'energy_source': ask['source'],
            'settlement_price_sell': settlement_price_sell,
            'settlement_price_buy': settlement_price_buy,
            'time_purchase': settlement_time
        }

        # Record successful settlements
        if time_delivery not in self.__settled:
            self.__settled[time_delivery] = {}

        self.__settled[time_delivery][commit_id] = {
            'time_settlement': settlement_time,
            'source': ask['source'],
            'record': record,
            'ask': ask,
            'seller_id': ask['participant_id'],
            'bid': bid,
            'buyer_id': bid['participant_id'],
            'lock': locking
        }

        message = {
            'commit_id': commit_id,
            'ask_id': ask['uuid'],
            'bid_id': bid['uuid'],
            'source': ask['source'],
            'quantity': quantity,
            'sell_price': settlement_price_sell,
            'buy_price': settlement_price_buy,
            'buyer_id': bid['participant_id'],
            'seller_id': ask['participant_id'],
            'time_delivery': time_delivery
        }

        if locking:
            await self.__client.emit('send_settlement',
                                     message,
                                     namespace='/market',
                                     callback=self.__settle_confirm_lock)
        else:
            await self.__client.emit('send_settlement',
                                     message,
                                     namespace='/market')
            bid['quantity'] = max(
                0, bid['quantity'] -
                self.__settled[time_delivery][commit_id]['record']['quantity'])
            ask['quantity'] = max(
                0, ask['quantity'] -
                self.__settled[time_delivery][commit_id]['record']['quantity'])
        self.__status['round_settled'].append(commit_id)

    # after settlement confirmation, update bid and ask quantities
    async def settlement_delivered(self, commit_id):
        self.__status['round_settle_delivered'].append(commit_id)

    async def __settle_confirm_lock(self, message):
        """Callback for settle in locking mode

        """

        time_delivery = tuple(message['time_delivery'])
        if time_delivery not in self.__settled:
            return

        commit_id = message['commit_id']
        ask = self.__settled[time_delivery][commit_id]['ask']
        bid = self.__settled[time_delivery][commit_id]['bid']

        ask['lock'] = not message['seller']
        bid['lock'] = not message['buyer']

        if not ask['lock'] and not bid['lock']:
            self.__settled[time_delivery][commit_id]['lock'] = False
            bid['quantity'] = max(
                0, bid['quantity'] -
                self.__settled[time_delivery][commit_id]['record']['quantity'])
            ask['quantity'] = max(
                0, ask['quantity'] -
                self.__settled[time_delivery][commit_id]['record']['quantity'])

    async def meter_data(self, message):
        """Update meter data from participant

        Meter data should be received from participants at the end of each round for delivery.
        """

        # meter = {
        #     'time_interval': (),
        #     'generation': {
        #         'solar': 0,
        #         'bess': 0
        #     },
        #     'consumption': {
        #         'bess': {
        #             'solar': 0,
        #         },
        #         'other': {
        #             'solar': 0,
        #             'bess': 0,
        #             'other': 0
        #         }
        #     }
        # }

        # TODO: add data validation later
        participant_id = message['participant_id']
        time_delivery = tuple(message['meter']['time_interval'])
        self.__participants[participant_id]['meter'][time_delivery] = message[
            'meter']
        self.__status['round_metered'] += 1

    async def __process_settlements(self, time_delivery, source_type):
        physical_transactions = []
        financial_transactions = []
        settlements = self.__settled[time_delivery]
        for buyer in self.__participants:
            for seller in self.__participants:
                if buyer == seller:
                    continue
                # make sure the buyer and seller are online
                if not self.__participants[buyer]['online']:
                    continue
                if not self.__participants[seller]['online']:
                    continue

                # Extract settlements involving buyer and seller (that are not locked)
                relevant_settlements = {
                    k: v
                    for (k, v) in settlements.items()
                    if settlements[k]['lock'] is False
                    and settlements[k]['buyer_id'] == buyer
                    and settlements[k]['seller_id'] == seller
                }

                if relevant_settlements:
                    for commit_id in relevant_settlements.keys():
                        energy_source = self.__settled[time_delivery][
                            commit_id]['source']
                        energy_type = await self.__classify_source(
                            energy_source)
                        if energy_type != source_type:
                            continue

                        settled_quantity = self.__settled[time_delivery][
                            commit_id]['record']['quantity']
                        if not settled_quantity:
                            continue
                        residual_generation = \
                            self.__participants[seller]['meter'][time_delivery]['generation'][energy_source]
                        residual_consumption = \
                            self.__participants[buyer]['meter'][time_delivery]['consumption']['other']['external']

                        # check to see if physical generation is less than settled quantity
                        # extra_purchase = 0
                        deficit_generation = max(
                            0, settled_quantity - residual_generation)
                        # Add on the amount that needed to be bought from the grid?
                        # self.__participants[buyer]['meter']['consumption']['other']['external'] += deficit_generation
                        # if not deficit_generation:
                        # check if settled quantity is greater than residual consumption
                        # if settled amount is greater than residual generation, then figure out
                        # the financial compensation.
                        extra_purchase = max(
                            0, settled_quantity - residual_consumption)
                        # print(settled_quantity, energy_source, residual_generation, residual_consumption, extra_purchase, deficit_generation)
                        pt, ft = await self.__transfer_energy(
                            time_delivery, commit_id, extra_purchase,
                            deficit_generation)
                        physical_transactions.extend(pt)
                        financial_transactions.extend(ft)
        return physical_transactions, financial_transactions

    # async def __process_self_consumption(self, participant_id):

    async def __scrub_financial_transaction(self, transactions):
        scrubbed_transactions = {}
        for transaction in transactions:
            if transaction['seller_id'] not in scrubbed_transactions:
                scrubbed_transactions[transaction['seller_id']] = {
                    'buy': [],
                    'sell': []
                }
            if transaction['buyer_id'] not in scrubbed_transactions:
                scrubbed_transactions[transaction['buyer_id']] = {
                    'buy': [],
                    'sell': []
                }
            scrubbed_transaction = {
                'quantity': transaction['quantity'],
                'energy_source': transaction['energy_source'],
                'settlement_price_sell': transaction['settlement_price_sell'],
                'settlement_price_buy': transaction['settlement_price_buy'],
                'time_creation': transaction['time_creation'],
                'time_purchase': transaction['time_purchase']
            }
            scrubbed_transactions[transaction['buyer_id']]['buy'].append(
                scrubbed_transaction)
            scrubbed_transactions[transaction['seller_id']]['sell'].append(
                scrubbed_transaction)
        return scrubbed_transactions

    async def __process_energy_exchange(self, time_delivery):
        """The main function for finalizing energy exchange using settlements and meter data.

        Energy exchange takes the following steps in order of priority:

        1. Exchange settled energy
        2. Process self-consumption
        3. Process residual energy

        Aside from perfect settlements (i.e., metered amounts exactly matching settled amounts), which are not
        expected in realistic scenarios, each settlement falls into one of the cases below.

        There are six possible scenarios for each settlement:

        1. Seller's residual generation is the exact amount as settled
        2. Seller's residual generation is less than settled
        3. Seller's residual generation is more than settled
        4. Buyer's residual consumption is the exact amount as settled
        5. Buyer's residual consumption is less than settled
        6. Buyer's residual consumption is more than settled

        Aside from 1 and 4, all other scenarios require additional handling.

        - Scenario 2: The seller must either pay for the shortage from the grid, or compensate by injecting the shortage from their BESS. BESS compensation must be done prior to sending meter data.
        - Scenario 3: The residuals are sold to the grid at grid prices
        - Scenario 5: The buyer must pay the seller the full amount of the settlement. The residual generation cannot be sold to the grid again, as that would be double compensation.
        - Scenario 6: The buyer must buy the residual consumption from the grid at grid prices.

        On top of properly balancing the market, these schemes should also provide sufficient punishment to drive the agents toward more optimal decisions.

        """
        # Step 1: exchange settled energy (process auction deliveries)
        transactions = []
        financial_transactions = []
        if time_delivery in self.__settled:
            for source_type in {'dispatch', 'non_dispatch'}:
                # important: dispatch must be first!!!
                pt, ft = await self.__process_settlements(
                    time_delivery, source_type)
                transactions.extend(pt + ft)
                financial_transactions.extend(ft)

        scrubbed_financial_transactions = await self.__scrub_financial_transaction(
            financial_transactions)

        # Steps 2 & 3
        # process self-consumption
        # process residual energy
        for participant_id in self.__participants:
            if not self.__participants[participant_id]['meter']:
                continue

            if time_delivery not in self.__participants[participant_id][
                    'meter']:
                print(participant_id, 'not metered')
                continue

            # self consumption
            for load in self.__participants[participant_id]['meter'][
                    time_delivery]['consumption']:
                for source in self.__participants[participant_id]['meter'][
                        time_delivery]['consumption'][load]:
                    if source in self.__participants[participant_id]['meter'][
                            time_delivery]['generation']:
                        # assuming everything is perfectly sub metered
                        quantity = self.__participants[participant_id][
                            'meter'][time_delivery]['consumption'][load][
                                source]

                        if quantity > 0:
                            transaction_record = {
                                'quantity': quantity,
                                'seller_id': participant_id,
                                'buyer_id': participant_id,
                                'energy_source': source,
                                'settlement_price_sell': 0,
                                'settlement_price_buy': 0,
                                'time_creation': time_delivery[0],
                                'time_purchase': time_delivery[1],
                                'time_consumption': time_delivery[1]
                            }
                            transactions.append(transaction_record.copy())
                            self.__participants[participant_id]['meter'][
                                time_delivery]['consumption'][load][
                                    source] -= quantity

            extra_transactions = {
                'participant': participant_id,
                'time_delivery': time_delivery,
                'grid': {
                    'buy': [],
                    'sell': []
                }
            }
            # sell residual generation(s) to the grid
            for source in self.__participants[participant_id]['meter'][
                    time_delivery]['generation']:
                residual_generation = self.__participants[participant_id][
                    'meter'][time_delivery]['generation'][source]
                if residual_generation > 0:
                    transaction_record = {
                        'quantity': residual_generation,
                        'seller_id': participant_id,
                        'buyer_id': self.__grid.id,
                        'energy_source': source,
                        'settlement_price_sell': self.__grid.sell_price(),
                        'settlement_price_buy': self.__grid.sell_price(),
                        'time_creation': time_delivery[0],
                        'time_purchase': time_delivery[1],
                        'time_consumption': time_delivery[1]
                    }
                    transactions.append(transaction_record.copy())
                    self.__participants[participant_id]['meter'][
                        time_delivery]['generation'][
                            source] -= residual_generation
                    extra_transactions['grid']['sell'].append(
                        transaction_record.copy())
            # buy residual consumption (other) from grid
            residual_consumption = self.__participants[participant_id][
                'meter'][time_delivery]['consumption']['other']['external']
            if residual_consumption > 0:
                transaction_record = {
                    'quantity': residual_consumption,
                    'seller_id': self.__grid.id,
                    'buyer_id': participant_id,
                    'energy_source': 'grid',
                    'settlement_price_sell': self.__grid.buy_price(),
                    'settlement_price_buy': self.__grid.buy_price(),
                    'time_creation': time_delivery[0],
                    'time_purchase': time_delivery[1],
                    'time_consumption': time_delivery[1]
                }
                transactions.append(transaction_record.copy())
                self.__participants[participant_id]['meter'][time_delivery][
                    'consumption']['other']['external'] -= residual_consumption
                extra_transactions['grid']['buy'].append(
                    transaction_record.copy())

            if participant_id in scrubbed_financial_transactions:
                extra_transactions[
                    'financial'] = scrubbed_financial_transactions[
                        participant_id]

            await self.__client.emit(event='return_extra_transactions',
                                     data=extra_transactions,
                                     namespace='/market')
        if self.save_transactions:
            self.__transactions.extend(transactions)
            await self.record_transactions(10000)

    async def __transfer_energy(self,
                                time_delivery,
                                commit_id,
                                extra_purchase=0,
                                deficit_generation=0):
        # pt, ft = await self.__transfer_energy(time_delivery, commit_id, extra_purchase, deficit_generation)
        """This function makes the energy transaction records for each settlement

        """

        physical_transactions = []
        financial_transactions = []
        seller_id = self.__settled[time_delivery][commit_id]['seller_id']
        buyer_id = self.__settled[time_delivery][commit_id]['buyer_id']
        energy_source = self.__settled[time_delivery][commit_id]['source']
        physical_qty = 0
        settlement = self.__settled[time_delivery][commit_id]['record']
        # For extra consumption by buyer greater than settled amount:
        physical_record = settlement.copy()

        # extra purchase by buyer
        # buyer settled for more than consumed
        if extra_purchase:
            print('-extra---------')
            print(buyer_id, extra_purchase)
            print(settlement)
            print(self.__participants[buyer_id]['meter'][time_delivery])

        # extra_purchase and deficit_generation SHOULD be mutually exclusive

        if not extra_purchase and not deficit_generation:
            physical_record.update({
                'time_creation': time_delivery[0],
                'time_consumption': time_delivery[1],
            })
            physical_qty = physical_record['quantity']
            self.__participants[seller_id]['meter'][time_delivery][
                'generation'][energy_source] -= physical_qty
            self.__participants[buyer_id]['meter'][time_delivery][
                'consumption']['other']['external'] -= physical_qty
            physical_transactions.append(physical_record)
        # settled for more than consumed
        elif extra_purchase:
            physical_record.update({
                'quantity': physical_record['quantity'] - extra_purchase,
                'time_creation': time_delivery[0],
                'time_consumption': time_delivery[1],
            })
            financial_record = settlement.copy()
            financial_record.update({
                'quantity': extra_purchase,
                'time_creation': time_delivery[0]
            })
            financial_transactions.append(financial_record)

            if physical_record['quantity']:
                physical_qty = physical_record['quantity']
                self.__participants[seller_id]['meter'][time_delivery][
                    'generation'][energy_source] -= physical_qty
                self.__participants[buyer_id]['meter'][time_delivery][
                    'consumption']['other']['external'] -= physical_qty
                physical_transactions.append(physical_record)

        elif deficit_generation:
            # print('-=-------------=-')
            # print(settlement)
            # print(short)
            # print(self.__participants[seller_id]['meter']['generation']['bess'])

            # seller makes up for less than promised by
            # first, compensate from battery (if extra discharge). These are physical
            # second, financially compensate by buying energy from grid for buyer. These are financial.

            # battery can only compensate for non-dispatch settlements for now
            source_type = await self.__classify_source(
                settlement['energy_source'])
            if source_type == 'non_dispatch':
                residual_bess = self.__participants[seller_id]['meter'][
                    time_delivery]['generation']['bess']
                bess_compensation = min(deficit_generation, residual_bess)
                # print(deficit_generation,
                #       bess_compensation,
                #       self.__participants[seller_id]['meter'][time_delivery]['generation']['bess'],
                #       self.__participants[seller_id]['meter'][time_delivery]['generation']['solar'],
                #       physical_qty)

                if bess_compensation > 0:
                    compensation_record = {
                        'quantity': bess_compensation,
                        'seller_id': settlement['seller_id'],
                        'buyer_id': settlement['buyer_id'],
                        'energy_source': 'bess',
                        'settlement_price_sell': settlement['settlement_price_sell'],
                        'settlement_price_buy': settlement['settlement_price_buy'],
                        'time_creation': time_delivery[0],
                        'time_purchase': settlement['time_purchase'],
                        'time_consumption': time_delivery[1]
                    }
                    self.__participants[seller_id]['meter'][time_delivery][
                        'generation']['bess'] -= bess_compensation
                    self.__participants[buyer_id]['meter'][time_delivery][
                        'consumption']['other'][
                            'external'] -= bess_compensation
                    deficit_generation -= bess_compensation
                    physical_transactions.append(compensation_record)

                    # print(deficit_generation,
                    #       bess_compensation,
                    #       self.__participants[seller_id]['meter'][time_delivery]['generation']['bess'],
                    #       self.__participants[seller_id]['meter'][time_delivery]['generation']['solar'],
                    #       physical_qty)

            # if deficit_generation:
            #     # print(extra_purchase, deficit_generation)
            #     print('-short---------')
            #     # print(buyer_id, extra_purchase)
            #     print(seller_id, deficit_generation)
            #     print(settlement)
            #     print(self.__participants[seller_id]['meter'][time_delivery])

            if deficit_generation > 0:
                financial_record = {
                    'quantity': deficit_generation,
                    'seller_id': seller_id,
                    'buyer_id': buyer_id,
                    'energy_source': 'grid',
                    'settlement_price_sell': 0,
                    'settlement_price_buy': -self.__grid.buy_price(),  # seller pays buyer
                    'time_creation': time_delivery[0],
                    'time_purchase': time_delivery[1]
                }
                financial_transactions.append(financial_record)

        await self.__complete_settlement(time_delivery, commit_id)
        return physical_transactions, financial_transactions

    # async def __complete_settlement_cb(self, time_delivery, commit_id):
    #     if not commit_id:
    #         return
    #     time_delivery = tuple(time_delivery)
    #     if time_delivery not in self.__settled:
    #         return
    #     if commit_id not in self.__settled[time_delivery]:
    #         return
    #     del self.__settled[time_delivery][commit_id]

    # mark completion of successful settlements
    async def __complete_settlement(self, time_delivery, commit_id):
        # message = {
        #     'time_delivery': time_delivery,
        #     'commit_id': commit_id,
        #     'seller_id': self.__settled[time_delivery][commit_id]['seller_id'],
        #     'buyer_id': self.__settled[time_delivery][commit_id]['buyer_id']
        # }
        # await self.__client.emit('settlement_complete', message, namespace='/market', callback=self.__complete_settlement_cb)
        # await self.__client.emit('settlement_complete', message, namespace='/market')
        del self.__settled[time_delivery][commit_id]

    @tenacity.retry(wait=tenacity.wait_fixed(5))
    async def __ensure_transactions_complete(self):
        table_len = db_utils.get_table_len(self.__db['path'],
                                           self.__db['table'])
        if table_len < self.transactions_count:
            raise Exception
        return True

    async def record_transactions(self,
                                  buf_len=0,
                                  delay=True,
                                  check_table_len=False):
        """This function records the transaction records into the ledger

        """

        if check_table_len:
            table_len = db_utils.get_table_len(self.__db['path'],
                                               self.__db['table'])
            if table_len < self.transactions_count:
                return False

        if delay and buf_len:
            delay = buf_len / 100
            ts = datetime.datetime.now().timestamp()
            if ts - self.__transaction_last_record_time < delay:
                return False

        transactions_len = len(self.__transactions)
        if transactions_len < buf_len:
            return False

        transactions = self.__transactions[:transactions_len]
        asyncio.create_task(
            db_utils.dump_data(transactions, self.__db['path'],
                               self.__db['table']))

        self.__transaction_last_record_time = datetime.datetime.now().timestamp()
        del self.__transactions[:transactions_len]
        self.transactions_count += transactions_len
        return True

    async def __clean_market(self, time_delivery):
        # clean buffer from 2 rounds before the current round
        # ensure this will not interfere with settlement callbacks
        duration = self.__timing['duration']
        time_clean = (time_delivery[0] - duration, time_delivery[1] - duration)
        self.__open.pop(time_clean, None)
        self.__settled.pop(time_clean, None)
        for participant in self.__participants:
            self.__participants[participant]['meter'].pop(time_delivery, None)

    async def __update_time(self, time):
        self.__server_ts = time['time']
        duration = time['duration']
        start_time = time['time']
        end_time = start_time + duration
        self.__timing.update({
            'timezone': self.__timing['timezone'],
            'duration': duration,
            'last_round': self.__timing['current_round'],
            'current_round': (start_time, end_time),
            'last_settle': (start_time + duration * (self.__timing['close_steps'] - 1),
                            start_time + duration * self.__timing['close_steps']),
            'next_settle': (start_time + duration * self.__timing['close_steps'],
                            start_time + duration * (self.__timing['close_steps'] + 1))
        })
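
        # Illustrative example (assumed values): with time = 1585699200, duration = 60 and
        # close_steps = 2, current_round becomes (1585699200, 1585699260), last_settle
        # becomes (1585699260, 1585699320) and next_settle becomes (1585699320, 1585699380).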

    # Make sure time interval provided is valid
    async def __time_interval_is_valid(self, time_interval: tuple):
        duration = self.__timing['duration']
        if (time_interval[1] - time_interval[0]) % duration != 0:
            # make sure duration is a multiple of round duration
            return False
        if time_interval[0] % duration != 0:
            return False
        if time_interval[1] % duration != 0:
            return False
        return True
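
        # Illustrative example (assumed values): with a 60 s round duration, (120, 240) is a
        # valid interval (both ends fall on round boundaries and it spans a whole number of
        # rounds), while (90, 240) or (120, 250) would be rejected.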

    async def __match_all(self, time_delivery):
        await self.__match(time_delivery)
        self.__status['round_matched'] = True

    # should be for simulation mode only
    @tenacity.retry(wait=tenacity.wait_fixed(0.01))
    async def __ensure_round_complete(self):
        if self.__status['round_metered'] < self.__status[
                'active_participants']:
            raise Exception

        if not self.__status['round_matched']:
            raise Exception

        if set(self.__status['round_settle_delivered']) != set(
                self.__status['round_settled']):
            raise Exception

    # Finish all processes and remove all unnecessary/remaining records in preparation for a new time step, then begin processes for the next step
    async def step(self, timeout=60, sim_params=None):
        # Timing for simulation mode and real-time mode is slightly different because simulation mode has an explicit end condition. The RT mode sequence is not too relevant at the moment and will be added later.
        if self.__timing['mode'] == 'sim':
            await self.__update_time(sim_params)
            if not self.__timing['current_round'][0] % 3600:
                self.__grid.update_price(self.__timing['current_round'][0],
                                         self.__timing['timezone'])
            await self.__start_round(duration=timeout)
            await self.__match_all(self.__timing['last_settle'])
            await self.__ensure_round_complete()
            await self.__process_energy_exchange(self.__timing['current_round'])
            await self.__clean_market(self.__timing['last_round'])
            await self.__client.emit('end_round',
                                     data='',
                                     namespace='/simulation')

    async def loop(self):
        # change loop depending on sim mode or RT mode
        while self.run:
            if self.server_online and self.__timing['mode'] == 'rt':
                await self.step(60)
                continue
            await self.__client.sleep(0.001)

        await self.__client.sleep(5)
        raise SystemExit

    async def reset_market(self):
        self.__db.clear()
        self.transactions_count = 0
        self.__open.clear()
        self.__settled.clear()
        for participant in self.__participants:
            self.__participants[participant]['meter'].clear()

    async def end_sim_generation(self):
        await self.record_transactions(delay=False)
        await self.__ensure_transactions_complete()
        await self.reset_market()
        await self.__client.emit('market_ready', namespace='/simulation')
コード例 #51
0
ファイル: redis.py プロジェクト: Surfict/osparc-simcore
from .config import (
    APP_CLIENT_REDIS_CLIENT_KEY,
    APP_CLIENT_REDIS_LOCK_MANAGER_CLIENT_KEY,
    APP_CLIENT_REDIS_LOCK_MANAGER_KEY,
    CONFIG_SECTION_NAME,
)

log = logging.getLogger(__name__)

THIS_SERVICE_NAME = "redis"
DSN = "redis://{host}:{port}"

retry_upon_init_policy = dict(
    stop=stop_after_attempt(4),
    wait=wait_fixed(1.5),
    before=before_log(log, logging.WARNING),
    reraise=True,
)
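
# With this policy, a Retrying/AsyncRetrying loop makes at most 4 attempts, waits 1.5 s
# between attempts, logs a warning before each attempt and, if every attempt fails,
# re-raises the original exception instead of a RetryError.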


async def redis_client(app: web.Application):
    cfg = app[APP_CONFIG_KEY][CONFIG_SECTION_NAME]
    url = DSN.format(**cfg["redis"])

    async def create_client(url) -> aioredis.Redis:
        # create redis client
        client: Optional[aioredis.Redis] = None
        async for attempt in AsyncRetrying(**retry_upon_init_policy):
            with attempt:
                client = await aioredis.create_redis_pool(url,
コード例 #52
0
ファイル: sms.py プロジェクト: tsotetsi/django-bulksms
from .utils import clean_msisdn, read_cvs


headers = {'Content-Type': 'application/x-www-form-urlencoded'}
logger = logging.getLogger('bulksms')

# API Authentication credentials.
username = getattr(settings, 'BULKSMS_AUTH_USERNAME', '')
password = getattr(settings, 'BULKSMS_AUTH_PASSWORD', '')
api_url = getattr(settings, 'BULKSMS_API_URL', '')

# Whether to insert country codes or not.
clean_msisdn_number = getattr(settings, 'CLEAN_MSISDN_NUMBER', False)


@retry(wait=wait_fixed(2))
def send_single(msisdn=None, message=None):
    """
    Send SMS to any number in several countries.
    :param str msisdn: the number to send to, in international format.
    :param str message: the message to be sent to msisdn.
    :return: request result in pipe format [statusCode|statusString]
    """
    if clean_msisdn_number:
        msisdn = clean_msisdn(msisdn)

    payload = (
        {
            'username': username,
            'password': password,