Example #1
 async def _update(self, data, author_user_id):
     # TODO: handle this changelog in a more generic way
     assert isinstance(data, dict)
     changelog_entry = {
         '_id': ObjectId(),
         'author_user_id': author_user_id,
         'changes': {},
     }
     op_set = {}
     for key, new_value in sorted(data.items()):
         old_value = self._user_doc.get(key)
         if new_value == old_value:
             continue
         logger.info('Updating user %s %s: %s -> %s', self.id, key,
                     smart_repr(old_value), smart_repr(new_value))
         assert key not in op_set
         op_set[key] = new_value
         changelog_entry['changes'][key] = {
             'old_value': old_value,
             'new_value': new_value,
         }
     if not op_set:
         logger.info('Nothing to update (user %s)', self.id)
         return
     new_user_doc = await self._c_users.find_one_and_update(
         {
             '_id': self.id,
             'v': self._user_doc['v'],
         }, {
             '$set': op_set,
             '$push': {
                 'changelog': changelog_entry
             },
             '$inc': {
                 'v': 1
             },
         },
         return_document=ReturnDocument.AFTER)
     if new_user_doc is None:
         raise RetryNeeded()
     assert new_user_doc['v'] == self._user_doc['v'] + 1
     assert new_user_doc['_id'] == self.id
     self._user_doc = new_user_doc
     self._view = UserView(new_user_doc)
     await self._offload_changelog()
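The 'v' field here acts as an optimistic lock: the update filter only matches the version that was read, '$inc' bumps it, and losing the race surfaces as RetryNeeded. A caller would typically re-read the document and try again; the sketch below shows one way to do that, where reload() and the attempt limit are assumptions rather than part of the original code.

# Hypothetical caller-side retry loop for the optimistic-lock pattern above.
# 'user.reload()' and the attempt limit are assumptions; only re-reading the
# document and re-applying the update comes from the example itself.
async def update_user_with_retry(user, data, author_user_id, attempts=5):
    for _ in range(attempts):
        try:
            await user._update(data, author_user_id)
            return
        except RetryNeeded:
            # another writer bumped 'v' first - refresh our copy and try again
            await user.reload()
    raise Exception('User update kept losing the optimistic-lock race')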
Example #2
 async def _update(self, data, author_user_id):
     # TODO: handle this changelog in a more generic way
     assert isinstance(data, dict)
     changelog_entry = {
         '_id': ObjectId(),
         'author_user_id': author_user_id,
         'changes': {},
     }
     op_set = {}
     for key, new_value in sorted(data.items()):
         old_value = self._user_doc.get(key)
         if new_value == old_value:
             continue
         logger.info(
             'Updating user %s %s: %s -> %s',
             self.id, key, smart_repr(old_value), smart_repr(new_value))
         assert key not in op_set
         op_set[key] = new_value
         changelog_entry['changes'][key] = {
             'old_value': old_value,
             'new_value': new_value,
         }
     if not op_set:
         logger.info('Nothing to update (user %s)', self.id)
         return
     new_user_doc = await self._c_users.find_one_and_update(
         {
             '_id': self.id,
             'v': self._user_doc['v'],
         }, {
             '$set': op_set,
             '$push': {'changelog': changelog_entry},
             '$inc': {'v': 1},
         },
         return_document=ReturnDocument.AFTER)
     if new_user_doc is None:
         raise RetryNeeded()
     assert new_user_doc['v'] == self._user_doc['v'] + 1
     assert new_user_doc['_id'] == self.id
     self._user_doc = new_user_doc
     self._view = UserView(new_user_doc)
     await self._offload_changelog()
Example #3
 def process_line(self, line_bytes, timestamp):
     try:
         line = line_bytes.decode().rstrip()
     except ValueError as e:
         logger.warning('Failed to decode line %s: %r', smart_repr(line_bytes), e)
         line = str(line_bytes)
     is_error = False
     for ep in self.wf_conf.error_patterns:
         if ep.regex and ep.regex.search(line):
             is_error = True
     if is_error:
         n = next(self.line_counter)
         self.error_lines.append((timestamp, n, line))
Example #4
 def instance_console_output(self):
     self.instance_ready()
     start_mt = monotime()
     while True:
         logger.info('Getting console output of instance %s...',
                     self.instance_id())
         reply = self.ec2_client.get_console_output(
             DryRun=False, InstanceId=self.instance_id())
         logger.debug('reply: %s', smart_repr(reply))
         if 'Output' in reply:
             return reply['Output'].splitlines()
         if monotime() - start_mt > self.console_output_timeout:
             raise Exception('No console output received')
         logger.debug('No output yet, sleeping...')
         sleep(3)
Example #5
async def retrieve_alerts(conf, session):
    post_kwargs = dict(
        headers={'Accept': 'application/json'},
        json={'query': alert_query},
        timeout=30)
    url = conf.graphql_endpoint
    logger.debug('Retrieving alerts from %s', redacted(url))
    t0 = monotime()
    async with session.post(url, **post_kwargs) as resp:
        resp.raise_for_status()
        rj = await resp.json()
        logger.debug('GQL response: %s', smart_repr(rj))
        if rj.get('error') or rj.get('errors'):
            raise Exception(
                f'Received error response from {redacted(url)}: {rj}')
        logger.debug('pageInfo: %r', rj['data']['activeAlerts']['pageInfo'])
        alerts = [edge['node'] for edge in rj['data']['activeAlerts']['edges']]
        logger.debug('Retrieved %d alerts in %.3f s', len(alerts),
                     monotime() - t0)
        return alerts
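The example never shows alert_query itself; judging only by the fields the response handling reads (activeAlerts, pageInfo, edges[].node), it presumably looks something like the sketch below, where the pageInfo and node selections are guesses.

# Hypothetical shape of alert_query, reconstructed only from the fields the
# response handling above reads; the real query and its selections are not
# shown in the example.
alert_query = '''
    {
        activeAlerts {
            pageInfo { hasNextPage endCursor }
            edges { node { id } }
        }
    }
'''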
Example #6
 async def send_report(self, report_data):
     assert isinstance(report_data, dict)
     if not self._report_url:
         raise OverwatchClientNotConfiguredError('No report_url')
     if self._session is None:
         raise Exception('Not in context block')
     post_kwargs = dict(json=report_data,
                        headers={
                            'Accept': 'application/json',
                            'Authorization': 'token ' + self._report_token,
                        },
                        timeout=post_timeout_s)
     logger.debug('Sending Overwatch report - POST %s with payload: %s',
                  self._report_url, smart_repr(report_data))
     try:
         async with self._session.post(self._report_url,
                                       **post_kwargs) as resp:
             logger.debug('Response: %r', resp)
             resp.raise_for_status()
     except Exception as e:
         raise OverwatchClientReportError(
             'Failed to post report to {!r}: {!r}'.format(
                 self._report_url, e))
Example #7
    def call(self, method_name, params):
        assert isinstance(params, dict)
        req_key = 'rpc-req:{service}'.format(service=self._service_name)
        req_timeout = timedelta(seconds=30)
        token = uuid4().hex
        req_payload = {
            'method': method_name,
            'params': params,
            'token': token,
            'expire': time() + req_timeout.total_seconds(),
        }
        logger.info('Calling %s(%s)', method_name, smart_repr(params))
        self._redis.rpush(req_key, json.dumps(req_payload, sort_keys=True).encode())
        # EXPIRE on a key that does not exist yet is a no-op, so set the TTL after the push
        self._redis.expire(req_key, req_timeout)

        res_key = 'rpc-res:{service}:{token}'.format(service=self._service_name, token=token)
        res = self._redis.blpop(res_key, int(req_timeout.total_seconds()) or 1)
        if not res:
            raise Exception('RPC call timeout')
        res_key, res_data = res
        res_payload = json.loads(res_data.decode())
        if res_payload.get('error'):
            raise Exception('RPC handler failed: {}'.format(res_payload['error']))
        return res_payload['reply']
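This client pairs with the worker loop in the final example: both sides agree on one shared request list per service and one response list per call, keyed by the request token. A minimal sketch of that key convention, reusing the 'echo' service name from the worker example:

# Key layout shared by the RPC client above and the worker loop below:
#   rpc-req:<service>          - shared request queue (client RPUSHes, worker BLPOPs)
#   rpc-res:<service>:<token>  - per-call response list (worker RPUSHes, client BLPOPs)
from uuid import uuid4

service_name = 'echo'
token = uuid4().hex
req_key = 'rpc-req:{service}'.format(service=service_name)
res_key = 'rpc-res:{service}:{token}'.format(service=service_name, token=token)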
Example #8
async def check_target_ip(conf, target, report, hostname, ip):
    logger.debug('Checking IP address %s', ip)
    ip_report = {
        'error': {
            '__value': None,
            '__check': {
                'state': 'green',
            },
        },
    }
    try:
        ip_report['reverse_record'] = await get_ip_reverse(ip)
    except CancelledError as e:
        logger.debug('get_ip_reverse cancelled')
        raise e
    except ReverseLookupError as e:
        logger.debug('get_ip_reverse ReverseLookupError: %s', e)
        ip_report['reverse_record'] = None
        ip_report['reverse_record_error'] = str(e)
    start_time = monotime()
    try:
        conn = CustomTCPConnector(resolver=CustomResolver(hostname, ip))
        async with ClientSession(connector=conn) as session:
            start_time = monotime()
            async with session.get(target.url) as response:
                status_ok = response.status == 200
                ip_report['status_code'] = {
                    '__value': response.status,
                    '__check': {
                        'state': 'green' if status_ok else 'red',
                    },
                }
                data = await response.read()
                duration = monotime() - start_time
                duration_threshold = target.duration_threshold or conf.default_duration_threshold
                duration_ok = timedelta(seconds=duration) <= duration_threshold
                ip_report['duration'] = {
                    '__value': duration,
                    '__unit': 'seconds',
                    '__check': {
                        'state': 'green' if duration_ok else 'red',
                    },
                }
                ip_report['response_size'] = {
                    '__value': len(data),
                    '__unit': 'bytes',
                }
                ip_report['redirect_count'] = len(response.history)
                ip_report['final_url'] = str(response.url)
                ip_report['ssl_certificate'] = get_ssl_cert_report(
                    conn.get_last_cert())
                # try:
                #     ip_report['response_preview'] = data.decode()[:100]
                # except Exception:
                #     ip_report['response_preview'] = str(data)[:100]
                logger.debug('GET %r -> %s %s in %.3f s', target.url,
                             response.status, smart_repr(data), duration)
                if status_ok and target.check_response_contains:
                    present = to_bytes(target.check_response_contains) in data
                    ip_report['check_response_contains'] = {
                        'content': target.check_response_contains,
                        'present': {
                            '__value': present,
                            '__check': {
                                'state': 'green' if present else 'red',
                            },
                        },
                    }
    except CancelledError as e:
        logger.info('GET %r via %s cancelled: %r', target.url, ip, e)
        raise e
    except Exception as e:
        logger.info('GET %r via %s failed: %r', target.url, ip, e)
        if isinstance(e, SSLError):
            if 'certificate has expired' in str(e):
                msg = 'SSL certificate has expired'
            else:
                msg = f'SSLError: {e}'
        else:
            msg = str(e) or repr(e)
        ip_report['error'] = {
            '__value': msg,
            '__check': {
                'state': 'red',
            },
        }
    ip_report.setdefault('duration', {'__value': monotime() - start_time})
    report['state']['by_ip'][ip] = ip_report
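For orientation, a by_ip entry produced by a successful run of this check would look roughly as follows; the keys mirror the code above, the values are invented.

# Illustrative report['state']['by_ip'][ip] entry for a passing check;
# every value here is made up.
example_ip_report = {
    'error': {'__value': None, '__check': {'state': 'green'}},
    'reverse_record': 'example.net.',
    'status_code': {'__value': 200, '__check': {'state': 'green'}},
    'duration': {'__value': 0.123, '__unit': 'seconds', '__check': {'state': 'green'}},
    'response_size': {'__value': 5120, '__unit': 'bytes'},
    'redirect_count': 0,
    'final_url': 'https://example.com/',
}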
Example #9
def main():
    p = argparse.ArgumentParser()
    p.add_argument('--one', '-1', action='store_true', help='process only one request')
    p.add_argument('--verbose', '-v', action='store_true')
    args = p.parse_args()

    logging.basicConfig(
        format='%(asctime)s %(name)s %(levelname)5s: %(message)s',
        level=logging.DEBUG if args.verbose else logging.WARNING)


    r = redis.StrictRedis(host='localhost', port=6379, db=0)

    handlers = Handlers(r)
    routes = handlers.get_routes()

    service_name = 'echo'
    req_key = 'rpc-req:{service}'.format(service=service_name)

    logger.debug('req_key: %r', req_key)

    while True:
        try:
            data = r.blpop(req_key, 10)
        except KeyboardInterrupt as e:
            # do not make too verbose traceback
            sys.exit('{!r} while redis blpop'.format(e))
        if data is None:
            continue
        key, value = data

        try:
            req_payload = json.loads(value.decode())
        except Exception as e:
            logger.error('Failed to parse request JSON: %r; value: %r', e, smart_repr(value))
            continue

        logger.debug('Request: %s', smart_repr(req_payload))

        now = time()
        if req_payload['expire'] < now:
            logger.info('Request expired %.3f s ago', now - req_payload['expire'])
            continue

        handler = routes.get(req_payload['method'])
        if handler is None:
            logger.warning('Method not found: %r', req_payload['method'])
            response = {'error': 'Method {!r} not found'.format(req_payload['method'])}
        else:
            try:
                reply = handler(req_payload['params'])
                response = {'reply': reply}
            except Exception as e:
                logger.exception('Method %r handler %r failed: %r', req_payload['method'], handler, e)
                response = {'error': repr(e)}

        res_key = 'rpc-res:{service}:{token}'.format(service=service_name, token=req_payload['token'])
        #logger.debug('res_key: %r', res_key)
        res_json = json.dumps(response, sort_keys=True).encode()
        r.rpush(res_key, res_json)
        r.expire(res_key, 60)

        if args.one:
            break
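Nearly every example above routes values through smart_repr before logging them. Its implementation is not part of these listings; a minimal stand-in that only caps the length of repr() output could look like this.

# Hypothetical stand-in for smart_repr - the real helper is not shown above.
# This sketch only assumes it behaves like repr() with a length cap.
def smart_repr(value, max_length=60):
    text = repr(value)
    if len(text) > max_length:
        text = text[:max_length] + '...'
    return text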