Example #1
class ProducerImpl(DependencyMount):
    env = CurrentEnvironment()

    def __init__(self, env):
        DependencyMount.__init__(self, parent=env)
        self.producer = SyncProducer(env.get_config('kafka'))

    async def __aexit__(self, exc_type, exc_value, tb):
        await self.flush()
        return await DependencyMount.__aexit__(self, exc_type, exc_value, tb)

    @property
    def event_count(self):
        return self.producer.event_count

    async def flush(self):
        def flush():
            logger.info('Waiting for producer to flush %s events...',
                        len(self.producer))
            self.producer.flush()

        return await submit(flush)

    def fast_flush(self):
        return _FastFlush(self)

    async def produce_event(self, project, event, timestamp=None):
        return await submit(
            lambda: self.producer.produce_event(project, event, timestamp))
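A minimal usage sketch, not taken from the original source: it assumes DependencyMount also supplies the matching __aenter__ so the class can be used as an async context manager, and that env is an Environment object like the one passed around in the other examples. Leaving the block runs __aexit__ above, which awaits flush() before delegating to DependencyMount.

async def send_one(env, project, event):
    # Hypothetical helper: produce a single event; the producer flushes on exit.
    async with ProducerImpl(env) as producer:
        await producer.produce_event(project, event)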
Example #2
class Operation(DependencyMount):
    env = CurrentEnvironment()

    def __init__(self, env, req=None, project_id=None):
        DependencyMount.__init__(self,
                                 parent=env,
                                 scope='operation',
                                 descriptor_type=CurrentOperation)
        self.req = req
        self.project_id = project_id
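The 'operation' scope reads as a per-request unit of work. A hedged sketch of opening one, assuming DependencyMount objects work as async context managers (as Example #1 suggests) and that req is the incoming HTTP request; the project_id value is purely illustrative.

async def handle(env, req):
    # Hypothetical: descriptors declared with CurrentOperation resolve against op.
    async with Operation(env, req=req, project_id=42) as op:
        ...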
Example #3
class SubmitEventEndpoint(Endpoint):
    url_path = '/events/{project_id}'
    env = CurrentEnvironment()
    auth = Auth()
    producer = Producer()
    filter = Filter()

    async def get_allowed_origins(self):
        return await self.filter.get_allowed_origins()

    async def accept_event(self):
        max_json_packet = self.env.get_config(
            'apiserver.limits.max_json_packet')
        line = await self.op.req.content.readline()
        if not line:
            return
        try:
            line = line.decode('utf-8')
            if len(line) > max_json_packet:
                raise PayloadTooLarge('JSON event above maximum size')
            return normalize_event(json.loads(line))
        except IOError as e:
            raise ClientReadFailed(str(e))
        except ValidationError as e:
            raise ApiError(e.message)

    async def post(self):
        remote_addr = get_remote_addr(self.env, self.op.req)
        if remote_addr is not None \
           and await self.filter.ip_is_blacklisted(remote_addr):
            raise ClientBlacklisted('The IP address of the client is '
                                    'blacklisted for event submission')

        errors = []
        events = 0
        while True:
            try:
                event = await self.accept_event()
                if event is None:
                    break
                await self.producer.produce_event(self.auth.project_id, event,
                                                  self.auth.timestamp)
                events += 1
            except ApiError as e:
                errors.append(e.to_json())

        return ApiResponse({
            'errors': errors,
            'events': events,
        })
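Since post() reads the body line by line and parses each line with json.loads, the expected wire format is newline-delimited JSON, one event per line. The field names below are only illustrative, borrowed from the Recorder example further down; the actual schema is whatever normalize_event enforces, which is not shown here.

payload = (
    b'{"sid": "abc", "ty": "ev", "ts": 1500000000.0}\n'
    b'{"sid": "abc", "ty": "cl", "ts": 1500000010.0}\n'
)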
Example #4
class FilterManager(DependencyMount):
    env = CurrentEnvironment()
    project_options = ProjectOptions()

    def __init__(self, op):
        DependencyMount.__init__(self, parent=op)

    def _ip_is_system_blacklisted(self, addr):
        for net in self.env.get_config('apiserver.whitelisted_ips'):
            try:
                net = ip_network(net, strict=False)
            except ValueError:
                continue
            if addr in net:
                return False
        for net in self.env.get_config('apiserver.blacklisted_ips'):
            try:
                net = ip_network(net, strict=False)
            except ValueError:
                continue
            if addr in net:
                return True
        return False

    async def _ip_is_project_blacklisted(self, addr, project_id=None):
        opt = await self.project_options.get('sentry:blacklisted_ips',
                                             project_id=project_id)
        for net in opt or ():
            try:
                net = ip_network(net, strict=False)
            except ValueError:
                continue
            if addr in net:
                return True
        return False

    async def ip_is_blacklisted(self, addr, project_id=None):
        addr = ip_address(addr)
        if self._ip_is_system_blacklisted(addr):
            return True
        if await self._ip_is_project_blacklisted(addr, project_id):
            return True
        return False

    async def get_allowed_origins(self, project_id=None):
        return await self.project_options.get('sentry:origins',
                                              project_id=project_id) or []
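Note the precedence in ip_is_blacklisted: a match against apiserver.whitelisted_ips makes the system check return False before the system blacklist is consulted, and the per-project blacklist is checked afterwards either way. A small call sketch, assuming an operation object op as in Example #2; constructing FilterManager directly here mirrors what the Filter() descriptor presumably does.

async def should_reject(op, addr):
    # Returns True when the address is blocked at either the system or project level.
    fm = FilterManager(op)
    return await fm.ip_is_blacklisted(addr, project_id=None)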
Example #5
class Server(DependencyMount):
    env = CurrentEnvironment()
    producer = Producer()

    def __init__(self, env):
        DependencyMount.__init__(self, parent=env)
        self.app = web.Application()
        self.shutdown_timeout = env.get_config('apiserver.shutdown_timeout')

        for endpoint_cls in get_endpoints():
            endpoint_cls.register_with_server(self)

    async def add_cors_headers(self, req, resp, endpoint=None):
        origin = req.headers.get('ORIGIN')
        if not origin:
            return resp

        allowed_origins = set(self.env.get_config('apiserver.allowed_origins'))
        if endpoint is not None:
            if not endpoint.allow_cors:
                return resp
            allowed_origins.update(await endpoint.get_allowed_origins())

        if not is_allowed_origin(origin, allowed_origins):
            return exceptions.Forbidden('Origin is not allowed') \
                .get_response().to_http_response()

        if endpoint is not None:
            methods = endpoint.get_methods()
        else:
            methods = ['OPTIONS']
            if req.method not in methods:
                methods.append(req.method)
            if 'GET' in methods and 'HEAD' not in methods:
                methods.append('HEAD')

        resp.headers['Access-Control-Allow-Origin'] = origin
        resp.headers['Access-Control-Allow-Methods'] = \
            ', '.join(sorted(methods))

        return resp

    async def postprocess_response(self, req, resp, endpoint=None):
        if 'origin' not in resp.headers:
            resp = await self.add_cors_headers(req, resp, endpoint)
        return resp

    async def make_response(self, req, rv, endpoint=None):
        if isinstance(rv, dict):
            rv = ApiResponse(rv)
        elif isinstance(rv, tuple):
            rv = ApiResponse(*rv)
        if isinstance(rv, ApiResponse):
            rv = rv.to_http_response()
        return await self.postprocess_response(req, rv, endpoint)

    def run(self, host=None, port=None, fd=None, sock=None, backlog=128):
        loop = asyncio.get_event_loop()

        if sock is not None or fd is not None:
            if sock is not None:
                fd = None
            else:
                sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
            host = None
            port = None
        else:
            sock = None
            if host is None:
                host = self.env.get_config('apiserver.host')
            if port is None:
                port = self.env.get_config('apiserver.port')

        with self.producer:
            handler = self.app.make_handler(
                access_log=logger,
                slow_request_timeout=self.env.get_config(
                    'apiserver.slow_request_timeout'),
                keepalive_timeout=self.env.get_config(
                    'apiserver.keepalive_timeout'),
                tcp_keepalive=self.env.get_config('apiserver.tcp_keepalive'))
            server = loop.create_server(handler,
                                        host=host,
                                        port=port,
                                        backlog=backlog,
                                        sock=sock)
            srv, startup_res = loop.run_until_complete(
                asyncio.gather(server, self.app.startup(), loop=loop))
            try:
                loop.run_forever()
            except KeyboardInterrupt:
                pass
            finally:
                srv.close()
                loop.run_until_complete(srv.wait_closed())
                loop.run_until_complete(self.app.shutdown())
                loop.run_until_complete(
                    handler.finish_connections(self.shutdown_timeout))
                loop.run_until_complete(self.app.cleanup())
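A hypothetical entry point; Environment is assumed to be the framework's top-level object and its constructor is not shown in these excerpts. run() blocks until the process is interrupted, then shuts the handler and application down as above.

def main():
    env = Environment()  # assumed constructor, not part of the code shown here
    Server(env).run(host='127.0.0.1', port=8000)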
Example #6
class DemoObject(DependencyMount):
    env = CurrentEnvironment()
    stuff = MyDependency()

    def __init__(self, env):
        DependencyMount.__init__(self, parent=env)
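A tiny sketch of what the demo illustrates: descriptors such as MyDependency() resolve on attribute access once the object is mounted, the same way self.env.get_config() is used elsewhere. MyDependency itself is not defined in these excerpts.

obj = DemoObject(env)   # env assumed to be an Environment, as in the other examples
stuff = obj.stuff       # attribute access resolves the MyDependency descriptor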
Example #7
class DatabaseBackend(DependencyMount):
    env = CurrentEnvironment()

    def __init__(self, operation, name):
        DependencyMount.__init__(self, parent=operation)
        self.name = name
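Unlike most classes above, this one mounts under an operation rather than directly under the environment, so its lifetime is tied to the per-request scope from Example #2. A hedged sketch, with 'primary' as an illustrative backend name only:

async def with_db(env, req):
    async with Operation(env, req=req) as op:
        db = DatabaseBackend(op, 'primary')
        ...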
Example #8
class Recorder(DependencyMount):
    redis = Redis()
    env = CurrentEnvironment()
    consumer = KafkaConsumer(topics=['events'])

    def __init__(self, env):
        DependencyMount.__init__(self, parent=env)
        self.ttl = self.env.get_config('recorder.ttl')
        self.resolutions = self.env.get_config('recorder.resolutions')
        self.batch_size = self.env.get_config('recorder.batch_size')

    def format_session_key(self, key):
        project, session = key
        return 's:{}:{}'.format(project, session)

    def dump_session_data(self, value):
        return json.dumps(value).encode('utf-8')

    def load_session_data(self, value):
        return json.loads(value.decode('utf-8')) if value is not None else None

    def load_message(self, message):
        return json.loads(message.value().decode('utf-8'))

    def process_session_events(self, session, events):
        for event in events:
            if session is None:
                session = {
                    'start': event['ts'],
                    'watermark': event['ts'],
                    'stop': None,
                }
            else:
                if session['stop'] is not None:
                    break

                if event['ts'] < session['start']:
                    session['start'] = event['ts']

                if event['ts'] > session['watermark']:
                    session['watermark'] = event['ts']

            if event['ty'] == 'cl':
                session['stop'] = max(
                    event['ts'],
                    session['watermark'],
                )

        return session

    def buckets(self, start, stop=None):
        if stop is None:
            stop = start
        results = set()
        for resolution in self.resolutions:
            for index in range(int(start // resolution),
                               int((stop // resolution) + 1)):
                results.add((resolution, index))
        return results

    def process(self, messages):
        # Batch together all of the events by session.
        sessions = defaultdict(list)
        for message in messages:
            project, event = self.load_message(message)
            sessions[(project, event['sid'])].append(event)

        # Fetch the session state for all affected sessions.
        keys = list(sessions.keys())
        data = self.redis.mget(map(self.format_session_key, keys))

        # Update the sessions.
        results = {}
        counters = defaultdict(set)
        touched = set()
        closed = set()

        for key, session in zip(keys, map(self.load_session_data, data)):
            updated = self.process_session_events(
                session.copy() if session is not None else None,
                sessions[key],
            )

            if updated['stop'] is None:
                touched.add(key)

            def changed(field, callback):
                # Invoke callback(prev, curr) when the given session field changed.
                prev = session and session.get(field) or None
                curr = updated and updated.get(field) or None
                if prev != curr:
                    return callback(prev, curr)

            results[self.format_session_key(key)] = \
                self.dump_session_data(updated)

            update_buckets = set()

            def handle_start_change(previous, current):
                if previous is not None:
                    assert current < previous
                    update_buckets.update(self.buckets(current, previous))
                else:
                    update_buckets.update(self.buckets(current))

            def handle_watermark_change(previous, current):
                if previous is not None:
                    assert current > previous
                    update_buckets.update(self.buckets(previous, current))

            def handle_stop_change(previous, current):
                assert previous is None
                closed.add(key)

            changed('start', handle_start_change)
            changed('watermark', handle_watermark_change)
            changed('stop', handle_stop_change)

            project, session_id = key
            for bucket in update_buckets:
                counters[bucket + (project,)].add(session_id)

        now = time.time()

        pipeline = self.redis.pipeline()
        pipeline.mset(results)
        for key in results.keys():
            pipeline.expire(key, self.ttl)
        if touched:
            pipeline.zadd('s:schedule', **{'%s:%s' % x: now for x in touched})
        if closed:
            pipeline.zrem('s:schedule', *['%s:%s' % x for x in closed])
        for (resolution, index, project), session_ids in counters.items():
            pipeline.pfadd(
                's:ts:{}:{}:{}'.format(resolution, index, project),
                *session_ids
            )
        pipeline.execute()

    def run(self):
        try:
            batch(self.consumer, self.process, size=self.batch_size)
        except KeyboardInterrupt:
            pass
        except Exception as e:
            logger.exception(e)
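The bucket arithmetic in buckets() is easiest to see in isolation. The snippet below repeats the same logic with hard-coded resolutions of 60 and 3600 seconds (the real values come from the recorder.resolutions config); a session spanning ts=130..190 lands in minute buckets 2 and 3 and in hour bucket 0.

def buckets(start, stop=None, resolutions=(60, 3600)):
    # Same computation as Recorder.buckets(), lifted out of the class for illustration.
    if stop is None:
        stop = start
    results = set()
    for resolution in resolutions:
        for index in range(int(start // resolution), int((stop // resolution) + 1)):
            results.add((resolution, index))
    return results

assert buckets(130, 190) == {(60, 2), (60, 3), (3600, 0)}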