	def __init__(self, app, id=None, config=None):
		super().__init__(app, id=id, config=config)

		self.ConnectionEvent = asyncio.Event()  # the explicit 'loop' argument was removed in Python 3.10
		self.ConnectionEvent.clear()

		self.PubSub = asab.PubSub(app)
		self.Loop = app.Loop

		self._host = self.Config['host']
		self._port = int(self.Config['port'])
		self._user = self.Config['user']
		self._password = self.Config['password']
		self._connect_timeout = int(self.Config['connect_timeout'])
		self._db = self.Config['db']
		self._reconnect_delay = int(self.Config['reconnect_delay'])
		self._output_queue_max_size = int(self.Config['output_queue_max_size'])
		self._max_bulk_size = int(self.Config['max_bulk_size'])

		self._conn_future = None
		self._connection_request = False
		self._pause = False

		# Trigger an initial health check, then subscribe to application lifecycle events
		self._on_health_check('connection.open!')
		app.PubSub.subscribe("Application.stop!", self._on_application_stop)
		app.PubSub.subscribe("Application.tick!", self._on_health_check)
		app.PubSub.subscribe("PostgreSQLConnection.pause!", self._on_pause)
		app.PubSub.subscribe("PostgreSQLConnection.unpause!", self._on_unpause)

		self._output_queue = asyncio.Queue()
		self._bulks = {}  # We have a "bulk" per query
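
A minimal standalone sketch of the bulk-per-query pattern implied by _bulks and _output_queue above; the class and method names are illustrative, not bspump's actual implementation:

import asyncio

class BulkAccumulatorSketch:

    def __init__(self, max_bulk_size=2):
        self._max_bulk_size = max_bulk_size
        self._output_queue = asyncio.Queue()
        self._bulks = {}  # one pending bulk (list of rows) per query

    def consume(self, query, row):
        # Accumulate the row under its query; flush once the bulk is full
        bulk = self._bulks.setdefault(query, [])
        bulk.append(row)
        if len(bulk) >= self._max_bulk_size:
            self.flush(query)

    def flush(self, query):
        bulk = self._bulks.pop(query, None)
        if bulk:
            self._output_queue.put_nowait((query, bulk))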
Example #2
    def __init__(self, app, id=None):

        self.Id = id if id is not None else self.__class__.__name__
        self.Loop = app.Loop

        self.Sources = []
        self.Processors = [[]]  # List of lists of processors; a Generator object increases the nesting depth
        self._source_coros = []  # List of source main() coroutines

        # Publish-Subscribe for this pipeline
        self.PubSub = asab.PubSub(app)
        self.Metrics = app.Metrics

        self._error = None  # None when not in an error state; otherwise a tuple (exception, event)

        self._throttles = set()

        self._ready = asyncio.Event()
        self._ready.clear()

        # Chillout breaks pipeline processing into smaller tasks so that other events in the event loop can be processed
        self._chillout_trigger = 10000
        self._chillout_counter = 0

        self._context = {}
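
A minimal sketch, assumed rather than taken from bspump, of how the chillout counter hands control back to the event loop every _chillout_trigger events:

import asyncio

async def process_events(events, chillout_trigger=10000):
    chillout_counter = 0
    for event in events:
        # ... per-event processing would happen here ...
        chillout_counter += 1
        if chillout_counter >= chillout_trigger:
            chillout_counter = 0
            await asyncio.sleep(0)  # yield so other tasks in the loop can run

asyncio.run(process_events(range(50000)))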
Example #3
    def __init__(self, app, id=None):
        self.Id = id if id is not None else self.__class__.__name__
        self.App = app
        self.Loop = app.Loop

        self.Sources = []
        self.Processors = [[]]  # List of lists of processors; a Generator object increases the nesting depth
        self._source_coros = []  # List of source main() coroutines

        # Publish-Subscribe for this pipeline
        self.PubSub = asab.PubSub(app)
        self.MetricsService = app.get_service('asab.MetricsService')
        self.MetricsCounter = self.MetricsService.create_counter(
            "bspump.pipeline",
            tags={'pipeline': self.Id},
            init_values={
                'event.in': 0,
                'event.out': 0,
                'event.drop': 0,
                'warning': 0,
                'error': 0,
            })
        self.MetricsGauge = self.MetricsService.create_gauge(
            "bspump.pipeline.gauge",
            tags={'pipeline': self.Id},
            init_values={
                'warning.ratio': 0.0,
                'error.ratio': 0.0,
            })
        self.MetricsDutyCycle = self.MetricsService.create_duty_cycle(
            self.Loop,
            "bspump.pipeline.dutycycle",
            tags={'pipeline': self.Id},
            init_values={
                'ready': False,
            })
        app.PubSub.subscribe("Application.Metrics.Flush!",
                             self._on_metrics_flush)

        # Pipeline logger
        self.L = PipelineLogger("bspump.pipeline.{}".format(self.Id),
                                self.MetricsCounter)

        self.LastReadyStateSwitch = self.Loop.time()

        self._error = None  # None when not in an error state; otherwise a tuple (context, event, exc, timestamp)

        self._throttles = set()

        self._ready = asyncio.Event()
        self._ready.clear()

        # Chillout breaks pipeline processing into smaller tasks so that other events in the event loop can be processed
        self._chillout_trigger = 10000
        self._chillout_counter = 0

        self._context = {}
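
The warning.ratio and error.ratio gauges suggest a per-flush computation along these lines; the helper below is a hypothetical standalone sketch, not asab's actual flush handler:

def compute_ratios(counter_values):
    # Ratio of warnings/errors to events that entered the pipeline
    total = counter_values['event.in']
    if total == 0:
        return {'warning.ratio': 0.0, 'error.ratio': 0.0}
    return {
        'warning.ratio': counter_values['warning'] / total,
        'error.ratio': counter_values['error'] / total,
    }

assert compute_ratios({'event.in': 200, 'warning': 10, 'error': 2}) == \
    {'warning.ratio': 0.05, 'error.ratio': 0.01}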
Example #4
File: lookup.py, Project: SaturioTDV/bspump
    def __init__(self, app, lookup_id, config=None):
        assert lookup_id is not None
        super().__init__("lookup:{}".format(lookup_id), config=config)
        self.Id = lookup_id
        self.PubSub = asab.PubSub(app)

        self.ETag = None
        master_url = self.Config['master_url']
        if master_url:
            # rstrip('/') normalizes trailing slashes and, unlike a
            # character-by-character loop, cannot raise IndexError on an all-slash URL
            master_url = master_url.rstrip('/')
            master_lookup_id = self.Config['master_lookup_id']
            if master_lookup_id == "":
                master_lookup_id = self.Id
            self.MasterURL = master_url + '/lookup/' + master_lookup_id
        else:
            self.MasterURL = None  # No master is defined
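
The same URL normalization, extracted into a standalone helper purely for illustration (the function name is hypothetical):

def build_master_url(master_url, master_lookup_id, lookup_id):
    if not master_url:
        return None  # no master defined
    master_url = master_url.rstrip('/')
    if master_lookup_id == "":
        master_lookup_id = lookup_id
    return master_url + '/lookup/' + master_lookup_id

assert build_master_url('http://master:8080//', '', 'users') == \
    'http://master:8080/lookup/users'
assert build_master_url('', 'users', 'ignored') is None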
Example #5
    def __init__(self, app, id=None, config=None, lazy=False):
        _id = id if id is not None else self.__class__.__name__
        super().__init__("lookup:{}".format(_id), config=config)

        self.App = app
        self.Loop = app.Loop
        self.Lazy = lazy

        self.Id = _id
        self.PubSub = asab.PubSub(app)

        self.ETag = None
        master_url = self.Config['master_url']
        if master_url:
            master_url = master_url.rstrip('/')  # strip trailing slashes safely
            master_lookup_id = self.Config['master_lookup_id']
            if master_lookup_id == "":
                master_lookup_id = self.Id
            self.MasterURL = master_url + '/lookup/' + master_lookup_id
        else:
            self.MasterURL = None  # No master is defined
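
The ETag attribute points at conditional HTTP fetches from the master. Below is a sketch of that pattern using aiohttp; the client choice and function name are assumptions, not necessarily how the lookup actually loads itself:

import aiohttp

async def fetch_lookup(master_url, etag=None):
    headers = {}
    if etag is not None:
        headers['If-None-Match'] = etag  # standard conditional-GET header
    async with aiohttp.ClientSession() as session:
        async with session.get(master_url, headers=headers) as resp:
            if resp.status == 304:
                return None, etag  # lookup data unchanged on the master
            data = await resp.read()
            return data, resp.headers.get('ETag')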
Example #6
	def __init__(self, app, dtype='float_', id=None, config=None):
		super().__init__(app, dtype=dtype, id=id, config=config)
		self.PubSub = asab.PubSub(app)
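
For context, asab.PubSub delivers a published message to every subscribed callback, passing the message type as the first argument. A minimal usage sketch, assuming a standard asab application:

import asab

class MyApplication(asab.Application):

    async def main(self):
        self.PubSub.subscribe("Analyzer.tick!", self._on_tick)
        self.PubSub.publish("Analyzer.tick!")
        self.stop()

    def _on_tick(self, message_type):
        print("received", message_type)

if __name__ == '__main__':
    MyApplication().run()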
Example #7
    def __init__(self, app, id=None, config=None):
        _id = id if id is not None else self.__class__.__name__
        super().__init__("pipeline:{}".format(_id), config=config)

        self.Id = _id
        self.App = app
        self.Loop = app.Loop

        self.AsyncFutures = []
        self.AsyncConcurencyLimit = int(self.Config["async_concurency_limit"])
        self.ResetProfiler = self.Config.getboolean("reset_profiler")
        assert self.AsyncConcurencyLimit > 1

        # This opaque object identifies the throttler in the throttle set;
        # the AsyncFutures list itself cannot be used because lists are unhashable
        self.AsyncFuturesThrottler = object()

        self.Sources = []
        self.Processors = [[]]  # List of lists of processors; a Generator object increases the nesting depth

        # Publish-Subscribe for this pipeline
        self.PubSub = asab.PubSub(app)
        self.MetricsService = app.get_service('asab.MetricsService')
        self.MetricsCounter = self.MetricsService.create_counter(
            "bspump.pipeline",
            tags={'pipeline': self.Id},
            init_values={
                'event.in': 0,
                'event.out': 0,
                'event.drop': 0,
                'warning': 0,
                'error': 0,
            })
        self.MetricsGauge = self.MetricsService.create_gauge(
            "bspump.pipeline.gauge",
            tags={'pipeline': self.Id},
            init_values={
                'warning.ratio': 0.0,
                'error.ratio': 0.0,
            })
        self.MetricsDutyCycle = self.MetricsService.create_duty_cycle(
            self.Loop,
            "bspump.pipeline.dutycycle",
            tags={'pipeline': self.Id},
            init_values={
                'ready': False,
            })
        self.ProfilerCounter = {}

        app.PubSub.subscribe("Application.Metrics.Flush!",
                             self._on_metrics_flush)

        # Pipeline logger
        self.L = PipelineLogger("bspump.pipeline.{}".format(self.Id),
                                self.MetricsCounter)

        self.LastReadyStateSwitch = self.Loop.time()

        self._error = None  # None when not in an error state; otherwise a tuple (context, event, exc, timestamp)

        self._throttles = set()
        self._ancestral_pipelines = set()

        self._ready = asyncio.Event()
        self._ready.clear()

        # Chillout breaks pipeline processing into smaller tasks so that other events in the event loop can be processed
        self._chillout_trigger = 10000
        self._chillout_counter = 0

        self._context = {}
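
A standalone sketch of the sentinel-throttler idea with illustrative names: an opaque object() stands in for the AsyncFutures list inside the throttle set, because a list is unhashable and cannot be stored there:

import asyncio

class ThrottleSketch:

    def __init__(self):
        self._throttles = set()
        self._ready = asyncio.Event()
        self.AsyncFuturesThrottler = object()  # hashable identity token

    def throttle(self, who, enable=True):
        if enable:
            self._throttles.add(who)
        else:
            self._throttles.discard(who)
        # The pipeline is ready only while nothing is throttling it
        if self._throttles:
            self._ready.clear()
        else:
            self._ready.set()

sketch = ThrottleSketch()
sketch.throttle(sketch.AsyncFuturesThrottler, enable=True)
assert not sketch._ready.is_set()
sketch.throttle(sketch.AsyncFuturesThrottler, enable=False)
assert sketch._ready.is_set()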