def _createProcesses4(self):
    # Four-stage pipeline: a combined subscriber / parser feeds the decorator,
    # transformer and writer, chained together by in-memory queues.
    self.processes.append(
        ProcessProxy(
            name='subscriberParser',
            function=estreamer.pipeline.SubscriberParser,
            settings=self.settings,
            loggingQueue=estreamer.crossprocesslogging.queue(),
            inputQueue=None,
            outputQueue=self.decoratorQueue))

    self.processes.append(
        ProcessProxy(
            name='decorator',
            function=estreamer.pipeline.Decorator,
            settings=self.settings,
            loggingQueue=estreamer.crossprocesslogging.queue(),
            inputQueue=self.decoratorQueue,
            outputQueue=self.transformQueue))

    self.processes.append(
        ProcessProxy(
            name='transformer',
            function=estreamer.pipeline.Transformer,
            settings=self.settings,
            loggingQueue=estreamer.crossprocesslogging.queue(),
            inputQueue=self.transformQueue,
            outputQueue=self.writerQueue))

    self.processes.append(
        ProcessProxy(
            name='writer',
            function=estreamer.pipeline.Writer,
            settings=self.settings,
            loggingQueue=estreamer.crossprocesslogging.queue(),
            inputQueue=self.writerQueue,
            outputQueue=None))
def _createProcesses1(self):
    # Single-process pipeline: one worker subscribes, parses, decorates,
    # transforms and writes in sequence, so no queues are needed.
    self.processes.append(
        ProcessProxy(
            name='worker',
            function=estreamer.pipeline.SingleWorker,
            settings=self.settings,
            loggingQueue=estreamer.crossprocesslogging.queue(),
            inputQueue=None,
            outputQueue=None))
def _createProcessesN(self, workerProcesses):
    # We're going to have:
    #   1 Subscriber
    #   n Parsers
    #   1 Decorator
    #   n Transformers
    #   1 Writer
    #
    # Of the single-instance stages, the decorator is the slowest, the writer is
    # the fastest and the subscriber is limited by the network.
    #
    # The Transformer and Parser run at approximately the same speed and are
    # about 4 times slower than the decorator. That said, the parser is
    # slightly slower, so give it the extra worker when splitting an odd
    # count. In other words, there is little point in having more than about
    # 4 parsers and 4 transformers, i.e. around 11 processes in total.
    #
    # We'll allow 12.
    if workerProcesses > Controller.MAX_WORKERS:
        self.logger.info('Limiting worker processes to {0}'.format(
            Controller.MAX_WORKERS))
        workerProcesses = Controller.MAX_WORKERS

    elif workerProcesses < 5:
        self.logger.error(
            'System error: worker process count {0} is below the minimum of 5'.format(
                workerProcesses))

    availableWorkers = float(workerProcesses - Controller.RESERVED_WORKERS)
    parserCount = int(math.ceil(availableWorkers / 2))
    transformerCount = int(math.floor(availableWorkers / 2))

    self.processes.append(
        ProcessProxy(
            name='subscriber',
            function=estreamer.pipeline.Subscriber,
            settings=self.settings,
            loggingQueue=estreamer.crossprocesslogging.queue(),
            inputQueue=None,
            outputQueue=self.parserQueue))

    for index in range(0, parserCount):
        self.processes.append(
            ProcessProxy(
                name='parser {0}'.format(index),
                function=estreamer.pipeline.Parser,
                settings=self.settings,
                loggingQueue=estreamer.crossprocesslogging.queue(),
                inputQueue=self.parserQueue,
                outputQueue=self.decoratorQueue))

    self.processes.append(
        ProcessProxy(
            name='decorator',
            function=estreamer.pipeline.Decorator,
            settings=self.settings,
            loggingQueue=estreamer.crossprocesslogging.queue(),
            inputQueue=self.decoratorQueue,
            outputQueue=self.transformQueue))

    for index in range(0, transformerCount):
        self.processes.append(
            ProcessProxy(
                name='transformer {0}'.format(index),
                function=estreamer.pipeline.Transformer,
                settings=self.settings,
                loggingQueue=estreamer.crossprocesslogging.queue(),
                inputQueue=self.transformQueue,
                outputQueue=self.writerQueue))

    self.processes.append(
        ProcessProxy(
            name='writer',
            function=estreamer.pipeline.Writer,
            settings=self.settings,
            loggingQueue=estreamer.crossprocesslogging.queue(),
            inputQueue=self.writerQueue,
            outputQueue=None))
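# A minimal worked sketch of the parser / transformer split above, assuming
# Controller.RESERVED_WORKERS == 3 (the single-instance subscriber, decorator
# and writer) and Controller.MAX_WORKERS == 12; the values here are purely
# illustrative and not part of the class.
#
#     import math
#
#     workerProcesses = 12
#     availableWorkers = float(workerProcesses - 3)              # 9.0
#     parserCount = int(math.ceil(availableWorkers / 2))         # 5 parsers
#     transformerCount = int(math.floor(availableWorkers / 2))   # 4 transformers
#
#     # 1 subscriber + 5 parsers + 1 decorator + 4 transformers + 1 writer == 12,
#     # with the odd worker going to the (slightly slower) parser stage.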