def main(options, args):
    """Entry point: update yum repo metadata from rpm args or an SQS queue.

    With positional args and no --sqs-name, uploads the given rpm files
    and updates the repodata once.  Otherwise polls the SQS queue
    forever, batching messages and delaying processing to coalesce
    bursts of notifications into a single repodata update.
    """
    loglevel = ('WARNING', 'INFO', 'DEBUG')[min(2, options.verbose)]
    logging.basicConfig(
        filename=options.logfile,
        level=logging.getLevelName(loglevel),
        format='%(asctime)s %(levelname)s %(message)s',
    )
    if args and not options.sqs_name:
        # Process rpm files directly
        rpmfiles = upload_rpmfiles(args, options)
        return update_repodata(options.repopath, rpmfiles, options)
    conn = boto.sqs.connect_to_region(options.region)
    queue = conn.get_queue(options.sqs_name)
    queue.set_message_class(boto.sqs.message.RawMessage)
    messages = []
    delay_count = 0
    # Keep fetched messages invisible for the whole delay window plus a
    # margin, so SQS does not redeliver them while they sit in our local
    # batch waiting to be processed.
    visibility_timeout = ((options.process_delay_count + 2)
                          * options.queue_check_interval)
    logging.debug('sqs visibility_timeout: %d', visibility_timeout)
    while True:
        new_messages = queue.get_messages(10, visibility_timeout)
        if new_messages:
            messages.extend(new_messages)
            # Immediately check for more messages
            continue
        if messages:
            if delay_count < options.process_delay_count:
                logging.debug('Delaying processing: %d < %d',
                              delay_count, options.process_delay_count)
                delay_count += 1
            else:
                # Group rpm file names (whitespace-separated in the
                # message body) by their target repo path.
                pkgmap = collections.defaultdict(list)
                for message in messages:
                    body = json.loads(message.get_body())
                    repopath = str(body.get('Subject', options.repopath))
                    pkgmap[repopath].extend(str(body['Message']).split())
                for repopath, rpmfiles in pkgmap.items():
                    logging.info('updating: %s: %r', repopath, rpmfiles)
                    try:
                        update_repodata(repopath, set(rpmfiles), options)
                    except Exception:
                        # BUGFIX: was a bare ``except:``, which also
                        # swallowed KeyboardInterrupt/SystemExit and made
                        # the loop hard to interrupt mid-update.
                        # sqs messages will be deleted even on failure
                        logging.exception('update failed: %s: %r',
                                          repopath, rpmfiles)
                # Reset:
                for message in messages:
                    message.delete()
                messages = []
                delay_count = 0
        logging.debug('sleeping %ds...', options.queue_check_interval)
        try:
            time.sleep(options.queue_check_interval)
        except KeyboardInterrupt:
            break
def getSensors(self):
    """Block until a full chunk of SQS events arrives, then return sensors.

    Drains ``self.event_queue`` until ``constants.RL_MESS_CHUNK`` messages
    have been collected, marks the barking sensor if any message reports
    the barking dog state, and returns the sensor values as an array.
    """
    print('DogEnv.getSensors() called')
    messages = []
    # get_messages may return fewer than requested, so keep polling for
    # the remainder until a full chunk has been gathered (busy loop).
    while len(messages) < constants.RL_MESS_CHUNK:
        messages += self.event_queue.get_messages(constants.RL_MESS_CHUNK - len(messages))
    for message in messages:
        print('DogEnv: SQS message received: ' + str(message.get_body()))
        # NOTE(review): this subscripts the message object itself, not the
        # body printed above.  Plain boto SQS messages are not normally
        # subscriptable -- presumably a custom message class is installed on
        # the queue, or this should parse the body first (e.g.
        # ``json.loads(message.get_body())[...]``).  Confirm against the
        # queue's message class.
        if message[constants.ATTR_DOG_STATE] == constants.DOG_STATE_BARKING:
            self.sensors.barking = 1
    return self.sensors.asarray()
def main(options, args):
    """Entry point: update yum repo metadata from rpm args or an SQS queue.

    With positional args and no --sqs-name, updates the repodata once for
    the given rpm files.  Otherwise polls the SQS queue forever, batching
    messages and delaying processing to coalesce bursts of notifications
    into a single repodata update per repo path.
    """
    loglevel = ('WARNING', 'INFO', 'DEBUG')[min(2, options.verbose)]
    logging.basicConfig(
        filename=options.logfile,
        level=logging.getLevelName(loglevel),
        format='%(asctime)s %(levelname)s %(message)s',
    )
    if args and not options.sqs_name:
        return update_repodata(options.repopath, args, options)
    conn = boto.sqs.connect_to_region(options.region)
    queue = conn.get_queue(options.sqs_name)
    queue.set_message_class(boto.sqs.message.RawMessage)
    messages = []
    delay_count = 0
    # Keep fetched messages invisible for the whole delay window plus a
    # margin, so SQS does not redeliver them while they sit in our local
    # batch waiting to be processed.
    visibility_timeout = ((options.process_delay_count + 2)
                          * options.queue_check_interval)
    logging.debug('sqs visibility_timeout: %d', visibility_timeout)
    while True:
        new_messages = queue.get_messages(10, visibility_timeout)
        if new_messages:
            messages.extend(new_messages)
            # Immediately check for more messages
            continue
        if messages:
            if delay_count < options.process_delay_count:
                logging.debug('Delaying processing: %d < %d',
                              delay_count, options.process_delay_count)
                delay_count += 1
            else:
                # Group message payloads by their target repo path.
                pkgmap = collections.defaultdict(list)
                for message in messages:
                    body = json.loads(message.get_body())
                    repopath = str(body.get('Subject', options.repopath))
                    pkgmap[repopath].append(str(body['Message']))
                for repopath, rpmfiles in pkgmap.items():
                    logging.info('updating: %s: %r', repopath, rpmfiles)
                    try:
                        update_repodata(repopath, set(rpmfiles), options)
                    except Exception:
                        # BUGFIX: was a bare ``except:``, which also
                        # swallowed KeyboardInterrupt/SystemExit and made
                        # the loop hard to interrupt mid-update.
                        # sqs messages will be deleted even on failure
                        logging.exception('update failed: %s: %r',
                                          repopath, rpmfiles)
                # Reset:
                for message in messages:
                    message.delete()
                messages = []
                delay_count = 0
        logging.debug('sleeping %ds...', options.queue_check_interval)
        try:
            time.sleep(options.queue_check_interval)
        except KeyboardInterrupt:
            break
def _decode_message(self, message): payload = json.loads(message.get_body()) retries = int(message.attributes['ApproximateReceiveCount']) created_on = int(message.attributes['SentTimestamp']) first_execution_on = int(message.attributes['ApproximateFirstReceiveTimestamp']) payload['_metadata'] = { 'id': message.receipt_handle, 'retries': retries, 'created_on': datetime.fromtimestamp(created_on / 1000), 'first_execution_on': datetime.fromtimestamp(first_execution_on / 1000) } logging.debug('Message payload: %s', str(payload)) return payload
def sequential(args=None):
    """Run a single-threaded SQS worker loop configured from an ini file.

    The ini's [container] section names the queue, region, worker
    factory ("module:expression") and logger config; an optional
    [worker] section supplies keyword options to the factory.  Each
    message body is a JSON ``[args, kwargs]`` pair passed to the worker.
    Messages are deleted after handling (even on logged failure) except
    on TransientError, which leaves them for redelivery.
    """
    if args is None:
        args = sys.argv[1:]
    [ini] = args
    parser = ConfigParser.RawConfigParser()
    with open(ini) as fp:
        parser.readfp(fp)
    container = dict(parser.items('container'))
    name = container.pop('queue')
    region = container.pop('region', 'us-east-1')
    worker = container.pop('worker')
    ZConfig.configureLoggers(container.pop('loggers'))
    if container:
        print("Unexpected container options", container)
    if parser.has_section('worker'):
        worker_options = dict(parser.items('worker'))
    else:
        worker_options = {}
    module, expr = worker.split(':', 1)
    module = __import__(module, {}, {}, ['*'])
    # SECURITY: evaluates an expression taken from the config file.  The
    # ini is trusted operator input here, but never point this at
    # untrusted configuration.
    worker = eval(expr, module.__dict__)(worker_options)
    connection = boto.sqs.connect_to_region(region)
    queue = connection.get_queue(name)
    while True:
        # Long-poll for up to 20s; an empty result just loops again.
        rs = queue.get_messages(wait_time_seconds=20)
        if rs:
            message = rs[0]
            data = message.get_body()
            try:
                args, kw = json.loads(data)
                worker(*args, **kw)
            except TransientError:
                # Leave the message queued; SQS redelivers it after the
                # visibility timeout.
                continue
            except Exception:
                # Log the failure and the raw payload, then fall through
                # to delete -- poison messages are not retried forever.
                logger.exception("Handling a message")
                message_logger.info(data)
            queue.delete_message(message)
def _decode_message(self, message): payload = json.loads(message.get_body()) retries = int(message.attributes['ApproximateReceiveCount']) created_on = int(message.attributes['SentTimestamp']) first_execution_on = int( message.attributes['ApproximateFirstReceiveTimestamp']) payload['_metadata'] = { 'id': message.receipt_handle, 'retries': retries, 'created_on': datetime.fromtimestamp(created_on / 1000), 'first_execution_on': datetime.fromtimestamp(first_execution_on / 1000) } logging.debug('Message payload: %s', str(payload)) return payload
def get_sqs_messages(self):
    """Yield decoded SQS message payloads forever, skipping bot traffic.

    Long-polls (10s) with a short sleep when the queue is empty.  A
    message is deleted after the consumer has processed the yielded
    payload (the ``finally`` runs when the generator resumes or closes),
    and also when it is filtered out as bot traffic.
    """
    while True:
        logger.debug('listening for sqs messages ... ')
        message = self._sqs.read(wait_time_seconds=10)
        if not message:
            sleep(.01)
            continue
        logger.debug('got an sqs message!')
        payload = ujson.loads(message.get_body())
        # BUGFIX: filtered messages were previously skipped with a bare
        # ``continue`` and never deleted, so they became visible again
        # after the visibility timeout and were re-fetched forever.
        if payload['user_id'] == 'USLACKBOT' or payload['user_name'] == 'slackbot':
            self._sqs.delete_message(message)
            continue
        try:
            yield payload
        finally:
            self._sqs.delete_message(message)
def poll(self):
    """Fetch one batch of queued SNS notifications and dispatch handlers.

    Long-polls the process queue (10s), routes each message to the
    handlers registered for its (source process, event) pair, and
    deletes the message afterwards -- unless a handler raised and
    ``self._ack_on_exception`` is false, in which case the message is
    left for SQS to redeliver after the visibility timeout.
    """
    logger.debug("Waiting for messages")
    messages = self._process_queue.get_messages(wait_time_seconds=10)
    logger.debug("Got {} messages".format(len(messages)))
    for message in messages:
        # The body is an SNS envelope; the application payload is a JSON
        # document nested in its 'Message' field.
        body = json.loads(message.get_body())
        source_process = self._arn_to_process(body['TopicArn'])
        exception_raised = False
        if source_process in self._source_process_subscriptions:
            trigger_datetime = parse_utc_timestamp_to_local_datetime(body['Timestamp'])
            _message = json.loads(body['Message'])
            event = _message['event']
            # Events may arrive as JSON lists; convert to a hashable
            # tuple so they can match the subscription keys.
            if isinstance(event, list):
                event = tuple(event)
            if event in self._source_process_subscriptions[source_process]:
                for handler in self._source_process_subscriptions[source_process][event]:
                    try:
                        self._handle_message(handler, event=event,
                                             target=_message['target'],
                                             source=_message['source'],
                                             extra=_message['extra'],
                                             trigger_datetime=trigger_datetime)
                    except Exception as exc:
                        # One failing handler must not stop the others;
                        # remember the failure for the ack decision below.
                        logger.error(u"Handler '{}' raised an exception: '{}'. Traceback: {}".format(
                            handler, exc, traceback.format_exc()
                        ))
                        exception_raised = True
            else:
                logger.debug(u"Dropped message '{}'. Event not wanted.".format(body))
        else:
            logger.warning(u"Dropped message '{}'. Didn't ask for it.".format(body))
        if self._ack_on_exception or not exception_raised:
            message.delete()
def reader_writer(message):
    """Write the message body to stderr, newline-terminated.

    Always returns True so callers using the result as a
    handled-successfully flag keep the message acknowledged.
    """
    body = message.get_body()
    sys.stderr.write(body + "\n")
    return True
def fillQueueAsList(self):
    """Drain the SQS queue, appending message bodies to ``self.queueAslist``.

    Polls in batches of up to 10 until a poll comes back empty.
    BUGFIX: this previously looped on ``self.queue.count()``; SQS only
    reports an *approximate* count, and since messages are never deleted
    here (merely hidden for the 60s visibility timeout) the count stays
    non-zero, so the loop could spin against an empty-looking queue or
    fail to terminate.
    """
    while True:
        batch = self.queue.get_messages(num_messages=10, visibility_timeout=60)
        if not batch:
            break
        for message in batch:
            self.queueAslist.append(message.get_body())