def trigger_launch():
    aws_id, aws_key = util.read_aws_credential()
    aws_creds = {'aws_access_key_id': aws_id,
                 'aws_secret_access_key': aws_key}
    sqs = boto.sqs.connect_to_region(config.aws_region, **aws_creds)
    req_q = sqs.get_queue("%s_request" % config.controller)
    notify_q = sqs.get_queue("notify_%s" % config.controller)
    for q in [req_q, notify_q]:
        q.set_message_class(JSONMessage)
    msg = JSONMessage()
    msg.set_body({'launch-fp-as': '*****@*****.**',
                  'launch-refrtok': '<redacted>',
                  'launch-serial': 1})
    print "Sending request..."
    req_q.write(msg)
    return  # Comment this out to wait for a response.
    print "Awaiting response..."
    while True:
        msg = notify_q.read()
        if msg is not None:
            print "Got message: %r" % msg.get_body()
            notify_q.delete_message(msg)
            return
        sys.stdout.write(".")
        sys.stdout.flush()
def report_completion():
    # DRY warning: upload_wrappers.py.
    installer_location = file('/home/lantern/wrapper_location').read()
    sqs = boto.sqs.connect_to_region(AWS_REGION, **aws_creds)
    logging.info("Reporting installers for %s are ready at %s."
                 % (clip_email(USERID), installer_location))
    ctrl_req_q = sqs.get_queue("%s_request" % CONTROLLER)
    ctrl_notify_q = sqs.get_queue("notify_%s" % CONTROLLER)
    msg = JSONMessage()
    msg.set_body({'fp-up-user': USERID,
                  'fp-up-instance': INSTANCEID,
                  'fp-up-insloc': installer_location,
                  'fp-up-ip': IP,
                  'fp-up-port': PORT,
                  # TRANSITION: keep supporting old controllers for a while
                  # to make deployment less timing sensitive.
                  'invsrvup-user': USERID,
                  'invsrvup-insloc': installer_location})
    ctrl_notify_q.write(msg)
    DEL_FLAG = '/home/lantern/deleted_sqs_message'
    if not os.path.exists(DEL_FLAG):
        to_delete = loads(b64decode(SQSMSG))
        ctrl_req_q.delete_message(to_delete)
        file(DEL_FLAG, 'w').write('OK')
    file('/home/lantern/reported_completion', 'w').write('OK')
def send_message(d):
    aws_id, aws_key = util.read_aws_credential()
    aws_creds = {'aws_access_key_id': aws_id,
                 'aws_secret_access_key': aws_key}
    sqs = boto.sqs.connect_to_region(config.aws_region, **aws_creds)
    req_q = sqs.get_queue("%s_request" % config.controller)
    req_q.set_message_class(JSONMessage)
    msg = JSONMessage()
    msg.set_body(d)
    print "Sending request..."
    req_q.write(msg)
    print "Sent."
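A hedged usage sketch of send_message: it forwards any JSON-serializable dict to the controller's request queue, so the payload keys depend entirely on what the controller expects (the key below is borrowed from trigger_launch above, not prescribed).

# Hypothetical call; real keys depend on the controller's protocol.
send_message({'launch-serial': 1})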
def report_error_to_controller(error):
    sqs = boto.sqs.connect_to_region(AWS_REGION, **aws_creds)
    ctrl_notify_q = sqs.get_queue("notify_%s" % CONTROLLER)
    msg = JSONMessage()
    # DRY: SQSChecker at lantern-controller.
    msg.set_body({'fp-alarm': error,
                  'instance-id': INSTANCEID,
                  'ip': IP,
                  'port': PORT,
                  'send-email': True})
    ctrl_notify_q.write(msg)
def report(failures):
    fps_str = '\n' + '\n'.join(sorted(failures))
    log.warn("Fallbacks failed to proxy: %s" % fps_str)
    sqs = boto.sqs.connect_to_region(AWS_REGION, **aws_creds)
    report_q = sqs.get_queue("notify_%s" % CONTROLLER)
    msg = JSONMessage()
    msg.set_body({'fp-alarm': "Fallbacks not proxying",
                  'subject': "ALARM: fallback(s) failing to proxy",
                  'send-email': True,
                  'ip': fps_str,
                  # These fields are expected by the controller, but they
                  # make no sense in this case.
                  'user': "******",
                  'instance-id': 'unavailable',
                  'port': "n/a"})
    report_q.write(msg)
def push(body, queue='pi-status'):
    '''
    Create a JSON-encoded boto-style Message object and write it to the
    queue.
    '''
    sqs = SQSConnection()
    sqs_queue = sqs.create_queue(queue)
    message = JSONMessage(body=body)
    sqs_queue.write(message)
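For illustration, a hedged example of calling push. The body can be any JSON-serializable value; in boto, create_queue returns the existing queue when one with that name is already present, so repeated calls are safe. The field names below are made up.

# Hypothetical status report on the default 'pi-status' queue.
push({'host': 'pi-01', 'temp_c': 48.2, 'ok': True})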
def post(self):
    users_future = (UserScore.query(UserScore.ported == False)
                             .order(-UserScore.score)
                             .fetch_async(500))
    # Include secrets as a submodule if anything grows out of this.
    aws_creds = {'aws_access_key_id': '<REDACTED>',
                 'aws_secret_access_key': '<REDACTED>'}
    sqs = boto.sqs.connect_to_region('ap-southeast-1', **aws_creds)
    q = sqs.get_queue("notify_lanternctrl1-2")
    q.set_message_class(JSONMessage)
    msg = JSONMessage()
    users = list(users_future.get_result())
    msg.set_body({'port-users': '\n'.join(u.key.id() for u in users)})
    q.write(msg)
    logging.info("Sent request.")
    for user in users:
        user.ported = True
    ndb.put_multi(users)
    logging.info("Marked users as ported.")
def push_to_queue(path, width, height, gray):
    '''
    Put messages on the queue.

    path: the path in S3; should preferably be unique,
          e.g. 'videos/21242312/jumbix.mp4'
    width: the target width
    height: the target height
    gray: True or False, whether conversion to B/W is wanted
    '''
    queue = sqs.connect_to_region("eu-west-1")
    q = queue.get_queue('video-converter-sqs')
    vals = {'path': path,
            'width': width,
            'height': height,
            'gray': gray}
    m = JSONMessage()
    m.set_body(vals)
    q.write(m)
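A sketch of a producer call, assuming the object has already been uploaded to S3 under the given key (the path and dimensions here are made up):

# Enqueue a conversion job for a hypothetical S3 object.
push_to_queue('videos/example/clip.mp4', 640, 360, gray=False)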
def send_message(self, message):
    # dart always uses the JSONMessage format
    self.queue.write(JSONMessage(self.queue, message))
def write_event(self, event_content):
    self._get_queue().write(JSONMessage(body=event_content))
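None of the snippets so far show the consuming side. As a rough sketch of the boto round trip, under assumed queue and region names: set_message_class makes read() return JSONMessage objects, whose get_body() yields the decoded dict.

# Consumer-side sketch (queue name and region are assumptions).
import boto.sqs
from boto.sqs.jsonmessage import JSONMessage

def drain(queue_name='pi-status', region='us-east-1'):
    conn = boto.sqs.connect_to_region(region)
    q = conn.get_queue(queue_name)
    q.set_message_class(JSONMessage)  # so read() JSON-decodes bodies
    while True:
        m = q.read(visibility_timeout=60)
        if m is None:
            break
        print m.get_body()  # the dict the producer wrote
        q.delete_message(m)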
if topics is not None:
    try:
        msg_topic = jo["topic"]
    except:
        logging.error("Can't extract 'topic' field from %s" % line)
        continue
    try:
        allow_msg = any(map(lambda s: msg_topic.startswith(s), topics))
    except Exception, e:
        logging.error("Error processing message topic against list of topics: %s" % e)
        continue
    if not allow_msg:
        continue
    m = JSONMessage()
    m.set_body(jo)
else:
    m = RawMessage()
    m.set_body(line)
logging.debug("Writing to SQS queue '%s': %s" % (queue_name, line))
try:
    if simulate_error:
        raise Exception("Network error simulation")
    q.write(m)
except:
    ### give us a chance...
    sleep(1)
    try:
        if simulate_error:
            raise Exception("Network error simulation")
        q.write(m)
    except Exception, e:
        # The original snippet is truncated here; assumed behavior is to
        # log the error and drop the message after the single retry fails.
        logging.error("Giving up writing to SQS queue '%s': %s" % (queue_name, e))
def get_queue_attributes(self):
    return {'MessageRetentionPeriod': 4 * 24 * 3600}

def get_wait_time(self):
    return 0


SAMPLE_JSON_SQS_MSG = JSONMessage(
    body={
        'start_date': '2014-02-02',
        'end_date': '2014-02-06',
        'script_start_date_arg': '2014-02-02',
        'script_end_date_arg': '2014-02-06',
        'step': 1,
        'log_name': 'log',
        'log_schema_version': 'version',
        'hash_key': SAMPLE_RECORD_ET_STATUS_SCHEDULED['hash_key'],
        'redshift_id': 'some-id',
        'redshift_host': 'host',
        'redshift_port': 'port',
        'redshift_schema': 'rs_namespace',
        's3_path': 's3_path',
        'uuid': 'uuid',
        'additional_arguments': {},
    })

SUCCESS_RECORD = {
    'status': 'success',
    'start_time': 4,
    'end_time': 5,
    'date': '2014-02-02',
    'error_info': {},
}