from iron_mq import IronMQ
import os

# Connect to IronMQ using credentials supplied via the environment.
mq_client = IronMQ(
    project_id=os.getenv("IRON_PROJECT_ID"),
    token=os.getenv("IRON_TOKEN"),
)
request_queue = mq_client.queue("requests")

# Warning to all... this is probably the worst possible way to
# enqueue a bunch of in-order numbers... you have been warned.
TOTAL = 500000  # queue ALL THE THINGS
BATCH = 3000    # but do it in small batches

numbers = range(TOTAL)
for start in range(0, TOTAL, BATCH):
    chunk = numbers[start:start + BATCH]
    # unpack and post this batch of stringified numbers in one call
    request_queue.post(*(str(num) for num in chunk))
from json import loads #connection to es cluster es = Elasticsearch(['http://58d3ea40c46e8b15000.qbox.io:80']) #initiate ironmq connection ironmq = IronMQ(host="mq-aws-us-east-1.iron.io", project_id="557330ae33f0350006000040", token="JZsM3ArjIhEfiKlG52Bt99b7Hh4", protocol="https", port=443, api_version=1, config_file=None) #specify the queue where seqment is writing segment_queue = ironmq.queue("segment") #iterate over things waiting in the queue for i in range(segment_queue.info()['size']): #get next event off queue data = segment_queue.get() try: #get rid of cruft event = data['messages'][0] #id to delete on sucessful write queue_id = event['id'] #interesting things body = loads(event['body']) doc = {} #check if this is a track event
from iron_mq import IronMQ
import redis
import time

# One-time setup: IronMQ queue of incoming requests plus a local Redis
# instance used as a simple counter store.
broker = IronMQ(host="mq-rackspace-ord.iron.io")
request_queue = broker.queue("all_requests")
counters = redis.StrictRedis()

# Poll indefinitely: every drained message bumps the global request count.
while True:
    reply = request_queue.get()     # ask the queue for messages
    messages = reply["messages"]
    if not messages:
        # nothing waiting — back off for a second, then try again
        time.sleep(1)
        continue
    # if we made it this far, we have a message
    counters.incr("requests")                 # increment the number of requests
    request_queue.delete(messages[0]["id"])   # delete (ack) the message
datadir='C:\\Users\\agitzes\\Documents\\github\\metadata-dashboard\\data-generation\\' #connection to es cluster es = Elasticsearch(['http://58d3ea40c46e8b15000.qbox.io:80']) #initiate ironmq connection ironmq = IronMQ(host="mq-aws-us-east-1.iron.io", project_id="557330ae33f0350006000040", token="JZsM3ArjIhEfiKlG52Bt99b7Hh4", protocol="https", port=443, api_version=1, config_file=None) #specify the queue where seqment is writing eventqueue = ironmq.queue("event-stream") Trends=pandas.read_csv(datadir+"GlobalmetadataTrends.csv",index_col=0) interventions=pandas.read_csv(datadir+"intervention.csv",index_col=0) weather=pandas.read_csv(datadir+"Globalmetadataweather.csv",index_col=0) CompPrice=pandas.read_csv(datadir+"Globalmetadataweather.csv",index_col=0) for i in range (0,len(interventions)): Promotions = {
# Required configuration, all supplied via the environment.
mongo_url = os.getenv("MONGO_URL")
mongo_user = os.getenv("MONGO_USER")
mongo_password = os.getenv("MONGO_PASSWORD")
schema_name = os.getenv("SCHEMA_NAME")
project_id = os.getenv("IRON_PROJECT_ID")
token = os.getenv("IRON_TOKEN")

# connect to mongo and choose a database
client = pymongo.MongoClient(mongo_url)
client.the_database.authenticate(mongo_user, mongo_password, source=schema_name)
db = client.angellist

# connect to ironmq and choose a queue
ironmq = IronMQ(project_id=project_id, token=token)
queue = ironmq.queue("requests")

# AngelList startup endpoint; {0} is filled with the startup id.
url = 'http://api.angel.co/1/startups/{0}'
# results = []
i = 0
max_reqs = 1000
# NOTE(review): `results` is appended to below, but its initialization is
# commented out above — confirm it is defined elsewhere, otherwise the
# first successful response raises NameError.
while True:
    message = queue.get()['messages'][0]
    # grab the "id" to use
    r = requests.get(url.format(message['body'])).json()
    if not r.get('error'):
        results.append(r)
        # remove from queue since it was successful
        queue.delete(message['id'])
        print(message['body'])
    elif r.get('error') == "over_limit":
        # if we go over our limit then stop trying to get more
        # NOTE(review): snippet is truncated here — the rate-limit branch
        # body lies beyond this view.
import sendgrid
from iron_helper import WorkerArgs
from iron_mq import IronMQ
import json

# Worker configuration delivered via the IronWorker payload.
args = WorkerArgs()
username = args.config["username"]
password = args.config["password"]
queue_name = args.config["queue"]

# SendGrid client plus the IronMQ queue holding outbound mail jobs.
s = sendgrid.Sendgrid(username, password, secure=True)
mq = IronMQ()
queue = mq.queue(queue_name)

def getMessage():
    """Return the next message from the queue, or None when it is empty."""
    resp = queue.get()
    if "messages" not in resp:
        return None
    if len(resp["messages"]) < 1:
        return None
    return resp["messages"][0]

msg = getMessage()
print msg  # Python 2 print statement — this script is py2-only
while msg is not None:
    # message bodies are JSON-encoded mail specs
    msg["body"] = json.loads(msg["body"])
    from_address = None
    from_name = None
    # "from" may be a bare address string or an object with an address
    # field (basestring is Python 2 only)
    if isinstance(msg["body"]["from"], basestring):
        from_address = msg["body"]["from"]
    else:
        from_address = msg["body"]["from"]["address"]
# NOTE(review): snippet is truncated here — the rest of the loop body
# (including refetching msg) lies beyond this view.
def get_connection(list_key=Conf.PREFIX):
    """Build an IronMQ client from the configured credentials and return
    the queue named after ``list_key`` (defaults to the configured prefix).
    """
    client = IronMQ(name=None, **Conf.IRON_MQ)
    return client.queue(queue_name=list_key)
def __init__(self, q_config, name):
    """Store the queue configuration and open the named IronMQ queue.

    q_config must provide the 'TOKEN' and 'PROJECT_ID' keys.
    """
    self.q_config = q_config
    client = IronMQ(
        token=self.q_config['TOKEN'],
        project_id=self.q_config['PROJECT_ID'],
    )
    self.q = client.queue(name)
from iron_mq import IronMQ
import redis
import time

# Queue of raw requests plus a Redis hash tallying hits per User-Agent.
client = IronMQ()
ua_queue = client.queue("ua_requests")
store = redis.StrictRedis()

# Poll indefinitely, counting one hit per message by its User-Agent header.
while True:
    reply = ua_queue.get()      # ask the queue for messages
    batch = reply["messages"]
    if not batch:
        # queue is drained — wait a second, then try again
        time.sleep(1)
        continue
    # if we made it this far, we have a message:
    # separate the user agent
    user_agent = batch[0]["body"]["HTTP_USER_AGENT"]
    # increment the number of requests from this user agent
    store.hincrby("user_agent_requests", user_agent, 1)
    ua_queue.delete(batch[0]["id"])  # delete (ack) the message
import sendgrid
from iron_helper import WorkerArgs
from iron_mq import IronMQ
import json

# Worker configuration delivered via the IronWorker payload.
args = WorkerArgs()
username = args.config["username"]
password = args.config["password"]
queue_name = args.config["queue"]

# SendGrid client plus the IronMQ queue holding outbound mail jobs.
s = sendgrid.Sendgrid(username, password, secure=True)
mq = IronMQ()
queue = mq.queue(queue_name)

def getMessage():
    """Return the next message from the queue, or None when it is empty."""
    resp = queue.get()
    if "messages" not in resp:
        return None
    if len(resp["messages"]) < 1:
        return None
    return resp["messages"][0]

msg = getMessage()
print msg  # Python 2 print statement — this script is py2-only
while msg is not None:
    # message bodies are JSON-encoded mail specs
    msg["body"] = json.loads(msg["body"])
    from_address = None
    from_name = None
    # "from" may be a bare address string (basestring is Python 2 only)
    if isinstance(msg["body"]["from"], basestring):
        from_address = msg["body"]["from"]
# NOTE(review): snippet is truncated here — the else branch and the rest
# of the loop body lie beyond this view.
def __init__(self):
    """Create the shared IronMQ client once and bind the "requests" queue."""
    client = IronMQ(host="mq-rackspace-ord.iron.io")
    self.queue = client.queue("requests")