Example #1
def create_app(queue=None):
    """Returns our Flask app"""

    if queue is None:
        queue = IronMQ().queue(QUEUE_NAME)

    app = Flask(__name__)

    @app.route('/secret', methods=['GET', 'POST'])
    def get_secret():
        """GET or POST a secret

        POST: escape, and submit into the queuing service
        GET: Return the first message from the queue
        """

        if request.method == 'POST':
            queue.post(escape(request.form[FORM_FIELD]))
            return '', 201

        elif request.method == 'GET':
            message = queue.get()['messages'].pop()
            queue.delete(message['id'])
            return jsonify(secret=message['body'])

    return app
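The factory above assumes a few module-level imports and constants that the snippet does not show. A minimal companion sketch, not part of the original example, with illustrative QUEUE_NAME and FORM_FIELD values:

from flask import Flask, request, jsonify
from flask import escape  # on newer Flask, use markupsafe.escape instead
from iron_mq import IronMQ

QUEUE_NAME = 'secrets'  # assumed queue name
FORM_FIELD = 'secret'   # assumed form field name

if __name__ == '__main__':
    # the imports/constants only need to exist before create_app() is called
    create_app().run(debug=True)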
Example #2
def get_ironmq_queue_count(active_queues):
    if not IronMQ:
        return print("iron_mq not loaded, not getting queue count")
    assert(settings.IRON_MQ_PROJECT_ID)
    assert(settings.IRON_MQ_TOKEN)
    assert(settings.IRON_MQ_HOST)

    lock = redis.StrictRedis(
        host=settings.proc_scalar_lock_url.hostname,
        port=int(settings.proc_scalar_lock_url.port),
        db=int(settings.proc_scalar_lock_url.path[1:]),
        password=settings.proc_scalar_lock_url.password
    )

    queue = IronMQ(
        host=settings.IRON_MQ_HOST,
        project_id=settings.IRON_MQ_PROJECT_ID,
        token=settings.IRON_MQ_TOKEN
    )
    if not active_queues:
        active_queues = {}

    data = {}

    for queuename, procname in PROC_MAP.items():
        details = {}
        try:
            details = queue.getQueueDetails(queuename)
            print(repr(details))
            length = details["size"]
        except (HTTPException, requests.exceptions.HTTPError):
            length = 0

        if procname not in data:
            key = "DISABLE_CELERY_%s" % procname
            lock_type = lock.get(key)
            if lock_type is None:
                # no deploy lock set for this proc
                lock_type = 0
            data[procname] = {'count': length, 'active': 0, 'deploy_lock': lock_type}
        else:
            data[procname]['count'] += length

        if procname in active_queues:
            data[procname]['active'] += active_queues[procname]

    return data
Example #3
"""Pull events from the segment queue; they are then loaded into Elasticsearch."""

from datetime import datetime
from elasticsearch import Elasticsearch
from iron_mq import IronMQ
from json import loads


#connection to es cluster
es = Elasticsearch(['http://58d3ea40c46e8b15000.qbox.io:80'])

#initiate ironmq connection
ironmq = IronMQ(host="mq-aws-us-east-1.iron.io",
            project_id="557330ae33f0350006000040",
            token="JZsM3ArjIhEfiKlG52Bt99b7Hh4",
            protocol="https", port=443,
            api_version=1,
            config_file=None)

# specify the queue that Segment is writing to
segment_queue = ironmq.queue("segment")

#iterate over things waiting in the queue
for i in range(segment_queue.info()['size']):
    #get next event off queue
    data = segment_queue.get()
    try:
        #get rid of cruft
        event = data['messages'][0]
        # id to delete on successful write
        queue_id = event['id']
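        # NOTE: the original example breaks off here. A hedged completion;
        # the Elasticsearch index/doc_type names ("segment"/"event") are illustrative.
        body = loads(event['body'])
        # write the event into Elasticsearch
        es.index(index="segment", doc_type="event", body=body)
        # remove the message from the queue only after a successful write
        segment_queue.delete(queue_id)
    except (KeyError, IndexError):
        # the queue returned no usable message; skip this iteration
        continue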
Example #4
from iron_mq import IronMQ
import os

project_id = os.getenv("IRON_PROJECT_ID")
token = os.getenv("IRON_TOKEN")

ironmq = IronMQ(project_id=project_id, token=token)
queue = ironmq.queue("requests")

# Warning to all... this is probably the worst possible way to
# enqueue a bunch of in-order numbers... you have been warned.

# Queue ALL THE THINGS
l = range(500000)
# but do it in small batches
n = 3000
list_of_messages = [l[i:i + n] for i in range(0, len(l), n)]

for ls in list_of_messages:
    # unpack and post the array of #s
    queue.post(*[str(i) for i in ls])
Example #5
from elasticsearch import Elasticsearch
from iron_mq import IronMQ
import pandas
import string
import json

datadir='C:\\Users\\agitzes\\Documents\\github\\metadata-dashboard\\data-generation\\'


#connection to es cluster
es = Elasticsearch(['http://58d3ea40c46e8b15000.qbox.io:80'])

#initiate ironmq connection
ironmq = IronMQ(host="mq-aws-us-east-1.iron.io",
            project_id="557330ae33f0350006000040",
            token="JZsM3ArjIhEfiKlG52Bt99b7Hh4",
            protocol="https", port=443,
            api_version=1,
            config_file=None)

# specify the queue that Segment is writing to
eventqueue = ironmq.queue("event-stream")



Trends=pandas.read_csv(datadir+"GlobalmetadataTrends.csv",index_col=0)
interventions=pandas.read_csv(datadir+"intervention.csv",index_col=0)
weather=pandas.read_csv(datadir+"Globalmetadataweather.csv",index_col=0)
CompPrice=pandas.read_csv(datadir+"Globalmetadataweather.csv",index_col=0)


Example #6
from iron_mq import IronMQ
import redis
import time

mq = IronMQ(host="mq-rackspace-ord.iron.io")
q = mq.queue("all_requests")
r = redis.StrictRedis()

while True: # poll indefinitely
    msg = q.get() # ask the queue for messages
    if len(msg["messages"]) < 1: # if there are no messages
        time.sleep(1) # wait a second
        continue # try again
    # if we made it this far, we have a message
    r.incr("requests") # increment the number of requests
    q.delete(msg["messages"][0]["id"]) # delete the message
Example #7
import sendgrid
from iron_helper import WorkerArgs
from iron_mq import IronMQ
import json

args = WorkerArgs()
username = args.config["username"]
password = args.config["password"]
queue_name = args.config["queue"]
s = sendgrid.Sendgrid(username, password, secure=True)
mq = IronMQ()
queue = mq.queue(queue_name)

def getMessage():
    resp = queue.get()
    if "messages" not in resp:
        return None
    if len(resp["messages"]) < 1:
        return None
    return resp["messages"][0]

msg = getMessage()
print msg
while msg is not None:
    msg["body"] = json.loads(msg["body"])
    from_address = None
    from_name = None
    if isinstance(msg["body"]["from"], basestring):
        from_address = msg["body"]["from"]
    else:
        from_address = msg["body"]["from"]["address"]
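        from_name = msg["body"]["from"].get("name")
    # NOTE: the original example is truncated here. A hedged sketch of the rest
    # of the worker loop; the legacy sendgrid.Message / add_to / web.send calls
    # and the body fields ("to", "subject", "text", "html") are assumptions:
    email = sendgrid.Message(from_address,
                             msg["body"].get("subject", ""),
                             msg["body"].get("text", ""),
                             msg["body"].get("html", ""))
    email.add_to(msg["body"]["to"])
    s.web.send(email)
    # remove the handled message from the queue and fetch the next one
    queue.delete(msg["id"])
    msg = getMessage()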
Example #8
import os

import pymongo
import requests
from iron_mq import IronMQ

mongo_url = os.getenv("MONGO_URL")
mongo_user = os.getenv("MONGO_USER")
mongo_password = os.getenv("MONGO_PASSWORD")
schema_name = os.getenv("SCHEMA_NAME")

project_id = os.getenv("IRON_PROJECT_ID")
token = os.getenv("IRON_TOKEN")

# connect to mongo and choose a database
client = pymongo.MongoClient(mongo_url)
client.the_database.authenticate(mongo_user, mongo_password, source=schema_name)
db = client.angellist

# connect to ironmq and choose a queue
ironmq = IronMQ(project_id=project_id, token=token)
queue = ironmq.queue("requests")
url = 'http://api.angel.co/1/startups/{0}'
#
results = []
i = 0
max_reqs = 1000
while True:
    message = queue.get()['messages'][0] # grab the "id" to use
    r = requests.get(url.format(message['body'])).json()
    if not r.get('error'):
        results.append(r)
        # remove from queue since it was successful
        queue.delete(message['id'])
        print(message['body'])
    elif r.get('error') == "over_limit":
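        # NOTE: the original example is truncated here. A hedged completion:
        # stop polling once the AngelList API reports the rate limit was hit.
        print(r['error'])
        break
    i += 1
    if i >= max_reqs:
        # stop after the configured number of requests
        break

# persist whatever was collected; the "startups" collection name is illustrative
if results:
    db.startups.insert_many(results)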
Example #9
def get_connection(list_key=Conf.PREFIX):
    ironmq = IronMQ(name=None, **Conf.IRON_MQ)
    return ironmq.queue(queue_name=list_key)
Example #10
def __init__(self, q_config, name):
    self.q_config = q_config
    c = IronMQ(token=self.q_config['TOKEN'],
               project_id=self.q_config['PROJECT_ID'])
    self.q = c.queue(name)
Example #11
from iron_mq import IronMQ
ironmq = IronMQ(project_id="", token="")
queue = ironmq.queue("requests")

# Warning to all... this is probably the worst possible way to
# enqueue a bunch of in-order numbers... you have been warned.

# Queue ALL THE THINGS
l = range(500000)
# but do it in small batches
n = 3000
list_of_messages = [l[i:i + n] for i in range(0, len(l), n)]

for ls in list_of_messages:
    # unpack and post the array of #s
    queue.post(*[str(i) for i in ls])
Example #12
from iron_mq import IronMQ
import redis
import time

mq = IronMQ()
q = mq.queue("ua_requests")
r = redis.StrictRedis()

while True: # poll indefinitely
    msg = q.get() # ask the queue for messages
    if len(msg["messages"]) < 1: # if there are no messages
        time.sleep(1) # wait a second
        continue # try again
    # if we made it this far, we have a message
    # separate the user agent
    user_agent = msg["messages"][0]["body"]["HTTP_USER_AGENT"]
    # increment the number of requests from the user agent
    r.hincrby("user_agent_requests", user_agent, 1)
    q.delete(msg["messages"][0]["id"]) # delete the message
Example #13
import sendgrid
from iron_helper import WorkerArgs
from iron_mq import IronMQ
import json

args = WorkerArgs()
username = args.config["username"]
password = args.config["password"]
queue_name = args.config["queue"]
s = sendgrid.Sendgrid(username, password, secure=True)
mq = IronMQ()
queue = mq.queue(queue_name)


def getMessage():
    resp = queue.get()
    if "messages" not in resp:
        return None
    if len(resp["messages"]) < 1:
        return None
    return resp["messages"][0]


msg = getMessage()
print msg
while msg is not None:
    msg["body"] = json.loads(msg["body"])
    from_address = None
    from_name = None
    if isinstance(msg["body"]["from"], basestring):
        from_address = msg["body"]["from"]
Example #14
def __init__(self):
    mq = IronMQ(host="mq-rackspace-ord.iron.io")  # instantiate our IronMQ client once
    self.queue = mq.queue("requests")  # set our queue