Example 1
    def __init__(self):
        service.MultiService.__init__(self)
        self.email_producer = None
        self.irc_producer = None
        self.irc_client = None

        # Map queue names to service instances
        self._queues = {}
        self._irc_queues = {}
        self._email_queues = {}
        self._irc_services = []
        self._email_services = []

        db.initialize(config.conf)

        if config.conf["IRC_ENABLED"]:
            queues, bindings = self.get_queues(db.DeliveryType.irc)
            consumers = {q["queue"]: self._dispatch_irc for q in queues}
            producer = FedoraMessagingService(
                queues=queues, bindings=bindings, consumers=consumers)
            producer.setName("irc-{}".format(len(self._irc_services)))
            self._irc_services.append(producer)
            for queue in queues:
                self._queues[queue["queue"]] = producer
            self.addService(producer)

        if config.conf["EMAIL_ENABLED"]:
            queues, bindings = self.get_queues(db.DeliveryType.email)
            consumers = {q["queue"]: mail.deliver for q in queues}
            producer = FedoraMessagingService(
                queues=queues, bindings=bindings, consumers=consumers)
            producer.setName("email-{}".format(len(self._email_services)))
            self._email_services.append(producer)
            for queue in queues:
                self._queues[queue["queue"]] = producer
            self.addService(producer)

        amqp_endpoint = endpoints.clientFromString(
            reactor, 'tcp:localhost:5672'
        )
        params = pika.URLParameters('amqp://')
        control_queue = {
            "queue": "fedora-notifications-control-queue",
            "durable": True,
        }
        factory = FedoraMessagingFactory(
            params,
            queues=[control_queue],
        )
        factory.consume(self._manage_service, control_queue["queue"])
        self.amqp_service = internet.ClientService(amqp_endpoint, factory)
        self.addService(self.amqp_service)
        # TODO set up a listener for messages about new queues.
        # Then we need an API to poke a message service to start a new subscription
        # or stop an existing one.

        if self._irc_services:
            irc_endpoint = endpoints.clientFromString(
                reactor, config.conf["IRC_ENDPOINT"]
            )
            irc_factory = protocol.Factory.forProtocol(irc.IrcProtocol)
            self.irc_client = internet.ClientService(irc_endpoint, irc_factory)
            self.addService(self.irc_client)
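The control-queue wiring above follows the standard Twisted client pattern: an endpoint plus a factory, wrapped in a ClientService and attached to the MultiService. A minimal self-contained sketch of just that pattern, using a trivial echo protocol instead of the service's own factory:

from twisted.application import internet, service
from twisted.internet import endpoints, protocol, reactor

class Echo(protocol.Protocol):
    # echo every received byte back to the peer
    def dataReceived(self, data):
        self.transport.write(data)

top = service.MultiService()
endpoint = endpoints.clientFromString(reactor, 'tcp:localhost:5672')
client = internet.ClientService(endpoint, protocol.Factory.forProtocol(Echo))
client.setServiceParent(top)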
Example 2
    def connect(self):
        LOGGER.debug('Connecting to %s', self._url)
        return pika.SelectConnection(pika.URLParameters(self._url),
                                     self.on_connection_open,
                                     stop_ioloop_on_close=False)
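Note that stop_ioloop_on_close was removed in pika 1.0, so this snippet targets pika 0.x. A rough 1.x equivalent of the same connect step, assuming the same on_connection_open callback:

    def connect(self):
        LOGGER.debug('Connecting to %s', self._url)
        return pika.SelectConnection(pika.URLParameters(self._url),
                                     on_open_callback=self.on_connection_open)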
Example 3
    ch.basic_reject(delivery_tag=method.delivery_tag, requeue=True)


# main -------------------------------------
if __name__ == '__main__':
    try:
        # configure logging
        logger = configure_logging()

        # get command arguments
        args = get_args()

        # create AMQP handler
        try:
            # amqp configuration
            parameters = pika.URLParameters(args.amqp_url)
            connection = pika.BlockingConnection(parameters)
            amqp = connection.channel()
            amqp.exchange_declare(exchange=args.exchange, durable=True)
            amqp.queue_declare(queue=args.queue, durable=True)
            amqp.queue_bind(queue=args.queue,
                            exchange=args.exchange,
                            routing_key=args.routing_key)
            amqp.basic_qos(prefetch_count=args.qos_num)
            # register the callback and settings used to consume amqp messages
            # (pika 0.x argument order; 1.x uses queue= and on_message_callback=)
            amqp.basic_consume(check_message, queue=args.queue, no_ack=False)
        except Exception as e:
            logger.critical('Failed to create %s AMQP handler: %s' %
                            (args.amqp_url, str(e)))
            logger.exception(e)
            time.sleep(sleep)  # 'sleep' delay is defined outside this excerpt
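After logging the failure, the script sleeps and, in the full program, presumably retries. A minimal standalone sketch of that connect-with-retry pattern (the URL and delay are placeholders, not from the original):

import time

import pika

def connect_with_retry(amqp_url, retry_delay=5):
    # keep trying until the broker accepts the connection
    while True:
        try:
            return pika.BlockingConnection(pika.URLParameters(amqp_url))
        except pika.exceptions.AMQPConnectionError:
            time.sleep(retry_delay)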
Example 4
import pika, json

from main import Product, db

params = pika.URLParameters(
    'amqps://*****:*****@shark.rmq.cloudamqp.com/qkyaxufh'
)

connection = pika.BlockingConnection(params)

channel = connection.channel()

channel.queue_declare(queue='main')


def callback(ch, method, properties, body):
    print('Received in main')
    data = json.loads(body)
    print(data)

    if properties.content_type == 'product_created':
        product = Product(id=data['id'],
                          title=data['title'],
                          image=data['image'])
        db.session.add(product)
        db.session.commit()
        print('Product created in main', product)

    elif properties.content_type == 'product_updated':
        product = Product.query.get(data['id'])
        if product is None:
            # assumed continuation; the source excerpt is cut off here
            print('Product not found in main')
Example 5
import settings
import pika
import logging
from logging import config
import helpers

logging.config.dictConfig(settings.LOGGING)
logger = logging.getLogger('main')


if __name__ == "__main__":
    if settings.AMQP_URL is None:
        logger.error('AMQP URL is not defined')
        exit(1)

    logger.info("LET'S THE GAME BEGIN")
    parameters = pika.URLParameters(settings.AMQP_URL)
    connection = pika.BlockingConnection(parameters=parameters)
    channel = connection.channel()

    # Exchanges
    exchanges = helpers.json_file_to_list('exchanges.json')
    helpers.declare_exchanges(channel, exchanges)

    # Queues
    queues = helpers.json_file_to_list('queues.json')
    helpers.declare_queues(channel, queues)

    # Binding
    bindings = helpers.json_file_to_list('bindings.json')
    helpers.bind(channel, bindings)
Example 6
def callback(ch, method, properties, body):
    # callback head reconstructed; the excerpt begins mid-function
    channel.basic_ack(delivery_tag=method.delivery_tag)


def on_open(connection):
    connection.channel(on_channel_open)


def on_channel_open(aChannel):
    global channel
    channel = aChannel
    channel.queue_declare(queue='station', callback=on_queue_declared1)
    channel.queue_declare(queue='monitoring', callback=on_queue_declared2)


def on_queue_declared1(frame):
    channel.basic_consume(callback, queue='station')


def on_queue_declared2(frame):
    channel.basic_consume(callback, queue='monitoring')


parameters = pika.URLParameters('amqp://*****:*****@localhost:5672/%2F')
connection = pika.SelectConnection(parameters=parameters,
                                   on_open_callback=on_open)

try:
    connection.ioloop.start()
except KeyboardInterrupt:
    connection.close()
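The channel.basic_consume calls above use the pika 0.x positional signature. Under pika 1.x the equivalent calls would read roughly:

channel.basic_consume(queue='station', on_message_callback=callback)
channel.basic_consume(queue='monitoring', on_message_callback=callback)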
Example 7
import pika, json, os, django

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "admin.settings")
django.setup()

from products.models import Product

params = pika.URLParameters(
    'amqps://*****:*****@grouse.rmq.cloudamqp.com/llzozvnt')

connection = pika.BlockingConnection(params)

channel = connection.channel()


channel.queue_declare(queue='admin')


def callback(ch, method, properties, body):
    print('Received in admin')
    id = json.loads(body)
    print(id)
    product = Product.objects.get(id=id)
    product.likes = product.likes + 1
    product.save()
    print('Product likes increased!')


channel.basic_consume(
    queue='admin', on_message_callback=callback, auto_ack=True)
Example 8
def connect(connection_url):
    """ Create and return a fresh connection
    """
    return pika.BlockingConnection(pika.URLParameters(connection_url))
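A quick usage sketch for this helper; the URL and queue name are placeholders:

connection = connect('amqp://guest:guest@localhost:5672/%2F')
channel = connection.channel()
channel.queue_declare(queue='test')
channel.basic_publish(exchange='', routing_key='test', body=b'ping')
connection.close()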
Example 9
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 13 13:55:28 2020

@author: bouvaran
"""

import mykeys
import pika

AMQP_URL = mykeys.cloudamplink

connection = pika.BlockingConnection(pika.URLParameters(AMQP_URL))
channel = connection.channel()
channel.queue_declare(queue='presentation')

channel.basic_publish(exchange='',
                      routing_key='presentation',
                      body='Hello World!')
print("[Antoine_le_bg] hey plebs")
connection.close()
Example 10
logs_path = os.path.join(basedir, 'logs')

if not os.path.exists(checkpoint_path):
    os.makedirs(checkpoint_path)
if not os.path.exists(logistic_regression_path):
    os.makedirs(logistic_regression_path)
if not os.path.exists(support_vector_classifier_path):
    os.makedirs(support_vector_classifier_path)
if not os.path.exists(random_forest_path):
    os.makedirs(random_forest_path)

# Following models supported for training
models = ['logRegr', 'svc', 'RandomForest']
# --------------------------------------------------------------

connection = pika.BlockingConnection(pika.URLParameters(RABBIT_URI))
channel = connection.channel()
channel.queue_declare(queue='trainings')


def callback(ch, method, properties, body):
    # Training of the model is launched
    df = training_dataframe(mongodb_uri=MONGO_URI)
    users = df['user_email'].unique()

    for model in models:
        print('Launching hyperparameter GridSearch for model', model)
        if model == 'logRegr':
            os.system("python gridsearch_logRegr.py")
            # Load the optimal parameters for each user from a json
            os.chdir(logs_path)
Example 11
#!/usr/bin/env python
import pika
import json
from time import sleep
import sys

# ./ezviz records get 1 2019-05-30\ 00:00:00 2019-05-30\ 09:00:00 C90674290 WGXWZT a287e05ace374c3587e051db8cd4be82 at.bg2xm8xf03z5ygp01y84xxmv36z54txj-4n5jmc9bua-0iw2lll-qavzt882f
parameters = pika.URLParameters(
    "amqp://*****:*****@192.168.1.102"
)  #amqp://ilabservice:[email protected]:5672/
connection = pika.BlockingConnection(parameters)
chanPlay = connection.channel()
chanPlay.exchange_declare(exchange="ezviz.exchange.rtplay",
                          exchange_type="direct")

# args = {"x-max-priority": 10}
# args["x-expires"] = 10 * 1000
chanPlay.queue_declare(queue='ezviz.work.queue.rtplay', durable=False)

chanPlay.queue_bind(exchange="ezviz.exchange.rtplay",
                    queue='ezviz.work.queue.rtplay',
                    routing_key='rtplay')

chanStop = connection.channel()
chanStop.exchange_declare(exchange="ezviz.exchange.rtplay",
                          exchange_type="direct")

# args = {"x-max-priority": 10}
# args["x-expires"] = 10 * 1000
chanStop.queue_declare(queue='ezviz.work.queue.rtstop_', durable=False)
Example 12
import json

import pika

params = pika.URLParameters(
    'amqps://*****:*****@jackal.rmq.cloudamqp.com/zfpolbnp'
)

connection = pika.BlockingConnection(params)

channel = connection.channel()


def publish(method, body):
    # BasicProperties' first positional argument is content_type,
    # so the event name travels as the message's content_type
    properties = pika.BasicProperties(method)
    channel.basic_publish(exchange='',
                          routing_key='admin',
                          body=json.dumps(body),
                          properties=properties)
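Usage sketch: the event name rides along as the message's content_type, which consumer code such as Examples 4 and 21 dispatches on:

publish('product_created', {'id': 1, 'title': 'T-shirt', 'image': 'shirt.png'})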
Example 13
#   -   disabled skype messaging
#   2013-05-15  Lawrence Sunglao <*****@*****.**>
#   -   initial hack

import settings
import pika
import logging
from datetime import datetime
import simplejson as json
from pytz import timezone

from celery.execute import send_task

logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')

parameters = pika.URLParameters(settings.PIKA_URL)
connection = pika.BlockingConnection(parameters=parameters)
channel_mass_responder_code = connection.channel()
channel_mass_responder_code.queue_declare(queue='expire_mass_responder_code',
                                          durable=True)

logging.info(' [*] Waiting for logs. To exit press CTRL+C')


def schedule_mass_responder_code_expire(ch, method, properties, body):
    """
    expecting a json data
    body = dict(
        userid = 69,
        scheduled_date = '2013-05-15 09:23:00', #Asia/Manila timezone
        mass_responder_code = 'fffa69ff84bbb0ce4674cac984293bb339153f17',
Example 14
import pika
import sys
import time
import logging

LOGGER = logging.getLogger(__name__)

LOGGER.warning('Prepare to start consumer .....')
time.sleep(20)  # Hack here to wait for rabbitmq when run with docker-compose

connection = pika.BlockingConnection(
    pika.URLParameters('amqp://*****:*****@rabbitmq:5672/%2F'))
channel = connection.channel()

channel.exchange_declare(exchange='topic_logs', exchange_type='topic')

result = channel.queue_declare('', exclusive=True)
queue_name = result.method.queue

binding_keys = sys.argv[1:]
if not binding_keys:
    sys.stderr.write("Usage: %s [binding_key]...\n" % sys.argv[0])
    sys.exit(1)

for binding_key in binding_keys:
    channel.queue_bind(exchange='topic_logs',
                       queue=queue_name,
                       routing_key=binding_key)

LOGGER.warning(' [*] Waiting for logs. To exit press CTRL+C')
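The excerpt stops before the consume loop. The RabbitMQ topic-logs tutorial this follows continues roughly like so (a sketch in pika 1.x style):

def callback(ch, method, properties, body):
    LOGGER.warning(' [x] %r:%r', method.routing_key, body)

channel.basic_consume(queue=queue_name, on_message_callback=callback,
                      auto_ack=True)
channel.start_consuming()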
Example 15
def get_url_parameters():
    return pika.URLParameters('amqp://{}:{}@rabbitmq:{}/'.format(
        env.RMQ_USER, env.RMQ_PASS, env.RMQ_PORT))
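Usage sketch, assuming the env module exposes RMQ_USER, RMQ_PASS and RMQ_PORT:

connection = pika.BlockingConnection(get_url_parameters())
channel = connection.channel()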
Example 16
        logging.warning("reject")
        # we reject the message
        ch.basic_reject(delivery_tag=method.delivery_tag, requeue=False)
        pika.BlockingConnection.sleep(broker_connection, duration=1)
        # and re-publish a new one in the same queue
        channel.basic_publish(exchange=method.exchange,
                              routing_key=method.routing_key,
                              body=body,
                              properties=properties)
    else:
        logging.warning("ack")
        dead_letter_queue = properties.headers['x-dead-letter-queue']
        # despite the "ack" log, the message is nacked without requeue and a
        # copy is routed to the dead-letter queue below
        ch.basic_nack(delivery_tag=method.delivery_tag, requeue=False)
        channel.basic_publish(exchange='',
                              routing_key=dead_letter_queue,
                              body=body,
                              properties=pika.BasicProperties(
                                  headers=properties.headers
                              ))


# Connect to Rabbit using credentials
broker_connection = pika.BlockingConnection(pika.URLParameters(os.getenv('AMQP_URI')))

# create a new channel
channel = broker_connection.channel()
# create the queue if it doesn't exist
channel.queue_declare(queue=queue_name)
# pika 0.x signature; 1.x renames these to on_message_callback= and auto_ack=
channel.basic_consume(consumer_callback=callback, queue=queue_name, no_ack=False)
channel.start_consuming()
Example 17
    def run(self, terms, variables=None, url=None, queue=None, count=None):
        if not HAS_PIKA:
            raise AnsibleError(
                'pika python package is required for rabbitmq lookup.')
        if not url:
            raise AnsibleError('URL is required for rabbitmq lookup.')
        if not queue:
            raise AnsibleError('Queue is required for rabbitmq lookup.')

        display.vvv(u"terms:%s : variables:%s url:%s queue:%s count:%s" %
                    (terms, variables, url, queue, count))

        try:
            parameters = pika.URLParameters(url)
        except Exception as e:
            raise AnsibleError("URL malformed: %s" % to_native(e))

        try:
            connection = pika.BlockingConnection(parameters)
        except Exception as e:
            raise AnsibleError("Connection issue: %s" % to_native(e))

        try:
            conn_channel = connection.channel()
        except pika.exceptions.AMQPChannelError as e:
            try:
                connection.close()
            except pika.exceptions.AMQPConnectionError as ie:
                raise AnsibleError(
                    "Channel and connection closing issues: %s / %s" %
                    (to_native(e), to_native(ie)))
            raise AnsibleError("Channel issue: %s" % to_native(e))

        ret = []
        idx = 0

        while True:
            method_frame, properties, body = conn_channel.basic_get(
                queue=queue)
            if method_frame:
                display.vvv(u"%s, %s, %s " %
                            (method_frame, properties, to_text(body)))

                # TODO: In the future consider checking content_type and handle text/binary data differently.
                msg_details = dict({
                    'msg': to_text(body),
                    'message_count': method_frame.message_count,
                    'routing_key': method_frame.routing_key,
                    'delivery_tag': method_frame.delivery_tag,
                    'redelivered': method_frame.redelivered,
                    'exchange': method_frame.exchange,
                    'delivery_mode': properties.delivery_mode,
                    'content_type': properties.content_type,
                    'headers': properties.headers
                })
                if properties.content_type == 'application/json':
                    try:
                        msg_details['json'] = json.loads(msg_details['msg'])
                    except ValueError as e:
                        raise AnsibleError(
                            "Unable to decode JSON for message %s: %s" %
                            (method_frame.delivery_tag, to_native(e)))

                ret.append(msg_details)
                conn_channel.basic_ack(method_frame.delivery_tag)
                idx += 1
                if method_frame.message_count == 0 or idx == count:
                    break
            # If we didn't get a method_frame, exit.
            else:
                break

        if connection.is_closed:
            return [ret]
        else:
            try:
                connection.close()
            except pika.exceptions.AMQPConnectionError:
                pass
            return [ret]
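The heart of the loop is basic_get, which polls one message at a time and returns (None, None, None) when the queue is empty. The same drain pattern with the Ansible plumbing stripped away, as a sketch:

import pika

def drain(url, queue, count=None):
    # fetch up to count messages (or until the queue is empty), acking each
    connection = pika.BlockingConnection(pika.URLParameters(url))
    channel = connection.channel()
    messages = []
    while True:
        method, properties, body = channel.basic_get(queue=queue)
        if method is None:
            break
        messages.append(body)
        channel.basic_ack(method.delivery_tag)
        if method.message_count == 0 or len(messages) == count:
            break
    connection.close()
    return messages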
Example 18
    def connect(self):
        self._connection = adapters.TornadoConnection(
            pika.URLParameters(self._url), self.on_connected)
Example 19
plugin = 'coresense:3'

url = 'amqp://*****:*****@localhost'

# url = 'amqps://*****:*****@beehive1.mcs.anl.gov:23181?{}'.format(urlencode({
#     'ssl': 't',
#     'ssl_options': {
#         'certfile': os.path.abspath('SSL/node/cert.pem'),
#         'keyfile': os.path.abspath('SSL/node/key.pem'),
#         'ca_certs': os.path.abspath('SSL/waggleca/cacert.pem'),
#         'cert_reqs': ssl.CERT_REQUIRED
#     }
# }))

connection = pika.BlockingConnection(pika.URLParameters(url))

channel = connection.channel()

channel.exchange_declare(exchange='plugins-in', exchange_type='direct')

channel.exchange_bind(source='data-pipeline-in', destination='plugins-in')

channel.queue_declare(queue=plugin, durable=True)

channel.queue_bind(queue=plugin, exchange='plugins-in', routing_key=plugin)

channel.exchange_declare(exchange='plugins-out',
                         exchange_type='fanout',
                         durable=True)
Example 20
import json
import os

import pika

from threading import Thread

from classifier import Classifier
from classifier_container import ClassifierContainer

# This class is used to classify (detect objects in) the videos.
classifier = Classifier(
  det_threshold = float(os.environ['DET_THRESHOLD']),
  classes_of_interest = json.loads(os.environ['CLASSES_OF_INTEREST']),
  frame_skip = int(os.environ['FRAME_SKIP']),
  model_file = os.environ['NET_MODEL_FILE'],
  classes_file = os.environ['NET_CLASSES_FILE'])

# Connect to rabbitmq (the container name is "mq").
mq_conn_str = 'amqp://{}:{}'.format(os.environ['MQ_HOST'], os.environ['MQ_PORT'])
connection  = pika.BlockingConnection(pika.URLParameters(mq_conn_str))
channel     = connection.channel()

# Create the queues if they have not already been created.
# The API server publishes messages on the first queue.
# The python script responds on the second queue.
channel.queue_declare(queue=os.environ['MQ_NOTIFY_QUEUE'])
channel.queue_declare(queue=os.environ['MQ_SAVE_QUEUE'])

# Fires when a message is received.  The message will contain a
# MotionRecording entity (see the motion-detection-api).  The entity
# has a video file name, which is classified.
def on_message_received(channel, method_frame, header_frame, body):
  recording = json.loads(body)['data']

  # Classify the video in a thread.  A wrapper container is used to hold the
Example 21
import configparser
import json

import pika

from main import Product, db

config = configparser.ConfigParser()
config.read('config.ini')
params = pika.URLParameters(config['rabbitmq']['uri'])

connection = pika.BlockingConnection(params)

channel = connection.channel()

channel.queue_declare(queue='main')


def callback(ch, method, properties, body):
    print('Received in main')
    data = json.loads(body)
    print(data)

    if properties.content_type == 'product_created':
        product = Product(id=data['id'],
                          title=data['title'],
                          image=data['image'])
        db.session.add(product)
        db.session.commit()
        print('Product created')
Example 22
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)


# callback to give to the consume function
def get_messages(ch, method, properties, body):
    print(body.decode("UTF-8"))


# basic argument handler
if len(argv) < 2 or len(argv) > 3:
    print("Usage: {} AMQP_URI [QUEUE_NAME]".format(argv[0]))
    exit(1)

amqp_uri = argv[1]
queue = argv[2] if 2 < len(argv) else 'default'
queue = 'queue_%s' % queue

# try to set up and use the connection
try:
    parameters = pika.URLParameters(amqp_uri)
    connection = pika.BlockingConnection(parameters)

    channel = connection.channel()
    channel.queue_declare(queue=queue, durable=True)

    channel.basic_consume(get_messages, queue=queue, no_ack=True)
    channel.start_consuming()
except Exception as e:
    print(e)
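The signal handler registered at the top is outside the excerpt; a minimal version that simply exits (an assumption, not the original code) could be:

import sys

def handler(signum, frame):
    # stop the consumer on SIGINT/SIGTERM
    sys.exit(0)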
Example 23
    def connect(self):
        self.connection = TornadoConnection(pika.URLParameters(self._amqp_url),
                                            on_open_callback=self.on_connected)
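The on_connected callback typically opens a channel as the next step of the asynchronous setup; a sketch, where on_channel_open is a hypothetical follow-up callback not shown in the original:

    def on_connected(self, connection):
        connection.channel(on_open_callback=self.on_channel_open)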
Example 24
if MONGO_URL:
    mongo_conn = pymongo.MongoClient(MONGO_URL)
    db = mongo_conn[urlparse(MONGO_URL).path[1:]]
else:
    # Not on an app with the MongoHQ add-on, do some localhost action
    mongo_conn = pymongo.MongoClient('localhost', 27017)
    db = mongo_conn['someapps-db']

# Default of 0.25 seconds is too quick for my taste; give it 5 seconds.
pika.adapters.BlockingConnection.SOCKET_CONNECT_TIMEOUT = 5

pika_params = []
if TX_QUEUE_URL and RX_QUEUE_URL:
    pika_params.append(
        pika.URLParameters(RX_QUEUE_URL + (
            "?retry_delay=1&connection_attempts=3&heartbeat_interval=%d" %
            CONNECTION_EXPIRES_S)))
    pika_params.append(
        pika.URLParameters(TX_QUEUE_URL + (
            "?retry_delay=1&connection_attempts=3&heartbeat_interval=%d" %
            CONNECTION_EXPIRES_S)))
else:
    pika_params.append(pika.ConnectionParameters(host='localhost'))
    pika_params.append(pika.ConnectionParameters(host='localhost'))

logging.basicConfig(filename='error.log', level=logging.DEBUG)

app = Flask(__name__)
app.config.from_pyfile('config.py')

channel_cache = [deque(), deque()]
Example 25
    def __init__(self, target_bucket, target_fname, upload=False):
        self.target_fname = target_fname
        self.target_bucket = target_bucket
        self.ini_error = False
        format_str = "cloudfunctions:\n  'endpoint': ''\n  'namespace': ''\n  'api_key': ''\nrabbitamqp:\n  'url': ''\ncos:\n  service_endpoint: ''\n  secret_key: ''\n  access_key: ''"

        try:
            # load keys securely
            with open('secret.yaml', 'r') as f:
                secret = yaml.safe_load(f)

            # initialitze the remote storage wrapper, and upload the target file
            self.cb = COSBackend(secret['cos']['service_endpoint'],
                                 secret['cos']['secret_key'],
                                 secret['cos']['access_key'])
            if upload:
                target_file = open(self.target_fname, "rb")
                self.cb.put_object(target_bucket, target_fname,
                                   target_file.read())
                target_file.close()

            # retrieve file length, ensure file has been uploaded
            try:
                self.fsize = int(
                    self.cb.head_object(self.target_bucket,
                                        self.target_fname)['content-length'])
            except:
                print(
                    'File \'{}\' was not found in this bucket \'{}\'. Upload it and retry.'
                    .format(self.target_fname, self.target_bucket))
                self.ini_error = True
                return None

            # initialize the function wrapper
            config = {}
            config['endpoint'] = secret['cloudfunctions']['endpoint']
            config['namespace'] = secret['cloudfunctions']['namespace']
            config['api_key'] = secret['cloudfunctions']['api_key']
            self.cf = CloudFunctions(config)

            # initialize the queue system
            self.pika_params = pika.URLParameters(secret['rabbitamqp']['url'])

        except KeyError:
            print('Wrong yaml document format. Please use the following one:')
            print(format_str)
            self.ini_error = True
            return  # 'secret' is unusable; skip the stub below
        except FileNotFoundError as e:
            print('File \'{}\' not found.'.format(e.filename))
            self.ini_error = True
            return  # 'secret' was never loaded; skip the stub below

        # set the common args stub
        self.comargs = {}
        self.comargs['cos'] = secret['cos']
        self.comargs['rabbitamqp_url'] = secret['rabbitamqp']['url']
        self.comargs['target_bucket'] = self.target_bucket
        self.comargs['target_fname'] = self.target_fname

        # two separate queues, the reducer waits for the mappers and the orchestrator waits for the reducer
        self.mapper_qid = 'mapperQueue'
        self.reducer_qid = 'reducerQueue'
Example 26
import pika

#connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
parameters = pika.URLParameters('amqp://*****:*****@rabbit:5672/%2F')

connection = pika.BlockingConnection(parameters)
channel = connection.channel()

channel.queue_declare(queue='hello')

channel.basic_publish(exchange='',
                      routing_key='hello',
                      body='Hello World!')
print(" [x] Sent 'Hello World!'")
connection.close()
Example 27
#!/usr/bin/python

# RabbitMQ Sample (part 2): Message receiver
# Your RabbitMQ service needs to be running first!

# This script should receive transformed messages from the anonymize service,
# according to the rules defined in rabbitmq_schema.json

import pika, time


def get_message(ch, method, properties, body):
    print(body.decode('utf-8'))


connection_string = "amqp://localhost/"
parameters = pika.URLParameters(connection_string)

queue_name = "output"
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.basic_consume(get_message, queue=queue_name, no_ack=True)
print("Listening started...")
channel.start_consuming()
Example 28
                'data': {
                    'text': [
                        t['text']
                        for t in r['edge_media_to_comment'].get('data', [])
                    ]
                }
            }
        }

        es.index(index='instagram', body=doc, id=r['id'])


if __name__ == '__main__':
    es = Elasticsearch(['ELASTIC_LINK'])

    parameters = pika.URLParameters('RMQ_LINK')
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    channel.queue_declare(queue='NAME_OF_QUEUE', durable=True)
    print(' [*] Waiting for messages. To exit press CTRL+C')

    def callback(ch, method, properties, body):
        print(" [x] Received %r" % body)
        time.sleep(body.count(b'.'))
        tag = body.decode("utf-8")
        print(f'running for {tag}')

        subprocess.run([
            "instagram-scraper", tag, "--latest", "--comments",
            "--profile-metadata", "--include-location", "--media-metadata",
import pika
import os
import logging
logging.basicConfig()

# Parse CLOUDAMQP_URL (fallback to localhost)
url = os.environ.get('CLOUDAMQP_URL', 'amqp://*****:*****@rabbitmq')
params = pika.URLParameters(url)
params.socket_timeout = 5

connection = pika.BlockingConnection(params)  # Connect to CloudAMQP
channel = connection.channel()  # start a channel
channel.queue_declare(queue='pdfprocess')  # Declare a queue
# send a message

channel.basic_publish(exchange='',
                      routing_key='pdfprocess',
                      body='User information')
print("[x] Message sent to consumer")
connection.close()
Example 30
    @classmethod
    def from_url(cls, url):
        return cls(pika.URLParameters(url))
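A usage sketch: wrapped in a hypothetical class, the alternate constructor reads like this:

import pika

class AmqpClient:
    # hypothetical wrapper class for illustration
    def __init__(self, parameters):
        self._parameters = parameters

    @classmethod
    def from_url(cls, url):
        return cls(pika.URLParameters(url))

client = AmqpClient.from_url('amqp://guest:guest@localhost:5672/%2F')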