Example no. 1
 def get_host_action(self, vhost):
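     """Return the first cluster host whose management API passes the
     aliveness check for vhost; implicitly returns None if no host responds."""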
     for host in self.rabbit_config.hosts:
         cl = Client(f'{host}:15672', self.rabbit_config.username, self.rabbit_config.password)
         try:
             cl.is_alive(vhost)
             return host
         except APIError:
             pass
def main():
    rabbit = None
    try:
        # Ensure there are no parallel runs of this script
        lock.acquire(timeout=5)

        # Connect to Rabbit
        nullwrite = NullWriter()
        oldstdout = sys.stdout
        sys.stdout = nullwrite # disable output
        rabbit = RabbitClient('%s:%s' % (rabbitmq['host'], rabbitmq['port']), 
            rabbitmq['username'], 
            rabbitmq['password'])
        if not rabbit.is_alive():
            raise Exception('Cannot connect to RabbitMQ')
        queues = rabbit.get_queues(rabbitmq['vhost'])
        sys.stdout = oldstdout # enable output

        # Build outcome
        if args.d == 'queues':
            outcome = {'data': []}
            for queue in queues:
                outcome['data'].append({'{#QUEUE}': queue['name']})
    except LockTimeout:
        print 'Lock not acquired, exiting'
    except AlreadyLocked:
        print 'Already locked, exiting'
    except Exception, e:
        print type(e)
        print 'Error: %s' % e
Example no. 3
def main():
    rabbit = None
    outcome = None
    try:
        # Ensure there are no parallel runs of this script
        lock.acquire(timeout=5)

        # Connect to Rabbit
        nullwrite = NullWriter()
        oldstdout = sys.stdout
        sys.stdout = nullwrite  # disable output
        rabbit = RabbitClient('%s:%s' % (rabbitmq['host'], rabbitmq['port']),
                              rabbitmq['username'], rabbitmq['password'])
        if not rabbit.is_alive():
            raise Exception('Cannot connect to RabbitMQ')
        queues = rabbit.get_queues(rabbitmq['vhost'])
        sys.stdout = oldstdout  # enable output

        # Build outcome
        if args.d == 'queues':
            outcome = {'data': []}
            for queue in queues:
                outcome['data'].append({'{#QUEUE}': queue['name']})
    except LockTimeout:
        print 'Lock not acquired, exiting'
    except AlreadyLocked:
        print 'Already locked, exiting'
    except Exception, e:
        print type(e)
        print 'Error: %s' % e
Example no. 4
class MqSmokeTest(unittest.TestCase):
    def setUp(self):
        self.cl = Client("mq:15672", os.environ.get("SFM_RABBITMQ_USER"),
                         os.environ.get("SFM_RABBITMQ_PASSWORD"))
        self.assertTrue(self.cl.is_alive())

    def test_exchange(self):
        exchanges = self.cl.get_exchanges()
        for exchange in exchanges:
            if exchange["name"] == "sfm_exchange":
                break
        else:
            self.fail("Exchange not found.")

    def test_queues(self):
        queues = self.cl.get_queues()
        queues_names = {queue["name"] for queue in queues}
        # Add additional queue names as new components are added.
        self.assertTrue(
            queues_names.issuperset(
                set([
                    "flickr_harvester", "flickr_exporter", "sfm_ui",
                    "twitter_harvester", "twitter_rest_harvester",
                    "twitter_rest_harvester_priority", "twitter_rest_exporter",
                    "twitter_stream_exporter", "tumblr_harvester",
                    "tumblr_exporter"
                ])))
Example no. 5
def main():
    rabbit = None
    try:
        # Ensure there are no parallel runs of this script
        lock.acquire(timeout=5)

        # Connect to Rabbit
        nullwrite = NullWriter()
        oldstdout = sys.stdout
        sys.stdout = nullwrite  # disable output
        rabbit = RabbitClient('%s:%s' % (rabbitmq['host'], rabbitmq['port']),
                              rabbitmq['username'], rabbitmq['password'])
        if not rabbit.is_alive():
            raise Exception('Cannot connect to RabbitMQ')
        vhost = rabbit.get_vhost(rabbitmq['vhost'])
        queues = rabbit.get_queues(rabbitmq['vhost'])
        sys.stdout = oldstdout  # enable output

        # Build outcome
        metrics = []
        for key in keys:
            if type(key) == dict and 'vhost' in key:
                for subkey in key['vhost']:
                    if subkey in vhost:
                        metrics.append(
                            Metric(rabbitmq['host'],
                                   'rabbitmq.%s.%s' % ('vhost', subkey),
                                   vhost[subkey]))
            elif type(key) == dict and 'vhost.message_stats' in key:
                for subkey in key['vhost.message_stats']:
                    if subkey in vhost['message_stats']:
                        metrics.append(
                            Metric(
                                rabbitmq['host'], 'rabbitmq.%s.%s' %
                                ('vhost.message_stats', subkey),
                                vhost['message_stats'][subkey]))
            elif type(key) == dict and 'queues' in key:
                for queue in queues:
                    for subkey in key['queues']:
                        if subkey in queue:
                            metrics.append(
                                Metric(
                                    rabbitmq['host'], 'rabbitmq.%s.%s[%s]' %
                                    ('queue', subkey, queue['name']),
                                    queue[subkey]))

        # Send packet to zabbix
        send_to_zabbix(metrics, zabbix_host, zabbix_port)
    except LockTimeout:
        print 'Lock not acquired, exiting'
    except AlreadyLocked:
        print 'Already locked, exiting'
    except Exception, e:
        print type(e)
        print 'Error: %s' % e
Example no. 6
def get_queue_depths(host, username, password, vhost):
    cl = Client(host, username, password)
    if not cl.is_alive():
        raise Exception("Failed to connect to rabbitmq")
    depths = {}
    queues = [q["name"] for q in cl.get_queues(vhost=vhost)]
    for queue in queues:
        if queue == "aliveness-test":
            continue
        depths[queue] = cl.get_queue_depth(vhost, queue)
    return depths
def get_queue_depths(host, username, password, vhost):
    cl = Client(host, username, password)
    if not cl.is_alive():
        raise Exception("Failed to connect to rabbitmq")
    depths = {}
    queues = [q['name'] for q in cl.get_queues(vhost=vhost)]
    for queue in queues:
        if queue == "aliveness-test": #pyrabbit
            continue
        elif queue.endswith('.pidbox') or queue.startswith('celeryev.'): #celery
            continue
        depths[queue] = cl.get_queue_depth(vhost, queue)
    return depths
def get_queue_depths(host, username, password, vhost):
    """ Fetches queue depths from rabbitmq instance."""
    cl = Client(host, username, password)
    if not cl.is_alive():
        raise Exception("Failed to connect to rabbitmq")
    depths = {}
    queues = [q['name'] for q in cl.get_queues(vhost=vhost)]
    for queue in queues:
        if queue == "aliveness-test":  #pyrabbit
            continue
        elif queue.startswith('amq.gen-'):  #Anonymous queues
            continue
        depths[queue] = cl.get_queue_depth(vhost, queue)
    return depths
Example no. 9
def get_queue_depths(host, username, password, vhost):
    cl = Client(host, username, password)
    if not cl.is_alive():
        raise Exception("Failed to connect to rabbitmq")
    depths = {}
    queues = [q['name'] for q in cl.get_queues(vhost=vhost)]
    for queue in queues:
        if queue == "aliveness-test":  #pyrabbit
            continue
        elif queue.endswith('.pidbox') or queue.startswith(
                'celeryev.'):  #celery
            continue
        depths[queue] = cl.get_queue_depth(vhost, queue)
    return depths
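The inline comments in this and the preceding get_queue_depths variants (#pyrabbit, #celery, #Anonymous queues) note which housekeeping queues get skipped. A minimal usage sketch for the helper above, assuming a local broker with the default guest credentials and the default "/" vhost (none of which come from the original snippets):

if __name__ == "__main__":
    # Hypothetical host, credentials and vhost, for illustration only.
    depths = get_queue_depths("localhost:15672", "guest", "guest", "/")
    for name, depth in sorted(depths.items()):
        print("%s: %d messages" % (name, depth))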
Example no. 10
def main():
    rabbit = None
    try:
        # Ensure there are no parallel runs of this script
        lock.acquire(timeout=5)

        # Connect to Rabbit
        nullwrite = NullWriter()
        oldstdout = sys.stdout
        sys.stdout = nullwrite # disable output
        rabbit = RabbitClient('%s:%s' % (rabbitmq['host'], rabbitmq['port']), 
            rabbitmq['username'], 
            rabbitmq['password'])
        if not rabbit.is_alive():
            raise Exception('Cannot connect to RabbitMQ')
        vhost = rabbit.get_vhost(rabbitmq['vhost'])
        queues = rabbit.get_queues(rabbitmq['vhost'])
        sys.stdout = oldstdout # enable output

        # Build outcome
        metrics = []
        for key in keys:
            if type(key) == dict and 'vhost' in key:
                for subkey in key['vhost']:
                    if subkey in vhost:
                        metrics.append(Metric(rabbitmq['host'], 'rabbitmq.%s.%s' % ('vhost', subkey), vhost[subkey]))
            elif type(key) == dict and 'vhost.message_stats' in key:
                for subkey in key['vhost.message_stats']:
                    if subkey in vhost['message_stats']:
                        metrics.append(Metric(rabbitmq['host'], 'rabbitmq.%s.%s' % ('vhost.message_stats', subkey), 
                            vhost['message_stats'][subkey]))
            elif type(key) == dict and 'queues' in key:
                for queue in queues:
                    for subkey in key['queues']:
                        if subkey in queue:
                            metrics.append(Metric(rabbitmq['host'], 'rabbitmq.%s.%s[%s]' % ('queue', subkey, queue['name']), 
                                queue[subkey]))

        # Send packet to zabbix
        send_to_zabbix(metrics, zabbix_host, zabbix_port)
    except LockTimeout:
        print 'Lock not acquired, exiting'
    except AlreadyLocked:
        print 'Already locked, exiting'
    except Exception, e:
        print type(e)
        print 'Error: %s' % e
Example no. 11
class MqSmokeTest(unittest.TestCase):
    def setUp(self):
        self.cl = Client("mq:15672",
                         os.environ.get("RABBITMQ_USER"),
                         os.environ.get("RABBITMQ_PASSWORD"))
        self.assertTrue(self.cl.is_alive())

    def test_exchange(self):
        exchanges = self.cl.get_exchanges()
        for exchange in exchanges:
            if exchange["name"] == "sfm_exchange":
                break
        else:
            self.fail("Exchange not found.")

    def test_queues(self):
        queues = self.cl.get_queues()
        queues_names = {queue["name"] for queue in queues}
        # Add additional queue names as new components are added.
        self.assertTrue(queues_names.issuperset(set(["flickr_harvester",
                                                     "sfm_ui",
                                                     "twitter_harvester",
                                                     "twitter_rest_harvester"])))
Example no. 12
from pyrabbit.api import Client
import parralel_declare_queues
cl = Client('localhost:15672', 'guest', 'guest')
print(cl.is_alive())

# for i in range(15):
#     vhost_name = f'{i}'
#     cl.create_vhost(vhost_name)
#     parralel_declare_queues.add_queues(vhost_name)

# print(cl.get_vhost_names())

for i in range(15):
    vhost_name = f'{i}'
    try:
        cl.delete_vhost(vhost_name)
    except Exception as e:
        print(e)

print(cl.get_vhost_names())
Example no. 13
import sys

from pyrabbit.api import Client

# It uses the port of the management interface:
# In rabbitmq.config:
# {rabbitmq_management,
#  [
#   {listener, [{port,     12345},
#               {ip,       "127.0.0.1"}]}

# rabbitmq-plugins enable rabbitmq_management

cl = Client('localhost:12345', 'guest', 'guest')

sys.stdout.write("\n"+"cl=%s\n"%str(cl))

isal = cl.is_alive()

sys.stdout.write("\n"+"isalive=%s\n"%str(isal))

sys.stdout.write("\n\n")

# queues = [q['name'] for q in cl.get_queues()]
queues = [q for q in cl.get_queues()]

# exclusive=False
# reductions=11223
# garbage_collection={u'min_heap_size': 233, u'fullsweep_after': 65535, u'minor_gcs': 2, u'min_bin_vheap_size': 46422}
# messages_ready_ram=0
# idle_since=2017-02-05 14:08:12
# message_bytes_unacknowledged=0
# message_stats={u'deliver_no_ack': 0, u'publish_out': 0, u'get_no_ack': 10, u'return_unroutable': 0, u'confirm': 0, u'get_no_ack_details': {u'rate': 0.0}, u'publish': 10, u'confirm_details': {u'rate': 0.0}, u'ack_details': {u'rate': 0.0}, u'get': 0, u'publish_out_details': {u'rate': 0.0}, u'deliver': 0, u'deliver_no_ack_details': {u'rate': 0.0}, u'deliver_details': {u'rate': 0.0}, u'deliver_get_details': {u'rate': 0.0}, u'publish_details': {u'rate': 0.0}, u'publish_in_details': {u'rate': 0.0}, u'ack': 0, u'publish_in': 0, u'return_unroutable_details': {u'rate': 0.0}, u'get_details': {u'rate': 0.0}, u'deliver_get': 10, u'redeliver_details': {u'rate': 0.0}, u'redeliver': 0}
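The comment block at the top of this example explains that pyrabbit must talk to the rabbitmq_management listener port rather than the AMQP port, and the dump above shows the kind of fields each queue dict carries. A minimal sketch, assuming the default management port 15672 and guest credentials (hypothetical, not from the snippet), that prints a couple of those fields per queue:

from pyrabbit.api import Client

cl = Client('localhost:15672', 'guest', 'guest')  # assumed host and credentials
for q in cl.get_queues():
    # 'message_stats' (with 'publish', 'deliver_get', ...) is one of the fields dumped above;
    # idle queues may not carry it, hence the .get() with a default.
    stats = q.get('message_stats', {})
    print('%s: publish=%s deliver_get=%s' % (q['name'], stats.get('publish'), stats.get('deliver_get')))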
Example no. 14
    users = [{
        "username": "******",
        "password": "******",
        "tags": "cronio"
    }, {
        "username": "******",
        "password": "******",
        "tags": "cronio"
    }, {
        "username": "******",
        "password": "******",
        "tags": "cronio"
    }]

    cl = Client(AMQP_HOST_STOMP, AMQP_API_USERNAME, AMQP_API_PASSWORD)
    if cl.is_alive():

        if DO_DELETE_ALL_USERS_EXCEPT_ADMIN:
            UsersFoundInRabbitMQ = cl.get_users()
            for x in UsersFoundInRabbitMQ:
                pprint.pprint(x)
                if ("cronio") in x["tags"]:
                    cl.delete_user(x["name"])
                    print "User: "******"name"] + " - Deleted"

            # if AMQP_VHOST in cl.get_vhost_names():
            # 	print "delete vhose: "
            # 	cl.delete_vhost(AMQP_VHOST)
        for user in users:
            print user
Example no. 15
from pyrabbit.api import Client

client = Client('http://localhost:55672', 'guest', 'guest')  # overridden below; pyrabbit expects a bare host:port
client = Client('localhost:15672', 'guest', 'guest')
print(client.is_alive())
messages = client.get_messages('/', 'celery')
print(messages)
Example no. 16
def rebuild_collection(collection_name):
    """
    Grab all records from the database and send them to Solr.
    """
    # first, fail if we can not monitor queue length before we queue anything
    u = urlparse(app.conf['OUTPUT_CELERY_BROKER'])
    rabbitmq = PyRabbitClient(u.hostname + ':' + str(u.port + 10000),
                              u.username, u.password)
    if not rabbitmq.is_alive('master_pipeline'):
        logger.error(
            'failed to connect to rabbitmq with PyRabbit to monitor queue')
        sys.exit(1)

    now = get_date()
    if collection_name.startswith('http'):
        solr_urls = [collection_name]
    else:
        solr_urls = collection_to_urls(collection_name)

    logger.info('Sending all records to: %s', ';'.join(solr_urls))
    sent = 0

    batch = []
    _tasks = []
    with app.session_scope() as session:
        # The master DB only contains valid documents; the indexing task rejects incomplete docs.
        for rec in session.query(Records) \
            .options(load_only(Records.bibcode, Records.updated, Records.processed)) \
            .yield_per(1000):

            sent += 1
            if sent % 1000 == 0:
                logger.debug('Sending %s records', sent)

            batch.append(rec.bibcode)
            if len(batch) > 1000:
                t = tasks.task_rebuild_index.delay(batch,
                                                   force=True,
                                                   update_solr=True,
                                                   update_metrics=False,
                                                   update_links=False,
                                                   ignore_checksums=True,
                                                   solr_targets=solr_urls,
                                                   update_timestamps=False)
                _tasks.append(t)
                batch = []

    if len(batch) > 0:
        t = tasks.task_rebuild_index.delay(batch,
                                           force=True,
                                           update_solr=True,
                                           update_metrics=False,
                                           update_links=False,
                                           ignore_checksums=True,
                                           solr_targets=solr_urls,
                                           update_timestamps=False)
        _tasks.append(t)

    logger.info('Done queueing bibcodes for rebuilding collection %s',
                collection_name)
    # now wait for queue to empty
    queue_length = 1  # non-zero sentinel so the loop body runs at least once
    while queue_length > 0:
        queue_length = rabbitmq.get_queue_depth('master_pipeline',
                                                'rebuild-index')
        stime = queue_length * 0.1
        logger.info(
            'Waiting %s for rebuild-collection tasks to finish, queue_length %s, sent %s'
            % (stime, queue_length, sent))
        time.sleep(stime)

    logger.info('Done rebuilding collection %s, sent %s records',
                collection_name, sent)
Example no. 17
                    help="Password for RabbitMQ auth",  default='guest')
parser.add_argument("-username",  dest='username',  type=str,
                    help="Username",  default='test')
parser.add_argument("-password",  dest='password',  type=str,
                    help="Password",  default='swordfish')
parser.add_argument("-vhostname",  dest='vhostname',  type=str,
                    help="Vhost name",  default='test')
args = parser.parse_args()
rabbitmq_url = args.rabbitmq_url
rabbitmq_user = args.rabbitmq_username
rabbitmq_password = args.rabbitmq_password
user = args.username
password = args.password
vhost = args.vhostname
cl = Client(rabbitmq_url, rabbitmq_user, rabbitmq_password)
assert cl.is_alive()

for queue in cl.get_queues():
    if queue['vhost'] == vhost:
        cl.purge_queue(vhost, queue['name'])
        cl.delete_queue(vhost, queue['name'])

for vhost_ in cl.get_all_vhosts():
    if vhost_['name'] == vhost:
        while True:
            try:
                cl.delete_vhost(vhost_['name'])
                break
            except Exception:
                pass
Example no. 18
# Automatically reroutes traffic if the primary publisher fails
import time
from pyrabbit.api import Client

pri_prod='prod1'
sec_prod='prod2'

cl = Client('localhost:15672', 'user1', 'password1')
cl.is_alive()


while True:
    disconnected1 = False
    disconnected2 = False

    print('checking producers presence...')
    time.sleep(3)

    channels=cl.get_channels()

    if not channels:
        disconnected1=True
        disconnected2=True
    elif not any(ch['user'] == pri_prod for ch in channels):
        disconnected1=True

    if disconnected1:
        print('producer ' + pri_prod + ' not connected, trying to switch to ' + sec_prod + '...')
        if not channels:
            disconnected2 = True
        elif not any(ch['user'] == sec_prod for ch in channels):