Ejemplo n.º 1
0
def worker_execute():
    """Partition the goods/user data across worker threads and run them all.

    The data is cut into equal slices of ``goods_per_thread`` entries; the
    final worker absorbs every leftover entry from the integer division.
    """
    goods_user_info = reader.load_goods_user_info('id_user.xlsx')
    config = collector.get_config()
    thread_num = config['thread']
    goods_per_thread = int(len(goods_user_info.keys()) / thread_num)

    workers = []
    current_slice = {}
    slice_size = 0
    worker_id = 1
    for goods_id, user_info in goods_user_info.items():
        current_slice[goods_id] = user_info
        slice_size += 1

        # Stop cutting new slices once we reach the last worker: it takes
        # everything that remains.
        if worker_id != thread_num and slice_size == goods_per_thread:
            workers.append(Worker(worker_id, current_slice))
            current_slice = {}
            slice_size = 0
            worker_id += 1

    workers.append(Worker(worker_id, current_slice))

    for worker in workers:
        print('[{0}] [{1}].'.format(worker.id,
                                    len(worker.goods_user_info.keys())))

    for worker in workers:
        worker.start()

    for worker in workers:
        worker.join()
Ejemplo n.º 2
0
    def shepherd(self):
        """Spawn the worker pool and babysit it from the main thread.

        Workers that report themselves as aborted are replaced with fresh
        instances; each replacement doubles ``stage`` so the loop eventually
        exits once enough failures accumulate.  NOTE(review): ``itervalues``
        and the eager ``map`` imply Python 2 — under Python 3 the ``map``
        below is lazy and would never start the workers.
        """
        num_of_threads = int(self.__config["--num-of-threads"][0])
        # Keyed by thread name so an aborted worker can be swapped out by name.
        self.workers = {
            w.name: w
            for w in (Worker() for i in range(0, num_of_threads))
        }
        map(lambda w: w.start(), self.workers.itervalues())

        # Threshold grows exponentially with the pool size, minimum 1.
        quit_threshold = 2**(num_of_threads / 2 -
                             1) if num_of_threads / 2 else 1
        stage = 1

        # We have to keep main thread awake to process signal, but will not let it quit.
        while stage <= quit_threshold:
            try:
                # Wait (1s timeout) for any worker to report its own death.
                abortive_worker, err = Worker.Worker_abortive_instance.get(
                    timeout=1)
            except Empty:
                # Quiet second: decay stage (it may briefly go negative, then
                # resets to 1).  NOTE(review): intent of this decay is unclear
                # from here — presumably it forgives isolated failures; verify.
                stage = (stage - 2) if stage >= 0 else 1
                continue
            else:
                # A worker died: escalate and replace it with a newborn.
                stage *= 2
                newborn_worker = Worker()
                del self.workers[abortive_worker.name]
                self.workers[newborn_worker.name] = newborn_worker
                newborn_worker.start()
        self.sys_freeze()
Ejemplo n.º 3
0
    def extractBatchThreader(self, batchPath, thread=1, checkImg=False):
        """Split the images found in batchPath across `thread` workers and run them."""
        images = self.listImageInDir(batchPath, checkImg)
        # Tell the QProgressDialog how many images to expect.
        self.sgnExtTotalImg.emit(len(images))
        # Track how many worker threads were launched / have finished.
        self.threadUsed = thread
        self.threadDone = 0

        chunk = len(images) // thread
        workers = []
        stopIdx = 0  # stays 0 when thread == 1, so the single worker gets everything
        for i in range(thread - 1):
            startIdx = chunk * i
            stopIdx = chunk * (i + 1)
            workers.append(
                Worker(self.extractBatch, batchPath, checkImg,
                       images[startIdx:stopIdx]))
        # The final worker takes the tail, including any division remainder.
        workers.append(
            Worker(self.extractBatch, batchPath, checkImg, images[stopIdx:]))

        for worker in workers:
            # Wire up completion/exception signals, then hand off to the pool.
            worker.signals.exception.connect(self.extractBatchThreadException)
            worker.signals.done.connect(self.extractBatchThreadDone)
            self.threadPool.start(worker)
Ejemplo n.º 4
0
    def _create_workers(self):
        """(Re)build the worker pool: one direct worker plus one per configured proxy.

        Reads tuning values from the ``setting`` store (with module-level
        defaults), wires every Worker to the shared input/output queues, and
        registers workers by name in ``self._workers``.
        """
        self._workers.clear()
        # Per-worker tuning values, each falling back to a module constant.
        requests_per_minute = setting.get('worker.requests-per-minute', int,
                                          REQUESTS_PER_MINUTE)
        local_object_duration = setting.get('worker.local-object-duration',
                                            int, LOCAL_OBJECT_DURATION)
        broadcast_incremental_backup = setting.get(
            'worker.broadcast-incremental-backup', bool,
            BROADCAST_INCREMENTAL_BACKUP)
        broadcast_active_duration = setting.get(
            'worker.broadcast-active-duration', int, BROADCAST_ACTIVE_DURATION)
        image_local_cache = setting.get('worker.image-local-cache', bool,
                                        IMAGE_LOCAL_CACHE)

        # Fresh input queue for this pool; the output queue lives on self.
        self._worker_input = Queue()
        worker_args = {
            # NOTE(review): `settings` here vs `setting` everywhere else in
            # this method — confirm this is not a typo for `setting.get(...)`.
            'debug': settings.get('debug'),
            'queue_in': self._worker_input,
            'queue_out': self._worker_output,
            'requests_per_minute': requests_per_minute,
            'local_object_duration': local_object_duration,
            'broadcast_incremental_backup': broadcast_incremental_backup,
            'image_local_cache': image_local_cache,
            'broadcast_active_duration': broadcast_active_duration,
            # NOTE(review): spelling `DATEBASE_PATH` must match the db module.
            'db_path': db.DATEBASE_PATH,
        }
        worker = Worker(**worker_args)
        self._workers[worker.name] = worker
        proxies = setting.get('worker.proxies', 'json')
        if not proxies:
            return
        # One additional worker per proxy, reusing the same base arguments.
        for proxy in proxies:
            worker_args['proxy'] = proxy
            worker = Worker(**worker_args)
            self._workers[worker.name] = worker
Ejemplo n.º 5
0
def main():
    """Create two Worker threads and start them both."""
    workers = (Worker(), Worker())
    for worker_thread in workers:
        worker_thread.start()
Ejemplo n.º 6
0
def main():
    """REP-socket command loop that starts/stops/reports a single Worker."""
    context = zmq.Context()

    orderReceiver = context.socket(zmq.REP)
    orderReceiver.bind("tcp://127.0.0.1:5559")

    worker_ = Worker()
    while True:
        order = orderReceiver.recv_json()
        command = order['command']

        if command == 'stop':
            # Signal the running worker to halt, then discard it for a new one.
            worker_.go = False
            orderReceiver.send_json('ack')
            worker_ = Worker()
        elif command == 'start':
            if worker_.go:
                orderReceiver.send_json('already running')
                continue
            worker_.newJob(order['interval'], order['filename'], order['batch'])
            worker_.start()
            orderReceiver.send_json('ack')
        elif command == 'status':
            # Map the worker's go flag onto a human-readable state string.
            status = {True: 'on',
                      False: 'off',}
            orderReceiver.send_json(status[worker_.go])
Ejemplo n.º 7
0
    def activate_workers(self):
        """Slice self.goods_user_info across workers and start every worker thread.

        Each worker gets ``goods_per_thread`` entries; the final worker also
        receives whatever the integer division leaves over.
        """
        worker_num = self.config.get('worker_num', 1)
        goods_per_thread = int(len(self.goods_user_info.keys()) / worker_num)

        current_slice = {}
        slice_size = 0
        worker_id = 1
        for goods_id, info in self.goods_user_info.items():
            current_slice[goods_id] = info
            slice_size += 1

            # Stop cutting new slices once the last worker is reached.
            if worker_id != worker_num and slice_size == goods_per_thread:
                self.workers.append(Worker(worker_id, self.message_queue, current_slice,
                                           self.user_info_dict, self.user_status_dict, self.ip_pool,
                                           self.goods_sale_info_dict))
                current_slice = {}
                slice_size = 0
                worker_id += 1

        self.workers.append(Worker(worker_id, self.message_queue, current_slice,
                                   self.user_info_dict, self.user_status_dict, self.ip_pool,
                                   self.goods_sale_info_dict))

        for worker in self.workers:
            worker.start()
Ejemplo n.º 8
0
 def test_thread_safe(self):
     """
     These tests are related to:
     http://stackoverflow.com/q/3752618
     
     I'm not even sure if these tests are correct.
     """
     from worker import Worker
     
     with self.subTest("one-time listener"):
         a = Worker().start()
         # A listener that deregisters itself the first time it fires.
         @a.listen("test")
         def handler(event):
             a.unlisten(handler)
         a.fire("test")
         a.stop().join()
         # After a single firing the handler must be fully removed.
         self.assertNotIn(handler, a.listener_pool)
         self.assertEqual(a.listeners.get("test", []), [])
         
     with self.subTest("add listener in listener callback"):
         a = Worker().start()
         # Registering a new listener while the event is being dispatched
         # must not corrupt the listener list.
         @a.listen("test")
         def _(event):
             @a.listen("test")
             def _(event):
                 pass
         a.fire("test")
         a.stop().join()
         # Both the outer and the nested listener should now be registered.
         self.assertEqual(len(a.listeners.get("test", [])), 2)
Ejemplo n.º 9
0
    def run(self):
        """Queue every source-table batch, build the BCP and Snowflake worker
        pools, wait for both pools to finish, then ship run logs to Snowflake.

        NOTE(review): ``.start()`` is never called on either pool here —
        presumably Worker launches its own thread in ``__init__``; confirm,
        otherwise the ``join()`` calls below would hang or be no-ops.
        """
        # Seed the task queue: one (task, batch) tuple per source-table batch.
        for batch in self.source_table_batches:
            self.bcp_tasks.put((source_table_task, batch))

        bcp_workers = []
        for i in range(self.bcp_worker_count):
            bcp_workers.append(
                Worker(self.bcp_tasks,
                       self.sf_tasks,
                       self.logging_tasks,
                       'bcp',
                       self.task_counter,
                       self.batch_count,
                       thread_name='Thread-BCP-{}'.format(i)))

        sf_workers = []
        for i in range(self.snowflake_worker_count):
            sf_workers.append(
                Worker(self.bcp_tasks,
                       self.sf_tasks,
                       self.logging_tasks,
                       'snowflake',
                       self.task_counter,
                       self.batch_count,
                       thread_name='Thread-SF-{}'.format(i)))

        # Block until both pools have drained their queues.
        for worker in bcp_workers:
            worker.join()

        for worker in sf_workers:
            worker.join()

        self.end_time = datetime.now()
        self.send_logs_to_snowflake()
Ejemplo n.º 10
0
def main():
    """Entry point: seed all RNG sources, then either test a saved model
    (``--test``) or train a new one and test it afterwards."""
    args = get_arguments()

    # Seed every RNG source for reproducibility.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)

    if args.test:
        # Evaluation-only path: load an existing model and test it.
        worker = Worker(args, dataset=args.dataset, mode=args.mode)
        trainer = MLPTrainer(args, worker=worker, train=False)
        trainer.init_model(model_path=args.model_path)
        trainer.test()
        return

    # Training path: build a run directory name from the hyperparameters.
    cur_time = datetime.datetime.now().strftime("%m-%d-%H:%M:%S.%f")
    subdir = 'mode-{}_hidden-{}_lr-{}_decay-{}_dropout-{}_bs-{}_{}'.format(
        args.mode, args.hidden, args.lr, args.weight_decay, args.dropout,
        args.batch_size, cur_time)

    print('subdir = {}'.format(subdir))
    init_logger('./logs_{}'.format(args.dataset), subdir, print_log=False)
    logging.info(str(args))

    worker = Worker(args, dataset=args.dataset, mode=args.mode)
    trainer = MLPTrainer(args, subdir=subdir, worker=worker, train=True)
    trainer.init_model()
    trainer.train()
    trainer.test()
Ejemplo n.º 11
0
 def test_event(self):
     """Events bubble up to ancestors and broadcast down to descendants."""
     from worker import Worker
     
     access = {}
     
     # Three-level worker chain: a -> b -> c.
     a = Worker().start()
     b = Worker(parent=a).start()
     c = Worker(parent=b).start()
     
     # The root listens for the event bubbled up from the leaf.
     @a.listen("MY_BUBBLE")
     def _(event):
         access["bubble"] = event.target
         
     # The leaf listens for the event broadcast down from the root.
     @c.listen("MY_BROADCAST")
     def _(event):
         access["broadcast"] = event.target
         
     a.broadcast("MY_BROADCAST")
     c.bubble("MY_BUBBLE")
     
     # Give the worker threads time to dispatch both events.
     time.sleep(1)
     
     self.assertEqual(access, {
         "bubble": c,
         "broadcast": a
     })
     
     a.stop().join()
Ejemplo n.º 12
0
    def test_no_wake_up_call_for_delayed(self):
        """A delayed job must not get a wake-up when the instance cap is hit.

        Two live workers exist while ``max_instances`` is 1, so
        ``rise_and_shine`` should inspect workers and jobs but never publish.
        """
        from machine_midwife import MachineMidwife
        Apprentice = MachineMidwife.Apprentice
        from job import Job
        from worker import Worker

        apprentice = Apprentice()
        apprentice.settings = mock.MagicMock()
        apprentice.settings.max_instances = 1
        apprentice.client = mock.MagicMock()
        apprentice.client.exists.return_value = True
        job = Job('delayed', 'batch-')
        # First keys() call returns worker keys, second returns job keys.
        apprentice.client.keys.side_effect = [['jm-1', 'jm-2'], ['job-']]
        w1 = Worker(None, None)
        w1.instance = 'a'
        w2 = Worker(None, None)
        w2.instance = 'b'
        # get() is expected once per worker, then once for the job itself.
        apprentice.client.get.side_effect = [
            pickle.dumps(w1),
            pickle.dumps(w2),
            pickle.dumps(job)
        ]
        apprentice.client.publish = mock.MagicMock()

        apprentice.rise_and_shine()

        assert apprentice.client.keys.call_count == 2
        assert apprentice.client.get.call_count == 3
        # No wake-up published: the delayed job stays delayed.
        assert apprentice.client.publish.call_count == 0
Ejemplo n.º 13
0
    def test_set_worker(self):
        """Assigning a new worker to the controller exposes it via _worker."""
        initial_worker = Worker("", "")
        self.sut = TwizoController(initial_worker, TwizoService())
        new_worker = Worker("new", "new")
        self.sut._worker = new_worker

        self.assertEqual(new_worker, self.sut._worker)
Ejemplo n.º 14
0
    def run(self):
        """Boot the horizon agent: workers, listeners and roomba, then idle forever."""
        logger.info('starting horizon agent')
        listen_queue = Queue(maxsize=settings.MAX_QUEUE_SIZE)
        pid = getpid()

        # Writing to the mini redis store only matters when Oculus is configured;
        # a missing OCULUS_HOST setting also means "skip".
        try:
            skip_mini = settings.OCULUS_HOST == ''
        except Exception:
            skip_mini = True

        # Launch the workers; the first one also populates the canary metric.
        for i in range(settings.WORKER_PROCESSES):
            if i == 0:
                Worker(listen_queue, pid, skip_mini, canary=True).start()
            else:
                Worker(listen_queue, pid, skip_mini).start()

        # Launch the pickle and UDP listeners.
        Listen(settings.PICKLE_PORT, listen_queue, pid, type="pickle").start()
        Listen(settings.UDP_PORT, listen_queue, pid, type="udp").start()

        # Launch the roomba.
        Roomba(pid, skip_mini).start()

        # qsize() raises on platforms (e.g. macOS) where it is unimplemented.
        try:
            listen_queue.qsize()
        except NotImplementedError:
            logger.info('WARNING: Queue().qsize() not implemented on Unix platforms like Mac OS X. Queue size logging will be unavailable.')

        # Park the main process forever; the children do the work.
        while True:
            time.sleep(100)
Ejemplo n.º 15
0
 def test_detached(self):
     """A child worker detaches itself from its parent when it finishes."""
     from worker import Worker
     a = Worker().start()
     b = Worker(parent=a).start()
     # Stop the child; it should remove itself from the parent's children.
     b.stop().join()
     # Detachment happens asynchronously — give it a moment to complete.
     time.sleep(1)
     self.assertNotIn(b, a.children)
     a.stop().join()
Ejemplo n.º 16
0
    def test_change_mix_char(self):
        """changeData must reject a name mixing latin and cyrillic characters."""
        name = "Дмитрий"
        surename = "Дуплий"
        lastname = "Oлегович"

        Worker(0).changeData(name, surename, lastname)

        # The lastname starts with a latin 'O'; a repeat call must return False.
        result = Worker(0).changeData(name, surename, lastname)
        self.assertEqual(result, False, "Should False")
Ejemplo n.º 17
0
 def __init__(self, nodes):
     """Build one Worker per node; a 3-tuple node carries (ip, username, password)."""
     self.workers = []
     for node in nodes:
         if len(node) == 3:
             # Credentialed node: unpack (ip, username, password).
             self.workers.append(Worker(*node))
         else:
             self.workers.append(Worker(node))
     self.unready_fd_workers_dict = {}
     self.to_do_workers = []
Ejemplo n.º 18
0
    def test_change(self):
        """changeData must persist name, surename and patronymic on the worker."""
        name = "Дмитрий"
        surename = "Дуплий"
        lastname = "Олегович"

        Worker(0).changeData(name, surename, lastname)

        stored = [Worker(0).name, Worker(0).surename, Worker(0).patronymic]
        self.assertEqual(stored, [name, surename, lastname], "Should Ok")
Ejemplo n.º 19
0
    def start(self):
        """Reset the results table and build one Worker thread per requested run.

        Collects connection options from the UI fields, caps the process count
        by the number of entries in ``proc.list``, and wires each Worker's Qt
        signals to table/logging slots.  (Python 2 / old-style PyQt: ``file``,
        ``xrange``, ``SIGNAL``.)
        """
        self.table.clear()
        self.table.setRowCount(0)
        self.table.setHorizontalHeaderLabels(self.table.headers)
        # Base command options taken from the UI fields.
        opts=[
        self.user.text(),
        self.password.text(),
        self.host.text(),
        self.base.text(),
        ]
        self.threads=[]
        # Qt text() returns QString; coerce to plain str.
        for n,opt in enumerate(opts):
            opts[n]=str(opt)
        opts.insert(0,CWD+'qt.sh')
        

        # Count lines in proc.list to bound the per-run process count.
        f = file(CWD+'proc.list', 'r')
        count=0
        for line in f:
            count+=1
        if count>int(self.mc.value()) and int(self.mc.value()):
            count=int(self.mc.value())
        else:
            self.mc.setValue(count)
        f.close()

        self.progress.reset()
        self.progress.setMaximum(count*int(self.num.value()))

        for i in xrange(int(self.num.value())):
            # Copy the base options and append this run's user credentials
            # (rotating through `users`).
            topts=opts[:]
            for o in users[i%len(users)]:
                topts.append(users[i%len(users)][o])
            
            topts.append(str(i))
            topts.append(int(self.mc.value()))
            topts.append(self.domain.text())
            # NOTE(review): the trailing False presumably toggles some
            # single-run behaviour — confirm against the Worker signature.
            if int(self.num.value())!=1:
                w=Worker(self,topts,i,self)
            else:
                w=Worker(self,topts,i,self,False)

            self.threads.append(w)
            # Route worker signals to the table / logging handlers.
            # NOTE(review): threads are collected but never started here —
            # presumably started elsewhere; verify.
            self.connect(w,SIGNAL('pylog'),self.table.pylog)
            self.connect(w,SIGNAL('errlog'),self.table.errlog)
            self.connect(w,SIGNAL('proc_end'),self.proc_end)
            self.connect(w,SIGNAL('deadlock'),self.deadlock)
            self.connect(w,SIGNAL('log'),self.api.debug)
            self.connect(w,SIGNAL('err'),self.api.error)

        self.calls={}
        
        self.ended=0
Ejemplo n.º 20
0
    def run(self):
        """
        Determine the `MAX_QUEUE_SIZE` for the listen process.

        Determine if horizon should populate the mini redis store for Oculus.

        Starts the defined number of `WORKER_PROCESSES`, with the first worker
        populating the canary metric.

        Start the pickle (and UDP) listen processes.

        Start roomba.
        """
        logger.info('agent starting skyline %s' % skyline_app)
        listen_queue = Queue(maxsize=settings.MAX_QUEUE_SIZE)
        pid = getpid()

        # If we're not using oculus, don't bother writing to mini
        # (a missing OCULUS_HOST setting also means "skip").
        try:
            skip_mini = True if settings.OCULUS_HOST == '' else False
        except Exception:
            skip_mini = True

        # Start the workers
        for i in range(settings.WORKER_PROCESSES):
            if i == 0:
                logger.info('%s :: starting Worker - canary' % skyline_app)
                Worker(listen_queue, pid, skip_mini, canary=True).start()
            else:
                logger.info('%s :: starting Worker' % skyline_app)
                Worker(listen_queue, pid, skip_mini).start()

        # Start the listeners
        logger.info('%s :: starting Listen - pickle' % skyline_app)
        Listen(settings.PICKLE_PORT, listen_queue, pid, type="pickle").start()
        logger.info('%s :: starting Listen - udp' % skyline_app)
        Listen(settings.UDP_PORT, listen_queue, pid, type="udp").start()

        # Start the roomba
        logger.info('%s :: starting Roomba' % skyline_app)
        Roomba(pid, skip_mini).start()

        # Warn the Mac users: qsize() raises NotImplementedError on platforms
        # (e.g. macOS) where sem_getvalue is unavailable.
        try:
            listen_queue.qsize()
        except NotImplementedError:
            logger.info(
                'WARNING: Queue().qsize() not implemented on Unix platforms like Mac OS X. Queue size logging will be unavailable.'
            )

        # Keep yourself occupied, sucka
        # (park the main process forever; the child processes do the work)
        while 1:
            time.sleep(100)
Ejemplo n.º 21
0
async def on_ready():
    """Discord bot client ready handler"""
    global FIRST_RUN  # pylint: disable=global-statement
    print(f"We have logged in as {client.user}")
    if not FIRST_RUN:
        return
    FIRST_RUN = False
    # Launch each crawler once, each wrapping the shared client in its own Worker.
    for crawler_type in (CoronaLiveCrawler, KdcaCrawler, VaccinationStatusCrawler):
        crawler = crawler_type(Worker(client, debug=args.debug))
        client.loop.create_task(crawler.run())
Ejemplo n.º 22
0
 def start_bot(self):
     """Hide the start button, create the bot and launch its two worker threads."""
     self.start_btn.setVisible(False)
     self.bot = Bot()
     # Worker that listens to chat; killable via the stop button.
     self.worker = Worker(self.bot.listen_chat)
     self.threadpool.start(self.worker)
     self.stop_btn = self.set_button("Stop the bot!", self.worker.kill, 40,
                                     40, True)
     self.stop_btn.clicked.connect(self.stop_bot)
     # Second worker that sends info; also killed by the stop button.
     self.info_worker = Worker(self.bot.send_info)
     self.threadpool.start(self.info_worker)
     self.stop_btn.clicked.connect(self.info_worker.kill)
Ejemplo n.º 23
0
    def on_session(self):
        """Create a fresh RL-session namespace and return its id to the client."""
        print('Server in on_session()')
        new_id = shortuuid.uuid()
        ns = '/' + new_id + '/rl_session'
        print('Build server RL Session socket withs ns: {}'.format(ns))

        # DRL sessions share the TF session and network; plain sessions do not.
        if self.use_DRL:
            handler = Worker(ns, new_id, self.sess, self.main_net)
        else:
            handler = Worker(ns, new_id)
        self.socketio.on_namespace(handler)

        emit('session_response', new_id)
Ejemplo n.º 24
0
 def test_child_thread(self):
     """Stopping a parent worker also stops its running child worker."""
     from worker import Worker
     
     parent = Worker()
     child = Worker(parent=parent)
     
     parent.start()
     child.start()
     
     # Stopping the parent should cascade to its child.
     parent.stop().join()
     
     self.assertFalse(parent.is_running())
     self.assertFalse(child.is_running())
Ejemplo n.º 25
0
def run_weight_test(reset_rmsprop):
    """Run one update from each of two workers against the shared global
    network and return checksums of the global variables at three points:
    (initial, after worker 1's update, after worker 2's update).

    If ``reset_rmsprop`` is true the optimizer's accumulator variables are
    re-initialized between the two updates, isolating whether shared RMSProp
    statistics influence the second update.
    """
    tf.reset_default_graph()
    utils.set_random_seeds(0)
    sess = tf.Session()
    env = generic_preprocess(gym.make('Pong-v0'), max_n_noops=0)
    env.seed(0)

    # Global (shared) parameters that both workers update.
    with tf.variable_scope('global'):
        make_inference_network(n_actions=env.action_space.n,
                               weight_inits='glorot')
    shared_variables = tf.global_variables()

    # A single optimizer instance shared by both workers' networks.
    optimizer = tf.train.RMSPropOptimizer(learning_rate=5e-4,
                                          decay=0.99,
                                          epsilon=1e-5)

    network1 = Network(scope="worker_1",
                       n_actions=env.action_space.n,
                       entropy_bonus=0.01,
                       value_loss_coef=0.5,
                       weight_inits='glorot',
                       max_grad_norm=0.5,
                       optimizer=optimizer,
                       summaries=False,
                       debug=False)
    w1 = Worker(sess=sess, env=env, network=network1, log_dir='/tmp')

    network2 = Network(scope="worker_2",
                       n_actions=env.action_space.n,
                       entropy_bonus=0.01,
                       value_loss_coef=0.5,
                       weight_inits='glorot',
                       max_grad_norm=0.5,
                       optimizer=optimizer,
                       summaries=False,
                       debug=False)
    w2 = Worker(sess=sess, env=env, network=network2, log_dir='/tmp')

    # Ops to re-initialize only the optimizer's accumulator variables.
    rmsprop_init_ops = [v.initializer for v in optimizer.variables()]

    sess.run(tf.global_variables_initializer())

    vars_sum_init = sess.run(get_var_sum(shared_variables))
    w1.run_update(n_steps=1)
    vars_sum_post_w1_update = sess.run(get_var_sum(shared_variables))
    if reset_rmsprop:
        sess.run(rmsprop_init_ops)
    w2.run_update(n_steps=1)
    vars_sum_post_w2_update = sess.run(get_var_sum(shared_variables))

    return vars_sum_init, vars_sum_post_w1_update, vars_sum_post_w2_update
Ejemplo n.º 26
0
    def test_rmsprop_variables(self):
        """
        Test 1: let's look at the variables the optimizer creates to check
        there's no funny business.
        """
        sess = tf.Session()
        env = generic_preprocess(gym.make('Pong-v0'), max_n_noops=0)

        # One optimizer instance shared by both worker networks.
        optimizer = tf.train.RMSPropOptimizer(learning_rate=5e-4,
                                              decay=0.99,
                                              epsilon=1e-5)

        # The shared global parameter set both workers train against.
        with tf.variable_scope('global'):
            make_inference_network(n_actions=env.action_space.n,
                                   weight_inits='glorot')

        network1 = Network(scope="worker_1",
                           n_actions=env.action_space.n,
                           entropy_bonus=0.01,
                           value_loss_coef=0.5,
                           weight_inits='glorot',
                           max_grad_norm=0.5,
                           optimizer=optimizer,
                           summaries=False,
                           debug=False)
        Worker(sess=sess, env=env, network=network1, log_dir='/tmp')

        # Snapshot of optimizer slot variables after the first worker.
        vars1 = optimizer.variables()

        network2 = Network(scope="worker_2",
                           n_actions=env.action_space.n,
                           entropy_bonus=0.01,
                           value_loss_coef=0.5,
                           weight_inits='glorot',
                           max_grad_norm=0.5,
                           optimizer=optimizer,
                           summaries=False,
                           debug=False)
        Worker(sess=sess, env=env, network=network2, log_dir='/tmp')

        # Snapshot after the second worker, for comparison.
        vars2 = optimizer.variables()

        # Sanity check: the two calls returned distinct list objects.
        self.assertNotEqual(id(vars1), id(vars2))

        # First, were any extra variables added when we created the second
        # optimizer, that might be indicative of a second set of statistics?
        self.assertLessEqual(vars1, vars2)
        # Second, are all the variables definitely associated with the global
        # set of parameters rather than the thread-local parameters?
        for v in vars1:
            self.assertIn('global', v.name)
Ejemplo n.º 27
0
    def execute(self, call):
        """Wrap ``call`` in a Worker sandbox and submit it as a Work Queue task.

        Registers every sandbox input/output file with the task, runs the
        sandbox's ``task.sh``, and tracks the worker by the returned task id.
        (Python 2: uses print statements.)
        """
        #print call
        worker = Worker(call)
        t = Task('')
        t.specify_cores(1)

        # Stage every sandbox input file into the task (cached on the node).
        for fn in worker.all_inputs:
            # print 'in:',fn
            t.specify_file(str(worker.sandbox + fn),
                           str(fn),
                           WORK_QUEUE_INPUT,
                           cache=True)

        # Declare every expected output file so it is fetched back.
        for fn in worker.all_outputs:
            # print 'out:',fn
            t.specify_file(str(worker.sandbox + fn),
                           str(fn),
                           WORK_QUEUE_OUTPUT,
                           cache=True)

        t.specify_command('chmod 755 task.sh; ./task.sh wq')
        # if worker.call.step:
        # 	print worker.call.step
        t.specify_category(str(worker.call.step))

        taskid = self.wq.submit(t)

        print 'Executing cmd (%s) in a %s sandbox: %s' % (
            worker.call.body['cmd'], '+'.join(
                worker.env_names), worker.sandbox)

        # Track the submitted worker by its Work Queue task id.
        self.workers[taskid] = worker
        self.worker_cnt += 1
Ejemplo n.º 28
0
    def run(self):
        """Fork the worker pool, run the logger process, and wait for shutdown.

        Spawns ``self.num_workers`` Worker processes sharing the listening
        socket and a concurrent cache, plus one dedicated logging process fed
        through ``log_queue``.  Blocks until every worker exits, then tells
        the logger to stop (``None`` sentinel) and joins it.
        """
        workers = []
        log_queue = Queue()

        cache = ConcurrentCache(self.cache_size, Manager())

        # Create pool of workers
        for i in range(self.num_workers):
            w = Worker(i, self.server_socket, log_queue, cache, self.url_fs,
                       self.num_fs)
            workers.append(w)
            w.start()

        # Ignore SIGINT in the main process; the workers handle shutdown.
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        # The children inherited the socket; the parent no longer needs it.
        self.server_socket.close()

        logger.init('http-server')

        # Renamed from `logging`, which shadowed the stdlib logging module.
        log_process = Process(target=logger.log_worker, args=(log_queue, ))
        log_process.start()

        # Wait for every worker to finish
        for worker in workers:
            worker.join()

        # Tell the logger to finish
        log_queue.put(None)
        log_process.join()

        print('Server finished')
Ejemplo n.º 29
0
def worker(ch, method, properties, body, total_errors=0):
    """Consume one queue message: transform the named file and notify the loader.

    On any exception, waits ``GENERAL_ERROR_TIMEOUT`` seconds and retries by
    calling itself recursively, giving up (and acking the message) once
    ``total_errors`` reaches ``MAX_GENERAL_ERRORS``.
    """
    message_broker = MessageBroker()
    # BUG FIX: this local used to be named `worker`, shadowing the function
    # itself and breaking the recursive retry call at the bottom.
    etl_worker = Worker()

    # extract file name from the message queue
    file = body.decode("utf-8").strip()

    print(' ==========> [%s] Cycle start -' % os.getpid())

    if total_errors >= MAX_GENERAL_ERRORS:
        # Give up: ack the message so it is not redelivered forever.
        print('max error reached')
        ch.basic_ack(delivery_tag=method.delivery_tag)
        return

    # flow
    try:
        # etl_worker.transform(file)
        ch.basic_ack(delivery_tag=method.delivery_tag)

        message_broker.warn_loader(file)

    # Fail Safe, in case of Exceptions try again with
    # the MAX_GENERAL_ERRORS limit
    except Exception as e:
        total_errors += 1
        print('A Exception occured[%s]: Waiting to recover %s seconds' %
              (e, GENERAL_ERROR_TIMEOUT))

        time.sleep(GENERAL_ERROR_TIMEOUT)
        worker(ch, method, properties, body, total_errors)

    print(' ==========> [%s] Cycle end -' % os.getpid())
Ejemplo n.º 30
0
 def __init__(self, num_workers=10, timeout=10):
     """Create `num_workers` Workers sharing one queue of unconnected nodes."""
     self.timeout = timeout
     self.unconnected_node_queue = Queue()
     # Each worker gets the shared queue, the common timeout and its index.
     self.workers = []
     for worker_index in range(num_workers):
         self.workers.append(
             Worker(self.unconnected_node_queue, self.timeout, worker_index))