Example #1
def generate_in_background(generator, num_cached=10):
    """
    Runs a generator in a background thread, caching up to `num_cached` items.
    """
    import queue
    queue = queue.Queue(maxsize=num_cached)
    sentinel = object()  # guaranteed unique reference

    # define producer (putting items into queue)
    def producer():
        for item in generator:
            queue.put(item)
        queue.put(sentinel)

    # start producer (in a background thread)
    import threading
    thread = threading.Thread(target=producer)
    thread.daemon = True
    thread.start()

    # run as consumer (read items from queue, in current thread)
    item = queue.get()
    while item is not sentinel:
        yield item
        item = queue.get()
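
A minimal usage sketch (slow_numbers is a hypothetical stand-in for any slow data source): iteration happens in the calling thread while the background producer keeps up to num_cached items ready.

import time

def slow_numbers(n):
    # hypothetical slow source, one item per 0.1 s
    for i in range(n):
        time.sleep(0.1)
        yield i

for value in generate_in_background(slow_numbers(5), num_cached=2):
    print(value)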
Example #2
        def _process_batch():
            dev_grad_batch, dev_events, job_event = queue.get()
            dev_coalesced = []
            # Coalesce the tensors on all devices and start a local reduction
            for dev_id, grad_batch, event, stream in zip(device_ids, dev_grad_batch, dev_events, reduction_streams):
                with torch.cuda.device(dev_id), torch.cuda.stream(stream):
                    stream.wait_event(event)
                    coalesced = _flatten_tensors(grad_batch)
                    dev_coalesced.append(coalesced)
            # Wait for all copies to complete before starting the NCCL kernel
            for stream in reduction_streams:
                stream.synchronize()
            nccl.reduce(dev_coalesced, root=device_ids[0], streams=nccl_streams)

            # From now on we're only going to work on the first device (from device_ids)
            grad_batch = dev_grad_batch[0]
            coalesced = dev_coalesced[0]
            reduce_stream = reduction_streams[0]
            with torch.cuda.stream(reduce_stream):
                reduce_stream.wait_stream(nccl_streams[0])
                coalesced /= dist.get_world_size()
                dist.all_reduce(coalesced, group=group_id)
                for grad, reduced in zip(grad_batch, _unflatten_tensors(coalesced, grad_batch)):
                    grad.copy_(reduced)
            job_event.set()
Example #3
    def start(self):
        "Start module to start reading files"
        # Create new threads
        thread1 = RouterThread(1, "Data Link 1", in_file1, delay1, queueList[0])
        thread2 = RouterThread(2, "Data Link 2", in_file2, delay2, queueList[1])
        thread3 = RouterThread(3, "Data Link 3", in_file3, delay3, queueList[2])

        # Start new Threads
        thread1.start()
        thread2.start()
        thread3.start()

        # Add threads to thread list
        self.threads.append(thread1)
        self.threads.append(thread2)
        self.threads.append(thread3)

        # Wait for all threads to complete
        for t in self.threads:
            t.join()
        print("Exiting Main Thread")

        packetParser = self.PacketParser()

        # Print output - Get bytes from Queue
        print("Printing items from each queue -");
        for queue in queueList:
            while not queue.empty():
                print("***********************************************************************************************************")
                packetParser.parsePacket(queue.get())
Example #4
 def default_filter(queue, *args):
     while True:
         line = queue.get()
         if not line:
             self.logger.debug('Process exiting (status_loop)')
             break
         yield line
Example #5
 def run(self):
     while True:
         message = queue.get()
         logging.info("Main server recieved: %s" % (sub("\0", "!",
                                                    message)))
         for con in self.conns:
             con.send(message)
Example #6
    def blockingCallFromThread(f, *a, **kw):
        """
        Run a function in the reactor from a thread, and wait for the result
        synchronously, i.e. until the callback chain returned by the function gets a
        result.

        @param f: the callable to run in the reactor thread
        @type f: any callable.
        @param a: the arguments to pass to C{f}.
        @param kw: the keyword arguments to pass to C{f}.

        @return: the result of the callback chain.
        @raise: any error raised during the callback chain.
        """
        from twisted.internet import reactor
        import queue
        queue = queue.Queue()
        def _callFromThread():
            result = defer.maybeDeferred(f, *a, **kw)
            result.addBoth(queue.put)
        
        reactor.callFromThread(_callFromThread)
        result = queue.get()
        if isinstance(result, failure.Failure):
            # This makes it easier for the debugger to get access to the instance
            try:
                result.raiseException()
            except Exception as e:
                raise e
        return result
Example #7
def worker(queue, args):
    while True:
        action = queue.get()
        if action is None:
            break
        doaction(action, args)
        queue.task_done()
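
A usage sketch of the sentinel-terminated worker above; doaction here is a hypothetical stand-in for the handler assumed by the snippet. queue.join() waits for the submitted work, then one None sentinel per thread shuts the pool down.

import queue
import threading

def doaction(action, args):
    print(action, args)  # hypothetical stand-in for the real handler

q = queue.Queue()
threads = [threading.Thread(target=worker, args=(q, None)) for _ in range(4)]
for t in threads:
    t.start()
for action in ("parse", "fetch", "store"):
    q.put(action)
q.join()            # blocks until every action has been task_done()
for _ in threads:
    q.put(None)     # one sentinel per worker
for t in threads:
    t.join()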
Example #8
def absoluteFade(indexes, rgb, fadeTime):
    '''Is given a color to fade to, and executes fade'''
    if not fadeTime:
        fadeTime = 1 / frameRate
    #Reassigning the loop variable would not modify the list, so rebuild it
    rgb = [makeEightBit(c) for c in rgb]
    #Calculates how many individual fade frames are needed
    alterations = int(fadeTime * frameRate)
    queueList = []
    queueLock.acquire()
    while not queue.empty():
        queueList.append(queue.get())
        queue.task_done()
    #Amount of frames that need to be added to queue
    appends = alterations - len(queueList)
    #fill out the queue with blank dictionaries to populate
    if appends > 0:
        for i in range(abs(appends)):
            queueList.append({})
    #Iterate down indexes, figure out what items in queue need to be altered
    for i in indexes:
        #INVESTIGATE: THIS MIGHT BE THE SOURCE OF FLASHING ISSUES AT THE START OF A COMMAND
        start = pixels[i]
        bridgeGenerator = bridgeValues(alterations, start, rgb)
        for m in range(alterations):
            queueList[m][i] = next(bridgeGenerator)
    #If this command overrides a previous command to the pixel, it should wipe any commands remaining
        if appends < 0:
            for r in range(abs(appends)):
                if i in queueList[alterations + r]:
                    del queueList[alterations + r][i]
    while queueList:
        queue.put(queueList.pop(0))
    queueLock.release()
Example #9
 def storeResults(queue, key):
     server = redis.StrictRedis()
     while True:
         result = queue.get()
         if result == 'END':
             break
         server.rpush(key, json.dumps(result))
Example #10
def multiCommand(commands):
    maxAlterations = int(max([i[2] for i in commands]) * frameRate)
    queueList = []
    queueLock.acquire()
    while not queue.empty():
        queueList.append(queue.get())
        queue.task_done()
    appends = maxAlterations - len(queueList)
    if appends > 0:
        for i in range(abs(appends)):
            queueList.append({})
    for c in commands:
        commandAlterations = int(c[2] * frameRate)
        for i in range(c[0][0], c[0][1]):
            start = pixels[i]
            bridgeGenerator = bridgeValues(commandAlterations, start, c[1])
            for m in range(commandAlterations):
                queueList[m][i] = next(bridgeGenerator)
        if appends < 0:
            for r in range(abs(appends)):
                if i in queueList[commandAlterations + r]:
                    del queueList[commandAlterations + r][i]
    while queueList:
        queue.put(queueList.pop(0))
    queueLock.release()
Example #11
def ucs(source, target, graph):
    """ Uniform-cost graph search """
    import queue
    queue = queue.PriorityQueue()  # fringe
    queue.put((0, source))

    parent = {source:None}
    visited = {}

    while not queue.empty():
        (d, v_in) = queue.get()

        if v_in not in visited or d < visited[v_in]:

            if v_in == target:
                return (d, build_path(parent, target))

            for v_out in graph.adj(v_in):
                cost = graph.distance(v_in, v_out) + d
                if v_out not in visited:
                    queue.put((cost, v_out))
                    parent[v_out] = v_in

            visited[v_in] = d  # record the cost at which v_in was settled

    return None
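
build_path is referenced above but not shown; a plausible sketch (an assumption, not the original helper) walks the parent map back from the target and reverses it.

def build_path(parent, target):
    # follow parent links back to the source, then reverse
    path = []
    node = target
    while node is not None:
        path.append(node)
        node = parent[node]
    return list(reversed(path))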
Example #12
def sender(queue):
    # TODO set time limit for checking
    header = Header()
    header.size = 1416
    while True:
        #set up connection
        payload = bytearray()
        payload.extend(header.serialize())
        count = 0
        while count < 22:
            #print("have %d sludge " % (count))
            hash = queue.get()
            payload.extend(hash)
            count += 1
        try:
            print("sending sludge downstream")
            attempts = 0
            while attempts < 10:
                try:
                    sludge_outgoing = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    sludge_outgoing.connect(("downstream", 4444))
                    sludge_outgoing.send(payload)
                    sludge_outgoing.close()
                    break
                except:
                    traceback.print_exc()
                    if attempts == 9:
                        raise Exception
                attempts += 1
        except:
            # TODO make error log 
            f = open('/home/sbartholomew/sludgeOut', 'wb')
            f.write(payload)
            f.close()
Example #13
 def next_item(self):
     queue = self._queue
     try:
         item = queue.get(block=True, timeout=5)
         return item
     except Exception:
         return None
Example #14
    def run_job(self, deviceid):
        queue = self._queue
        while not self._shutdown:
            path = None
            try:
                path = queue.get(timeout=1)
            except:
                pass

            if path:
                if deviceid is None:
                    logger.info('Running ' + path)
                else:
                    logger.info("Running " + path + " on device " + str(deviceid))
                self._setRunning(path)

                runsh = os.path.join(path, 'run.sh')
                jobsh = os.path.join(path, 'job.sh')
                self._createJobScript(jobsh, path, runsh, deviceid)

                try:
                    ret = check_output(jobsh)
                    logger.debug(ret)
                except Exception as e:
                    logger.info('Error in simulation {}. {}'.format(path, e))
                    self._setCompleted(path)
                    queue.task_done()
                    continue

                logger.info("Completed " + path)
                self._setCompleted(path)
                queue.task_done()

        logger.info("Shutting down worker thread")
Example #15
def run_job(obj, gpuid, jobfun, jobargs):
    queue = obj.queue
    while not obj.shutdown:
        path = None
        try:
            path = queue.get(timeout=1)
        except:
            pass

        if path:
            try:
                logger.info("Running " + path + " on GPU device " + str(gpuid))
                obj.running(path)

                try:
                    jobfun(*jobargs, path=path, gpuid=gpuid)
                except:
                    obj.completed(path)
                    queue.task_done()
                    continue

                logger.info("Completed " + path)
                obj.completed(path)
                queue.task_done()
            except:
                logger.error("Error running job {}".format(path))
                obj.completed(path)
                queue.task_done()
                continue
    logger.info("Shutting down worker thread")
Example #16
def worker(id, queue):
    while True:
        try:
            obj = queue.get(timeout=3)
            print(id, obj)
        except:
            break
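
A usage sketch for the timeout-draining worker above (names are illustrative): each thread prints items until the shared queue has been empty for three seconds, then exits.

import queue
import threading

q = queue.Queue()
for n in range(10):
    q.put(n)

threads = [threading.Thread(target=worker, args=(i, q)) for i in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()  # each worker returns once its 3 s timeout fires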
Example #17
 def runThread(self, port):
     """Router's infinite thread loop. Receives and sends packages
        to hosts/routers."""
     queue = self.portBuffer[port]
     while True:
         packet = queue.get()
         self.process(port, packet)
         queue.task_done()
Example #18
def async_write(buffer=[]):
    # the mutable default argument acts as persistent state, so items
    # accumulate across calls until the buffer is flushed below
    item = queue.get()
    buffer.append(item)
    if len(buffer) > 500 or queue.empty():
        log.debug('Processing {} queue items.'.format(len(buffer)))
        with db.transaction():
            write_buffer(buffer)
        buffer.clear()
Example #19
def handle_update():
    while True:
        label, content = queue.get()
        msg = json.dumps({'label': label, 'content': content})
        print('>>> Sending:', msg)
        print('>>> To:', clients)
        for client in clients:
            client.sendMessage(msg.encode('utf8'), isBinary=False)
Example #20
def submit_to_olog(queue, cb):
    while True:
        name, doc = queue.get()  # waits until document is available
        try:
            cb(name, doc)
        except Exception as exc:
            warn('This olog is giving errors. This will not be logged. '
                 'Error: ' + str(exc))
Example #21
def run_find_all_symbols(args, tmpdir, build_path, queue):
  """Takes filenames out of queue and runs find-all-symbols on them."""
  while True:
    name = queue.get()
    invocation = [args.binary, name, '-output-dir='+tmpdir, '-p='+build_path]
    sys.stdout.write(' '.join(invocation) + '\n')
    subprocess.call(invocation)
    queue.task_done()
Example #22
def submit_results(queue, submit_url):
	s = requests.Session()
	while True:
		crash_id, result = queue.get()
		logger.debug('%d results waiting', queue.qsize())
		logger.debug('submitting %d', crash_id)
		logger.info(result)
		_ = s.post(submit_url % crash_id, data=json.dumps(result), headers={'content-type': 'application/json'}).content
Example #23
 def run(self):
     global queue
     while True:
         if queue.qsize() > 100:
             for i in range(3):
                 # get() takes one value off the queue
                 msg = self.name + ' consumed ' + queue.get()
                 print(msg)
         time.sleep(1)
Example #24
 def run(self):
     queue = self.queue
     while True:
         try:
             task = queue.get()
             log.info("worker executing " + str(task))
             task._execute()
         except Exception as e:
             log.exception(e)
Example #25
def convert(queue):
    cmd_flac = ["flac", "--decode", "--silent", "--stdout"]
    cmd_lame = ["lame", "-h" "--preset", "224"]

    while not queue.empty() and not abort.is_set():
        f = queue.get()
        pFlac = Popen(cmd_flac + [f[0]], stdout=PIPE)
        pLame = Popen(cmd_lame + [f[1]], stdout=PIPE, stdin=pFlac.stdout, stderr=STDOUT)
        stdout, stderr = pLame.communicate()
Example #26
	def handle(self, call, queue):
		while True:
			if not queue.empty():
				m = queue.get()
				call(m)

			time.sleep(0.1)
Example #27
def _process_queue(queue):
	while True:
		processor, args = queue.get()
		logging.debug("calling %s with %d arguments: %s", processor, len(args), args)
		try:
			processor(*args)
		except:
			logging.exception("")
		queue.task_done()
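
A usage sketch for the dispatcher above (names are illustrative): work is submitted as (callable, args) tuples, and a daemon thread drains and executes them.

import logging
import queue
import threading

q = queue.Queue()
threading.Thread(target=_process_queue, args=(q,), daemon=True).start()
q.put((print, ("hello",)))                       # any callable plus its argument tuple
q.put((logging.warning, ("disk almost full",)))
q.join()                                         # wait until both calls have completed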
Example #28
def consumer(queue, name):
    def _print(string):
        print("%s: %s" % (name, string))
        
    while True:
        widget = queue.get()
       
        _print("will crank widget: %s" % widget)
        time.sleep(random.choice((1,3,5,3,6,7)))
        _print("Cranket!")
Example #29
def run_task():
	while True:
		ev, params = yield from queue.get()
		if ev == "log_chat":
			yield from do_log_chat(*params)
		elif ev == "clear_chat_log":
			yield from do_clear_chat_log(*params)
		elif ev == "rebuild_all":
			yield from do_rebuild_all()
		elif ev == "exit":
			break
Example #30
def Main():
#try to create the catalog database table if it does not exist; on failure, record that the catalog was already created
        try:
            create_catalog(cursor, connection)
        except:
            cat_created = False
        success = []                        #array that will be used to hold the queue values for successful queries
        updated = 0                         #boolean value that is used to show if catalog was updated
        count = 0                           #integer to hold the amount of times we iterate through the for loop that updates the catalog database
        tables = []                         #array to hold the tables used in the query
        firstsplit = []                     #array to help in the rudimentary parsing of the query
        nodeid = []                         #array to hold the nodeid where query was sent
        hostnames = []                      #array to hold the hostname where query was sent
        parse = query.split(" ")            #rudimentary parse of the query
#for loop that parses the query and stores the word following every TABLE keyword
        for x, word in enumerate(parse):
            if word == "TABLE":
                firstsplit.append(parse[x + 1])
#for loop that completes the second part of the parse
        for x in firstsplit:
            split = x.split('(', 1)
            tables.append(split[0])
        nodes = ConfigSectionMap("nodecount")['numnodes']  #variable to hold the total amount of nodes listed in the catalog database
        threads = []                        #array to hold the parallel threads that will be created
#for loop that creates the threads to send queries to multiple servers, and then saves them to the threads array.
        for x in range(1, int(nodes) + 1):
            section = "node " + str(x)
            threads.append(myThread(x, ConfigSectionMap(section)['hostname'], ConfigSectionMap(section)['ip'], query, queue))
            (threads[-1].start())           #starts all threads
            nodeid.append(x)                #saves the node where the thread is sending the query to into the nodeid array
            hostnames.append(ConfigSectionMap(section)['hostname'])   #saves the hostname where the thread is sending the query to into the hostnames array
#for loop that joins all the threads
        for t in threads:
            t.join()
#while loop to save the success or fail of every thread into the success array
        while not queue.empty():
            success.append(queue.get())
#for loop that iterates through the success array, if a query was successfully updated it checks to see if the query contained a drop or create statement.
#if so, calls the proper function to update the catalog
        for x in success:
            if x == 1:
                if parse[0] == "DROP":
                    remove_catalog(cursor, tables[0],connection)
                    updated = 1
                if parse[0] == "CREATE":
                    add_catalog(cursor, tables[0], hostnames[count], 0, nodeid[count], 0, 0, 0, connection)
                    updated = 1
            count += 1 
#if any query succeeded and contained a drop or create statement, print a catalog-updated message; otherwise report no updates
        if updated == 1:
            print(cathost + ": catalog updated.")
        else:
            print(cathost + ": catalog had no updates.")
        connection.close()                  #closes the connection to the database
Example #31
def launcher(logger, queue, launchfrequency, maxruntime):
    """Take jobs off the queue and run them, enforcing a maximum runtime."""
    maxseconds = maxruntime * 60
    time.sleep(3)  # allow jobqserver to start
    while True:
        time.sleep(launchfrequency)
        job = queue.get(block=True, timeout=None)
        if job:
            jobnumber = job[1]
            task_to_run = job[2]
            # Start a timer thread for maxruntime error
            timer_thread = threading.Timer(
                maxseconds,
                maxruntimeerror,
                args=(logger, maxruntime, jobnumber, task_to_run),
            )
            timer_thread.start()
            try:
                t0 = datetime.datetime.now()
                logger.info('Starting job %(job)s', {'job': jobnumber})
                result = subprocess.call(task_to_run,
                                         stdin=open(os.devnull, 'r'),
                                         stdout=open(os.devnull, 'w'),
                                         stderr=open(os.devnull, 'w'))
                t1 = datetime.datetime.now()
                time_taken = (t1 - t0).seconds

                logger.info(
                    'Finished job %(job)s, elapsed time %(time_taken)s, result %(result)s',
                    {
                        'job': jobnumber,
                        'time_taken': time_taken,
                        'result': result
                    })
            except Exception as e:
                logger.error('Error starting job {}: {}'.format(jobnumber, e))

                botslib.sendbotserrorreport(
                    '[Bots Job Queue] - Error starting job',
                    'Error starting job {}:\n {}\n\n {}'.format(
                        jobnumber, task_to_run, e))

            timer_thread.cancel()
            queue.task_done()
Example #32
def main():
    setupLogger()

    # gently manage control-c interrupt
    # if necessary
    def control_c_handler(sig, frame):
        raise Exception("Control-C received")

    signal.signal(signal.SIGINT, control_c_handler)

    # create queue connector
    service_bus = AzureQueueManager()
    service_bus.connect()
    service_bus.topics()  # test connection credentials by asking for topics

    # start reading message queue
    import queue
    msg_queue = queue.Queue()
    queueListenerThread = Thread(target=populateMessageQueue,
                                 args=(service_bus, 'test-topic',
                                       'test-suscriber-1', msg_queue, 10000))
    queueListenerThread.daemon = True
    queueListenerThread.start()  # start collecting messages from the queue

    # main loop
    while True:
        message = None
        try:
            message = msg_queue.get(block=False, timeout=0)
        except queue.Empty:
            pass

        if message:
            # parse
            # if drone start ODB
            # if satellite start GDAL

            # started have to be put in queue to check if finished with relative message

            # check queued processes

            # if queued process ed => unlog message and remove it
            pass

        time.sleep(2000)
Example #33
def evaluate(model, dataloader, queue, criterion, device):
    logger.info('evaluate() start')
    total_loss = 0.
    total_num = 0
    total_dist = 0
    total_length = 0
    total_sent_num = 0

    model.eval()

    with torch.no_grad():
        while True:
            feats, scripts, feat_lengths, script_lengths = queue.get()
            if feats.shape[0] == 0:
                break

            feats = feats.to(device)
            scripts = scripts.to(device)

            src_len = scripts.size(1)
            target = scripts[:, 1:]

            model.module.flatten_parameters()
            logit = model(feats,
                          feat_lengths,
                          scripts,
                          teacher_forcing_ratio=0.0)

            logit = torch.stack(logit, dim=1).to(device)
            y_hat = logit.max(-1)[1]

            loss = criterion(logit.contiguous().view(-1, logit.size(-1)),
                             target.contiguous().view(-1))
            total_loss += loss.item()
            total_num += sum(feat_lengths)

            display = random.randrange(0, 100) == 0
            dist, length = get_distance(target, y_hat, display=display)
            total_dist += dist
            total_length += length
            total_sent_num += target.size(0)

    logger.info('evaluate() completed')
    return total_loss / total_num, total_dist / total_length
Example #34
def evaluate(model,
             dataloader,
             queue,
             criterion,
             device,
             confusion_matrix=None):
    logger.info('evaluate() start')
    total_loss = 0
    total_num = 0
    total_correct = 0
    total_sent_num = 0

    model.eval()

    with torch.no_grad():
        while True:
            feats, label, feat_lengths = queue.get()
            if feats.shape[0] == 0:
                break

            feats = feats.to(device)
            label = label.to(device)

            logit = model(feats, feat_lengths).to(device)

            y_hat = logit.max(-1)[1]

            if type(confusion_matrix) is torch.Tensor:
                update_confusion_matrix(confusion_matrix,
                                        label.cpu().numpy(),
                                        y_hat.cpu().numpy())

            correct = torch.eq(y_hat, label)
            batch_correct = torch.nonzero(correct).size(0)
            total_correct += batch_correct

            loss = criterion(logit.contiguous(), label)
            total_loss += loss.item()
            total_num += logit.size(0)

            total_sent_num += label.size(0)

    logger.info('evaluate() completed')
    return total_loss / total_num, total_correct / total_sent_num
Example #35
def checkerLoop(queue):
    """ This checks each incoming file. If they are not PNG files they
        get deleted. This will protect against uploading HTML and XSS

        This will be run as a background thread
        """
    while True:
        filename = queue.get()
        res = subprocess.run(["file", filename],
                             timeout=15,
                             stdout=subprocess.PIPE)
        res = res.stdout.decode('utf-8')
        print(res)
        if not ("PNG image data" in res or "JPEG image data" in res):
            os.remove(filename)
            bad_file_log.add(filename)
        else:
            suspicious_file_log.remove(os.path.basename(filename))
Example #36
    def _validate(self, model: nn.Module, queue: queue.Queue) -> float:
        """
        Run one epoch of validation

        Args:
            model (torch.nn.Module): model to validate
            queue (queue.Queue): validation queue, containing input, targets, input_lengths, target_lengths

        Returns: cer
            - **cer** (float): character error rate of validation
        """
        target_list, predict_list = list(), list()
        cer = 1.0

        model.eval()
        logger.info('validate() start')

        while True:
            inputs, targets, input_lengths, target_lengths = queue.get()

            if inputs.shape[0] == 0:
                break

            inputs = inputs.to(self.device)
            targets = targets[:, 1:].to(self.device)
            model.to(self.device)

            if isinstance(model, nn.DataParallel):
                y_hats = model.module.recognize(inputs, input_lengths)
            else:
                y_hats = model.recognize(inputs, input_lengths)
                
            for idx in range(targets.size(0)):
                target_list.append(self.vocab.label_to_string(targets[idx]))
                predict_list.append(self.vocab.label_to_string(y_hats[idx].cpu().detach().numpy()))
                
            cer = self.metric(targets, y_hats)

        self._save_result(target_list, predict_list)
        logger.info('validate() completed')

        return cer
Example #37
def clockLoop():
    '''Removes items from the queue and transmits them to the controller'''
    print('Initiating Clocker')
    while True:
        #This was one line further down, probably a mistake
        alteration = queue.get(True, None)
        queueLock.acquire()
        queue.task_done()
        for alt in alteration:
            pixels[alt] = alteration[alt]
        if OLA:
            listStr = str(pixels)[1:-1]
            requests.post(olaUrl, data={'u': 1, 'd': listStr})
        else:
            for alt in alteration:
                dmx.setChannel(alt, alteration[alt])
            dmx.render()
        queueLock.release()
        time.sleep((1 / frameRate) * .75)
Example #38
    def run(self):
        queue = self.queue
        while True:
            # Grab our data
            callback, requests = queue.get()

            # Grab prices, this is the time-consuming part
            if len(requests) > 0:
                Price.fetchPrices(requests)

            wx.CallAfter(callback)
            queue.task_done()

            # After we fetch prices, go through the list of waiting items and call their callbacks
            for price in requests:
                callbacks = self.wait.pop(price.typeID, None)
                if callbacks:
                    for callback in callbacks:
                        wx.CallAfter(callback)
Example #39
 def loop(self):
     queue = self.new_queue()
     sock = self.sock
     while True:
         parts = queue.get()
         if parts is None:
             break
         try:
             if len(parts) > 1:
                 sock.send_multipart(parts, copy=False)
             else:
                 sock.send(parts[0], copy=False)
         except zmq.ZMQError as err:
             new_queue = self.new_queue()
             flushed = queue.qsize()
             queue = new_queue
             log.error(
                 f'Error send ZMQ: {err!r}. Flushed {flushed} messages')
     self.new_queue()
Example #40
def queue_consumer(queue: queue.Queue, url_count: int, results_callback):
    i = 0
    result_count = 0

    with progressbar.ProgressBar(max_value=url_count, redirect_stdout=True) as bar:
        while True:
            finished = queue.get()
            if type(finished) == ImageInfo:
                results_callback(finished)
                result_count += 1
            elif type(finished) == str:
                results_callback(finished, error=True)
            else:
                queue.task_done()
                break

            queue.task_done()
            bar.update(i)
            i += 1
Example #41
def searchContract(queue, collection, parser):
    while not exitFlag:
        queueLock.acquire()
        if not queue.empty():
            #address = queue.get()
            #queueLock.release()
            #result = collection.find({"address": address})

            contract = queue.get()
            queueLock.release()

            #byteCode = queue.get()
            #queueLock.release()
            #result = collection.find({"byteCode": byteCode}).sort("balance", pymongo.DESCENDING).limit(1)
            #if result.count() > 0:
            #    contract = list(result)[0]
            #print('Writing contract to file: '+contract['address'])
            file_path = CONTRACT_FOLDER + str(contract['address'])
            #file_path = CONTRACT_FOLDER+str(contract['contractName'])
            extension = ""
            counter = 1
            # Write byte code to file
            if BYTECODE:
                writeLock.acquire()
                while os.path.exists(file_path + extension + ".bin"):
                    counter += 1
                    extension = "_" + str(counter)
                file = open(file_path + extension + ".bin", "w")
                file.write(parser.unescape(contract['byteCode']))
                file.close()
                writeLock.release()
            # Write source code to file
            else:
                writeLock.acquire()
                while os.path.exists(file_path + extension + ".sol"):
                    counter += 1
                    extension = "_" + str(counter)
                file = open(file_path + extension + ".sol", "w")
                file.write(parser.unescape(contract['sourceCode']))
                file.close()
                writeLock.release()
        else:
            queueLock.release()
Example #42
    def searchRange(self, root, k1, k2):
        # write your code here
        import queue
        if root is None:
            return []
        queue = queue.Queue()
        queue.put(root)
        result = []

        while not queue.empty():
            node = queue.get()
            if node.val <= k2 and node.val >= k1:
                result.append(node.val)
            if node.left:
                queue.put(node.left)
            if node.right:
                queue.put(node.right)

        return result
Example #43
def BFS(queue, game, celltype):

    matrix = game.env.grid

    current_index = queue.get()
    current_x, current_y = current_index[0], current_index[1]

    element = matrix[current_y, current_x]

    if element == celltype: return [current_y, current_x]

    for n in range(current_x - 1, current_x + 2):
        for m in range(current_y - 1, current_y + 2):
            if not (n == current_x and m == current_y) \
                and n > -1 and m > -1 \
                and n < matrix.shape[1] and m < matrix.shape[0] \
                and (n, m) not in queue.queue:
                queue.put((n, m))
    return BFS(queue, game, celltype)
Example #44
def consume_output_queue(queue):
    """ Gets message objects from queue and sends them
        message objects are of the following form: {"sleep":0, "channel":"", "text":"", "blocks":[]} """
    while True:
        data = queue.get()
        if 'text' in data:
            response = webclient.chat_postMessage(channel=data['channel'],
                                                  text=data['text'])
        elif 'blocks' in data:
            response = webclient.chat_postMessage(channel=data['channel'],
                                                  blocks=data['blocks'])
        else:
            raise RuntimeError("Bad output message")

        if not response['ok']:
            raise RuntimeError("Slack error: {}".format(response['error']))

        if 'sleep' in data:
            sleep(data['sleep'])
Example #45
 def _loop(self, qid):
     if not self._is_valid_qid(qid):
         return
     data = self._queues[qid]
     if not data["active"]:
         return
     queue = data["queue"]
     consumer = data["consumer"]
     unpack_result = data["unpack_result"]
     exception_handler = data["exception_handler"]
     latency = data["latency"]
     if not queue.empty():
         result = queue.get()
         self._dispatch_result(result, consumer, unpack_result,
                               exception_handler)
         if result is QueueTail:
             return
     next_call = lambda self=self, qid=qid: self._loop(qid)
     self._tk.after(latency, func=next_call)
Example #46
def insert_url(thread_name, queue, table):
    while not EXIT_FLAG:
        QUEUE_LOCK.acquire()
        if not WORK_QUEUE.empty():
            try:
                url = queue.get()
                QUEUE_LOCK.release()
                count = list(
                    database_util.search_sql(
                        'select count(*) url from ' + table + ' where url=%s',
                        url)[1])[0][0]
                if count == 0:
                    sql = 'insert into ' + table + ' set url=%s'
                    database_util.update_sql(sql, url)
            except Exception as err:
                print('thread_queue update_price err:' + str(err))
        else:
            QUEUE_LOCK.release()
        time.sleep(1)
Example #47
 def new_function(*args, **kwargs):
     """
     Create a queue, create a queuefun from original_function and make the
     new queuefun the target of a thread, passing the thread target
     original_function's args and kwargs. Then instantiate a new Tk Toplevel
     Frame called master and a new waitWidget with the queue and master.
     Start master's mainloop which exits when the original_function's
     results are fed to the queue. Destroy master and return the
     original_function's results.
     """
     import queue
     queue = queue.Queue()
     queuefun = setqueue(original_function, queue)
     thread = Thread(target=queuefun, args=args, kwargs=kwargs)
     thread.start()
     master = Toplevel()
     waitBox = waitWidget(queue, master)
     waitBox.mainloop()
     master.destroy()
     return queue.get()
Example #48
 def process_datastream(self, queue):
     sample_number = 0  # running sample count; the original referenced an undefined name here (assumed intent)
     while ((len(self.dataMatrix[0]) < self.max_samples and not self.continuous_mode) or \
         (self.read_samples_continuously and self.continuous_mode)):
         while not queue.empty():
             timestamp, channel_data = queue.get()
             sample_number += 1
             if not self.quiet:
                 print(
                     f"timestamp:{timestamp} sample_number: {sample_number}| ",
                     end='')
                 for channel_number, sample in enumerate(channel_data):
                     print(f"{channel_number + 1}:{sample}", end='')
             if not self.pause_toggle:
                 with open("../data/" + self.fileName, 'a') as file:
                     file.writelines(
                         str(timestamp) + '\t' +
                         '\t'.join(str(j) for j in channel_data) + '\n')
                 self.dataMatrix[0].append(timestamp)
                 for channel_number, sample in enumerate(channel_data):
                     self.dataMatrix[channel_number + 1].append(sample)
Example #49
    def next_request(self):
        if queue.qsize() == 0 and len(self._crawling) == 0:  # all scheduled work is complete
            self._close.callback(None)
            return

        if len(self._crawling) >= self._max_currentcy:
            return
        while len(self._crawling) < self._max_currentcy:
            try:
                request = queue.get(block=False)
                self._crawling.append(request)
                d_defer = getPage(request.url.encode('utf-8'))
                # when the page download completes, get_reponse_callback invokes the user-defined parse method in the spider and enqueues any new requests with the scheduler
                d_defer.addCallback(self.get_reponse_callback, request)

                d_defer.addCallback(
                    lambda _: reactor.callLater(0, self.next_request))
            except Exception as e:
                return
Example #50
 def bfs(self, start, end):
     import queue
     queue = queue.Queue()
     visited = set()
     queue.put(start)
     distance = 1 
     while not queue.empty():
         qsize = queue.qsize()
         for i in range(qsize):
             word = queue.get()
             visited.add(word)
             if word == end:
                 return distance
             for new_word in self.get_next_word(word):
                 if new_word not in visited:
                     queue.put(new_word)
         distance += 1 
     
     return 0
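
get_next_word is assumed to be defined on the same class; a plausible sketch (an assumption, not the original) yields every word in an assumed self.dictionary set that differs by exactly one letter.

 def get_next_word(self, word):
     import string
     # hypothetical helper: all words in self.dictionary one letter away
     for i in range(len(word)):
         for ch in string.ascii_lowercase:
             candidate = word[:i] + ch + word[i + 1:]
             if candidate != word and candidate in self.dictionary:
                 yield candidate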
Example #51
    def run(self, queue):
        fileIndex = self.lastIndex
        createFile(clientId=self.clientId)
        # while True:
        if not queue.empty():
            r = self.speech_recognizer
            audio = queue.get()
            logging.debug('Getting ' + str(audio) + ' : ' +
                          str(queue.qsize()) + ' items in queue')
            # audio = 'recording-%i.wav' % fileIndex
            path = self.clientId + "/" + audio
            if os.path.isfile(path):
                print('we are reading ' + path)
                try:
                    with sr.AudioFile(path) as source:
                        audio = r.record(source)
                except IOError as e:
                    print(e)
                    pass
                try:
                    text = r.recognize_google(audio, language="en-US")
                    writeToFile(text + "\n", self.clientId, "a+")
                    print('Done!')
                    print(text)
                    fileIndex += 1
                    if text == "audio stop":
                        fileIndex = 0

                except sr.UnknownValueError:
                    writeToFile(
                        "Google Speech Recognition could not understand audio"
                        + "\n", self.clientId, "a+")
                except sr.RequestError:
                    writeToFile(
                        "Could not request results from Google Speech Recognition service",
                        self.clientId, "a+")
                except Exception as e:
                    print(e)
            else:
                print("no more files to transcribe for %s ......... " %
                      self.clientId)

        return queue
Example #52
def run_job(obj, ngpu, acemd, datadir):
    import sys
    queue = obj.queue
    while not obj.shutdown:
        path = None
        try:
            path = queue.get(timeout=1)
        except:
            pass

        if path:
            try:
                logger.info("Running " + path + " on GPU " + str(ngpu))
                obj.running(path)
                cmd = 'cd {}; {} --device {} input > log.txt 2>&1'.format(os.path.normpath(path), acemd, ngpu)
                try:
                    check_output(cmd, shell=True)
                except CalledProcessError:
                    logger.error('Error in ACEMD for path: {}. Check the {} file.'.format(path, os.path.join(path, 'log.txt')))
                    obj.completed(path)
                    queue.task_done()
                    continue

                # If a datadir is provided, copy finished trajectories there. Only works for xtc files.
                if datadir is not None:
                    if not os.path.isdir(datadir):
                        os.mkdir(datadir)
                    simname = os.path.basename(os.path.normpath(path))
                    odir = os.path.join(datadir, simname)
                    os.mkdir(odir)
                    finishedtraj = glob(os.path.join(path, '*.xtc'))
                    logger.info("Moving simulation {} to {}.".format(finishedtraj[0], odir))
                    move(finishedtraj[0], odir)

                logger.info("Completed " + path)
                obj.completed(path)
                queue.task_done()
            except:
                logger.error("Error running job")
                obj.completed(path)
                queue.task_done()
                continue
    logger.info("Shutting down worker thread")
Example #53
    def validate(self, model, queue):
        """
        Run one epoch of validation

        Args:
            model (torch.nn.Module): model to validate
            queue (queue.Queue): validation queue, containing input, targets, input_lengths, target_lengths

        Returns: cer
            - **cer** (float): character error rate of validation
        """
        cer = 1.0

        model.eval()
        logger.info('validate() start')

        with torch.no_grad():
            while True:
                inputs, scripts, input_lengths, script_lengths = queue.get()

                if inputs.shape[0] == 0:
                    break

                inputs = inputs.to(self.device)
                scripts = scripts.to(self.device)
                targets = scripts[:, 1:]

                model.module.flatten_parameters()
                output = model(inputs,
                               input_lengths,
                               teacher_forcing_ratio=0.0)[0]

                logit = torch.stack(output, dim=1).to(self.device)
                hypothesis = logit.max(-1)[1]

                cer = self.metric(targets, hypothesis)

                del inputs, input_lengths, scripts, targets, output, logit, hypothesis

        logger.info('validate() completed')
        return cer
Example #54
def evaluate(model, dataloader, queue, criterion, device):
    total_loss = 0.
    batch = 0

    # set model to eval mode
    model.eval()

    # begin logging
    logger.info('evaluate() start')
    begin = time.time()

    with torch.no_grad():
        while True:
            batch += 1

            if queue.empty():
                logger.debug('queue is empty')

            # input, target tensor shapes: (batch_size, n_mfcc, n_frames)
            inputs, targets = queue.get()
            batch_size = inputs.shape[0]

            # if no data from queue, end evaluation
            if batch_size == 0:
                break

            # load tensors to device
            inputs = inputs.to(device)
            targets = targets.to(device)

            # output tensor shape: (batch_size, n_mfcc, n_frames)
            # forward pass
            output = model(inputs).to(device)

            # compute loss
            loss = criterion(output.contiguous().view(-1), targets.contiguous().view(-1))
            total_loss += loss.item()

    # finish logging
    logger.info('evaluate() completed')

    return total_loss / batch
Example #55
    def _fn(*args, **kwargs):

        for i in range(int(1e6)):
            assert not queue.empty(), \
                "trying to get() from an empty queue will deadlock"

            priority, next_trace = queue.get()
            try:
                ftr = poutine.trace(poutine.escape(poutine.replay(fn, next_trace),
                                                   functools.partial(sample_escape,
                                                                     next_trace)))
                return ftr(*args, **kwargs)
            except NonlocalExit as site_container:
                site_container.reset_stack()
                for tr in poutine.util.enum_extend(ftr.trace.copy(),
                                                   site_container.site):
                    # add a little bit of noise to the priority to break ties...
                    queue.put((tr.log_prob_sum().item() - torch.rand(1).item() * 1e-2, tr))

        raise ValueError("max tries ({}) exceeded".format(str(1e6)))
Example #56
def get_comment(queue, table, page_no):
    while not EXIT_FLAG:
        QUEUE_LOCK.acquire()
        if not WORK_QUEUE.empty():
            try:
                sku = queue.get()
                QUEUE_LOCK.release()
                _spider = jd_spider.Spider()
                result = _spider.get_comment(table, sku, page_no)
                # if result[0] != -1:
                #     result = _spider.get_after_comment(table,sku,page_no)
                if result[0] != -1:
                    sql = 'update ' + table + ' set update_comment_time=%s where sku=%s '
                    data = [datetime.datetime.now(), sku]
                    database_util.update_sql(sql, data)
            except Exception as err:
                print('thread_queue get_comment err:' + str(err))
        else:
            QUEUE_LOCK.release()
        time.sleep(1)
Example #57
 def refresh_router(self, queue_name='result_task'):
      '''
      Package up the callback data
      '''
     try:
         messages = queue.get(queue_name, self.batch_size)
         logger.debug("refresh_router %s .work process messages begin, count: %d " %(queue_name, len(messages)))
         for body in messages:
             #logger.debug('---------'+ body +'-------------------')
             task = json.loads(body)
             #if isinstance(task, str):
                 #task = json.loads(task)
             #logger.debug(task)
             logger.debug('router for refresh: %s' % task.get('session_id'))
             self.merge_refresh(task)
          for key in self.merged_refresh.keys():
             self.update_refresh_result(self.merged_refresh.get(key))
         logger.info("refresh_router %s .work process messages end, count: %d " %(queue_name, len(messages)))
      except Exception as e:
          logger.warning('refresh_router %s work error: %s' % (queue_name, traceback.format_exc()))
Example #58
def th_fn(queue, path1, face_client, res_list, count, th_nb):
    countframes = 0
    while not queue.empty():
        ret, frame = queue.get()
        print(count)

        if not ret:
            break

        if (countframes % 2) == 0:
            resized = cv2.resize(frame, (720, 509))
            path2 = "./static/frame" + str(th_nb) + ".jpg"
            cv2.imwrite(path2, resized)

            try:
                compute(path1, path2, face_client, res_list, count, th_nb)
            except:
                pass
        countframes += 1
        count -= thread_nb
Example #59
def baidu_ocr():
    # receive the uploaded image
    img = request.files.get('file')
    if img:
        queue.put(img.read())
        time.sleep(random.uniform(1.5, 2.05))
        part = queue.get()

        @retry(stop_max_attempt_number=5)
        def sent_ocr():
            try:
                ocr_result = client.basicGeneral(part)
                return ocr_result
            except Exception as e:
                raise Exception('BaiDuOCR error: {}'.format(e))

        result = sent_ocr()
        return result

    return 400
Example #60
def worker(queue, user, size, outdir, total):
    from queue import Empty  # the `queue` parameter shadows the module, so import Empty directly
    while True:
        try:
            photo = queue.get(False)
        except Empty:
            break
        media_url = photo[1]
        urllib3_download(media_url, size, outdir)
        with lock:
            global downloaded
            downloaded += 1
            d = {
                'media_url': os.path.basename(media_url),
                'user': user,
                'index': downloaded + 1 if downloaded < total else total,
                'total': total,
            }
            progress = PROGRESS_FORMATTER % d
            sys.stdout.write('\r%s' % progress)
            sys.stdout.flush()