Example #1
def worker(queue, args):
    while True:
        action = queue.get()
        if action is None:
            break
        doaction(action, args)
        queue.task_done()
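
A minimal driver sketch for the worker above, assuming the standard library's queue.Queue and threading; run_workers, actions, and num_workers are illustrative names, not part of the original:

import queue as queue_mod
import threading

def run_workers(actions, args, num_workers=4):
    q = queue_mod.Queue()
    threads = [threading.Thread(target=worker, args=(q, args))
               for _ in range(num_workers)]
    for t in threads:
        t.start()
    for action in actions:
        q.put(action)
    q.join()           # blocks until task_done() has answered every put()
    for _ in threads:
        q.put(None)    # one sentinel per worker so each loop breaks
    for t in threads:
        t.join()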
Example #2
def fnThreadLoop(i, queue, lock):

    s = requests.Session()

    while True:
        #exit Thread when detect signal to quit.
        while libextra.fnExitNow():
            try:
                r = queue.get_nowait()
                break
            except Exception:  # queue was empty
                #libextra.fnQueueEmpty(1,lock)
                time.sleep(0.1)
                continue

        if not libextra.fnExitNow():
            break

        id = r[0]
        (status, gw_resp, log_msg) = fnJOB_CallAPI(s,r)
        gw_resp = pymysql.escape_string(gw_resp)

        #if (log_msg != ''):
        #    print(log_msg)

        QueueUpdate.put((id,status,gw_resp))
        queue.task_done()
Example #3
def absoluteFade(indexes, rgb, fadeTime):
    '''Is given a color to fade to, and executes fade'''
    if not fadeTime:
        fadeTime = 1 / frameRate
    rgb = [makeEightBit(c) for c in rgb]  # the original loop discarded the converted values
    #Calculates how many individual fade frames are needed
    alterations = int(fadeTime * frameRate)
    queueList = []
    queueLock.acquire()
    while not queue.empty():
        queueList.append(queue.get())
        queue.task_done()
    #Amount of frames that need to be added to queue
    appends = alterations - len(queueList)
    #fill out the queue with blank dictionaries to populate
    if appends > 0:
        for i in range(abs(appends)):
            queueList.append({})
    #Iterate down indexes, figure out what items in queue need to be altered
    for i in indexes:
        #INVESTIGATE: THIS MIGHT BE THE SOURCE OF FLASHING ISSUES AT THE START OF A COMMAND
        start = pixels[i]
        bridgeGenerator = bridgeValues(alterations, start, rgb)
        for m in range(alterations):
            queueList[m][i] = next(bridgeGenerator)
    #If this command overrides a previous command to the pixel, it should wipe any commands remaining
        if appends < 0:
            for r in range(abs(appends)):
                if i in queueList[alterations + r]:
                    del queueList[alterations + r][i]
    while queueList:
        queue.put(queueList.pop(0))
    queueLock.release()
Example #4
    def run_job(self, deviceid):
        queue = self._queue
        while not self._shutdown:
            path = None
            try:
                path = queue.get(timeout=1)
            except Exception:  # most likely queue.Empty after the timeout
                pass

            if path:
                if deviceid is None:
                    logger.info('Running ' + path)
                else:
                    logger.info("Running " + path + " on device " + str(deviceid))
                self._setRunning(path)

                runsh = os.path.join(path, 'run.sh')
                jobsh = os.path.join(path, 'job.sh')
                self._createJobScript(jobsh, path, runsh, deviceid)

                try:
                    ret = check_output(jobsh)
                    logger.debug(ret)
                except Exception as e:
                    logger.info('Error in simulation {}. {}'.format(path, e))
                    self._setCompleted(path)
                    queue.task_done()
                    continue

                logger.info("Completed " + path)
                self._setCompleted(path)
                queue.task_done()

        logger.info("Shutting down worker thread")
Example #5
def multiCommand(commands):
    maxAlterations = int(max([i[2] for i in commands]) * frameRate)
    queueList = []
    queueLock.acquire()
    while not queue.empty():
        queueList.append(queue.get())
        queue.task_done()
    appends = maxAlterations - len(queueList)
    if appends > 0:
        for i in range(abs(appends)):
            queueList.append({})
    for c in commands:
        commandAlterations = int(c[2] * frameRate)
        for i in range(c[0][0], c[0][1]):
            start = pixels[i]
            bridgeGenerator = bridgeValues(commandAlterations, start, c[1])
            for m in range(commandAlterations):
                queueList[m][i] = next(bridgeGenerator)
        if appends < 0:
            for r in range(abs(appends)):
                if i in queueList[commandAlterations + r]:
                    del queueList[commandAlterations + r][i]
    while queueList:
        queue.put(queueList.pop(0))
    queueLock.release()
Example #6
 def runThread(self, port):
     """Router's infinite thread loop. Receives and sends packages
        to hosts/routers."""
     queue = self.portBuffer[port]
     while True:
         packet = queue.get()
         self.process(port, packet)
         queue.task_done()
Example #7
def run_find_all_symbols(args, tmpdir, build_path, queue):
  """Takes filenames out of queue and runs find-all-symbols on them."""
  while True:
    name = queue.get()
    invocation = [args.binary, name, '-output-dir='+tmpdir, '-p='+build_path]
    sys.stdout.write(' '.join(invocation) + '\n')
    subprocess.call(invocation)
    queue.task_done()
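
A hedged driver sketch for run_find_all_symbols, following the usual clang-tools pattern of daemon worker threads plus queue.join(); the filenames list and jobs count are assumptions for illustration:

import queue as queue_mod
import threading

def run_all(args, tmpdir, build_path, filenames, jobs=8):
    q = queue_mod.Queue()
    for _ in range(jobs):
        t = threading.Thread(target=run_find_all_symbols,
                             args=(args, tmpdir, build_path, q))
        t.daemon = True    # daemon threads die with the main thread
        t.start()
    for name in filenames:
        q.put(name)
    q.join()               # returns once every file has been processed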
Example #8
def write_buffer(buffer):
    for item in buffer:
        try:
            item['fn'](*item.get('args', ()), **item.get('kw', {}))
        except Exception:
            log.exception(
                'Exception while processing queue item: {}'
                .format(item))
        queue.task_done()
Example #9
def _process_queue(queue):
	while True:
		processor, args = queue.get()
		logging.debug("calling %s with %d arguments: %s", processor, len(args), args)
		try:
			processor(*args)
		except Exception:
			logging.exception("")
		queue.task_done()
Example #10
def clockLoop():
    '''Removes items from the queue and transmits them to the controller'''
    print('Initiating Clocker')
    while True:
        alteration = queue.get(True, None)
        queueLock.acquire()
        queue.task_done()
        for alt in alteration:
            pixels[alt] = alteration[alt]
        FCclient.put_pixels(pixels)
        time.sleep(1 / frameRate)
        queueLock.release()
Example #11
 def get_info():
     while True:
         try:
             name = queue.get(block=False)
             tasks_left = queue.qsize()
         except Empty:
             return
         info = tibiacom.char_info(name)
         self.chars[name].deaths = info["deaths"]
         refresh()
         queue.task_done()
         print("pzlock update: %d/%d" % ((task_count - tasks_left), task_count))
Example #12
 def run(self):
     date = self.conf.get("date",Date.getDate())
     queue = self.conf.get('queue')
     handle = self.conf.get('handle')
     if queue:
         while True:
             code=queue.get()
             try:
                 handle(date=date, code=code)
                 queue.task_done()
             except Exception:
                 logging.error('Error happened while downloading data.')
                 queue.task_done()
Example #13
    def _task(self):
        queue = self.queue
        # the queue can overflow

        with self.transaction_lock:
            while not queue.empty():
                queue.get_nowait()
                queue.task_done()

            queue.put(0)
            # INTERNAL: start counter for task

        while True:
            wait = self.default_execute_wait
            event = queue.get()

            if event is None:
                # STOP EVENT
                while not queue.empty():
                    queue.get_nowait()
                    queue.task_done()
                    # TODO: warning not queued event?
                    # TODO: just new stop flag?

                queue.task_done()
                # for stop event.
                return

            time.sleep(wait)
            # TODO: how should the sleep interval adapt automatically?
            # TODO: use a proper scheduler?

            with self.transaction_lock:
                current_transaction = self.current_transaction
                if current_transaction is None:
                    with self.transaction() as current_transaction:
                        pass

                if not current_transaction.operations:
                    queue.put(event + wait)
                    queue.task_done()
                    continue

                if event >= self.status.default_execute_wait:
                    self.current_transaction = None
                    self.execute_transaction(current_transaction)

                queue.put(0)
                queue.task_done()
Example #14
def threading_worker(constants, queue, dictionary):
    """Create threads for downloading m3u files."""
    while True:
        item = queue.get()
        if item is None:
            break
        else:
            m3u = item.m3u_filename
            url = constants['URL_M3U'] + m3u
            h = urllib.request.urlopen(url)
            website = h.read().decode()
            dictionary[m3u] = website
            show_name = item.name
            logging.debug('Added %s (%s) to m3u list.', m3u, show_name)
            queue.task_done()
Example #15
def process_videos(queue):
    while True:
        item = queue.get()
        # cmd =('avconv -i {} -t 00:00:10 -threads auto -strict experimental {}'
        cmd = ['avconv', '-y', '-i', item['file'],
               '-strict', 'experimental', '-preset', 'veryfast',
               '-vf', 'scale=-2:320,format=yuv420p', '-movflags', 'faststart',
               # '-t', '00:01:00',
               item['tmpname']]

        FNULL = open(os.devnull, 'w')
        subprocess.call(cmd, stdout=FNULL, stderr=subprocess.STDOUT)
        # subprocess.call(cmd)
        queue.history.remove(item)
        queue.task_done()
Example #16
 def run(self):
     while True:
         im = queue.get()
         if im is None:
             queue.task_done()
             sys.stdout.write("x")
             break
         f = io.BytesIO()
         im.save(f, test_format, optimize=1)
         data = f.getvalue()
         result.append(len(data))
         im = Image.open(io.BytesIO(data))
         im.load()
         sys.stdout.write(".")
         queue.task_done()
Example #17
    def work(self):
        """TODO: Docstring for work.
        :returns: TODO

        """
        while self.run:
            source, path, file = queue.get()
            dest = self.destination(source, path, file)
            dest = self.mapping(dest)
            os.makedirs(os.path.dirname(dest), exist_ok=True)
            if os.path.splitext(file)[-1] == os.path.splitext(dest)[-1]:
                self.copy(os.path.join(path, file), dest)
            else:
                self.convert(os.path.join(path, file), dest)
            queue.task_done()
Example #18
 def loop():
     while True:
         argv = queue.get()
         succeed = False
         for i in range(5):
             try:
                 func(*argv)
                 succeed = True
                 break
             except StopIteration:
                 pass
         if not succeed:
             print(func.__name__, argv, 'FAILED')
             print('-'*80)
         queue.task_done()
Example #19
def receivemessage():
    n = 0
    html = '<p>no messages!</p>'
    htmlmessage = ''
    sentby = []
    text = []
    username = request.form["username"]
    password = request.form["password"]
    for i in range(queue.qsize()):
        message = queue.get()
        if message[2] == username:
            sentby.append(username)
            text.append(message)
            n += 1
            queue.task_done()
    for x in range(0, len(sentby)):
        htmlmessage += '<div>' + sentby[x] + '</div><div>' + str(text[x]) + '</div>'
    if not htmlmessage:
        htmlmessage = html  # fall back to the "no messages" notice
    return render_template("message.html", html=htmlmessage)
Example #20
 def consumer_run(self, wiki_vector):
     """
     Running Consumer
     Taking the Calculate average vector
     """
     while True:
         try:
             read_file = queue.get()
             print("Consume %s", read_file)
             class_summary = ClassSummary(read_file, wiki_vector)
             class_summary.summary_class()
             queue.task_done()
             time.sleep(random.random())
         except Exception:  # note: the blocking queue.get() above will not raise Empty
             continue
Example #21
def worker(queue):
    conn2               = pg8000.connect(user="******", host="npaa4726", port=5432, database="cprodev", password="******") 
    insert_cursor       = conn2.cursor()
    count               = 0
    while True:
        item = queue.get()
        if item is None:
            conn2.commit()
            break
        do_work(insert_cursor, item)
        count += 1
        if count % 5000 == 0:
            print()
            print(threading.get_ident() , "-", count)
            print()
        queue.task_done()
Example #22
def absoluteFade(targetValues, fadeTime, sixteenBit):
    '''Is given a dictionary of indexes and their target values, and a fade time'''
    print('Fading now')
    targetValues = {int(k): int(v) for k, v in targetValues.items()}
    if not fadeTime:
        fadeTime = 1 / frameRate
    print(targetValues)
    #Calculates how many individual fade frames are needed
    alterations = int(fadeTime * frameRate)
    queueList = []
    queueLock.acquire()
    while not queue.empty():
        queueList.append(queue.get())
        queue.task_done()
    #Amount of frames that need to be added to queue
    appends = alterations - len(queueList)
    #fill out the queue with blank dictionaries to populate
    if appends > 0:
        for i in range(abs(appends)):
            queueList.append({})
    #Iterate down indexes, figure out what items in queue need to be altered
    for i in targetValues:
        #INVESTIGATE: THIS MIGHT BE THE SOURCE OF FLASHING ISSUES AT THE START OF A COMMAND
        start = pixels[i]
        end = targetValues[i]
        bridgeGenerator = bridgeValues(alterations, start, end)
        print('Index %d' % i)
        print('Start fade at %d' % start)
        print('End fade at %d' % end)
        for m in range(alterations):
            if sixteenBit:
                value = int(next(bridgeGenerator))
                highLow = sixteenToEight(value)
                queueList[m][i] = highLow[0]
                queueList[m][i + 1] = highLow[1]
            else:
                queueList[m][i] = int(next(bridgeGenerator))
    #If this command overrides a previous command to the pixel, it should wipe any commands remaining
        if appends < 0:
            for r in range(abs(appends)):
                if i in queueList[alterations + r]:
                    del queueList[alterations + r][i]
                    if sixteenBit:
                        del queueList[alterations + r][i + 1]
    while queueList:
        queue.put(queueList.pop(0))
    queueLock.release()
Example #23
    def run(self):
        queue = self.queue
        while True:
            # Grab our data
            callback, requests = queue.get()

            # Grab prices, this is the time-consuming part
            if len(requests) > 0:
                Price.fetchPrices(requests)

            wx.CallAfter(callback)
            queue.task_done()

            # After we fetch prices, go through the list of waiting items and call their callbacks
            for price in requests:
                callbacks = self.wait.pop(price.typeID, None)
                if callbacks:
                    for callback in callbacks:
                        wx.CallAfter(callback)
Example #24
def clockLoop():
    '''Removes items from the queue and transmits them to the controller'''
    print('Initiating Clocker')
    while True:
        #This was one line further down, probably a mistake
        alteration = queue.get(True, None)
        queueLock.acquire()
        queue.task_done()
        for alt in alteration:
            pixels[alt] = alteration[alt]
        if OLA:
            listStr = str(pixels)[1:-1]
            requests.post(olaUrl, data={'u':1, 'd':listStr})
        else:
            for alt in alteration:
                dmx.setChannel(alt, alteration[alt])
            dmx.render()
        queueLock.release()
        time.sleep((1 / frameRate) * .75)
Example #25
def run_job(obj, gpuid, jobfun, jobargs):
    queue = obj.queue
    while not obj.shutdown:
        path = None
        try:
            path = queue.get(timeout=1)
        except Exception:  # most likely queue.Empty after the timeout
            pass

        if path:
            try:
                logger.info("Running " + path + " on GPU device " + str(gpuid))
                obj.running(path)

                try:
                    jobfun(*jobargs, path=path, gpuid=gpuid)
                except Exception:
                    obj.completed(path)
                    queue.task_done()
                    continue

                logger.info("Completed " + path)
                obj.completed(path)
                queue.task_done()
            except Exception:
                logger.error("Error running job {}".format(path))
                obj.completed(path)
                queue.task_done()
                continue
    logger.info("Shutting down worker thread")
Example #26
def process_request(queue):
    while True:
        request = queue.get()
        result = make_request(request["host"], request["path"], request["method"], request["data"])
        if request["expected_status"] == 200:
            if result["status"] == 200:
                request["target"]["api-id"] = result["id"]
            else:
                print("Error while calling endopoint \"" + request["path"] + "\". Retrying.")
                result = make_request(request["host"], request["path"], request["method"], request["data"])
                if result["status"] == 200:
                    request["target"]["api-id"] = result["id"]
                else:
                    print("Error while calling endopoint \"" + request["path"] + "\". Exiting.")
                    sys.exit()
        else:
            if result["status"] != request["expected_status"]:
                print("Error while calling endopoint \"" + request["path"] + "\". Retrying.")
                result = make_request(request["host"], request["path"], request["method"], request["data"])
                if result["status"] != request["expected_status"]:
                    print("Error while calling endopoint \"" + request["path"] + "\". Exiting.")
        queue.task_done()
Example #27
def launcher(logger, queue, lauchfrequency, maxruntime):
    """"""
    maxseconds = maxruntime * 60
    time.sleep(3)  # allow jobqserver to start
    while True:
        time.sleep(lauchfrequency)
        job = queue.get(block=True, timeout=None)
        if job:
            jobnumber = job[1]
            task_to_run = job[2]
            # Start a timer thread for maxruntime error
            timer_thread = threading.Timer(
                maxseconds,
                maxruntimeerror,
                args=(logger, maxruntime, jobnumber, task_to_run),
                )
            timer_thread.start()
            try:
                t0 = datetime.datetime.now()
                logger.info('Starting job %(job)s', {'job': jobnumber})
                result = subprocess.call(
                    task_to_run,
                    stdin=open(os.devnull, 'r'),
                    stdout=open(os.devnull, 'w'),
                    stderr=open(os.devnull, 'w'))
                t1 = datetime.datetime.now()
                time_taken = (t1 - t0).seconds

                logger.info('Finished job %(job)s, elapsed time %(time_taken)s, result %(result)s',
                            {'job': jobnumber, 'time_taken': time_taken, 'result': result})
            except Exception as e:
                logger.error('Error starting job {}: {}'.format(jobnumber, e))

                botslib.sendbotserrorreport(
                    '[Bots Job Queue] - Error starting job',
                    'Error starting job {}:\n {}\n\n {}'.format(jobnumber, task_to_run, e))

            timer_thread.cancel()
            queue.task_done()
Example #28
    def processRequests(self):
        queue = self.queue
        cache = self.cache
        sMkt = Market.getInstance()
        while True:
            try:
                id_, callback = queue.get()
                set_ = cache.get(id_)
                if set_ is None:
                    set_ = sMkt.getShipList(id_)
                    cache[id_] = set_

                wx.CallAfter(callback, (id_, set_))
            except Exception as e:
                pyfalog.critical("Callback failed.")
                pyfalog.critical(e)
            finally:
                try:
                    queue.task_done()
                except Exception as e:
                    pyfalog.critical("Queue task done failed.")
                    pyfalog.critical(e)
Example #29
    def _show_progress(last_known_progress, end_event, queue):
        progress_values = [i for i in range(0, 110, 10)]  # [0, 10, ..., 100]
        chars = '|/-\\'
        msg = None
        while True:
            if not queue.full():
                # nothing in the queue yet, keep showing the last known progress
                progress = last_known_progress
            else:
                update = queue.get()
                # figure out what kind of update is being requested
                if update[0] == CmdProgressBarUpdateTypes.UPDATE_PROGRESS:
                    progress = update[1]
                    last_known_progress = progress
                else:
                    msg = update[1]
                # signal that the value has been consumed
                queue.task_done()

            num_progress_vals = bs(progress_values, progress)
            progress_info = '..'.join([''.join((str(i), '%')) for i in progress_values[:num_progress_vals]])
            progress_info = ''.join((progress_info, '.' * (53 - len(progress_info))))

            # for info msg updates, display the message
            if msg is not None:
                sys.stdout.write(''.join(('\r', ' ' * 70, '\r')))
                sys.stdout.write(''.join((msg, '\n')))
                msg = None

            # show progress
            for c in chars:
                sys.stdout.write('\r[ {0} ..{1}.. ]'.format(c, progress_info))
                sys.stdout.flush()
                time.sleep(0.4)

            if end_event.is_set():
                break
Example #30
def run_job(obj, ngpu, acemd, datadir):
    import sys
    queue = obj.queue
    while not obj.shutdown:
        path = None
        try:
            path = queue.get(timeout=1)
        except Exception:  # most likely queue.Empty after the timeout
            pass

        if path:
            try:
                logger.info("Running " + path + " on GPU " + str(ngpu))
                obj.running(path)
                cmd = 'cd {}; {} --device {} input > log.txt 2>&1'.format(os.path.normpath(path), acemd, ngpu)
                try:
                    check_output(cmd, shell=True)
                except CalledProcessError:
                    logger.error('Error in ACEMD for path: {}. Check the {} file.'.format(path, os.path.join(path, 'log.txt')))
                    obj.completed(path)
                    queue.task_done()
                    continue

                # If a datadir is provided, copy finished trajectories there. Only works for xtc files.
                if datadir is not None:
                    if not os.path.isdir(datadir):
                        os.mkdir(datadir)
                    simname = os.path.basename(os.path.normpath(path))
                    odir = os.path.join(datadir, simname)
                    os.mkdir(odir)
                    finishedtraj = glob(os.path.join(path, '*.xtc'))
                    logger.info("Moving simulation {} to {}.".format(finishedtraj[0], odir))
                    move(finishedtraj[0], odir)

                logger.info("Completed " + path)
                obj.completed(path)
                queue.task_done()
            except Exception:
                logger.error("Error running job")
                obj.completed(path)
                queue.task_done()
                continue
    logger.info("Shutting down worker thread")
Example #31
    def work():
        while not queue.empty():
            testCasesPath = queue.get()
            bpFileName = testCasesPath[testCasesPath.index('/TestCases/') +
                                       11:]
            print(('\n [*] Test case : %s' % bpFileName))
            base_test.logging.info('\n')
            base_test.logging.info('[*] Test case : %s' % bpFileName)

            try:
                blueprint = create_blueprint.load_blueprint(
                    testCasesPath=testCasesPath)
                get_testService_role(
                    blueprint=blueprint,
                    thread_name=threading.current_thread().name)
                request_cockpit_api = RequestCockpitAPI()
                request_cockpit_api.create_new_repository(
                    repository=request_cockpit_api.repo['name'])
                request_cockpit_api.send_blueprint(
                    repository=request_cockpit_api.repo['name'],
                    blueprint=blueprint)

                request_cockpit_api.execute_blueprint(
                    repository=request_cockpit_api.repo['name'],
                    blueprint=request_cockpit_api.blueprint['name'])
                request_cockpit_api.run_repository(
                    repository=request_cockpit_api.repo['name'])

                testCase_time = request_cockpit_api.get_run_status(
                    repository=request_cockpit_api.repo['name'],
                    run_key=request_cockpit_api.repo['key'],
                    bpFileName=bpFileName)
                if testCase_time:
                    base_test.Testcases_results[bpFileName] = []
                    base_test.Testcases_results[bpFileName].append(
                        ['TestCase Time', testCase_time])
                    for role_item in role[threading.current_thread().name]:
                        base_test.Testcases_results[bpFileName].append(
                            request_cockpit_api.get_service_data(
                                repository=request_cockpit_api.repo['name'],
                                role=role_item[0],
                                service=role_item[1]))
                else:
                    request_cockpit_api.testcase_time = '{:0.2f}'.format(
                        time.time() - request_cockpit_api.start_time)
                    error_message = 'ERROR : %s %s' % (
                        request_cockpit_api.blueprint['name'],
                        request_cockpit_api.blueprint['log'])
                    base_test.Testcases_results[bpFileName] = [[
                        'TestCase Time', request_cockpit_api.testcase_time
                    ], [
                        error_message, role[threading.current_thread().name][0]
                    ]]
            except Exception:
                base_test.logging.error(traceback.format_exc())

                # Add error message to xml result
                error_message = 'ERROR : %s %s' % (traceback.format_exc(
                ), request_cockpit_api.response_error_content)
                base_test.Testcases_results[bpFileName] = [[
                    'TestCase Time', 0
                ], [error_message, 'Unknown service']]

            request_cockpit_api.clean_cockpit()
            queue.task_done()
Example #32
def emptyqueue(queue):
    logging.debug(f"emptying queue of {queue.qsize()} items")
    while not queue.empty():
        queue.get()
        queue.task_done()
Example #33
def tx_make_process(txmanager, queue, thread_idx):
    """Function executed by threads to convert images to textures with txmake.

    Args:
    - queue (Queue.Queue): The task queue maintained by the main thread.
    """
    logger = txm_log()
    logger.debug('start')
    while not queue.empty():
        ui, txfile, txitem, args = queue.get()
        infile = args[-2]
        outfile = args[-1]

        if txfile.is_rtxplugin:
            queue.task_done()
            return

        logger.debug('%r', args)
        if not txfile.done_callback:
            logger.warning('Unexpected done callback = %r: %s',
                           txfile.done_callback, txfile.input_image)

        txfile.set_item_state(txitem, STATE_PROCESSING)
        txmanager.send_txmake_progress(txfile, txitem, txitem.state)
        start_t = time.time()
        err_msg = ''
        win_os = (platform.system() == 'Windows')
        sp_kwargs = {
            'stdin': subprocess.PIPE,
            'stdout': subprocess.PIPE,
            'stderr': subprocess.PIPE,
            'shell': False
        }
        if win_os:
            sp_kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
            sp_kwargs['startupinfo'] = subprocess.STARTUPINFO()
            sp_kwargs['startupinfo'].dwFlags |= subprocess.STARTF_USESHOWWINDOW
        try:
            p = subprocess.Popen(args, **sp_kwargs)
        except Exception as err:
            logger.warning(' |_ failed to launch: %s\n    |_ args: %r', err,
                           args)
            txfile.set_item_state(txitem, STATE_ERROR)
        else:
            txmanager.subprocesses[thread_idx] = p
            lo = p.stdout.readline()
            le = p.stderr.readline()
            while lo or le:
                if lo:
                    logger.debug(lo)
                if le:
                    logger.debug(le)
                    err_msg += le
                lo = p.stdout.readline()
                le = p.stderr.readline()

        time.sleep(1.0)
        p.poll()  # get the return code

        if os.path.exists(outfile):
            stats = time.strftime('%Mm:%Ss',
                                  time.localtime(time.time() - start_t))
            txfile.set_item_state(txitem, STATE_EXISTS)
            logger.info('Converted in %s : %r', stats, outfile)

            # check time stamp for future dated input files
            # if time stamp is greater than "now", we
            # give the outfile the same time stamp as the
            # input outfile
            now_time = time.time()
            infile_time = os.path.getmtime(infile)
            if infile_time > now_time:
                logger.debug('Input file, %r, is from the future!', infile)
                os.utime(outfile, (infile_time, infile_time))
        else:
            if p.returncode in KILLED_SIGNALS:
                logger.debug('KILLED: %s', args)
                txfile.set_item_state(txitem, STATE_IN_QUEUE)
            else:
                txfile.set_item_state(txitem, STATE_ERROR)
                txfile.error_msg += err_msg
                logger.error('Failed to convert: %r', infile)
                logger.error('  |__ args: %r', args)
        txitem.update_file_size()
        txfile.update_file_size()

        # update txmanager and ui
        txmanager.send_txmake_progress(txfile, txitem, txitem.state)
        txmanager.subprocesses[thread_idx] = None

        # mark task done in task queue
        queue.task_done()

    logger.debug('empty queue = done')

    unblock(txmanager)
Example #34
def find_match(queue, id):
    # queue has a list of tasks assigned to CMM model
    # connection to wos ans csx databases
    csxdb = mysql.connector.connect(user='******',
                                    password='******',
                                    host='csxdb02',
                                    database='citeseerx',
                                    charset='utf8',
                                    use_unicode=True)
    wosdb = mysql.connector.connect(user='******',
                                    password='******',
                                    host='heisenberg',
                                    database='wos_tiny',
                                    charset='utf8',
                                    use_unicode=True)
    CSXcursor = csxdb.cursor(dictionary=True)
    WOScursor = wosdb.cursor(dictionary=True)
    CSXCitationsCursor = csxdb.cursor(dictionary=True)
    while (True):
        if queue.empty():
            break
        shared_cit = 0
        prevID = None
        checkedList = set()
        csx_paperID = queue.get()
        if csx_paperID is None:
            break
        try:
            #print(csx_paperID)
            CSXCitationsCursor.execute(cmd_citations % (csx_paperID))
            CSXcitations = CSXCitationsCursor.fetchall()
            # making bow of csx reference titles
            csx_citations_titles = ' '.join(
                mystring(c['title']) for c in CSXcitations)
            csx_bow = normalize(csx_citations_titles).split()
            counter = 0
            brk = False
            for csx_citation in CSXcitations:
                counter += 1
                # we only process 30 citations for efficiency
                if counter > 30:
                    break
                csx_citation['authors'] = parse_csx_authors(
                    csx_citation['authors'])
                csx_citation['abstract'] = ''
                start = 0
                label = 1
                if csx_citation['title'] is not None:
                    s = Search(using=client, index=WoS_citations_index).query(
                        "match", citedTitle=csx_citation['title'])
                    # we process at most 1000 candidate citations
                    while not brk and label == 1 and start < 1000:
                        s = s[start:start + page]
                        response = s.execute()
                        if len(response) == 0:
                            brk = True
                        for hit in response:
                            if hit['paperid'] not in checkedList:
                                checkedList.add(hit['paperid'])
                                wos_citation = {}
                                wos_citation['title'] = hit['citedTitle']
                                wos_citation['year'] = hit['year']
                                wos_citation['abstract'] = ''
                                wos_citation['pages'] = hit['page']
                                wos_citation['volume'] = hit['volume']
                                # check citation matching
                                features = SimilarityProfile.calcFeatureVector(
                                    csx_citation, csx_citation['authors'],
                                    wos_citation,
                                    parse_wos_authors(hit['citedAuthor']))
                                label = clf.predict([features])[0]
                                if label == 1:
                                    WOScursor.execute(cmd_citers %
                                                      (hit['paperid']))
                                    WOS_paper = WOScursor.fetchall()[0]
                                    CSXcursor.execute(cmd_total_paper %
                                                      (csx_paperID))
                                    CSX_paper = CSXcursor.fetchall()[0]
                                    # check for title similarity
                                    title1 = '%x' % Simhash(
                                        get_features(
                                            normalize(
                                                mystring(CSX_paper['title'])))
                                    ).value
                                    title2 = '%x' % Simhash(
                                        get_features(
                                            normalize(
                                                mystring(WOS_paper['title'])))
                                    ).value
                                    dist = distance.nlevenshtein(
                                        title1, title2)
                                    if dist < theta_title:
                                        with open(output_file,
                                                  'a') as match_file:
                                            match_file.write(CSX_paper['id'] +
                                                             ' ' +
                                                             WOS_paper['uid'] +
                                                             '\n')
                                            match_file.flush()
                                            brk = True
                                            break
                                    # check for reference titles similarity
                                    else:
                                        WOScursor.execute(cmd_cited %
                                                          (WOS_paper['uid']))
                                        WOScitations = WOScursor.fetchall()
                                        citations_similarity = compare_jaccard_citations(
                                            WOScitations, csx_bow)
                                        if citations_similarity > theta_ref:
                                            with open(output_file,
                                                      'a') as match_file:
                                                match_file.write(
                                                    CSX_paper['id'] + ' ' +
                                                    WOS_paper['uid'] + '\n')
                                                match_file.flush()
                                                brk = True
                                                break
                        start = start + len(response)
                if brk:
                    break
            queue.task_done()
        except Exception:
            queue.task_done()
            print("-" * 60)
            print('csx paper id:', csx_paperID)
            print(str(traceback.format_exc()))
            print(str(sys.exc_info()[0]))
            print("-" * 60)
            csxdb.close()
            wosdb.close()
Example #35
 def run(self):
     print("Starting " + self.name)
     process_data(self.name, self.queue)
     print("Exiting " + self.name)
     self.queue.task_done()
Example #36
def dryer(queue):
    while True:
        dish = queue.get()
        print('drying', dish, 'dish')
        time.sleep(2)
        queue.task_done()
Example #37
def sort_list(unsorted_list, sorted_list):

    num = queue.get()
    time.sleep(num)
    sorted_list.append(num)
    queue.task_done()
Example #38
def download_worker():
    while True:
        url = queue.get()
        download_file(url, SAVE_DIR)
        queue.task_done()
Example #39
async def consume(queue):
    while True:
        item = await queue.get()
        print('consuming {}...'.format(item))
        await asyncio.sleep(random.random())
        queue.task_done()
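
A minimal sketch of driving the coroutine above with asyncio.Queue; the main() wrapper and the five-item range are illustrative assumptions:

import asyncio

async def main():
    q = asyncio.Queue()
    consumer_task = asyncio.create_task(consume(q))
    for i in range(5):
        await q.put(i)
    await q.join()          # waits until task_done() has matched every put()
    consumer_task.cancel()  # the consumer loops forever, so cancel it

asyncio.run(main())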
Example #40
    def getData(queue):
        data = queue.get()
        queue.task_done()

        return data
Example #41
def threader():
    while True:
        worker = q.get()
        scan(worker)
        q.task_done()
Example #42
def worker():
    while True:
        item = queue.get()
        if item is None:
            break
        print(item)
        queue.task_done()
Example #43
def square(queue):
    while True:
        value = queue.get()
        print(value, " -> ", value * value)
        queue.task_done()
Example #44
    if not path.exists(client_path):
        raise ValueError('wrong path to feed-client was provided')

    stdout_queue = queue.Queue(maxsize=10)
    stdin_queue = queue.Queue(maxsize=10)

    feed_client_proc = FeedProcess(client_path)

    reader = RwThread(feed_client_proc.p.stdout, stdout_queue)
    writer = RwThread(feed_client_proc.p.stdin, stdin_queue, 'w')
    reader.start()
    writer.start()

    command = {'test': 'test'}

    stdin_queue.put(command)

    print('stdin queue size {}'.format(stdin_queue.qsize()))

    print('stdout queue size {}'.format(stdout_queue.qsize()))

    while not stdout_queue.empty():
        print(stdout_queue.get(timeout=5))
        stdout_queue.task_done()

    reader.join(timeout=5)
    print(reader.is_alive())
    writer.join(timeout=5)
    print(writer.is_alive())
Example #45
def parallel_compute(queue, return_queue, shmem_buffer, shmem_results, size_x,
                     size_y, len_filelist, operation):
    #queue, shmem_buffer, shmem_results, size_x, size_y, len_filelist = worker_args

    # buffer = shmem_as_ndarray(shmem_buffer).reshape((size_x, size_y, len_filelist))
    buffer = shmem_buffer.to_ndarray()
    # result_buffer = shmem_as_ndarray(shmem_results).reshape((size_x, size_y))
    result_buffer = shmem_results.to_ndarray()

    logger = logging.getLogger("ParallelImcombine")
    logger.debug("Operation: %s, #samples/pixel: %d" %
                 (operation, len_filelist))

    while (True):
        line = queue.get()
        if (line is None):
            queue.task_done()
            break

        if (operation == "median"):
            result_buffer[line, :] = numpy.median(buffer[line, :, :], axis=1)

        elif (operation == "medsigclip"):
            # Do not use (yet), is slow as hell
            # (maskedarrays are pure python, not C as all the rest)

            #print buffer[line,:,:].shape
            _sigma_plus = numpy.ones(shape=(buffer.shape[1],
                                            buffer.shape[2])) * 1e9
            _sigma_minus = numpy.ones(shape=(buffer.shape[1],
                                             buffer.shape[2])) * 1e9
            _median = numpy.median(buffer[line, :, :], axis=1)

            nrep = 3
            valid_pixels = numpy.ma.MaskedArray(buffer[line, :, :])

            for rep in range(nrep):

                _median_2d = _median.reshape(_median.shape[0],
                                             1).repeat(buffer.shape[2], axis=1)
                _min = _median_2d - 3 * _sigma_minus
                _max = _median_2d + 3 * _sigma_plus

                #valid_pixels = numpy.ma.masked_inside(buffer[line,:,:], _min, _max)
                valid = (buffer[line, :, :] > _min) & (buffer[line, :, :] <
                                                       _max)

                valid_pixels = numpy.ma.array(buffer[line, :, :], mask=valid)
                #valid_pixels = numpy.ma.MaskedArray(buffer[line,:,:], valid)

                #print _min.shape, valid.shape, valid_pixels.shape

                #if (numpy.sum(valid, axis=1).any() <= 0):
                #    break

                #_median = numpy.median(buffer[line,:,:][valid], axis=1)
                _median = numpy.median(valid_pixels, axis=1)
                if (rep < nrep - 1):
                    #_sigma_plus = scipy.stats.scoreatpercentile(buffer[line,:,:][valid], 84) - _median
                    #_sigma_minus = _median - scipy.stats.scoreatpercentile(buffer[line,:,:][valid], 16)
                    _sigma_plus = scipy.stats.scoreatpercentile(
                        valid_pixels, 84) - _median
                    _sigma_minus = _median - scipy.stats.scoreatpercentile(
                        valid_pixels, 16)

            result_buffer[line, :] = _median

        elif (operation == "sigclipx"):
            stdout_write(".")
            rep_count = 2

            _line = buffer[line, :, :].astype(numpy.float32)
            # print _line.shape

            mask = numpy.isfinite(_line)

            #print "line.shape=",_line.shape
            # numpy.savetxt("line_block_%d.dat" % (line), _line)

            def sigclip_pixel(pixelvalue):
                mask = numpy.isfinite(pixelvalue)
                old_mask = mask
                rep = 0
                while (rep < rep_count and numpy.sum(mask) > 3):
                    old_mask = mask

                    mss = scipy.stats.scoreatpercentile(
                        pixelvalue[mask], [16, 50, 84])

                    lower = mss[1] - 3 * (mss[1] - mss[0])  # median - 3*sigma
                    upper = mss[1] + 3 * (mss[2] - mss[1])  # median + 3*sigma

                    mask = (pixelvalue > lower) & (pixelvalue < upper)

                    rep += 1
                    if (rep == rep_count or numpy.sum(mask) < 3):
                        mask = old_mask

                return numpy.mean(pixelvalue[mask])

            result_buffer[line, :] = [
                sigclip_pixel(_line[x, :]) for x in range(_line.shape[0])
            ]

        elif (operation == "sigmaclipmean"):
            _line = buffer[line, :, :].astype(numpy.float64)
            output = numpy.zeros(shape=(_line.shape[0]))
            podi_cython.sigma_clip_mean(_line, output)
            result_buffer[line, :] = output

        elif (operation == "sigmaclipmedian"):
            _line = buffer[line, :, :].astype(numpy.float64)
            output = numpy.zeros(shape=(_line.shape[0]))
            podi_cython.sigma_clip_median(_line, output)
            result_buffer[line, :] = output

        elif (operation == "weightedmean"):
            _line = buffer[line, :, :].astype(numpy.float32)
            result_buffer[line, :] = weighted_mean(_line)

        elif (operation == "medclip"):
            intermediate = numpy.sort(buffer[line, :, :], axis=1)
            result_buffer[line, :] = numpy.median(intermediate[:, 1:-2],
                                                  axis=1)

        elif (operation == "min"):
            result_buffer[line, :] = numpy.min(buffer[line, :, :], axis=1)

        elif (operation == "max"):
            result_buffer[line, :] = numpy.max(buffer[line, :, :], axis=1)

        elif (operation == "nanmean"):
            result_buffer[line, :] = scipy.stats.nanmean(buffer[line, :, :],
                                                         axis=1)

        elif (operation == "nanmedian"):
            result_buffer[line, :] = scipy.stats.nanmedian(buffer[line, :, :],
                                                           axis=1)

        elif (operation == "nanmedian.bn"):
            x = numpy.array(buffer[line, :, :], dtype=numpy.float32)
            result_buffer[line, :] = bottleneck.nanmedian(x, axis=1)
            x = None
            del x
        elif (operation == "nanmean.bn"):
            x = numpy.array(buffer[line, :, :], dtype=numpy.float32)
            result_buffer[line, :] = bottleneck.nanmean(x, axis=1)
            x = None
            del x
        else:
            result_buffer[line, :] = numpy.mean(buffer[line, :, :], axis=1)

        return_queue.put(line)
        queue.task_done()

    buffer = None
    shmem_buffer = None
    del shmem_buffer
    del buffer
    sys.exit(0)
Example #46
def find_match(queue, clf):
    # connection to target and reference database
    client = Elasticsearch(timeout=200, port=ref_index_port)
    csxdb = mysql.connector.connect(user='******',
                                    password='******',
                                    host='csxstaging01',
                                    database='citeseerx2',
                                    charset='utf8',
                                    use_unicode=True)
    CSXcursor = csxdb.cursor(dictionary=True)
    CSXauthorCursor = csxdb.cursor(dictionary=True)
    REFdb = mysql.connector.connect(user='******',
                                    password='******',
                                    host='csxstaging01',
                                    database='wos2017_12',
                                    charset='utf8',
                                    use_unicode=True)
    REFcursor = REFdb.cursor(dictionary=True)

    while (True):
        if queue.empty():
            break
        try:
            csxID = queue.get()
            if csxID is None:
                queue.task_done()
                break
            CSXcursor.execute(cmd_paper % (csxID))
            CSXPaper = CSXcursor.fetchone()
            if CSXPaper is None:
                queue.task_done()
                continue
            CSXauthorCursor.execute(cmd_author % (csxID))
            CSXauthors = CSXauthorCursor.fetchall()
            s = Search(using=client, index=ref_index)
            if CSXPaper['title'] is None or len(CSXPaper['title']) < 20:
                if len(CSXauthors) > 0 and CSXauthors[0][
                        'lname'] is not None and CSXPaper['year'] is not None:
                    s.query = Q('bool',
                                should=[
                                    Q('match', year=CSXPaper['year']),
                                    Q('match', authors=CSXauthors[0]['lname'])
                                ])
                else:
                    if CSXPaper['abstract'] is not None:
                        s = s.query("match", abstract=CSXPaper['abstract'])
                    else:
                        queue.task_done()
                        continue
            else:
                s = s.query("match", title=CSXPaper['title'])
            response = s.execute()
            for hit in response:
                REFcursor.execute(cmd_REFpaper % (hit['id']))
                REFpaper = REFcursor.fetchone()
                REFcursor.execute(cmd_REFauthor % (hit['id']))
                REFauthors = REFcursor.fetchall()
                features = SimilarityProfile.calcFeatureVector(
                    REFpaper, REFauthors, CSXPaper, CSXauthors)
                label = clf.predict([features])
                if label == 1:
                    with open("results.txt", "a") as g:
                        fcntl.flock(g, fcntl.LOCK_EX)
                        g.write(csxID + '\t' + hit['id'] + '\n')
                        fcntl.flock(g, fcntl.LOCK_UN)
                    break
            queue.task_done()
        except Exception:
            queue.task_done()
            print("-" * 60)
            print(csxID)
            print(traceback.format_exc())
            print(sys.exc_info()[0])
            print("-" * 60)
Example #47
async def worker_send_files(name, queue):
    global stats_total_bytes_sent, stats_preprocessed_files_sent, stats_not_preprocessed_files_sent

    # Process events from the queue on the main thread.
    logging.debug(f'Worker {name} started')

    last = 0

    try:
        while True:

            if time.time() - last > TOO_LONG:
                logging.info(f'worker_took: {time.time() - last}')

            file_system_event = await pop_event(False)
            logging.debug(f'event {file_system_event} popped from queue')

            # takes ~0.0003s
            # start_file_read = time.time()
            # f = open(file_system_event.src_path, 'rb')
            # filelike = f.read()
            # f.close()
            # logging.info(f'file_read_took: {time.time()-start_file_read}')

            filelike = file_system_event.src_path

            if FAKE_UPLOAD:
                # (only 1 concurrent upload)
                fake_upload_time = (file_system_event.file_size * 8) / FAKE_UPLOAD_SPEED_BITS_PER_SECOND
                logging.info(f'Fake sleep for: {fake_upload_time}')
                await asyncio.sleep(fake_upload_time)
            else:
                response = await send_file(file_system_event, filelike, stream_id_tag, stream_id, username, password,
                                           host)
                logging.debug(f'Server response body: {response}')

            last = time.time()

            if DELETE:
                start_delete = time.time()
                if False:
                    # this takes ~0.005...at ~10Hz this is too slow?
                    # close_fds makes the parent process' file handles inaccessible for the child.
                    proc = Popen(f'rm {file_system_event.src_path}', shell=True, stdin=None, stdout=None, stderr=None,
                                 close_fds=True)
                else:
                    os.unlink(file_system_event.src_path)
                logging.debug(f'starting delete took: {start_delete - time.time()}')

            stats_total_bytes_sent += file_system_event.file_size
            if file_system_event.preprocessed:
                stats_preprocessed_files_sent += 1
            else:
                stats_not_preprocessed_files_sent += 1

            queue.task_done()

            queue_client.notify_popped(file_system_event.index)

            logging.info(
                f'total_bytes_sent: {stats_total_bytes_sent} preprocessed_files_sent: {stats_preprocessed_files_sent} raw_files_sent: {stats_not_preprocessed_files_sent}')

            # Benchmarking hack
            if stats_not_preprocessed_files_sent + stats_preprocessed_files_sent == QUIT_AFTER:
                logging.info(
                    f'Queue_is_empty. Duration since first event: {time.time() - timestamp_first_event} - total_bytes_sent: {stats_total_bytes_sent} preprocessed_files_sent: {stats_preprocessed_files_sent} raw_files_sent: {stats_not_preprocessed_files_sent} stats_total_preproc_duration: {stats_total_preproc_duration}')
                # master_queue.plot()
                quit()

    except Exception as ex:
        logging.error(f'Exception on {name}: {traceback.format_exc()}')
        print(ex)
Example #48
def myTask(queue):
    value = queue.get()
    print("Process {} Popped {} from the shared Queue".format(
        multiprocessing.current_process().pid, value))
    queue.task_done()
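
Note that task_done() exists on multiprocessing.JoinableQueue but not on a plain multiprocessing.Queue, so a driver sketch for myTask might look like this; the value 42 is illustrative:

import multiprocessing

if __name__ == '__main__':
    q = multiprocessing.JoinableQueue()
    q.put(42)
    p = multiprocessing.Process(target=myTask, args=(q,))
    p.start()
    q.join()    # blocks until the child process calls task_done()
    p.join()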
Example #49
def consumer(thread_id):
  """ Consumer: Take the item out of the queue and notify that task is done using task_done() """
  while True:
    item = queue.get()
    print (f'consumer {thread_id} notify: item {item} is popped from queue')
    queue.task_done()
Example #50
def drain(queue):
    while not queue.empty():
        queue.get()
        queue.task_done()
    return
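
Because drain pairs every get() with a task_done(), a queue.join() pending elsewhere unblocks once the queue is empty; a small usage sketch, assuming the standard queue.Queue:

import queue as queue_mod
import threading

q = queue_mod.Queue()
for i in range(3):
    q.put(i)
threading.Thread(target=drain, args=(q,)).start()
q.join()    # returns as soon as drain() has emptied the queue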
Example #51
def do_job():
    while True:
        i = queue.get()
        time.sleep(1)
        print('index %s ,current: %s ' % (i, threading.currentThread()))
        queue.task_done()
Example #52
def compute_illumination_frame(
    queue,
    return_queue,
    tmp_dir=".",
    redo=False,
    mask_guide_otas=True,
    mask_regions=None,
    bpm_dir=None,
    wipe_cells=None,
    ocdclean=False,
    apply_correction=True,
    additional_sextractor_options=None,
    conf_file=None,
    param_file=None,
):

    root = logging.getLogger("CompIllumination")

    while (True):
        cmd = queue.get()
        if (cmd is None):
            root.debug("Received shutdown command")
            queue.task_done()
            return

        fitsfile = cmd
        root.debug("Received new work: %s" % (fitsfile))

        tempfiles = []

        # get some info so we know what to call the output frame
        hdulist = pyfits.open(fitsfile)
        obsid = hdulist[0].header['OBSID']
        logger = logging.getLogger("CompIllum(%s)" % (obsid))

        #
        # Prepare the input file
        # - mask out guide OTAs
        # - apply bad pixel masks
        # - mask large regions according to user ds9 specs
        #

        # mask out guide-otas
        input2sex_file = fitsfile
        input_file_modified = False
        #print "XXX=", mask_guide_otas, type(mask_regions)
        ota_list = [hdulist[0]]
        for ext in hdulist[1:]:

            if (not is_image_extension(ext)):
                # input_file_modified = True
                # don't save file if all we do is skip table extensions
                continue

            if (ext.header['CELLMODE'].find("V") >= 0):
                # This is a video extension, do not use it
                input_file_modified = True
                continue

            if (mask_guide_otas):
                if (is_guide_ota(hdulist[0], ext)):
                    # do not include
                    input_file_modified = True
                    continue

            if (mask_regions is not None):
                logger.info("Masking regions")
                mask_regions_using_ds9_regions(ext, mask_regions)
                input_file_modified = True

            if (bpm_dir is not None):
                bpmfile = "%s/bpm_xy%s.reg" % (bpm_dir, ext.name[3:5])
                logger.info("apply bpm from %s" % (bpmfile))
                if (os.path.isfile(bpmfile)):
                    mask_broken_regions(ext.data, bpmfile)
                    input_file_modified = True

            #print wipe_cells
            if (wipe_cells is not None):
                wipecells(ext, wipe_cells)

            ota_list.append(ext)

        if (input_file_modified):
            hdulist = pyfits.HDUList(ota_list)
            input2sex_file = "%s/%s" % (sitesetup.swarp_singledir,
                                        os.path.basename(fitsfile))
            hdulist.writeto(input2sex_file, clobber=True)

        # Run Sextractor
        segmask = "%s/%s_segmentation.fits" % (tmp_dir, obsid)
        masked_frame = "%s/%s_masked.fits" % (tmp_dir, obsid)

        #if (not (os.path.isfile(segmask) and os.path.isfile(masked_frame)) or redo):
        if (not os.path.isfile(segmask) or redo):
            logger.info(
                "Starting work (source detection, masking, normalization) ...")
        else:
            logger.info("No work necessary, re-using existing data ...")

        if (conf_file is None):
            conf_file = "%s/config/illumcorr.conf" % (sitesetup.exec_dir)
        if (param_file is None):
            param_file = "%s/config/illumcorr.param" % (sitesetup.exec_dir)
        if (not os.path.isfile(segmask) or redo):
            logger.debug("Creating segmentation mask: %s" % (segmask))
            sex_cmd = """%(sex)s -c %(conf)s
                         -PARAMETERS_NAME %(params)s
                         -CHECKIMAGE_TYPE SEGMENTATION
                         -CHECKIMAGE_NAME %(segfile)s
                         -FILTER_NAME %(filtername)s
                         %(additional_opts)s
                         %(image)s
                """ % {
                'sex':
                sitesetup.sextractor,
                'conf':
                conf_file,
                'params':
                param_file,
                'filtername':
                "%s/config/gauss_5.0_9x9.conv" % (sitesetup.exec_dir),
                'segfile':
                segmask,
                #                'image': fitsfile,
                'image':
                input2sex_file,
                'additional_opts':
                "" if (additional_sextractor_options is None) else
                additional_sextractor_options,
            }

            logger.debug("Starting Sextractor:\n%s" %
                         (" ".join(sex_cmd.split())))

            start_time = time.time()
            try:
                ret = subprocess.Popen(sex_cmd.split(),
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
                (sex_stdout, sex_stderr) = ret.communicate()
                if (ret.returncode != 0):
                    print(sex_stdout)
                    print(sex_stderr)
            except OSError as e:
                print("Execution failed:", e, file=sys.stderr)
            end_time = time.time()
            logger.debug("SourceExtractor returned after %.3f seconds" %
                         (end_time - start_time))
        else:
            logger.debug("segmentation mask (%s) exist, re-using it" %
                         (segmask))

        #
        # Now use the mask and the input frame to mask out
        # all sources in the frame. At the same time, rescale the frame by
        # dividing the intensity by the global median skylevel
        #
        hdu_out = []
        hdu_out.append(hdulist[0])

        logger.debug("MASK-regions:\n%s" % (str(mask_regions)))
        if (not os.path.isfile(masked_frame) or redo):
            logger.debug("Preparing masked frame: %s" % (masked_frame))

            mask_hdu = pyfits.open(segmask)

            for ext in hdulist:
                if (not is_image_extension(ext)):
                    continue

                # Now search for the right extension in the mask frame
                found_mask = False
                for mask_ext in mask_hdu:
                    if (ext.name == mask_ext.name):
                        # found it
                        found_mask = True
                        logger.debug("found the mask for extension %s" %
                                     (ext.name))

                        logger.debug("smoothing extension %s of %s" %
                                     (ext.name, fitsfile))
                        mask_grown = scipy.ndimage.filters.convolve(
                            input=mask_ext.data,
                            weights=numpy.ones((10, 10)),
                            output=None,
                            mode='constant',
                            cval=0.0)

                        # Set all detected pixels to NaN to ignore them during
                        # the final imcombine
                        ext.data[mask_grown > 0] = numpy.NaN

                        if (apply_correction):
                            # Rescale with the global sky-level
                            # maybe better to re-compute based on the segmentation mask
                            ext.data /= hdulist[0].header['SKYLEVEL']

                        hdu_out.append(ext)

                if (not found_mask):
                    logger.debug("Can't find extension %s in mask" %
                                 (ext.name))

            logger.debug("writing masked frame to %s" % (masked_frame))

            clobberfile(masked_frame)
            hdulist_out = pyfits.HDUList(hdu_out)
            hdulist_out.writeto(masked_frame, clobber=True)

            mask_hdu.close()

        if (ocdclean):
            logger.debug("OCDmode: Deleting segmentation frame %s" % (segmask))
            clobberfile(segmask)

        if (input_file_modified):
            clobberfile(input2sex_file)

        return_queue.put(masked_frame)
        queue.task_done()

        logger.debug("done with this one, taking next frame")

    root.debug("Terminating process")
    return
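A hedged sketch of how this worker might be driven; the process count, temp directory, and input frames are illustrative, and the real pipeline's setup may differ.

import multiprocessing

job_queue = multiprocessing.JoinableQueue()
result_queue = multiprocessing.Queue()

workers = []
for _ in range(4):
    p = multiprocessing.Process(
        target=compute_illumination_frame,
        args=(job_queue, result_queue),
        kwargs={'tmp_dir': '/scratch/illum'},  # illustrative path
    )
    p.start()
    workers.append(p)

frames = ['frame1.fits', 'frame2.fits']  # illustrative inputs
for fitsfile in frames:
    job_queue.put(fitsfile)
for _ in workers:
    job_queue.put(None)  # one shutdown sentinel per worker

job_queue.join()
masked_frames = [result_queue.get() for _ in frames]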
Example #53
0
async def preprocess_async_loop_service(name, queue):
    global stats_total_preproc_duration
    count = 0
    try:
        proc = await asyncio.create_subprocess_shell(
            'python3 -m haste.desktop_agent.preprocessor',
            stdout=asyncio.subprocess.PIPE,
            stdin=asyncio.subprocess.PIPE)

        while True:
            file_system_event = await pop_event(True)

            if file_system_event is not None:
                logging.info(f'preprocessing: {file_system_event.src_path}')

                output_filepath = '/tmp/' + file_system_event.src_path.split('/')[-1]

                line_to_send = f"{file_system_event.src_path},{output_filepath}\n"

                # add to the buffer
                proc.stdin.write(line_to_send.encode())
                await proc.stdin.drain()

                stdoutline = await proc.stdout.readline()
                stdoutline = stdoutline.decode().strip()
                logging.info(f'stdout from preprocessor: {stdoutline}')

                dur_preproc = float(stdoutline.split(',')[0])
                dur_waiting = float(stdoutline.split(',')[1])

                logging.debug(f'preprocessor waiting: {dur_waiting}')

                file_system_event2 = SimpleNamespace()
                file_system_event2.timestamp = time.time()
                file_system_event2.src_path = output_filepath

                file_system_event2.file_size = vironova_image_compression.ben_images.file_utils.get_file_size(output_filepath)

                file_system_event2.golden_bytes_reduction = (
                    file_system_event.file_size - file_system_event2.file_size
                ) / dur_preproc

                stats_total_preproc_duration += dur_preproc

                file_system_event2.preprocessed = True
                file_system_event2.index = file_system_event.index

                event_to_re_add = file_system_event2

                count += 1
                logging.info(f'preprocessed {count} files')

                if DELETE:
                    os.unlink(file_system_event.src_path)

            else:
                # Nothing was waiting to be preprocessed; re-add the
                # original (None) event so the loop below can sleep briefly.
                event_to_re_add = file_system_event

            await push_event(event_to_re_add)
            queue.task_done()

            # We've preprocessed everything for now. just re-add the original event and 'sleep' a little.
            if file_system_event is None:
                await asyncio.sleep(0.2)

    except Exception as ex:
        logging.error(traceback.format_exc())
        raise ex
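For contrast with the thread-based examples, a minimal asyncio.Queue consumer that honors the same task_done()/join() contract; every name here is illustrative rather than taken from the agent above.

import asyncio

async def consumer(q: asyncio.Queue) -> None:
    while True:
        item = await q.get()
        try:
            print(f'processed {item}')
        finally:
            q.task_done()  # always acknowledge, even if processing raises

async def main() -> None:
    q = asyncio.Queue()
    worker = asyncio.create_task(consumer(q))
    for i in range(5):
        q.put_nowait(i)
    await q.join()   # resumes once every item has been acknowledged
    worker.cancel()  # the consumer loops forever; cancel it when drained

asyncio.run(main())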
Example #54
0
def process_ordering(queue):
    while True:
        order_next = queue.get()
        print(f'Working order: {order_next.desc}')
        queue.task_done()
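The order objects only need a desc attribute; a minimal driver might look like this, where Order and the sample descriptions are illustrative.

import queue as stdlib_queue
import threading
from dataclasses import dataclass

@dataclass
class Order:
    desc: str

orders = stdlib_queue.Queue()
threading.Thread(target=process_ordering, args=(orders,), daemon=True).start()

for desc in ('2x espresso', '1x croissant'):
    orders.put(Order(desc))

orders.join()  # returns once every order has been marked done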
Example #55
0
def consumidor_de_dados(queue):
    # qsize() is only a snapshot, so this exit test is safe only with a
    # single consumer and no producer still adding items
    while queue.qsize() > 0:
        valor = queue.get()
        print(colorama.Fore.RED + f"Processed value {valor*2}.", flush=True)
        time.sleep(1)
        queue.task_done()
Example #56
0
    def run(self):
        # run() of a consumer Thread subclass; the queue is module-level.
        # Note the item is acknowledged before it is processed.
        while True:
            num = queue.get()
            queue.task_done()
            print("Consumed", num)
            time.sleep(random.random())
Example #57
0
def pinger(que):
    while True:
        ip = que.get()
        mac = ping2mac(ip)
        if mac:
            print(ip, mac)
        que.task_done()  # was queue.task_done(), which referenced the wrong name
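ping2mac() is external to this snippet; assuming it maps an IP address to a MAC string (or a falsy value on failure), the scanner might be driven like this, with the subnet and thread count purely illustrative.

import queue as stdlib_queue
import threading

que = stdlib_queue.Queue()
for _ in range(32):
    threading.Thread(target=pinger, args=(que,), daemon=True).start()

for host in range(1, 255):
    que.put(f'192.168.1.{host}')  # illustrative /24 subnet

que.join()  # returns once every address has been probed and acknowledged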