def readDataFromSerial():
    rectMsg = list()
    try:
        if queue.empty(): return 0
        if queue.qsize() < len(expressoesRegulares): return 0

        # skip ahead to the start of a message frame (the "TS:" line)
        line = queue.get(True, 1)
        while "TS:" not in line:
            if queue.empty(): return 0
            line = queue.get(True, 1)

        rectMsg.append(line)

        # read the remaining lines of the frame
        for _ in range(len(expressoesRegulares) - 1):
            if queue.empty(): return 0
            line = queue.get(True, 1)
            rectMsg.append(line)

    except Exception as err:
        print(err)
        portaSerial.flushInput()
        return 0
    portaSerial.flushInput()
    print(rectMsg)
    return rectMsg
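A general note on the `empty()`-then-`get()` pattern used throughout these examples: on a queue shared between threads, `empty()` and `qsize()` are only snapshots, so another consumer can take the item between the check and the `get()`. A minimal sketch of the race-free standard-library idiom (the names `q` and `try_read` are illustrative):

import queue

q = queue.Queue()

def try_read():
    try:
        # get_nowait() raises queue.Empty instead of blocking,
        # so no empty() check (and no race window) is needed
        return q.get_nowait()
    except queue.Empty:
        return None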
Example #2
def record_send(queue, lock, ffmpeg):
    try:
        print(ffmpeg.cmd)  # log the assembled command line (ffmpy exposes it as .cmd)
        CamLock.acquire()
        video, stderr = ffmpeg.run(stdout=subprocess.PIPE)
        CamLock.release()
        dest = []
        lock.acquire()
        while not queue.empty():
            cid = queue.get()
            if cid not in dest:
                dest.append(cid)
                bot.send_video(cid, video)
        lock.release()
    except FFRuntimeError as e:
        CamLock.release()
        error = 'An error occurred while processing your request :('
        dest = []
        lock.acquire()
        while not queue.empty():
            cid = queue.get()
            if cid not in dest:
                dest.append(cid)
                bot.send_message(cid, error)
        lock.release()
        print(str(e))
Example #3
def count_dir_size(path):
    upyun_iter = None
    size = 0
    while True:
        # 'g2gCZAAEbmV4dGQAA2VvZg' is UpYun's end-of-listing iter marker
        while upyun_iter != 'g2gCZAAEbmV4dGQAA2VvZg':
            res = getlist(path, upyun_iter)
            if res:
                upyun_iter = res[-1]
                for i in res[:-1]:
                    try:
                        if not i['name']:
                            continue
                        new_path = path + i['name'] if path == '/' else path + '/' + i['name']
                        if i['type'] == 'F':
                            queue.put(new_path)
                        elif i['type'] == 'N':
                            size += int(i['size'])
                            print('size ++ ----> {0} B'.format(size))
                    except Exception as e:
                        print(e)
            else:
                if not queue.empty():
                    path = queue.get()
                    upyun_iter = None
                    queue.task_done()
        else:
            if not queue.empty():
                path = queue.get()
                upyun_iter = None
                queue.task_done()
            else:
                break
    return size / 1024 / 1024 / 1024
Example #4
def print_file_with_iter(path):
    upyun_iter = None
    while True:
        while upyun_iter != 'g2gCZAAEbmV4dGQAA2VvZg':
            res = getlist(path, upyun_iter)
            if res:
                upyun_iter = res[-1]
                for i in res[:-1]:
                    try:
                        if not i['name']:
                            continue
                        new_path = path + i['name'] if path == '/' else path + '/' + i['name']
                        if i['type'] == 'F':
                            queue.put(new_path)
                        elif i['type'] == 'N':
                            print(new_path)
                            record_request(new_path, True)
                    except Exception as e:
                        print(e)
            else:
                if not queue.empty():
                    path = queue.get()
                    upyun_iter = None
                    queue.task_done()
        else:
            if not queue.empty():
                path = queue.get()
                upyun_iter = None
                queue.task_done()
            else:
                break
Example #5
def get_list(path):
    upyun_iter = None
    up = upyun.UpYun(target_bucket, target_username, target_password)
    while True:
        while upyun_iter != 'g2gCZAAEbmV4dGQAA2VvZg':
            res = sort_data(path, upyun_iter)
            if res:
                upyun_iter = res[-1]
                for i in res[:-1]:
                    try:
                        if not i['name']:
                            continue
                        new_path = '/' + i['name'] if path == '/' else path + '/' + i['name']
                        if i['type'] == 'F':
                            queue.put(new_path)
                        elif i['type'] == 'N':
                            print(new_path)
                            push_tasks(new_path, up)
                        else:
                            sys.exit(0)
                    except Exception as e:
                        print(e)
            else:
                if not queue.empty():
                    path = queue.get()
                    upyun_iter = None
                    queue.task_done()
        else:
            if not queue.empty():
                path = queue.get()
                upyun_iter = None
                queue.task_done()
            else:
                break
Example #6
    def clear_queue(self, queue):
        while not queue.empty():
            queue.get()

        if queue.empty():
            log("Work queue EMPTY")
Example #7
def print_log(queue):
    if not queue.empty():
        import logging
        formatter = logging.Formatter("{asctime} {levelname} : {message}",
                                      style="{")
        while not queue.empty():
            record = queue.get()
            print(formatter.format(record))
Example #8
def my_task(name, queue):
    if not queue.empty():
        while not queue.empty():
            url = queue.get()
            print(f'Task {name} getting URL: {url}')
            et = ET()
            d = getPage(url)
            d.addCallback(success_callback, name, url, et)
            yield d
Example #9
def print_log(queue):
    if not queue.empty():
        import logging
        formatter = logging.Formatter("{asctime} {levelname} : {message}", style="{")
        while not queue.empty():
            record = queue.get()
            msg = formatter.format(record)
            print(msg)
            if msg.find("ERROR") > -1:
                raise Exception()
Example #10
def my_task(name, queue):
    if not queue.empty():
        while not queue.empty():
            url = queue.get()
            print(f"Task {name} getting URL: {url}")
            start_time = time.time()
            elapsed_time = time.time() - start_time  # ~0 here: measured before the fetch runs
            d = getPage(url)
            d.addCallback(success_callback, name, url, elapsed_time)
            yield d
Example #11

import numpy as np

def cal_geodesic_dist(kNN_mat, source_id, target_id):

    # use BFS search
    target_id = int(target_id)
    data_size = kNN_mat.shape[0]
    k = kNN_mat.shape[1]
    depth = 0
    # 0 for white, 1 for gray, 2 for black
    flag_array = np.zeros([data_size])
    path_array = np.zeros([data_size]) - 1
    import queue
    queue = queue.Queue(data_size)
    queue.put(source_id)
    # set the gray
    flag_array[source_id] = 1
    bFindIt = False

    while not queue.empty():
        # pop the next node from the queue and mark it black
        i = int(queue.get())
        flag_array[i] = 2

        for neighbor_id in kNN_mat[i]:
            # if not visited, else skip it
            neighbor_id = int(neighbor_id)
            if flag_array[neighbor_id] == 0:
                # set to gray and set the path
                flag_array[neighbor_id] = 1
                path_array[neighbor_id] = i
                #if multi_label_test[neighbor_id] == database_label[target_id]:
                if neighbor_id == target_id:
                    # the target is found: drain the queue and break the loop
                    while not queue.empty():
                        queue.get()
                    bFindIt = True
                    break

                else:
                    queue.put(neighbor_id)
    if not bFindIt:
        return data_size + 1

    result_id = int(neighbor_id)

    print("Result ID(same class as target id):%d" % (result_id))
    while path_array[result_id] != source_id:
        depth += 1
        result_id = int(path_array[result_id])
        print("child of:%d" % (result_id))
    print("Source ID:%d" % (source_id))
    return depth
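For reference, a toy invocation of the BFS above, assuming each row of kNN_mat lists the neighbor indices of that node (the data here is made up):

import numpy as np

# four nodes in a chain 0-1-2-3; each row holds k=2 neighbor indices
kNN_mat = np.array([[1, 1],
                    [0, 2],
                    [1, 3],
                    [2, 2]])
print(cal_geodesic_dist(kNN_mat, source_id=0, target_id=3))  # depth 2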
Example #12
    def _task(self):
        queue = self.queue
        # the queue can overflow

        with self.transaction_lock:
            while not queue.empty():
                queue.get_nowait()
                queue.task_done()

            queue.put(0)
            # INTERNAL: start counter for task

        while True:
            wait = self.default_execute_wait
            event = queue.get()

            if event is None:
                # STOP EVENT
                while not queue.empty():
                    queue.get_nowait()
                    queue.task_done()
                    # TODO: warning not queued event?
                    # TODO: just new stop flag?

                queue.task_done()
                # for stop event.
                return

            time.sleep(wait)
            # TODO: how to automate this sleep?
            # TODO: use a proper scheduler?

            with self.transaction_lock:
                current_transaction = self.current_transaction
                if current_transaction is None:
                    with self.transaction() as current_transaction:
                        pass

                if not current_transaction.operations:
                    queue.put(event + wait)
                    queue.task_done()
                    continue

                if event >= self.status.default_execute_wait:
                    self.current_transaction = None
                    self.execute_transaction(current_transaction)

                queue.put(0)
                queue.task_done()
Example #14
from queue import PriorityQueue

def ucs(source, target, graph):
    """ Uniform-cost graph search """
    queue = PriorityQueue() # fringe
    queue.put((0, source))

    parent = {source:None}
    visited = {}

    while not queue.empty():
        (d, v_in) = queue.get()

        if v_in not in visited or d < visited[v_in]:

            if v_in == target:
                return (d, build_path(parent, target))

            for v_out in graph.adj(v_in):
                cost = graph.distance(v_in, v_out) + d
                if v_out not in visited:
                    queue.put((cost, v_out))
                    parent[v_out] = v_in

            visited[v_in] = d

    return None
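A runnable sketch of how ucs might be driven; the Graph class and build_path helper below are stand-ins for whatever the surrounding project provides (adj() yields neighbors, distance() returns edge weights):

class Graph:
    def __init__(self, edges):
        self.edges = edges  # {u: {v: weight}}

    def adj(self, u):
        return self.edges.get(u, {})

    def distance(self, u, v):
        return self.edges[u][v]

def build_path(parent, target):
    # walk the parent pointers back to the source
    path = []
    while target is not None:
        path.append(target)
        target = parent[target]
    return list(reversed(path))

g = Graph({'a': {'b': 1, 'c': 4}, 'b': {'c': 1}, 'c': {}})
print(ucs('a', 'c', g))  # (2, ['a', 'b', 'c'])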
Example #15
    def start(self):
        "Start module to start reading files"
        # Create new threads
        thread1 = RouterThread(1, "Data Link 1", in_file1, delay1, queueList[0])
        thread2 = RouterThread(2, "Data Link 2", in_file2, delay2, queueList[1])
        thread3 = RouterThread(3, "Data Link 3", in_file3, delay3, queueList[2])

        # Start new Threads
        thread1.start()
        thread2.start()
        thread3.start()

        # Add threads to thread list
        self.threads.append(thread1)
        self.threads.append(thread2)
        self.threads.append(thread3)

        # Wait for all threads to complete
        for t in self.threads:
            t.join()
        print("Exiting Main Thread")

        packetParser = self.PacketParser()

        # Print output - Get bytes from Queue
        print("Printing items from each queue -");
        for queue in queueList:
            while not queue.empty():
                print("***********************************************************************************************************")
                packetParser.parsePacket( queue.get() )
Example #16

def shortest_path(graph, start, goal):
    ''' Return the shortest path from start to goal '''
    explored = []
    queue = PriorityQueue()
    root = (0, [start])
    queue.put(root)
    if (start == goal):
        return "Start = goal"
    while True:
        if queue.empty():
            raise Exception("No way Exception")
        current_distance, path = queue.get()
        current_point = path[-1]
        if current_point == goal:
            print('total {} path'.format(queue.qsize()))
            return current_distance, path
        if current_point not in explored:
            if current_point not in graph:
                continue
            list_point = graph[current_point]
            # go through all point can see, construct a new path and
            # push it into the queue
            for point in list_point:
                d = distance(current_point, point)
                new_path = list(path)
                new_path.append(point)
                queue.put((current_distance + d, new_path))

            # mark point as explored
            explored.append(current_point)
Example #17
def get_html(queue, mysql, crawl_kw, crawl_num):  # collect every URL link found on the page
    while not queue.empty():
        global RES_URL_LIST, CURRENT_URL_COUNT
        if len(RES_URL_LIST) >= crawl_num:  # stop early once enough data has been crawled
            print(len(RES_URL_LIST))
            print("Target count reached, stopping early")
            return
        url = queue.get()  # take the next URL from the queue
        CURRENT_URL_COUNT += 1
        print("[%s] URLs left in the queue; crawling page number [%s], URL: [%s]" %
              (queue.qsize(), CURRENT_URL_COUNT, url))

        page_msg = get_page_msg(url)
        page_url_list = page_msg[0]
        page_title = page_msg[1]
        page_text = page_msg[2]

        if page_title.find(crawl_kw) != -1 or page_text.find(crawl_kw) != -1:  # proceed only when the title or body contains the keyword
            if len(page_title) > 20 and len(page_text) > 300:
                if url not in RES_URL_LIST:  # skip URLs already stored, to avoid duplicate rows
                    sql = 'INSERT INTO webpage(url,title,text) VALUES("%s","%s","%s")' % (
                        url, page_title, page_text)
                    mysql.insert(sql)
                    RES_URL_LIST.append(url)  # remember the URL in the global RES_URL_LIST to prevent duplicate inserts
                    print("Keyword [%s]: [%s] rows crawled so far, [%s] short of the target; stored URL [%s]" %
                          (crawl_kw, len(RES_URL_LIST),
                           crawl_num - len(RES_URL_LIST), url))

        while page_url_list:
            url = page_url_list.pop()
            if url not in RES_URL_LIST:
                queue.put(url.strip())  # push every URL from the page source onto the queue
    print("Queue is empty, exiting")
Example #18

 def test_multi_seq_mutations(self):
     self.verify_result = self.input.param("verify_result", False)
     queue = queue.Queue()
     number_of_times = (self.number_of_documents // self.concurrent_threads)
     process_list = []
     number_of_buckets = len(self.buckets)
     for x in range(self.concurrent_threads):
         base_json = self.generate_json_for_nesting()
         data_set = self.generate_nested(base_json, base_json, 2)
         json_document = self.generate_nested(base_json, data_set, 10)
         bucket_number = x % number_of_buckets
         prefix = self.buckets[bucket_number].name + "_" + str(x) + "_"
         p = Process(target=self.test_seq_mutations,
                     args=(queue, number_of_times, prefix, json_document,
                           self.buckets[bucket_number]))
         p.start()
         process_list.append(p)
     for p in process_list:
         p.join()
     if self.verify_result:
         filename = "/tmp/" + self.randomDataGenerator.random_uuid(
         ) + "_dump_failure.txt"
         queue_size = queue.qsize()
         if not queue.empty():
             self._dump_data(filename, queue)
         self.assertTrue(
             queue_size == 0,
             "number of failures {0}, check file {1}".format(
                 queue_size, filename))
Example #19
def run():
    # event queue
    queue = Queue()

    # create each engine process and start them
    for id in range(process):
        procs.append(P(id, engine, queue, timeout, False))
        procs[-1].set_depth(depth)
        procs[-1].start()

    # read file and fill queue
    with open(input, 'r') as fp:
        for fen in fp:
            fen = fen.replace('"','')
            print('Queuing fen {}'.format(fen))
            queue.put(fen)
            time.sleep(0.2) # not too fast ...

    # wait for queue to be empty
    while not queue.empty():
        print('Still working ...({})'.format(queue.qsize()))
        time.sleep(5)

    # quit the engine
    print('Quitting')
    for p in procs:
        p.quit()
        time.sleep(0.5)
        p.kill()

    print('Done')
Example #20
    def _dequeue(self, queue):
        """Removes queue entries till an alive reference was found.
        The referenced image holder will be returned in this case.
        Otherwise if there wasn't found any alive reference
        None will be returned.

        Args:
            queue (queue.Queue): the queue to operate on

        Returns:
            tuple of (ImageHolder, tuple of (width: int, height: int),
                      PostLoadImageProcessor):
                an queued image holder or None, upper bound size or None,
                the post load image processor or None
        """
        holder_reference = None
        image_holder = None
        upper_bound_size = None
        post_load_processor = None

        while not queue.empty():
            holder_reference, upper_bound_size, post_load_processor = \
                queue.get_nowait()
            image_holder = holder_reference and holder_reference()
            if (holder_reference is None or image_holder is not None):
                break

        return image_holder, upper_bound_size, post_load_processor
Example #21
def multiCommand(commands):
    maxAlterations = int(max([i[2] for i in commands]) * frameRate)
    queueList = []
    queueLock.acquire()
    while not queue.empty():
        queueList.append(queue.get())
        queue.task_done()
    appends = maxAlterations - len(queueList)
    if appends > 0:
        for i in range(abs(appends)):
            queueList.append({})
    for c in commands:
        commandAlterations = int(c[2] * frameRate)
        for i in range(c[0][0], c[0][1]):
            start = pixels[i]
            bridgeGenerator = bridgeValues(commandAlterations, start, c[1])
            for m in range(commandAlterations):
                queueList[m][i] = next(bridgeGenerator)
        if appends < 0:
            for r in range(abs(appends)):
                if i in queueList[commandAlterations + r]:
                    del queueList[commandAlterations + r][i]
    while queueList:
        queue.put(queueList.pop(0))
    queueLock.release()
Example #22
def insertion_listener():
    db = sqlite3.connect('tracker.db')
    create_table(db.cursor())
    while True:
        if (not queue.empty()):
            row = queue.get()
            insert(db, row[0], row[1], row[2])
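The `while True` / `empty()` poll above spins at full CPU when idle; a blocking get() with a stop sentinel avoids that. A sketch assuming the same create_table/insert helpers:

import sqlite3

def insertion_listener(queue):
    db = sqlite3.connect('tracker.db')
    create_table(db.cursor())
    while True:
        row = queue.get()   # blocks until an item arrives
        if row is None:     # sentinel pushed by the producer to stop us
            break
        insert(db, row[0], row[1], row[2])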
Example #23
def Consumer(queue, order):
    # Customers (consumers) take orders by removing them from the buffer
    while not order.is_set() or not queue.empty():
        order_message = queue.get()
        logging.info("Customer picking up order: %s (queue size = %d)", order_message, queue.qsize())
    logging.info("Customers have no more orders.")
    logging.info("System shuts down.")
Example #24
 def run(self):
     while True:
         try:
             if self.queue.empty(): break
             queue_task = self.queue.get()
         except:
             break
         try:
             task_host, task_port = queue_task.split(":")
             data = scan_port(task_host, task_port)
             if data:
                 #save responses in the dict port_data; key format 'IP:port', value: the returned data
                 if data != 'NULL':
                     port_data[task_host + ":" + task_port] = data
                     #print('port_data:',port_data)
                 # determine the service type (port fingerprint) of the IP/port
                 server_type = server_discern(task_host, task_port, data)
                 # fall back when server_discern returns nothing
                 if not server_type:
                     h_server, title = get_web_info(task_host, task_port)
                     if title or h_server: server_type = 'web ' + title
                     #print(h_server,title)
                 if server_type:
                     log('server', task_host, task_port,
                         server_type.strip())
                 IPS[task_host + ":" + task_port] = server_type.strip()
                 #print('='*20)
         except Exception as err:
             #print(err)
             continue
Example #25

    def canFinish(self, numCourses, prerequisites):
        # write your code here

        # Convert the prerequisites (pairs [course, prereq]) into adjacency
        # lists and in-degree counts, i.e. a graph representation.

        edges = {i: [] for i in range(numCourses)}
        indegree = [0 for i in range(numCourses)]
        import queue
        for i, j in prerequisites:
            edges[j].append(i)
            indegree[i] += 1

        queue, count = queue.Queue(maxsize=numCourses), 0

        for i, v in enumerate(indegree):
            if v == 0:
                queue.put(i)

        while not queue.empty():
            node = queue.get()
            count += 1

            for vex in edges[node]:
                indegree[vex] -= 1
                if indegree[vex] == 0:
                    queue.put(vex)

        return count == numCourses
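This is Kahn's topological-sort algorithm: courses with in-degree zero are taken first, and a cycle leaves some course's in-degree above zero forever. A usage sketch, assuming the method sits on the usual LeetCode-style Solution class:

s = Solution()
print(s.canFinish(2, [[1, 0]]))          # True: take course 0, then course 1
print(s.canFinish(2, [[1, 0], [0, 1]]))  # False: circular prerequisites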
Example #26

    def binaryTreeToLists2(self, root):
        result = []
        if root is None: return result

        import queue
        queue = queue.Queue()
        queue.put(root)

        dummy = ListNode(0)

        while not queue.empty():
            p = dummy
            size = queue.qsize()
            for i in range(size):
                head = queue.get()
                p.next = ListNode(head.val)
                p = p.next

                if head.left is not None:
                    queue.put(head.left)
                if head.right is not None:
                    queue.put(head.right)

            result.append(dummy.next)

        return result
Example #27
 def keys_exist_or_assert_in_parallel(keys,
                                      server,
                                      bucket_name,
                                      test,
                                      concurrency=2,
                                      collection=None):
     log = logger.Logger.get_logger()
     verification_threads = []
     queue = Queue()
     for i in range(concurrency):
         keys_chunk = BucketOperationHelper.chunks(keys,
                                                   len(keys) // concurrency)
         t = Thread(target=BucketOperationHelper.keys_exist_or_assert,
                    name="verification-thread-{0}".format(i),
                    args=(keys_chunk.get(i), server, bucket_name, test,
                          queue, collection))
         verification_threads.append(t)
     for t in verification_threads:
         t.start()
     for t in verification_threads:
         log.info("thread {0} finished".format(t.name))
         t.join()
     while not queue.empty():
         item = queue.get()
         if item is False:
             return False
     return True
Example #28
 def persistence_verification(servers, bucket, timeout_in_seconds=1260):
     log = logger.Logger.get_logger()
     verification_threads = []
     queue = queue.Queue()
     rest = RestConnection(servers[0])
     nodes = rest.get_nodes()
     nodes_ip = []
     for node in nodes:
         nodes_ip.append(node.ip)
     for i in range(len(servers)):
         if servers[i].ip in nodes_ip:
             log.info("Server {0}:{1} part of cluster".format(
                 servers[i].ip, servers[i].port))
             rest = RestConnection(servers[i])
             t = Thread(target=ClusterOperationHelper.
                        persistence_verification_per_node,
                        name="verification-thread-{0}".format(servers[i]),
                        args=(rest, bucket, queue, timeout_in_seconds))
             verification_threads.append(t)
     for t in verification_threads:
         t.start()
     for t in verification_threads:
         t.join()
         log.info("thread {0} finished".format(t.name))
     while not queue.empty():
         item = queue.get()
         if item is False:
             return False
     return True
Example #29
def stn_decode(queue, log_probs, decoder, index2label, window, step):
    # the `queue` parameter shadows the queue module, so import the
    # exception class explicitly for the except clause below
    from queue import Empty
    while not queue.empty():
        try:
            video = queue.get(timeout=3)
            score, labels, segments = decoder.decode(log_probs[video])
            transcript = [s.label for s in segments]
            stn_score, stn_labels, stn_segments = decoder.stn_decode(
                log_probs[video], segments, transcript, window, step)
            # save result
            with open('results/' + video, 'w') as f:
                f.write('### Recognized sequence: ###\n')
                f.write(' '.join([index2label[s.label]
                                  for s in stn_segments]) + '\n')
                f.write('### Score: ###\n' + str(stn_score) + '\n')
                f.write('### Frame level recognition: ###\n')
                f.write(' '.join([index2label[l] for l in stn_labels]) + '\n')
        except Empty:
            pass
Example #30
def absoluteFade(indexes, rgb, fadeTime):
    '''Is given a color to fade to, and executes fade'''
    if not fadeTime:
        fadeTime = 1 / frameRate
    rgb = [makeEightBit(c) for c in rgb]
    #Calculates how many individual fade frames are needed
    alterations = int(fadeTime * frameRate)
    queueList = []
    queueLock.acquire()
    while not queue.empty():
        queueList.append(queue.get())
        queue.task_done()
    #Amount of frames that need to be added to queue
    appends = alterations - len(queueList)
    #fill out the queue with blank dictionaries to populate
    if appends > 0:
        for i in range(abs(appends)):
            queueList.append({})
    #Iterate down indexes, figure out what items in queue need to be altered
    for i in indexes:
        #INVESTIGATE: THIS MIGHT BE THE SOURCE OF FLASHING ISSUES AT THE START OF A COMMAND
        start = pixels[i]
        bridgeGenerator = bridgeValues(alterations, start, rgb)
        for m in range(alterations):
            queueList[m][i] = next(bridgeGenerator)
    #If this command overrides a previous command to the pixel, it should wipe any commands remaining
        if appends < 0:
            for r in range(abs(appends)):
                if i in queueList[alterations + r]:
                    del queueList[alterations + r][i]
    while queueList:
        queue.put(queueList.pop(0))
    queueLock.release()
Example #31

def run_jobs(queue):
    processes = []
    # for job_idx in range( 0, len( queue ) ):
    # really should protect against number of available engines here
    job_idx = 0
    while not queue.empty():
        get_job_path = queue.get()

        process = subprocess.Popen([
            '/home/gdrobert/Develompent/adjoint_lumerical/inverse_design/run_proc.sh',
            cluster_hostnames[job_idx], get_job_path
        ])
        processes.append(process)

        job_idx += 1

    completed_jobs = [0 for i in range(0, len(processes))]
    while np.sum(completed_jobs) < len(processes):
        for job_idx in range(0, len(processes)):
            if completed_jobs[job_idx] == 0:

                poll_result = processes[job_idx].poll()
                if not (poll_result is None):
                    completed_jobs[job_idx] = 1

        time.sleep(1)
Example #32
def worker():
    while not queue.empty():
        port = queue.get()

        if portscan(port):
            print(f'Port {port} is open!')
            open_ports.append(port)
Example #33
 def do_work(self, ctime):
     node = (ctime + self.me) % max(len(self.ids), self.MINIMUM_TIME)
     logging.debug(" Doing work for node {}".format(node))
     if node == self.me:
         logging.debug(" Skipping self")
         return
     if node not in self.names:  # actually a list of ids, because it's the keys
         logging.debug(" Skipping blank slot... small N")
         return
     pad = self.fetch_pad(node, ctime, as_sender=True)
     if pad is None:
         logging.debug(" Making chaff for stranger ({})".format(node))
         envelope_bytes = self.make_stranger_chaff_envelope(ctime, node)
     else:
         queue = self.message_queue[node]
         if queue.empty():
             logging.debug(" Making chaff for friend ({})".format(node))
             envelope_bytes = self.make_friend_chaff_envelope(
                 ctime, node, pad)
         else:
             logging.info(" Packing data for friend ({})".format(node))
             message_bytes = queue.get()
             assert isinstance(message_bytes, bytes)
             assert len(message_bytes) == MESSAGE_LENGTH
             envelope_bytes = self.make_friend_data_envelope(
                 ctime, node, pad, message_bytes)
     assert isinstance(envelope_bytes, bytes)
     assert len(envelope_bytes) == ENVELOPE_LENGTH
     self.schedule_send_raw_bytes(node, envelope_bytes, ctime)
Example #34

def service(lock, queue):
    print(threading.currentThread().getName(), "Waiting")

    with lock:
        lock.wait()
    while not queue.empty():
        print(threading.currentThread().getName(), "Done item " + queue.get())
Example #35
    def nextEventFor(self, sid):
        with lockEventPoolAssistance:
            queue = self.dict[sid]
            if not queue.empty():
                return queue.get()

            return None
Example #36
def async_write(buffer=[]):
    # the mutable default argument doubles as a buffer that
    # persists across calls
    item = queue.get()
    buffer.append(item)
    if len(buffer) > 500 or queue.empty():
        log.debug('Processing {} queue items.'.format(len(buffer)))
        with db.transaction():
            write_buffer(buffer)
        buffer.clear()
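The mutable default argument buffer=[] is what carries items across calls here; the same behavior with the state made explicit, as a sketch against the same queue/db/write_buffer/log globals:

_buffer = []

def async_write():
    _buffer.append(queue.get())
    if len(_buffer) > 500 or queue.empty():
        log.debug('Processing {} queue items.'.format(len(_buffer)))
        with db.transaction():
            write_buffer(_buffer)
        _buffer.clear()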
Example #37
def convert(queue):
    cmd_flac = ["flac", "--decode", "--silent", "--stdout"]
    cmd_lame = ["lame", "-h" "--preset", "224"]

    while not queue.empty() and not abort.is_set():
        f = queue.get()
        pFlac = Popen(cmd_flac + [f[0]], stdout=PIPE)
        # "-" tells lame to read the decoded stream from stdin
        pLame = Popen(cmd_lame + ["-", f[1]], stdout=PIPE, stdin=pFlac.stdout, stderr=STDOUT)
        stdout, stderr = pLame.communicate()
Example #38
	def handle(self, call, queue):
		while 1:
			if queue.empty():
				pass
			else:
				m = queue.get()
				call(m)
			
			time.sleep(0.1)
Example #39
def Main():
#try statement for creating the catalog database table if it does not exist, otherwise saves a boolean value that the catalog was already created
        try:
            create_catalog(cursor, connection)
        except:
            cat_created = False
        success = []                        #array that will be used to hold the queue values for successful queries
        updated = 0                         #boolean value that is used to show if catalog was updated
        count = 0                           #integer to hold the amount of times we iterate through the for loop that updates the catalog database
        tables = []                         #array to hold the tables used in the query
        firstsplit = []                     #array to help in the rudimentary parsing of the query
        nodeid = []                         #array to hold the nodeid where query was sent
        hostnames = []                      #array to hold the hostname where query was sent
        parse = query.split(" ")            #rudimentary parse of the query
#for loop the parses the query, finds every instance of table and stores the tablename into the table array variable
        for x, word in enumerate(parse):    
            if word == "TABLE":
                firstsplit.append(parse[x + 1])
#for loop that completes the second part of the parse
        for x in firstsplit:
            split = x.split('(', 1)
            tables.append(split[0])
        nodes = ConfigSectionMap("nodecount")['numnodes']  #variable to hold the total amount of nodes listed in the catalog database
        threads = []                        #array to hold the parallel threads that will be created
#for loop that creates the threads to send queries to multiple servers, and then saves them to the threads array.
        for x in range(1, int(nodes) + 1):
            section = "node " + str(x)
            threads.append(myThread(x, ConfigSectionMap(section)['hostname'], ConfigSectionMap(section)['ip'], query, queue))
            (threads[-1].start())           #starts all threads
            nodeid.append(x)                #saves the node where the thread is sending the query to into the nodeid array
            hostnames.append(ConfigSectionMap(section)['hostname'])   #saves the hostname where the thread is sending the query to into the hostnames array
#for loop that joins all the threads
        for t in threads:
            t.join()
#while loop to save the success or fail of every thread into the success array
        while not queue.empty():
            success.append(queue.get())
#for loop that iterates through the success array, if a query was successfully updated it checks to see if the query contained a drop or create statement.
#if so, calls the proper function to update the catalog
        for x in success:
            if x == 1:
                if parse[0] == "DROP":
                    remove_catalog(cursor, tables[0],connection)
                    updated = 1
                if parse[0] == "CREATE":
                    add_catalog(cursor, tables[0], hostnames[count], 0, nodeid[count], 0, 0, 0, connection)
                    updated = 1
            count += 1 
#for loop checks to see if any of the queries succeeded and contained a drop or create statement, if true, prints out a catalog updated statement, and if false, prints out catalog had no updates
        if updated == 1:
            print(cathost + ": catalog updated.")
        else:
            print(cathost + ": catalog had no updates.")
        connection.close()                  #closes the connection to the database
Example #40
def deepcopy_queue2list(queue):
    li = []
    while not queue.empty():
        try:
            li.append(queue.get())
        except:
            pass
    """ put items in li back to queue """
    for item in li:
        queue.put(item)
    return li
Example #41
 def run (self):
     consumed = 0
     while consumed < buffer_size:
         myLock.acquire ()
         if not queue.empty():
             try:
                 e = queue.get()
                 print ('<- Consumed', ' -- ', e.title, ' -- size = ', consumed + 1)
             except:
                 print ('no feed yet')
         myLock.release ()
         consumed += 1
Example #42
def test(queue):
    while not queue.empty() and not abort.is_set():
        temp = queue.get()
        f = temp[0]
        cmd = ["flac", "-t", f ]
        proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
        stdout, stderr = proc.communicate()
        print(stderr.decode())

        if proc.returncode != 0:
            print("%s returned %i." % (cmd, proc.returncode))
            abort.set()
            break
Example #43
	def run(self):
		from queue import Empty  # the global `queue` is a Queue instance, not the module
		while not queue.empty():
			try:
				name = queue.get(False)
			except Empty:
				break

			with stdout:
				global done
				print(self.getName(), name, done, queue.qsize())
				done += 1

			with open(name) as f:
				import_from_file(f, dijkstra_lock, mysql_lock)
Example #44
def usePQ():

    queue = PriorityQueue()
    queue.put((10, 'dez'))
    queue.put((8, 'oito'))
    queue.put((9, 'nove'))

    # rebinding `queue` discards the three entries queued above
    queue = PriorityQueue()
    queue.put((1, Data(10)))
    queue.put((3, Data(30)))
    queue.put((2, Data(20)))
    
    while not queue.empty() :
        print("%d %s" % queue.get())
Example #45
    def next(self):
        queue = self._queue
        items = []

        item = self.next_item()
        if item is None:
            return items

        items.append(item)
        while len(items) < self._config._upload_limit and not queue.empty():
            item = self.next_item()
            if item:
                items.append(item)

        return items
Example #46
def Main():
#builds the catalog update queries, sends the node queries out on parallel threads, and reports the results
    success = []                        #array that will be used to hold the queue values for successful queries
    updates = []                        #boolean value that is used to show if catalog was updated, used in for loop to adjust updated variable
    updated = 0                         #integer to check if catalog was updated, used for print statement
    count = 0                           #integer to hold the amount of times we iterate through the for loop that updates the catalog database
    db_query = []                       #array to hold the catalog update queries for each node that was changed
    #for loop to populate db_query with queries
    for x in range(1, int(numnodes) + 1):
        format_list = []
        format_list.append(table)
        format_list.append(nodeurl[x-1])
        format_list.append(partmtd)
        format_list.append(x)
        format_list.append(column)
        format_list.append(param1[x-1])
        format_list.append(param2[x-1])
        db_query.append("INSERT OR REPLACE INTO DTABLES (tname, nodeurl, partmtd, nodeid, partcol, partparam1, partparam2)\nVALUES\n('{}', '{}', {}, {}, '{}', '{}', '{}');".format(*format_list))
    db_runquery(cathost, catip, db_query, updates)
    threads = []                        #array to hold the parallel threads that will be created
#for loop that creates the threads to send queries to multiple servers, and then saves them to the threads array.
    for x in range(1, int(numnodes) + 1):
        section = "node " + str(x)
        threads.append(myThread(x, ConfigSectionMap(section)['hostname'], ConfigSectionMap(section)['ip'], queries[x-1], queue))
        (threads[-1].start())           #starts all threads
        nodeid.append(x)                #saves the node where the thread is sending the query to into the nodeid array
        hostnames.append(ConfigSectionMap(section)['hostname'])   #saves the hostname where the thread is sending the query to into the hostnames array
#for loop that joins all the threads
    for t in threads:
        t.join()
#while loop to save the success or fail of every thread into the success array
    while not queue.empty():
        success.append(queue.get())
#for loop that iterates through the success array, if a query was successfully updated it checks to see if the query contained a drop or create statement.
#if so, calls the proper function to update the catalog
    for x in success:
        if x == 1:
            print (hostnames[count] + ": " + str(rowcount[count]) + " rows inserted.")
        count += 1 
#for loop checks to see if any of the queries succeeded, if true, prints out a catalog updated statement, and if false, prints out catalog had no updates
    for x in updates:
        if x == 1:
            updated = 1
    if(updated == 1):     
        print(cathost + ": catalog updated.")
    else:
        print(cathost + ": catalog had no updates.")
    connection.close()                  #closes the connection to the database
Example #47
 def work(queue):
     while True:
         if queue.empty():
             if self.all_fetched_event.is_set():
                 break
             else:
                 time.sleep(0.001)
         try:
             task = queue.get_nowait()
         except Exception:
             continue
         if not os.path.exists(os.path.dirname(task.destination)):
             os.makedirs(os.path.dirname(task.destination),
                         exist_ok=True)
         with open(task.destination, 'wb') as f:
             f.write(self.__download(task.url))
Example #48

def search(start, goal):
    queue = StateQueue()
    queue.push(Move(start))
    while not queue.empty():
        current = queue.pop()

        # current[1] is needed because moves are queued as (f(n), move) tuples
        if current[1].state == goal:  # cmp() is gone in Python 3; compare directly
            return current[1]
        else:
            successors = current[1].move()
            for i in successors:
                # print("pushed", i.state)
                queue.push(i)
    return None
Example #49
def check_queue():
    global t
    
    text = ''
    
    if not queue.empty():
        text = queue.get()
        print('get:', text)
        l['text'] = text
    else:
        print('get: - empty -')
        
    if text == 'last':
        t = None
    else:
        root.after(500, check_queue)    
Example #50
def main():

    event = Event()
    queue = Queue()
    proc_pool = [MyProcess(queue, event, top_func) for _ in range(4)]
    event.set()
    for i in range(1000):
        queue.put(i)
    for proc in proc_pool:
        proc.start()
    while not queue.empty():
        time.sleep(1)

    event.clear()
    for proc in proc_pool:
        proc.join()
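multiprocessing.Queue.empty() is documented as unreliable (items may still be in the feeder pipe), so polling it as a completion signal can return early. A JoinableQueue handshake is deterministic; a sketch under the same four-worker setup (the worker body is hypothetical):

from multiprocessing import JoinableQueue, Process

def worker(q):
    while True:
        item = q.get()
        try:
            if item is None:   # sentinel: no more work
                return
            # ... process item ...
        finally:
            q.task_done()

if __name__ == '__main__':
    q = JoinableQueue()
    procs = [Process(target=worker, args=(q,)) for _ in range(4)]
    for p in procs:
        p.start()
    for i in range(1000):
        q.put(i)
    q.join()            # returns once every put item is task_done
    for p in procs:
        q.put(None)     # one sentinel per worker
    for p in procs:
        p.join()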
Example #51
def absoluteFade(targetValues, fadeTime, sixteenBit):
    '''Is given a dictionary of indexes and their target values, and a fade time'''
    print('Fading now')
    targetValues = {int(k): int(v) for k, v in targetValues.items()}
    if not fadeTime:
        fadeTime = 1 / frameRate
    print(targetValues)
    #Calculates how many individual fade frames are needed
    alterations = int(fadeTime * frameRate)
    queueList = []
    queueLock.acquire()
    while not queue.empty():
        queueList.append(queue.get())
        queue.task_done()
    #Amount of frames that need to be added to queue
    appends = alterations - len(queueList)
    #fill out the queue with blank dictionaries to populate
    if appends > 0:
        for i in range(abs(appends)):
            queueList.append({})
    #Iterate down indexes, figure out what items in queue need to be altered
    for i in targetValues:
        #INVESTIGATE: THIS MIGHT BE THE SOURCE OF FLASHING ISSUES AT THE START OF A COMMAND
        start = pixels[i]
        end = targetValues[i]
        bridgeGenerator = bridgeValues(alterations, start, end)
        print('Index %d' % i)
        print('Start fade at %d' % start)
        print('End fade at %d' % end)
        for m in range(alterations):
            if sixteenBit:
                value = int(next(bridgeGenerator))
                highLow = sixteenToEight(value)
                queueList[m][i] = highLow[0]
                queueList[m][i + 1] = highLow[1]
            else:
                queueList[m][i] = int(next(bridgeGenerator))
    #If this command overrides a previous command to the pixel, it should wipe any commands remaining
        if appends < 0:
            for r in range(abs(appends)):
                if i in queueList[alterations + r]:
                    del queueList[alterations + r][i]
                    if sixteenBit:
                        del queueList[alterations + r][i + 1]
    while queueList:
        queue.put(queueList.pop(0))
    queueLock.release()
Example #52
def process_data(threadName, queue, lock):
    
    while True:
        lock.acquire()
        if not queue.empty():
            data = queue.get()            
            lock.release()
            
            if data is None:
                print ("%s ending now!" % (threadName))
                break
            else:
                print ("%s processing %s" % (threadName, data))
                time.sleep(1)
        else:
            lock.release()
            time.sleep(1)
Example #53
def general_search(problem, queue):
  num_expanded = 0
  visited = set()
  queue.put(adt.Node(problem.initial_state, root=True))
  while not queue.empty():
    node = queue.get()
    if str(node.state) in visited:
      continue
    visited.add(str(node.state))
    num_expanded += 1
    node.expanded = num_expanded
    # print(node, problem.heuristics[0](node.state))
    if problem.goal_test(node.state):
      return node, num_expanded
    queue.put_many(node.expand(problem.operators))
  return None, num_expanded
Example #54

def dfs_pre_order_imperative(tree):
	if tree:
		queue = Queue()  # despite the name, used as a stack here (push / pop_back)
		queue.push(tree)
		while not queue.empty():
			node = queue.pop_back()
			while node:
				yield node
				if node.right:
					queue.push(node.right)
				if node.left:
					node = node.left
				elif node.right:
					if queue._data[-1] == node.right:
						queue.pop_back()
					node = node.right
				else:
					node = None
Example #55
def generate(params):
    threadCount = int(params['count']) if params['count'] else THREAD_COUNT
    targetDir = params['targetDir'] if params['targetDir'] else TARGET_DIR
    listFile = params['listFile'] if params['listFile'] else LIST_FILE

    if not os.path.isdir(targetDir):
        os.mkdir(targetDir)
    songList = mission.getList(listFile)
    # songList = [mission.getList(listFile)[3],] #for debug
    global total
    logger = Logger()
    queue = GenQueue(len(songList))
    threads = []
    # Open thread pool
    for i in range(threadCount):
        thread = GenThread('thread-%d' % i, queue, targetDir, logger, len(songList))
        thread.start()
        threads.append(thread)
    # Fill the task queue
    queue.lock.acquire()
    startTime = datetime.now()
    for i in range(len(songList)):
        l = songList[i]
        queue.put(l)
    logger.logPrint('%s\nStart Generating with %s threads....' % (startTime.strftime('%Y-%m-%d %H:%M:%S'), threadCount))
    queue.lock.release()
    # Wait for threads to finish (busy-waits; a short time.sleep here would spare the CPU)
    while not queue.empty():
        pass
    global exitFlag
    exitFlag = 1
    for t in threads:
        t.join()

    logger.logPrint('All Done!')
    logger.logPrint('Succeeded:%d   Failed:%d   Total:%d\n' % tuple(total))
    delta = datetime.now() - startTime
    logger.logPrint('It takes %s seconds to finish.' % delta.seconds)
    logger.logCsv(targetDir)
    logger.logPrint('CSV dumped.')
    logger.logToDisk()
Example #56
 def pollQueue(self, whatInbox):
     """This method checks all the queues from the outside world, and forwards any waiting data
     to the child component. Returns False if we propogated a shutdown signal, true otherwise."""
     parentSource = self.childInboxMapping[whatInbox]
     queue = self.inQueues[whatInbox]
     while not queue.empty():
         if not self.outboxes[parentSource].isFull():
             msg = queue.get_nowait()  # won't fail, we're the only one reading from the queue.
             try:
                 self.send(msg, parentSource)
             except noSpaceInBox as e:
                 raise RuntimeError(
                     "Box delivery failed despite box (earlier) reporting being not full. Is more than one thread directly accessing boxes?"
                 )
             if isinstance(msg, (Ipc.shutdownMicroprocess, Ipc.producerFinished)):
                 #                    print ("Quietly dieing?")
                 return False
         else:
             # if the component's inboxes are full, do something here. Preferably not succeed.
             break
     return True
Example #57
def main():
    parser = argparse.ArgumentParser(description='Read a config file.')
    parser.add_argument('urqmd_file', metavar='URQMD_FILE', type=argparse.FileType('r', encoding='ascii'), help="Must be of type .f14")
    parser.add_argument('out_file', metavar='OUT_FILE', help='The HDF5 (.h5) file to store the information in')
    parser.add_argument('--no-event-columns', action='store_true', help="Do NOT include columns for the event number and event impact parameter.")
    parser.add_argument('--chunksize', type=int, default = 100000, help='The number of lines to read in one go.')
    parser.add_argument('--verbosity', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'], default='INFO', help="How verbose should the output be")
    args = parser.parse_args()

    logging.basicConfig(level=args.verbosity, format='%(asctime)s.%(msecs)d %(levelname)s %(message)s', datefmt="%Y-%m-%d %H:%M:%S")

    queue = multiprocessing.Queue()
    worker = HDF_Worker(args.out_file, queue)
    worker.start()
    for df in F14_Reader(args.urqmd_file, not args.no_event_columns).iter_dataframes(chunksize = args.chunksize):
        logging.debug("DataFrame ready to be written to file.")
        while not queue.empty(): time.sleep(0.05)
        logging.debug("Queue empty. DataFrame will be put into write queue now.")
        queue.put(df.copy())
    queue.put('EOF')
    queue.close()
    queue.join_thread()
    worker.join()
Example #58
	def enqueue(self, queue):
		print("ArduinoComm::enqueue")

		while not queue.empty():
			self.msgQueue.put(queue.get())
Example #59
def drain(queue):
    while not queue.empty():
        queue.get()
        queue.task_done()
    return
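Because drain() calls task_done() for every item, a producer blocked in queue.join() is released once the drain finishes. A tiny usage sketch:

from queue import Queue

q = Queue()
for i in range(3):
    q.put(i)
drain(q)
q.join()   # returns immediately: drain() marked every item done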
Example #60
def manual_correction(folder, target_folder, fit_data_set, nv_type_manual, b_field_manual, queue, current_id_queue, lower_peak_widget, upper_peak_widget, lower_fit_widget, upper_fit_widget):
    """
    Backend code to display and fit ESRs, then once input has been received from front-end, incorporate the data into the
    current data set

    Args:
        folder: folder containing the data to analyze
        target_folder: target for processed data
        fit_data_set: starting point data set containing automatically fitted esrs
        nv_type_manual: pointer to empty array to populate with nv type manual corrections
        b_field_manual: pointer to empty array to populate with b field manual corrections
        queue: queue for communication with front-end in separate thread
        lower_peak_widget: widget containing frequency value of lower peak
        upper_peak_widget: widget containing frequency value of upper peak

    Poststate: populates fit_data_set with manual corrections
    """

    lower_peak_manual = [np.nan] * len(fit_data_set)
    upper_peak_manual = [np.nan] * len(fit_data_set)

    filepath = os.path.join(target_folder, folder[2:])
    data_filepath = os.path.join(filepath, 'data-manual.csv')
    if os.path.exists(data_filepath):
        prev_data = pd.read_csv(data_filepath)
        if 'manual_peak_1' in list(prev_data.keys()):
            for i in range(0, len(prev_data['manual_B_field'])):
                b_field_manual[i] = prev_data['manual_B_field'][i]
                nv_type_manual[i] = prev_data['manual_nv_type'][i]
            lower_peak_manual = prev_data['manual_peak_1']
            upper_peak_manual = prev_data['manual_peak_2']


    #TODO: Add saving as you go, add ability to start at arbitrary NV, add ability to specify a next NV number, eliminate peak/correct -> only have 'accept fit'

    try:


        print('STARTING')

        fit_data_set_array = fit_data_set.values  # .as_matrix() was removed in pandas 1.0

        w = widgets.HTML("Event information appears here when you click on the figure")
        display(w)

        # loop over all the folders in the data_subscripts subfolder and retrieve fitparameters and position of NV
        esr_folders = glob.glob(os.path.join(folder, '.\\data_subscripts\\*esr*'))

        # create folder to save images to
        # filepath_image = os.path.join(target_folder, os.path.dirname(folder).split('./')[1])
        # image_folder = os.path.join(filepath_image, '{:s}\\images'.format(os.path.basename(folder)))
        image_folder = os.path.join(target_folder, '{:s}\\images'.format(folder[2:]))
        # image_folder = os.path.normpath(
        #     os.path.abspath(os.path.join(os.path.join(target_folder, 'images'), os.path.basename(folders[0]))))
        if not os.path.exists(image_folder):
            os.makedirs(image_folder)
        if not os.path.exists(os.path.join(image_folder, 'bad_data')):
            os.makedirs(os.path.join(image_folder, 'bad_data'))

        f = plt.figure(figsize=(12, 6))

        def onclick(event):
            if event.button == 1:
                if event.key == 'control':
                    lower_fit_widget.value = event.xdata
                else:
                    lower_peak_widget.value = event.xdata
            elif event.button == 3:
                if event.key == 'control':
                    upper_fit_widget.value = event.xdata
                else:
                    upper_peak_widget.value = event.xdata

        cid = f.canvas.mpl_connect('button_press_event', onclick)

        data_array = []
        data_pos_array = []
        for esr_folder in esr_folders:
            print(esr_folder)
            sys.stdout.flush()
            data = Script.load_data(esr_folder)
            data_array.append(data)
            print('looping')
            sys.stdout.flush()


        nv_folders = glob.glob(folder + '\\data_subscripts\\*find_nv*pt_*')
        for nv_folder in nv_folders:
            data_pos_array.append(Script.load_data(nv_folder))

        while True:


        # for i, esr_folder in enumerate(esr_folders):

            i = current_id_queue.queue[0]
            if i >= len(data_array):
                break

            lower_fit_widget.value = 0
            upper_fit_widget.value = 10e9

            lower_peak_widget.value = 2.87e9
            upper_peak_widget.value = 0

            def display_data(pt_id, lower_peak_widget = None, upper_peak_widget = None, display_fit = True):
                # find the NV index
                # pt_id = int(os.path.basename(esr_folder).split('pt_')[-1])

                # findnv_folder = glob.glob(folder + '\\data_subscripts\\*find_nv*pt_*{:d}'.format(pt_id))[0]

                f.clf()
                gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
                ax0 = plt.subplot(gs[0])
                ax1 = plt.subplot(gs[1])
                ax = [ax0, ax1]
                plt.suptitle('NV #{:d}'.format(pt_id), fontsize=16)

                # load data
                data = data_array[i]
                if lower_fit_widget.value == 0 and upper_fit_widget.value == 10e9:
                    freq = data['frequency']
                    ampl = data['data']
                else:
                    freq = data['frequency'][np.logical_and(data['frequency'] > lower_fit_widget.value, data['frequency'] < upper_fit_widget.value)]
                    ampl = data['data'][np.logical_and(data['frequency'] > lower_fit_widget.value, data['frequency'] < upper_fit_widget.value)]
                if lower_peak_widget is None:
                    fit_params = fit_data_set_array[pt_id, 2:8]
                else:
                    lower_peak = lower_peak_widget.value
                    upper_peak = upper_peak_widget.value
                    if upper_peak == 0:
                        start_vals = get_lorentzian_fit_starting_values(freq, ampl)
                        start_vals[2] = lower_peak
                        start_vals[1] = ampl[np.argmin(np.abs(freq - lower_peak))] - start_vals[0]
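                        # assumed single-Lorentzian parameter order:
                        # [offset, amplitude, center, fwhm]; the amplitude guess
                        # is the dip depth at the clicked frequency relative to
                        # the offset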
                        try:
                            fit_params = fit_lorentzian(freq, ampl, starting_params=start_vals,
                                                 bounds=[(0, -np.inf, 0, 0), (np.inf, 0, np.inf, np.inf)])
                        except Exception:
                            # single-peak ESR fit failed
                            fit_params = [np.nan] * 6

                    else:
                        center_freq = np.mean(freq)
                        start_vals = []
                        start_vals.append(
                            get_lorentzian_fit_starting_values(freq[freq < center_freq], ampl[freq < center_freq]))
                        start_vals.append(
                            get_lorentzian_fit_starting_values(freq[freq > center_freq], ampl[freq > center_freq]))
                        start_vals = [
                            np.mean([start_vals[0][0], start_vals[1][0]]),  # offset
                            np.sum([start_vals[0][3], start_vals[1][3]]),  # FWHM
                            ampl[np.argmin(np.abs(freq-lower_peak))] - start_vals[0][0], ampl[np.argmin(np.abs(freq-upper_peak))]- start_vals[1][0],  # amplitudes
                            lower_peak, upper_peak  # centers
                        ]
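                        # double-Lorentzian parameter order:
                        # [offset, fwhm, amplitude_1, amplitude_2, center_1, center_2];
                        # the bounds below force negative amplitudes (dips) and
                        # keep both centers inside the measured frequency range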
                        try:
                            fit_params = fit_double_lorentzian(
                                freq, ampl, starting_params=start_vals,
                                bounds=[(0, 0, -np.inf, -np.inf, min(freq), min(freq)),
                                        (np.inf, np.inf, 0, 0, max(freq), max(freq))])
                        except Exception:
                            # double-peak ESR fit failed
                            fit_params = [np.nan] * 6

                # a 4-parameter result, or a NaN second center, means only a
                # single peak was found; keep just the single-Lorentzian params
                if len(fit_params) == 4 or np.isnan(fit_params[4]):
                    fit_params = fit_params[0:4]

                # get the NV position data and plot the confocal image of the point
                data_pos = data_pos_array[pt_id]
                FindNV.plot_data([ax[1]], data_pos)

                # plot the ESR spectrum and, optionally, the fit overlay

                if display_fit:
                    plot_esr(ax[0], data['frequency'], data['data'], fit_params=fit_params)
                else:
                    plot_esr(ax[0], data['frequency'], data['data'], fit_params=[np.nan] * 6)

                plt.tight_layout()
                plt.subplots_adjust(top=0.85) # Makes room at top of plot for figure suptitle

                plt.draw()
                plt.show()

                return fit_params, pt_id

            fit_params, pt_id = display_data(i)
            if len(fit_params) == 6:
                lower_peak_widget.value = fit_params[4]
                upper_peak_widget.value = fit_params[5]
            elif len(fit_params) == 4:
                lower_peak_widget.value = fit_params[2]
                upper_peak_widget.value = 0
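            # block until the GUI pushes a command: -1 -> refit with the current
            # widget values, -2 -> show the raw data without a fit, anything
            # else -> accept the result and move on to the next point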

            while True:
                if queue.empty():
                    time.sleep(0.5)
                else:
                    value = queue.get()
                    if value == -1:
                        fit_params, point_id = display_data(i, lower_peak_widget=lower_peak_widget, upper_peak_widget=upper_peak_widget)
                        if len(fit_params) == 6:
                            lower_peak_widget.value = fit_params[4]
                            upper_peak_widget.value = fit_params[5]
                        elif len(fit_params) == 4:
                            lower_peak_widget.value = fit_params[2]
                            upper_peak_widget.value = 0
                        continue
                    elif value == -2:
                        # redisplay the raw data without a fit and reset the fit window
                        display_data(i, display_fit=False)
                        fit_params = [np.nan] * 6
                        lower_fit_widget.value = 0
                        upper_fit_widget.value = 10e9
                    else:
                        break

            if nv_type_manual[i] == 'split':
                if np.isnan(fit_params[0]):
                    lower_peak_manual[i] = lower_peak_widget.value
                    upper_peak_manual[i] = upper_peak_widget.value
                    b_field_manual[i] = ((upper_peak_widget.value - lower_peak_widget.value) / 5.6e6)
                elif len(fit_params) == 4:
                    lower_peak_manual[i] = fit_params[2]
                    b_field_manual[i] = (np.abs(2.87e9-fit_params[2]) / 2.8e6)
                else:
                    lower_peak_manual[i] = fit_params[4]
                    upper_peak_manual[i] = fit_params[5]
                    b_field_manual[i] = ((fit_params[5] - fit_params[4]) / 5.6e6)
            elif nv_type_manual[i] == 'single':
                if np.isnan(fit_params[0]):
                    lower_peak_manual[i] = lower_peak_widget.value
                    b_field_manual[i] = 0
                else:
                    lower_peak_manual[i] = fit_params[2]
                    b_field_manual[i] = 0
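            # B-field conversion: the NV gyromagnetic ratio is ~2.8 MHz/G, so a
            # split pair gives B = (f_upper - f_lower) / 5.6 MHz/G, while a
            # single shifted line gives B = |2.87 GHz - f| / 2.8 MHz/G (in Gauss)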

            # sort the saved figure: rejected points end up in images/bad_data/
            if nv_type_manual[i] in ['bad', 'no_split'] or (nv_type_manual[i] == '' and fit_params is None):
                f.savefig(os.path.join(image_folder, 'bad_data', 'esr_pt_{:02d}.jpg'.format(pt_id)))
            else:
                f.savefig(os.path.join(image_folder, 'esr_pt_{:02d}.jpg'.format(pt_id)))

            if not os.path.exists(filepath):
                os.makedirs(filepath)
            fit_data_set['manual_B_field'] = b_field_manual
            fit_data_set['manual_nv_type'] = nv_type_manual
            fit_data_set['manual_peak_1'] = lower_peak_manual
            fit_data_set['manual_peak_2'] = upper_peak_manual
            fit_data_set.to_csv(data_filepath)
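            # note: the csv is rewritten on every pass through the loop so that
            # the manual classification survives a crash or an early exit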

        f.canvas.mpl_disconnect(cid)
        fit_data_set['manual_B_field'] = b_field_manual
        fit_data_set['manual_nv_type'] = nv_type_manual
        fit_data_set['manual_peak_1'] = lower_peak_manual
        fit_data_set['manual_peak_2'] = upper_peak_manual

        # filepath / data_filepath (the processed-data folder and its
        # data-manual.csv) are defined earlier from target_folder and folder

        if not os.path.exists(filepath):
            os.makedirs(filepath)

        fit_data_set.to_csv(data_filepath)

        create_shortcut(os.path.abspath(os.path.join(filepath, 'to_data.lnk')), os.path.abspath(folder))
        create_shortcut(os.path.join(os.path.abspath(folder), 'to_processed.lnk'), os.path.abspath(filepath))

        print('DONE!')

    except Exception as e:
        print(e)
        raise
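

# --- Hedged sketch (not part of the original script) -------------------------
# fit_lorentzian and get_lorentzian_fit_starting_values are imported from
# elsewhere in this project, so their internals are assumed here; this is a
# minimal self-contained single-Lorentzian dip fit with the same assumed
# [offset, amplitude, center, fwhm] parameter order.
def _lorentzian_sketch(f, offset, amplitude, center, fwhm):
    # amplitude < 0 describes an ESR contrast dip
    return offset + amplitude * (fwhm / 2) ** 2 / ((f - center) ** 2 + (fwhm / 2) ** 2)


def _fit_lorentzian_sketch(freq, ampl, starting_params, bounds):
    from scipy.optimize import curve_fit  # scipy assumed available
    # curve_fit raises RuntimeError when the fit does not converge, which the
    # caller above maps to a NaN parameter set
    popt, _ = curve_fit(_lorentzian_sketch, freq, ampl, p0=starting_params, bounds=bounds)
    return popt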