        if pusher:
            self.pusher.start()

        self.puller = puller

        if puller:
            self.puller.start()


if __name__ == "__main__":
    kill_switch = multiprocessing.Event()

    initializer(logging.INFO)

    queue_sub_to_push = multiprocessing.Queue()

    pusher = ZMQPusherQueue(queue_sub_to_push,
                            kill_switch,
                            host='127.0.0.1',
                            port="5559")
    subscriber = ZMQSubscriberQueue(queue_sub_to_push,
                                    kill_switch,
                                    host='127.0.0.1',
                                    port="5558")
    puller = ZMQPuller(kill_switch, pull_host='127.0.0.1', pull_port="5559")

    service_manager = ServiceManager(subscriber, pusher, puller, kill_switch)

    try:
        s = service_manager.get_server()
Example #2
            L.append(tmp)
        predict = heapq.nlargest(pred_num, range(len(L)), L.__getitem__)
        res.append(predict)
    return res

# Finetuning
def pred_logi_classification(test_data, mu, params, pred_num=1):
    res = []
    for data in test_data:
        value = np.dot(data, params[:, :-1].T) + params[:, -1]
        predict = heapq.nlargest(pred_num, range(len(value)), value.__getitem__)
        res.append(predict)
    return res

# Testing on basic baselines (including cosine, euclid, finetuning)
output = mp.Queue()
def calc_accuracy(cls, data, oneshot_avg, logi_params, method):
    cnt = 0.0
    result = []
    if method == 'cosine':
        result = cosine_classification(data, oneshot_avg)
    elif method == 'eucilid':
        result = eucilid_classification(data, oneshot_avg)
    elif method == 'logi' or method == 'vager':
        result = pred_logi_classification(data, oneshot_avg, logi_params)
    for it in result:
        if cls in it:
            cnt += 1.0
    output.put([cls, cnt / len(result)])

def testing(base_avg, oneshot_data, test_data, X, W, method, fusion, _logiparam=None):
Example #3
def get_data(artist=None, album=None, title=None, otherinfo=None, \
        request=(), timeout=None, filename=None, analyzer='first_match',
        plugin_filter=None):
    '''
    Get data about a song

    :param otherinfo: Other metadata, not worth a dedicated function parameter
    :type otherinfo: dict
    :param request: all needed metadata. If empty, all will be searched
    :type request: tuple
    :param timeout: timeout in seconds, None for no timeout
    :rtype: dict
    '''
    # retrievers write to results; when every retriever has finished, the
    # waiter writes to results; the analyzer reads from results
    results = multiprocessing.Queue()
    # analyzer write in response, main process read (with optional timeout)
    response = multiprocessing.Queue()
    # This is a trick: finished is filled with one placeholder value per
    # process; each wrapped retriever pops a value when it starts and marks it
    # done when it exits, so finished.join() is equivalent to joining every
    # process. The advantage is that it can be done in a separate process.
    finished = multiprocessing.JoinableQueue()

    # even worse trick: every "improvement" the analyzer finds is written
    # here, and the analyzer tries to keep it as the only value in the queue,
    # so that the main thread can just get()
    best = multiprocessing.Queue()
    if analyzer is None:
        analyzer = 'first_match'
    analyzer = multiprocessing.Process(target=_get_analyzer(analyzer),
                                       args=(request, results, response, best))
    analyzer.name = 'analyzer'
    analyzer.daemon = True
    analyzer.start()

    def retriever_wrapper(name, retriever, results, finished):
        '''Call a retriever, handle its results'''
        def wrapped():
            '''Provide transparent concurrency for retrievers'''
            finished.get()
            try:
                res = retriever()
            except Exception as exc:
                results.put((name, 'error', exc))
            else:
                results.put((name, 'ok', res))
            finally:
                finished.task_done()

        return wrapped

    processes = []
    for name, retriever in get_ready_retrievers(artist, album, title, \
            otherinfo, request, timeout, filename, filter_=plugin_filter):
        finished.put(True)
        wrapped_retriever = retriever_wrapper(name, retriever, results,
                                              finished)
        p = multiprocessing.Process(target=wrapped_retriever)
        processes.append(p)
        p.daemon = True
        p.name = name
        p.start()

    def waiter(q, res):
        '''wait for every retriever to join, then unlock main flow'''
        q.join()  # returns once every process has finished
        res.put('finished')

    w = multiprocessing.Process(target=waiter, args=(finished, results))
    w.daemon = True
    w.start()

    try:
        res = response.get(block=True, timeout=timeout)
    except Queue.Empty as exc:
        print('no response')
        try:
            best_res = best.get_nowait()
        except Queue.Empty:
            return None
        else:
            print('best I found:', best_res)
            return best_res
    else:
        return res
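The comments above describe the JoinableQueue trick: one token per retriever, and a waiter process that joins the queue to unblock the main flow. Below is a minimal standalone sketch of that pattern, with a toy worker standing in for the real retrievers (all names here are hypothetical, not the project's API).

import multiprocessing
import queue

def worker(finished, results, n):
    finished.get()                      # claim this worker's token
    try:
        results.put(('ok', n * n))      # stand-in for real retriever work
    finally:
        finished.task_done()            # mark the token as processed

def waiter(finished, results):
    finished.join()                     # returns once every token is done
    results.put('finished')

if __name__ == '__main__':
    finished = multiprocessing.JoinableQueue()
    results = multiprocessing.Queue()
    for n in range(3):
        finished.put(True)              # one token per worker
        multiprocessing.Process(target=worker, args=(finished, results, n),
                                daemon=True).start()
    multiprocessing.Process(target=waiter, args=(finished, results),
                            daemon=True).start()
    collected = []
    while True:                         # read until the waiter says we are done
        item = results.get()
        if item == 'finished':
            break
        collected.append(item)
    while True:                         # drain results that raced the marker
        try:
            collected.append(results.get_nowait())
        except queue.Empty:
            break
    print(collected)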
Example #4
 def __init__(self, *args, **kwargs):
     super(SimpleChromeArtifactsStage, self).__init__(*args, **kwargs)
     self._upload_queue = multiprocessing.Queue()
     self._pkg_dir = os.path.join(self._build_root,
                                  constants.DEFAULT_CHROOT_DIR, 'build',
                                  self._current_board, 'var', 'db', 'pkg')
Example #5
 def output_queue(self):
     """The output queue (a :class:`multiprocessing.Queue` object)."""
     return multiprocessing.Queue(self.concurrency)
        for j in range(1, Xtrain.shape[0]):
            dis = np.linalg.norm(nowXtest - Xtrain[j, :])
            if dis < dis_smallest:
                dis_smallest = dis
                idx = j
        ### Now idx is the index for the nearest neighbor

        ## check whether the predicted label matches the true label
        if ytest[i] == ytrain[idx]:
            correct += 1
    acc = correct / float(Xtest.shape[0])
    return acc


start_time = time.time()
sharedQueue = mp.Queue()
coreCount = 4
delta = Xtest.shape[0] // coreCount
processes = list((mp.Process(target=subProcess,
                             args=(Xtrain, ytrain, Xtest, ytest, sharedQueue,
                                   i * delta, (i + 1) * delta))
                  for i in range(coreCount - 1)))
i = coreCount - 1
processes += [
    mp.Process(target=subProcess,
               args=(Xtrain, ytrain, Xtest, ytest, sharedQueue, i * delta,
                     Xtest.shape[0]))
]

for p in processes:
    p.start()
def semisupervised_learning(g,
                            truth_dict,
                            kwargs_list,
                            nprocs=1,
                            size_ratio=0.1,
                            use_bfs=False,
                            flowmethod="mqi_weighted",
                            use_spectral=True):
    l1reg_PR_all = []
    l1reg_RC_all = []
    l1reg_F1_all = []
    flow_PR_all = []
    flow_RC_all = []
    flow_F1_all = []
    l1reg_PR_curr = defaultdict(list)
    l1reg_RC_curr = defaultdict(list)
    l1reg_F1_curr = defaultdict(list)
    flow_PR_curr = defaultdict(list)
    flow_RC_curr = defaultdict(list)
    flow_F1_curr = defaultdict(list)

    def wrapper(pid, q_in, q_out):
        while True:
            kwargs, kwargs_id, trial_id, delta, ratio = q_in.get()
            if kwargs is None:
                break
            nlabels = len(list(truth_dict.keys()))
            l1reg_labels = np.zeros(g._num_vertices) - 1
            true_labels = np.zeros(g._num_vertices) - 1
            flow_labels = np.zeros(g._num_vertices) - 1
            ranking = np.zeros(g._num_vertices) - 1
            npositives = 0
            for lid, label in enumerate(sorted(list(truth_dict.keys()))):
                truth = truth_dict[label]
                npositives += len(truth)
                true_labels[truth] = lid
                nseeds = int(ratio * len(truth))
                np.random.seed(int(1000 * time.time()) % (2**32 - 1))
                seeds = np.random.choice(truth, nseeds)
                if use_spectral:
                    l1reg_ids, l1reg_vals = approximate_PageRank(
                        g, seeds, **kwargs)
                    sorted_indices = np.argsort(-1 * l1reg_vals)
                    for i, idx in enumerate(sorted_indices):
                        if ranking[l1reg_ids[idx]] == -1 or i < ranking[
                                l1reg_ids[idx]]:
                            ranking[l1reg_ids[idx]] = i
                            l1reg_labels[l1reg_ids[idx]] = lid
                if use_bfs:
                    seeds = seed_grow_bfs(g, seeds, size_ratio)
                flow_output = flow_clustering(g,
                                              seeds,
                                              method=flowmethod,
                                              delta=delta)[0]
                for i, idx in enumerate(flow_output):
                    if flow_labels[idx] == -1:
                        flow_labels[idx] = lid
                    else:
                        flow_labels[idx] = nlabels + 1
            if use_spectral:
                l1reg_PR = np.sum(
                    (l1reg_labels
                     == true_labels)) / (1.0 * np.sum(l1reg_labels != -1))
                l1reg_RC = np.sum(
                    (l1reg_labels == true_labels)) / (1.0 * npositives)
                l1reg_F1 = 2 * (l1reg_PR * l1reg_RC) / (
                    l1reg_PR + l1reg_RC) if (l1reg_PR + l1reg_RC) > 0 else 0
            else:
                l1reg_PR, l1reg_RC, l1reg_F1 = 0, 0, 0
            # l1reg_PR_curr.append(l1reg_PR)
            # l1reg_RC_curr.append(l1reg_RC)
            # l1reg_F1_curr.append()
            flow_PR = np.sum(
                (flow_labels
                 == true_labels)) / (1.0 * np.sum(flow_labels != -1))
            flow_RC = np.sum((flow_labels == true_labels)) / (1.0 * npositives)
            flow_F1 = 2 * (flow_PR * flow_RC) / (flow_PR + flow_RC) if (
                flow_PR + flow_RC) > 0 else 0
            # flow_PR_curr.append(flow_PR)
            # flow_RC_curr.append(flow_RC)
            # flow_F1_curr.append()
            q_out.put((kwargs_id, trial_id, l1reg_PR, l1reg_RC, l1reg_F1,
                       flow_PR, flow_RC, flow_F1))

    q_in, q_out = mp.Queue(), mp.Queue()
    ntrials = 20
    for kwargs_id in range(len(kwargs_list)):
        kwargs = copy.deepcopy(kwargs_list[kwargs_id])
        delta = kwargs["delta"]
        del kwargs["delta"]
        ratio = kwargs["ratio"]
        del kwargs["ratio"]
        for trial_id in range(ntrials):
            q_in.put((kwargs, kwargs_id, trial_id, delta, ratio))
    for _ in range(nprocs):
        q_in.put((None, None, None, None, None))
    procs = [
        mp.Process(target=wrapper, args=(pid, q_in, q_out))
        for pid in range(nprocs)
    ]
    for p in procs:
        p.start()
    ncounts = 0
    while ncounts < len(kwargs_list) * ntrials:
        if ncounts % 10 == 0:
            print("Finished " + str(ncounts) + "/" +
                  str(len(kwargs_list) * ntrials) + " experiments.")
        kwargs_id, trial_id, l1reg_PR, l1reg_RC, l1reg_F1, flow_PR, flow_RC, flow_F1 = q_out.get(
        )
        l1reg_PR_curr[kwargs_id].append(l1reg_PR)
        l1reg_RC_curr[kwargs_id].append(l1reg_RC)
        l1reg_F1_curr[kwargs_id].append(l1reg_F1)
        flow_PR_curr[kwargs_id].append(flow_PR)
        flow_RC_curr[kwargs_id].append(flow_RC)
        flow_F1_curr[kwargs_id].append(flow_F1)
        if trial_id == ntrials - 1:
            l1reg_PR_all.append((np.mean(l1reg_PR_curr[kwargs_id]),
                                 np.std(l1reg_PR_curr[kwargs_id])))
            l1reg_RC_all.append((np.mean(l1reg_RC_curr[kwargs_id]),
                                 np.std(l1reg_RC_curr[kwargs_id])))
            l1reg_F1_all.append((np.mean(l1reg_F1_curr[kwargs_id]),
                                 np.std(l1reg_F1_curr[kwargs_id])))
            flow_PR_all.append((np.mean(flow_PR_curr[kwargs_id]),
                                np.std(flow_PR_curr[kwargs_id])))
            flow_RC_all.append((np.mean(flow_RC_curr[kwargs_id]),
                                np.std(flow_RC_curr[kwargs_id])))
            flow_F1_all.append((np.mean(flow_F1_curr[kwargs_id]),
                                np.std(flow_F1_curr[kwargs_id])))
        ncounts += 1
    for p in procs:
        p.join()

    del procs
    del p
    del q_in
    del q_out
    del wrapper
    return locals()
Example #8
    def optimize(self,
                 num_vars,
                 objective_function,
                 gradient_function=None,
                 variable_bounds=None,
                 initial_point=None):
        num_procs = multiprocessing.cpu_count() - 1
        num_procs = \
            num_procs if self._max_processes is None else min(num_procs, self._max_processes)
        num_procs = num_procs if num_procs >= 0 else 0

        if platform.system() == 'Darwin':
            # Changed in version 3.8: On macOS, the spawn start method is now the
            # default. The fork start method should be considered unsafe as it can
            # lead to crashes.
            # However P_BFGS doesn't support spawn, so we revert to single process.
            major, minor, _ = platform.python_version_tuple()
            if int(major) > 3 or (int(major) == 3 and int(minor) >= 8):
                num_procs = 0
                logger.warning(
                    "For MacOS, python >= 3.8, using only current process. "
                    "Multiple core use not supported.")
        elif platform.system() == 'Windows':
            num_procs = 0
            logger.warning("For Windows, using only current process. "
                           "Multiple core use not supported.")

        queue = multiprocessing.Queue()
        # bounds for additional initial points in case bounds has any None values
        threshold = 2 * np.pi
        if variable_bounds is None:
            variable_bounds = [(-threshold, threshold)] * num_vars
        low = [(l if l is not None else -threshold)
               for (l, u) in variable_bounds]
        high = [(u if u is not None else threshold)
                for (l, u) in variable_bounds]

        def optimize_runner(_queue, _i_pt):  # Multi-process sampling
            _sol, _opt, _nfev = self._optimize(num_vars, objective_function,
                                               gradient_function,
                                               variable_bounds, _i_pt)
            _queue.put((_sol, _opt, _nfev))

        # Start off as many other processes running the optimize (can be 0)
        processes = []
        for _ in range(num_procs):
            i_pt = aqua_globals.random.uniform(
                low, high)  # Another random point in bounds
            proc = multiprocessing.Process(target=optimize_runner,
                                           args=(queue, i_pt))
            processes.append(proc)
            proc.start()

        # While the one _optimize below runs in this process, the other
        # processes will also be running. This one runs with the supplied
        # initial point; the spawned processes each have their own random one.
        sol, opt, nfev = self._optimize(num_vars, objective_function,
                                        gradient_function, variable_bounds,
                                        initial_point)

        for proc in processes:
            # For each other process we wait now for it to finish and see if it has
            # a better result than above
            proc.join()
            p_sol, p_opt, p_nfev = queue.get()
            if p_opt < opt:
                sol, opt = p_sol, p_opt
            nfev += p_nfev

        return sol, opt, nfev
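A compact sketch of the multi-start pattern the method above implements, with a toy gradient-descent objective standing in for self._optimize: each child pushes its (solution, value, nfev) tuple onto the queue and the parent keeps the best result after joining. The function and variable names are hypothetical, not the project's API.

import multiprocessing
import random

def optimize_runner(q, x0):
    # toy stand-in for self._optimize: a few gradient steps on f(x) = (x - 3)^2
    x = x0
    for _ in range(100):
        x -= 0.1 * 2 * (x - 3.0)
    q.put((x, (x - 3.0) ** 2, 100))     # (solution, objective value, nfev)

if __name__ == '__main__':
    q = multiprocessing.Queue()
    procs = []
    for _ in range(3):
        p = multiprocessing.Process(target=optimize_runner,
                                    args=(q, random.uniform(-5.0, 5.0)))
        procs.append(p)
        p.start()
    # the parent would normally run its own optimization here, as above
    sol, opt, nfev = 0.0, (0.0 - 3.0) ** 2, 0
    for p in procs:
        p.join()                        # wait for each child, then compare
        p_sol, p_opt, p_nfev = q.get()
        if p_opt < opt:
            sol, opt = p_sol, p_opt
        nfev += p_nfev
    print(sol, opt, nfev)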
Example #9
 def __init__(self):
     self.queue = multiprocessing.Queue()
Example #10
                        '--numberofncs',
                        dest='number_of_ncs',
                        type=int,
                        default=1,
                        help='Number of NCS. (Default=1)')
    args = parser.parse_args()

    number_of_ncs = args.number_of_ncs
    camera_width = int(cv2.VideoCapture(0).get(cv2.CAP_PROP_FRAME_WIDTH))
    camera_height = int(cv2.VideoCapture(0).get(cv2.CAP_PROP_FRAME_HEIGHT))
    vidfps = 30

    try:

        mp.set_start_method('forkserver')
        frameBuffer = mp.Queue(10)
        results = mp.Queue()

        # Start detection MultiStick
        # Activation of inferencer
        p = mp.Process(target=inferencer,
                       args=(results, frameBuffer, number_of_ncs, camera_width,
                             camera_height, vidfps),
                       daemon=True)
        p.start()
        processes.append(p)

        sleep(number_of_ncs * 7)

        # Start streaming
        p = mp.Process(target=camThread,
Example #11
    DEVNULL = open(os.devnull, 'w')
    while True:
        ip = job_q.get()
        if ip is None: break

        try:
            subprocess.check_call(['ping', '-c1', ip], stdout=DEVNULL)
            results_q.put(ip)
        except:
            pass


if __name__ == '__main__':
    pool_size = 255

    jobs = multiprocessing.Queue()
    results = multiprocessing.Queue()

    pool = [
        multiprocessing.Process(target=pinger, args=(jobs, results))
        for i in range(pool_size)
    ]

    for p in pool:
        p.start()

    for i in range(150, 255):
        jobs.put('10.45.14.{0}'.format(i))

    for p in pool:
        jobs.put(None)
Protocol:
    The camera process polls the sonar and takes a picture, writes the picture
    to a directory, and puts a notification in the queue.

    This process checks the queue; when a notification arrives it reads the
    image and puts an acknowledgement in the queue.

    Once the acknowledgement is read, the camera process may take a new picture.

"""
pictureExists = False
newPicture = False
acceptNextImage = True
objectImg = "/images/download.jpg"
buffer = None

# queues to pass messages between processes
imageQueue = mp.Queue()
ackQueue = mp.Queue()


# The App class implements the touchscreen app.
# It contains several pages, including the Login Page, Landing Page, Regular Items Page and Custom Items Page.
class App(tk.Tk):
    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        self.attributes('-fullscreen', True)

        self.canvas = tk.Canvas(self, bg=backgroundColour)
        self.canvas.pack(fill=tk.BOTH, expand=True)

        # Set up Menu
        MainMenu(self)
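A minimal sketch of the two-queue handshake described in the protocol docstring above, with hypothetical stand-ins for the camera and consumer processes (no sonar polling or file I/O).

import multiprocessing as mp

def camera_process(image_queue, ack_queue):
    for frame_id in range(3):
        # pretend to poll the sonar and write an image to disk here
        image_queue.put("/images/frame_%d.jpg" % frame_id)  # notify the consumer
        ack_queue.get()                 # block until the picture is acknowledged

def consumer_process(image_queue, ack_queue):
    for _ in range(3):
        path = image_queue.get()        # wait for a notification
        print("processing", path)       # pretend to read and use the image
        ack_queue.put(True)             # let the camera take the next picture

if __name__ == "__main__":
    image_queue, ack_queue = mp.Queue(), mp.Queue()
    cam = mp.Process(target=camera_process, args=(image_queue, ack_queue))
    con = mp.Process(target=consumer_process, args=(image_queue, ack_queue))
    cam.start()
    con.start()
    cam.join()
    con.join()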
Example #13
    log = logging.getLogger(f'send-{who}')
    log.addHandler(stdout_handler)
    log.setLevel(logging.INFO)
    return log


log = get_logger('main')

KAFKA_BOOTSTRAP_SERVERS = ['kafka:9092']
if 'KAFKA_BOOTSTRAP_SERVERS' in os.environ:
    KAFKA_BOOTSTRAP_SERVERS = os.environ['KAFKA_BOOTSTRAP_SERVERS'].split(',')

NUVLA_ENDPOINT = (os.environ.get('NUVLA_ENDPOINT')
                  or 'https://nuvla.io').rstrip('/')

work_queue = multiprocessing.Queue()


def kafka_consumer(topic,
                   bootstrap_servers,
                   group_id,
                   auto_offset_reset='latest'):
    consumer = KafkaConsumer(
        topic,
        bootstrap_servers=bootstrap_servers,
        auto_offset_reset=auto_offset_reset,
        group_id=group_id,
        key_deserializer=lambda x: str(x.decode()),
        value_deserializer=lambda x: json.loads(x.decode()))
    log.info("Kafka consumer created.")
    return consumer
Example #14
            time.sleep(0.01)
      except Queue.Empty:
         pass
      except (IOError, EOFError, KeyboardInterrupt) as e:
         debug("got a breaking error: %s" % e)
         break
      except Exception as e:
         debug("EXCEPTION DURING WORKER PROCESSING: %s" % e)
         traceback.print_exc()
         break

loader = DataLoader()

workers = []
for i in range(NUM_WORKERS):
   main_q = multiprocessing.Queue()
   res_q  = multiprocessing.Queue()
   worker_p = multiprocessing.Process(target=worker, args=(main_q, res_q, loader))
   worker_p.start()
   workers.append((worker_p, main_q, res_q))

res_q = multiprocessing.Queue()
res_p = multiprocessing.Process(target=results, args=(res_q, workers))
res_p.start()

def send_data(obj):
   global cur_worker
   global workers
   global pending_results

   (w_proc, main_q, wrkr_q) = workers[cur_worker]
import multiprocessing as mp



def job(q):
	res = 0
	for i in range(1000):
		res += i + i**2 + i**3
	q.put(res)


if __name__=='__main__':
	q = mp.Queue()
	p1 = mp.Process(target=job,args=(q,))
	p2 = mp.Process(target=job,args=(q,))
	p1.start()
	p2.start()
	p1.join()
	p2.join()

	res1 = q.get()
	res2 = q.get()

	print(res1,res2)
        queue.put(i)
        time.sleep(0.3)

def read_data(queue):
    while True:
        # check whether the queue is empty
        if queue.empty():
            break
        value = queue.get()
        print(value)

if __name__ == '__main__':
    # by default a queue can hold multiple items of arbitrary type;
    # Queue(3) caps its size at three items
    queue = multiprocessing.Queue(3)

    add_process = multiprocessing.Process(target=add_data, args=(queue,))
    read_process = multiprocessing.Process(target=read_data, args=(queue,))

    # start the processes to run their tasks
    add_process.start()
    # the writer gets a small head start so the data is loaded before it is read
    read_process.start()

# Summary: multitasking can use either threads or processes.
# From a resource point of view, threads are cheaper;
# creating and destroying a process costs more resources.
# In terms of stability, multiprocessing beats multithreading,
# because one crashed process does not affect other applications.
Example #17
 def get_multiprocessing_logging_queue():  # pylint: disable=invalid-name
     '''
     Mock
     '''
     import multiprocessing
     return multiprocessing.Queue()
Example #18
class MultiPlexIO:
    incoming_messages = multiprocessing.Queue()
    outgoing_messages = multiprocessing.Queue()
    exceptions = multiprocessing.Queue()

    flag = {"inc": True, "out": True, "exc": True}

    def __init__(self, **kwargs):
        self.host = kwargs.get("host")
        self.port = kwargs.get("port")
        self.qsize = kwargs.get("backlog")
        if self.qsize:
            self.role = "server"
        else:
            self.role = "client"

        self.proto = kwargs.get("proto")
        self.nbytes = kwargs.get("bytes")

    def serve_forever(self, consumer_target, *target_parameters):
        evproducer = multiprocessing.Process(target=self.evloop)
        evproducer.daemon = True
        consumer = multiprocessing.Process(target=consumer_target,
                                           args=target_parameters)
        consumer.daemon = True
        evproducer.start()
        consumer.start()
        evproducer.join(1)
        consumer.join(1)

    def createsocket(self, port_offset):
        if self.proto == "tcp":
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        elif self.proto == "udp":
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        if self.role == "client":
            sock.connect((self.host, self.port))
        elif self.role == "server":
            sock.setblocking(True)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind((self.host, self.port + port_offset))
            sock.listen(self.qsize)
        return sock

    def evloop(self):
        flag = True
        try:
            # one socket each for read, write and exception monitoring
            rlist = self.createsocket(0)
            wlist = self.createsocket(1)
            elist = self.createsocket(2)
            while flag:
                readable, writeable, exceptable = select.select([rlist],
                                                                [wlist],
                                                                [elist])
                self.recvdata(readable)
                self.senddata(writeable)
                self.getexceptions(exceptable)
        except KeyboardInterrupt:
            flag = False

    def recvdata(self, readable):
        if self.role == "server":
            for each in readable:
                #data = each.accept()[0].recv(self.nbytes).decode("utf-8").split("\n")
                data = each.accept()[0].recv(self.nbytes).decode("utf-8")
                print(data)
                self.incoming_messages.put_nowait(data)

        if self.role == "client":
            for each in readable:
                data = each.recv(self.nbytes).decode("utf-8").split(
                    "\n")  # makes a list
                print(data)
                self.incoming_messages.put_nowait(data)

    def senddata(self, writeable):
        if self.role in ["client", "server"]:
            for each in writeable:
                if self.flag["out"] == False:
                    data = self.outgoing_messages.get()
                    print(data)
                    each.send(bytes(data, "utf-8"))

    def getexceptions(self, exc):
        for each in exc:
            self.exceptions.put_nowait(each)

    def read_exceptions(self):
        while (self.exceptions.empty() != True):
            print(self.exceptions.get())
Example #19
 def setUp(self):
     self.prompt = "pipeline>"
     self.q_in = multiprocessing.Queue()
     self.q_out = multiprocessing.Queue()
    def start_capture(self, cli=LinuxCLI(), interface='any',
                      count=0, packet_type='', pcap_filter=None,
                      max_size=0, timeout=None, callback=None,
                      callback_args=None, blocking=False,
                      save_dump_file=False, save_dump_filename=None):
        """
        Capture <count> packets using tcpdump and add them to a Queue
        of PCAPPackets. Use wait_for_packets to retrieve the packets
        as a list (or wait for them to come in). The pcap_filter parameter
        should be a set of pcap_rules which can be combined to create
        practically any pcap_filter ruleset.  A callback may also be
        provided which must be a callable which takes at least a single
        PCAPPacket as an argument.  This callable will be called when
        each packet arrives along with any provided arguments (as a list).
        A timeout may be provided in case of a blocking call, which
        will limit the blocking call to timeout seconds.  This time
        limit only applies to the execution of tcpdump if blocking is
        set to True.  The optional save_dump_file parameter can be set
        to true to save the temporary packet capture file to the given
        save file name (use tcp.out.<timestamp> if name not provided)

        :type cli: LinuxCLI
        :type interface: str
        :type count: int
        :type packet_type: str
        :type pcap_filter: pcap_rule
        :type max_size: int
        :type timeout: int
        :type callback: callable
        :type callback_args: list[T]
        :type blocking: bool
        :type save_dump_file: bool
        :type save_dump_filename: str
        :return:
        """
        # Don't run twice in a row
        if self.process is not None:
            raise exceptions.SubprocessFailedException(
                'tcpdump process already started')

        # Set up synchronization queues and events
        self.data_queue = multiprocessing.Queue()
        self.subprocess_info_queue = multiprocessing.Queue()

        self.tcpdump_ready = multiprocessing.Event()
        self.tcpdump_error = multiprocessing.Event()
        self.tcpdump_stop = multiprocessing.Event()
        self.tcpdump_finished = multiprocessing.Event()

        self.tcpdump_ready.clear()
        self.tcpdump_error.clear()
        self.tcpdump_stop.clear()
        self.tcpdump_finished.clear()
        self.tcpdump_pid = None

        kwarg_map = {'cli': cli,
                     'interface': interface,
                     'count': count,
                     'packet_type': packet_type,
                     'pcap_filter': pcap_filter,
                     'max_size': max_size,
                     'flag_set': (self.tcpdump_ready, self.tcpdump_error,
                                  self.tcpdump_stop, self.tcpdump_finished),
                     'packet_queues': (self.data_queue,
                                       self.subprocess_info_queue),
                     'callback': callback,
                     'callback_args': callback_args,
                     'save_dump_file': save_dump_file,
                     'save_dump_filename': save_dump_filename
                     }
        self.process = multiprocessing.Process(target=tcpdump_start,
                                               args=(kwarg_map,))
        self.process.start()
        deadline_time = time.time() + TCPDUMP_LISTEN_START_TIMEOUT
        while not self.tcpdump_ready.is_set():
            if time.time() > deadline_time:
                self.process.terminate()
                raise exceptions.SubprocessFailedException(
                    "tcpdump failed to start listening within timeout")
            if self.tcpdump_error.is_set():
                error_info = self.subprocess_info_queue.get(timeout=2)
                if 'error' in error_info:
                    raise exceptions.SubprocessFailedException(
                        'tcpdump error { ' +
                        'retcode[' + str(error_info['returncode']) + '] ' +
                        'reason [' + error_info['error'] + '] ' +
                        'stdout [' + error_info['stdout'] + '] ' +
                        'stderr [' + error_info['stderr'] + '] }')
                raise exceptions.SubprocessFailedException(
                    'tcpdump error UNKNOWN')
            time.sleep(0)

        if blocking is True:
            self.process.join(timeout)
            if self.process.is_alive():
                raise exceptions.SubprocessTimeoutException(
                    'tcpdump failed to receive packets within timeout')
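A stripped-down sketch, not the original tcpdump code, of the ready/error handshake the method above sets up: the child sets the ready event once it is listening, or sets the error event and reports details on a queue, while the parent polls both against a deadline.

import multiprocessing
import time

def capture_worker(ready, error, info_queue):
    try:
        time.sleep(0.2)                 # pretend to start a capture tool here
        ready.set()                     # signal that we are listening
        time.sleep(0.5)                 # pretend to capture packets
    except Exception as exc:            # report failures instead of dying silently
        info_queue.put({'error': str(exc)})
        error.set()

if __name__ == '__main__':
    ready = multiprocessing.Event()
    error = multiprocessing.Event()
    info_queue = multiprocessing.Queue()
    proc = multiprocessing.Process(target=capture_worker,
                                   args=(ready, error, info_queue))
    proc.start()
    deadline = time.time() + 5.0
    while not ready.is_set():
        if error.is_set():
            raise RuntimeError('worker failed: %s' % info_queue.get(timeout=2))
        if time.time() > deadline:
            proc.terminate()
            raise RuntimeError('worker did not become ready in time')
        time.sleep(0.01)
    print('worker is ready')
    proc.join()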
def semisupervised_learning_with_improve(g, truth, kwargs_list, nprocs=1):
    input_size_all = []
    l1reg_PR_all = []
    l1reg_RC_all = []
    l1reg_F1_all = []
    mqi_PR_all = []
    mqi_RC_all = []
    mqi_F1_all = []
    flow_PR_all = []
    flow_RC_all = []
    flow_F1_all = []

    def wrapper(q_in, q_out):
        while True:
            kwargs = q_in.get()
            if kwargs is None:
                break
            delta = kwargs["delta"]
            del kwargs["delta"]
            ntrials = 0
            input_size_curr = []
            l1reg_PR_curr = []
            l1reg_RC_curr = []
            l1reg_F1_curr = []
            mqi_PR_curr = []
            mqi_RC_curr = []
            mqi_F1_curr = []
            flow_PR_curr = []
            flow_RC_curr = []
            flow_F1_curr = []
            while ntrials < 20:
                seed_node = np.random.choice(truth)
                l1reg_output = spectral_clustering(g, [seed_node], **kwargs)[0]
                if len(l1reg_output) == 0:
                    continue
                input_size_curr.append(len(l1reg_output))
                if g._weighted:
                    mqi_output = flow_clustering(g,
                                                 l1reg_output,
                                                 method="mqi_weighted")[0]
                    flow_output = flow_clustering(g,
                                                  l1reg_output,
                                                  method="flow_weighted",
                                                  delta=delta)[0]
                else:
                    mqi_output = flow_clustering(g, l1reg_output,
                                                 method="mqi")[0]
                    flow_output = flow_clustering(g,
                                                  l1reg_output,
                                                  method="flow",
                                                  delta=delta)[0]
                l1reg_PR = len(set(truth).intersection(l1reg_output)) / (
                    1.0 * len(l1reg_output))
                l1reg_RC = len(
                    set(truth).intersection(l1reg_output)) / (1.0 * len(truth))
                l1reg_PR_curr.append(l1reg_PR)
                l1reg_RC_curr.append(l1reg_RC)
                l1reg_F1_curr.append(
                    2 * (l1reg_PR * l1reg_RC) / (l1reg_PR + l1reg_RC)
                    if (l1reg_PR + l1reg_RC) > 0 else 0)
                mqi_PR = len(set(truth).intersection(mqi_output)) / (
                    1.0 * len(mqi_output))
                mqi_RC = len(
                    set(truth).intersection(mqi_output)) / (1.0 * len(truth))
                mqi_PR_curr.append(mqi_PR)
                mqi_RC_curr.append(mqi_RC)
                mqi_F1_curr.append(
                    2 * (mqi_PR * mqi_RC) / (mqi_PR + mqi_RC)
                    if (mqi_PR + mqi_RC) > 0 else 0)
                flow_PR = len(set(truth).intersection(flow_output)) / (
                    1.0 * len(flow_output))
                flow_RC = len(
                    set(truth).intersection(flow_output)) / (1.0 * len(truth))
                flow_PR_curr.append(flow_PR)
                flow_RC_curr.append(flow_RC)
                flow_F1_curr.append(
                    2 * (flow_PR * flow_RC) / (flow_PR + flow_RC)
                    if (flow_PR + flow_RC) > 0 else 0)
                ntrials += 1
            q_out.put((np.mean(input_size_curr), np.std(input_size_curr),
                       np.mean(l1reg_PR_curr), np.std(l1reg_PR_curr),
                       np.mean(l1reg_RC_curr), np.std(l1reg_RC_curr),
                       np.mean(l1reg_F1_curr), np.std(l1reg_F1_curr),
                       np.mean(mqi_PR_curr), np.std(mqi_PR_curr),
                       np.mean(mqi_RC_curr), np.std(mqi_RC_curr),
                       np.mean(mqi_F1_curr), np.std(mqi_F1_curr),
                       np.mean(flow_PR_curr), np.std(flow_PR_curr),
                       np.mean(flow_RC_curr), np.std(flow_RC_curr),
                       np.mean(flow_F1_curr), np.std(flow_F1_curr)))

    q_in, q_out = mp.Queue(), mp.Queue()
    for kwargs in kwargs_list:
        q_in.put(kwargs)
    for _ in range(nprocs):
        q_in.put(None)
    procs = [
        mp.Process(target=wrapper, args=(q_in, q_out)) for _ in range(nprocs)
    ]
    for p in procs:
        p.start()
    ncounts = 0
    while ncounts < len(kwargs_list):
        output = q_out.get()
        input_size_all.append((output[0], output[1]))
        l1reg_PR_all.append((output[2], output[3]))
        l1reg_RC_all.append((output[4], output[5]))
        l1reg_F1_all.append((output[6], output[7]))
        mqi_PR_all.append((output[8], output[9]))
        mqi_RC_all.append((output[10], output[11]))
        mqi_F1_all.append((output[12], output[13]))
        flow_PR_all.append((output[14], output[15]))
        flow_RC_all.append((output[16], output[17]))
        flow_F1_all.append((output[18], output[19]))
        ncounts += 1
    for p in procs:
        p.join()
    return locals()
Example #22
    def run(self, genomic_files, tln_tables):
        """Run Prodigal across a set of genomes.

        Parameters
        ----------
        genomic_files : dict
            Dictionary indicating the genomic and gene file for each genome.
        tln_tables : Dict[str, int]
            Mapping of genome id to user-specified translation table.
        """

        # populate worker queue with data to process
        worker_queue = mp.Queue()
        writer_queue = mp.Queue()

        for genome_id, file_path in genomic_files.items():
            worker_queue.put((genome_id, file_path, tln_tables.get(genome_id)))

        for _ in range(self.threads):
            worker_queue.put(None)

        try:
            manager = mp.Manager()
            out_dict = manager.dict()

            worker_proc = [
                mp.Process(target=self._worker,
                           args=(out_dict, worker_queue, writer_queue))
                for _ in range(self.threads)
            ]
            writer_proc = mp.Process(target=self._writer,
                                     args=(len(genomic_files), writer_queue))

            writer_proc.start()
            for p in worker_proc:
                p.start()

            for p in worker_proc:
                p.join()

                # Gracefully terminate the program.
                if p.exitcode != 0:
                    raise ProdigalException(
                        'Prodigal returned a non-zero exit code.')

            writer_queue.put(None)
            writer_proc.join()
        except Exception:
            for p in worker_proc:
                p.terminate()

            writer_proc.terminate()
            raise ProdigalException(
                'An exception was caught while running Prodigal.')

        # Report on any genomes which failed to have any genes called
        result_dict = dict()
        lq_gids = list()
        for gid, gid_dict in out_dict.items():
            if os.path.getsize(gid_dict['aa_gene_path']) <= 1:
                lq_gids.append(gid)
            else:
                result_dict[gid] = gid_dict

        if len(lq_gids) > 0:
            self.logger.warning(
                f'Skipping {len(lq_gids)} of {len(genomic_files)} '
                f'genomes as no genes were called by Prodigal. '
                f'Check the genome quality (see gtdb.warnings.log).')
            self.warnings.warning(
                f'The following {len(lq_gids)} genomes have '
                f'been excluded from analysis due to Prodigal '
                f'failing to call any genes:')

            # If there are few low-quality genomes just output to console.
            if len(lq_gids) > 10:
                for lq_gid in lq_gids:
                    self.warnings.info(lq_gid)
            else:
                for lq_gid in lq_gids:
                    self.logger.warning(f'Skipping: {lq_gid}')
                    self.warnings.info(lq_gid)

        return result_dict
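A minimal sketch of the sentinel pattern used above: the worker queue is pre-loaded with one job per item plus one None per worker, so each worker exits cleanly when it pulls a sentinel (a toy job stands in for running Prodigal).

import multiprocessing as mp

def worker(worker_queue, writer_queue):
    while True:
        job = worker_queue.get()
        if job is None:                 # sentinel: no more work
            break
        writer_queue.put(job * job)     # stand-in for the real per-genome work

if __name__ == '__main__':
    threads = 3
    worker_queue, writer_queue = mp.Queue(), mp.Queue()
    for job in range(10):
        worker_queue.put(job)
    for _ in range(threads):
        worker_queue.put(None)          # one sentinel per worker
    procs = [mp.Process(target=worker, args=(worker_queue, writer_queue))
             for _ in range(threads)]
    for p in procs:
        p.start()
    results = [writer_queue.get() for _ in range(10)]  # drain before joining
    for p in procs:
        p.join()
    print(sorted(results))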
Example #23
    def _run_scenario(self, cls, method_name, context, args):
        """Runs the specified benchmark scenario with given arguments.

        Every single benchmark scenario iteration is executed with specified
        frequency (runs per second) in a pool of processes. The scenario will
        be launched for a fixed number of times in total (specified in the
        config).

        :param cls: The Scenario class where the scenario is implemented
        :param method_name: Name of the method that implements the scenario
        :param context: Benchmark context that contains users, admin & other
                        information, that was created before benchmark started.
        :param args: Arguments to call the scenario method with

        :returns: List of results for each single scenario iteration,
                  where each result is a dictionary
        """
        times = self.config["times"]
        timeout = self.config.get("timeout", 0)  # 0 means no timeout
        iteration_gen = utils.RAMInt()
        cpu_count = multiprocessing.cpu_count()
        processes_to_start = min(cpu_count, times,
                                 self.config.get("max_concurrency", times))
        rps_per_worker = float(self.config["rps"]) / processes_to_start
        times_per_worker, times_overhead = divmod(times, processes_to_start)

        # Determine concurrency per worker
        concurrency_per_worker, concurrency_overhead = divmod(
            self.config.get("max_concurrency", times), processes_to_start)

        self._log_debug_info(times=times,
                             timeout=timeout,
                             cpu_count=cpu_count,
                             processes_to_start=processes_to_start,
                             rps_per_worker=rps_per_worker,
                             times_per_worker=times_per_worker,
                             times_overhead=times_overhead,
                             concurrency_per_worker=concurrency_per_worker,
                             concurrency_overhead=concurrency_overhead)

        result_queue = multiprocessing.Queue()

        def worker_args_gen(times_overhead, concurrency_overhead):
            """Generate arguments for process worker.

            Remainder of threads per process division is distributed to
            process workers equally - one thread per each process worker
            until the remainder equals zero. The same logic is applied
            to concurrency overhead.
            :param times_overhead: remaining number of threads to be
                                   distributed to workers
            :param concurrency_overhead: remaining number of maximum
                                         concurrent threads to be distributed
                                         to workers
            """
            while True:
                yield (result_queue, iteration_gen, timeout, rps_per_worker,
                       times_per_worker + (times_overhead and 1),
                       concurrency_per_worker + (concurrency_overhead and 1),
                       context, cls, method_name, args, self.aborted)
                if times_overhead:
                    times_overhead -= 1
                if concurrency_overhead:
                    concurrency_overhead -= 1

        process_pool = self._create_process_pool(
            processes_to_start, _worker_process,
            worker_args_gen(times_overhead, concurrency_overhead))
        self._join_processes(process_pool, result_queue)
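A minimal sketch of the remainder distribution described in worker_args_gen's docstring: divmod splits the total evenly, then the remainder is handed out one extra unit per worker until it runs out (distribute is a hypothetical helper, not part of the code above).

def distribute(total, workers):
    per_worker, overhead = divmod(total, workers)
    shares = []
    for _ in range(workers):
        shares.append(per_worker + (1 if overhead else 0))
        if overhead:
            overhead -= 1
    return shares

print(distribute(10, 4))  # [3, 3, 2, 2]: the first two workers absorb the remainder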
    def test_generate_VOEvent(self):
        """
        Test that the VOEvent generator converts a trigger into a VOEvent
        """
        # init VOEvent Generator
        control_queue = mp.Queue()
        generator = VOEventGenerator(control_queue)
        # overwrite server location
        generator.server_host = 'localhost'
        # events are only generated if send = True
        generator.send_events = True
        # start the generator
        generator.start()
        sleep(1)
        # create two triggers, generator should pick highest S/N
        # define utc of trigger with highest S/N, which is also in the VOEvent filename
        trigger_utc = '2019-01-02T18:00:00.0'
        triggers = [{
            'dm': 56.791,
            'dm_err': .2,
            'width': 2.5,
            'snr': 10,
            'flux': 0.5,
            'ra': 83.63322083333333,
            'dec': 22.01446111111111,
            'ymw16': 0,
            'semiMaj': 15.,
            'semiMin': 15.,
            'name': 'B0531+21',
            'cb': 0,
            'importance': 0.1,
            'utc': '2019-01-01T18:00:00.0',
            'test': True
        }, {
            'dm': 56.791,
            'dm_err': .2,
            'width': 2.5,
            'snr': 50,
            'flux': 0.5,
            'ra': 83.63322083333333,
            'dec': 22.01446111111111,
            'ymw16': 0,
            'semiMaj': 15.,
            'semiMin': 15.,
            'name': 'B0531+21',
            'cb': 17,
            'importance': 0.1,
            'utc': trigger_utc,
            'test': True
        }]

        # get the queue
        VOEventQueueServer.register('get_queue')
        queue_server = VOEventQueueServer(
            address=(generator.server_host, generator.server_port),
            authkey=generator.server_auth.encode())
        queue_server.connect()
        queue = queue_server.get_queue()
        # send the triggers
        for trigger in triggers:
            queue.put(trigger)
        # wait and stop
        sleep(5)
        control_queue.put('stop')
        generator.join()
        # check the output file
        filename = os.path.join(generator.voevent_dir,
                                "{}.xml".format(trigger_utc))

        self.assertTrue(os.path.isfile(filename))

        # remove output file
        os.remove(filename)
Example #25
import multiprocessing
import time



def count(q):
	q.put("from client")
	print(q.get())

c = 80000000

if __name__ == '__main__':
	q = multiprocessing.Queue()
	t1 = multiprocessing.Process(target=count, args=(q,))
	t1.start()
	q.put("from server: hello")
	print(q.get())
	t1.join()
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    cfg.GPU_ID = args.gpu_id

    # --------------------------------------------------------------------------
    # Pycaffe doesn't reliably free GPU memory when instantiated nets are
    # discarded (e.g. "del net" in Python code). To work around this issue, each
    # training stage is executed in a separate process using
    # multiprocessing.Process.
    # --------------------------------------------------------------------------

    # queue for communicated results between processes
    mp_queue = mp.Queue()
    # solvers, max iters, etc. for each training stage
    solvers, max_iters, rpn_test_prototxt = get_solvers(args.net_name)

    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 1 RPN, init from ImageNet model'
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'

    cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
    mp_kwargs = dict(queue=mp_queue,
                     imdb_name=args.imdb_name,
                     init_model=args.pretrained_model,
                     solver=solvers[0],
                     max_iters=max_iters[0],
                     cfg=cfg)
    p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
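A minimal sketch of the pattern the comment above describes: run each training stage in its own process so its GPU and library state are released when the process exits, and pass the result back through a multiprocessing.Queue (train_stage and the returned path are hypothetical stand-ins).

import multiprocessing as mp

def train_stage(queue, **kwargs):
    # pretend to train here; only what is put on the queue survives the exit
    queue.put({'model_path': '/tmp/stage1_model.bin', 'config': kwargs})

if __name__ == '__main__':
    mp_queue = mp.Queue()
    p = mp.Process(target=train_stage, kwargs=dict(queue=mp_queue, max_iters=100))
    p.start()
    stage1_out = mp_queue.get()   # fetch before join to avoid a full-pipe deadlock
    p.join()
    print(stage1_out['model_path'])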
    # initialize webcam video object
    cap = cv2.VideoCapture(0)

    # width & height of webcam video in pixels -> adjust to your size
    # adjust values if you see black bars on the sides of capture window
    frameWidth = 1280
    frameHeight = 720

    # set width and height in pixels
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, frameWidth)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, frameHeight)
    # enable auto gain
    cap.set(cv2.CAP_PROP_GAIN, 0)

    # creating a queue to share data to speech process
    speakQ = multiprocessing.Queue()

    # creating speech process to not hang processor
    p1 = multiprocessing.Process(target=speak, args=(speakQ, ))

    # starting process 1 - speech
    p1.start()

    # keeps program running forever until ctrl+c or window is closed
    while True:

        # disable scientific notation for clarity
        np.set_printoptions(suppress=True)

        # Create the array of the right shape to feed into the keras model.
        # We are inputting 1x 224x224 pixel RGB image.
Example #28
                my_queue.put(f"{i}: from {mp.current_process().name}",
                             timeout=0.1)  # [block[, timeout]]
            except queue.Full:
                print('Queue is full')
            # if i <= 4 and not my_queue.full():
            #     my_queue.put(f"{i}: from {mp.current_process().name}", timeout=0.1)  # [block[, timeout]]
        except ValueError:
            print('Queue is closed')
            break
        if i == 4:
            my_queue.put("CLOSED!")
            my_queue.close()


if __name__ == '__main__':
    my_queue = mp.Queue(maxsize=32)
    prod_1 = mp.Process(target=proc_func, name="prod 1", args=(my_queue, ))
    prod_2 = mp.Process(target=proc_func, name="prod 2", args=(my_queue, ))
    prod_1.start()
    prod_2.start()
    closed_count = 0
    while my_queue:
        try:
            text = my_queue.get(block=False, timeout=0.1)
            if type(text) is str and len(text) > 0:
                print(text)  # [block[, timeout]]
                if text == "CLOSED!":
                    closed_count += 1
                if closed_count == 2:
                    print('END')
                    break
async def test_grpc_data_runtime_graceful_shutdown(close_method):
    args = set_pea_parser().parse_args([])

    cancel_event = multiprocessing.Event()
    handler_closed_event = multiprocessing.Event()
    slow_executor_block_time = 1.0
    pending_requests = 5
    sent_queue = multiprocessing.Queue()

    def start_runtime(args, cancel_event, sent_queue, handler_closed_event):
        with GRPCDataRuntime(args, cancel_event) as runtime:
            runtime._data_request_handler.handle = lambda *args, **kwargs: time.sleep(
                slow_executor_block_time
            )
            runtime._data_request_handler.close = (
                lambda *args, **kwargs: handler_closed_event.set()
            )

            async def mock(msg):
                sent_queue.put('')

            runtime._grpclet.send_message = mock

            runtime.run_forever()

    runtime_thread = Process(
        target=start_runtime,
        args=(args, cancel_event, sent_queue, handler_closed_event),
        daemon=True,
    )
    runtime_thread.start()

    assert GRPCDataRuntime.wait_for_ready_or_shutdown(
        timeout=5.0, ctrl_address=f'{args.host}:{args.port_in}', shutdown_event=Event()
    )

    request_start_time = time.time()

    async def task_wrapper(address, messages_received):
        msg = _create_test_data_message(len(messages_received))
        await Grpclet._create_grpc_stub(address).Call(msg)
        messages_received.append(msg)

    sent_requests = 0
    messages_received = []
    tasks = []
    for i in range(pending_requests):
        tasks.append(
            asyncio.create_task(
                task_wrapper(f'{args.host}:{args.port_in}', messages_received)
            )
        )
        sent_requests += 1

    await asyncio.sleep(1.0)

    if close_method == 'TERMINATE':
        runtime_thread.terminate()
    else:
        GRPCDataRuntime.cancel(cancel_event)

    assert not handler_closed_event.is_set()
    runtime_thread.join()

    assert pending_requests == sent_requests
    assert sent_requests == len(messages_received)
    assert sent_queue.qsize() == pending_requests

    assert (
        time.time() - request_start_time >= slow_executor_block_time * pending_requests
    )
    assert handler_closed_event.is_set()
    assert not GRPCDataRuntime.is_ready(f'{args.host}:{args.port_in}')
Example #30
                task_idx,
                np.mean(total_rewards[-21:-1]),
                np.min(total_rewards[-21:-1]),
                np.max(total_rewards[-21:-1]),
                np.mean(total_steps[-21:-1]),
                len(total_rewards),
            )

            train_data_queue.put(
                ([rb['s'] for rb in replay_buffer], returns, advs,
                 [rb['a'] for rb in replay_buffer], summary))


##################################

coordinator_queue = mp.Queue()
train_data_queue = mp.Queue()

workers = []
for i in range(num_workers):
    workers.append(
        mp.Process(target=worker,
                   args=(i, coordinator_queue, train_data_queue),
                   daemon=True))
    workers[-1].start()

server = tf.train.Server(cluster, job_name='learner', task_index=0)
sess = tf.Session(server.target)
ppo = PPO(sess)
sess.run(tf.global_variables_initializer())