class ConsoleProxy(object):
    def __init__(self):
        self._read, self._write = Pipe(duplex=True)

    def fileno(self):
        return self._read.fileno()

    def read(self):
        data = self._read.recv()
        if data["type"] == "completer":
            result = command_root.match(data["line"], data["hints"])
        elif data["type"] == "parser":
            try:
                result = command_root.parse(data["line"])
            except Command.NotFound as e:
                if str(e) != "":
                    result = str(e)
                else:
                    result = "No such command '{:s}'".format(data["line"])
            except Command.SyntaxError as e:
                result = str(e)
        else:
            result = ""

        self._read.send(result)

    def completer(self, line, hints):
        self._write.send({"type" : "completer", "line" : line, "hints" : hints})
        return self._write.recv()

    def parser(self, line):
        self._write.send({"type" : "parser", "line" : line})
        return self._write.recv()
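The proxy above funnels both completer and parser requests through one duplex pipe: one end is serviced by read() when fileno() becomes readable, while the other end issues blocking request/reply calls. A minimal sketch of that round-trip, with a hypothetical match() standing in for command_root:

from multiprocessing import Pipe

def match(line):
    # Hypothetical stand-in for command_root.match()
    return [line + "-completion"]

server_end, client_end = Pipe(duplex=True)

# Caller side: post a request and block on the reply.
client_end.send({"type": "completer", "line": "sho", "hints": []})

# Service side: what read() does once fileno() becomes readable.
request = server_end.recv()
server_end.send(match(request["line"]))

print(client_end.recv())  # ['sho-completion']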
Example #2
        def fn_with_timeout(*args, **kwargs):
            conn1, conn2 = Pipe()
            kwargs['_conn'] = conn2
            th = Process(target=fn, args=args, kwargs=kwargs)
            th.start()
            if conn1.poll(self.trial_timeout):
                fn_rval = conn1.recv()
                th.join()
            else:
                print('TERMINATING DUE TO TIMEOUT')
                th.terminate()
                th.join()
                fn_rval = 'return', {
                    'status': hyperopt.STATUS_FAIL,
                    'failure': 'TimeOut'
                }

            assert fn_rval[0] in ('raise', 'return')
            if fn_rval[0] == 'raise':
                raise fn_rval[1]

            # -- remove potentially large objects from the rval
            #    so that the Trials() object below stays small
            #    We can recompute them if necessary, and it's usually
            #    not necessary at all.
            if fn_rval[1]['status'] == hyperopt.STATUS_OK:
                fn_loss = float(fn_rval[1].get('loss'))
                fn_preprocs = fn_rval[1].pop('preprocs')
                fn_classif = fn_rval[1].pop('classifier')
                if fn_loss < self._best_loss:
                    self._best_preprocs = fn_preprocs
                    self._best_classif = fn_classif
                    self._best_loss = fn_loss
            return fn_rval[1]
Example #3
    def run(self):
        logging.info('Visualizer thread started')

        parent_end, child_end = Pipe()

        # Sensible default value for max_process
        max_process = 3
        process_count = 0

        while not self.stop or not self.job_backlog.empty():
            while parent_end.poll(0.1):
                (name, counter) = parent_end.recv()
                self.controller.find_trial(name).set_counter_plot(counter)
                process_count -= 1

            if self.job_backlog.empty():
                time.sleep(1)
            elif process_count < max_process:
                process_count += 1

                function, snapshot, trial = self.job_backlog.get_nowait()
                logging.info('Visualizing {}'.format(trial.get_name()))
                p = Process(target=self.render_graph,
                            args=(function, snapshot, trial.get_name(),
                                  child_end))
                p.start()
                self.job_backlog.task_done()

        logging.info('Visualizer Finished')
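The loop above drains finished results with a short poll() timeout while continuing to dispatch new jobs. A self-contained sketch of that drain-then-dispatch pattern, with a stub standing in for render_graph:

from multiprocessing import Pipe, Process

def render_stub(name, child_end):
    # Stand-in for render_graph: report a fake result back to the parent.
    child_end.send((name, [1, 2, 3]))

if __name__ == '__main__':
    parent_end, child_end = Pipe()
    for job in ('trial-a', 'trial-b'):
        Process(target=render_stub, args=(job, child_end)).start()
    finished = 0
    while finished < 2:
        while parent_end.poll(0.1):   # drain whatever has completed
            name, counter = parent_end.recv()
            print(name, counter)
            finished += 1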
Example #4
    def test_request_with_fork(self):
        try:
            from multiprocessing import Process, Pipe
        except ImportError:
            raise SkipTest("No multiprocessing module")

        coll = self.c.test.test
        coll.remove(safe=True)
        coll.insert({'_id': 1}, safe=True)
        coll.find_one()
        self.assert_pool_size(1)
        self.c.start_request()
        self.assert_pool_size(1)
        coll.find_one()
        self.assert_pool_size(0)
        self.assert_request_with_socket()

        def f(pipe):
            # We can still query server without error
            self.assertEqual({'_id':1}, coll.find_one())

            # Pool has detected that we forked, but resumed the request
            self.assert_request_with_socket()
            self.assert_pool_size(0)
            pipe.send("success")

        parent_conn, child_conn = Pipe()
        p = Process(target=f, args=(child_conn,))
        p.start()
        p.join(1)
        p.terminate()
        child_conn.close()
        self.assertEqual("success", parent_conn.recv())
Example #5
class PluginRunner:
  def __init__(self, plugin):
    self.name = plugin
    self.proc = None
    self.running = False
    self.local_pipe, self.remote_pipe = Pipe()

  def getConnection(self):
    return self.local_pipe

  def start(self):
    assert not self.running, "Already running."
    self.running = True
    self.thread = Thread(target=self.run)
    self.thread.start()

  def restart(self):
    self.proc.terminate()

  def stop(self):
    assert self.running, "Not running."
    self.running = False
    self.proc.terminate()
    self.thread.join()
    self.remote_pipe.close()
    self.local_pipe.close()

  def run(self):
    while self.running:
      self.proc = Process(target=launch, args=('repeat', self.remote_pipe))
      self.proc.start()
      print("Waiting on proc to end")
      self.proc.join()
Example #6
    def init_object(self, remoteobject, *args, **kwargs):
        '''Create a new child process hosting an object.
        @param remoteobject: a L{RemoteObject} representation for the
        to be created object
        @param args: list arguments to pass on to the constructor
        @param kwargs: keyword arguments to pass on to the constructor
        '''
        loglevel = logging.getLogger().getEffectiveLevel()
        if not (self.logqueue_reader and self.logqueue_reader.is_alive()):
            self.logqueue_reader = LogQueueReader(self.logqueue)
            self.logqueue_reader.start()

        conn1, conn2 = Pipe()
        p = Process(
            target=childmain,
            args=(conn2, remoteobject, zim.ZIM_EXECUTABLE, loglevel, self.logqueue, args, kwargs)
        )
        p.daemon = True # child process exit with parent
        p.start()
        obj = conn1.recv()
        logger.debug('Child process started %i for %s', p.pid, obj)
        worker = ConnectionWorker(conn1, p)
        worker.start()
        self.remoteobjects[obj] = worker
        self._running = True
        # for the odd case that last child quit and new
        # child start come in at the same time
        return True
Example #7
 def __init__(self, testcase_suite, manager):
     self.keep_alive_parent_end, self.keep_alive_child_end = Pipe(
         duplex=False)
     self.finished_parent_end, self.finished_child_end = Pipe(duplex=False)
     self.result_parent_end, self.result_child_end = Pipe(duplex=False)
     self.testcase_suite = testcase_suite
     if sys.version[0] == '2':
         self.stdouterr_queue = manager.StreamQueue()
     else:
         from multiprocessing import get_context
         self.stdouterr_queue = manager.StreamQueue(ctx=get_context())
     self.logger = get_parallel_logger(self.stdouterr_queue)
     self.child = Process(target=test_runner_wrapper,
                          args=(testcase_suite,
                                self.keep_alive_child_end,
                                self.stdouterr_queue,
                                self.finished_child_end,
                                self.result_child_end,
                                self.logger)
                          )
     self.child.start()
     self.last_test_temp_dir = None
     self.last_test_vpp_binary = None
     self._last_test = None
     self.last_test_id = None
     self.vpp_pid = None
     self.last_heard = time.time()
     self.core_detected_at = None
     self.testcases_by_id = {}
     self.testclasess_with_core = {}
     for testcase in self.testcase_suite:
         self.testcases_by_id[testcase.id()] = testcase
     self.result = TestResult(testcase_suite, self.testcases_by_id)
Example #8
def test_cython_wrapper():
    descs, uris = plutosdr.scan_devices()
    plutosdr.set_tx(False)
    print("Devices", descs)
    print("Open", plutosdr.open(uris[0]))
    print("Set Freq to 433.92e6", plutosdr.set_center_freq(int(433.92e6)))
    print("Set Sample Rate to 2M", plutosdr.set_sample_rate(int(2.5e6)))
    print("Set bandwidth to 4M", plutosdr.set_bandwidth(int(4e6)))
    print("Set gain to 10", plutosdr.set_rf_gain(10))

    print("prepare rx", plutosdr.setup_rx())

    parent_conn, child_conn = Pipe()

    for i in range(10):
        plutosdr.receive_sync(child_conn)
        data = parent_conn.recv_bytes()
        print(np.frombuffer(data, dtype=np.int16))

    print(plutosdr.get_tx())
    print("Close", plutosdr.close())

    plutosdr.set_tx(True)

    print("Open", plutosdr.open(uris[0]))
    print("Setup tx", plutosdr.setup_tx())
    print("Set Freq to 433.92e6", plutosdr.set_center_freq(int(433.92e6)))
    print("Set Sample Rate to 2M", plutosdr.set_sample_rate(int(2.5e6)))
    print("Set bandwidth to 4M", plutosdr.set_bandwidth(int(4e6)))
    print("Set gain to 10", plutosdr.set_rf_gain(-89))

    print("Send", plutosdr.send_sync(np.zeros(4096, dtype=np.int16)))

    print("Close", plutosdr.close())
Example #9
def get_resource(url,is_ajax=0,ajax_timeout=2,js_timeout=5,timeout_retry_times=0,jsengine_type=1000): 
	parent_conn,child_conn=Pipe()
	p=Process(target=CrawlerProcess,args=(child_conn,url))
	p.start()
	p.join()
	html=parent_conn.recv()
	return html
Example #10
class ProcessHandler:
    '''
    run(): The run() method is the entry point for a thread.
    start(): The start() method starts a thread by calling the run method.
    join([time]): The join() waits for threads to terminate.
    isAlive(): The isAlive() method checks whether a thread is still executing.
    '''

    def __init__(self, daemonic, pipe):
        self.daemonic = daemonic
        self.pipe = pipe
        if self.pipe:
            self.parent_conn, self.child_conn = Pipe(duplex=False)

    @abc.abstractmethod
    def run(self, *args):
        pass

    def start(self, *args):
        # Close write fd because parent not going to write
        if not self.pipe:
            self.process = Process(target=self.run, args=args)
        else:
            self.process = Process(
                target=self.run, args=(self.child_conn,) + args)
        if self.daemonic:
            self.process.daemon = True
        self.process.start()

    def join(self):
        if self.pipe:
            self.parent_conn.close()
            self.child_conn.close()
        self.process.join()
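A minimal usage sketch for the handler above; Echo is a hypothetical subclass and assumes the ProcessHandler definition is in scope:

class Echo(ProcessHandler):
    def run(self, conn, *args):
        # conn is the child end that start() injects when pipe=True.
        conn.send("ready")

if __name__ == '__main__':
    echo = Echo(daemonic=True, pipe=True)
    echo.start()
    print(echo.parent_conn.recv())  # "ready"
    echo.join()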
    """
class ProcessStarter(object):
    
    def __init__(self):
        '''
        Setup the shared memory data structure model and initialize the control parts.
        '''
        self.running = True
        self.orprocess  = None
        self.guiprocess = None
        self.pipeGUI, self.pipeOR = Pipe()
        self.StartProcesses()

    def StartProcesses(self):
        self.guiprocess = Process(target=self.__startGUI__)
        self.guiprocess.start()        
        self.pipeGUI.send(["StartViewer", None])
        self.orprocess = Process(target=ORServer,args=(self.pipeOR,))
        self.orprocess.start()
        return True
    
    def terminate(self):
        try:
            self.pipeGUI.send(["Stop", None])
            self.guiprocess.terminate()
            self.orprocess.terminate()
        except:
            pass

    def __startGUI__(self):
        app  = QtGui.QApplication(sys.argv)
        form = pnpApp(self.pipeGUI, self)
        form.show()
        sys.exit(app.exec_())
Example #12
def run_http_server(redirect_uri = None, modify_port = True, port_range = (10000, 10010) ):
    """Returns (modified) redirect_uri"""
    from multiprocessing import Process, Pipe
    from urllib.parse import urlsplit, urlunsplit
    if redirect_uri is None:
        redirect_uri = "http://localhost"
    p = urlsplit(redirect_uri)
    #Ensure hostname is localhost or 127.0.0.1
    if p.hostname != "127.0.0.1" and p.hostname != "localhost":
        raise ValueError("url must have host of 127.0.0.1 or localhost! Got: {}".format(p.hostname))
    if not modify_port:
        if p.port is not None:
            port_range = (int(p.port), int(p.port))
        else:
            port_range = (int(80), int(80))
    parent_port_pipe, child_port_pipe = Pipe()
    parent_pipe, child_pipe = Pipe()
    httpd_p = Process(target = _run_http_server, args = (child_port_pipe, child_pipe, port_range))
    httpd_p.start()
    if parent_port_pipe.poll(3000):
        final_port = parent_port_pipe.recv()
    else:
        raise Exception("Timeout waiting for HTTP server process to start")
    if final_port == 0:
        #Could not find a port
        raise Exception("Could not find open port")
    netloc = "{0}:{1}".format(p.hostname, final_port)
    if p.path:
        path = p.path
    else:
        path = '/'
    p = p._replace(netloc = netloc, path = path)
    return (urlunsplit(p), parent_pipe, httpd_p)
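A hedged usage sketch; the _run_http_server worker lives elsewhere in the module, and the payload it forwards over the returned pipe is an assumption here:

redirect_uri, callback_pipe, httpd_process = run_http_server("http://localhost:10000")
print("Listening on", redirect_uri)
# ...direct the remote service at redirect_uri, then wait for the callback:
if callback_pipe.poll(60):
    print("Got callback:", callback_pipe.recv())
httpd_process.terminate()
httpd_process.join()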
Example #13
        def fn_with_timeout(*args, **kwargs):
            conn1, conn2 = Pipe()
            kwargs["_conn"] = conn2
            th = Process(target=partial(fn, best_loss=self._best_loss), args=args, kwargs=kwargs)
            th.start()
            if conn1.poll(self.trial_timeout):
                fn_rval = conn1.recv()
                th.join()
            else:
                self.info("TERMINATING DUE TO TIMEOUT")
                th.terminate()
                th.join()
                fn_rval = "return", {"status": hyperopt.STATUS_FAIL, "failure": "TimeOut"}

            assert fn_rval[0] in ("raise", "return")
            if fn_rval[0] == "raise":
                raise fn_rval[1]

            # -- remove potentially large objects from the rval
            #    so that the Trials() object below stays small
            #    We can recompute them if necessary, and it's usually
            #    not necessary at all.
            if fn_rval[1]["status"] == hyperopt.STATUS_OK:
                fn_loss = float(fn_rval[1].get("loss"))
                fn_preprocs = fn_rval[1].pop("preprocs")
                fn_classif = fn_rval[1].pop("classifier")
                fn_iters = fn_rval[1].pop("iterations")
                if fn_loss < self._best_loss:
                    self._best_preprocs = fn_preprocs
                    self._best_classif = fn_classif
                    self._best_loss = fn_loss
                    self._best_iters = fn_iters
            return fn_rval[1]
Example #14
    def train(self):
        conf = self.conf
        if len(self.training_set) == 0:
            return True
        try:
            # Scale inputs and particles?
            self.input_scaler = preprocessing.StandardScaler().fit(self.training_set)
            scaled_training_set = self.input_scaler.transform(self.training_set)

            # Scale training data
            self.output_scaler = preprocessing.StandardScaler(with_std=False).fit(self.training_fitness)
            adjusted_training_fitness = self.output_scaler.transform(self.training_fitness)
            gp = self.regressor_countructor()
            # Start a new process to fit the data to the gp, because gp.fit is
            # not thread-safe
            parent_end, child_end = Pipe()

            self.controller.acquire_training_sema()
            logging.info("Training regressor")
            p = Process(target=self.fit_data, args=(gp, scaled_training_set, adjusted_training_fitness, child_end))
            p.start()
            self.regr = parent_end.recv()
            if self.regr is None:
                raise Exception("Something went wrong with the regressor")
            else:
                logging.info("Regressor training successful")
                self.controller.release_training_sema()
                self.gp = gp
                return True
        except Exception as e:
            logging.info("Regressor training failed.. retraining.. " + str(e))
            return False
Example #15
def resolve_list(hostname_list, qtype='A', tokens=300, flag=False, servers=[('8.8.8.8', 53)], timeout=(1, 3, 5, 7)):
    parent, child = Pipe()
    p = Process(target=_resolve_list, args=(child, hostname_list, qtype, tokens, flag, servers, timeout))
    p.start()
    result = parent.recv()
    p.join()
    return result
Example #16
    def onExeBtn(self, event):
        srcFiles = self.projPane.getValue()

        self.analyzerOutDir = os.path.join(self.projRootDir, 'analyzer_output')
        oldcwd = os.getcwd()

        if not os.path.exists(self.analyzerOutDir):
            os.makedirs(self.analyzerOutDir)

        os.chdir(self.analyzerOutDir)

        rcv_pipe, snd_pipe = Pipe(duplex=True)

        self.dbname = ''.join([self.projRootDir.replace(os.sep, '_').strip('_'), '.sqlite'])

        self.exePane.dbPathTc.SetValue(os.path.join(self.analyzerOutDir, self.dbname))
        self.exePane.ppLogFileTc.SetValue(os.path.join(self.analyzerOutDir, 'preprocessor.log'))
        self.exePane.parserLogFileTc.SetValue(os.path.join(self.analyzerOutDir, 'parser.log'))

        p = Process(target=analyze,
                    args=(snd_pipe,
                          os.path.join(self.analyzerOutDir, self.dbname),
                          self.getPpCfg(),
                          self.getParserCfg(),
                          srcFiles,
                          self.exePane.pipelineCb.GetValue(),
                          self.exePane.numProcSc.GetValue(),
                          self.exePane.numPpProcSc.GetValue(),
                          self.exePane.numParserProcSc.GetValue(),
                          ))
        p.start()
        dlg = wx.ProgressDialog('Executing',
                                     '0/%d' % len(srcFiles),
                                     parent=self,
                                     maximum=len(srcFiles)*10,
                                     style=wx.PD_CAN_ABORT |
                                           wx.PD_APP_MODAL |
                                           wx.PD_ELAPSED_TIME
                                     )
        dlg.SetSize((500,150))
        dlg.Layout()
        dlg.Show()

        result = None
        while True:
            i, total, result = rcv_pipe.recv()
            ret = dlg.Update(i*10, result if i == total else '[%d/%d] %s ... done' % (i+1, total, result))
            if ret[0] == False:
                rcv_pipe.send('STOP')
                while result != 'STOPPED':
                    result = rcv_pipe.recv()
                dlg.Update(total*10, 'Canceled')
                break
            if i == total:
                break
        p.join()

        self.exePane.dbPathTc.SetValue(os.path.join(self.analyzerOutDir, self.dbname))

        os.chdir(oldcwd)
Example #17
    def run(self):
        logging.info('Visualizer thread started')

        parent_end, child_end = Pipe()

        # Sensible default value for max_process
        max_process = 2
        process_count = 0

        while not self.stop or not self.job_backlog.empty():
            while parent_end.poll(0.1):
                parent_end.recv() ## currently not using the info... irrelevant
                
                ## TODO - a signal to notify the viewer that a visualization job has finished...
                #self.controller.view_update(self)
                process_count -= 1

            if self.job_backlog.empty():
                time.sleep(1)
            elif process_count < max_process:
                process_count += 1
                run_name, function, snapshot = self.job_backlog.get_nowait()
                if not (run_name in self.remove_run_name):
                    logging.info('Added job to visualizer queue: ' + str(run_name))
                    logging.info('No. of jobs in queue: ' + str(process_count))
                    p = Process(target=self.render_graph,
                                args=(function, snapshot, run_name, child_end))
                    p.start()
                
        logging.info('Visualizer Finished')
Example #18
    def query(self, sql, values=None, errors=True):
        """
        :param sql: SQL string
        :param values: If question marks are used in SQL command, pass in replacement values as tuple
        :param errors: Suppress raised errors by passing in False
        :return: 
        """
        if not self.broker:
            raise RuntimeError("Broker not running. Use the 'start_broker()' method before calling query().")

        values = () if not values else values
        recvpipe, sendpipe = Pipe(False)
        valve = br.SafetyValve(150)
        while True:
            valve.step("To many threads being called, tried for 5 minutes but couldn't find an open thread.")
            try:
                dummy_func()
                self.broker_queue.put({'mode': 'sql', 'sql': sql, 'values': values, 'pipe': sendpipe})
                break
            except (sqlite3.Error, sqlite3.OperationalError, sqlite3.IntegrityError, sqlite3.DatabaseError) as err:
                if errors:
                    raise err
            except RuntimeError as err:
                if "can't start new thread" in str(err):
                    sleep(2)
                else:
                    raise err
        response = json.loads(recvpipe.recv())
        return response
Example #19
def evaluate_expression(exp):
    '''
    Evaluates given expression.
    '''    
    if not exp:
        return "No expression supplied."
    exp = str(exp)
    
    # Setup evaluation process if it's not present
    global eval_process, eval_pipe_parent, eval_pipe_child
    if not eval_process:
        eval_pipe_parent, eval_pipe_child = Pipe()
        eval_process = Process(name = "seejoo_eval",
                               target = _eval_worker,
                               args = (eval_pipe_child,))
        eval_process.daemon = True
        eval_process.start()
    
    # Push expression through the pipe and wait for result
    eval_pipe_parent.send(exp)
    if eval_pipe_parent.poll(EVAL_TIMEOUT):
        res = str(eval_pipe_parent.recv())
        res = "".join(c for c in res if ord(c) >= 32)   # Sanitize result (filter() returns an iterator on Python 3)
        return res        

    # Evaluation timed out; kill the process and return error
    os.kill(eval_process.pid, 9)
    os.waitpid(eval_process.pid, os.WUNTRACED)
    eval_process = None
    return "Operation timed out."
Example #20
    def recog_proc(self, child_recog: Pipe, e_recog: Event, yolo_type: str):
        """
        Parallel process for object recognition

        Arguments:
            child_recog {Pipe} -- pipe for communication with parent process,
                sends bbox yolo type of recognized object
            e_recog {Event} -- event for indicating complete recognize in frame
        """

        # initialize YOLO
        yolo = Yolo(yolo_type)
        e_recog.set()
        print("yolo defined")

        while True:
            frame = child_recog.recv()
            print("recog process frame recieved")
            if frame is None:
                print("FRAME NONE? R U SURE ABOUT THAT?!")
                return
            res = yolo.detect(frame, cvmat=True)
            print("recog send")
            e_recog.set()
            child_recog.send(res)
Example #21
    def quicksort(self,arr, conn, procNum):


        if procNum <= 0 or len(arr)<= 1:
            conn.send(self.qck(arr))
            conn.close()
            return
        #print 'Just in case you don't trust that this program works better than other quicksorts :3 FUBAR. process id:', os.getppid()
        pivot = arr.pop(random.randint(0, len(arr)-1))

        leftSide = [x for x in arr if x < pivot]
        rightSide = [x for x in arr if x >= pivot]  # >= so duplicates of the pivot are kept

        pconnLeft, cconnLeft = Pipe()
        leftProc = Process(target= self.quicksort, args=(leftSide, cconnLeft,procNum -1))

        pconnRight, cconnRight = Pipe()
        rightProc = Process(target=self.quicksort, args=(rightSide, cconnRight, procNum - 1))

        leftProc.start()
        rightProc.start()

        conn.send(pconnLeft.recv() + [pivot] + pconnRight.recv())
        conn.close()

        leftProc.join()
        rightProc.join()
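A hedged driver for the method above, assuming it lives on a hypothetical Sorter class that also provides the sequential qck() fallback:

from multiprocessing import Pipe, Process

if __name__ == '__main__':
    sorter = Sorter()                     # hypothetical host class
    parent_conn, child_conn = Pipe()
    worker = Process(target=sorter.quicksort,
                     args=([5, 3, 8, 1, 9, 2], child_conn, 2))
    worker.start()
    print(parent_conn.recv())             # [1, 2, 3, 5, 8, 9]
    worker.join()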
Example #22
def main():
    logging.basicConfig(level=logging.INFO)

    args = parse_args()
    print(args)

    echo_server = Process(target=EchoServer('ipc://ECHO:1',
            args.echo_delay).run)
    client = Process(target=JsonClient('ipc://PRODUCER_REP:1',
                                       'ipc://PRODUCER_PUB:1',
                                       args.request_count,
                                       request_delay=args.request_delay).run)
    parent_pipe, child_pipe = Pipe(duplex=True)
    async_server_adapter = Process(
        target=AsyncJsonServerAdapter('ipc://ECHO:1', 'ipc://PRODUCER_REP:1',
                                      'ipc://PRODUCER_PUB:1', child_pipe).run
    )

    try:
        echo_server.start()
        async_server_adapter.start()
        client.start()
        client.join()
        parent_pipe.send('close')
        async_server_adapter.join()
    except KeyboardInterrupt:
        pass
    client.terminate()
    async_server_adapter.terminate()
    echo_server.terminate()
    # Since ipc:// creates files, delete them on exit
    cleanup_ipc_uris(('ipc://ECHO:1', 'ipc://PRODUCER_REP:1',
            'ipc://PRODUCER_PUB:1'))
Example #23
def go():
    global graphic_view, status_label
    data_parent, data_child = Pipe(duplex=False)
    receiver = Process(target=generate_data, args=(data_child,))
    receiver.daemon = True
    receiver.start()

    scene = QGraphicsScene()
    graphic_view.setScene(scene)
    scene.setSceneRect(0, 0, 1024, 1024)

    x_pos = 0
    y_pos = 0
    t = time.time()
    while True:
        speed = time.time()
        data = data_parent.recv()
        spectrogram = Spectrogram(data)
        pixmap = QPixmap.fromImage(spectrogram.create_spectrogram_image(transpose=True))

        scene.setSceneRect(scene.sceneRect().adjusted(0, 0, 0, pixmap.height()))
        item = scene.addPixmap(pixmap)
        item.setPos(x_pos, y_pos)
        y_pos += pixmap.height()
        graphic_view.fitInView(scene.sceneRect())
        status_label.setText("Height: {0:.0f} // Speed: {1:.2f}  // Total Time: {2:.2f}".format(scene.sceneRect().height(),
                                                                                                1/(time.time()-speed),
                                                                                                time.time()-t))
        QApplication.instance().processEvents()
Example #24
def QuickSortMPListArray(A,conn,NumProcs):
        if len(A)<=1 :
                conn.send(A)
                conn.close()
        elif int(NumProcs)<1:
                conn.send(QuickSortListArray(A))
                conn.close()
        else:
                lesser=[]
                greater=[]
                pv=[]
                Pivot=A.pop(0)
                pvVal=int(Pivot[0])
                lesser=[x for x in A if x[0] < pvVal]
                greater=[x for x in A if x[0] > pvVal]
                pv=[x for x in A if x[0] == pvVal]
                pv.append(Pivot)
                Procs=int(NumProcs)-1
                pConnLeft,cConnLeft=Pipe()
                leftProc=Process(target=QuickSortMPListArray,args=(lesser,cConnLeft,Procs))
                pConnRight,cConnRight=Pipe()
                rightProc=Process(target=QuickSortMPListArray,args=(greater,cConnRight,Procs))
                

                leftProc.start()
                rightProc.start()
                conn.send(pConnLeft.recv()+pv+pConnRight.recv())
                conn.close()
        
                leftProc.join()
                rightProc.join()
        return
Example #25
def process_pipe():
    parent_conn, child_conn = Pipe()
    p = Process(target=pipe_test, args=(child_conn,))
    p.start()
    print(parent_conn.recv())
    p.join()
    parent_conn.close()
Example #26
 def transcode(self, path, format='mp3', bitrate=False):
     if self.stopping.is_set():
         return
     try:
         stop = Event()
         start_time = time.time()
         parent_conn, child_conn = Pipe()
         process = Process(target=transcode_process,
                 args=(child_conn, path, stop, format, bitrate))
         process.start()
         while not (self.stopping.is_set() or stop.is_set()):
             data = parent_conn.recv()
             if not data:
                 break
             yield data
         logger.debug("Transcoded %s in %0.2f seconds." % (path.encode(cfg['ENCODING']), time.time() - start_time))
     except GeneratorExit:
         stop.set()
         logger.debug("User canceled the request during transcoding.")
     except:
         stop.set()
         logger.warn("Some type of error occured during transcoding.")
     finally:
         parent_conn.close()
         process.join()
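A self-contained sketch of the same streaming-generator pattern, with transcode_process replaced by a trivial chunk producer; an empty payload marks end-of-stream, as in the loop above:

from multiprocessing import Pipe, Process

def produce_chunks(conn):
    for chunk in (b"chunk1", b"chunk2"):
        conn.send(chunk)
    conn.send(b"")                  # empty payload signals end-of-stream
    conn.close()

def stream():
    parent_conn, child_conn = Pipe()
    worker = Process(target=produce_chunks, args=(child_conn,))
    worker.start()
    try:
        while True:
            data = parent_conn.recv()
            if not data:
                break
            yield data
    finally:
        parent_conn.close()
        worker.join()

if __name__ == '__main__':
    print(b"".join(stream()))       # b'chunk1chunk2'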
Example #27
def main():
    startup_checks()
    parser = argparse.ArgumentParser()
    parser.add_argument("repourl", nargs="?", default=REPOURL, help="URL of "
                        "repository to crawl")
    arguments = parser.parse_args(sys.argv[1:])

    parent_conn, child_conn = Pipe()
    crawler = Process(target=crawl, args=(arguments.repourl, child_conn))
    crawler.start()
    while True:
        package_url = parent_conn.recv()
        if package_url is None:
            break
        print "Processing %s" % package_url
        tempdir = tempfile.mkdtemp()
        p_obj = Package(package_url, tempdir)
        database = pymongo.Connection("localhost", 27017).symdex
        collection = database.libraries
        for key, value in p_obj.libs_and_syms.items():
            mongo_dict = {"package": os.path.basename(package_url),
                          "library": key, "symbols": value}
            collection.insert(mongo_dict)
        shutil.rmtree(tempdir)
        os.remove(package_url)
    crawler.join()
Example #28
def take_lock(fd, timeout=None, shared=False):
	'''Take a lock on a file descriptor

	If timeout is 0 the lock is taken without blocking,
	if timeout is None we block indefinitely,
	if timeout is a positive number we time out in that many seconds.

	If shared is True this is a shared lock,
	so can lock with other shared locks,
	if shared is False this is an exclusive lock.

	with open(path, 'r') as lock:
		take_lock(lock.fileno(), timeout, shared)

	'''
	if timeout is None or timeout == 0:
		flags = (LOCK_SH if shared else LOCK_EX)
		flags |= (LOCK_NB if timeout == 0 else 0)
		flock(fd, flags)
		return
	piper, pipew = Pipe(duplex=False)
	p = Process(target=_set_alarm_and_lock,
	            args=(fd, pipew, timeout, shared))
	p.start()
	err = piper.recv()
	p.join()
	if err:
		if isinstance(err, IOError) and err.errno == EINTR:
			raise IOError(EAGAIN, strerror(EAGAIN))
		raise err
Example #29
class StubExecuteTestsFunc:
    def __init__(self):
        self.main_conn, self.func_conn = Pipe()
        self._called = self._complete = None
        self.stub_reset()

    def stub_reset(self):
        self._called = self._complete = False

    def stub_complete(self):
        self._complete = True
        self.main_conn.send(StubExecuteTestsFuncConnMessages.COMPLETE)

    def stub_called(self):
        if not self._called and self.main_conn.poll():
            conn_message = self.main_conn.recv()
            if conn_message == StubExecuteTestsFuncConnMessages.CALLED:
                self._called = True
        return self._called

    def __enter__(self):
        self.stub_reset()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stub_complete()

    def __call__(self, *_):
        self._called = True
        self.func_conn.send(StubExecuteTestsFuncConnMessages.CALLED)
        while not self._complete:
            conn_message = self.func_conn.recv()
            if conn_message == StubExecuteTestsFuncConnMessages.COMPLETE:
                self._complete = True
Example #30
class Airplay():
	def __init__(self, file):
		self.metadata   = AIRPLAY_DEFAULT
		self.block      = ''
		self.out_pipe, self.in_pipe = Pipe()

		p = Process(target=read_shairport_pipe, args=(file, self.in_pipe,))
		p.start()

	def __repr__(self):
		printout = "metadata:\n"+self.metadata
		# for k,v in self.metadata.items():
		# 		printout += '%12s : %s\n' % (k,v)
		return printout


	def grab(self):
		if self.out_pipe.poll(0):
			s = True
			self.metadata = self.out_pipe.recv()
		else:
			print("nothing in pipe")
			s = False

		return s
Example #31
from multiprocessing import Pipe,Process
import os
import time
import random


def send_proc(pipe, content):
    # Send each item, then a None sentinel to mark the end of the stream.
    for item in content:
        pipe.send(item)
        time.sleep(random.random())
    pipe.send(None)


def recv_proc(pipe):
    # Receive items until the None sentinel arrives.
    while True:
        item = pipe.recv()
        if item is None:
            break
        print("Received:", item)

if __name__ == '__main__':
    print("Parent process %s started." % (os.getpid(),))
    parent_end, child_end = Pipe()
    send_process = Process(target=send_proc, args=(child_end, ['spam', 'eggs']))
    recv_process = Process(target=recv_proc, args=(parent_end,))
    send_process.start()
    recv_process.start()
    send_process.join()
    recv_process.join()
Example #32
from multiprocessing import Process, Pipe

def f(conn):
    conn.send([42, None, 'hello'])
    conn.close()

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    p = Process(target=f, args=(child_conn,))
    p.start()
    print(parent_conn.recv())   # prints "[42, None, 'hello']"
    p.join()
Example #33
    def test_fork(self):
        """Test using a connection before and after a fork.
        """
        if sys.platform == "win32":
            raise SkipTest("Can't fork on Windows")

        try:
            from multiprocessing import Process, Pipe
        except ImportError:
            raise SkipTest("No multiprocessing module")

        db = self._get_connection().pymongo_test

        # Failure occurs if the connection is used before the fork
        db.test.find_one()
        #db.connection.end_request()

        def loop(pipe):
            while True:
                try:
                    db.test.insert({"a": "b"}, safe=True)
                    for _ in db.test.find():
                        pass
                except:
                    traceback.print_exc()
                    pipe.send(True)
                    os._exit(1)

        cp1, cc1 = Pipe()
        cp2, cc2 = Pipe()

        p1 = Process(target=loop, args=(cc1,))
        p2 = Process(target=loop, args=(cc2,))

        p1.start()
        p2.start()

        p1.join(1)
        p2.join(1)

        p1.terminate()
        p2.terminate()

        p1.join()
        p2.join()

        cc1.close()
        cc2.close()

        # recv will only have data if the subprocess failed
        try:
            cp1.recv()
            self.fail()
        except EOFError:
            pass
        try:
            cp2.recv()
            self.fail()
        except EOFError:
            pass

        db.connection.close()
Example #34
    def __init__(self):
        self.finished = False

        # Get two sides of a pipe.
        self.p_left, self.p_right = Pipe()
Example #35
from multiprocessing import Process, Pipe
import sys


def sum(n, conn):
    total = 0
    for i in range(n + 1):
        total += i
    conn.send(total)
    conn.close()


if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("run python3 sumprocess.py <num>")
        sys.exit()
    n = int(sys.argv[1])
    parent_conn, child_conn = Pipe()
    p = Process(target=sum, args=(n, child_conn))
    p.start()
    print('sum=' + str(parent_conn.recv()))
    p.join()
Example #36
def main(verbose=False, n_processes=None, given_array=None, order='asc'):
    """
    Simulation of the distributed sorting proposed by Sasaski.
    Each Process node has it's own resource. Each of the non-terminal
    node is connected to two adjacent processes with a synchronized duplex Pipe
    connection.
    """

    num_process = 0
    process_list = []

    if not given_array:

        num_process = int(random.uniform(2.0, 20.0))
        if n_processes:
            num_process = n_processes

        connection_list = [Pipe() for i in range(num_process - 1)]

        shared_array = None
        if verbose:
            shared_array = Array('i', (num_process + 1) * num_process * 4)

        process_list = [
            ProcessNode(None,
                        connection_list[0][0],
                        shared_array=shared_array,
                        order=order)
        ]
        for i in range(num_process - 2):
            p = ProcessNode(connection_list[i][1],
                            connection_list[i + 1][0],
                            shared_array=shared_array,
                            order=order)
            process_list.append(p)
        process_list.append(
            ProcessNode(connection_list[num_process - 2][1],
                        shared_array=shared_array,
                        order=order))

    else:

        num_process = len(given_array)

        connection_list = [Pipe() for i in range(num_process - 1)]

        shared_array = None
        if verbose:
            shared_array = Array('i', (num_process + 1) * num_process)

        process_list = [
            ProcessNode(None,
                        connection_list[0][0],
                        shared_array=shared_array,
                        data=given_array[0],
                        order=order)
        ]

        for i in range(num_process - 2):
            p = ProcessNode(connection_list[i][1],
                            connection_list[i + 1][0],
                            shared_array=shared_array,
                            data=given_array[i],
                            order=order)
            process_list.append(p)

        process_list.append(
            ProcessNode(connection_list[num_process - 2][1],
                        shared_array=shared_array,
                        data=given_array[-1],
                        order=order))

    # Start time
    start = time.perf_counter()  # time.clock() was removed in Python 3.8

    for p in process_list:
        p.start()

    for p in process_list:
        p.join()

    # Elapsed time
    time_elapsed = time.perf_counter() - start

    if verbose:
        print("number of process {}".format(num_process))

        print_factor = 1
        if num_process <= 10 and num_process >= 5:
            print_factor = 2
        else:
            print_factor = 5

        for i in range(0, num_process + 1):
            if i == 0:
                print("Initial : ", end=" ")
                for j in range(0, num_process):
                    print("P{}(D({}))".format(
                        j + 1, shared_array[i * num_process * 4 + j * 4 + 0]),
                          end=" ")
                print("\n")

            elif (i == (num_process)) or (i % print_factor == 0):

                print("Round {} : ".format(i - 1))

                if i == num_process:
                    print("Data   :", end=" ")
                    for j in range(0, num_process):
                        print("P{}(D({})) ".format(
                            j + 1,
                            shared_array[i * num_process * 4 + j * 4 + 0]),
                              end=" ")
                        if j == (num_process - 1):
                            print('')

                print("vl, vr  :", end=" ")
                for j in range(0, num_process):
                    print("P{}({}|{})".format(
                        j + 1, shared_array[i * num_process * 4 + j * 4 + 1],
                        shared_array[i * num_process * 4 + j * 4 + 2]),
                          end=" ")
                    if j == (num_process - 1):
                        print('')

                print("Area   :", end=" ")
                for j in range(0, num_process):
                    print("P{}(A({})) ".format(
                        j + 1, shared_array[i * num_process * 4 + j * 4 + 3]),
                          end=" ")
                    if j == (num_process - 1):
                        print('')
                print("\n")

    return time_elapsed
Example #37
from multiprocessing import Pipe, Process

conn1, conn2 = Pipe()  # module-level pipe: this example relies on the 'fork' start method


def f1():

    conn1.send('Hello shiyanlou')


def f2():

    data = conn2.recv()
    print(data)


def main():

    Process(target=f1).start()
    Process(target=f2).start()


if __name__ == '__main__':

    main()
Example #38
 def __init__(self, pipe_in):
   self.pipe_in = pipe_in
   self.parent, self.child = Pipe()
   self.proc = Process(target=self.start, args=(self.child,))
   self.proc.start()
Example #39
# -*- coding:utf-8 -*-
'''
Use multiprocessing to create two child processes that copy the top and
bottom halves of a file into two new files.
'''
from multiprocessing import Process, Pipe
import time, os
# Get the file size
filepath = r"/home/tarena/test/Net/process/2.jpg"
Fow_filepath = r"/home/tarena/test/Net/process/top.jpg"
Last_filepath = r"/home/tarena/test/Net/process/bot.jpg"
# Create the pipes
fd1, fd2 = Pipe()
fd3, fd4 = Pipe()

size = os.path.getsize(filepath)
f = open(filepath, 'rb')


# Copy the top half
def top():
    #f = open(filepath,'rb')
    n = size // 2
    with open(Fow_filepath, 'wb') as fw:
        fw.write(f.read(n))
    #f.close()


# Copy the bottom half
def bot():
    f.seek(size // 2)
    with open(Last_filepath, 'wb') as fw:
        fw.write(f.read())
Example #40
"""
pipe.py  管道通信
注意: 1. multiprocessing 中管道通信只能用于有
         亲缘关系进程中
      2. 管道对象在父进程中创建,子进程通过父进程获取
"""
from multiprocessing import Process, Pipe

# 创建管道
# False单项管道,fd1->recv fd2->send
fd1, fd2 = Pipe()


def app1():
    print("启动app 1,请登录")
    print("请求app2 授权")
    fd1.send("app1 请求登录")  # 写入管道
    data = fd1.recv()
    if data:
        print("登录成功:", data)


def app2():
    data = fd2.recv()  # 阻塞等待读取管道内容
    print(data)
    fd2.send(('Dave', '123'))  # 可以发送任意Python类型数据


p1 = Process(target=app1)
p2 = Process(target=app2)
p1.start()
Example #41
    def handle(self):
        # self.request is the TCP socket connected to the client
        data = self.request.recv(4)
        json_size = struct.unpack("!I", data)[0]

        # recv JSON header
        json_str = self.request.recv(json_size)
        json_data = json.loads(json_str)
        if 'VM' not in json_data or len(json_data['VM']) == 0:
            self.ret_fail("No VM Key at JSON")
            return

        vm_name = ''
        try:
            vm_name = json_data['VM'][0]['base_name']
            disk_size = int(json_data['VM'][0]['diskimg_size'])
            mem_size = int(json_data['VM'][0]['memory_snapshot_size'])
            #print "received info %s" % (vm_name)
        except KeyError:
            message = 'No key is in JSON'
            print(message)
            self.ret_fail(message)
            return

        print "[INFO] New client request %s VM (will transfer %d MB, %d MB)" % (vm_name, disk_size/1024/1024, mem_size/1024/1024)

        # check base VM
        base_disk_path = None
        base_mem_path = None
        for base_vm in BaseVM_list:
            if vm_name.lower() == base_vm['name'].lower():
                base_disk_path = base_vm['diskimg_path']
                base_mem_path = base_vm['memorysnapshot_path']
        if base_disk_path is None or base_mem_path is None:
            message = "Failed, no such base VM exists: %s" % (vm_name)
            self.wfile.write(message)
            print(message)

        # read overlay files
        tmp_dir = tempfile.mkdtemp()
        time_transfer = Queue()
        time_decomp = Queue()
        time_delta = Queue()

        # check OS type
        # TODO: FIX this
        os_type = ''
        if base_disk_path.find('ubuntu') != -1:
            os_type = 'linux'
        else:
            os_type = 'window'

        start_time = datetime.now()
        # handling disk overlay
        disk_download_queue = JoinableQueue()
        disk_decomp_queue = JoinableQueue()
        (disk_download_pipe_in, disk_download_pipe_out) = Pipe()
        (disk_decomp_pipe_in, disk_decomp_pipe_out) = Pipe()
        disk_out_filename = os.path.join(tmp_dir, "disk.recover")
        disk_download_process = Process(target=network_worker, args=(self.rfile, disk_download_queue, time_transfer, CHUNK_SIZE, disk_size))
        disk_decomp_process = Process(target=decomp_worker, args=(disk_download_queue, disk_decomp_queue, time_decomp))
        disk_delta_process = Process(target=delta_worker, args=(disk_decomp_queue, time_delta, base_disk_path, disk_out_filename))

        # handling memory overlay
        mem_download_queue = JoinableQueue()
        mem_decomp_queue = JoinableQueue()
        (mem_download_pipe_in, mem_download_pipe_out) = Pipe()
        (mem_decomp_pipe_in, mem_decomp_pipe_out) = Pipe()
        mem_download_process = Process(target=network_worker, args=(self.rfile, mem_download_queue, time_transfer, CHUNK_SIZE, mem_size))
        mem_decomp_process = Process(target=decomp_worker, args=(mem_download_queue, mem_decomp_queue, time_decomp))
        # memory snapshot result will be pipelined to KVM
        kvm_pipename = os.path.join(tmp_dir, "mem.fifo")
        if os.path.exists(kvm_pipename):
            os.unlink(kvm_pipename)
        os.mkfifo(kvm_pipename)
        mem_delta_process = Process(target=delta_worker_pipe, args=(mem_decomp_queue, time_delta, base_mem_path, kvm_pipename))
        
        # start processes
        # wait for download disk first
        disk_download_process.start()
        disk_decomp_process.start()
        disk_delta_process.start()

        # Once disk is ready, start KVM
        # Memory snapshot will be completed by pipelining
        disk_delta_process.join()
        mem_download_process.start()
        mem_decomp_process.start()
        mem_delta_process.start()
        telnet_port = 9999
        vnc_port = 2
        exe_time = run_snapshot(disk_out_filename, kvm_pipename, telnet_port, vnc_port, wait_vnc_end=False, terminal_mode=True, os_type=os_type)
        kvm_end_time = datetime.now()

        mem_delta_process.join()

        # Print out Time Measurement
        disk_transfer_time = time_transfer.get()
        mem_transfer_time = time_transfer.get()
        disk_decomp_time = time_decomp.get()
        mem_decomp_time = time_decomp.get()
        disk_delta_time = time_delta.get()
        mem_delta_time = time_delta.get()
        disk_transfer_start_time = disk_transfer_time['start_time']
        disk_transfer_end_time = disk_transfer_time['end_time']
        disk_decomp_end_time = disk_decomp_time['end_time']
        disk_delta_end_time = disk_delta_time['end_time']
        mem_transfer_start_time = mem_transfer_time['start_time']
        mem_transfer_end_time = mem_transfer_time['end_time']
        mem_decomp_end_time = mem_decomp_time['end_time']
        mem_delta_end_time = mem_delta_time['end_time']

        transfer_diff = mem_transfer_end_time-disk_transfer_start_time
        decomp_diff = mem_decomp_end_time-mem_transfer_end_time
        delta_diff = mem_delta_end_time-mem_decomp_end_time
        kvm_diff = kvm_end_time-mem_delta_end_time
        total_diff = datetime.now()-start_time
        message = "\n"
        message += 'Transfer\tDecomp\tDelta\tBoot\tResume\tTotal\n'
        message += "%04d.%06d\t" % (transfer_diff.seconds, transfer_diff.microseconds)
        message += "%04d.%06d\t" % (decomp_diff.seconds, decomp_diff.microseconds)
        message += "%04d.%06d\t" % (delta_diff.seconds, delta_diff.microseconds)
        message += "%04d.%06d\t" % (kvm_diff.seconds, kvm_diff.microseconds)
        message += "%04d.%06d\t" % (total_diff.seconds, total_diff.microseconds)
        message += "\n"
        print(message)
        self.ret_success()
Example #42
# Using a pipe to communicate between two processes

from multiprocessing import Process, Pipe


def worker(conn):
    conn.send(['ali', 'sayfi'])
    conn.close()


if __name__ == '__main__':
    main_connection, worker_connection = Pipe()
    p = Process(target=worker, args=[worker_connection])
    p.start()
    print(main_connection.recv())
    main_connection.close()
Example #43
class TestClientListener(unittest.TestCase):
    PORT = 5000

    def _launch_process(self, client_listener_send, client_listener_recv):
        self.backup_scheduler_send.close()
        try:
            client_listener = ClientListener(TestClientListener.PORT, 5,
                                             client_listener_send,
                                             client_listener_recv)
            self.barrier.wait()
            client_listener()
        except Exception as e:
            raise e

    def setUp(self):
        try:
            from pytest_cov.embed import cleanup_on_sigterm
        except ImportError:
            pass
        else:
            cleanup_on_sigterm()
        self.barrier = Barrier(2)
        self.client_listener = None
        self.backup_scheduler_recv, client_listener_send = Pipe(False)
        client_listener_recv, self.backup_scheduler_send = Pipe(False)
        self.p = Process(target=self._launch_process,
                         args=(client_listener_send, client_listener_recv))
        self.p.start()

    def tearDown(self) -> None:
        if self.p.is_alive():
            self.p.terminate()
        self.backup_scheduler_send.close()
        self.backup_scheduler_recv.close()
        TestClientListener.PORT += 1

    def test_send_and_receive_command(self):
        self.barrier.wait()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect(('localhost', TestClientListener.PORT))
        socket_transferer = BlockingSocketTransferer(sock)
        socket_transferer.send_plain_text(
            '{"command": "dummy", "args": {"one": "one"}}')
        command, args = self.backup_scheduler_recv.recv()
        self.assertEqual(command, 'dummy')
        self.assertEqual(args, {"one": "one"})
        self.backup_scheduler_send.send(("OK", {}))
        msg = socket_transferer.receive_plain_text()
        self.assertEqual(json.loads(msg), {"message": "OK", "data": {}})
        socket_transferer.close()

    def test_error_json_and_then_working(self):
        self.barrier.wait()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect(('localhost', TestClientListener.PORT))
        socket_transferer = BlockingSocketTransferer(sock)
        socket_transferer.send_plain_text("asd")
        msg = socket_transferer.receive_plain_text()
        self.assertEqual(json.loads(msg)["message"], "ERROR")
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect(('localhost', TestClientListener.PORT))
        socket_transferer = BlockingSocketTransferer(sock)
        socket_transferer.send_plain_text(
            '{"command": "dummy", "args": {"one": "one"}}')
        command, args = self.backup_scheduler_recv.recv()
        self.assertEqual(command, 'dummy')
        self.assertEqual(args, {"one": "one"})
        self.backup_scheduler_send.send(("OK", {}))
        msg = socket_transferer.receive_plain_text()
        self.assertEqual(json.loads(msg), {"message": "OK", "data": {}})
        socket_transferer.close()

    def test_detect_backup_scheduler_dead(self):
        self.barrier.wait()
        self.backup_scheduler_send.close()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect(('localhost', TestClientListener.PORT))
        socket_transferer = BlockingSocketTransferer(sock)
        socket_transferer.send_plain_text(
            '{"command": "dummy", "args": {"one": "one"}}')
        msg = socket_transferer.receive_plain_text()
        self.assertEqual("ABORT", msg)
        sleep(1)
        self.assertTrue(not self.p.is_alive())

    def test_detect_socket_dead(self):
        self.barrier.wait()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect(('localhost', TestClientListener.PORT))
        socket_transferer = BlockingSocketTransferer(sock)
        socket_transferer.send_plain_text(
            '{"command": "dummy", "args": {"one": "one"}}')
        command, args = self.backup_scheduler_recv.recv()
        self.assertEqual(command, 'dummy')
        self.assertEqual(args, {"one": "one"})
        sock.shutdown(socket.SHUT_RDWR)
        sock.close()
        self.backup_scheduler_send.send(("OK", {}))
        sleep(1)
        self.assertTrue(not self.p.is_alive())
Example #44
	def __init__(self,strategy):
		self.Inqueue,self.Outqueue = Pipe()
		self.Strategy = strategy
Example #45
def first_in_last_out(conn: str, me: str, queue_ctrl: Queue, pipe_in: Pipe,
                      pipe_out: Pipe):
    """
    # 先进后出 / 只进
    # 控制信号(启动)--> 输入队列(推入)--> 控制信号(需反馈)--> 输出队列(推出)
    :param conn: str            # 套接字
    :param me: str              # 传输方式 ( "REPLY", "SUBSCRIBE", "PULL")
    :param queue_ctrl: Queue    # 控制队列 ("is_active", "is_response"):(bool, bool)
    :param pipe_in: Pipe        # 输入队列 (MSG,)
    :param pipe_out: pipe       # 输出队列 (MSG,)
    :return: None
    """
    context = zmq.Context()
    # ------- REPLY ----------------------------
    if me in ["REPLY"]:
        socket = context.socket(zmq.REP)
        socket.bind(conn)
        has_response = True
    # ------- PUBLISH --------------------------
    elif me in ["PUBLISH"]:
        socket = context.socket(zmq.PUB)
        socket.bind(conn)
        has_response = False
    # ------- PULL -----------------------------
    elif me in ["PULL"]:
        socket = context.socket(zmq.PULL)
        socket.bind(conn)
        has_response = False
    # ------- DEFAULT: PULL -----------------------------
    else:
        socket = context.socket(zmq.PULL)
        socket.bind(conn)
        has_response = False
    logging.debug("ZMQ::FILO::{0} bind:{1}".format(me, conn))
    # ------------------------------------- QUEUE
    is_active = queue_ctrl.get()
    while is_active:
        if queue_ctrl.empty():
            # RECV --------------------------------
            message_in = socket.recv()
            logging.debug("ZMQ::FILO::{0}::{1} recv:{2}".format(
                me, conn, message_in))
            msg_in = MSG(message_in.decode(CONFIG.Coding))
            pipe_in.send(msg_in)
            logging.debug("ZMQ::FILO::{0}::{1}::PIPE IN send:{2}".format(
                me, conn, msg_in))
            # SEND --------------------------------
            if has_response:
                try:
                    msg_out = pipe_out.recv()
                    logging.debug(
                        "ZMQ::FILO::{0}::{1}::PIPE OUT recv:{2}".format(
                            me, conn, msg_out))
                    message_out = str(msg_out).encode(CONFIG.Coding)
                    socket.send(message_out)
                    logging.debug("ZMQ::FILO::{0}::{1} send:{2}".format(
                        me, conn, message_out))
                except EOFError:
                    is_active = False
        else:
            is_active = queue_ctrl.get()
    else:
        queue_ctrl.close()
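A self-contained sketch of the same socket-to-pipe bridge in PULL mode; it requires pyzmq, and plain strings stand in for the module's MSG objects:

from multiprocessing import Pipe, Process
import zmq

def pull_bridge(addr, pipe_in):
    context = zmq.Context()
    socket = context.socket(zmq.PULL)
    socket.bind(addr)
    message = socket.recv()            # blocking ZeroMQ receive
    pipe_in.send(message.decode())     # hand off to the parent over the Pipe

if __name__ == '__main__':
    addr = "tcp://127.0.0.1:5557"
    recv_end, send_end = Pipe(duplex=False)
    bridge = Process(target=pull_bridge, args=(addr, send_end))
    bridge.start()
    push = zmq.Context().socket(zmq.PUSH)
    push.connect(addr)
    push.send(b"hello")
    print(recv_end.recv())             # hello
    bridge.join()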
Example #46
def consumer(con, pro, name, lo):
    pro.close()
    while True:
        lo.acquire()
        f = con.recv()
        lo.release()
        if f:
            time.sleep(random.random())
            print('\033[1;32m%s consumed %s\033[0m' % (name, f))
        else:
            con.close()
            break


if __name__ == '__main__':
    con, pro = Pipe()
    lo = Lock()
    c1 = Process(target=consumer, args=(con, pro, 'c1', lo))
    c2 = Process(target=consumer, args=(con, pro, 'c2', lo))
    p1 = Process(target=producer, args=(con, pro, 30, 'p1', 'slop'))  # producer() comes from the original example
    c2.start()
    c1.start()
    p1.start()

    con.close()
    pro.close()

    c2.join()
    c1.join()
    p1.join()
Example #47
def main():
    import optparse

    p = optparse.OptionParser(
        usage="usage: %prog [options] dump enriched_pickle")

    _, args = p.parse_args()

    if len(args) != 2:
        p.error("Too few or too many arguments")
    xml, rich_fn = args

    global lang_user_talk, lang_user, tag, user_classes
    ## pipe to send data to the  subprocess
    p_receiver, p_sender = Pipe(duplex=False)
    ## pipe to get elaborated data from the subprocess
    done_p_receiver, done_p_sender = Pipe(duplex=False)

    src = BZ2File(xml)

    tag = mwlib.get_tags(src)
    lang, date, _ = mwlib.explode_dump_filename(xml)
    g = sg_load(rich_fn)
    user_classes = dict(
        g.get_user_class('username',
                         ('anonymous', 'bot', 'bureaucrat', 'sysop')))

    p = Process(target=get_freq_dist, args=(p_receiver, done_p_sender))
    p.start()

    translations = mwlib.get_translations(src)
    lang_user, lang_user_talk = translations['User'], translations['User talk']

    assert lang_user, "User namespace not found"
    assert lang_user_talk, "User Talk namespace not found"

    ## open with a faster decompressor (but that probably cannot seek)
    src.close()
    src = lib.BZ2FileExt(xml, parallel=False)

    partial_process_page = partial(process_page, send=p_sender)
    mwlib.fast_iter(etree.iterparse(src, tag=tag['page']),
                    partial_process_page)
    logging.info('Users missing in the rich file: %d', count_missing)

    p_sender.send(0)  # this STOPS the process

    print >> sys.stderr, "end of parsing"

    ## SAVE DATA
    g.set_weighted_degree()
    users_cache = {}
    # get a list of pair (class name, frequency distributions)
    for cls, fd in done_p_receiver.recv():
        with open(
                "%swiki-%s-words-%s.dat" % (lang, date, cls.replace(' ', '_')),
                'w') as out:
            # users in this group
            try:
                users = users_cache[cls]
            except KeyError:
                users = get_class(g, cls)
                users_cache[cls] = users
            print >> out, '#users: ', len(users)
            print >> out, '#msgs: ', sum(users['weighted_indegree'])
            for k, v in fd:
                print >> out, v, k
        del fd

    for cls, counters in done_p_receiver.recv():
        with open(
                "%swiki-%s-smile-%s.dat" % (lang, date, cls.replace(' ', '_')),
                'w') as out:
            # users in this group
            try:
                users = users_cache[cls]
            except KeyError:
                users = get_class(g, cls)
                users_cache[cls] = users
            print >> out, '#users: ', len(users)
            print >> out, '#msgs: ', sum(users['weighted_indegree'])
            for k, v in counters:
                print >> out, v, k
        del counters

    p.join()

    print >> sys.stderr, "end of FreqDist"
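# The structure above — one one-way Pipe feeding a worker process, a second
# one-way Pipe returning its results, and a falsy value (here 0) as the stop
# sentinel — in minimal, self-contained form. freq_worker is a stand-in for
# get_freq_dist:
from multiprocessing import Process, Pipe

def freq_worker(receiver, done_sender):
    total = 0
    while True:
        item = receiver.recv()
        if not item:                 # 0 stops the worker, as in main() above
            break
        total += item
    done_sender.send(total)

if __name__ == '__main__':
    receiver, sender = Pipe(duplex=False)
    done_receiver, done_sender = Pipe(duplex=False)
    worker = Process(target=freq_worker, args=(receiver, done_sender))
    worker.start()
    for n in (1, 2, 3):
        sender.send(n)
    sender.send(0)                   # stop sentinel
    print(done_receiver.recv())     # -> 6
    worker.join()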
Example #48
0
            # SOCKETIO.emit('run deployment', filename, broadcast=False)
            file_location = 'uploads/' + str(filename)
            run_deployment(file_location)
            print('File successfully uploaded')
            # Returns empty url to prevent page reload upon submission
            return '', 204

    template_data = {'incoming': INCOMING, 'outgoing': OUTGOING}

    return render_template('robotWebapp.jinja2', **template_data)


if __name__ == '__main__':

    # Pipes to Webapp
    SERIAL_CHILD, SERIAL_PARENT = Pipe()

    # Pipes for encoder
    ENCODER_CHILD, ENCODER_PARENT = Pipe()

    # Queues for sql database connector
    RECORD_QUEUE = Queue()
    DATA_QUEUE = Queue()

    # Start AquaTROLL
    TROLL = Modbus(DATA_QUEUE)

    # Starts camera
    CAMERA_PROCESS = Process(target=Camera.start_camera,
                             args=((2592, 1944), 300, "Images", RECORD_QUEUE))
    CAMERA_PROCESS.start()
Example #49
0
        dp.add_handler(conv_handler)

        # log all errors
        dp.add_error_handler(error)

        # Start the Bot
        updater.start_polling()

        # Run the bot until you press Ctrl-C or the process receives SIGINT,
        # SIGTERM or SIGABRT. This should be used most of the time, since
        # start_polling() is non-blocking and will stop the bot gracefully.
        updater.idle()

    logger.debug(f'My connection is {conn}')
    telegramBot_main(token)


## ---------------------------------- END TELEGRAM -----------------------------------------------##

if __name__ == "__main__":

    logging.basicConfig()
    logging.root.setLevel(logging.INFO)
    logging.basicConfig(level=logging.INFO)

    # creating a pipe
    parent_conn, child_conn = Pipe()
    p = Process(target=start_bot, args=(child_conn, ))
    p.start()
Example #50
0
def first_out_last_in(conn: str, me: str, queue_ctrl: Queue, pipe_in: Pipe,
                      pipe_out: Pipe):
    """
    # first out, last in / send-only
    # control signal --> output queue --> feedback signal --> input queue
    :param conn: str            # socket address  "tcp://<hostname>:<port>"
    :param me: str              # transport mode  ["REQUEST", "SUBSCRIBE", "PUSH"]
    :param queue_ctrl: Queue    # control queue   ("is_active",):(bool,)
    :param pipe_in: Pipe        # input pipe      ("msg_in",):(MSG,)
    :param pipe_out: Pipe       # output pipe     ("msg_out",):(MSG,)
    :return: None
    """
    context = zmq.Context()
    # ------- REQUEST ----------------------------
    if me in ["REQUEST"]:
        socket = context.socket(zmq.REQ)
        socket.connect(conn)
        handshake, has_response = 0, True
    # ------- SUBSCRIBE --------------------------
    elif me in ["SUBSCRIBE"]:
        socket = context.socket(zmq.SUB)
        socket.connect(conn)
        handshake, has_response = 1, True
    # ------- PUSH ------------------------------
    elif me in ["PUSH"]:
        socket = context.socket(zmq.PUSH)
        socket.connect(conn)
        handshake, has_response = 0, False
    # ------- DEFAULT: PUSH ---------------------
    else:
        socket = context.socket(zmq.PUSH)
        socket.connect(conn)
        handshake, has_response = 0, False
    logging.debug("ZMQ::FOLI::{0} connect:{1}".format(me, conn))
    # ------------------------------------- QUEUE
    is_active = queue_ctrl.get()
    while is_active:
        if queue_ctrl.empty():
            # SEND --------------------------------
            if handshake == 1:
                socket.setsockopt(zmq.SUBSCRIBE, b'')  # pyzmq expects a bytes topic filter
                logging.debug("ZMQ::FOLI::{0}::{1} send:{2}".format(
                    me, conn, "zmq.SUBSCRIBE"))
            else:
                try:
                    msg_out = pipe_out.recv()
                    logging.debug(
                        "ZMQ::FOLI::{0}::{1}::PIPE OUT recv:{2}".format(
                            me, conn, msg_out))
                    message_out = str(msg_out).encode(CONFIG.Coding)
                    socket.send(message_out)
                    logging.debug("ZMQ::FOLI::{0}::{1} send:{2}".format(
                        me, conn, message_out))
                except EOFError:
                    is_active = False
            # RECV --------------------------------
            if has_response:
                message_in = socket.recv()
                logging.debug("ZMQ::FOLI::{0}::{1} recv:{2}".format(
                    me, conn, message_in))
                msg_in = MSG(message_in.decode(CONFIG.Coding))
                pipe_in.send(msg_in)
                logging.debug("ZMQ::FOLI::{0}::{1}::PIPE IN send:{2}".format(
                    me, conn, msg_in))
        else:
            is_active = queue_ctrl.get()
    else:
        queue_ctrl.close()
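# A minimal usage sketch for first_out_last_in, assuming the MSG class and
# CONFIG object from this module, and a PULL peer bound at the address: run
# the function in a child process, activate it through the control queue,
# and feed outbound messages through pipe_out.
if __name__ == '__main__':
    from multiprocessing import Process, Queue, Pipe
    ctrl = Queue()
    in_recv, in_send = Pipe(duplex=False)      # worker sends on pipe_in
    out_recv, out_send = Pipe(duplex=False)    # worker receives on pipe_out
    ctrl.put(True)                             # enter the send/recv loop
    worker = Process(target=first_out_last_in,
                     args=("tcp://127.0.0.1:5555", "PUSH", ctrl, in_send, out_recv))
    worker.start()
    out_send.send(MSG("ping"))                 # pushed out over ZMQ
    ctrl.put(False)                            # next pass through the loop stops
    out_send.close()                           # or: recv() raises EOFError if still blocked
    worker.join()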
Example #51
0
# def create_new_arr(seed)

if __name__ == "__main__":
    # for i in range(0, 100):
    #     print("\ni: {}".format(i))
    #     amount_bytes = np.random.randint(1, 20)
    #     print("amount_bytes: {}".format(amount_bytes))
    #     n = get_new_rnd_number(amount_bytes)
    #     print("n: {}".format(n))

    found_primes = []

    for i in range(0, 100):
        print("i: {}".format(i))

        pipes_out, pipes_in = list(zip(*[Pipe() for _ in range(0, 3)]))

        def func_create_new_prime(pipe_in, seeds):
            primes = factorint(get_new_rnd_number(seeds))
            p = sorted(list(primes.keys()))[-1]
            pipe_in.send(p)

        ps = [
            Process(target=func_create_new_prime,
                    args=(pipe_in, np.random.randint(0, 256, (35, ))))
            for pipe_in in pipes_in
        ]

        for p in ps:
            p.start()
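        # (the snippet is cut off here; a plausible continuation, collecting
        # one prime from each pipe and joining the workers, might be:)
        for pipe_out in pipes_out:
            found_primes.append(pipe_out.recv())
        for p in ps:
            p.join()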
Example #52
0
        if countSensor == 7:
            jarakTempuh += int(((kecepatan / 100000) / 3.6)) * 100
            countSensor = 0
            print('-------------------------------------------')
            print('Distance traveled : ', jarakTempuh)
            print('Time taken : ', float(jarakTempuh / kecepatan),
                  ' seconds')
            print('-------------------------------------------\n')

        countSensor += 1

        time.sleep(1)


if __name__ == '__main__':
    pipeIN, pipeOUT = Pipe()
    pSendKanan = Process(target=sendKanan, args=(pipeIN, ))
    pSendKiri = Process(target=sendKiri, args=(pipeIN, ))
    pSendDepan = Process(target=sendDepan, args=(pipeIN, ))
    pSendBelakang = Process(target=sendBelakang, args=(pipeIN, ))
    pSendDepanKanan = Process(target=sendDepanKanan, args=(pipeIN, ))
    pSendDepanKiri = Process(target=sendDepanKiri, args=(pipeIN, ))
    pSendJarakLampuLalin = Process(target=sendJarakLampuLalin, args=(pipeIN, ))
    pMasterKontrol = Process(target=masterKontrol, args=(pipeOUT, ))
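    # All seven sender processes share the pipeIN end; masterKontrol
    # presumably reads their messages from the other end, pipeOUT.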

    pSendKanan.start()
    pSendKiri.start()
    pSendDepan.start()
    pSendBelakang.start()
    pSendDepanKanan.start()
    pSendDepanKiri.start()
Example #53
0
    def __init__(self, name=None, stop_signal=False):
        self.name = name
        self.stop_signal = stop_signal and not name
        self.sub_pipe, self.main_pipe = Pipe()
        self.allow_sending = True
Example #54
0
    while True:
        cur_t = str(int(time.time()))
        time_to_main_w.send(cur_t)
        time.sleep(1)


def reader(input_to_main_w):
    i = 0
    while True:
        input_to_main_w.send(str(i))
        i += 1
        time.sleep(1)


if __name__ == '__main__':
    main_to_log_w, main_to_log_r = Pipe()
    log_proc = Process(target=logger, args=(main_to_log_r, ))
    log_proc.start()
    main_to_log_r.close()

    time_to_main_w, time_to_main_r = Pipe()
    time_proc = Process(target=timer, args=(time_to_main_w, ))
    time_proc.start()
    time_to_main_w.close()

    input_to_main_w, input_to_main_r = Pipe()
    input_proc = Process(target=reader, args=(input_to_main_w, ))
    input_proc.start()
    input_to_main_w.close()

    try:
Example #55
0
    # print('Process 2: ', vector2)


def process_three(pipe32, res):
    pid = 3
    global vector3
    vector3 = send_message(pipe32, pid, vector3)
    vector3 = recv_message(pipe32, pid, vector3)
    vector3 = event(pid, vector3)
    vector3 = recv_message(pipe32, pid, vector3)
    res[2] = vector3
    # print('Process 3: ', vector3)


# pipe creation
oneandtwo, twoandone = Pipe()
twoandthree, threeandtwo = Pipe()

process1 = Process(target=process_one, args=(oneandtwo, res))
process2 = Process(target=process_two, args=(twoandone, twoandthree, res))
process3 = Process(target=process_three, args=(threeandtwo, res))

process1.start()
process2.start()
process3.start()

process1.join()
process2.join()
process3.join()
print('\nFinal state: ')
print('Process 1: ', res[0])
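# The helpers used above (send_message, recv_message, event) are not shown.
# Minimal vector-clock versions consistent with the calls, where each helper
# takes and returns the process's clock vector, might look like:
def event(pid, vector):
    vector[pid - 1] += 1                  # local event: tick own component
    return vector

def send_message(pipe, pid, vector):
    vector[pid - 1] += 1                  # sending counts as an event
    pipe.send(vector)
    return vector

def recv_message(pipe, pid, vector):
    received = pipe.recv()
    vector = [max(a, b) for a, b in zip(vector, received)]  # merge clocks
    vector[pid - 1] += 1                  # receiving counts as an event
    return vector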
Example #56
0
    def get_position_buffer_connection(self):
        my_end, your_end = Pipe()
        self._position_buffer_connections.append(my_end)
        return your_end
Example #57
0
s = Template('$who likes $what')
s = s.substitute(who='this', what='that')
print(s)

###############################################################
from multiprocessing import Process, Pipe
def f(conn):
  conn.send('This is sent through a pipe!')
  conn.close()

def g(conn):
  conn.send('This is sent through a pipe too!')
  conn.close()

if __name__ == '__main__':
  parent_conn, child1_conn = Pipe()
  #parent_conn, child2_conn = Pipe()
  p = Process(target = f, args = (child1_conn,))
  q = Process(target = g, args = (child1_conn,))
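  # both children write to the same child1_conn end, so the recv() below
  # returns whichever message happens to arrive first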
  q.start()
  p.start()
  print(parent_conn.recv())
  #print(parent_conn.recv())
  p.join()
  q.join()


################################################################
import os
from os.path import join, getsize
def f1(rootDir, exDir = 'CVS'):
Example #58
0
    def Run_Event(self, evt):
        self.Gen_Config()
        parent_conn, child_conn = Pipe()
        self.biosup_process = Process(target=self.BIOSUP_APP)
        self.biosup_process.daemon = True
        self.biosup_process.start()
Example #59
0
    def _execute(
        self,
        *,
        use_threads: bool,
        max_workers: int,
        tqdm_kwargs: dict,
        worker_initializer: Callable,
        task: Callable,
        task_arguments: Iterable,
        task_finished: Callable,
    ):
        if use_threads and max_workers == 1:
            with self.pbar_class(**tqdm_kwargs) as pbar:
                for args in task_arguments:
                    result = task(args)
                    task_finished(result, pbar)
            return

        task_arguments = list(task_arguments)
        grouped_args = list(
            zip_longest(*list(split_every(max_workers, task_arguments))))
        if not grouped_args:
            return

        processes = []
        connections = []
        for chunk in grouped_args:
            parent_conn, child_conn = Pipe()

            worker_args = [args for args in chunk if args is not None]
            process = Process(
                target=process_loop,
                args=(
                    child_conn,
                    worker_initializer,
                    logging.getLogger("").level,
                    task,
                    worker_args,
                ),
            )
            process.daemon = True
            processes.append(process)
            connections.append(parent_conn)

        for process in processes:
            process.start()

        with self.pbar_class(**tqdm_kwargs) as pbar:
            while connections:
                for r in wait(connections):
                    try:
                        msg_type, msg = r.recv()
                    except EOFError:
                        connections.remove(r)
                        continue

                    if msg_type == MessageType.result:
                        if task_finished:
                            task_finished(msg, pbar)
                    elif msg_type == MessageType.log:
                        record = msg
                        logger = logging.getLogger(record.name)
                        logger.handle(record)
                    elif msg_type == MessageType.complete:
                        connections.remove(r)
                    elif msg_type == MessageType.exception:
                        for process in processes:
                            process.terminate()
                        raise msg

        for process in processes:
            process.join()
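# process_loop is not shown above; a minimal counterpart consistent with the
# (MessageType, payload) protocol consumed by _execute could look like this
# (a log handler forwarding records as (MessageType.log, record) is omitted):
def process_loop(conn, worker_initializer, log_level, task, worker_args):
    logging.getLogger("").setLevel(log_level)  # mirror the parent's log level
    try:
        if worker_initializer:
            worker_initializer()
        for args in worker_args:
            conn.send((MessageType.result, task(args)))
        conn.send((MessageType.complete, None))
    except Exception as exc:
        conn.send((MessageType.exception, exc))
    finally:
        conn.close()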
Example #60
0
from multiprocessing import Process
from multiprocessing import Pipe
from flask import Flask, render_template
import SocketServer
import time

a, b = Pipe()


def webSvr():

    print('starting Flask server')

    app = Flask(__name__)

    @app.route('/')
    def index():
        return render_template('index.html')

    @app.route('/forward/')
    def forward():
        a.send('8')
        print('somebody clicked forward!!')
        return render_template('index.html')

    @app.route('/reverse/')
    def reverse():
        a.send('2')
        print('somebody clicked reverse!')
        return render_template('index.html')