Example #1
    def testNotifications(self):

        systems = {}

        try:

            # Start sub-processes before ActorSystem so that the
            # ActorSystem doesn't get duplicated in all the
            # sub-processes.  The sub-processes will wait for a
            # startup message from this process before creating their
            # ActorSystems.

            parent_conn1, child_conn1 = Pipe()
            child1 = Process(target=DagobahSystem,
                             args=(child_conn1, self.actorSystemBase,
                                   getattr(self, 'extraCapabilities', None)))
            child1.start()
            systems['Dagobah'] = (parent_conn1, child1)

            child_conn2, parent_conn2 = Pipe()
            child2 = Process(target=HothSystem,
                             args=(child_conn2, self.actorSystemBase,
                                   getattr(self, 'extraCapabilities', None)))
            child2.start()
            systems['Hoth'] = (parent_conn2, child2)

            child_conn3, parent_conn3 = Pipe()
            child3 = Process(target=EndorSystem,
                             args=(child_conn3, self.actorSystemBase,
                                   getattr(self, 'extraCapabilities', None)))
            child3.start()
            systems['Endor'] = (parent_conn3, child3)

            child_conn4, parent_conn4 = Pipe()
            child4 = Process(target=NabooSystem,
                             args=(child_conn4, self.actorSystemBase,
                                   getattr(self, 'extraCapabilities', None)))
            child4.start()
            systems['Naboo'] = (parent_conn4, child4)

            # Start the Primary ActorSystem and an Actor that
            # registers for Convention entry/exit from other
            # ActorSystems.

            caps = {'Jedi Council': True, 'Admin Port': 12121}
            caps.update(getattr(self, 'extraCapabilities', {}))
            ActorSystem(self.actorSystemBase,
                        caps,
                        logDefs=simpleActorTestLogging())

            watcher = ActorSystem().createActor(Notified)
            ActorSystem().tell(watcher, 'register')
            sleep(0.10)  # wait for watcher to register

            # Now start each of the secondary ActorSystems; their
            # registration should be noted by the Actor registered for
            # such notifications.

            for each in systems:
                systems[each][0].send('Start now please')

            # Verify all anticipated registrations actually occurred.

            for X in range(30):
                registrations = ActorSystem().ask(watcher, 'notifications',
                                                  1).split('&')
                print(registrations)
                if 4 == len(registrations):
                    break
                sleep(0.01)  # wait for more registrations to complete
            self.assertEqual(4, len(registrations))

            # Now ask an ActorSystem to exit

            systems['Hoth'][0].send('OK, all done')
            del systems['Hoth']

            # Verify that the convention deregistration occurred

            for X in range(30):
                registrations2 = ActorSystem().ask(watcher, 'notifications',
                                                   1).split('&')
                if 5 == len(registrations2):
                    break
                sleep(0.01)  # wait for Hoth system to exit and deregister
            self.assertEqual(5, len(registrations2))

            outs = [X for X in registrations2 if X.startswith('OUT')]
            self.assertEqual(1, len(outs))

        finally:
            for system in systems:
                systems[system][0].send('OK, all done')
            sleep(0.1)  # allow other actorsystems (non-convention-leaders) to exit
            ActorSystem().shutdown()
Example #2
def test_user_config_file(user_home, user_entry):
    info("Check user config file contents")
    import ConfigParser
    config = ConfigParser.ConfigParser()
    config.read("/etc/domogik/domogik.cfg")

    #check [domogik] section
    dmg = dict(config.items('domogik'))
    database = dict(config.items('database'))
    rest = dict(config.items('rest'))
    admin = dict(config.items('admin'))
    butler = dict(config.items('butler'))
    backup = dict(config.items('backup'))
    ok("Config file correctly loaded")

    info("Parse [domogik] section")
    import domogik

    #Check if the xPL port is not used
    _check_port_availability("0.0.0.0", 3865, udp=True)
    ok("xPL hub IP/port is not bound by anything else")

    parent_conn, child_conn = Pipe()
    p = Process(target=_test_user_can_write,
                args=(
                    child_conn,
                    dmg['log_dir_path'],
                    user_entry,
                ))
    p.start()
    p.join()
    assert parent_conn.recv(), \
        "The directory %s for log does not exist or does not have the right permissions" % dmg['log_dir_path']

    assert dmg['log_level'] in [
        'debug', 'info', 'warning', 'error', 'critical'
    ], ("The log_level parameter does not have a valid value. Must "
        "be one of debug, info, warning, error, critical")

    ### obsolete
    #if not os.path.isdir(dmg['src_prefix'] + '/share/domogik'):
    #    try:
    #        f = os.listdir("%s/share/domogik" % dmg['src_prefix'])
    #        f.close()
    #    except OSError:
    #        fail("Can't access %s/share/domogik. Check %s is available for domogik user (if you are in development mode, be sure the directory which contains the sources is available for domogik user)." % (dmg['src_prefix'],dmg['src_prefix']))
    #        exit()
    ok("[domogik] section seems good")

    # check [database] section
    info("Parse [database] section")
    assert database['type'] == 'mysql', \
        "Only the mysql database type is supported at the moment"

    uid = user_entry.pw_uid
    os.setreuid(0, uid)
    old_home = os.environ['HOME']
    os.environ['HOME'] = user_home
    from domogik.common.database import DbHelper
    d = DbHelper()
    os.setreuid(0, 0)
    os.environ['HOME'] = old_home
    assert d.get_engine() is not None, \
        "Engine is not set, it seems something went wrong during connection to the database"

    ok("[database] section seems good")

    # Check [rest] section
    info("Parse [rest] section")
    for ipadd in get_ip_for_interfaces(rest['interfaces'].split(",")):
        _check_port_availability(ipadd, rest['port'])
    ok("Rest server IP/port is not bound by anything else")

    # Check [admin] section
    info("Parse [admin] section")
    for ipadd in get_ip_for_interfaces(admin['interfaces'].split(",")):
        _check_port_availability(ipadd, admin['port'])
    ok("Admin server IP/port is not bound by anything else")
Example #3
    address. Called when a user tries to submit a new transaction.
    """
    public_key = (base64.b64decode(public_key)).hex()
    signature = base64.b64decode(signature)
    vk = ecdsa.VerifyingKey.from_string(bytes.fromhex(public_key),
                                        curve=ecdsa.SECP256k1)
    try:
        return (vk.verify(signature, message.encode()))
    except:
        return False


def welcome_msg():
    print("""       =========================================\n
        SIMPLE COIN v1.0.0 - BLOCKCHAIN SYSTEM\n
       =========================================\n\n
        You can find more help at: https://github.com/cosme12/SimpleCoin\n
        Make sure you are using the latest version or you may end in
        a parallel chain.\n\n\n""")


if __name__ == '__main__':
    welcome_msg()
    #Start mining
    a, b = Pipe()
    p1 = Process(target=mine, args=(a, BLOCKCHAIN, NODE_PENDING_TRANSACTIONS))
    p1.start()
    #Start server to receive transactions
    p2 = Process(target=node.run, args=(b,))  # target must be the callable itself; args must be a tuple
    p2.start()
Example #4
            #chn_cnt = 0
            for wibno in wibnos:
                for fembno in fembnos:
                    chn_cnt = 0
                    ffts = []
                    if not ((APAno == 1) and (wibno == 4) and (fembno == 2)):
                        print(APAno, wibno, fembno)
                        log_str = log_str + str(wibno) + str(fembno) + "_"
                        chns = []
                        for chn_loc in All_sort:
                            if (chn_loc[0][0] == wire_type):
                                chns.append(int(chn_loc[1]))
                        mps = []
                        for chnno in chns:
                            chn_cnt = chn_cnt + 1
                            pc, cc = Pipe()
                            fft_s = 5000
                            ana_a_chn_args = (cc, out_path, rms_rootpath,
                                              fpga_rootpath, asic_rootpath,
                                              APAno, rmsrunno, fpgarunno,
                                              asicrunno, wibno, fembno, chnno,
                                              gain, tp, jumbo_flag, fft_s, apa)
                            p = mp.Process(target=pipe_ana_a_chn,
                                           args=ana_a_chn_args)
                            mps.append([pc, cc, p])

                        for onep in mps:
                            onep[2].start()

                        for onep in mps:
                            ffts.append(onep[0].recv())
Example #5
For example:
"""

from multiprocessing import Process, Pipe


def child(conn):
    conn.send([12, {'name': 'yuan'}, 'hello'])
    response = conn.recv()
    print('child receive: ', response)
    conn.close()


if __name__ == '__main__':
    print('main start ...')

    parent_conn, child_conn = Pipe()  # the pipe's pair of (two) connection objects

    p = Process(target=child, args=(child_conn, ))  # create the process
    p.start()  # start the process

    print('parent receive: ', parent_conn.recv())
    parent_conn.send('Hello, child!')
    p.join()

    print('\nmain end ...')
"""
The two connection objects returned by Pipe() represent the two ends of the pipe. Each connection object has send() and recv() methods (and so on).
Note that the data in the pipe may become corrupted if two processes (or threads) try to read from or write to the same end of the pipe at the same time.
"""
Example #6
def memory_usage(proc=-1,
                 interval=.1,
                 timeout=None,
                 timestamps=False,
                 include_children=False,
                 multiprocess=False,
                 max_usage=False,
                 retval=False,
                 stream=None,
                 backend=None):
    """
    Return the memory usage of a process or piece of code

    Parameters
    ----------
    proc : {int, string, tuple, subprocess.Popen}, optional
        The process to monitor. Can be given by an integer/string
        representing a PID, by a Popen object or by a tuple
        representing a Python function. The tuple contains three
        values (f, args, kw) and specifies to run the function
        f(*args, **kw).
        Set to -1 (default) for current process.

    interval : float, optional
        Interval at which measurements are collected.

    timeout : float, optional
        Maximum amount of time (in seconds) to wait before returning.

    max_usage : bool, optional
        Only return the maximum memory usage (default False)

    retval : bool, optional
        For profiling python functions. Save the return value of the profiled
        function. Return value of memory_usage becomes a tuple:
        (mem_usage, retval)

    timestamps : bool, optional
        if True, timestamps of memory usage measurement are collected as well.

    include_children : bool, optional
        if True, sum the memory of all forked processes as well

    multiprocess : bool, optional
        if True, track the memory usage of all forked processes.

    stream : File
        if stream is a File opened with write access, then results are written
        to this file instead of stored in memory and returned at the end of
        the subprocess. Useful for long-running processes.
        Implies timestamps=True.

    Returns
    -------
    mem_usage : list of floating-point values
        memory usage, in MiB. Its length is always < timeout / interval.
        If max_usage is given, returns the maximum memory usage and the
        number of measurements made.
    ret : return value of the profiled function
        Only returned if retval is set to True
    """
    backend = choose_backend(backend)
    if stream is not None:
        timestamps = True

    if not max_usage:
        ret = []
    else:
        ret = -1

    if timeout is not None:
        max_iter = int(timeout / interval)
    elif isinstance(proc, int):
        # external process and no timeout
        max_iter = 1
    else:
        # for a Python function wait until it finishes
        max_iter = float('inf')

    if callable(proc):
        proc = (proc, (), {})
    if isinstance(proc, (list, tuple)):
        if len(proc) == 1:
            f, args, kw = (proc[0], (), {})
        elif len(proc) == 2:
            f, args, kw = (proc[0], proc[1], {})
        elif len(proc) == 3:
            f, args, kw = (proc[0], proc[1], proc[2])
        else:
            raise ValueError

        while True:
            child_conn, parent_conn = Pipe()  # this will store MemTimer's results
            p = MemTimer(os.getpid(),
                         interval,
                         child_conn,
                         backend,
                         timestamps=timestamps,
                         max_usage=max_usage,
                         include_children=include_children)
            p.start()
            parent_conn.recv()  # wait until we start getting memory

            # When there is an exception in the "proc" - the (spawned) monitoring processes don't get killed.
            # Therefore, the whole process hangs indefinitely. Here, we are ensuring that the process gets killed!
            try:
                returned = f(*args, **kw)
                parent_conn.send(0)  # finish timing
                ret = parent_conn.recv()
                n_measurements = parent_conn.recv()
                if retval:
                    ret = ret, returned
            except Exception:
                parent = psutil.Process(os.getpid())
                for child in parent.children(recursive=True):
                    os.kill(child.pid, SIGKILL)
                p.join(0)
                raise

            p.join(5 * interval)
            if n_measurements > 4 or interval < 1e-6:
                break
            interval /= 10.
    elif isinstance(proc, subprocess.Popen):
        # external process, launched from Python
        line_count = 0
        while True:
            if not max_usage:
                mem_usage = _get_memory(proc.pid,
                                        backend,
                                        timestamps=timestamps,
                                        include_children=include_children)

                if stream is not None:
                    stream.write("MEM {0:.6f} {1:.4f}\n".format(*mem_usage))

                    # Write children to the stream file
                    if multiprocess:
                        for idx, chldmem in enumerate(
                                _get_child_memory(proc.pid)):
                            stream.write("CHLD {0} {1:.6f} {2:.4f}\n".format(
                                idx, chldmem, time.time()))
                else:
                    # Create a nested list with the child memory
                    if multiprocess:
                        mem_usage = [mem_usage]
                        for chldmem in _get_child_memory(proc.pid):
                            mem_usage.append(chldmem)

                    # Append the memory usage to the return value
                    ret.append(mem_usage)
            else:
                ret = max(
                    ret,
                    _get_memory(proc.pid,
                                backend,
                                include_children=include_children))
            time.sleep(interval)
            line_count += 1
            # flush every 50 lines. Make 'tail -f' usable on profile file
            if line_count > 50:
                line_count = 0
                if stream is not None:
                    stream.flush()
            if timeout is not None:
                max_iter -= 1
                if max_iter == 0:
                    break
            if proc.poll() is not None:
                break
    else:
        # external process
        if max_iter == -1:
            max_iter = 1
        counter = 0
        while counter < max_iter:
            counter += 1
            if not max_usage:
                mem_usage = _get_memory(proc,
                                        backend,
                                        timestamps=timestamps,
                                        include_children=include_children)
                if stream is not None:
                    stream.write("MEM {0:.6f} {1:.4f}\n".format(*mem_usage))

                    # Write children to the stream file
                    if multiprocess:
                        for idx, chldmem in enumerate(_get_child_memory(proc)):
                            stream.write("CHLD {0} {1:.6f} {2:.4f}\n".format(
                                idx, chldmem, time.time()))
                else:
                    # Create a nested list with the child memory
                    if multiprocess:
                        mem_usage = [mem_usage]
                        for chldmem in _get_child_memory(proc):
                            mem_usage.append(chldmem)

                    # Append the memory usage to the return value
                    ret.append(mem_usage)
            else:
                ret = max([
                    ret,
                    _get_memory(proc,
                                backend,
                                include_children=include_children)
                ])

            time.sleep(interval)
            # Flush every 50 lines.
            if counter % 50 == 0 and stream is not None:
                stream.flush()
    if stream:
        return None
    return ret
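
A brief usage sketch for the function above; the memory_profiler import path is an assumption and should be adjusted to wherever memory_usage is actually defined:

from memory_profiler import memory_usage  # assumed import path

def allocate(n):
    return list(range(n))

if __name__ == '__main__':
    # Sample the current process (-1) every 0.2 s for roughly one second.
    samples = memory_usage(-1, interval=0.2, timeout=1)
    print(max(samples))

    # Profile a Python function given as (f, args, kwargs) and keep its return value.
    mem, result = memory_usage((allocate, (10**6,), {}), retval=True)
    print(max(mem), len(result))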
Example #7
    lockCount = Lock()
    lockWrite = Lock()

    queue = Queue()
    q = Queue()
    queue_echange = Queue()

    clock_ok = Event()
    weather_ok = Event()

    count = Value('i', 0)
    temp = Array('f', range(2))
    wind = Array('f', range(2))
    market_OK = Value('b', False)

    term_conn, markt_conn = Pipe()
    term_conn2, weather_conn = Pipe()

    Homes = []

    for i in range(1, nmbHome + 1):
        num_policy = random.randrange(1, 4)
        h = Process(
            target=Home,
            args=(i, lockQueue, lockCount, queue, count, market_OK, clock_ok,
                  temp, wind, weather_ok, num_policy, q, queue_echange),
        )
        h.start()
        if num_policy == 1:
            print("Home", i, "Always give away (", num_policy, ")")
Example #8
def lambda_handler_fn(events: Any, context: Any) -> None:
    # Parse sns message
    LOGGER.debug(f"handling events: {events} context: {context}")

    client = GraphClient()

    s3 = get_s3_client()

    load_plugins(os.environ["BUCKET_PREFIX"], s3,
                 os.path.abspath(MODEL_PLUGINS_DIR))

    for event in events["Records"]:
        if not IS_LOCAL:
            event = json.loads(event["body"])["Records"][0]
        data = parse_s3_event(s3, event)

        message = json.loads(data)

        LOGGER.info(f'Executing Analyzer: {message["key"]}')
        analyzer = download_s3_file(
            s3, f"{os.environ['BUCKET_PREFIX']}-analyzers-bucket",
            message["key"])
        analyzer_name = message["key"].split("/")[-2]

        subgraph = SubgraphView.from_proto(client, bytes(message["subgraph"]))

        # TODO: Validate signature of S3 file
        LOGGER.info(f"event {event}")
        rx: Connection
        tx: Connection
        rx, tx = Pipe(duplex=False)
        p = Process(target=execute_file,
                    args=(analyzer_name, analyzer, subgraph, tx, ""))

        p.start()
        t = 0

        while True:
            p_res = rx.poll(timeout=5)
            if not p_res:
                t += 1
                LOGGER.info(
                    f"Polled {analyzer_name} for {t * 5} seconds without result"
                )
                continue
            result: Optional[Any] = rx.recv()

            if isinstance(result, ExecutionComplete):
                LOGGER.info("execution complete")
                break

            # emit any hits to an S3 bucket
            if isinstance(result, ExecutionHit):
                LOGGER.info(
                    f"emitting event for {analyzer_name} {result.analyzer_name} {result.root_node_key}"
                )
                emit_event(s3, result)
                update_msg_cache(analyzer, result.root_node_key,
                                 message["key"])
                update_hit_cache(analyzer_name, result.root_node_key)

            assert not isinstance(
                result, ExecutionFailed), f"Analyzer {analyzer_name} failed."

        p.join()
Example #9
def digest(sfile, sha1each = 4 * 1024 * 1024, cmds = None):
    if cmds is None:
        cmds = []
    elif isinstance(cmds, str):
        cmds = [cmds]

    # processes

    def mpw_reader(p, sfile):
        with open(sfile, 'rb') as bfile:
            while True:
                if "GO" != p.recv():
                    break

                b_read = bfile.read(HASH_FILE_BUFFER_SIZE)
                if b_read:
                    p.send(b_read)
                else:
                    break
        p.send(None)

    def mpw_md5(p):
        hl = hashlib.md5()
        while True:
            buf = p.recv()
            if buf is not None:
                hl.update(buf)
                p.send("OK")
            else:
                p.send(hl.hexdigest())
                break

    def mpw_sha1(p):
        hl = hashlib.sha1()
        while True:
            buf = p.recv()
            if buf is not None:
                hl.update(buf)
                p.send("OK")
            else:
                p.send(hl.hexdigest())
                break

    def mpw_sha256(p):
        hl = hashlib.sha256()
        while True:
            buf = p.recv()
            if buf is not None:
                hl.update(buf)
                p.send("OK")
            else:
                p.send(hl.hexdigest())
                break

    def mpw_sha512(p):
        hl = hashlib.sha512()
        while True:
            buf = p.recv()
            if buf is not None:
                hl.update(buf)
                p.send("OK")
            else:
                p.send(hl.hexdigest())
                break


    # Supervisor Process

    try:
        Pp_reader, Pc_reader = Pipe()
        P_reader = Process(target=mpw_reader, args=(Pc_reader, sfile))
        P_reader.start()
        Pp_reader.send("GO")

        Pp_b2, Pc_b2 = Pipe()
        I_b2 = 0
        H_b2 = []
        P_b2 = Process(target=mpw_sha1, args=(Pc_b2,))

        Pp_workers = [Pipe(), Pipe(), Pipe(), Pipe()]

        P_workers = {
            "md5":    Process(target=mpw_md5,    args=(Pp_workers[0][1],)),
            "sha1":   Process(target=mpw_sha1,   args=(Pp_workers[1][1],)),
            "sha256": Process(target=mpw_sha256, args=(Pp_workers[2][1],)),
            "sha512": Process(target=mpw_sha512, args=(Pp_workers[3][1],))
            }

        for c in cmds:
            pass

        for p in P_workers.values():
            p.start()
        P_b2.start()

        while True:
            buf = Pp_reader.recv()
            if buf is None:
                break

            Pp_reader.send("GO")

            for p, _ in Pp_workers:
                p.send(buf)

            t_b2 = I_b2
            I_b2 = I_b2 + len(buf)
            if t_b2 // sha1each != I_b2 // sha1each:
                t_b2 = sha1each - (t_b2  % sha1each)
                Pp_b2.send(buf[0:t_b2])
                if "OK" != Pp_b2.recv():
                    raise RuntimeError("Partial sha1each process got sick, the operation failed on chunk.")
                Pp_b2.send(None)
                H_b2.append(Pp_b2.recv())
                P_b2 = Process(target=mpw_sha1, args=(Pc_b2,))
                P_b2.start()
                if t_b2 < HASH_FILE_BUFFER_SIZE:
                    Pp_b2.send(buf[t_b2:])
            else:
                Pp_b2.send(buf)
                if "OK" != Pp_b2.recv():
                    raise RuntimeError("Partial sha1each process got sick, the operation failed.")

            # We need to wait for -all- workers
            for p, _ in Pp_workers:
                if "OK" != p.recv():
                    raise RuntimeError("One of the worker processes got sick, the operation failed.")

        Pp_b2.send(None)
        for p, _ in Pp_workers:
            p.send(None)
        H_b2.append(Pp_b2.recv())

        # All workers MUST exit, the following hash workers MUST return a hexdigest string/bytestring


        return {'md5':    Pp_workers[0][0].recv(),
                'sha1':   Pp_workers[1][0].recv(),
                'sha256': Pp_workers[2][0].recv(),
                'sha512': Pp_workers[3][0].recv(),
                'sha1each': H_b2}

    except (Exception,) as e:
        raise e
Example #10
                    if ObjectsList[i][5] + offset < image_y + target_interval \
                            and ObjectsList[i][5] + offset > image_y - target_interval:
                        print("aiming to head !")
                        pyautogui.click()
                break
                #continue

        cv2.imshow("YOLO v3", r_image)
        fps += 1
        TIME = time.time() - start_time
        if (TIME) >= display_time:
            print("FPS: ", fps / (TIME))
            fps = 0
            start_time = time.time()
        if cv2.waitKey(1) & 0xFF == ord('q'): break

    yolo.close_session()


if __name__ == "__main__":
    p_output, p_input = Pipe()

    # creating new processes
    p1 = multiprocessing.Process(target=GRABMSS_screen, args=(p_input, ))
    p2 = multiprocessing.Process(target=SHOWMSS_screen, args=(p_output, ))

    # starting our processes
    p1.start()
    p2.start()
Example #11
def main():

    # Internal function to get help message
    def get_help_message():
        msg = ""
        msg = msg + "To transition ground station into these modes, enter commands: " + "\n"
        msg = msg + "Contact mode:                      [C] " + "\n"
        # msg = msg + "Downlink mode: [D] " + "\n"
        msg = msg + "Keep beacons quiet:                [Q] " + "\n"
        msg = msg + "Turn on beacons:                   [U] " + "\n"
        msg = msg + "Terminate Script:                  [Z] " + "\n"
        msg = msg + "Display this help message:         [H]" + "\n"
        return msg

    try:
        # Check for mission folder
        if not os.path.exists(GROUND_STN_MISSION_FOLDER_PATH):
            os.makedirs(GROUND_STN_MISSION_FOLDER_PATH)

        # Check for hk logs folder
        if not os.path.exists(GROUND_STN_OBC_HK_FOLDER_PATH):
            os.makedirs(GROUND_STN_OBC_HK_FOLDER_PATH)

        # Initialize serial ports for TT&C transceiver
        ttnc_port = input("Enter COM port for TT&C transceiver: ")
        serial_ttnc = serial.Serial(ttnc_port, 9600, timeout=10)

        # Create pipes to communicate with beacon process
        conn_process_beacon, conn_main_process = Pipe(duplex=True)

        # Initialize serial ports for payload transceiver
        payload_port = input("Enter COM port for Payload transceiver: ")
        serial_payload = serial.Serial(payload_port, 115200, timeout=None)

        # Initialize background scheduler for Downlink task
        scheduler = BackgroundScheduler()
        scheduler.start()

        # Enter Autonomous mode to wait for beacons
        process_beacon_collection = Process(target=handle_incoming_beacons,
                                            args=(serial_ttnc,
                                                  conn_process_beacon),
                                            daemon=True)

        run_flag = True

        while run_flag:

            # Initial begin
            print()
            print("---- GROUND STATION ----")
            init_response = input("To begin, enter [Y]... ")
            if init_response.lower() == 'y':
                # Carry on running script
                print()
                pass
            else:
                print()
                print("Exiting script...")
                break

            # Begin Autonomous Mode
            print("Entering Autonomous Mode...")
            print()
            process_beacon_collection.start()

            # Wait for trigger to enter other modes
            print("---- WAITING FOR COMMANDS ----")
            print(get_help_message())

            while run_flag:
                choice = input()
                print()

                if choice.lower() == 'h':
                    print(get_help_message())

                elif choice.lower() == 'c':

                    # Stop beacon receiving process
                    conn_main_process.send("stop")
                    process_beacon_collection.join()

                    # Start contact mode process
                    print("Start Contact mode process")
                    telecommand_type, ts = handle_contact_mode(serial_ttnc)

                    # Schedule downlink task
                    if telecommand_type == TELECOMMAND_TYPE_MISSION_DOWNLINK:
                        # Subtract 2 mins from time stamp
                        ts = ts - timedelta(minutes=2)

                        scheduler.add_job(handle_downlink_task,
                                          next_run_time=ts,
                                          args=[serial_payload])

                        print("Scheduled downlink job")
                        print()

                    # Resume beacon collection after contact mode process ends
                    print("Restart beacon collection process")
                    print()
                    process_beacon_collection = Process(
                        target=handle_incoming_beacons,
                        args=(serial_ttnc, conn_process_beacon),
                        daemon=True)
                    process_beacon_collection.start()

                elif choice.lower() == 'q':
                    print("Verbose mode now\n")
                    conn_main_process.send("verbose on")
                    pass

                elif choice.lower() == 'u':
                    print("Verbose mode off\n")
                    conn_main_process.send("verbose off")
                    pass

                elif choice.lower() == 'z':
                    conn_main_process.send("stop")
                    process_beacon_collection.join()
                    run_flag = False

                else:
                    print("Command not found...")
                    print()

    except KeyboardInterrupt:
        run_flag = False

    except serial.serialutil.SerialException:
        print("Invalid Serial port!")
        sys.exit()

    serial_payload.close()
    serial_ttnc.close()

    conn_main_process.close()
    conn_process_beacon.close()

    print("Terminated script")
    sys.exit()
Example #12
def pipe_process_pair():
    recv, send = Pipe()
    return PipeConsumerProcess(recv), PipeProducerProcess(send)
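
PipeConsumerProcess and PipeProducerProcess are not shown in this example; below is a minimal sketch of what such Process subclasses might look like (these definitions are assumptions, not the original ones):

from multiprocessing import Process

class PipeConsumerProcess(Process):
    """Hypothetical consumer: reads items from its pipe end until the writer closes."""
    def __init__(self, conn):
        super().__init__()
        self.conn = conn

    def run(self):
        while True:
            try:
                item = self.conn.recv()
            except EOFError:
                break
            print('consumed:', item)

class PipeProducerProcess(Process):
    """Hypothetical producer: writes a few items into its pipe end, then closes it."""
    def __init__(self, conn):
        super().__init__()
        self.conn = conn

    def run(self):
        for item in range(3):
            self.conn.send(item)
        self.conn.close()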
Example #13
from multiprocessing import Pipe

conn1, conn2 = Pipe()

conn1.send('the 1st message sent by conn1')
conn1.send('the 2nd message sent by conn1')

conn2.send('the 1st message sent by conn2')
conn2.send('the 2nd message sent by conn2')

print(conn1.recv())
print(conn1.recv())

print(conn2.recv())
print(conn2.recv())

"""
the 1st message sent by conn2
the 2nd message sent by conn2
the 1st message sent by conn1
the 2nd message sent by conn1
"""

# Half-duplex
# c1 can only receive, c2 can only send
c1, c2 = Pipe(False)

c2.send('data sent by c2')
print(c1.recv())
"""
data sent by c2
Example #14
    def testNotifications(self):

        systems = {}

        try:

            # Start sub-processes before ActorSystem so that the
            # ActorSystem doesn't get duplicated in all the
            # sub-processes.  The sub-processes will wait for a
            # startup message from this process before creating their
            # ActorSystems.

            parent_conn1, child_conn1 = Pipe()
            child1 = Process(target=DagobahSystem,
                             args=(child_conn1, self.actorSystemBase,
                                   getattr(self, 'extraCapabilities', None)))
            child1.start()
            systems['Dagobah'] = (parent_conn1, child1)

            parent_conn1, child_conn1 = Pipe()
            child1 = Process(target=EndorSystem,
                             args=(child_conn1, self.actorSystemBase,
                                   getattr(self, 'extraCapabilities', None)))
            child1.start()
            systems['Endor'] = (parent_conn1, child1)

            # Start the Primary ActorSystem and an Actor that
            # registers for Convention entry/exit from other
            # ActorSystems.

            caps = {'Jedi Council': True, 'Admin Port': 12121}
            caps.update(getattr(self, 'extraCapabilities', {}))
            ActorSystem(self.actorSystemBase,
                        caps,
                        logDefs=simpleActorTestLogging())

            watcher = ActorSystem().createActor(Notified)
            ActorSystem().tell(watcher, 'register')
            sleep(0.2)  # wait for watcher to register

            # Now start each of the secondary ActorSystems; their
            # registration should be noted by the Actor registered for
            # such notifications.

            for each in systems:
                systems[each][0].send('Start now please')

            # Verify all anticipated registrations actually occurred.

            for X in range(50):
                registrations = ActorSystem().ask(watcher, 'notifications',
                                                  1).split('&')
                print(registrations)
                if 2 == len(registrations):
                    break
                sleep(0.01)  # wait for systems to startup and register
            self.assertEqual(2, len(registrations))

            # Now there are 3 actor Systems:
            #    Jedi Council (convention leader)
            #    Endor (Trees)
            #    Dagobah (Swamp)
            # Create some Actors:
            #    Yoda (from Primary, created in system Dagobah)
            #       ObiWan  (from Yoda, through Jedi Council to system Endor)
            #       Luke    (from Yoda, but cannot start this anywhere)
            # Verify that ObiWan starts and stays started, but that Luke "starts" and subsequently exits.

            yoda = ActorSystem().createActor(Yoda)
            self.assertEqual('Use the Force, you must, to train',
                             ActorSystem().ask(yoda, 'train', 2))
            self.assertEqual((0, 0),
                             ActorSystem().ask(yoda, 'Training Completed?', 2))
            ActorSystem().tell(yoda, 'Obi Wan')
            ActorSystem().tell(yoda, 'Padawan')
            sleep(0.25)  # allow time for Yoda to fail training a young Padawan
            self.assertEqual((2, 1),
                             ActorSystem().ask(yoda, 'Training Completed?', 2))

            # Now ask an ActorSystem to exit.  This is the ActorSystem
            # where Obi Wan is, so that will cause Obi Wan to go away
            # as well.

            systems['Endor'][0].send('Please exit nicely')
            del systems['Endor']

            # KWQ: how to get Endor to abruptly exit without shutting
            # down ObiWan first so that Dagobah system cleanup can
            # tell Yoda that ObiWan is gone.

            # Verify that the convention deregistration occurred

            for X in range(60):
                registrations2 = ActorSystem().ask(watcher, 'notifications',
                                                   1).split('&')
                print(str(registrations2))
                if 3 == len(registrations2):
                    break
                sleep(0.01)  # wait for Endor system to exit and deregister
            self.assertEqual(3, len(registrations2))

            outs = [X for X in registrations2 if X.startswith('OUT')]
            self.assertEqual(1, len(outs))

            # Verify that destroying the Endor system shutdown all Actors within it
            self.assertEqual((2, 2),
                             ActorSystem().ask(yoda, 'Training Completed?', 2))

        finally:
            for system in systems:
                systems[system][0].send('OK, all done')
            sleep(0.3)  # allow other actorsystems (non-convention-leaders) to exit
            ActorSystem().shutdown()
Example #15


def proc_write(pipe, urls):
    print 'Process %s is writing...' % (os.getpid())
    for url in urls:
        pipe.send(url)
        print 'PUT the url is %s' % url
        time.sleep(random.random() * 3)


def proc_read(pipe):
    print 'Process %s is reading...' % (os.getpid())
    while True:
        url = pipe.recv()
        print 'read the url is %s' % url


if __name__ == '__main__':
    print 'main %s Process start...' % os.getpid()
    pipe = Pipe()
    p1 = Process(target=proc_write,
                 args=(pipe[0], ['url1', 'url2', 'url3', 'url4', 'url5']))
    p2 = Process(target=proc_read, args=(pipe[1], ))

    p1.start()
    p2.start()
    p1.join()
    p2.join()
    print 'main %s Process end' % os.getpid()
Example #16
# -*- coding:utf-8 -*-
from multiprocessing import Process, Queue, Pipe


def say_hello(con_a):

    print('Data received in the child process: {}'.format(con_a.recv()))


if __name__ == '__main__':
    # create the pipe
    con_a, con_b = Pipe()

    p = Process(target=say_hello, args=(con_a, ))
    p.start()

    con_b.send("Lalala, Demacia")
Example #17
 def __init__(self, make_env_fn):
     p1, p2 = Pipe()
     self.pipe = p1
     self.proc = Process(target=self.env_process, args=[p2, make_env_fn])
     self.proc.start()
     self.observation_space, self.action_space = self.pipe.recv()
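
The env_process method is not shown; the following is a minimal sketch of the usual request/reply worker loop behind this pattern (an assumption, not the original implementation):

from multiprocessing import Process, Pipe

class EnvWorker:
    """Hypothetical sketch built around the constructor above; not the original class."""

    def __init__(self, make_env_fn):
        p1, p2 = Pipe()
        self.pipe = p1
        self.proc = Process(target=self.env_process, args=[p2, make_env_fn])
        self.proc.start()
        self.observation_space, self.action_space = self.pipe.recv()

    def env_process(self, pipe, make_env_fn):
        # Runs in the child process: build the env, report its spaces, then serve commands.
        env = make_env_fn()
        pipe.send((env.observation_space, env.action_space))
        while True:
            cmd, data = pipe.recv()
            if cmd == 'step':
                pipe.send(env.step(data))
            elif cmd == 'reset':
                pipe.send(env.reset())
            elif cmd == 'close':
                env.close()
                break

    def step(self, action):
        self.pipe.send(('step', action))
        return self.pipe.recv()

    def reset(self):
        self.pipe.send(('reset', None))
        return self.pipe.recv()

    def close(self):
        self.pipe.send(('close', None))
        self.proc.join()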
Example #18
partition = loadmat(partitionFile)
# partitionnumber = 0
op = partition['OP14'][0]
op[10:] = 3
# op[:] = 1
# op = partition['C'][partitionnumber]
bus[:, BUS_AREA] = op
tieline, ntl = findTieline(bus, branch)

##---------- create all the communication pipes --------------------------------
edges = (np.vstack({tuple(sorted(row)) for row in tieline[:, 2:4]})
         if tieline.any() else np.array([]))
pipes = {}
for edge in edges.tolist():
    fend, tend = Pipe()
    if edge[0] not in pipes:
        pipes[edge[0]] = {}
    pipes[edge[0]][edge[1]] = fend
    if edge[1] not in pipes:
        pipes[edge[1]] = {}
    pipes[edge[1]][edge[0]] = tend

##----subproblem configuration including local opf and communication pipes-----
problem = []
output = Queue()
for i in range(na):
    s = opf_admm_model()
    s.config(i + 1, op, bus, gen, gencost, Ybus, genBus, tieline, pipes, na)
    s.var_init()
    problem.append(s)
Example #19
            self.input_data = ""
            for _b in range(0, self.write_size):
                self.input_data += '{:02x}'.format(0)

        except Exception as e:
            print("Exception in doCleanup")
            self.sendDebugInfo('Exception during cleanup: ' + str(e))
        finally:
            self.changeStatus(DriverStatus.SETUP)


# Main thread
if __name__ == "__main__":

    # Create pipe
    m_pipe, p_pipe = Pipe()

    # Create driver
    d = enip_driver('test', p_pipe, driver_ip="192.168.57.222",
                    connection_path='', read_size=1, write_size=2)
    # Start driver
    d.start()

    # Loop
    while m_pipe:
        try:
            # Check pipe
            if m_pipe.poll():
                # Print message
                msg = m_pipe.recv()
                print('pipe read:', msg)
Example #20
        p.apply_async(long_time_task, args=(i, ))

    p.close()  # close the process pool
    p.join()  # wait for the pool's worker processes to finish
    print("all subprocess done")
    # # For inter-process communication, Python provides several mechanisms such as queues (Queue) and pipes (Pipe)
    # q = Queue()
    # pw = Process(target=write, args=(q,))
    # pr = Process(target=read, args=(q,))
    # pw.start()
    # pr.start()
    # pw.join()
    # pr.terminate()  # forcibly terminate the infinite-loop process

    # use a pipe for communication between processes
    parent_conn, child_conn = Pipe()
    pp = Process(target=f, args=(child_conn,))
    pp.start()

    print('duplex channel', parent_conn.recv())
    parent_conn.send('6666')


12. Python multithreading example
import time, threading
def loop():
    print("thread %s is running.." % threading.current_thread().name)
    n = 0
    while n < 5:
        n = n + 1
        print('thread %s  >>> %s'% (threading.current_thread().name,n))
Example #21
def run_experiment(**kwargs):

    config = ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = kwargs.get('gpu_frac', 0.95)

    exp_dir = os.getcwd() + '/data/parallel_mb_ppo/' + EXP_NAME + '/' + kwargs.get('exp_name', '')
    print("\n---------- running experiment {} ---------------------------".format(exp_dir))
    logger.configure(dir=exp_dir, format_strs=['stdout', 'log', 'csv'], snapshot_mode='last')
    json.dump(kwargs, open(exp_dir + '/params.json', 'w'), indent=2, sort_keys=True, cls=ClassEncoder)

    # Instantiate classes
    set_seed(kwargs['seed'])

    baseline = kwargs['baseline']()

    env = normalize(kwargs['env']()) # Wrappers?

    policy = GaussianMLPPolicy(
        name="meta-policy",
        obs_dim=np.prod(env.observation_space.shape),
        action_dim=np.prod(env.action_space.shape),
        hidden_sizes=kwargs['policy_hidden_sizes'],
        learn_std=kwargs['policy_learn_std'],
        hidden_nonlinearity=kwargs['policy_hidden_nonlinearity'],
        output_nonlinearity=kwargs['policy_output_nonlinearity'],
    )

    dynamics_model = MLPDynamicsEnsemble(
        'dynamics-ensemble',
        env=env,
        num_models=kwargs['num_models'],
        hidden_nonlinearity=kwargs['dyanmics_hidden_nonlinearity'],
        hidden_sizes=kwargs['dynamics_hidden_sizes'],
        output_nonlinearity=kwargs['dyanmics_output_nonlinearity'],
        learning_rate=kwargs['dynamics_learning_rate'],
        batch_size=kwargs['dynamics_batch_size'],
        buffer_size=kwargs['dynamics_buffer_size'],
    )

    '''-------- dumps and reloads -----------------'''

    baseline_pickle = pickle.dumps(baseline)
    env_pickle = pickle.dumps(env)

    receiver, sender = Pipe()
    p = Process(
        target=init_vars,
        name="init_vars",
        args=(sender, config, policy, dynamics_model),
        daemon=True,
    )
    p.start()
    policy_pickle, dynamics_model_pickle = receiver.recv()
    receiver.close()

    '''-------- following classes depend on baseline, env, policy, dynamics_model -----------'''
    
    worker_data_feed_dict = {
        'env_sampler': {
            'num_rollouts': kwargs['num_rollouts'],
            'max_path_length': kwargs['max_path_length'],
            'n_parallel': kwargs['n_parallel'],
        },
        'dynamics_sample_processor': {
            'discount': kwargs['discount'],
            'gae_lambda': kwargs['gae_lambda'],
            'normalize_adv': kwargs['normalize_adv'],
            'positive_adv': kwargs['positive_adv'],
        },
    }

    worker_model_feed_dict = {}
    
    worker_policy_feed_dict = {
        'model_sampler': {
            'num_rollouts': kwargs['imagined_num_rollouts'],
            'max_path_length': kwargs['max_path_length'],
            'dynamics_model': dynamics_model,
            'deterministic': kwargs['deterministic'],
        },
        'model_sample_processor': {
            'discount': kwargs['discount'],
            'gae_lambda': kwargs['gae_lambda'],
            'normalize_adv': kwargs['normalize_adv'],
            'positive_adv': kwargs['positive_adv'],
        },
        'algo': {
            'learning_rate': kwargs['learning_rate'],
            'clip_eps': kwargs['clip_eps'],
            'max_epochs': kwargs['num_ppo_steps'],
        }
    }

    trainer = ParallelTrainer(
        policy_pickle=policy_pickle,
        env_pickle=env_pickle,
        baseline_pickle=baseline_pickle,
        dynamics_model_pickle=dynamics_model_pickle,
        feed_dicts=[worker_data_feed_dict, worker_model_feed_dict, worker_policy_feed_dict],
        n_itr=kwargs['n_itr'],
        dynamics_model_max_epochs=kwargs['dynamics_max_epochs'],
        log_real_performance=kwargs['log_real_performance'],
        steps_per_iter=kwargs['steps_per_iter'],
        flags_need_query=kwargs['flags_need_query'],
        config=config,
        simulation_sleep=kwargs['simulation_sleep'],
    )

    trainer.train()
Example #22
        self.socketio = SocketIO(message_queue='redis://')

    def update_position(self, data):
        print("updating position")

    def testing(self):
        self.socketio.emit("testing deployment socket", namespace='/test')
        self.socketio.on_event('update table', self.update_position)

        
def start_deployment(serial, file):
    deployer = Deployment()
    deployer.testing()



if __name__ == "__main__":

    SERIAL_CHILD, SERIAL_PARENT = Pipe()
    RECORD_QUEUE = Queue()
    COMMUNICATOR = Process(target=SerialCommunication.start_serial_communication,\
        args=(RECORD_QUEUE, SERIAL_CHILD))
    COMMUNICATOR.start()

    DEPLOYMENT = Deployment(SERIAL_PARENT, "deployment_test.txt")
    DEPLOYMENT.run()
    COMMUNICATOR.join()
    while not RECORD_QUEUE.empty():
        print(RECORD_QUEUE.get())
Example #23
                if cmd == 'shutdown':
                    break

            # do your work (with timeout)
            server.handle_request()

    except KeyboardInterrupt:
        signal.signal(signal.SIGINT, signal.SIG_IGN)  # we heard you!
        dprint(__name__, 0, "^C received.")
    finally:
        dprint(__name__, 0, "Shutting down (HTTPS).")
        server.socket.close()


if __name__ == "__main__":
    cmdPipe = Pipe()

    cfg = Settings.CSettings()
    param = {}
    param['CSettings'] = cfg
    param['CATVSettings'] = ATVSettings.CATVSettings()

    param['IP_self'] = '192.168.178.20'  # IP_self?
    param['baseURL'] = 'http://' + param['IP_self'] + ':' + cfg.getSetting(
        'port_webserver')
    param['HostToIntercept'] = cfg.getSetting('hosttointercept')

    if len(sys.argv) == 1:
        Run(cmdPipe[1], param)
    elif len(sys.argv) == 2 and sys.argv[1] == 'SSL':
        Run_SSL(cmdPipe[1], param)
Example #24
 def __init__(self, *args, **kwargs):
     Process.__init__(self, *args, **kwargs)
     self._pconn, self._cconn = Pipe()
     self._exception = None
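
Only the constructor of this exception-forwarding Process subclass is shown; the rest of the pattern usually looks like the sketch below (a hypothetical completion, not the original code):

import traceback
from multiprocessing import Process, Pipe

class ExceptionAwareProcess(Process):
    """Hypothetical completion of the pattern above."""
    def __init__(self, *args, **kwargs):
        Process.__init__(self, *args, **kwargs)
        self._pconn, self._cconn = Pipe()
        self._exception = None

    def run(self):
        try:
            Process.run(self)
            self._cconn.send(None)
        except Exception as e:
            # Forward the exception and its traceback text to the parent.
            self._cconn.send((e, traceback.format_exc()))

    @property
    def exception(self):
        # Parent side: returns (exception, traceback) once the child has sent one.
        if self._pconn.poll():
            self._exception = self._pconn.recv()
        return self._exception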
Example #25
#open pointer to the log as well as the txt file which will store the completed word count
pointer_to_time_log = open('master_log.txt', 'a')
pointer_to_word_count = open('word_count.txt', 'w+')

#Initialize a variable to count the total number of lines in the txt file to send
total_num_lines = 0
#Use loop to calculate total number of lines in file
with open('les_mis.txt') as f:
    total_num_lines = sum(1 for _ in f)
#Use OS command to split the file into three pieces by the total number of lines/# of workers
os.system("split --lines=" +
          str((total_num_lines + NUMPROCESSES) // NUMPROCESSES) +
          " --numeric-suffixes --suffix-length=2 les_mis.txt t")

#Create Pipes that the Parents and Child can send objects through, one for each parent-child process
ParentConnect1, ChildConnect1 = Pipe()
ParentConnect2, ChildConnect2 = Pipe()
ParentConnect3, ChildConnect3 = Pipe()
#Use process module to set up processes which invoke previously defined function
# as a separate process which distributes and then retrieves work from the connecting worker
give_work_to_14 = Process(target=divide_and_receive_work,
                          args=(ChildConnect1, 5000, '10.100.0.13',
                                '10.100.0.14', 't00', 'pickled_dict_from_14'))
give_work_to_25 = Process(target=divide_and_receive_work,
                          args=(ChildConnect2, 5002, '10.100.0.13',
                                '10.100.0.25', 't01', 'pickled_dict_from_25'))
give_work_to_26 = Process(target=divide_and_receive_work,
                          args=(ChildConnect3, 5004, '10.100.0.13',
                                '10.100.0.26', 't01', 'pickled_dict_from_26'))
#Begin these processes
give_work_to_14.start()
Example #26
    def scheduler(self, chunk = None, sched = None):
        """TODO: Docstring"""
        
        if not sched:
            sched = self.opts.schedule
        
        if not chunk:
            chunk = self.opts.chunk
            if self.opts.loud:
                print(
                    'Using default chunk, {}.'.format(chunk))
            
        
        
        sched_copy = [i for i in sched]
        sched_copy.reverse()
        
        while len(sched_copy) > 0:
            parent_conn, child_conn = Pipe()
            
            scheduled_func, message = sched_copy.pop()
            
            #The user must define chunk somewhere
            if chunk:
                message['chunk'] = chunk
            if 'chunk' in message:
                pass
            else:
                raise ValueError(
            "The scheduler needs to know which chunk of data to use."
             )
            
            if self.opts.loud:
                print("scheduler using chunk: ", message['chunk'])

            #Spawn the process and pass the connection, data and message to it
            if message:
                if 'load' in message:
                    if message['load'] == False:
                        p = multiprocessing.Process(target=scheduled_func, 
                                        args=(child_conn, None, message), 
                                        name = scheduled_func.__name__)
                        p.start()
                else:
                    p = multiprocessing.Process(target=scheduled_func, 
                                        args=(child_conn, self.data, message), 
                                        name = scheduled_func.__name__)
                    p.start()
            else:
                p = multiprocessing.Process(target=scheduled_func, 
                                        args=(child_conn, None, None), 
                                        name = scheduled_func.__name__)
                p.start()
                
             
                
            
            
            
            if self.opts.loud:
                print(
                    'Beginning next function: {}, {} remaining'.format(
                        p.name, len(sched_copy)))
            
            recv = parent_conn.recv()    
            p.join(timeout = self.opts.timeouts)
            
            if "BREAK" in recv:
                print("\n \nERROR IN CHILD PROCESS:\n")               
                print(recv["BREAK"])
                print(recv['TB'])
                raise recv["BREAK"]
                
                
            
            #Decide how to store result with Pandas
            if self.load == 'pandas':
                if "pandas_to_data" in message:
                    if message['pandas_to_data'] in [
                        'append', 'concat', 'overwrite', 'none',]:
                        pass
                    
                elif self.opts.pandas_to_data in [
                    'append', 'concat', 'overwrite', 'none',]:
                    
                    message['pandas_to_data'] = self.opts.pandas_to_data
                else:
                    raise ValueError(
        "To store data with pandas you must define "             
        "the pandas_to_data option or pass it to your UDF "
        "\nSupports: 'none', 'append', 'concat', 'merge' and 'overwrite'"
                                    )
                
                if message['pandas_to_data'] == 'none':
                    pass
                if message['pandas_to_data'] == 'append':
                    if "pandas_to_data_args" in message:
                        pandas_to_data_args = message['pandas_to_data_args']
                        self.data.append(recv,
                                         **pandas_to_data_args)
                    else:
                        self.data.append(recv)
                if message['pandas_to_data'] == 'concat':
                    if "pandas_to_data_args" in message:
                        pandas_to_data_args = message['pandas_to_data_args']
                        self.data.concat(recv,
                                         **pandas_to_data_args)
                    else:
                        self.data.concat(recv)
                if message['pandas_to_data'] == 'merge':
                    if "pandas_to_data_args" in message:
                        pandas_to_data_args = message['pandas_to_data_args']
                        self.data.append(recv,
                                         **pandas_to_data_args)
                    else:
                        raise ValueError(
                        "You must define pandas_to_data_args "
                            "with the merge option"
                        )
                if message['pandas_to_data'] == 'overwrite':
                    self.data = recv
            
           
            
            #Not yet implemented
            if self.load == 'postgreSQL':
                raise ValueError("postgreSQL is not currently supported")
            
            #Not yet implemented
            if self.load == 'SQLite':
                raise ValueError("SQLite is not currently supported")
            
            if self.opts.loud:
                print(
                    'Scheduled function completed, {} remaining.'.format(
                        len(sched_copy)))
Example #27
  # exit()
  # print(hp.nb_best_directions)
  print("seed = ", hp.seed)
  np.random.seed(hp.seed)
  max_processes = 5

  parentPipes = None
  if args.mp:
    num_processes = min([hp.nb_directions, max_processes])
    print('processes: ',num_processes)
    processes = []
    childPipes = []
    parentPipes = []

    for pr in range(num_processes):
      parentPipe, childPipe = Pipe()
      parentPipes.append(parentPipe)
      childPipes.append(childPipe)

    for rank in range(num_processes):
      p = mp.Process(target=ExploreWorker, args=(rank, childPipes[rank], hp.env_name, args))
      p.start()
      processes.append(p)

  # env = stoch2_gym_env.StochBulletEnv(render = False, gait = 'trot')
  nb_inputs = env.observation_space.sample().shape[0]
  nb_outputs = env.action_space.sample().shape[0]
  policy = Policy(nb_inputs, nb_outputs, hp.env_name, hp.normal, args)
  normalizer = Normalizer(nb_inputs)

  print("start training")
Example #28

def consumer(pipe):
    output_p, input_p = pipe
    input_p.close()
    while True:
        try:
            item = output_p.recv()
        except EOFError:
            break
        print(item)
    print("Consumer done")


def producer(sequence, input_p):
    for item in sequence:
        input_p.send(item)


if __name__ == '__main__':
    (output_p, input_p) = Pipe()

    cons_p = Process(target=consumer, args=((output_p, input_p), ))
    cons_p.start()

    output_p.close()

    sequence = [1, 2, 3, 4]
    producer(sequence, input_p)

    input_p.close()

    cons_p.join()
Example #29
        # Below we calculate our FPS
        fps += 1
        TIME = time.time() - start_time
        if (TIME) >= display_time:
            print("FPS: ", fps / (TIME))
            fps = 0
            start_time = time.time()
        # Press "q" to quit
        if cv2.waitKey(25) & 0xFF == ord("q"):
            cv2.destroyAllWindows()
            break


if __name__ == "__main__":
    # Pipes
    p_output, p_input = Pipe()
    p_output2, p_input2 = Pipe()

    # creating new processes
    p1 = multiprocessing.Process(target=grab_screen, args=(p_input, ))
    p2 = multiprocessing.Process(target=TensorflowDetection,
                                 args=(
                                     p_output,
                                     p_input2,
                                 ))
    p3 = multiprocessing.Process(target=Show_image, args=(p_output2, ))

    # starting our processes
    p1.start()
    p2.start()
    p3.start()
Example #30
def function_handler(event):
    start_time = time.time()

    log_level = event['log_level']
    cloud_logging_config(log_level)
    logger.debug("Action handler started")

    extra_env = event.get('extra_env', {})
    os.environ.update(extra_env)

    os.environ.update({'PYWREN_FUNCTION': 'True',
                       'PYTHONUNBUFFERED': 'True'})

    config = event['config']
    call_id = event['call_id']
    job_id = event['job_id']
    executor_id = event['executor_id']
    exec_id = "{}/{}/{}".format(executor_id, job_id, call_id)
    logger.info("Execution-ID: {}".format(exec_id))

    runtime_name = event['runtime_name']
    runtime_memory = event['runtime_memory']
    execution_timeout = event['execution_timeout']
    logger.debug("Runtime name: {}".format(runtime_name))
    logger.debug("Runtime memory: {}MB".format(runtime_memory))
    logger.debug("Function timeout: {}s".format(execution_timeout))

    func_key = event['func_key']
    data_key = event['data_key']
    data_byte_range = event['data_byte_range']

    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)

    call_status = CallStatus(config, internal_storage)
    call_status.response['host_submit_time'] = event['host_submit_time']
    call_status.response['start_time'] = start_time
    context_dict = {
        'python_version': os.environ.get("PYTHON_VERSION"),
        'call_id': call_id,
        'job_id': job_id,
        'executor_id': executor_id,
        'activation_id': os.environ.get('__PW_ACTIVATION_ID')
    }
    call_status.response.update(context_dict)

    show_memory_peak = strtobool(os.environ.get('SHOW_MEMORY_PEAK', 'False'))
    call_status.response['peak_memory_usage'] = 0

    try:
        if version.__version__ != event['pywren_version']:
            msg = ("PyWren version mismatch. Host version: {} - Runtime version: {}"
                   .format(event['pywren_version'], version.__version__))
            raise RuntimeError('HANDLER', msg)

        # send init status event
        call_status.send('__init__')

        # call_status.response['free_disk_bytes'] = free_disk_space("/tmp")
        custom_env = {'PYWREN_CONFIG': json.dumps(config),
                      'PYWREN_EXECUTION_ID': exec_id,
                      'PYTHONPATH': "{}:{}".format(os.getcwd(), PYWREN_LIBS_PATH)}
        os.environ.update(custom_env)

        jobrunner_stats_dir = os.path.join(STORAGE_BASE_DIR, executor_id, job_id, call_id)
        os.makedirs(jobrunner_stats_dir, exist_ok=True)
        jobrunner_stats_filename = os.path.join(jobrunner_stats_dir, 'jobrunner.stats.txt')

        jobrunner_config = {'pywren_config': config,
                            'call_id':  call_id,
                            'job_id':  job_id,
                            'executor_id':  executor_id,
                            'func_key': func_key,
                            'data_key': data_key,
                            'log_level': log_level,
                            'data_byte_range': data_byte_range,
                            'output_key': create_output_key(JOBS_PREFIX, executor_id, job_id, call_id),
                            'stats_filename': jobrunner_stats_filename}

        setup_time = time.time()
        call_status.response['setup_time'] = round(setup_time - start_time, 8)

        if show_memory_peak:
            mm_handler_conn, mm_conn = Pipe()
            memory_monitor = Thread(target=memory_monitor_worker, args=(mm_conn, ))
            memory_monitor.start()

        handler_conn, jobrunner_conn = Pipe()
        jobrunner = JobRunner(jobrunner_config, jobrunner_conn, internal_storage)
        logger.debug('Starting JobRunner process')
        local_execution = strtobool(os.environ.get('__PW_LOCAL_EXECUTION', 'False'))
        jrp = Thread(target=jobrunner.run) if local_execution else Process(target=jobrunner.run)
        jrp.start()

        jrp.join(execution_timeout)
        logger.debug('JobRunner process finished')
        call_status.response['exec_time'] = round(time.time() - setup_time, 8)

        if jrp.is_alive():
            # If process is still alive after jr.join(job_max_runtime), kill it
            try:
                jrp.terminate()
            except Exception:
                # thread does not have terminate method
                pass
            msg = ('Function exceeded maximum time of {} seconds and was '
                   'killed'.format(execution_timeout))
            raise TimeoutError('HANDLER', msg)

        if show_memory_peak:
            mm_handler_conn.send('STOP')
            memory_monitor.join()
            peak_memory_usage = int(mm_handler_conn.recv())
            logger.info("Peak memory usage: {}".format(sizeof_fmt(peak_memory_usage)))
            call_status.response['peak_memory_usage'] = peak_memory_usage

        if not handler_conn.poll():
            logger.error('No completion message received from JobRunner process')
            logger.debug('Assuming memory overflow...')
            # Only 1 message is returned by jobrunner when it finishes.
            # If no message, this means that the jobrunner process was killed.
            # 99% of times the jobrunner is killed due an OOM, so we assume here an OOM.
            msg = 'Function exceeded maximum memory and was killed'
            raise MemoryError('HANDLER', msg)

        if os.path.exists(jobrunner_stats_filename):
            with open(jobrunner_stats_filename, 'r') as fid:
                for l in fid.readlines():
                    key, value = l.strip().split(" ", 1)
                    try:
                        call_status.response[key] = float(value)
                    except Exception:
                        call_status.response[key] = value
                    if key in ['exception', 'exc_pickle_fail', 'result', 'new_futures']:
                        call_status.response[key] = eval(value)

        # call_status.response['server_info'] = get_server_info()
        call_status.response['end_time'] = time.time()

    except Exception:
        # internal runtime exceptions
        print('----------------------- EXCEPTION !-----------------------', flush=True)
        traceback.print_exc(file=sys.stdout)
        print('----------------------------------------------------------', flush=True)
        call_status.response['end_time'] = time.time()
        call_status.response['exception'] = True

        pickled_exc = pickle.dumps(sys.exc_info())
        pickle.loads(pickled_exc)  # this is just to make sure they can be unpickled
        call_status.response['exc_info'] = str(pickled_exc)

    finally:
        call_status.send('__end__')
        for key in extra_env:
            del os.environ[key]
        logger.info("Finished")