Code example #1
0
File: worker.py — Project: schoenemeyer/landcover
def main():
    """Parse CLI arguments, configure logging and GPU environment, build the
    model named by --model_key, and serve it to a single RPyC client."""
    parser = argparse.ArgumentParser(
        description="AI for Earth Land Cover Worker")

    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="Enable verbose debugging",
                        default=False)
    parser.add_argument("--port",
                        action="store",
                        type=int,
                        help="Port we are listenning on",
                        default=0)
    parser.add_argument("--gpu_id",
                        action="store",
                        dest="gpu_id",
                        type=int,
                        help="GPU to use",
                        required=False)
    parser.add_argument("--model_key",
                        action="store",
                        dest="model_key",
                        type=str,
                        help="Model key from models.json to use")
    args = parser.parse_args(sys.argv[1:])

    # Setup logging
    log_path = os.path.join(os.getcwd(), "tmp/logs/")
    setup_logging(log_path, "worker")

    # GPU selection must happen before any framework initializes CUDA.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = "" if args.gpu_id is None else str(
        args.gpu_id)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    model_configs = load_models()
    # Fixed idiom: `x not in y` instead of `not x in y`.
    if args.model_key not in model_configs:
        # Fixed: pass lazy %-style args to the logger instead of pre-formatting.
        LOGGER.error("'%s' is not recognized as a valid model, exiting...",
                     args.model_key)
        return
    model_type = model_configs[args.model_key]["type"]

    if model_type == "keras_example":
        model = KerasDenseFineTune(args.gpu_id,
                                   **model_configs[args.model_key])
    elif model_type == "pytorch_example":
        # NOTE(review): args.model_fn and args.fine_tune_layer are never added
        # to the parser above, so this branch raises AttributeError if taken.
        # These values likely belong in model_configs[args.model_key] — confirm.
        model = TorchFineTuning(args.model_fn, args.gpu_id,
                                args.fine_tune_layer)
    elif model_type == "pytorch_smoothing_multiple":
        # NOTE(review): same issue — args.num_models is also undefined here.
        model = TorchSmoothingCycleFineTune(args.model_fn, args.gpu_id,
                                            args.fine_tune_layer,
                                            args.num_models)
    else:
        raise NotImplementedError(
            "The given model type is not implemented yet.")

    # Serve exactly one client, then exit.
    t = OneShotServer(MyService(model), port=args.port)
    t.start()
Code example #2
0
File: rpyc_classic.py — Project: SilongHu/rpyc
 def _serve_oneshot(self):
     """Serve exactly one connection, announcing the bound host/port on stdout."""
     server = OneShotServer(SlaveService, hostname = self.host, port = self.port,
         reuse_addr = True, ipv6 = self.ipv6, authenticator = self.authenticator,
         registrar = self.registrar, auto_register = self.auto_register)
     out = sys.stdout
     # The parent process parses these two lines to learn where we listen.
     out.write("rpyc-oneshot\n")
     out.write("%s\t%s\n" % (server.host, server.port))
     out.flush()
     server.start()
Code example #3
0
 def _serve_oneshot(self):
     """Start a one-shot rpyc server and report its address, one field per tab."""
     options = dict(
         hostname=self.host,
         port=self.port,
         reuse_addr=True,
         ipv6=self.ipv6,
         authenticator=self.authenticator,
         registrar=self.registrar,
         auto_register=self.auto_register,
     )
     oneshot = OneShotServer(SlaveService, **options)
     sys.stdout.write("rpyc-oneshot\n")
     sys.stdout.write("%s\t%s\n" % (oneshot.host, oneshot.port))
     sys.stdout.flush()
     oneshot.start()
Code example #4
0
def cli(context):
    """Serve PyKDService forever, one client connection at a time."""
    # Hoisted out of the loop; repeated imports only hit the module cache anyway.
    from rpyc.utils.server import OneShotServer
    while True:
        # FIXME: multithreaded server hangs pykd
        print("running rpyc server")
        server = OneShotServer(PyKDService,
                               port=18861,
                               protocol_config={"allow_all_attrs": True})
        server.start()
Code example #5
0
def start_server(service_args, port):
    """Build the problem service and serve a single client on localhost:port."""
    # Imported here to avoid an import cycle.
    from supervised import make_problem_service

    service = make_problem_service(service_args)
    server = OneShotServer(service, hostname='localhost', port=port)
    print('Child process starting OneShotServer %s' % server)
    try:
        server.start()
    finally:
        # Persist the kernprof profile for this subprocess, when profiling is on.
        try_save_profile()
Code example #6
0
class FakeRemoteAgent(threading.Thread):
    """ A fake agent used for tests of the RPyC interface """

    def __init__(self, port, handle_job_func,
                 update_image_aliases_func=(lambda aliases: ""),
                 get_task_directory_hashes_func=(lambda: []),
                 update_task_directory_func=(lambda remote_tar_file, to_delete: "")):
        threading.Thread.__init__(self)
        # Stash the injected callbacks; the nested service closes over them.
        self.port = port
        self.handle_job_func = handle_job_func
        self.update_image_aliases_func = update_image_aliases_func
        self.get_task_directory_hashes_func = get_task_directory_hashes_func
        self.update_task_directory_func = update_task_directory_func
        # The thread starts itself on construction.
        self.start()

    def run(self):
        try:
            service = self._get_agent_backend_service()
            config = {"allow_public_attrs": True, 'allow_pickle': True}
            self._backend_server = OneShotServer(service, port=self.port,
                                                 protocol_config=config)
            self._backend_server.start()
        except EOFError:
            # The single client hung up; the one-shot server is simply done.
            pass

    def close(self):
        self._backend_server.close()

    def _get_agent_backend_service(self):
        """ Returns a RPyC service associated with this Agent """
        # Bind the callbacks to locals so AgentService needs no reference to self.
        do_job = self.handle_job_func
        do_update_aliases = self.update_image_aliases_func
        do_get_hashes = self.get_task_directory_hashes_func
        do_update_directory = self.update_task_directory_func

        class AgentService(rpyc.Service):
            def exposed_update_image_aliases(self, image_aliases):
                do_update_aliases(image_aliases)

            def exposed_get_task_directory_hashes(self):
                return do_get_hashes()

            def exposed_update_task_directory(self, remote_tar_file, to_delete):
                do_update_directory(remote_tar_file.read(), copy.deepcopy(to_delete))

            def exposed_new_job(self, job_id, course_id, task_id, inputdata, debug, callback_status, callback_return):
                """ Creates, executes and returns the results of a new job """
                try:
                    retval = do_job(job_id, course_id, task_id, inputdata, debug, callback_status)
                except Exception as e:
                    callback_return({"result": "crash", "text": "An error occured in the Agent: {}".format(str(e))})
                    return
                callback_return(retval)

        return AgentService
Code example #7
0
File: multiprob.py — Project: xf1590281/ASNets
def start_server(service_args, socket_path):
    """Seed RNGs if requested, then serve one client over a unix socket."""
    if service_args.random_seed is not None:
        set_random_seeds(service_args.random_seed)
    # Imported lazily to avoid an import cycle.
    from asnets.supervised import make_problem_service
    # Die with SIGKILL when the parent process goes away.
    parent_death_pact(signal=signal.SIGKILL)
    service = make_problem_service(service_args, set_proc_title=True)
    server = OneShotServer(service, socket_path=socket_path)
    print('Child process starting OneShotServer %s' % server)
    try:
        server.start()
    finally:
        # Persist the kernprof profile for this subprocess, when profiling is on.
        try_save_profile()
Code example #8
0
File: worker.py — Project: zgle-me/landcover
def main():
    """CLI entry point: configure logging and GPU env, build the requested
    model, and serve it to a single RPyC client."""
    global MODEL
    parser = argparse.ArgumentParser(description="AI for Earth Land Cover Worker")
    parser.add_argument(
        "-v", "--verbose", action="store_true",
        help="Enable verbose debugging", default=False)
    parser.add_argument(
        "--port", action="store", type=int,
        help="Port we are listenning on", default=0)
    parser.add_argument(
        "--model", action="store", dest="model",
        choices=["keras_dense", "pytorch"],
        help="Model to use", required=True)
    parser.add_argument(
        "--model_fn", action="store", dest="model_fn", type=str,
        help="Model fn to use", default=None)
    parser.add_argument(
        "--fine_tune_layer", action="store", dest="fine_tune_layer", type=int,
        help="Layer of model to fine tune", default=-2)
    parser.add_argument(
        "--gpu", action="store", dest="gpuid", type=int,
        help="GPU to use", required=False)
    args = parser.parse_args(sys.argv[1:])

    # Logging goes under ./tmp/logs/
    setup_logging(os.path.join(os.getcwd(), "tmp/logs/"), "worker")

    # GPU selection must be set before any framework initializes CUDA.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = "" if args.gpuid is None else str(args.gpuid)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    if args.model == "keras_dense":
        model = KerasDenseFineTune(args.model_fn, args.gpuid, args.fine_tune_layer)
    elif args.model == "pytorch":
        model = TorchFineTuning(args.model_fn, args.gpuid, args.fine_tune_layer)
    else:
        # Unreachable given `choices=` above, kept as a guard.
        raise NotImplementedError("The given model type is not implemented yet.")

    # Serve exactly one client, then exit.
    t = OneShotServer(MyService(model), port=args.port)
    t.start()
Code example #9
0
def start_server(unix_path, clean=None):
    """Serve a ClassicService on a unix socket, printing HELLO once listening."""
    service = ClassicService()
    server = OneShotServer(service=service,
                           socket_path=str(unix_path),
                           listener_timeout=1,
                           logger=logger)

    def watchdog():
        # Wait for the listener to come up, then signal readiness on stdout.
        while not server.active:
            time.sleep(.01)
        print("HELLO", flush=True)
        logger.debug("Server started")

        # Give the client five seconds to connect before shutting down.
        time.sleep(5)
        if not service.connected:
            logger.warning("No inbound connection: stopping")
            server.close()

    threading.Thread(target=watchdog).start()
    if clean:
        atexit.register(cleanup, unix_path)
    server.start()
Code example #10
0
File: rpyc_ida.py — Project: cea-sec/miasm
def serve_threaded(hostname="localhost", port=4455):
    """Run a blocking rpyc SlaveService inside IDA so an external client
    script can drive the IDA API.

    WARNING: IDA stays locked until the client script terminates.
    """
    print('Running server')
    server = OneShotServer(
        SlaveService,
        hostname=hostname,
        port=port,
        reuse_addr=True,
        ipv6=False,
        authenticator=None,
        auto_register=False,
    )
    # Surface rpyc's own log output while the server runs.
    server.logger.quiet = False

    return server.start()
Code example #11
0
def serve_threaded(hostname="localhost", port=4455):
    """This will run a rpyc server in IDA, so a custom script client will be
    able to access IDA api.
    WARNING: IDA will be locked until the client script terminates.
    """

    # Fixed: the Python 2 `print 'Running server'` statement is a SyntaxError
    # on Python 3; use the print function (identical output on both).
    print('Running server')
    server = OneShotServer(SlaveService,
                           hostname=hostname,
                           port=port,
                           reuse_addr=True,
                           ipv6=False,
                           authenticator=None,
                           auto_register=False)
    # Surface rpyc's own log output while the server runs.
    server.logger.quiet = False

    return server.start()
Code example #12
0
from ghpythonremote import rpyc
from rpyc.utils.server import OneShotServer


class GhcompService(rpyc.ClassicService):
    """Classic rpyc service exposing Grasshopper's ghpythonlib components."""

    def on_connect(self, conn):
        print("Incoming connection.")
        super(GhcompService, self).on_connect(conn)
        # Imported on connect: ghpythonlib is only importable inside Grasshopper.
        import ghpythonlib.components as ghcomp
        self.ghcomp = ghcomp

    def on_disconnect(self, conn):
        print("Disconnected.")

    def get_component(self, component_name, is_cluster_component=False):
        """Look up a ghcomp component by name; clusters nest one level deeper."""
        component = getattr(self.ghcomp, component_name)
        # TODO: improve ghcomp to get clusters the same way we get compiled
        # components, thus removing the need for a custom getter
        if is_cluster_component:
            component = getattr(component, component_name)
        return component

# Module-level side effect: serve a single client on localhost:18871.
# listener_timeout=None makes the server wait indefinitely for a connection.
server = OneShotServer(
    GhcompService, hostname="localhost", port=18871, listener_timeout=None
)
server.start()
Code example #13
0
# Script: open the first available CtxDynapse device and expose a classic
# rpyc SlaveService on port 1300 for remote control. Statement order matters:
# the device must be open and its model created before serving.
import rpyc.utils.classic
from rpyc.utils.server import OneShotServer
import CtxCtlTools
import CtxDynapse

# - Get list of devices
CtxCtlTools.device_controller.refresh_devices()
lDevices = CtxCtlTools.device_controller.get_unopened_devices()

# - Check that a device is available
# NOTE(review): `assert` is stripped under `python -O`; raise explicitly if
# this check must always hold.
assert lDevices is not None, "No devices found."

# - Open the first available device (or find the one we are interested in opening)
CtxCtlTools.device_controller.open_device(lDevices[0])

# - Wait until the device model has been created
# Busy-wait, pumping events until CtxDynapse gains a `model` attribute.
while not hasattr(CtxDynapse, "model"):
    CtxCtlTools.process_events()

# - Serve a classic (full-access) rpyc slave to exactly one client.
c = rpyc.utils.classic.SlaveService()
t = OneShotServer(c, port=1300)

print("RPyC: Ready to start.")
t.start()
Code example #14
0
class FakeRemoteAgent(threading.Thread):
    """ A fake agent used for tests of the RPyC interface """

    def __init__(self, port, handle_job_func,
                 update_image_aliases_func=(lambda aliases: ""),
                 get_task_directory_hashes_func=(lambda: []),
                 update_task_directory_func=(lambda remote_tar_file, to_delete: "")):
        threading.Thread.__init__(self)
        # Record the injected callbacks; the nested service closes over them.
        self.port = port
        self.handle_job_func = handle_job_func
        self.update_image_aliases_func = update_image_aliases_func
        self.get_task_directory_hashes_func = get_task_directory_hashes_func
        self.update_task_directory_func = update_task_directory_func
        # The thread starts itself; run() brings up the backend server.
        self.start()

    def run(self):
        try:
            self._backend_server = OneShotServer(
                self._get_agent_backend_service(), port=self.port,
                protocol_config={"allow_public_attrs": True, 'allow_pickle': True})
            self._backend_server.start()
        except EOFError:
            # The single client disconnected; the one-shot server is done.
            pass

    def close(self):
        self._backend_server.close()

    def _get_agent_backend_service(self):
        """ Returns a RPyC service associated with this Agent """
        # Bind callbacks to locals so AgentService needs no reference to self.
        job_cb = self.handle_job_func
        aliases_cb = self.update_image_aliases_func
        hashes_cb = self.get_task_directory_hashes_func
        directory_cb = self.update_task_directory_func

        class AgentService(rpyc.Service):
            def exposed_update_image_aliases(self, image_aliases):
                aliases_cb(image_aliases)

            def exposed_get_task_directory_hashes(self):
                return hashes_cb()

            def exposed_update_task_directory(self, remote_tar_file, to_delete):
                directory_cb(remote_tar_file.read(), copy.deepcopy(to_delete))

            def exposed_new_job(self, job_id, course_id, task_id, inputdata,
                                debug, callback_status, callback_return):
                """ Creates, executes and returns the results of a new job """
                try:
                    result = job_cb(job_id, course_id, task_id, inputdata,
                                    debug, callback_status)
                except Exception as e:
                    callback_return({
                        "result": "crash",
                        "text": "An error occured in the Agent: {}".format(str(e))})
                    return
                callback_return(result)

        return AgentService