Example #1
def BruteOne(crypt_msg):

    k1 = rpyc.connect('192.168.2.101', 18861)
    k2 = rpyc.connect('192.168.2.101', 18862)
    k3 = rpyc.connect('192.168.2.101', 18863)
    k4 = rpyc.connect('192.168.2.101', 18864)

    k11 = rpyc.async_(k1.root.decrypt)(crypt_msg, 2500, 0)
    k22 = rpyc.async_(k2.root.decrypt)(crypt_msg, 5000, 2500)
    k33 = rpyc.async_(k3.root.decrypt)(crypt_msg, 7500, 5000)
    k44 = rpyc.async_(k4.root.decrypt)(crypt_msg, 10000, 7500)

    workers = [[k11, k1], [k22, k2], [k33, k3], [k44, k4]]
    flag = False
    t = 0
    password = 0
    while True:
        for i in range(0, 4):
            if (workers[i][0].ready
                    and workers[i][0].value[0]):
                password = workers[i][0].value[1]
                t = workers[i][0].value[2]
                for worker in workers:
                    worker[1].close()
                flag = True
                break
        if (flag):
            break
    result = f"Passowrd Found: {passowrd} Time Taken : {t}"
    print(result)
    send_result('http://127.0.0.1:1880/DecryptOneResult', result)
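The example above polls .ready in a tight loop. rpyc's AsyncResult also offers wait(), which blocks until the result arrives. Below is a minimal sketch of the same fan-out without the busy loop, assuming each worker's service exposes decrypt(msg, upper, lower) returning (found, password, elapsed) as the example implies; the host list and port are placeholders.

import rpyc

def brute_force(crypt_msg, hosts, port=18861, chunk=2500):
    conns = [rpyc.connect(host, port) for host in hosts]
    futures = [rpyc.async_(conn.root.decrypt)(crypt_msg, (i + 1) * chunk, i * chunk)
               for i, conn in enumerate(conns)]
    try:
        for fut in futures:
            fut.wait()                      # block until this worker finishes its range
            found, password, elapsed = fut.value
            if found:
                return password, elapsed
        return None, None
    finally:
        for conn in conns:
            conn.close()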
Example #2
def execute():
    params = request.args.to_dict()
    lat = params['lat']
    long = params['long']
    token = params['token']
    address_key = 'lat:' + str(lat) + ' long:' + str(long)
    city_url = mongo.db.city.find_one({'address': address_key})

    if city_url:
        model_url = city_url["url"]
        weather_data = requests.get(model_url).content
        processed = rpyc.async_(post_processor.process)(weather_data, token)

        while not processed.ready:
            continue

    else:
        address = str(lat) + ',' + str(long)
        url = data_retriever.get_url(address)
        print('url', url)
        mongo.db.city.insert_one({'address': address_key, 'url': url})
        weather_data = requests.get(url).content
        processed = rpyc.async_(post_processor.process)(weather_data, token)

        while not processed.ready:
            continue
    return ""
Example #3
def PrimeOne(range_splits):

    k1 = rpyc.connect('192.168.2.101', 18861)
    k2 = rpyc.connect('192.168.2.101', 18862)
    k3 = rpyc.connect('192.168.2.101', 18863)
    k4 = rpyc.connect('192.168.2.101', 18864)

    k11 = rpyc.async_(k1.root.no_of_prime_numbers)(range_splits[0][1],
                                                   range_splits[0][0])
    k22 = rpyc.async_(k2.root.no_of_prime_numbers)(range_splits[1][1],
                                                   range_splits[1][0] + 1)
    k33 = rpyc.async_(k3.root.no_of_prime_numbers)(range_splits[2][1],
                                                   range_splits[2][0] + 1)
    k44 = rpyc.async_(k4.root.no_of_prime_numbers)(range_splits[3][1],
                                                   range_splits[3][0] + 1)

    t1 = time.time()
    # Busy-wait until all four workers have finished
    while not (k11.ready and k22.ready and k33.ready and k44.ready):
        pass

    pi1_sum = int(k11.value) + int(k22.value) + int(k33.value) + int(k44.value)

    result = f"Number Of Prime Numbers: {pi1_sum - 1} \n Time : {time.time() - t1} seconds   "

    send_result('http://127.0.0.1:1880/PrimeOneResult', result)
Example #4
    def exposed_run_mapreduce(self, data_file, function):
        self.task = function
        self.filename = data_file
        
        #Divide input data into chunks
        self.input_chunks()
        
        mappers = []
        for i, mapper in enumerate(self.mapper_connections):
            mappers.append(rpyc.async_(mapper.execute)(i, self.task, self.filename, self.KVServer_address, 
                                                       self.kvport, self.nreducer))
            mappers[i].set_expiry(None)

        # wait till all mappers complete their assigned task
        for mapper in mappers:
            while not mapper.ready:
                continue
        # logging.info('Mappers have completed their assigned task...')
        
        reducers = []
        
        for i, reducer in enumerate(self.reducer_connections):
            reducers.append(rpyc.async_(reducer.execute)(i, self.task, self.filename, self.KVServer_address, 
                                                       self.kvport))
            reducers[i].set_expiry(None)

        # wait till all reducers complete their assigned task
        for reducer in reducers:
            while not reducer.ready:
                continue
Example #5
    def exposed_run_run(self, client: int = 0):
        if (self._run is None): return -1

        if (client >= 0):
            run = rpyc.async_(self._run[client])

            if (self._args_per_client is None):
                future = run(self.to_run, *self._args)
            else:
                future = run(self.to_run,
                             *self._args_per_client[self.__client_num])

            self._result.append(future)
        else:
            for num, run in enumerate(self._run):
                run = rpyc.async_(run)

                if (self._args_per_client is None):
                    future = run(self.to_run, *self._args)
                else:
                    future = run(self.to_run, *self._args_per_client[num])

                self._result.append(future)

        return future
Example #6
 def wrapper(*args, **kwargs):
     if rpcserver is not None:
         try:
             rpyc.async_(rpcserver.root.called)(method.__name__)
         except Exception as e:
             rpcserver.root.echoError(method.__name__, e)
     ret = method(*args, **kwargs)
     return ret
Example #7
    def exposed_run_mapreduce(self, id, data, map_func, reduce_func, output_location):
        print('Starting map-reduce task...')
        mappers = []
        
        # Generate mapper inputs by splitting data
        self.__mapper_input(id, data)

        for i, mapper in enumerate(self.__worker_connections):
            mappers.append(rpyc.async_(mapper.execute)(id, map_func, 
                           self.config['Master']['map_role_name'], i, self.__KVServer_address, 
                           self.__KVServer_port))
            mappers[i].set_expiry(None)

        # wait till all workers complete their assigned task
        for mapper in mappers:
            while not mapper.ready:
                continue
        print('Mappers have completed their assigned task...')
        
        self.__combine_map_outputs(id)
        
        reducers = []
        reducer_response = []
        
        # Generate reducer inputs by splitting keys of intermediate data
        self.__reducer_input(id)

        for i, reducer in enumerate(self.__worker_connections):
            reducers.append(rpyc.async_(reducer.execute)(id, reduce_func, 
                            self.config['Master']['reduce_role_name'], i, self.__KVServer_address, 
                            self.__KVServer_port))
            reducers[i].set_expiry(None)

        # wait till all workers complete their assigned task
        for reducer in reducers:
            while not reducer.ready:
                continue
        
        for i in range(len(reducers)):
            keys = self.__KVServer.getKeys(id, self.config['Master']['reduce_role_name'], i, self.config['Master']['output_dir'])
            for key in keys:
                reducer_response.append(('.'.join(key.split('.')[:-1]), self.__KVServer.get(id, key.split('.')[0], self.config['Master']['reduce_role_name'], i, self.config['Master']['output_dir'])))
        print('Reducers have completed their assigned task...')

        # Generate final output to store at output location and to send in reply to client
        reducer_response = sorted(reducer_response, key = lambda x: -int(x[1]))
        try:
            with open(output_location, 'w') as fp:
                fp.write('\n'.join('{} {}'.format(x[0],x[1]) for x in reducer_response))
        except Exception:
            print('Error occurred while writing output to output_location.')
        print('Map-reduce task has been completed...')
        return reducer_response
Example #8
 def killWorkers(self):
     for w, p in self.workerList.items():
         try:
             rpyc.async_(w.root.killServer)()
         except:
             self.addValue(p, self.notConnected)
             self.removeValue(p, self.connected)
             self.workersToRemove.append(w)
             print("notConnected port: " + str(p))
     # Bumped outside loop since dicts cannot change len at runtime
     for w in self.workersToRemove:
         self.workerList.pop(w)
     self.workersToRemove = []  # Empty list to prevent violations
Example #9
def scheduler(threadName):
    global tasks
    while True:
        if tasks.is_empty():
            continue
        else:
            while not tasks.is_empty() and not resources.is_empty():
                    deleted_task = tasks.delete()
                    print("jobId: ", deleted_task.job_id, "| Type(0=M,1=R): ", deleted_task.type, "| splitNumber: ",
                          deleted_task.info, "| thread: ", threadName, "| priority: ", deleted_task.priority)
                    deleted_resource = resources.delete()
                    print(deleted_resource.worker.id)
                    #print("jobId: ", deleted_task.job_id, "| Type(0=M,1=R): ", deleted_task.type, "| splitNumber: ",
                          #deleted_task.info, "| thread: ", threadName, "| priority: ", deleted_task.priority, file=deleted_resource.worker.conn.modules.sys.stdout)
                    deleted_task.worker = deleted_resource.worker
                    #deleted_task.worker.tasks.append(deleted_task)
                    deleted_task.resource = deleted_resource
                    try:
                        deleted_task.conn = rpyc.classic.connect(deleted_resource.worker.ip, port=22222)
                    except:
                        print("Ddddddddddddddddddddddddddddd")
                    else:
                        deleted_task.conn.execute(wc_txt)
                        deleted_task.remote_func = deleted_task.conn.namespace['word_count_map']
                        #deleted_task.remote_func = deleted_task.conn.namespace['length_count_map']

                        deleted_task.func = rpyc.async_(deleted_task.remote_func)
                        deleted_task.result = deleted_task.func(deleted_task.info, deleted_resource.worker.id)
                        #deleted_task.result.add_callback(r_func(deleted_resource))
                        _thread.start_new_thread(result, ("SchedulerThread", deleted_task.worker, deleted_task.result, deleted_resource, deleted_task))
Example #10
 def clearWorkerBuff(self):
     for w, p in self.workerList.items():
         print("CLEARING WORKER BUFFER" + str(p))
         try:
             # Try to deliver a task to a worker
             rpyc.async_(w.root.clearBuffer)()
         except:
             self.addValue(p, self.notConnected)
             self.removeValue(p, self.connected)
             self.workersToRemove.append(w)
             #print("notConnected port: "+str(p))
     print("LOADED ALL TASKS")
     # Bumped outside loop since dicts cannot change len at runtime
     for w in self.workersToRemove:
         self.workerList.pop(w)
     self.workersToRemove = []  # Empty list to prevent violations
Example #11
def obtain(proxy, serv=None):
    """
    Obtain a remote netref object by value (i.e., copy it to the local Python instance).

    Wrapper around :func:`rpyc.utils.classic.obtain` with some special cases handling.
    `serv` specifies the current remote service. If it is of type :class:`SocketTunnelService`, use its socket tunnel for faster transfer.
    """
    if not isinstance(proxy, rpyc.BaseNetref):
        return proxy
    if isinstance(proxy, np.ndarray):
        elsize = np.prod(proxy.shape, dtype="u8")
        bytesize = proxy.dtype.itemsize * elsize
        if bytesize > _numpy_block_size:
            if _is_tunnel_service(serv):
                loc_serv = serv.peer
                async_send = rpyc.async_(serv.tunnel_send)
                async_send(proxy, packer="numpy")
                data = loc_serv.tunnel_recv()
                return np.frombuffer(data, dtype=proxy.dtype.str).reshape(
                    proxy.shape)
            else:
                fproxy = proxy.flatten()
                loc = np.zeros(elsize, dtype=proxy.dtype.str)
                block_size = _numpy_block_size // proxy.dtype.itemsize
                for pos in range(0, elsize, block_size):
                    loc[pos:pos + block_size] = rpyc.classic.obtain(
                        fproxy[pos:pos + block_size])
                return loc.reshape(proxy.shape)
    return rpyc.classic.obtain(proxy)
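A rough usage sketch for the wrapper above, assuming a classic rpyc server with numpy installed; the host name is a placeholder.

import rpyc

conn = rpyc.classic.connect("remote-host")                   # placeholder host
remote_np = conn.modules.numpy
remote_arr = remote_np.arange(1_000_000, dtype="float64")    # netref to a remote array

local_arr = obtain(remote_arr)     # copied by value into a local numpy array
print(type(local_arr), local_arr.shape)
conn.close()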
Example #12
    def update_input_buffer(self, context_id: str, buffer_index: int,
                            local_object: Any) -> Any:
        """[summary]

        Args:
            context_id (str): [description]
            buffer_index (int): [description]
            local_object (Any): [description]

        Raises:
            ValueError: [description]

        Returns:
            Any: [description]
        """
        if context_id not in self.contexts:
            raise ValueError("No context {} found!".format(context_id))

        if self.use_async:
            logging.debug("Creating buffers asynchronously")
            _update_input_buffer = rpyc.async_(
                self.cluster_cl.update_input_buffer)
            return _update_input_buffer(context_id, buffer_index, local_object)
        else:
            self.cluster_cl.update_input_buffer(context_id, buffer_index,
                                                local_object)
            return None
Example #13
    def execute_kernel(self,
                       context_id: str,
                       kernel_name: str,
                       work_size: tuple,
                       wait_execution: bool = True) -> np.array:
        """[summary]

        Args:
            context_id (str): [description]
            kernel_name (str): [description]
            work_size (tuple): [description]
            wait_execution (bool, optional): [description]. Defaults to True.

        Raises:
            ValueError: [description]

        Returns:
            np.array: [description]
        """
        if context_id not in self.contexts:
            raise ValueError("No context {} found!".format(context_id))

        if type(work_size) != tuple:
            raise ValueError("work_size has to be a tuple")

        if self.use_async:
            logging.debug("Executing asynchronously the kernel")
            _execute_kernel = rpyc.async_(self.cluster_cl.execute_kernel)
            return _execute_kernel(context_id, kernel_name, work_size,
                                   wait_execution)
        else:
            return np.array(
                self.cluster_cl.execute_kernel(context_id, kernel_name,
                                               work_size, wait_execution))
Example #14
 def exposed_login(self, clientName, callback, appFolder=None):
     '''
     Log into the service. 
     '''
     if ControllerService.STOPPING: return
     global theController, ctrlLock  # ,guiClient
     #         if clientName == "*gui*":       # NOTE: the GUI is client of the service
     #             assert self.client == None and guiClient == None
     #             guiClient = ServiceClient(clientName,async_(callback),self,None)
     #             return ()
     #         else:                           # RIAPS node client
     assert (appFolder != None)
     if (self.client and
             not self.client.stale) or theController.isClient(clientName):
         # raise ValueError("already logged in")
         oldClient = theController.getClient(clientName)
         oldClient.exposed_logout()
         theController.delClient(clientName)
     self.client = ServiceClient(clientName, async_(callback), self,
                                 appFolder)  # Register client's callback
     theController.addClient(clientName, self.client)
     dbaseNode = theController.nodeAddr  # The (redis) database is running on this same node
     if theController.discoType == 'redis':
         dbasePort = const.discoRedisPort
     elif theController.discoType == 'opendht':
         dbasePort = theController.dhtPort
     else:
         dbasePort = -1
     return ('dbase', str(dbaseNode), str(dbasePort))  # Reply to the client
Example #15
 def do_reconnect(self):
     global adb
     import sys
     for i in range(5):
         try:
             self.remote = rpyc.classic.connect(self.address)
             self.remote.modules.sys.stdin = sys.stdin
             self.remote.modules.sys.stdout = sys.stdout
             self.remote.modules.sys.stderr = sys.stderr
             self.remote._config['sync_request_timeout'] = None
             self.remote_exec = rpyc.async_(self.remote.modules.builtins.exec) # Independent namespace
             # self.remote_exec = rpyc.async_(self.remote.execute) # Common namespace
             return True
         # ConnectionRefusedError: [Errno 111] Connection refused
         except Exception as e:
             self.remote = None
             # self.log.debug('%s on Remote IP: %s' % (repr(e), self.address))
             print("[ rpyc-kernel ]( Connect IP: %s at %s)" % (self.address, time.asctime()))
         time.sleep(2)
     print("[ rpyc-kernel ]( Connect IP: %s fail! )" % (self.address))
     if sys.platform.startswith('win'):
         try:
             if adb.connect_check():
                 adb.kill_server()
         except Exception as e:
             self.log.info("[ rpyc-kernel ]( adb %s )" % (str(e)))
Example #16
    def _extend_replays(self, num_per_problem):
        """Extend the replays for //all// problems asynchronously."""
        # fire off extension methods
        results = []
        for problem in tqdm.tqdm(self.problems, desc='spawn extend'):
            get_action = self._make_get_action(problem, stochastic=True)
            extend_replay = rpyc.async_(problem.problem_service.extend_replay)
            result = extend_replay(
                get_action,
                num_per_problem,
                no_plan=bool(self.use_saved_training_set))
            # apparently I need to keep hold of async ref according to RPyC
            # docs (it's weak or s.th). Also, I need a background thread to
            # serve each environment's requests (...this may break things
            # slightly).
            bg_thread = rpyc.utils.helpers.BgServingThread(
                problem.problem_server.conn)
            results.append((problem, extend_replay, result, bg_thread))

        # Now we wait for results to come back. This is horribly inefficient
        # when some environments are much harder than others; oh well.
        succ_rates = []
        for problem, _, result, bg_thread in tqdm.tqdm(
                results, desc='wait extend'):
            succ_rates.append((problem, to_local(result.value)))
            # always shut down cleanly
            bg_thread.stop()

        return succ_rates
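The comments above describe two requirements when using rpyc.async_ with callbacks: keep a strong reference to the AsyncResult, and serve the connection so the remote side's requests (such as calls back into get_action) are handled. A minimal sketch of that pattern, where the host, the port, and the exposed extend_replay signature are assumptions for illustration:

import rpyc
from rpyc.utils.helpers import BgServingThread

conn = rpyc.connect("localhost", 18861)          # placeholder host/port
bg_thread = BgServingThread(conn)                # serves incoming requests/callbacks

extend_replay = rpyc.async_(conn.root.extend_replay)
result = extend_replay(lambda obs: 0, 100)       # keep this AsyncResult reference alive

result.wait()                                    # block until the remote call completes
print(result.value)

bg_thread.stop()
conn.close()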
Example #17
async def uplThumbnail(channelID, videoID, live=True):
    extServer = rpyc.connect(settings["thumbnailIP"], int(settings["thumbnailPort"]))
    asyncUpl = rpyc.async_(extServer.root.thumbGrab)
    uplSuccess = False

    for x in range(3):
        if live:
            upload = asyncUpl(channelID, f'https://img.youtube.com/vi/{videoID}/maxresdefault_live.jpg')
        else:
            upload = asyncUpl(channelID, f'https://img.youtube.com/vi/{videoID}/maxresdefault.jpg')
        uplSuccess = False

        while True:
            if upload.ready and not upload.error:
                uplSuccess = True
                break
            elif upload.error:
                break

            await asyncio.sleep(0.5)

        if uplSuccess and "yagoo.ezz.moe" in upload.value:
            return upload.value
        print("Stream - Couldn't upload thumbnail!")
        if not upload.error:
            print(upload.value)

    return None
Example #18
    def broadcast_updates(self):
        '''
        Asynchronously sends the uncommitted updates to all other replicas, along
        with the local replica ID and the latest value of X.
        '''

        #assumes replicas will never fail and will all be available when requested
        #TODO: handle possible errors in a better way
        confirmations = [False for peer in self.peer_connections]
        while not all(confirmations):
            requests_in_flight = []

            for i in range(len(self.peer_connections)):
                if confirmations[i] == False:
                    port = self.peer_connections[i]
                    conn = rpyc.connect("localhost", port=port)
                    replicate_changes_async = rpyc.async_(
                        conn.root.replicate_changes)
                    ar_obj = replicate_changes_async(self.id, self.X,
                                                     self.uncommitted_history)
                    async_request = (i, ar_obj)
                    requests_in_flight.append(async_request)

            for confirmation_id, ar_obj in requests_in_flight:
                confirmations[confirmation_id] = ar_obj.value
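The method above fires replicate_changes at every peer and then reads each AsyncResult.value, which blocks until that peer replies. A minimal sketch of a replica-side service that could answer it, with the signature inferred from the call above; the port and the internal state handling are illustrative assumptions:

import rpyc
from rpyc.utils.server import ThreadedServer

class ReplicaService(rpyc.Service):
    def __init__(self):
        super().__init__()
        self.X = 0
        self.uncommitted_history = []

    def exposed_replicate_changes(self, sender_id, x, uncommitted_history):
        # Record the sender's uncommitted updates and adopt its latest X.
        self.uncommitted_history.extend(list(uncommitted_history))
        self.X = x
        return True     # read by the broadcaster as its confirmation

if __name__ == '__main__':
    ThreadedServer(ReplicaService(), port=18870).start()   # placeholder port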
Example #19
 def __init__(self, filename, callback, interval = 1):
     self.filename = filename
     self.interval = interval
     self.last_stat = None
     self.callback = rpyc.async_(callback)   # make the callback async
     self.active = True
     self.thread = Thread(target = self.work)
     self.thread.start()
Example #20
 def __init__(self, filename, callback, interval=1):
     self.filename = filename
     self.interval = interval
     self.last_stat = None
     self.callback = rpyc.async_(callback)  # make the callback async
     self.active = True
     self.thread = Thread(target=self.work)
     self.thread.start()
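Examples #19 and #20 show only the constructor of this callback pattern, which matches the file-monitor example in the rpyc tutorial. Below is a self-contained sketch with the work loop and a stop method filled in along the lines of that tutorial; the callback is assumed to take (old_stat, new_stat).

import os
import time
from threading import Thread

import rpyc

class FileMonitor:
    def __init__(self, filename, callback, interval=1):
        self.filename = filename
        self.interval = interval
        self.last_stat = None
        self.callback = rpyc.async_(callback)   # make the callback async
        self.active = True
        self.thread = Thread(target=self.work)
        self.thread.start()

    def work(self):
        while self.active:
            stat = os.stat(self.filename)
            if self.last_stat is not None and self.last_stat != stat:
                self.callback(self.last_stat, stat)   # fire-and-forget notification
            self.last_stat = stat
            time.sleep(self.interval)

    def stop(self):
        self.active = False
        self.thread.join()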
Example #21
 def exposed_login(self, username, password, callback):
     if self.token and not self.token.stale:
         raise ValueError("already logged in")
     if username in USERS_DB and password == USERS_DB[username]:
         self.token = UserToken(username, async_(callback))
         return self.token
     else:
         raise ValueError("wrong username or password")
Example #22
 def _recv_socket(self):
     """Set up a listener to receive a socket connection from the other service."""
     def listen(s):
         s.set_timeout(self._default_tunnel_timeout)
         self.tunnel_socket = s
     remote_call = rpyc.async_(self._conn.root._send_socket)
     def port_func(port):
         remote_call(net.get_local_addr(), port)
     net.listen(None, 0, listen, port_func=port_func, timeout=self._default_tunnel_timeout, connections_number=1, nodelay=True)
Example #23
    def exposed_run_parall(self):
        if (self._run_parall is None): return -1

        for num, run in enumerate(self._run_parall):
            print(run)
            run = rpyc.async_(run)  # Keep the async proxy in a variable, as the docs recommend

            future = run(self.to_run, self.args_parall[num])
            self._result.append(future)
Example #24
def _obtain_single(proxy, serv):
    if _is_tunnel_service(serv):
        loc_serv = serv.peer
        async_send = rpyc.async_(serv.tunnel_send)
        async_send(proxy, packer="pickle")
        data = pickle.loads(loc_serv.tunnel_recv())
        return data
    else:
        return rpyc.classic.obtain(proxy)
Example #25
 def loadTasks(self):
     print("LOADING TASKS")
     while len(self.taskList) != 0:
         for w, p in self.workerList.items():
             if not self.taskList:
                 break
             print("LOADING TASK " + str(self.taskList[0]))
             try:
                 # Try to deliver a task to a worker
                 rpyc.async_(w.root.loadTask)(self.taskList.pop(0))
                 print("here")
             except:
                 self.addValue(p, self.notConnected)
                 self.removeValue(p, self.connected)
                 self.workersToRemove.append(w)
                 print("notConnected port: " + str(p))
     print("LOADED ALL TASKS")
     # Bumped outside loop since dicts cannot change len at runtime
     for w in self.workersToRemove:
         self.workerList.pop(w)
     self.workersToRemove = []  # Empty list to prevent violations
Example #26
    def greyscale(self, src, out, ext):
        print("PREPROCESSING: GREYSCALE")
        self.local_log("PREPROCESSING: GREYSCALE")
        self.getTasks(src, "nc")
        self.loadTasks()
        for w, p in self.workerList.items():
            try:
                # Try to deliver a task to a worker
                rpyc.async_(w.root.greyscale)()
            except:
                self.addValue(p, self.notConnected)
                self.removeValue(p, self.connected)
                self.workersToRemove.append(w)
                #print("notConnected port: "+str(p))

        # Bumped outside loop since dicts cannot change len at runtime
        for w in self.workersToRemove:
            self.workerList.pop(w)
        self.workersToRemove = []  # Empty list to prevent violations
        self.wait()
Example #27
    def attach(self, callback):
        self.callbacks.append(callback)
        acallback = rpyc.async_(callback)

        # first, send recent output
        #lines = "\n<recent output>\n\n" + "".join(self.recent_output)
        lines = "".join(self.recent_output)
        acallback(self.run_name, lines)

        # now, hook to our list of callback for next output line
        with self.lock:
            self.acallbacks.append(acallback)
Example #28
        def wrapper(*args, **kwargs):
            # Connect to first host
            try:
                connection1 = rpyc.connect(
                    host1,
                    host1_port,
                    keepalive=True,
                    service=rpyc.core.service.MasterService)
                connection1._config[
                    'sync_request_timeout'] = None  # No timeout
                logging.info(f'Connected to {host1} on port {host1_port}')
            except Exception as e:
                logging.error(f'Could not connect to host1: {e}')
                raise e

            # Connect to second host
            try:
                connection2 = rpyc.connect(
                    host2,
                    host2_port,
                    keepalive=True,
                    service=rpyc.core.service.MasterService)
                connection2._config[
                    'sync_request_timeout'] = None  # No timeout
                logging.info(f'Connected to {host2} on port {host2_port}')
            except Exception as e:
                logging.error(f'Could not connect to host2: {e}')
                raise e

            # Teleport test function to first host
            host1_test_remote = rpyc.async_(connection1.teleport(host1_test))
            logging.info(f'Teleported {host1_test.__name__} to {host1}')

            # Teleport test function to second host
            host2_test_remote = rpyc.async_(connection2.teleport(host2_test))
            logging.info(f'Teleported {host2_test.__name__} to {host2}')

            # Now we have a nice test setup
            logging.info('Successfully set up the test on both hosts')
            return func(host1_test_remote, host2_test_remote, *args, **kwargs)
Example #29
 def _register_listeners_async():
     """Issues an asynchronous call to the server containing all the
     parameters that this client wants to be notified of in case of a
     changed value."""
     pending = self._listeners_pending_remote_registration
     if pending:
         # this copies the list before clearing it below. Otherwise
         # we just transmit an empty list in the async call
         pending = pending[:]
         self._async_listener_registering = async_(
             self.remote.exposed_register_remote_listeners)(self.uuid,
                                                            pending)
         self._listeners_pending_remote_registration.clear()
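The copy-before-clear note above matters because rpyc passes mutable arguments by reference: the remote side receives a netref to the local list, so clearing that list before the async call is served would make the server see it empty. A minimal illustration, where the connection details and the service method name are placeholders based on the call above:

import rpyc

conn = rpyc.connect("localhost", 18861)                  # placeholder host/port
register = rpyc.async_(conn.root.register_remote_listeners)

pending = ["param_a", "param_b"]
register("client-uuid", pending[:])   # snapshot: the server sees these two names
pending.clear()                       # safe; the snapshot list is untouched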
Example #30
 def invoke_current_algorithms(self, action_dict, streaming_url):
     for algorithm, action_item in action_dict.items():
         func_async = rpyc.async_(self.conn.root.run_algorithm)
         json_str = json.dumps({
             'streaming_url': streaming_url,
             'camera_id': str(self.camera_id),
             'result_api': CAMERA_API + str(self.camera_id) + '/result'
         })
         self.result_dict[algorithm] = func_async(algorithm, json_str)
     return self.result_dict
Example #31
def obtain(proxy, serv=None, deep=False, direct=False):
    """
    Obtain a remote netref object by value (i.e., copy it to the local Python instance).

    Wrapper around :func:`rpyc.utils.classic.obtain` with some special cases handling.
    `serv` specifies the current remote service. If it is of type :class:`SocketTunnelService`, use its socket tunnel for faster transfer.
    If ``deep==True`` and ``proxy`` is a container (tuple, list, or dict), run the function recursively for all its sub-elements.
    If ``direct==True``, directly use RPyC ``obtain`` method; otherwise use the custom method, which works better with large numpy arrays,
    but worse with composite types (e.g., lists).
    """
    t = type(proxy)  # each isinstance call is performed on the server, so getting type once is faster
    # tuples are not passed as netrefs, so they need to be checked first
    if deep and not direct and issubclass(t, tuple):
        return tuple([obtain(v, serv=serv) for v in proxy])
    if not issubclass(t, rpyc.BaseNetref):
        return proxy
    if direct:
        return rpyc.classic.obtain(proxy)
    if deep:
        if isinstance(proxy, list):
            return [obtain(v, serv=serv) for v in proxy]
        if isinstance(proxy, dict):
            return {
                obtain(k, serv=serv): obtain(v, serv=serv)
                for k, v in proxy.items()
            }
    if isinstance(proxy, np.ndarray) or (t.__name__ == "numpy.ndarray" and all(
        [hasattr(proxy, a)
         for a in ["shape", "dtype", "tostring", "flatten"]])):
        elsize = np.prod(proxy.shape, dtype="u8")
        bytesize = proxy.dtype.itemsize * elsize
        if bytesize > _numpy_block_size:
            if _is_tunnel_service(serv):
                loc_serv = serv.peer
                async_send = rpyc.async_(serv.tunnel_send)
                async_send(proxy, packer="numpy")
                data = loc_serv.tunnel_recv()
                return np.frombuffer(data, dtype=proxy.dtype.str).reshape(
                    proxy.shape)
            else:
                fproxy = proxy.flatten()
                loc = np.zeros(elsize, dtype=proxy.dtype.str)
                block_size = _numpy_block_size // proxy.dtype.itemsize
                for pos in range(0, elsize, block_size):
                    loc[pos:pos + block_size] = rpyc.classic.obtain(
                        fproxy[pos:pos + block_size])
                return loc.reshape(proxy.shape)
    return rpyc.classic.obtain(proxy)
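A rough usage sketch for the deep and direct options above, assuming a classic rpyc server with numpy available; the host name is a placeholder.

import rpyc

conn = rpyc.classic.connect("remote-host")       # placeholder host
conn.execute("import numpy; data = {'a': numpy.ones(8), 'b': numpy.zeros(8)}")
remote_data = conn.namespace["data"]

local_data = obtain(remote_data, deep=True)      # recurses into keys and values
small = obtain(remote_data["a"], direct=True)    # plain rpyc.classic.obtain
conn.close()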
Example #32
 def __init__(self, callback, interval):
     self.callback = rpyc.async_(callback)
     self.interval = interval
     self.active = True
     self.thread = rpyc.spawn(self.work)
Example #33
 def setUp(self):
     self.conn = rpyc.classic.connect_thread()
     self.a_sleep = rpyc.async_(self.conn.modules.time.sleep)
     self.a_int = rpyc.async_(self.conn.builtin.int)
Example #34
 def bg_timer_thread():
     while active[0]:
         rpyc.async_(lbl3.set_text)("Server time is: %s" % (time.ctime(),))
         time.sleep(1)