def master_init_slaves(cls, num_weights, slaves=(), timeout=None, limit=65536, master_oversees=False, **kwargs): """Initialize proxies for each of the slaves and block on a working connection. """ # Initialize server proxies for the slaves srv_timeout = 3 * timeout if timeout is not None else 1000 all_machines = [] if master_oversees else [None] for slave in slaves: host, port = slave.split(':') all_machines.append( jsonrpc.ServerProxy( jsonrpc.JsonRpc20(), jsonrpc.TransportTcpIp(addr=(host, int(port)), limit=limit, timeout=srv_timeout))) # Block on a working connection to each slave machine_idx = len(all_machines) - 1 working_connection = False while not working_connection: try: cls.call_slave(all_machines[machine_idx], 'slave_init_weights', num_weights) working_connection = True except jsonrpc.RPCTransportError: print "Waiting to establish connection to slave", slave time.sleep(10) return all_machines
def from_server(cls, host_port):
    """Return a proxy Gigaword object with bound methods to retrieve counts.

    Args:
        host_port: "host:port" string locating the server, or None to
            disable the remote model.

    Returns:
        A jsonrpc.ServerProxy bound to the given address, or None when
        host_port is None.
    """
    # Guard added for consistency with the sibling from_server()
    # implementations (LM / DependencyModel), which all accept None to
    # mean "no server"; previously this variant raised AttributeError.
    if host_port is None:
        return None
    host, port = host_port.split(':')
    return jsonrpc.ServerProxy(
        jsonrpc.JsonRpc20(),
        jsonrpc.TransportTcpIp(addr=(host, int(port))))
def from_server(cls, host_port, timeout=60):
    """Return a proxy LM object with bound methods to support ngram scoring.

    Args:
        host_port: "host:port" string locating the LM server, or None to
            disable the remote model.
        timeout: RPC timeout in seconds (default 60).

    Returns:
        A jsonrpc.ServerProxy for the LM server, or None when host_port
        is None.
    """
    if host_port is None:
        return None
    address, port_str = host_port.split(':')
    transport = jsonrpc.TransportTcpIp(addr=(address, int(port_str)),
                                       timeout=timeout)
    return jsonrpc.ServerProxy(jsonrpc.JsonRpc20(), transport)
def from_server(cls, host_port, timeout=60):
    """Return a proxy DependencyModel object with bound methods to support
    retrieval of dependency probabilities.

    Args:
        host_port: "host:port" string locating the dependency server, or
            None to disable the remote model.
        timeout: RPC timeout in seconds (default 60).

    Returns:
        A jsonrpc.ServerProxy for the dependency server, or None when
        host_port is None.
    """
    if host_port is None:
        return None
    hostname, port = host_port.split(':')
    protocol = jsonrpc.JsonRpc20()
    tcp_transport = jsonrpc.TransportTcpIp(addr=(hostname, int(port)),
                                           timeout=timeout)
    return jsonrpc.ServerProxy(protocol, tcp_transport)
def train_slave(self, instances, host, port, slaves=(), minibatch_size=1, timeout=None, limit=65536, **kwargs): """Run the slave process of a round-robin distributed learner. This stores all the instances and model parameters (including large initialized models) as object members and must therefore NEVER be saved to disk. """ # Note the inversion here compared to the other methods. Slave models # will never be saved to disk and need to store all their state, # including runtime parameters, as class members. self.params.update(kwargs) self.params['timeout'] = timeout self.instances = instances if minibatch_size == 1: self.machine_init_serial(instances, slaves=slaves, **self.params) else: self.machine_init_minibatch(instances, minibatch_size=minibatch_size, slaves=slaves, **self.params) srv_timeout = 3 * timeout if timeout is not None else 1000 server = jsonrpc.Server( jsonrpc.JsonRpc20(), jsonrpc.TransportTcpIp(addr=(host, int(port)), limit=limit, timeout=srv_timeout)) server.register_function(self.slave_init_weights) server.register_function(self.slave_receive_weights) server.register_function(self.slave_decode_instance) server.register_function(self.slave_fetch_weight_update) print "Serving at %s:%s with timeout %ds" % (host, port, srv_timeout) server.serve()
parser = argparse.ArgumentParser( description='Start an LM server') parser.add_argument('--ngram_order', action='store', type=int, help="order of n-grams to serve", default=3) parser.add_argument('--lm_path', action='store', help="path to the trained SRILM language model", default='/path/to/project/resources/LMs/sample.lm') parser.add_argument('--host', action='store', help="host to serve on (default localhost; 0.0.0.0 for public)", default=socket.gethostname()) #os.environ['HOSTNAME'] parser.add_argument('--port', action='store', type=int, help="port to serve on (default 8081)", default=8081) parser.add_argument('--timeout', action='store', type=int, help="time limit for responses", default=200) args = parser.parse_args() server = jsonrpc.Server(jsonrpc.JsonRpc20(), jsonrpc.TransportTcpIp(addr=(args.host, args.port), timeout=args.timeout)) lm = LangModel(args.ngram_order, lm_path=args.lm_path) server.register_function(lm.score_ngram) server.register_function(lm.score_ngrams) server.register_function(lm.score_sent) print 'Serving %s-grams from %s on http://%s:%s' % (args.ngram_order, args.lm_path, args.host, args.port) server.serve()
action='store', help="path to the WSJ in Stanford parse format",
# NOTE(review): this chunk opens mid-call — the add_argument(...) these
# keyword args belong to starts above the visible region.
default='/proj/fluke/users/kapil/resources/' +
    'treebank-depenified-stanford/wsj_gold_stanford_all.deps')
parser.add_argument(
    '--host', action='store',
    help="host to serve on (default localhost; 0.0.0.0 for public)",
    # NOTE(review): raises KeyError if HOSTNAME is unset in the
    # environment — confirm this is acceptable for deployment.
    default=os.environ['HOSTNAME'])
parser.add_argument('--port', action='store', type=int,
                    help="port to serve on (default 8082)", default=8082)
parser.add_argument('--timeout', action='store', type=int,
                    help="time limit for responses", default=60)
args = parser.parse_args()
# Serve dependency log-probabilities over JSON-RPC; serve() blocks forever.
server = jsonrpc.Server(
    jsonrpc.JsonRpc20(),
    jsonrpc.TransportTcpIp(addr=(args.host, args.port),
                           timeout=args.timeout))
dm = DependencyModel(args.treebank_path)
server.register_function(dm.get_logprob)
print 'Serving dependencies from %s on http://%s:%s' % \
    (args.treebank_path, args.host, args.port)
server.serve()