def train_slave(self, instances, host, port, slaves=(), minibatch_size=1,
                timeout=None, limit=65536, **kwargs):
    """Run the slave process of a round-robin distributed learner.

    This stores all the instances and model parameters (including large
    initialized models) as object members and must therefore NEVER be
    saved to disk.
    """
    # Note the inversion here compared to the other methods. Slave models
    # will never be saved to disk and need to store all their state,
    # including runtime parameters, as class members.
    self.params.update(kwargs)
    self.params['timeout'] = timeout
    self.instances = instances
    # initialize the underlying model for serial or minibatch training
    if minibatch_size == 1:
        self.machine_init_serial(instances, slaves=slaves, **self.params)
    else:
        self.machine_init_minibatch(instances, minibatch_size=minibatch_size,
                                    slaves=slaves, **self.params)
    # give the server a more generous timeout than individual RPC calls
    srv_timeout = 3 * timeout if timeout is not None else 1000
    server = jsonrpc.Server(
        jsonrpc.JsonRpc20(),
        jsonrpc.TransportTcpIp(addr=(host, int(port)), limit=limit,
                               timeout=srv_timeout))
    # expose the slave-side RPC interface to the master
    server.register_function(self.slave_init_weights)
    server.register_function(self.slave_receive_weights)
    server.register_function(self.slave_decode_instance)
    server.register_function(self.slave_fetch_weight_update)
    print "Serving at %s:%s with timeout %ds" % (host, port, srv_timeout)
    server.serve()
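
# Usage sketch (not part of the original code): assuming the jsonrpc library
# used above also provides a ServerProxy client over the same transport, a
# master process could connect to a running slave and call the functions
# registered in train_slave(). The host, port, and the arguments of the
# slave_* calls below are placeholders; their real signatures are not shown
# in this excerpt.
import jsonrpc

slave = jsonrpc.ServerProxy(
    jsonrpc.JsonRpc20(),
    jsonrpc.TransportTcpIp(addr=('localhost', 8080), timeout=600))
slave.slave_init_weights()               # placeholder call: initialize weights
print slave.slave_decode_instance(0)     # placeholder call: decode one instance
print slave.slave_fetch_weight_update()  # placeholder call: fetch weight update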
import argparse
import socket

import jsonrpc

# LangModel (providing score_ngram/score_ngrams/score_sent) is assumed to be
# defined or imported elsewhere in this module.

parser = argparse.ArgumentParser(description='Start an LM server')
parser.add_argument('--ngram_order', action='store', type=int,
                    help="order of n-grams to serve", default=3)
parser.add_argument('--lm_path', action='store',
                    help="path to the trained SRILM language model",
                    default='/path/to/project/resources/LMs/sample.lm')
parser.add_argument('--host', action='store',
                    help="host to serve on (default: this machine's hostname; "
                         "use 0.0.0.0 to serve publicly)",
                    default=socket.gethostname())  # os.environ['HOSTNAME']
parser.add_argument('--port', action='store', type=int,
                    help="port to serve on (default 8081)", default=8081)
parser.add_argument('--timeout', action='store', type=int,
                    help="time limit for responses", default=200)
args = parser.parse_args()

# start a JSON-RPC server exposing the language model's scoring methods
server = jsonrpc.Server(
    jsonrpc.JsonRpc20(),
    jsonrpc.TransportTcpIp(addr=(args.host, args.port), timeout=args.timeout))
lm = LangModel(args.ngram_order, lm_path=args.lm_path)
server.register_function(lm.score_ngram)
server.register_function(lm.score_ngrams)
server.register_function(lm.score_sent)
print 'Serving %s-grams from %s on http://%s:%s' % (args.ngram_order, args.lm_path,
                                                    args.host, args.port)
server.serve()
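
# Usage sketch (not part of the original code): a client can query the LM
# server with the same jsonrpc library. The port matches the default above;
# the tokenized-sentence string passed to score_sent() is an assumption, as
# the LangModel scoring signatures are not shown in this excerpt.
import jsonrpc

lm_client = jsonrpc.ServerProxy(
    jsonrpc.JsonRpc20(),
    jsonrpc.TransportTcpIp(addr=('localhost', 8081), timeout=200))
print lm_client.score_sent('this is a test .')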