def predict(env):
    """Predict"""
    args = env.args

    logging.info("Load the dataset")
    if args.prob:
        env.fields = env.fields._replace(PHEAD=Field('prob'))
    predicts = Corpus.load(args.infer_data_path, env.fields)
    dataset = TextDataset(predicts, [env.WORD, env.FEAT], args.buckets)
    # set the data loader
    dataset.loader = batchify(dataset, args.batch_size)
    logging.info(f"{len(dataset)} sentences, "
                 f"{len(dataset.loader)} batches")

    logging.info("Load the model")
    model = load(args.model_path)
    model.args = args

    logging.info("Make predictions on the dataset")
    start = datetime.datetime.now()
    model.eval()
    pred_arcs, pred_rels, pred_probs = epoch_predict(env, args, model,
                                                     dataset.loader)
    total_time = datetime.datetime.now() - start
    # restore the order of sentences in the buckets
    indices = np.argsort(
        np.array([i for bucket in dataset.buckets.values() for i in bucket]))
    predicts.head = [pred_arcs[i] for i in indices]
    predicts.deprel = [pred_rels[i] for i in indices]
    if args.prob:
        predicts.prob = [pred_probs[i] for i in indices]

    logging.info(f"Save the predicted result to {args.infer_result_path}")
    predicts.save(args.infer_result_path)
    logging.info(f"{total_time}s elapsed, "
                 f"{len(dataset) / total_time.total_seconds():.2f} Sents/s")
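# Note on the reordering above: bucketing shuffles the sentence order, and
# epoch_predict emits results in bucket order. Concatenating
# dataset.buckets.values() yields `order`, where order[j] is the original
# index of the j-th prediction, so np.argsort(order) is the inverse
# permutation. A minimal standalone sketch (names and values illustrative):
#
#     import numpy as np
#     order = np.array([2, 0, 3, 1])          # j-th prediction <- sentence order[j]
#     preds = ["p2", "p0", "p3", "p1"]        # predictions in bucket order
#     indices = np.argsort(order)             # inverse permutation: [1, 3, 0, 2]
#     restored = [preds[i] for i in indices]  # ["p0", "p1", "p2", "p3"]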
def evaluate(env):
    """Evaluate"""
    arguments = env.args
    punctuation = dygraph.to_variable(env.puncts, zero_copy=False)

    logging.info("Load the dataset")
    evaluates = Corpus.load(arguments.test_data_path, env.fields)
    dataset = TextDataset(evaluates, env.fields, arguments.buckets)
    # set the data loader
    dataset.loader = batchify(dataset, arguments.batch_size)
    logging.info("{} sentences, ".format(len(dataset)) +
                 "{} batches, ".format(len(dataset.loader)) +
                 "{} buckets".format(len(dataset.buckets)))

    logging.info("Load the model")
    model = load(arguments.model_path)

    logging.info("Evaluate the dataset")
    start = datetime.datetime.now()
    loss, metric = epoch_evaluate(arguments, model, dataset.loader,
                                  punctuation)
    total_time = datetime.datetime.now() - start
    logging.info("Loss: {:.4f} {}".format(loss, metric))
    logging.info("{}s elapsed, {:.2f} Sents/s".format(
        total_time, len(dataset) / total_time.total_seconds()))
def __init__(
        self,
        use_cuda=False,
        tree=True,
        prob=False,
        use_pos=False,
        model_files_path=None,
        buckets=False,
        batch_size=None,
        encoding_model="ernie-lstm",
):
    if model_files_path is None:
        if encoding_model in [
                "lstm", "transformer", "ernie-1.0", "ernie-tiny", "ernie-lstm"
        ]:
            model_files_path = self._get_abs_path(
                os.path.join("./model_files/", encoding_model))
        else:
            raise KeyError("Unknown encoding model.")

    if not os.path.exists(model_files_path):
        try:
            utils.download_model_from_url(model_files_path, encoding_model)
        except Exception as e:
            logging.error("Failed to download model, please try again")
            logging.error("error: {}".format(e))
            raise e

    args = [
        "--model_files={}".format(model_files_path),
        "--config_path={}".format(self._get_abs_path('config.ini')),
        "--encoding_model={}".format(encoding_model)
    ]
    if use_cuda:
        args.append("--use_cuda")
    if tree:
        args.append("--tree")
    if prob:
        args.append("--prob")
    if batch_size:
        args.append("--batch_size={}".format(batch_size))

    args = ArgConfig(args)
    # Don't instantiate the log handle
    args.log_path = None
    self.env = Environment(args)
    self.args = self.env.args
    fluid.enable_imperative(self.env.place)
    self.model = load(self.args.model_path)
    self.model.eval()
    self.lac = None
    self.use_pos = use_pos
    # buckets=None if not buckets else defaults
    if not buckets:
        self.args.buckets = None
    if args.prob:
        self.env.fields = self.env.fields._replace(PHEAD=Field("prob"))
    if self.use_pos:
        self.env.fields = self.env.fields._replace(CPOS=Field("postag"))
    # set default batch size if batch_size is None and not buckets
    if batch_size is None and not buckets:
        self.args.batch_size = 50
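# A hedged usage sketch for the constructor above, assuming this class is the
# released DDParser interface and exposes a parse() method as described in the
# project README; the output field names are illustrative:
#
#     from ddparser import DDParser
#     ddp = DDParser(encoding_model="ernie-lstm", prob=True, use_pos=True)
#     ddp.parse("百度是一家高科技公司")
#     # -> [{'word': [...], 'postag': [...], 'head': [...],
#     #      'deprel': [...], 'prob': [...]}]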
def __init__(self,
             use_cuda=False,
             tree=True,
             prob=False,
             use_pos=False,
             model_files_path=None,
             buckets=False,
             batch_size=None):
    if model_files_path is None:
        model_files_path = self._get_abs_path('./model_files/baidu')

    if not os.path.exists(model_files_path):
        try:
            utils.download_model_from_url(model_files_path)
        except Exception as e:
            logging.error("Failed to download model, please try again")
            logging.error(f"error: {e}")
            # re-raise instead of returning from __init__, which would leave
            # the parser half-initialized
            raise e

    args = [
        f"--model_files={model_files_path}",
        f"--config_path={self._get_abs_path('config.ini')}"
    ]
    if use_cuda:
        args.append("--use_cuda")
    if tree:
        args.append("--tree")
    if prob:
        args.append("--prob")
    if batch_size:
        args.append(f"--batch_size={batch_size}")

    args = ArgConfig(args)
    # Don't instantiate the log handle
    args.log_path = None
    self.env = Environment(args)
    self.args = self.env.args
    fluid.enable_imperative(self.env.place)
    self.model = load(self.args.model_path)
    self.lac = None
    self.use_pos = use_pos
    # buckets=None if not buckets else defaults
    if not buckets:
        self.args.buckets = None
    if args.prob:
        self.env.fields = self.env.fields._replace(PHEAD=Field('prob'))
    if self.use_pos:
        self.env.fields = self.env.fields._replace(CPOS=Field('postag'))
    # set default batch size if batch_size is None and not buckets
    if batch_size is None and not buckets:
        self.args.batch_size = 50
def predict_query(env):
    """Predict one query"""
    args = env.args

    logging.info("Load the model")
    model = load(args.model_path)
    model.eval()
    lac_mode = "seg" if args.feat != "pos" else "lac"
    lac = LAC.LAC(mode=lac_mode)
    if args.prob:
        env.fields = env.fields._replace(PHEAD=Field("prob"))

    while True:
        query = input()
        if not isinstance(query, six.text_type):
            query = query.decode("utf-8")
        if not query:
            logging.info("quit!")
            return
        if len(query) > 200:
            logging.info("The length of the query should be no more than 200!")
            continue
        start = datetime.datetime.now()
        lac_results = lac.run([query])
        predicts = Corpus.load_lac_results(lac_results, env.fields)
        dataset = TextDataset(predicts, [env.WORD, env.FEAT])
        # set the data loader
        dataset.loader = batchify(dataset,
                                  args.batch_size,
                                  use_multiprocess=False,
                                  sequential_sampler=True)
        pred_arcs, pred_rels, pred_probs = epoch_predict(
            env, args, model, dataset.loader)
        predicts.head = pred_arcs
        predicts.deprel = pred_rels
        if args.prob:
            predicts.prob = pred_probs
        predicts._print()
        total_time = datetime.datetime.now() - start
        logging.info("{}s elapsed, {:.2f} Sents/s, {:.2f} ms/Sents".format(
            total_time,
            len(dataset) / total_time.total_seconds(),
            total_time.total_seconds() / len(dataset) * 1000))
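# For reference, LAC.run([...]) in "lac" mode returns word and POS-tag lists
# per sentence, while "seg" mode returns word lists only; Corpus.load_lac_results
# consumes that structure. A hedged sketch of the expected shapes (the example
# sentence and tags are illustrative):
#
#     lac = LAC.LAC(mode="lac")
#     lac.run(["百度是一家公司"])
#     # -> [[['百度', '是', '一家', '公司'], ['ORG', 'v', 'm', 'n']]]
#     lac = LAC.LAC(mode="seg")
#     lac.run(["百度是一家公司"])
#     # -> [['百度', '是', '一家', '公司']]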
def predict(env):
    """Predict"""
    arguments = env.args

    logging.info("Load the dataset")
    if arguments.prob:
        env.fields = env.fields._replace(PHEAD=Field("prob"))
    predicts = Corpus.load(arguments.infer_data_path, env.fields)
    # only the word and feat fields are needed for prediction
    dataset = TextDataset(predicts, [env.WORD, env.FEAT], arguments.buckets)
    # set the data loader
    dataset.loader = batchify(dataset, arguments.batch_size)
    logging.info("{} sentences, {} batches".format(len(dataset),
                                                   len(dataset.loader)))

    logging.info("Load the model")
    model = load(arguments.model_path)
    model.args = arguments

    logging.info("Make predictions on the dataset")
    start = datetime.datetime.now()
    model.eval()
    connection_predicts, deprel_predicts, predict_prob = epoch_predict(
        env, arguments, model, dataset.loader)
    total_time = datetime.datetime.now() - start
    # restore the order of sentences in the buckets
    indices = np.argsort(
        np.array([i for bucket in dataset.buckets.values() for i in bucket]))
    predicts.head = [connection_predicts[i] for i in indices]
    predicts.deprel = [deprel_predicts[i] for i in indices]
    if arguments.prob:
        predicts.prob = [predict_prob[i] for i in indices]

    logging.info("Save the predicted result to {}".format(
        arguments.infer_result_path))
    predicts.save(arguments.infer_result_path)
    logging.info("{}s elapsed, {:.2f} Sents/s".format(
        total_time, len(dataset) / total_time.total_seconds()))
def evaluate(env):
    """Evaluate"""
    args = env.args
    puncts = dygraph.to_variable(env.puncts, zero_copy=False)

    logging.info("Load the dataset")
    evaluates = Corpus.load(args.test_data_path, env.fields)
    dataset = TextDataset(evaluates, env.fields, args.buckets)
    # set the data loader
    dataset.loader = batchify(dataset, args.batch_size)
    logging.info(f"{len(dataset)} sentences, "
                 f"{len(dataset.loader)} batches, "
                 f"{len(dataset.buckets)} buckets")

    logging.info("Load the model")
    model = load(args.model_path)

    logging.info("Evaluate the dataset")
    start = datetime.datetime.now()
    loss, metric = epoch_evaluate(args, model, dataset.loader, puncts)
    total_time = datetime.datetime.now() - start
    logging.info(f"Loss: {loss:.4f} {metric}")
    logging.info(f"{total_time}s elapsed, "
                 f"{len(dataset) / total_time.total_seconds():.2f} Sents/s")
def train(env):
    """Train"""
    args = env.args

    logging.info("loading data.")
    train = Corpus.load(args.train_data_path, env.fields)
    dev = Corpus.load(args.valid_data_path, env.fields)
    test = Corpus.load(args.test_data_path, env.fields)

    logging.info("init dataset.")
    train = TextDataset(train, env.fields, args.buckets)
    dev = TextDataset(dev, env.fields, args.buckets)
    test = TextDataset(test, env.fields, args.buckets)

    logging.info("set the data loaders.")
    train.loader = batchify(train, args.batch_size, args.use_data_parallel,
                            True)
    dev.loader = batchify(dev, args.batch_size)
    test.loader = batchify(test, args.batch_size)

    logging.info(f"{'train:':6} {len(train):5} sentences, "
                 f"{len(train.loader):3} batches, "
                 f"{len(train.buckets)} buckets")
    logging.info(f"{'dev:':6} {len(dev):5} sentences, "
                 f"{len(dev.loader):3} batches, "
                 f"{len(dev.buckets)} buckets")
    logging.info(f"{'test:':6} {len(test):5} sentences, "
                 f"{len(test.loader):3} batches, "
                 f"{len(test.buckets)} buckets")

    logging.info("Create the model")
    model = Model(args, env.WORD.embed)

    # init parallel strategy
    if args.use_data_parallel:
        strategy = dygraph.parallel.prepare_context()
        model = dygraph.parallel.DataParallel(model, strategy)

    if args.use_cuda:
        grad_clip = fluid.clip.GradientClipByNorm(clip_norm=args.clip)
    else:
        grad_clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=args.clip)
    decay = dygraph.ExponentialDecay(learning_rate=args.lr,
                                     decay_steps=args.decay_steps,
                                     decay_rate=args.decay)
    optimizer = fluid.optimizer.AdamOptimizer(
        learning_rate=decay,
        beta1=args.mu,
        beta2=args.nu,
        epsilon=args.epsilon,
        parameter_list=model.parameters(),
        grad_clip=grad_clip)

    total_time = datetime.timedelta()
    best_e, best_metric = 1, Metric()

    puncts = dygraph.to_variable(env.puncts, zero_copy=False)
    logging.info("start training.")
    for epoch in range(1, args.epochs + 1):
        start = datetime.datetime.now()
        # train one epoch and update the parameters
        logging.info(f"Epoch {epoch} / {args.epochs}:")
        epoch_train(args, model, optimizer, train.loader, epoch)
        if args.local_rank == 0:
            loss, dev_metric = epoch_evaluate(args, model, dev.loader, puncts)
            logging.info(f"{'dev:':6} Loss: {loss:.4f} {dev_metric}")
            loss, test_metric = epoch_evaluate(args, model, test.loader,
                                               puncts)
            logging.info(f"{'test:':6} Loss: {loss:.4f} {test_metric}")

            t = datetime.datetime.now() - start
            # save the model if it is the best so far
            if dev_metric > best_metric and epoch > args.patience // 10:
                best_e, best_metric = epoch, dev_metric
                save(args.model_path, args, model, optimizer)
                logging.info(f"{t}s elapsed (saved)\n")
            else:
                logging.info(f"{t}s elapsed\n")
            total_time += t
            if epoch - best_e >= args.patience:
                break
    if args.local_rank == 0:
        model = load(args.model_path, model)
        loss, metric = epoch_evaluate(args, model, test.loader, puncts)
        logging.info(
            f"max score of dev is {best_metric.score:.2%} at epoch {best_e}")
        logging.info(
            f"the score of test at epoch {best_e} is {metric.score:.2%}")
        logging.info(f"average time of each epoch is {total_time / epoch}s")
        logging.info(f"{total_time}s elapsed")
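# For reference, the ExponentialDecay schedule above anneals the learning rate
# roughly as (a sketch of Paddle's documented formula, assuming the default
# staircase=False, with t the global step):
#
#     lr_t = args.lr * args.decay ** (t / args.decay_steps)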
def train(env):
    """Train"""
    args = env.args

    logging.info("loading data.")
    train = Corpus.load(args.train_data_path, env.fields)
    dev = Corpus.load(args.valid_data_path, env.fields)
    test = Corpus.load(args.test_data_path, env.fields)

    logging.info("init dataset.")
    train = TextDataset(train, env.fields, args.buckets)
    dev = TextDataset(dev, env.fields, args.buckets)
    test = TextDataset(test, env.fields, args.buckets)

    logging.info("set the data loaders.")
    train.loader = batchify(train, args.batch_size, args.use_data_parallel,
                            True)
    dev.loader = batchify(dev, args.batch_size)
    test.loader = batchify(test, args.batch_size)

    logging.info("{:6} {:5} sentences, ".format('train:', len(train)) +
                 "{:3} batches, ".format(len(train.loader)) +
                 "{} buckets".format(len(train.buckets)))
    logging.info("{:6} {:5} sentences, ".format('dev:', len(dev)) +
                 "{:3} batches, ".format(len(dev.loader)) +
                 "{} buckets".format(len(dev.buckets)))
    logging.info("{:6} {:5} sentences, ".format('test:', len(test)) +
                 "{:3} batches, ".format(len(test.loader)) +
                 "{} buckets".format(len(test.buckets)))

    logging.info("Create the model")
    model = Model(args)

    # init parallel strategy
    if args.use_data_parallel:
        dist.init_parallel_env()
        model = paddle.DataParallel(model)

    # pick the learning rate by encoder type
    if (args.encoding_model.startswith("ernie")
            and args.encoding_model != "ernie-lstm"
            or args.encoding_model == 'transformer'):
        args['lr'] = args.ernie_lr
    else:
        args['lr'] = args.lstm_lr

    if args.encoding_model.startswith("ernie") and args.encoding_model != "ernie-lstm":
        max_steps = 100 * len(train.loader)
        decay = LinearDecay(args.lr, int(args.warmup_proportion * max_steps),
                            max_steps)
        clip = args.ernie_clip
    else:
        decay = dygraph.ExponentialDecay(learning_rate=args.lr,
                                         decay_steps=args.decay_steps,
                                         decay_rate=args.decay)
        clip = args.clip

    if args.use_cuda:
        grad_clip = fluid.clip.GradientClipByNorm(clip_norm=clip)
    else:
        grad_clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=clip)

    if args.encoding_model.startswith("ernie") and args.encoding_model != "ernie-lstm":
        optimizer = AdamW(
            learning_rate=decay,
            parameter_list=model.parameters(),
            weight_decay=args.weight_decay,
            grad_clip=grad_clip,
        )
    else:
        optimizer = fluid.optimizer.AdamOptimizer(
            learning_rate=decay,
            beta1=args.mu,
            beta2=args.nu,
            epsilon=args.epsilon,
            parameter_list=model.parameters(),
            grad_clip=grad_clip,
        )

    total_time = datetime.timedelta()
    best_e, best_metric = 1, Metric()

    puncts = dygraph.to_variable(env.puncts, zero_copy=False)
    logging.info("start training.")
    for epoch in range(1, args.epochs + 1):
        start = datetime.datetime.now()
        # train one epoch and update the parameters
        logging.info("Epoch {} / {}:".format(epoch, args.epochs))
        epoch_train(args, model, optimizer, train.loader, epoch)
        if args.local_rank == 0:
            loss, dev_metric = epoch_evaluate(args, model, dev.loader, puncts)
            logging.info("{:6} Loss: {:.4f} {}".format('dev:', loss,
                                                       dev_metric))
            loss, test_metric = epoch_evaluate(args, model, test.loader,
                                               puncts)
            logging.info("{:6} Loss: {:.4f} {}".format('test:', loss,
                                                       test_metric))

            t = datetime.datetime.now() - start
            # save the model if it is the best so far
            if dev_metric > best_metric and epoch > args.patience // 10:
                best_e, best_metric = epoch, dev_metric
                save(args.model_path, args, model, optimizer)
                logging.info("{}s elapsed (saved)\n".format(t))
            else:
                logging.info("{}s elapsed\n".format(t))
            total_time += t
            if epoch - best_e >= args.patience:
                break
    if args.local_rank == 0:
        model = load(args.model_path, model)
        loss, metric = epoch_evaluate(args, model, test.loader, puncts)
        logging.info("max score of dev is {:.2%} at epoch {}".format(
            best_metric.score, best_e))
        logging.info("the score of test at epoch {} is {:.2%}".format(
            best_e, metric.score))
        logging.info("average time of each epoch is {}s".format(total_time /
                                                                epoch))
        logging.info("{}s elapsed".format(total_time))