def infer():
    """Run standalone sentiment inference over the 'infer' split.

    Configuration is read from the module-level ``args``. When
    ``args.output_dir`` is set, one JSON object per example
    (``index``, ``label``, ``probs``) is written, line by line, to
    ``<output_dir>/predictions.json``.
    """
    # NOTE(review): `device` is not defined in this function; it is presumed
    # to be a module-level global created via set_device() — confirm at the
    # call site (main() builds its own `place` the same way).
    fluid.enable_dygraph(device)

    processor = SentaProcessor(
        data_dir=args.data_dir,
        vocab_path=args.vocab_path,
        random_seed=args.random_seed)

    infer_data_generator = processor.data_generator(
        batch_size=args.batch_size,
        padding_size=args.padding_size,
        places=device,
        phase='infer',
        epoch=1,
        shuffle=False)

    if args.model_type == 'cnn_net':
        model_infer = CNN(args.vocab_size, args.batch_size, args.padding_size)
    elif args.model_type == 'bow_net':
        model_infer = BOW(args.vocab_size, args.batch_size, args.padding_size)
    elif args.model_type == 'gru_net':
        model_infer = GRU(args.vocab_size, args.batch_size, args.padding_size)
    elif args.model_type == 'bigru_net':
        model_infer = BiGRU(args.vocab_size, args.batch_size,
                            args.padding_size)
    else:
        # FIX: an unknown model type previously fell through and crashed
        # later with UnboundLocalError on `model_infer`; fail fast instead
        # (consistent with main()).
        raise ValueError("Unknown model type!")

    print('Do inferring ...... ')
    inputs = [Input([None, None], 'int64', name='doc')]
    model_infer.prepare(
        None, CrossEntropy(), Accuracy(topk=(1, )), inputs, device=device)
    model_infer.load(args.checkpoints, reset_optimizer=True)

    preds = model_infer.predict(test_data=infer_data_generator)
    # Flatten the per-batch predictions into an (N, 2) probability array.
    # NOTE(review): the 2 hard-codes a binary-sentiment output — confirm.
    preds = np.array(preds[0]).reshape((-1, 2))

    if args.output_dir:
        out_path = os.path.join(args.output_dir, 'predictions.json')
        with open(out_path, 'w') as w:
            for idx, probs in enumerate(preds):
                # FIX: np.argmax returns a numpy integer, which json.dumps
                # cannot serialize; cast to a plain Python int.
                label = int(np.argmax(probs))
                result = json.dumps({
                    'index': idx,
                    'label': label,
                    'probs': probs.tolist()
                })
                w.write(result + '\n')
        print('Predictions saved at ' + os.path.join(args.output_dir,
                                                     'predictions.json'))
def main():
    """Main entry point: train, evaluate and/or infer per the config flags.

    Loads ``./config.yaml`` into ``args``, builds the requested model, and
    dispatches on ``args.do_train`` / ``args.do_val`` / ``args.do_infer``.

    Raises:
        ValueError: if none of the three mode flags is set, if the model
            type is unknown, or if validation/inference is requested
            without ``args.init_checkpoint``.
    """
    args = Config(yaml_file='./config.yaml')
    args.build()
    args.Print()

    if not (args.do_train or args.do_val or args.do_infer):
        raise ValueError("For args `do_train`, `do_val` and `do_infer`, at "
                         "least one of them must be True.")

    place = set_device("gpu" if args.use_cuda else "cpu")
    fluid.enable_dygraph(place)

    processor = EmoTectProcessor(
        data_dir=args.data_dir,
        vocab_path=args.vocab_path,
        random_seed=args.random_seed)

    if args.model_type == 'cnn_net':
        model = CNN(args.vocab_size, args.max_seq_len)
    elif args.model_type == 'bow_net':
        model = BOW(args.vocab_size, args.max_seq_len)
    elif args.model_type == 'lstm_net':
        model = LSTM(args.vocab_size, args.max_seq_len)
    elif args.model_type == 'gru_net':
        model = GRU(args.vocab_size, args.max_seq_len)
    elif args.model_type == 'bigru_net':
        # NOTE(review): BiGRU alone also takes batch_size here, unlike the
        # other constructors — confirm against its signature.
        model = BiGRU(args.vocab_size, args.batch_size, args.max_seq_len)
    else:
        raise ValueError("Unknown model type!")

    inputs = [Input([None, args.max_seq_len], 'int64', name='doc')]

    optimizer = None
    labels = None
    if args.do_train:
        train_data_generator = processor.data_generator(
            batch_size=args.batch_size,
            places=place,
            phase='train',
            epoch=args.epoch,
            padding_size=args.max_seq_len)

        num_train_examples = processor.get_num_examples(phase="train")
        max_train_steps = (args.epoch * num_train_examples
                           // args.batch_size + 1)
        print("Num train examples: %d" % num_train_examples)
        print("Max train steps: %d" % max_train_steps)

        labels = [Input([None, 1], 'int64', name='label')]
        optimizer = fluid.optimizer.Adagrad(
            learning_rate=args.lr, parameter_list=model.parameters())

    test_data_generator = None
    if args.do_train and args.do_val:
        # While training, evaluate on the dev split.
        test_data_generator = processor.data_generator(
            batch_size=args.batch_size,
            phase='dev',
            epoch=1,
            places=place,
            padding_size=args.max_seq_len)
    elif args.do_val:
        # FIX: this branch was a second `elif args.do_val:` after
        # `if args.do_val:` and therefore unreachable dead code. The evident
        # intent is: standalone evaluation (no training) runs on the test
        # split — confirm against the data layout.
        test_data_generator = processor.data_generator(
            batch_size=args.batch_size,
            phase='test',
            epoch=1,
            places=place,
            padding_size=args.max_seq_len)
    elif args.do_infer:
        infer_data_generator = processor.data_generator(
            batch_size=args.batch_size,
            phase='infer',
            epoch=1,
            places=place,
            padding_size=args.max_seq_len)

    model.prepare(
        optimizer,
        CrossEntropy(),
        Accuracy(topk=(1, )),
        inputs,
        labels,
        device=place)

    if args.do_train:
        if args.init_checkpoint:
            model.load(args.init_checkpoint)
    elif args.do_val or args.do_infer:
        if not args.init_checkpoint:
            # FIX: the original adjacent string literals concatenated to
            # "set ifonly doing" — missing space restored.
            raise ValueError("args 'init_checkpoint' should be set if "
                             "only doing validation or infer!")
        model.load(args.init_checkpoint, reset_optimizer=True)

    if args.do_train:
        model.fit(train_data=train_data_generator,
                  eval_data=test_data_generator,
                  batch_size=args.batch_size,
                  epochs=args.epoch,
                  save_dir=args.checkpoints,
                  eval_freq=args.eval_freq,
                  save_freq=args.save_freq)
    elif args.do_val:
        eval_result = model.evaluate(
            eval_data=test_data_generator, batch_size=args.batch_size)
        print("Final eval result: acc: {:.4f}, loss: {:.4f}".format(
            eval_result['acc'], eval_result['loss'][0]))
    elif args.do_infer:
        preds = model.predict(test_data=infer_data_generator)
        preds = np.array(preds[0]).reshape((-1, args.num_labels))

        if args.output_dir:
            out_path = os.path.join(args.output_dir, 'predictions.json')
            with open(out_path, 'w') as w:
                for idx, probs in enumerate(preds):
                    # FIX: np.argmax returns a numpy integer, which
                    # json.dumps cannot serialize; cast to plain int.
                    label = int(np.argmax(probs))
                    result = json.dumps({
                        'index': idx,
                        'label': label,
                        'probs': probs.tolist()
                    })
                    w.write(result + '\n')
            print('Predictions saved at ' + os.path.join(
                args.output_dir, 'predictions.json'))