def translate(args, net, src_vocab, tgt_vocab):
    """Translate the sentences in args.text, decoding with greedy or beam search."""
    translated = []
    infer_dataset = ParallelDataset(args.text, args.ref_text, src_vocab, tgt_vocab)
    if args.batch_size is not None:
        infer_dataset.BATCH_SIZE = args.batch_size
    if args.max_batch_size is not None:
        infer_dataset.max_batch_size = args.max_batch_size
    if args.tokens_per_batch is not None:
        infer_dataset.tokens_per_batch = args.tokens_per_batch

    infer_dataiter = iter(infer_dataset.get_iterator(True, True))
    for raw_batch in infer_dataiter:
        # mask out padding positions in the source batch
        src = raw_batch.src
        src_mask = (src != src_vocab.stoi[config.PAD]).unsqueeze(-2)
        if args.use_cuda:
            src, src_mask = src.cuda(), src_mask.cuda()
        # decode with greedy search or beam search
        if args.greedy:
            generated, gen_len = greedy(args, net, src, src_mask, src_vocab, tgt_vocab)
        else:
            generated, gen_len = generate_beam(args, net, src, src_mask, src_vocab, tgt_vocab)
        # convert generated token ids back to sentences
        new_translations = gen_batch2str(src, raw_batch.tgt, generated, gen_len, src_vocab, tgt_vocab)
        for res_sent in new_translations:
            print(res_sent)
        translated.extend(new_translations)
    return translated
def translate(args, net, src_vocab, tgt_vocab, active_out=None):
    """Translate the sentences in args.text with greedy decoding, keeping track of
    the original sentence order via batch indices."""
    translated = []
    infer_dataset = ParallelDataset(args.text, args.ref_text, src_vocab, tgt_vocab)
    if args.batch_size is not None:
        infer_dataset.BATCH_SIZE = args.batch_size
    if args.max_batch_size is not None:
        infer_dataset.max_batch_size = args.max_batch_size
    if args.tokens_per_batch is not None:
        infer_dataset.tokens_per_batch = args.tokens_per_batch

    infer_dataiter = iter(
        infer_dataset.get_iterator(shuffle=True, group_by_size=True, include_indices=True))
    for raw_batch, indices in infer_dataiter:
        # mask out padding positions in the source batch
        src = raw_batch.src
        src_mask = (src != src_vocab.stoi[config.PAD]).unsqueeze(-2)
        if args.use_cuda:
            src, src_mask = src.cuda(), src_mask.cuda()
        generated, gen_len = greedy(args, net, src, src_mask, src_vocab, tgt_vocab)
        # convert generated token ids back to sentences, mapping them to their
        # original positions via `indices` and optionally writing to `active_out`
        new_translations = gen_batch2str(src, raw_batch.tgt, generated, gen_len,
                                         src_vocab, tgt_vocab, indices, active_out)
        translated.extend(new_translations)
    return translated
def translate(args, net, src_vocab, tgt_vocab):
    """Translate the sentences in args.text, batched greedy decoding or
    sentence-by-sentence translation."""
    sentences = [l.split() for l in args.text]
    translated = []
    if args.greedy:
        infer_dataset = ParallelDataset(args.text, args.ref_text, src_vocab, tgt_vocab)
        if args.batch_size is not None:
            infer_dataset.BATCH_SIZE = args.batch_size
        if args.max_batch_size is not None:
            infer_dataset.max_batch_size = args.max_batch_size
        if args.tokens_per_batch is not None:
            infer_dataset.tokens_per_batch = args.tokens_per_batch

        infer_dataiter = iter(infer_dataset.get_iterator(True, True))
        for raw_batch in infer_dataiter:
            # mask out padding positions in the source batch
            src = raw_batch.src
            src_mask = (src != src_vocab.stoi[config.PAD]).unsqueeze(-2)
            if args.use_cuda:
                src, src_mask = src.cuda(), src_mask.cuda()
            generated, gen_len = greedy(args, net, src, src_mask, src_vocab, tgt_vocab)
            # convert generated token ids back to sentences
            new_translations = gen_batch2str(src, raw_batch.tgt, generated, gen_len, src_vocab, tgt_vocab)
            for res_sent in new_translations:
                print(res_sent)
            translated.extend(new_translations)
    else:
        # fall back to sentence-by-sentence decoding
        for sentence in sentences:
            s_trans = translate_sentence(sentence, net, args, src_vocab, tgt_vocab)
            s_trans = remove_special_tok(remove_bpe(s_trans))
            translated.append(s_trans)
            print(translated[-1])
    return translated
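# A minimal usage sketch for the translate() variants above, assuming an
# argparse-style namespace. The attribute names (text, ref_text, batch_size,
# max_batch_size, tokens_per_batch, use_cuda, greedy) mirror those read by the
# functions; everything else here (the file paths and the load_model helper)
# is hypothetical and only illustrates how the pieces might fit together.
from argparse import Namespace

if __name__ == '__main__':
    args = Namespace(
        text=open('test.src', encoding='utf-8').read().splitlines(),      # hypothetical input file
        ref_text=open('test.ref', encoding='utf-8').read().splitlines(),  # hypothetical reference file
        batch_size=32,
        max_batch_size=None,
        tokens_per_batch=None,
        use_cuda=False,
        greedy=True,
    )
    # load_model is a placeholder for however this codebase restores the
    # checkpoint and the source/target vocabularies.
    net, src_vocab, tgt_vocab = load_model(args)
    hypotheses = translate(args, net, src_vocab, tgt_vocab)
    with open('test.hyp', 'w', encoding='utf-8') as f:
        f.write('\n'.join(hypotheses) + '\n')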
def load_para_data(params, data):
    """
    Load parallel data.
    """
    data['para'] = {}

    required_para_train = set(params.clm_steps + params.mlm_steps + params.pc_steps + params.mt_steps)

    for src, tgt in params.para_dataset.keys():

        logger.info('============ Parallel data (%s-%s)' % (src, tgt))

        assert (src, tgt) not in data['para']
        data['para'][(src, tgt)] = {}

        for splt in ['train', 'valid', 'test']:

            # no need to load training data for evaluation
            if splt == 'train' and params.eval_only:
                continue

            # for back-translation, we can't load training data
            if splt == 'train' and (src, tgt) not in required_para_train and (tgt, src) not in required_para_train:
                continue

            # load binarized datasets
            src_path, tgt_path = params.para_dataset[(src, tgt)][splt]
            src_data = load_binarized(src_path, params)
            tgt_data = load_binarized(tgt_path, params)

            # update dictionary parameters
            set_dico_parameters(params, data, src_data['dico'])
            set_dico_parameters(params, data, tgt_data['dico'])

            # create ParallelDataset
            dataset = ParallelDataset(
                src_data['sentences'], src_data['positions'],
                tgt_data['sentences'], tgt_data['positions'],
                params
            )

            # remove empty and too long sentences
            if splt == 'train':
                dataset.remove_empty_sentences()
                dataset.remove_long_sentences(params.max_len)

            # for validation and test set, enumerate sentence per sentence
            if splt != 'train':
                dataset.tokens_per_batch = -1

            # if there are several processes on the same machine, we can split the dataset
            if splt == 'train' and params.n_gpu_per_node > 1 and params.split_data:
                n_sent = len(dataset) // params.n_gpu_per_node
                a = n_sent * params.local_rank
                b = n_sent * params.local_rank + n_sent
                dataset.select_data(a, b)

            data['para'][(src, tgt)][splt] = dataset

        logger.info("")

    logger.info("")
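# A minimal sketch of how the loaded parallel data might be consumed, assuming
# the layout built above where data['para'][(src, tgt)][split] holds a
# ParallelDataset. The (src, tgt) pair, the split name, the get_iterator
# keyword arguments, and the batch layout are assumptions; check the actual
# ParallelDataset API in this codebase before relying on them.
def iterate_para_split(data, src, tgt, splt='valid'):
    dataset = data['para'][(src, tgt)][splt]
    # group_by_size batches sentences of similar length together to reduce
    # padding; shuffling is typically disabled for valid/test splits.
    for batch in dataset.get_iterator(shuffle=False, group_by_size=True):
        (x1, len1), (x2, len2) = batch  # source / target token ids and lengths (assumed layout)
        yield x1, len1, x2, len2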