Example #1
0
def main():
    """Load a serialized model and vocabulary, build the test set, and run inference.

    Reads paths/flags from the module-level ``args`` namespace; all heavy
    lifting is delegated to ``seg_data``, ``transform_data_to_id`` and
    ``inference`` defined elsewhere in this project.
    """
    # Deserialize the trained model checkpoint.
    with open(args.model, 'rb') as model_file:
        model = torch.load(model_file)
    if args.cuda:
        model.cuda()  # move parameters onto the GPU in place

    # Vocabulary: token -> integer-id mapping.
    with open(args.word_path, 'rb') as vocab_file:
        word2id = pickle.load(vocab_file)

    # Segment the raw test text, convert tokens to ids, then append each raw
    # sample's third field to its transformed counterpart.
    segmented = seg_data(args.data)
    id_samples = transform_data_to_id(segmented, word2id)
    samples = [ids + [raw[2]] for ids, raw in zip(id_samples, segmented)]
    # Sort by the length of the second field so batches stay length-homogeneous.
    samples.sort(key=lambda sample: len(sample[1]))
    print('test data size {:d}'.format(len(samples)))
    inference(model, samples)
Example #2
0
                    help='batch size')
# NOTE(review): default=True combined with action='store_true' makes the flag
# a no-op (CUDA is always on) — likely intended default=False; confirm.
parser.add_argument('--cuda', action='store_true',default=True,
                    help='use CUDA')

args = parser.parse_args()

# Deserialize the trained model. torch.load unpickles arbitrary objects, so
# the checkpoint file must come from a trusted source.
with open(args.model, 'rb') as f:
    model = torch.load(f)
if args.cuda:
    model.cuda()  # moves parameters onto the GPU in place

# Vocabulary: token -> integer-id mapping (cPickle is the Python-2-era module;
# presumably imported/aliased at the top of the file — verify).
with open(args.word_path, 'rb') as f:
    word2id = cPickle.load(f)

# Build the test set: segment the raw text, map tokens to ids, then append
# each raw sample's third field (y[2]) to its transformed counterpart —
# presumably the gold answer/label; TODO confirm against seg_data's output.
raw_data = seg_data(args.data)
transformed_data = transform_data_to_id(raw_data, word2id)
data = [x + [y[2]] for x, y in zip(transformed_data, raw_data)]
# Sort by the length of the second field so batches contain similarly-sized
# sequences (reduces padding in inference()).
data = sorted(data, key=lambda x: len(x[1]))
print( 'test data size {:d}'.format(len(data)))


def inference():
    model.eval()
    predictions = []
    with torch.no_grad():
        for i in range(0, len(data), args.batch_size):
#         for i in range(0, len(data), 3):
            try:
                one = data[i:i + args.batch_size]
    #             print(one)
                query, _ = padding([x[0] for x in one], max_len=50)
Example #3
0
                    help='use CUDA')

args = parser.parse_args()

# Deserialize the trained model. torch.load unpickles arbitrary objects, so
# the checkpoint file must come from a trusted source.
with open(args.model, 'rb') as f:
    model = torch.load(f)
if args.cuda:
    model.cuda()  # moves parameters onto the GPU in place
    print(model)

# Vocabulary: token -> integer-id mapping.
with open(args.word_path, 'rb') as f:
    word2id = pickle.load(f)
    print(len(word2id))

# Build the test set: segment the raw text, map tokens to ids, then append
# each raw sample's third field (y[2]) to its transformed counterpart —
# presumably the gold answer/label; TODO confirm against seg_data's output.
raw_data = seg_data(args.data)
transformed_data = transform_data_to_id(raw_data, word2id)
data = [x + [y[2]] for x, y in zip(transformed_data, raw_data)]
# Sort by the length of the second field so batches contain similarly-sized
# sequences.
data = sorted(data, key=lambda x: len(x[1]))
print('test data size {:d}'.format(len(data)))

# Validation set, prepared identically to the test set.
raw_data_valid = seg_data(args.valid_data)
transformed_data_valid = transform_data_to_id(raw_data_valid, word2id)
dev_data = [x + [y[2]] for x, y in zip(transformed_data_valid, raw_data_valid)]
dev_data = sorted(dev_data, key=lambda x: len(x[1]))
print('valid data size {:d}'.format(len(dev_data)))


def inference():
    model.eval()
    predictions = []
    with torch.no_grad():