# Example #1
# 0
    # Evaluate the model over every test batch: accumulate the loss, collect
    # (prediction, reference, color) triples for later inspection, and
    # periodically log the running mean loss to the summary writer.
    for i, batch in enumerate(test_batches):
        # Each batch unpacks to (ids, padded target matrix, per-sequence
        # lengths, conditioning input); id_slice is unpacked but unused here.
        (id_slice, padded_y_slice, y_slice_lengths, x_slice) = batch
        test_loss, test_predictions = model.forward(padded_y_slice,
                                                    y_slice_lengths, x_slice)

        # Predictions come back as a list of tensors; concatenate along dim 0,
        # then transpose so iteration below is per-example.
        # NOTE(review): assumes (time, batch) layout before the transpose —
        # confirm against model.forward.
        test_predictions = torch.cat(test_predictions, 0)
        test_predictions = test_predictions.transpose(0, 1)
        references = padded_y_slice.transpose(0, 1)
        color_vectors = x_slice.data.cpu().numpy()[0]  # a bit ugly

        # Decode index sequences back to token strings and record one
        # (predicted tokens, reference tokens, stringified color vector)
        # triple per example.
        for test_prediction, reference, color in zip(test_predictions,
                                                     references,
                                                     color_vectors):
            comparison.append(([
                v.string for v in vocab.indices2tokens(test_prediction.data)
            ], [v.string for v in vocab.indices2tokens(reference.data)],
                               [str(ck) for ck in color]))

        total_batches_test += 1
        # .data[0] extracts the scalar loss value (pre-0.4 PyTorch idiom;
        # modern PyTorch would use test_loss.item()).
        total_loss_test += test_loss.data[0]

        # Every log_interval batches (skipping batch 0), emit the mean loss
        # since the last log point, then reset the running accumulators.
        # i + delta_test keeps the x-axis monotonically increasing across
        # epochs.
        if i % log_interval == 0 and i > 0:
            writer.add_scalar('test_loss_per_batch',
                              total_loss_test / total_batches_test,
                              i + delta_test)
            total_loss_test = 0
            total_batches_test = 0

    # Advance the global step offset so the next epoch's scalars continue
    # on the same writer x-axis rather than overlapping this epoch's.
    delta_test = delta_test + len(test_batches)
    with open(output_path + str(epoch) + ".txt", 'w') as f: