Example #1
import argparse
import logging

# Project-specific helpers (utilities, utils.network, Siamese, VocalSketch_1_1,
# Partitions, PartitionSplit, AllPairs, reciprocal_ranks) are assumed to be
# importable from the surrounding repository.


def main():
    logger = logging.getLogger('logger')
    utilities.configure_logger(logger, console_only=True)
    parser = argparse.ArgumentParser()
    parser.add_argument('model_path', help="Path to the model to be evaluated")
    parser.add_argument(
        '-c',
        '--cuda',
        action='store_true',
        help="Enable calculation on the GPU through CUDA. Defaults to false."
    )

    cli_args = parser.parse_args()
    use_cuda = cli_args.cuda

    model = Siamese(dropout=False)
    if use_cuda:
        model.cuda()
    utils.network.load_model(model, cli_args.model_path, use_cuda=use_cuda)

    model = model.eval()
    data = VocalSketch_1_1()

    partitions = Partitions(data, PartitionSplit(.35, .15, .5))

    dataset = AllPairs(partitions.test)
    rrs = reciprocal_ranks(model, dataset, use_cuda)
    utilities.log_final_stats(rrs)
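For context, a minimal sketch of how this evaluation script would typically be invoked; the file name evaluate.py and the model path are illustrative assumptions, not part of the original example.

# Minimal entry-point sketch; the file name evaluate.py and the model path
# below are hypothetical examples.
#
#   python evaluate.py path/to/model.pt --cuda
#
if __name__ == '__main__':
    main()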
Example #2
import numpy as np
from progress.bar import Bar
from torch.utils.data import DataLoader

# Siamese is assumed to be the project-specific network class used in Example #1.


def siamese_loss(model: Siamese,
                 dataset,
                 objective,
                 use_cuda: bool,
                 batch_size=128):
    """
    Calculates the loss of model over dataset by objective. Optionally run on the GPU.
    :param model: a siamese network
    :param dataset: a dataset of imitation/reference pairs
    :param objective: loss function
    :param use_cuda: whether to run on GPU or not.
    :param batch_size: optional param to set batch_size. Defaults to 128.
    :return:
    """
    model = model.eval()
    dataset.epoch_handler()

    data = DataLoader(dataset, batch_size=batch_size, num_workers=4)
    bar = Bar("Calculating loss", max=len(data))
    batch_losses = np.zeros(len(data))
    for i, (left, right, labels) in enumerate(data):
        labels = labels.float()
        left = left.float()
        right = right.float()

        # reshape tensors and push to GPU if necessary
        left = left.unsqueeze(1)
        right = right.unsqueeze(1)
        if use_cuda:
            left = left.cuda()
            right = right.cuda()
            labels = labels.cuda()

        # pass a batch through the network
        outputs = model(left, right)

        # calculate and record the loss for this batch (no weights are updated here)
        batch_losses[i] = objective(outputs, labels).item()

        bar.next()
    bar.finish()

    return batch_losses
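A short usage sketch for siamese_loss, assuming the model, partitions and use_cuda from Example #1 are in scope, that AllPairs provides the dataset interface expected here (including epoch_handler), and that nn.BCELoss is an illustrative stand-in for the objective.

# Usage sketch; the choice of BCELoss as the objective is an assumption.
import torch.nn as nn

objective = nn.BCELoss()
losses = siamese_loss(model, AllPairs(partitions.test), objective, use_cuda)
print("mean loss over the test pairs: {:.4f}".format(losses.mean()))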
Example #3
import numpy as np
from progress.bar import Bar
from torch.utils.data import dataloader

# Siamese and AllPairs are assumed to be project-specific classes from the
# surrounding repository.


def pairwise_inference_matrix(model: Siamese, pairs_dataset: AllPairs,
                              use_cuda):
    """
    Calculates the pairwise inference matrix for a given model across a set of pairs (typically, all of them).

    :param model: siamese network
    :param pairs_dataset: dataset of desired pairs to calculate pairwise matrix across
    :param use_cuda: bool, whether to run on GPU
    :return: pairwise matrix
    """
    rrs = np.array([])
    pairs = dataloader.DataLoader(pairs_dataset, batch_size=128, num_workers=4)
    model = model.eval()
    bar = Bar("Calculating pairwise inference matrix", max=len(pairs))
    for imitations, references, label in pairs:

        label = label.float()
        imitations = imitations.float()
        references = references.float()

        # reshape tensors and push to GPU if necessary
        imitations = imitations.unsqueeze(1)
        references = references.unsqueeze(1)
        if use_cuda:
            imitations = imitations.cuda()
            references = references.cuda()

        output = model(imitations, references)
        # Detach the gradient, move to cpu, and convert to an ndarray
        np_output = output.detach().cpu().numpy()
        rrs = np.concatenate([rrs, np_output])

        bar.next()
    bar.finish()

    # Reshape vector into matrix
    rrs = rrs.reshape([pairs_dataset.n_imitations, pairs_dataset.n_references])
    return rrs
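A usage sketch showing how the returned matrix is indexed, again assuming the model, partitions and use_cuda from Example #1 are in scope.

# Usage sketch; assumes model, partitions and use_cuda from Example #1.
pairs = AllPairs(partitions.test)
matrix = pairwise_inference_matrix(model, pairs, use_cuda)
# matrix[i, j] is the network's output for imitation i paired with reference j.
print(matrix.shape)  # (pairs.n_imitations, pairs.n_references)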
Example #4
import os

import torch
import torch.nn as nn

# Siamese, get_config and modelSize are assumed to come from the surrounding
# repository.

load_from_torch7 = False

print('Loading model...')
model_dir = 'models/snapshot/'
model_load_path = os.path.join(model_dir, 'snapshot_epoch_1.pt')
gConfig = get_config()
gConfig.model_dir = model_dir

criterion = nn.HingeEmbeddingLoss()
model = Siamese()

package = torch.load(model_load_path)

model.load_state_dict(package['state_dict'])
model.eval()
print('Model loaded from {}'.format(model_load_path))

print('Model configuration:\n{}'.format(model))

model_size, n_params_each_layer = modelSize(model)
print('Model size: {}\n{}'.format(model_size, n_params_each_layer))

params = model.parameters()

for i, a_param in enumerate(params):
    print(a_param)

exit(0)

imagePath = '../data/demo.png'