Example #1
def train(train_loader, model, criterion, optimizer, epoch, evaluation, logger):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    accuracies = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (g, h, e, target) in enumerate(train_loader):
        
        # Prepare input data
        target = torch.squeeze(target).type(torch.LongTensor)
        if args.cuda:
            g, h, e, target = g.cuda(), h.cuda(), e.cuda(), target.cuda()
        g, h, e, target = Variable(g), Variable(h), Variable(e), Variable(target)

        # Measure data loading time
        data_time.update(time.time() - end)

        def closure():
            optimizer.zero_grad()

            # Compute output
            output = model(g, h, e)
            train_loss = criterion(output, target)

            acc = Variable(evaluation(output.data, target.data, topk=(1,))[0])

            # Logs
            losses.update(train_loss.data[0], g.size(0))
            accuracies.update(acc.data[0], g.size(0))
            # compute gradient and do SGD step
            train_loss.backward()
            return train_loss

        optimizer.step(closure)

        # Measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.log_interval == 0 and i > 0:
            
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Accuracy {acc.val:.4f} ({acc.avg:.4f})'
                  .format(epoch, i, len(train_loader), batch_time=batch_time,
                          data_time=data_time, loss=losses, acc=accuracies))
                          
    logger.log_value('train_epoch_loss', losses.avg)
    logger.log_value('train_epoch_accuracy', accuracies.avg)

    print('Epoch: [{0}] Average Accuracy {acc.avg:.3f}; Average Loss {loss.avg:.3f}; Avg Time x Batch {b_time.avg:.3f}'
          .format(epoch, acc=accuracies, loss=losses, b_time=batch_time))
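All of these examples log metrics through an AverageMeter helper that is not shown on this page. As a reference, here is a minimal sketch of what that helper presumably looks like, modeled on the utility from the official PyTorch ImageNet example; the actual implementation behind these snippets may differ slightly (for instance, some examples feed it array-like values and later index acc.avg[i]).

class AverageMeter(object):
    """Keeps the latest value, running sum, count and average of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        # val: latest value (scalar or array-like); n: number of samples it covers
        self.val = val
        self.sum = self.sum + val * n
        self.count += n
        self.avg = self.sum / self.count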
Example #2
def validate(val_loader, model, criterion, evaluation, logger=None):
    losses = AverageMeter()
    accuracies = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    for i, (g, h, e, target) in enumerate(val_loader):

        # Prepare input data
        target = torch.squeeze(target).type(torch.LongTensor)
        if args.cuda:
            g, h, e, target = g.cuda(), h.cuda(), e.cuda(), target.cuda()
        g, h, e, target = Variable(g), Variable(h), Variable(e), Variable(
            target)

        # Compute output
        output = model(g, h, e)

        # Logs
        test_loss = criterion(output, target)
        acc = Variable(evaluation(output.data, target.data, topk=(1, ))[0])

        losses.update(test_loss.data[0], g.size(0))
        accuracies.update(acc.data[0], g.size(0))

    print(' * Average Accuracy {acc.avg:.3f}; Average Loss {loss.avg:.3f}'.
          format(acc=accuracies, loss=losses))

    if logger is not None:
        logger.log_value('test_epoch_loss', losses.avg)
        logger.log_value('test_epoch_accuracy', accuracies.avg)

    return accuracies.avg
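Examples #1, #2 and #4 compute accuracy through evaluation(output, target, topk=(1,)) and read the first element of the result. That helper is also not shown here; a plausible minimal sketch, following the standard top-k accuracy function from the PyTorch ImageNet example, is:

def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k (sketch)."""
    maxk = max(topk)
    batch_size = target.size(0)

    # Indices of the top-k predictions for each sample
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].contiguous().view(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res

Example #3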
def test(test_loader, train_loader, net, cuda, evaluation):
    batch_time = AverageMeter()
    acc = AverageMeter()

    eval_k = (1, 3, 5)

    # Switch to eval mode
    net.eval()

    end = time.time()

    for i, (h1, am1, g_size1, target1) in enumerate(test_loader):
        # Prepare input data
        if cuda:
            h1, am1, g_size1, target1 = h1.cuda(), am1.cuda(), g_size1.cuda(), target1.cuda()
        h1, am1, g_size1, target1 = Variable(h1), Variable(am1), Variable(g_size1), Variable(target1)

        D_aux = []
        T_aux = []
        for j, (h2, am2, g_size2, target2) in enumerate(train_loader):
            # Prepare input data
            if cuda:
                h2, am2, g_size2, target2 = h2.cuda(), am2.cuda(), g_size2.cuda(), target2.cuda()
            h2, am2, g_size2, target2 = Variable(h2), Variable(am2), Variable(g_size2), Variable(target2)

            d = net(
                h1.expand(h2.size(0), h1.size(1), h1.size(2)),
                am1.expand(am2.size(0), am1.size(1), am1.size(2), am1.size(2)),
                g_size1.expand_as(g_size2), h2, am2, g_size2)

            D_aux.append(d)
            T_aux.append(target2)

        D = torch.cat(D_aux)
        train_target = torch.cat(T_aux, 0)

        bacc = evaluation(D,
                          target1.expand_as(train_target),
                          train_target,
                          k=eval_k)

        # Measure elapsed time
        acc.update(bacc, h1.size(0))
        batch_time.update(time.time() - end)
        end = time.time()

    print('Test distance:')
    for i in range(len(eval_k)):
        print(
            '\t* {k}-NN; Average Acc {acc:.3f}; Avg Time x Batch {b_time.avg:.3f}'
            .format(k=eval_k[i], acc=acc.avg[i], b_time=batch_time))
    return acc
Example #4
def validate_with_output(val_loader,
                         model,
                         criterion,
                         evaluation,
                         logger=None):
    losses = AverageMeter()
    accuracies = AverageMeter()
    activation = {}

    def get_activation(name):
        # Forward hook that accumulates this layer's outputs across batches
        def hook(model, input, output):
            if name in activation:
                activation[name] = torch.cat(
                    [activation[name], output.detach()])
            else:
                activation[name] = output.detach()

        return hook

    model.r.learn_modules[0].fcs[2].register_forward_hook(
        get_activation('fc2'))
    # x = torch.randn(1, 25)

    # switch to evaluate mode
    model.eval()

    for i, (g, h, e, target) in enumerate(val_loader):

        # Prepare input data
        target = torch.squeeze(target).type(torch.LongTensor)
        if args.cuda:
            g, h, e, target = g.cuda(), h.cuda(), e.cuda(), target.cuda()
        g, h, e, target = Variable(g), Variable(h), Variable(e), Variable(
            target)

        # Compute output
        output = model(g, h, e)

        # Logs
        test_loss = criterion(output, target)
        acc = Variable(evaluation(output.data, target.data, topk=(1, ))[0])

        losses.update(test_loss.data, g.size(0))
        accuracies.update(acc.data, g.size(0))

    np.save('acs.npy', activation['fc2'].cpu().numpy())
Example #5
def validate(val_loader, model, criterion, evaluation, logger=None):
    batch_time = AverageMeter()
    losses = AverageMeter()
    error_ratio = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    for i, (g, h, e, target) in enumerate(val_loader):

        # Prepare input data
        if args.cuda:
            g, h, e, target = g.cuda(), h.cuda(), e.cuda(), target.cuda()
        g, h, e, target = Variable(g), Variable(h), Variable(e), Variable(
            target)

        # Compute output
        output = model(g, h, e)

        # Logs
        losses.update(criterion(output, target).data[0], g.size(0))
        error_ratio.update(evaluation(output, target).data[0], g.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.log_interval == 0 and i > 0:

            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Error Ratio {err.val:.4f} ({err.avg:.4f})'.format(
                      i,
                      len(val_loader),
                      batch_time=batch_time,
                      loss=losses,
                      err=error_ratio))

    print(' * Average Error Ratio {err.avg:.3f}; Average Loss {loss.avg:.3f}'.
          format(err=error_ratio, loss=losses))

    if logger is not None:
        logger.log_value('test_epoch_loss', losses.avg)
        logger.log_value('test_epoch_error_ratio', error_ratio.avg)
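Examples #1 through #8 are written against the pre-0.4 PyTorch API: inputs are wrapped in Variable, scalar values are read with loss.data[0], and evaluation uses volatile=True. On PyTorch 0.4 and later these idioms are deprecated. The equivalent modern pattern, which examples #9 and #10 below already follow, is roughly the following self-contained sketch:

import torch

# Modern (PyTorch >= 0.4) equivalents of the deprecated idioms used above
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

x = torch.randn(4, 3, device=device)                # no Variable wrapper needed
w = torch.randn(3, 1, device=device, requires_grad=True)

loss = (x @ w).pow(2).mean()
loss.backward()
print(loss.item())                                  # .item() replaces loss.data[0]

with torch.no_grad():                               # replaces volatile=True
    y = x @ w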
Example #6
def train(train_loader, model, criterion, optimizer, epoch, evaluation,
          logger):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    error_ratio = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (g, h, e, target) in enumerate(train_loader):

        # Prepare input data
        if args.cuda:
            g, h, e, target = g.cuda(), h.cuda(), e.cuda(), target.cuda()
        g, h, e, target = Variable(g), Variable(h), Variable(e), Variable(
            target)

        # Measure data loading time
        data_time.update(time.time() - end)

        optimizer.zero_grad()

        # Compute output
        output = model(g, h, e)
        train_loss = criterion(output, target)

        # Logs
        losses.update(train_loss.data[0], g.size(0))
        error_ratio.update(evaluation(output, target).data[0], g.size(0))

        # compute gradient and do SGD step
        train_loss.backward()
        optimizer.step()

        # Measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.log_interval == 0 and i > 0:

            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Error Ratio {err.val:.4f} ({err.avg:.4f})'.format(
                      epoch,
                      i,
                      len(train_loader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      err=error_ratio))

    logger.log_value('train_epoch_loss', losses.avg)
    logger.log_value('train_epoch_error_ratio', error_ratio.avg)

    print(
        'Epoch: [{0}] Avg Error Ratio {err.avg:.3f}; Average Loss {loss.avg:.3f}; Avg Time x Batch {b_time.avg:.3f}'
        .format(epoch, err=error_ratio, loss=losses, b_time=batch_time))
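Example #7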
def validation(test_loader, net, cuda, criterion, evaluation):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acc = AverageMeter()

    # switch to eval mode
    net.eval()

    end = time.time()

    for i, (h, am, g_size, target) in enumerate(test_loader):
        # Prepare input data
        if cuda:
            h, am, g_size, target = h.cuda(), am.cuda(), g_size.cuda(), target.cuda()
        h, am, g_size, target = Variable(h, volatile=True), Variable(am, volatile=True), Variable(g_size, volatile=True), Variable(target, volatile=True)

        # Measure data loading time
        data_time.update(time.time() - end)

        # Compute features
        output = net(h, am, g_size)

        loss = criterion(output, target)
        bacc = evaluation(output, target)
        
        # Logs
        losses.update(loss.data[0], h.size(0))
        acc.update(bacc[0].data[0], h.size(0))

        # Measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

    print('Test: Average Loss {loss.avg:.3f}; Average Acc {acc.avg:.3f}; Avg Time x Batch {b_time.avg:.3f}'
          .format(loss=losses, acc=acc, b_time=batch_time))

    return losses, acc
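Example #8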
def train(train_loader, net, optimizer, cuda, criterion, epoch):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    # switch to train mode
    net.train()

    end = time.time()

    for i, (h, am, g_size, target) in enumerate(train_loader):
        # Prepare input data
        if cuda:
            h, am, g_size, target = h.cuda(), am.cuda(), g_size.cuda(), target.cuda()
        h, am, g_size, target = Variable(h), Variable(am), Variable(g_size), Variable(target)

        # Measure data loading time
        data_time.update(time.time() - end)

        optimizer.zero_grad()

        # Compute features
        output = net(h, am, g_size)

        loss = criterion(output, target)

        # Logs
        losses.update(loss.data[0], h.size(0))

        # Compute gradient and do SGD step
        loss.backward()
        optimizer.step()

        # Measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

    print('Epoch: [{0}] Average Loss {loss.avg:.3f}; Avg Time x Batch {b_time.avg:.3f}'
          .format(epoch, loss=losses, b_time=batch_time))

    return losses
Example #9
def train(train_loader, model, criterion, optimizer, epoch, evaluation,
          logger):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    error_ratio = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (batch_size, g, b, x, e_d, e_src, e_tgt,
            target) in enumerate(train_loader):
        e_tgt = e_tgt.to_sparse()

        if args.cuda:
            g, b, x, e_d, e_src, e_tgt, target = map(
                lambda a: a.cuda(), (g, b, x, e_d, e_src, e_tgt, target))
        #g,b,x,e_d,e_src,e_tgt,target = map(lambda a:Variable(a), (g,b,x,e_d,e_src,e_tgt,target))

        # Measure data loading time
        data_time.update(time.time() - end)

        optimizer.zero_grad()
        # Compute output
        train_loss = torch.zeros((), )
        output = model(node_features=x,
                       edge_features=e_d,
                       Esrc=e_src,
                       Etgt=e_tgt,
                       batch=b)
        train_loss = criterion(output, target)

        # Logs
        losses.update(train_loss.item(), batch_size)
        error_ratio.update(evaluation(output, target).item(), batch_size)

        # compute gradient and do SGD step
        train_loss.backward()
        optimizer.step()

        # Measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.log_interval == 0 and i > 0:

            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Error Ratio {err.val:.4f} ({err.avg:.4f})'.format(
                      epoch,
                      i,
                      len(train_loader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      err=error_ratio),
                  flush=True)

    logger.log_value('train_epoch_loss', losses.avg)
    logger.log_value('train_epoch_error_ratio', error_ratio.avg)

    print(
        'Epoch: [{0}] Avg Error Ratio {err.avg:.3f}; Average Loss {loss.avg:.3f}; Avg Time x Batch {b_time.avg:.3f}'
        .format(epoch, err=error_ratio, loss=losses, b_time=batch_time),
        flush=True)
Example #10
def validate(val_loader, model, criterion, evaluation, logger=None):
    batch_time = AverageMeter()
    losses = AverageMeter()
    error_ratio = AverageMeter()

    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        end = time.time()
        for i, (batch_size, g, b, x, e_d, e_src, e_tgt,
                target) in enumerate(val_loader):
            e_tgt = e_tgt.to_sparse()  # assign the result (to_sparse is not in-place), as in the training loop

            if args.cuda:
                g, b, x, e_d, e_src, e_tgt, target = map(
                    lambda a: a.cuda(), (g, b, x, e_d, e_src, e_tgt, target))
            #g,b,x,e_d,e_src,e_tgt,target = map(lambda a:Variable(a), (g,b,x,e_d,e_src,e_tgt,target))

            # Compute output
            train_loss = torch.zeros((), )
            output = model(node_features=x,
                           edge_features=e_d,
                           Esrc=e_src,
                           Etgt=e_tgt,
                           batch=b)

            # Logs
            losses.update(criterion(output, target).item(), batch_size)
            error_ratio.update(evaluation(output, target).item(), batch_size)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.log_interval == 0 and i > 0:

                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Error Ratio {err.val:.4f} ({err.avg:.4f})'.format(
                          i,
                          len(val_loader),
                          batch_time=batch_time,
                          loss=losses,
                          err=error_ratio),
                      flush=True)
            #end if
        #end for
    #end torch.no_grad

    print(' * Average Error Ratio {err.avg:.3f}; Average Loss {loss.avg:.3f}'.
          format(err=error_ratio, loss=losses),
          flush=True)

    if logger is not None:
        logger.log_value('test_epoch_loss', losses.avg)
        logger.log_value('test_epoch_error_ratio', error_ratio.avg)

    return error_ratio.avg
Example #11
def validation(test_loader, net, cuda, criterion, evaluation):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acc = AverageMeter()

    # switch to evaluate mode
    net.eval()

    end = time.time()

    for i, (h1, am1, g_size1, h2, am2, g_size2,
            target) in enumerate(test_loader):
        # Prepare input data
        if cuda:
            h1, am1, g_size1 = h1.cuda(), am1.cuda(), g_size1.cuda()
            h2, am2, g_size2 = h2.cuda(), am2.cuda(), g_size2.cuda()
            target = target.cuda()
        h1, am1, g_size1 = Variable(h1, volatile=True), Variable(am1, volatile=True), Variable(g_size1, volatile=True)
        h2, am2, g_size2 = Variable(h2, volatile=True), Variable(am2, volatile=True), Variable(g_size2, volatile=True)
        target = Variable(target, volatile=True)

        # Measure data loading time
        data_time.update(time.time() - end)

        # Compute features
        output1 = net(h1, am1, g_size1)
        output2 = net(h2, am2, g_size2)

        output = output1 - output2
        output = output.pow(2).sum(1).sqrt()

        loss = criterion(output, target)
        bacc = evaluation(output, target)

        # Logs
        losses.update(loss.data[0], h1.size(0))
        acc.update(bacc[0].data[0], h1.size(0))

        # Measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

    print(
        'Test: Average Loss {loss.avg:.3f}; Average Acc {acc.avg:.3f}; Avg Time x Batch {b_time.avg:.3f}'
        .format(loss=losses, acc=acc, b_time=batch_time))

    return losses, acc
Example #12
def train(train_loader, net, optimizer, cuda, criterion, epoch):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    # switch to train mode
    net.train()

    end = time.time()

    for i, (h1, am1, g_size1, h2, am2, g_size2,
            target) in enumerate(train_loader):
        # Prepare input data
        if cuda:
            h1, am1, g_size1 = h1.cuda(), am1.cuda(), g_size1.cuda()
            h2, am2, g_size2 = h2.cuda(), am2.cuda(), g_size2.cuda()
            target = target.cuda()
        h1, am1, g_size1 = Variable(h1), Variable(am1), Variable(g_size1)
        h2, am2, g_size2 = Variable(h2), Variable(am2), Variable(g_size2)
        target = Variable(target)

        # Measure data loading time
        data_time.update(time.time() - end)

        optimizer.zero_grad()

        # Compute features
        output1 = net(h1, am1, g_size1)
        output2 = net(h2, am2, g_size2)

        output = output1 - output2
        output = output.pow(2).sum(1).sqrt()

        loss = criterion(output, target)

        # Logs
        losses.update(loss.data[0], h1.size(0))

        # Compute gradient and do SGD step
        loss.backward()
        optimizer.step()

        # Measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

    print(
        'Epoch: [{0}] Average Loss {loss.avg:.3f}; Avg Time x Batch {b_time.avg:.3f}'
        .format(epoch, loss=losses, b_time=batch_time))

    return losses
Example #13
def test(test_loader, train_loader, net, cuda, evaluation):
    batch_time = AverageMeter()
    acc = AverageMeter()

    eval_k = (1, 3, 5)

    # switch to eval mode
    net.eval()

    end = time.time()

    for i, (h1, am1, g_size1, target1) in enumerate(test_loader):
        # Prepare input data
        if cuda:
            h1, am1, g_size1, target1 = h1.cuda(), am1.cuda(), g_size1.cuda(), target1.cuda()
        h1, am1, g_size1, target1 = (Variable(h1, volatile=True), Variable(am1, volatile=True),
                                     Variable(g_size1, volatile=True), Variable(target1, volatile=True))

        # Compute features
        output1 = net(h1, am1, g_size1)

        D_aux = []
        T_aux = []
        for j, (h2, am2, g_size2, target2) in enumerate(train_loader):
            # Prepare input data
            if cuda:
                h2, am2, g_size2, target2 = h2.cuda(), am2.cuda(), g_size2.cuda(), target2.cuda()
            h2, am2, g_size2, target2 = (Variable(h2, volatile=True), Variable(am2, volatile=True),
                                         Variable(g_size2, volatile=True), Variable(target2, volatile=True))

            # Compute features
            output2 = net(h2, am2, g_size2)

            twoab = 2 * output1.mm(output2.t())
            dist = (output1 * output1).sum(1).expand_as(twoab) + (
                output2 * output2).sum(1).expand_as(twoab) - twoab
            dist = dist.sqrt().squeeze()

            D_aux.append(dist)
            T_aux.append(target2)

        D = torch.cat(D_aux)
        train_target = torch.cat(T_aux, 0)
        bacc = evaluation(D,
                          target1.expand_as(train_target),
                          train_target,
                          k=eval_k)

        # Measure elapsed time
        acc.update(bacc, h1.size(0))
        batch_time.update(time.time() - end)
        end = time.time()

    print('Test distance:')
    for i in range(len(eval_k)):
        print(
            '\t* {k}-NN; Average Acc {acc:.3f}; Avg Time x Batch {b_time.avg:.3f}'
            .format(k=eval_k[i], acc=acc.avg[i], b_time=batch_time))

    return acc
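The inner loop of Example #13 builds the Euclidean distance from the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2ab. On PyTorch 1.1 and later the same distances can be obtained directly with torch.cdist; a small self-contained sketch comparing the two (the tensor shapes and names here are illustrative, not taken from the example):

import torch

output1 = torch.randn(1, 64)     # embedding of one test graph
output2 = torch.randn(8, 64)     # embeddings of a batch of train graphs

# Manual expansion, as in the loop above (clamped before sqrt for numerical safety)
twoab = 2 * output1.mm(output2.t())
dist_manual = ((output1 * output1).sum(1, keepdim=True)
               + (output2 * output2).sum(1).unsqueeze(0)
               - twoab).clamp(min=0).sqrt().squeeze()

# Equivalent one-liner
dist_cdist = torch.cdist(output1, output2).squeeze()

print(torch.allclose(dist_manual, dist_cdist, atol=1e-5))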
Example #14
def train(train_loader, net, distance, optimizer, cuda, criterion, epoch):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    # switch to train mode
    net.train()
    distance.train()

    end = time.time()

    for i, (h1, am1, g_size1, h2, am2, g_size2, target) in enumerate(train_loader):
        # Prepare input data
        if cuda:
            h1, am1, g_size1 = h1.cuda(), am1.cuda(), g_size1.cuda()
            h2, am2, g_size2 = h2.cuda(), am2.cuda(), g_size2.cuda()
            target = target.cuda()
        h1, am1, g_size1 = Variable(h1), Variable(am1), Variable(g_size1)
        h2, am2, g_size2 = Variable(h2), Variable(am2), Variable(g_size2)
        target = Variable(target)


        # Measure data loading time
        data_time.update(time.time() - end)

        optimizer.zero_grad()

        # Compute features
        output1 = net(h1, am1, g_size1, output='nodes')
        output2 = net(h2, am2, g_size2, output='nodes')

        # Create a mask for nodes
        node_mask2 = torch.arange(0, h2.size(1)).unsqueeze(0).unsqueeze(-1).expand(h2.size(0),
                                                                                   h2.size(1),
                                                                                   output1.size(2)).long()
        node_mask1 = torch.arange(0, h1.size(1)).unsqueeze(0).unsqueeze(-1).expand(h1.size(0),
                                                                                   h1.size(1),
                                                                                   output1.size(2)).long()

        if h1.is_cuda:
            node_mask1 = node_mask1.cuda()
            node_mask2 = node_mask2.cuda()

        node_mask1 = Variable(node_mask1)
        node_mask2 = Variable(node_mask2)

        node_mask1 = (node_mask1 >= g_size1.unsqueeze(-1).unsqueeze(-1).expand_as(node_mask1))
        node_mask2 = (node_mask2 >= g_size2.unsqueeze(-1).unsqueeze(-1).expand_as(node_mask2))


        output1.register_hook(lambda grad: grad.masked_fill_(node_mask1, 0))
        output2.register_hook(lambda grad: grad.masked_fill_(node_mask2, 0))

        output = distance(output1, am1, g_size1, output2, am2, g_size2)

        loss = criterion(output, target)

        # Logs
        losses.update(loss.data[0], h1.size(0))

        # Compute gradient and do SGD step
        loss.backward()
        optimizer.step()

        # Measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

    print('Epoch: [{0}] Average Loss {loss.avg:.3f}; Avg Time x Batch {b_time.avg:.3f}'
          .format(epoch, loss=losses, b_time=batch_time))

    return losses
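Example #14 registers backward hooks on the node embeddings so that gradients flowing into padded (non-existent) nodes are zeroed before backpropagation continues. A minimal self-contained illustration of the same trick on a toy tensor:

import torch

x = torch.ones(4, 3, requires_grad=True)
y = x * 2

# Mask marking the last two rows as padding
pad_mask = torch.tensor([[False], [False], [True], [True]]).expand(4, 3)

# Zero the gradient of padded positions as it flows back through y
y.register_hook(lambda grad: grad.masked_fill(pad_mask, 0))

y.sum().backward()
print(x.grad)   # rows 0 and 1 receive gradient 2, rows 2 and 3 receive 0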
Example #15
def test(test_loader, train_loader, net, distance, cuda, evaluation):
    batch_time = AverageMeter()
    acc = AverageMeter()

    eval_k = (1, 3, 5)

    # switch to evaluate mode
    net.eval()
    distance.eval()

    end = time.time()

    for i, (h1, am1, g_size1, target1) in enumerate(test_loader):
        # Prepare input data
        if cuda:
            h1, am1, g_size1, target1 = h1.cuda(), am1.cuda(), g_size1.cuda(), target1.cuda()
        h1, am1, g_size1, target1 = Variable(h1, volatile=True), Variable(am1, volatile=True), Variable(g_size1, volatile=True), Variable(target1, volatile=True)

        # Compute features
        output1 = net(h1, am1, g_size1, output='nodes')

        D_aux = []
        T_aux = []
        for j, (h2, am2, g_size2, target2) in enumerate(train_loader):
            # Prepare input data
            if cuda:
                h2, am2, g_size2, target2 = h2.cuda(), am2.cuda(), g_size2.cuda(), target2.cuda()
            h2, am2, g_size2, target2 = Variable(h2, volatile=True), Variable(am2, volatile=True), Variable(g_size2, volatile=True), Variable(target2, volatile=True)

            # Compute features
            output2 = net(h2, am2, g_size2, output='nodes')

            # Expand test sample to make all the pairs with the train
            dist = distance(output1.expand(h2.size(0), output1.size(1), output1.size(2)),
                    am1.expand(am2.size(0), am1.size(1), am1.size(2), am1.size(3)), g_size1.expand(g_size2.size(0)), output2, am2, g_size2)

            D_aux.append(dist)
            T_aux.append(target2)

        D = torch.cat(D_aux)
        train_target = torch.cat(T_aux, 0)

        if evaluation.__name__ == 'knn':
            bacc = evaluation(D, target1, train_target, k=eval_k)
        else:
            _, ind_min = torch.min(D, 0)
            ind_min = int(ind_min)
            if ind_min == 0:
                D = D[1:]
                train_target = train_target[1:]
            elif ind_min+1 == D.size(0):
                D = D[:-1]
                train_target = train_target[:-1]
            else:
                D = torch.cat([D[:int(ind_min)], D[int(ind_min) + 1:]])
                train_target = torch.cat([train_target[:int(ind_min)], train_target[int(ind_min) + 1:]])
            bacc = evaluation(D, target1, train_target)

        # Measure elapsed time
        acc.update(bacc, h1.size(0))
        batch_time.update(time.time() - end)
        end = time.time()

        print(str(i)+'/'+str(len(test_loader)))

    print('Test distance:')
    if evaluation.__name__ == 'knn':
        for i in range(len(eval_k)):
            print('\t* {k}-NN; Average Acc {acc:.3f}; Avg Time x Batch {b_time.avg:.3f}'.format(k=eval_k[i], acc=acc.avg[i], b_time=batch_time))
    else:
        print('\t* MAP {acc:.3f}; Avg Time x Batch {b_time.avg:.3f}'.format(acc=acc.avg, b_time=batch_time))
    return acc