コード例 #1
0
def train(data_loader, model, optimizer, cuda, criterion, epoch, log_int=20):
    """Run one training epoch over sketch / image / negative-image batches.

    Args:
        data_loader: yields (sk, im, im_neg, w2v, _, _) batches.
        model: (im_net, sk_net) tuple of encoder networks.
        optimizer: optimizer over both encoders' parameters.
        cuda: when True, move batch tensors to the GPU.
        criterion: loss returning (total, semantic, domain, spatial) terms.
        epoch: current epoch index (used for logging only).
        log_int: print progress every `log_int` batches; <= 0 disables it.

    Returns:
        AverageMeters: (losses, losses_sem, losses_dom, losses_spa).
    """
    # Running statistics accumulated over the epoch.
    batch_time = LogMetric.AverageMeter()
    losses_sem = LogMetric.AverageMeter()
    losses_dom = LogMetric.AverageMeter()
    losses_spa = LogMetric.AverageMeter()
    losses = LogMetric.AverageMeter()

    im_net, sk_net = model
    # Put both encoders in training mode and make sure autograd is enabled.
    im_net.train()
    sk_net.train()
    torch.set_grad_enabled(True)

    tic = time.time()
    for step, (sk, im, im_neg, w2v, _, _) in enumerate(data_loader):
        # Move the batch to the GPU when requested.
        if cuda:
            im, im_neg, sk, w2v = (t.cuda() for t in (im, im_neg, sk, w2v))

        optimizer.zero_grad()
        n_samples = im.size(0)

        # Encode and project each input into the shared semantic space.
        im_feat, _ = im_net(im)          # positive image
        im_feat_neg, _ = im_net(im_neg)  # negative image
        sk_feat, _ = sk_net(sk)          # sketch

        # Combined objective plus its three individual terms.
        loss, loss_sem, loss_dom, loss_spa = criterion(
            im_feat, sk_feat, w2v, im_feat_neg, step)

        # Backpropagate and update the parameters.
        loss.backward()
        optimizer.step()

        # Bookkeeping: fold this batch into the epoch statistics.
        losses_sem.update(loss_sem.item(), n_samples)
        losses_dom.update(loss_dom.item(), n_samples)
        losses_spa.update(loss_spa.item(), n_samples)
        losses.update(loss.item(), n_samples)
        batch_time.update(time.time() - tic, n_samples)
        tic = time.time()

        if log_int > 0 and step % log_int == 0:
            print('Epoch: [{0}]({1}/{2}) Average Loss {loss.avg:.3f} ( Sem: {loss_sem.avg} + Dom: {loss_dom.avg} + Spa: {loss_spa.avg}); Avg Time x Batch {b_time.avg:.3f}'
                .format(epoch, step, len(data_loader), loss=losses, loss_sem=losses_sem, loss_dom=losses_dom, loss_spa=losses_spa, b_time=batch_time))
    print('Epoch: [{0}] Average Loss {loss.avg:.3f} ( {loss_sem.avg} + {loss_dom.avg} + {loss_spa.avg} ); Avg Time x Batch {b_time.avg:.3f}'
        .format(epoch, loss=losses, loss_sem=losses_sem, loss_dom=losses_dom, loss_spa=losses_spa, b_time=batch_time))
    return losses, losses_sem, losses_dom, losses_spa
コード例 #2
0
ファイル: train.py プロジェクト: priba/graph_metric.pytorch
def train(data_loader, nets, optimizer, cuda, criterion, epoch):
    """Train the graph embedding network and distance network for one epoch.

    Args:
        data_loader: yields (g1, g2, g3, target) graph batches; g3 is used
            only in triplet mode (args.triplet), target only in pair mode.
        nets: (net, distNet) tuple — embedding network and distance network.
        optimizer: optimizer over both networks' parameters.
        cuda: when True, move graphs and their 'std' tensors to the GPU.
        criterion: loss taking the embedded graphs plus distNet.
        epoch: current epoch index (used for logging only).

    Returns:
        The AverageMeter accumulating the epoch's loss.
    """
    batch_time = LogMetric.AverageMeter()
    batch_load_time = LogMetric.AverageMeter()
    losses = LogMetric.AverageMeter()

    net, distNet = nets
    # switch to train mode
    net.train()
    distNet.train()

    end = time.time()
    for i, (g1, g2, g3, target) in enumerate(data_loader):
        # Prepare input data: move graphs and their per-graph 'std' tensors
        # to the GPU. g3/target are mutually exclusive depending on mode.
        if cuda:
            g1.to(torch.device('cuda'))
            g2.to(torch.device('cuda'))
            g1.gdata['std'], g2.gdata['std'] = g1.gdata['std'].cuda(
            ), g2.gdata['std'].cuda()
            if args.triplet:
                g3.to(torch.device('cuda'))
                g3.gdata['std'] = g3.gdata['std'].cuda()
            else:
                target = target.cuda()

        batch_load_time.update(time.time() - end)
        optimizer.zero_grad()

        # Embed each graph with the shared network.
        g1 = net(g1)
        g2 = net(g2)

        if args.triplet:
            g3 = net(g3)
            loss = criterion(g1, g2, g3, distNet)
        else:
            loss = criterion(g1, g2, target, distNet)

        # Gradients and update
        loss.backward()
        optimizer.step()

        # Save values
        losses.update(loss.item(), g1.batch_size)
        batch_time.update(time.time() - end)
        end = time.time()

        if i > 0 and i % args.log_interval == 0:
            print(
                'Epoch: [{0}]({1}/{2}) Average Loss {loss.avg:.3f}; Avg Time x Batch {b_time.avg:.3f} Avg Load Time x Batch {b_load_time.avg:.3f}'
                .format(epoch,
                        i,
                        len(data_loader),
                        loss=losses,
                        b_time=batch_time,
                        b_load_time=batch_load_time))
    # Fix: the epoch summary previously printed "Avg Time x Batch" twice;
    # the second figure is the load time, matching the per-batch log above.
    print(
        'Epoch: [{0}] Average Loss {loss.avg:.3f}; Avg Time x Batch {b_time.avg:.3f} Avg Load Time x Batch {b_load_time.avg:.3f}'
        .format(epoch,
                loss=losses,
                b_time=batch_time,
                b_load_time=batch_load_time))
    return losses
コード例 #3
0
ファイル: train.py プロジェクト: priba/graph_metric.pytorch
    # Script entry sequence (the enclosing definition is outside this view):
    # parse CLI options, seed the RNGs, validate the test/load flags,
    # set up the run logger, then hand off to main().
    args = Options().parse()
    print('Parameters:\t' + str(args))

    # Check cuda & Set random seed
    args.cuda = args.ngpu > 0 and torch.cuda.is_available()

    # NOTE(review): seeds are applied only when args.seed > 1, so a seed of
    # 0 or 1 leaves the RNGs unseeded — confirm this is intentional.
    if args.seed > 1:
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        if args.cuda:
            torch.cuda.manual_seed(args.seed)

    # Check Test and Load
    if args.test and args.load is None:
        raise Exception('Cannot test without loading a model.')

    if not args.test and args.log is not None:
        print('Initialize logger')
        # Index this run by counting existing run directories with the same
        # batch size, so repeated runs get distinct log/save folders.
        ind = len(
            glob.glob(args.log + '*_run-batchSize_{}'.format(args.batch_size)))
        log_dir = args.log + '{}_run-batchSize_{}/' \
                .format(ind, args.batch_size)
        args.save = args.save + '{}_run-batchSize_{}/' \
                .format(ind, args.batch_size)
        # Create logger
        print('Log dir:\t' + log_dir)
        logger = LogMetric.Logger(log_dir, force=True)

    main()
    sys.exit()
コード例 #4
0
def train(data_loader, model, optimizer, cuda, criterion, epoch, log_int=20):
    """Train the sketch/image encoders for one epoch (DataParallel variant).

    Args:
        data_loader: yields (sk, im, im_neg, w2v, _, _) batches.
        model: (im_net, sk_net) tuple of encoder networks.
        optimizer: optimizer over both encoders' parameters.
        cuda: when True, move batch tensors to 'cuda:0'.
        criterion: DataParallel-wrapped loss; its underlying module is
            invoked directly (see the note at the loss call below).
        epoch: current epoch index (used for logging only).
        log_int: print progress every `log_int` batches; <= 0 disables it.

    Returns:
        AverageMeters: (losses, losses_sem, losses_dom, losses_spa).
    """
    batch_time = LogMetric.AverageMeter()
    losses_sem = LogMetric.AverageMeter()
    losses_dom = LogMetric.AverageMeter()
    losses_spa = LogMetric.AverageMeter()
    losses = LogMetric.AverageMeter()

    # switch to train mode
    im_net, sk_net = model
    im_net.train()
    sk_net.train()
    torch.set_grad_enabled(True)

    end = time.time()
    for i, (sk, im, im_neg, w2v, _, _) in enumerate(data_loader):
        # Prepare input data. Fix: the transfer previously ran
        # unconditionally (the `cuda` flag was ignored via a dead
        # `if cuda: pass` block); it is now honoured so CPU runs work.
        if cuda:
            im, im_neg, sk, w2v = im.to('cuda:0'), im_neg.to('cuda:0'), sk.to(
                'cuda:0'), w2v.to('cuda:0')

        optimizer.zero_grad()
        bs = im.size(0)

        # Encode and project each input into the shared semantic space.
        im_feat, _ = im_net(im)          # positive image
        im_feat_neg, _ = im_net(im_neg)  # negative image
        sk_feat, _ = sk_net(sk)          # sketch

        # LOSS
        # Note: call criterion.module(...) instead of criterion(...).
        # DataParallel stores the wrapped model in self.module and can only
        # parallelise forward(), not backward(), so the loss itself must
        # run un-parallelised on the wrapped module.
        loss, loss_sem, loss_dom, loss_spa = criterion.module(
            im_feat, sk_feat, w2v, im_feat_neg, i)

        # Gradients and update
        loss.backward()
        optimizer.step()

        # Save values
        losses_sem.update(loss_sem.item(), bs)
        losses_dom.update(loss_dom.item(), bs)
        losses_spa.update(loss_spa.item(), bs)
        losses.update(loss.item(), bs)
        batch_time.update(time.time() - end, bs)
        end = time.time()

        if log_int > 0 and i % log_int == 0:
            print(
                'Epoch: [{0}]({1}/{2}) Average Loss {loss.avg:.3f} ( Sem: {loss_sem.avg} + Dom: {loss_dom.avg} + Spa: {loss_spa.avg}); Avg Time x Batch {b_time.avg:.3f}'
                .format(epoch,
                        i,
                        len(data_loader),
                        loss=losses,
                        loss_sem=losses_sem,
                        loss_dom=losses_dom,
                        loss_spa=losses_spa,
                        b_time=batch_time))
    print(
        'Epoch: [{0}] Average Loss {loss.avg:.3f} ( {loss_sem.avg} + {loss_dom.avg} + {loss_spa.avg} ); Avg Time x Batch {b_time.avg:.3f}'
        .format(epoch,
                loss=losses,
                loss_sem=losses_sem,
                loss_dom=losses_dom,
                loss_spa=losses_spa,
                b_time=batch_time))
    return losses, losses_sem, losses_dom, losses_spa
コード例 #5
0
ファイル: testHED.py プロジェクト: priba/graph_metric.pytorch
def test(data_loader, gallery_loader, distance, cuda):
    """Evaluate retrieval: k-NN accuracy and mAP of queries against a gallery.

    Args:
        data_loader: yields (g, target) query graph batches.
        gallery_loader: yields (g, target) gallery graph batches.
        distance: distance network; called with mode='retrieval'.
        cuda: when True, move graphs and their 'std' tensors to the GPU.

    Returns:
        (acc, meanap) AverageMeters with the k-NN accuracy and the mAP.
    """
    batch_time = LogMetric.AverageMeter()
    acc = LogMetric.AverageMeter()
    meanap = LogMetric.AverageMeter()

    # NOTE(review): `end` is assigned but never read in this function —
    # `start` below is what the timing uses.
    end = time.time()
    distance.eval()

    dist_matrix = []
    start = time.time()
    with torch.no_grad():
        # Collect the whole gallery, then merge it into a single batched
        # graph so each query is compared against everything at once.
        g_gallery = []
        target_gallery = []
        for j, (g, target) in enumerate(gallery_loader):
            if cuda:
                g.to(torch.device('cuda'))
                g.gdata['std'] = g.gdata['std'].cuda()

            target_gallery.append(target)
            g_gallery.append(g)

        target_gallery = np.array(np.concatenate(target_gallery))
        # Preserve each graph's 'std' tensor across dgl.batch by
        # re-attaching the concatenated tensors to the batched graph.
        gdata = list(map(lambda g: g.gdata['std'], g_gallery))
        g_gallery = dgl.batch(g_gallery)
        g_gallery.gdata = {'std': torch.cat(gdata)}

        target_query = []
        for i, (g, target) in enumerate(data_loader):

            # Prepare input data
            if cuda:
                g.to(torch.device('cuda'))
                g.gdata['std'] = g.gdata['std'].cuda()

            # Output: distances from this query batch to the whole gallery.
            d = distance(g, g_gallery, mode='retrieval')

            dist_matrix.append(d)
            target_query.append(target)

        dist_matrix = torch.stack(dist_matrix)
        target_query = np.array(np.concatenate(target_query))

        # Collapse duplicate query labels: keep, per unique label, the
        # minimum distance to each gallery item over all its query graphs.
        target_combined_query = np.unique(target_query)
        combined_dist_matrix = torch.zeros(target_combined_query.shape[0],
                                           dist_matrix.shape[1])

        for i, kw in enumerate(target_combined_query):
            ind = kw == target_query
            combined_dist_matrix[i] = dist_matrix[ind].min(0).values

        # K-NN classifier
        acc.update(
            knn_accuracy(combined_dist_matrix,
                         target_gallery,
                         target_combined_query,
                         k=5,
                         dataset=data_loader.dataset.dataset))

        # mAP retrieval
        meanap.update(
            mean_average_precision(combined_dist_matrix, target_gallery,
                                   target_combined_query))
    batch_time.update(time.time() - start)
    print(
        '* Test Acc {acc.avg:.3f}; mAP {meanap.avg: .3f}; Time x Test {b_time.avg:.3f}'
        .format(acc=acc, meanap=meanap, b_time=batch_time))
    return acc, meanap
コード例 #6
0
def test(data_triplet_loader, nets, cuda, data_pair_loader=None):
    """Evaluate triplet ranking accuracy and, optionally, pair-wise ROC AUC.

    Args:
        data_triplet_loader: yields (g1, g2, g3, target) triplets where g2
            is the positive and g3 the negative example for anchor g1.
        nets: (net, distance) tuple — embedding network and distance network.
        cuda: when True, move graphs to the GPU.
        data_pair_loader: optional loader of labelled (g1, g2, _, target)
            pairs used to compute the AUC; skipped when None.

    Returns:
        (acc, auc) AverageMeters; auc stays at its initial value when no
        pair loader is given.
    """
    batch_time = LogMetric.AverageMeter()
    acc = LogMetric.AverageMeter()
    auc = LogMetric.AverageMeter()

    net, distance = nets

    # switch to test mode
    net.eval()
    distance.eval()

    # Removed unused locals `end` and `dist_matrix` from the original.
    start = time.time()
    with torch.no_grad():
        total, correct = 0, 0
        for j, (g1, g2, g3, target) in enumerate(data_triplet_loader):
            if cuda:
                g1.to(torch.device('cuda'))
                g2.to(torch.device('cuda'))
                g3.to(torch.device('cuda'))

            # Embed the three graphs with the shared network.
            g1 = net(g1)
            g2 = net(g2)
            g3 = net(g3)

            # A triplet is correct when the anchor is closer to the
            # positive than to the negative.
            d_pos = distance(g1, g2, mode='pairs')
            d_neg = distance(g1, g3, mode='pairs')
            total += d_pos.shape[0]
            correct += (d_pos < d_neg).float().sum()

        acc.update(correct / total)

        if data_pair_loader is not None:
            distances, labels = [], []
            for j, (g1, g2, _, target) in enumerate(data_pair_loader):
                if cuda:
                    g1.to(torch.device('cuda'))
                    g2.to(torch.device('cuda'))

                # Output
                g1 = net(g1)
                g2 = net(g2)

                d = distance(g1, g2, mode='pairs')
                distances.append(d)
                labels.append(target)
            # Turn distances into min-max normalised similarities for AUC.
            similarity = -torch.cat(distances, 0)
            similarity = (similarity - similarity.min()) / (
                similarity.max() - similarity.min() + 1e-8)
            labels = torch.cat(labels, 0)
            auc.update(metrics.roc_auc_score(labels.cpu(), similarity.cpu()))

    batch_time.update(time.time() - start)
    print(
        '* Test Acc {acc.avg:.5f}; AUC {auc.avg: .5f} Time x Test {b_time.avg:.3f}'
        .format(acc=acc, auc=auc, b_time=batch_time))
    return acc, auc