Example No. 1
def train(train_loader, train_loader1, model, criterion, optimizer, var_optimizer, epoch, args, log):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    rk_losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    train_loader1_iter = iter(train_loader1)

    model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        data_time.update(time.time() - end)

        input = input.cuda(args.gpu, non_blocking=True)
        target = target.cuda(args.gpu, non_blocking=True)


        # draw a batch from the auxiliary (unlabeled) loader; this assumes
        # train_loader1 yields at least as many batches as train_loader
        input1 = next(train_loader1_iter)
        input1 = input1.cuda(args.gpu, non_blocking=True)

        bs = input.shape[0]
        bs1 = input1.shape[0]

        # forward the labeled batch together with two copies of the auxiliary batch,
        # so the (stochastic) model produces two predictive distributions per sample
        output = model(torch.cat([input, input1.repeat(2, 1, 1, 1)]))
        loss = criterion(output[:bs], target)

        # mutual-information estimate from the two stochastic predictions:
        # entropy of the averaged prediction minus the average of the entropies
        out1_0 = output[bs:bs + bs1].softmax(-1)
        out1_1 = output[bs + bs1:].softmax(-1)
        mi1 = ent((out1_0 + out1_1) / 2.) - (ent(out1_0) + ent(out1_1)) / 2.
        # hinge penalty that pushes the mutual information above the threshold args.mi_th
        rank_loss = torch.nn.functional.relu(args.mi_th - mi1).mean()

        prec1, prec5 = accuracy(output[:bs], target, topk=(1, 5))
        losses.update(loss.detach().item(), bs)
        rk_losses.update(rank_loss.detach().item(), bs1)
        top1.update(prec1.item(), bs)
        top5.update(prec5.item(), bs)

        optimizer.zero_grad()
        var_optimizer.zero_grad()
        # joint objective: supervised loss plus the MI hinge term weighted by args.alpha
        (loss + rank_loss * args.alpha).backward()
        optimizer.step()
        var_optimizer.step()

        batch_time.update(time.time() - end)
        end = time.time()

        if i == len(train_loader) - 1:
            print_log('  Epoch: [{:03d}][{:03d}/{:03d}]   '
                        'Time {batch_time.avg:.3f}   '
                        'Data {data_time.avg:.3f}   '
                        'Loss {loss.avg:.4f}   '
                        'RK Loss {rk_loss.avg:.4f}   '
                        'Prec@1 {top1.avg:.3f}   '
                        'Prec@5 {top5.avg:.3f}   '.format(
                        epoch, i, len(train_loader), batch_time=batch_time, rk_loss=rk_losses,
                        data_time=data_time, loss=losses, top1=top1, top5=top5) + time_string(), log)
    return top1.avg, losses.avg
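
The snippet above relies on an ent helper that is not shown. A minimal sketch, assuming it returns the per-sample Shannon entropy of a batch of softmax probabilities (the reading under which ent(mean) - mean(ent) is the usual mutual-information estimate), could look like the following; the eps guard is an illustrative choice, not taken from the original code:

import torch

def ent(p, eps=1e-12):
    # per-sample Shannon entropy (in nats) of a (batch, classes) probability tensor
    return -(p * (p + eps).log()).sum(dim=-1)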
Example No. 2
 def _calc_pt_plugin(self, pt):
     """Calculate direct entropies and apply PT correction if required """
     calc = self.calc
     pt_corr = lambda R: (R - 1) / (2 * self.N * np.log(2))
     self.H_plugin = {}
     if pt: self.H_pt = {}
     # compute basic entropies
     if 'HX' in calc:
         H = ent(self.PX)
         self.H_plugin['HX'] = H
         if pt:
             self.H_pt['HX'] = H + pt_corr(pt_bayescount(self.PX, self.N))
     if 'HY' in calc:
         H = ent(self.PY)
         self.H_plugin['HY'] = H
         if pt:
             self.H_pt['HY'] = H + pt_corr(pt_bayescount(self.PY, self.N))
     if 'HXY' in calc:
         H = (self.PY * ent(self.PXY)).sum()
         self.H_plugin['HXY'] = H
         if pt:
             for y in range(self.Y_dim):
                 H += pt_corr(pt_bayescount(self.PXY[:, y], self.Ny[y]))
             self.H_pt['HXY'] = H
     if 'SiHXi' in calc:
         H = ent(self.PXi).sum()
         self.H_plugin['SiHXi'] = H
         if pt:
             for x in range(self.X_n):
                 H += pt_corr(pt_bayescount(self.PXi[:, x], self.N))
             self.H_pt['SiHXi'] = H
     if 'HiXY' in calc:
         H = (self.PY * ent(self.PXiY)).sum()
         self.H_plugin['HiXY'] = H
         if pt:
             for x in range(self.X_n):
                 for y in range(self.Y_dim):
                     H += pt_corr(
                         pt_bayescount(self.PXiY[:, x, y], self.Ny[y]))
             self.H_pt['HiXY'] = H
     if 'HiX' in calc:
         H = ent(self.PiX)
         self.H_plugin['HiX'] = H
         if pt:
             # no PT correction for HiX
             self.H_pt['HiX'] = H
     if 'ChiX' in calc:
         H = -(self.PX * malog2(
             np.ma.array(self.PiX,
                         copy=False,
                         mask=(self.PiX <= np.finfo(float).eps)))).sum(
                             axis=0)
         self.H_plugin['ChiX'] = H
         if pt:
             # no PT correction for ChiX
             self.H_pt['ChiX'] = H
     # for Adelman-style I(k;spike) (bits/spike)
     if 'HXY1' in calc:
         if self.Y_m != 2:
             raise ValueError(
                 "HXY1 calculation only makes sense for spike data, ie Y_m = 2")
         H = ent(self.PXY[:, 1])
         self.H_plugin['HXY1'] = H
         if pt:
             self.H_pt['HXY1'] = H + pt_corr(
                 pt_bayescount(self.PXY[:, 1], self.Ny[1]))
     if 'ChiXY1' in calc:
         if self.Y_m != 2:
             raise ValueError(
                 "ChiXY1 calculation only makes sense for spike data, ie Y_m = 2")
         H = -np.ma.array(self.PXY[:, 1] * np.log2(self.PX),
                          copy=False,
                          mask=(self.PX <= np.finfo(float).eps)).sum()
         self.H_plugin['ChiXY1'] = H
         if pt:
             # no PT for ChiXY1
             self.H_pt['ChiXY1'] = H
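
The listing above likewise calls ent (and, in the ChiX branch, malog2) without defining them. Judging from how the results are combined, ent appears to compute the plug-in entropy in bits along the first axis of a probability array, with zero-probability bins masked out of the logarithm, and malog2 a masked log2. A rough sketch of ent under that assumption (the helper in the original source may differ) is:

import numpy as np

def ent(P):
    # plug-in entropy in bits along the first axis of a probability array;
    # zero entries are masked so they contribute nothing to -p*log2(p)
    P = np.asarray(P, dtype=float)
    mP = np.ma.array(P, copy=False, mask=(P <= np.finfo(float).eps))
    return -(mP * np.ma.log2(mP)).sum(axis=0)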