Example 1
 def preds(self, feats):
     key = feats
     if key in self._preds:
         return self._preds[key]
     assert isinstance(feats, Output)
     params_before = len(self.N.params())
     if self.W is None:
         preds = self.N.FC(feats, nout=self.nout, stddev=self.stddev)
     else:
         assert len(self.W) == 1
         W = Output(self.W[0])
         preds = self.N.FCMult(feats, W)
     net_params = self.N.params()
     num_new_params = len(net_params) - params_before
     if self.W is None:
         assert num_new_params >= 1
         self.W = net_params[-num_new_params:]
     else:
         assert num_new_params == 0
     if self.bias:
         params_before = len(self.N.params())
         if self.b is None:
             preds = self.N.Bias(preds)
         else:
             preds = self.N.BiasAdd(preds, self.b, axis=1)
         net_params = self.N.params()
         num_new_params = len(net_params) - params_before
         if self.b is None:
             assert num_new_params == 1
             self.b = Output(net_params[-1], shape=(self.nout,))
         else:
             assert num_new_params == 0
     self._preds[key] = preds
     return preds
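The method above creates its FC weights lazily on the first call and memoizes the result per input, so later calls share one weight matrix. A minimal NumPy sketch of the same create-once, reuse-thereafter pattern (the class and names here are illustrative, not from the codebase above):

    import numpy as np

    class SharedLinear(object):
        def __init__(self, nout, stddev=0.02):
            self.nout = nout
            self.stddev = stddev
            self.W = None        # created lazily on the first preds() call
            self._preds = {}     # memoized outputs, keyed by input

        def preds(self, feats):
            key = id(feats)      # the real code keys on the Output itself
            if key in self._preds:
                return self._preds[key]   # reuse the cached prediction
            if self.W is None:            # first call: create the weights
                self.W = self.stddev * np.random.randn(feats.shape[1], self.nout)
            out = feats.dot(self.W)       # later inputs share the same W
            self._preds[key] = out
            return out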
Example 2
 def __init__(self,
              num,
              definition,
              limits=default_limits,
              internal_rng=False,
              name=None):
     assert len(limits) == 2
     assert limits[1] > limits[0]
     self.limits = tuple(float(l) for l in limits)
     self.span = limits[1] - limits[0]
     if len(definition) != 1:
         raise ValueError(
             'definition should have 1 parameter (dim), not %d' %
             len(definition))
     try:
         dim = int(definition[0])
     except ValueError:
         raise ValueError('non-integer dim: %s' % definition[0])
     self.recon_dim = self.sample_dim = dim
     self.num = num
     self.rangekw = dict(low=self.limits[0], high=self.limits[1])
     if internal_rng:
         self.placeholders = [
             t_rng.uniform(size=(num, dim), **self.rangekw)
         ]
     else:
         self.placeholders = [T.matrix()]
     self.flat_data = [Output(self.placeholders[0], shape=(self.num, dim))]
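A hedged usage sketch for the constructor above: assuming the enclosing class is a uniform source distribution (the name UniformDistribution is an assumption, as are the argument values), it might be instantiated like this:

    # Hypothetical usage; the class name and argument values are assumptions.
    dist = UniformDistribution(num=128,             # batch size
                               definition=['100'],  # dim = 100
                               limits=(-1., 1.),
                               internal_rng=True)   # sample inside the graph
    # dist.placeholders[0] is then a (128, 100) uniform sample in [-1, 1)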
Example 3
 def __init__(self,
              num,
              definition,
              mean=0,
              stdev=None,
              internal_rng=False):
     self.mean = mean
     if len(definition) != 1:
         raise ValueError(
             'definition should have 1 parameter (dim), not %d' %
             len(definition))
     try:
         dim = int(definition[0])
     except ValueError:
         raise ValueError('non-integer dim: %s' % definition[0])
     if stdev is None:
         var = 2 * np.log(2)
         stdev = var**0.5
     else:
         var = stdev**2
     self.var, self.stdev = (floatX(x) for x in (var, stdev))
     self.recon_dim = self.sample_dim = dim
     self.num = num
     if internal_rng:
         self.placeholders = [
             t_rng.normal(size=(num, dim), avg=mean, std=self.stdev)
         ]
     else:
         self.placeholders = [T.matrix()]
     self.flat_data = [Output(self.placeholders[0], shape=(num, dim))]
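Note the default scale: when stdev is not given, the variance defaults to 2 ln 2, so stdev = sqrt(2 ln 2). A quick numeric check:

    import numpy as np
    print(np.sqrt(2 * np.log(2)))   # 1.1774..., the default stdev above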
Example 4
 def __init__(self,
              args,
              dist,
              nc,
              z=None,
              source=None,
              mode='train',
              bnkwargs=None,
              gen_transform=None):
     # avoid sharing a mutable default dict across calls
     if bnkwargs is None:
         bnkwargs = {}
     N = self.net = Net(source=source, name='Generator')
     self.set_mode(mode)
     h_and_weights = dist.embed_data()
     bn_use_ave = (mode == 'test')
     self.data, _ = get_deconvnet(image_size=args.crop_resize,
                                  name=args.gen_net)(
                                      h_and_weights,
                                      N=N,
                                      nout=nc,
                                      size=args.gen_net_size,
                                      num_fc=args.net_fc,
                                      fc_dims=args.net_fc_dims,
                                      nonlin=args.deconv_nonlin,
                                      bn_use_ave=bn_use_ave,
                                      ksize=args.deconv_ksize,
                                      **bnkwargs)
     if gen_transform is not None:
         self.data = Output(gen_transform(self.data.value),
                            shape=self.data.shape)
Example 5
    def logits_to_recon(self, recon_logits):
        def blend(mask, true, false):
            return mask * true + (1 - mask) * false

        shape = self.num, self.recon_dim
        # clamp recon in the dist. range
        recon = recon_logits
        recon = blend(recon < self.limits[0], self.limits[0], recon)
        recon = blend(recon > self.limits[1], self.limits[1], recon)
        return [Output(recon, shape)]
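The two blend() calls implement an elementwise clamp. Assuming Theano (as the T.* calls elsewhere in these snippets suggest), T.clip is a more direct equivalent, sketched here:

    import theano.tensor as T

    def clamp_to_limits(recon_logits, limits):
        # same effect as the blend()-based masking above
        return T.clip(recon_logits, limits[0], limits[1])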
Example 6
 def logits_to_sample(self, recon_logits):
     recon_mean = recon_logits[:, :self.slice_point]
     recon_log_var = recon_logits[:, self.slice_point:]
     if self.log_var_bias != 0:
         recon_log_var += self.log_var_bias
     recon_logstd = recon_log_var / 2
     recon_std = T.exp(recon_logstd)
     standard_sample = t_rng.normal(size=recon_mean.shape)
     sample = recon_mean + standard_sample * recon_std
     sample = [Output(sample, (self.num, self.sample_dim))]
     return sample
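This is the standard reparameterization trick: sample = mean + eps * std with eps ~ N(0, 1), which keeps the sample differentiable with respect to the predicted mean and log-variance. A NumPy sketch of the same computation (illustrative only; the real code draws eps inside the Theano graph):

    import numpy as np

    def reparameterized_sample(recon_mean, recon_log_var, rng=np.random):
        recon_std = np.exp(recon_log_var / 2.0)   # std = exp(log_var / 2)
        eps = rng.normal(size=recon_mean.shape)   # standard normal noise
        return recon_mean + eps * recon_std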
Example 7
 def validate(val_loader, model, criterion, print_freq, args):
     batch_time = AverageMeter()
     losses = AverageMeter()
     top1 = AverageMeter()
     top5 = AverageMeter()

     # switch to evaluate mode
     model.eval()

     end = time.time()
     for i, (input, target) in enumerate(val_loader):
         # convert the tensor to a NumPy array to feed into the encoder
         input_np = input.numpy()
         X = Output(input_transform(input_np),
                    (args.batch_size, nc, args.crop_size, args.crop_size))
         with nostdout():
             hs = sample_z_from_x(X)[0].value.eval()
         input_h = torch.from_numpy(hs)

         target = target.cuda(non_blocking=True)
         with torch.no_grad():  # inference only; no autograd state needed
             # compute output
             output = model(input_h.cuda())
             loss = criterion(output, target)

         # measure accuracy and record loss
         prec1, prec5 = accuracy(output, target, topk=(1, 5))
         losses.update(loss.item(), input.size(0))
         top1.update(prec1[0], input.size(0))
         top5.update(prec5[0], input.size(0))

         # measure elapsed time
         batch_time.update(time.time() - end)
         end = time.time()

         if i % print_freq == 0:
             print('Test: [{0}/{1}]\t'
                   'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                   'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                   'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                   'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                    i, len(val_loader), batch_time=batch_time, loss=losses,
                    top1=top1, top5=top5))

     print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
           .format(top1=top1, top5=top5))

     return top1.avg
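validate() assumes an AverageMeter helper with val/avg attributes and an update(val, n) method. A minimal sketch consistent with that usage, modeled on the standard PyTorch ImageNet example:

    class AverageMeter(object):
        """Tracks the latest value and a running average."""

        def __init__(self):
            self.val = 0
            self.sum = 0
            self.count = 0
            self.avg = 0

        def update(self, val, n=1):
            self.val = val
            self.sum += val * n
            self.count += n
            self.avg = self.sum / self.count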
Example 8
 def logits_to_recon(self, recon_logits):
     """Returns the nearest possible `sample()` output from `recon_logits`."""
     return [Output(recon_logits, (self.num, self.recon_dim))]
Example 9
 def logits_to_recon(self, recon_logits):
     recon_mean = recon_logits[:, :self.slice_point]
     return [Output(recon_mean, (self.num, self.sample_dim))]
Example 10
 def logits_to_recon(self, recon_logits):
     shape = self.num, self.recon_dim
     recon = T.tanh(recon_logits)
     limits = -1, 1
     return [Output(self._scale_and_shift(recon, limits), shape)]
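The _scale_and_shift helper is not shown in these snippets. A plausible implementation, assuming it linearly maps values from the given limits into self.limits (an assumption, not the actual method body):

    def _scale_and_shift(self, x, limits):
        # assumed behavior: map x linearly from [limits[0], limits[1]]
        # into [self.limits[0], self.limits[1]]
        unit = (x - limits[0]) / (limits[1] - limits[0])
        return self.limits[0] + unit * (self.limits[1] - self.limits[0])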
Example 11
 def logits_to_recon(self, recon_logits):
     shape = self.num, self.recon_dim
     return [Output(recon_logits, shape)]
Example 12
 def logits_to_recon(self, recon_logits):
     shape = self.num, self.recon_dim
     recon = T.nnet.sigmoid(recon_logits)
     limits = 0, 1
     return [Output(self._scale_and_shift(recon, limits), shape)]
Example 13
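    # parse a comma-separated flag like '1,0,1' into [True, False, True]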
    args.noise_input_joint_discrim = [
        bool(float(x)) for x in args.noise_input_joint_discrim.split(',')
    ]
assert len(args.noise_input_joint_discrim) == len(args.noise.split('_'))

dist = MultiDistribution(args.batch_size,
                         args.noise,
                         normalize=args.encode_normalize,
                         weights=args.noise_weight,
                         weight_embed=args.noise_input_weight)
for d in dist.dists:
    d.log_var_bias = args.log_var_bias

# input placeholders
Xi = T.tensor4(dtype='uint8')
X = Output(input_transform(Xi),
           (args.batch_size, nc, args.crop_size, args.crop_size))
assert args.crop_resize <= args.crop_size
if args.crop_size == args.crop_resize:
    Xis = Xi
else:
    Xis = T.tensor4(dtype='uint8')
Xs = Output(input_transform(Xis),
            (args.batch_size, nc, args.crop_resize, args.crop_resize))
Z = dist.placeholders
if args.classifier:
    Y = T.ivector()
    y = Output(Y, shape=(args.batch_size, ), index_max=ny)
else:
    Y = None
    y = None
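A hedged sketch of how these placeholders might be driven, assuming input_transform is a purely symbolic Theano expression (the zero batch is an illustrative stand-in for real image data):

    import numpy as np
    import theano

    f = theano.function([Xi], X.value)   # applies input_transform to raw uint8
    batch = np.zeros((args.batch_size, nc, args.crop_size, args.crop_size),
                     dtype='uint8')
    transformed = f(batch)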
Example 14
 def logits_to_recon(self, recon_logits):
     return [Output(recon_logits, (self.num, self.recon_dim))]