Example #1
 def monitor(self):
     if not self._monitor:
         return
     # Mean absolute magnitude of the parameter values, their gradient and
     # the last update step.
     val_mean_abs = np.array(ca.mean(ca.fabs(self._array)))
     grad_mean_abs = np.array(ca.mean(ca.fabs(self._tmp_grad_array)))
     step_mean_abs = np.array(ca.mean(ca.fabs(self._tmp_last_step)))
     logger.info('%s:\t%.1e  [%.1e, %.1e]', self.name, val_mean_abs,
                 grad_mean_abs, step_mean_abs)
Example #2
 def monitor(self):
     if not self._monitor:
         return
     val_mean_abs = np.array(ca.mean(ca.fabs(self._array)))
     grad_mean_abs = np.array(ca.mean(ca.fabs(self._tmp_grad_array)))
     step_mean_abs = np.array(ca.mean(ca.fabs(self._tmp_step)))
     log.info('%s:\t%.1e  [%.1e, %.1e]', self.name, val_mean_abs,
              grad_mean_abs, step_mean_abs)
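The two monitor variants above log the same three statistics: the mean absolute magnitude of a parameter's values, of its gradient and of its last update step. A minimal self-contained sketch of the pattern, using plain numpy in place of cudarray (ca largely mirrors the numpy API) and a hypothetical Param container:

    import logging

    import numpy as np

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    class Param:
        """Hypothetical stand-in for a monitored parameter."""
        def __init__(self, name, values, grad, step):
            self.name = name
            self.values = values  # current weights
            self.grad = grad      # latest gradient
            self.step = step      # latest update step
            self.monitor = True

        def log_stats(self):
            if not self.monitor:
                return
            val_mean_abs = np.mean(np.fabs(self.values))
            grad_mean_abs = np.mean(np.fabs(self.grad))
            step_mean_abs = np.mean(np.fabs(self.step))
            logger.info('%s:\t%.1e  [%.1e, %.1e]', self.name, val_mean_abs,
                        grad_mean_abs, step_mean_abs)

    rng = np.random.default_rng(0)
    shape = (64, 32)
    p = Param('w_fc1', rng.normal(size=shape),
              rng.normal(scale=1e-3, size=shape),
              rng.normal(scale=1e-4, size=shape))
    p.log_stats()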
Example #3
 def monitor(self):
     # Log stats for every parameter flagged for monitoring.
     for param, step in zip(self.params, self.steps):
         if param.monitor:
             val_mean_abs = np.array(ca.mean(ca.fabs(param.values)))
             grad_mean_abs = np.array(ca.mean(ca.fabs(param.grad())))
             step_mean_abs = np.array(ca.mean(ca.fabs(step)))
             logger.info('%s:\t%.1e  [%.1e, %.1e]'
                         % (param.name, val_mean_abs, grad_mean_abs,
                            step_mean_abs))
Example #4
 def monitor(self):
     for param, step in zip(self.params, self.steps):
         if param.monitor:
             val_mean_abs = np.array(ca.mean(ca.fabs(param.values)))
             grad_mean_abs = np.array(ca.mean(ca.fabs(param.grad())))
             step_mean_abs = np.array(ca.mean(ca.fabs(step)))
             logger.info(
                 '%s:\t%.1e  [%.1e, %.1e]' %
                 (param.name, val_mean_abs, grad_mean_abs, step_mean_abs))
Example #5
    def _update(self):
        # Forward propagation
        next_x = self.x.array
        x_feats = [None]*len(self.layers)
        x_grams = [None]*len(self.layers)
        for l, layer in enumerate(self.layers):
            next_x = layer.fprop(next_x)
            # Keep the activations (and Gram matrices) needed for the
            # backward pass.
            if self.subject_weights[l] > 0:
                x_feats[l] = next_x
            if self.style_weights[l] > 0:
                x_feats[l] = next_x
                x_grams[l] = gram_matrix(next_x)

        # Backward propagation
        grad = ca.zeros_like(next_x)
        loss = ca.zeros(1)
        for l, layer in reversed(list(enumerate(self.layers))):
            if self.subject_weights[l] > 0:
                diff = x_feats[l] - self.subject_feats[l]
                # Scale the layer weight by the L1 norm of the difference so
                # contributions from different layers are comparable.
                norm = ca.sum(ca.fabs(diff)) + 1e-8
                weight = float(self.subject_weights[l]) / norm
                grad += diff * weight
                loss += 0.5*weight*ca.sum(diff**2)
            if self.style_weights[l] > 0:
                diff = x_grams[l] - self.style_grams[l]
                n_channels = diff.shape[0]
                x_feat = ca.reshape(x_feats[l], (n_channels, -1))
                # Gradient of the Gram-matrix style loss w.r.t. the layer
                # activations: dot the Gram difference back into the features.
                style_grad = ca.reshape(ca.dot(diff, x_feat), x_feats[l].shape)
                norm = ca.sum(ca.fabs(style_grad))
                weight = float(self.style_weights[l]) / norm
                style_grad *= weight
                grad += style_grad
                loss += 0.25*weight*ca.sum(diff**2)
            grad = layer.bprop(grad)

        if self.tv_weight > 0:
            # Total variation smoothing: convolve each color channel with the
            # TV kernel and fold the result into the gradient.
            x = ca.reshape(self.x.array, (3, 1) + grad.shape[2:])
            tv = self.tv_conv.fprop(x, self.tv_kernel)
            tv *= self.tv_weight
            grad -= ca.reshape(tv, grad.shape)

        ca.copyto(self.x.grad_array, grad)
        return loss
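Examples #5 and #6 call gram_matrix without defining it. In Gatys-style transfer, the Gram matrix of a feature map is the matrix of inner products between channel activations; a plausible numpy sketch (a hypothetical reimplementation, assuming a batch size of 1 as in the code above):

    import numpy as np

    def gram_matrix(feats_bc01):
        # Flatten (batch, channels, h, w) activations to (channels, positions)
        # and take inner products between channels.
        n_channels = feats_bc01.shape[1]
        feats = np.reshape(feats_bc01, (n_channels, -1))
        return np.dot(feats, feats.T)

    x = np.random.default_rng(0).normal(size=(1, 16, 8, 8))
    assert gram_matrix(x).shape == (16, 16)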
Example #6
    def _update(self):
        # Forward propagation
        next_x = self.x.array
        x_feats = [None] * len(self.layers)
        for l, layer in enumerate(self.layers):
            next_x = layer.fprop(next_x)
            if self.subject_weights[l] > 0 or self.style_weights[l] > 0:
                x_feats[l] = next_x

        # Backward propagation
        grad = ca.zeros_like(next_x)
        loss = ca.zeros(1)
        for l, layer in reversed(list(enumerate(self.layers))):
            if self.subject_weights[l] > 0:
                diff = x_feats[l] - self.subject_feats[l]
                norm = ca.sum(ca.fabs(diff)) + 1e-8
                weight = float(self.subject_weights[l]) / norm
                grad += diff * weight
                loss += 0.5 * weight * ca.sum(diff**2)
            if self.style_weights[l] > 0:
                diff = gram_matrix(x_feats[l]) - self.style_grams[l]
                n_channels = diff.shape[0]
                x_feat = ca.reshape(x_feats[l], (n_channels, -1))
                style_grad = ca.reshape(ca.dot(diff, x_feat), x_feats[l].shape)
                norm = ca.sum(ca.fabs(style_grad))
                weight = float(self.style_weights[l]) / norm
                style_grad *= weight
                grad += style_grad
                loss += 0.25 * weight * ca.sum(diff**2)
            grad = layer.bprop(grad)

        if self.tv_weight > 0:
            x = ca.reshape(self.x.array, (3, 1) + grad.shape[2:])
            tv = self.tv_conv.fprop(x, self.tv_kernel)
            tv *= self.tv_weight
            grad -= ca.reshape(tv, grad.shape)

        ca.copyto(self.x.grad_array, grad)
        return loss
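In both _update variants the subject term adds 0.5 * weight * sum(diff**2) to the loss and weight * diff to the gradient, with weight held constant during differentiation. A standalone numpy finite-difference check of that relationship (not from the source):

    import numpy as np

    rng = np.random.default_rng(0)
    feat = rng.normal(size=(4, 4))
    target = rng.normal(size=(4, 4))
    # Fixed weight, normalized by the L1 norm of the difference as in the code.
    weight = 2.0 / (np.sum(np.fabs(feat - target)) + 1e-8)

    def loss(f):
        return 0.5 * weight * np.sum((f - target) ** 2)

    grad = weight * (feat - target)

    # Numerical gradient, one coordinate at a time.
    eps = 1e-6
    fd = np.zeros_like(feat)
    for idx in np.ndindex(*feat.shape):
        bumped = feat.copy()
        bumped[idx] += eps
        fd[idx] = (loss(bumped) - loss(feat)) / eps

    assert np.allclose(fd, grad, atol=1e-5)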
Example #7
 def fprop(self):
     # Elementwise |x|, written into the preallocated output buffer.
     ca.fabs(self.x.out, out=self.out)
Example #8
 def fprop(self):
     ca.fabs(self.x.array, out=self.array)
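The fprop one-liners above compute an elementwise absolute value directly into a preallocated output buffer. cudarray mirrors numpy's ufunc interface here, so the same call pattern can be tried with np.fabs:

    import numpy as np

    x = np.array([-1.5, 0.0, 2.5])
    out = np.empty_like(x)
    # Write |x| into the existing buffer instead of allocating a new array.
    np.fabs(x, out=out)
    print(out)  # [1.5 0.  2.5]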