Example #1
    def test_ops(self):
        x = K.variable(np.random.rand(8, 12))
        y = K.variable(np.random.rand(12, 25))
        z = K.placeholder((25, 18, 13))
        w = K.placeholder((18, 18))

        # ====== dot ====== #
        t = K.dot(x, y)
        self.assertEqual(K.get_shape(t), (8, 25))
        self.assertEqual(K.get_shape(t), K.eval(t).shape)
        t = K.dot(t, K.dimshuffle(z, (1, 0, 2)))
        self.assertEqual(K.get_shape(t), (8, 18, 13))

        # ====== transpose ====== #
        self.assertEqual(K.get_shape(K.transpose(z)), (13, 18, 25))
        self.assertEqual(K.get_shape(K.transpose(t, axes=(2, 0, 1))),
                         (13, 8, 18))

        # ====== eye ====== #
        self.assertEqual(K.get_shape(K.eye(5)), K.eval(K.eye(5)).shape)
        # ====== diag ====== #
        self.assertEqual(K.get_shape(K.diag(w)), (18,))
        # self.assertEqual(K.get_shape(K.diag(x)), K.eval(K.diag(y)).shape)
        self.assertEqual(K.get_shape(K.square(x)), K.eval(K.square(x)).shape)
        self.assertEqual(K.get_shape(K.abs(x)), K.eval(K.abs(x)).shape)
        self.assertEqual(K.get_shape(K.sqrt(x)), K.eval(K.sqrt(x)).shape)
        self.assertEqual(K.get_shape(K.exp(x)), K.eval(K.exp(x)).shape)
        self.assertEqual(K.get_shape(K.log(x)), K.eval(K.log(x)).shape)
        self.assertEqual(K.get_shape(K.round(x)), K.eval(K.round(x)).shape)
        self.assertEqual(K.get_shape(K.pow(x, 2)), K.eval(K.pow(x, 2)).shape)
        self.assertEqual(K.get_shape(K.clip(x, -1, 1)),
                         K.eval(K.clip(x, -1, 1)).shape)
        self.assertEqual(K.get_shape(K.inv(x)), K.eval(K.inv(x)).shape)
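The shape assertions in the dot section can be verified directly in NumPy. A minimal sketch, assuming K.dot follows NumPy's tensordot convention for N-D operands (contract the last axis of the first argument with the second-to-last axis of the second):

import numpy as np

x = np.random.rand(8, 12)
y = np.random.rand(12, 25)
z = np.random.rand(25, 18, 13)

t = x.dot(y)                                 # (8, 25)
zt = np.transpose(z, (1, 0, 2))              # dimshuffle (1, 0, 2) -> (18, 25, 13)
out = np.tensordot(t, zt, axes=([1], [1]))   # contract 25 with 25 -> (8, 18, 13)
assert out.shape == (8, 18, 13)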
Example #2
File: shape.py Project: imito/odin
 def _apply(self, X):
   ndim = len(self.T.input_shape)
   axis = self.T.axis % ndim
   pattern = ['x' if i == axis
              else (i - 1 if i > axis else i)
              for i in range(ndim)]
   return K.dimshuffle(X, pattern)
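To see what this list comprehension computes, here is the same logic as a standalone helper with a couple of concrete (ndim, axis) pairs; expand_pattern is a hypothetical name for illustration, not part of the project:

def expand_pattern(ndim, axis):
    axis = axis % ndim
    return ['x' if i == axis
            else (i - 1 if i > axis else i)
            for i in range(ndim)]

print(expand_pattern(4, 1))   # [0, 'x', 1, 2] -- insert a new axis at position 1
print(expand_pattern(3, -1))  # [0, 1, 'x']    -- append a broadcastable axis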
Example #3
 def _apply(self, x):
     axes = iter(range(K.ndim(self.alpha)))
     pattern = [
         'x' if input_axis in self.shared_axes else next(axes)
         for input_axis in range(K.ndim(x))
     ]
     alpha = K.dimshuffle(self.alpha, pattern)
     return K.relu(x, alpha)
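The pattern here broadcasts a lower-rank alpha (as in PReLU) across the axes it is shared over. A small sketch of the pattern construction with hypothetical shapes:

def alpha_pattern(input_ndim, alpha_ndim, shared_axes):
    axes = iter(range(alpha_ndim))
    return ['x' if input_axis in shared_axes else next(axes)
            for input_axis in range(input_ndim)]

# 4-D input with a per-channel (1-D) alpha shared over axes 0, 2, 3:
print(alpha_pattern(4, 1, shared_axes=(0, 2, 3)))  # ['x', 0, 'x', 'x']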
Example #4
 def _apply(self, X):
     ndim = len(self.T.input_shape)
     axis = self.T.axis % ndim
     pattern = [
         'x' if i == axis else (i - 1 if i > axis else i)
         for i in range(ndim)
     ]
     return K.dimshuffle(X, pattern)
Example #5
  def _apply(self, X, noise=0):
    ndim = X.shape.ndims
    # if is training, normalize input by its own mean and std
    mean, var = tf.nn.moments(X, axes=self.axes)
    # prepare dimshuffle pattern inserting broadcastable axes as needed
    param_axes = iter(range(ndim - len(self.axes)))
    pattern = ['x' if input_axis in self.axes else next(param_axes)
               for input_axis in range(ndim)]
    # apply dimshuffle pattern to all parameters
    beta = 0 if self.beta_init is None else \
        K.dimshuffle(self.get('beta'), pattern)
    gamma = 1 if self.gamma_init is None else \
        K.dimshuffle(self.get('gamma'), pattern)

    # ====== if training: use local mean and var ====== #
    def training_fn():
      running_mean = ((1 - self.alpha) * self.get('mean') +
                      self.alpha * mean)
      running_var = ((1 - self.alpha) * self.get('var') +
                     self.alpha * var)
      with tf.control_dependencies([
              tf.assign(self.get('mean'), running_mean),
              tf.assign(self.get('var'), running_var)]):
        return tf.identity(mean), tf.identity(var)

    # ====== if inference: use global mean and var ====== #
    def infer_fn():
      return self.get('mean'), self.get('var')

    mean, var = tf.cond(K.is_training(), training_fn, infer_fn)
    inv_std = tf.rsqrt(var + self.epsilon)
    normalized = (X - K.dimshuffle(mean, pattern)) * \
        (gamma * K.dimshuffle(inv_std, pattern))
    # ====== applying noise if required ====== #
    if self.noise_level is not None:
      normalized = K.rand.apply_noise(normalized,
          level=self.noise_level, noise_dims=self.noise_dims,
          noise_type='gaussian')
    # add beta
    normalized = normalized + beta
    # activated output
    return self.activation(normalized)
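Setting TensorFlow aside, the training-time math of this layer reduces to a few NumPy lines. A sketch with hypothetical shapes (batch, time, features) and the same EMA update rule:

import numpy as np

X = np.random.rand(32, 20, 64)        # (batch, time, features)
axes = (0, 1)                          # normalize over batch and time
eps, alpha = 1e-8, 0.1

mean = X.mean(axis=axes)               # (64,)
var = X.var(axis=axes)                 # (64,)
# the dimshuffle pattern for this case is ['x', 'x', 0]
inv_std = 1.0 / np.sqrt(var + eps)
normalized = (X - mean[None, None, :]) * inv_std[None, None, :]

# running statistics, matching (1 - alpha) * old + alpha * new above
running_mean = np.zeros(64)
running_mean = (1 - alpha) * running_mean + alpha * mean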
Example #6
 def _apply(self, X):
     # ====== apply convolution ====== #
     conved = self.convolve(X)
     # ====== apply bias ====== #
     if 'b' in self.variable_info:
         if self.untie_biases:
             conved += tf.expand_dims(self.get('b'), axis=0)
         else:
             conved += K.dimshuffle(self.get('b'),
                                    ('x', ) * (self.ndim + 1) + (0, ))
     return self.activation(conved)
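The tied-bias case uses the pattern ('x',) * (ndim + 1) + (0,), which in NumPy terms is simply a reshape to (1, ..., 1, n_channels) followed by broadcasting. A sketch for a 2-D convolution (ndim = 2) in NHWC layout, with hypothetical shapes:

import numpy as np

conved = np.random.rand(4, 32, 32, 16)   # (batch, height, width, channels)
b = np.random.rand(16)                    # one bias per output channel
# pattern ('x', 'x', 'x', 0) == reshape to (1, 1, 1, 16), then broadcast
conved = conved + b.reshape(1, 1, 1, 16)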
Example #7
 def _apply(self, x):
     input_shape = K.get_shape(x)
     is_training = K.is_training()
     ndim = K.ndim(x)
     # if is training, normalize input by its own mean and std
     if not is_training:
         mean = self.mean
         inv_std = self.inv_std
     else:
         mean = K.mean(x, self.axes)
         inv_std = K.inv(K.sqrt(K.var(x, self.axes) + self.epsilon))
         # set a default update for them:
         running_mean = ((1 - self.alpha) * self.mean + self.alpha * mean)
         running_inv_std = ((1 - self.alpha) * self.inv_std +
                            self.alpha * inv_std)
     # prepare dimshuffle pattern inserting broadcastable axes as needed
     param_axes = iter(range(ndim - len(self.axes)))
     pattern = [
         'x' if input_axis in self.axes else next(param_axes)
         for input_axis in range(ndim)
     ]
     # apply dimshuffle pattern to all parameters
     beta = 0 if not hasattr(self, 'beta') else K.dimshuffle(
         self.beta, pattern)
     gamma = 1 if not hasattr(self, 'gamma') else K.dimshuffle(
         self.gamma, pattern)
     # normalize
     normalized = (x - K.dimshuffle(mean, pattern)) * \
         (gamma * K.dimshuffle(inv_std, pattern)) + beta
     # set shape for output
     K.add_shape(normalized, input_shape)
     # activated output
     output = self.activation(normalized)
     # add updates for final output
     if is_training:
         add_updates(output, self.mean, running_mean)
         add_updates(output, self.inv_std, running_inv_std)
     return output
Example #8
 def _apply(self, x):
     # store last input for deconvolution ops
     self._last_input = x
     conved = self.convolve(x)
     output_shape = K.get_shape(conved)
     if not hasattr(self, 'b'):
         pass  # no bias parameter to add
     elif self.untie_biases:
         conved += K.expand_dims(self.b, 0)
     else:
         conved += K.dimshuffle(self.b, ('x', ) * (self.ndim + 1) + (0, ))
     activated = self.activation(conved)
     # set shape for output
     K.add_shape(activated, output_shape)
     return activated
Example #9
 def _apply(self, X):
   if X.shape.ndims == 3:
     X = tf.expand_dims(X, axis=-1)
   assert X.shape.ndims == 4, \
       "TimeDelayedConv requires 3-D or 4-D input, but given: '%s'" % str(X)
   # [n_sample, n_timestep, n_features, 1]
   # ====== traverse backward along time axis ====== #
   if self.backward:
     X = tf.reverse(X, 1)
   # ====== apply convolution ====== #
   conved = tf.nn.convolution(input=X, filter=self.get('W'),
       padding="VALID",
       strides=[1, 1],
       data_format="NHWC")
   # [n_sample, n_timestep - n_time_context + 1, 1, n_new_features]
   # ====== apply bias ====== #
   if 'b' in self.variable_info:
     conved += K.dimshuffle(self.get('b'), ('x', 'x', 'x', 0))
   # ====== activation ====== #
   conved = self.activation(conved)
   # ====== applying pooling ====== #
   if self.time_pool == 'none':
     pool = tf.squeeze(conved, 2)
     # [n_sample, n_timestep - n_time_context + 1, n_new_features]
   elif self.time_pool == 'stat':
     mean, var = tf.nn.moments(conved, axes=1, keep_dims=True)
     std = tf.sqrt(var)
     pool = tf.concat([mean, std], -1)
     pool = tf.squeeze(pool, axis=[1, 2])
     # [n_sample, n_new_features * 2]
   elif self.time_pool in ('avg', 'max'):
     fn_pool = tf.nn.max_pool if self.time_pool == 'max' else tf.nn.avg_pool
     pool = fn_pool(conved,
                    ksize=[1, tf.shape(conved)[1], 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    data_format="NHWC")
     pool = tf.squeeze(pool, axis=[1, 2])
     # [n_sample, n_new_features]
   elif self.time_pool == 'sum':
     pool = tf.reduce_sum(conved, axis=1)
     pool = tf.squeeze(pool, axis=1)
     # [n_sample, n_new_features]
   # ====== return 2D output ====== #
   return pool
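The 'stat' branch pools each utterance into a mean and standard deviation over the time axis, doubling the feature dimension. The same computation in NumPy, with hypothetical shapes:

import numpy as np

conved = np.random.rand(4, 50, 1, 128)        # (n, time, 1, features)
mean = conved.mean(axis=1, keepdims=True)      # (4, 1, 1, 128)
std = conved.std(axis=1, keepdims=True)
pool = np.concatenate([mean, std], axis=-1)    # (4, 1, 1, 256)
pool = pool.squeeze(axis=(1, 2))               # (4, 256)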
Example #10
 def _apply(self, x):
     if K.ndim(x) != self.conv.ndim + 2:
         raise ValueError(
             'Input has %d dimensions, but this Op requires a %d-D '
             'tensor.' % (K.ndim(x), self.conv.ndim + 2))
     # ====== prepare the deconvolution ====== #
     stride = self.conv.strides
     border_mode = self.conv.pad
     W = self.conv.W
     dilation = self.conv.dilation
     # for a dilated convolution, the weights must be transposed
     if self.conv.ndim == 2:
         deconv_func = K.deconv2d
     elif self.conv.ndim == 3:
         deconv_func = K.deconv3d
     else:
         raise Exception('No support for %d-D input in TransposedConv' %
                         self.conv.ndim)
     # Theano requires batch_dims to be a constant or None, but
     # TensorFlow requires batch_dims to be a native tensor variable
     conved = deconv_func(
         x,
         kernel=W,
         output_shape=K.get_shape(
             self.conv._last_input,
             native=(K.backend() == 'tensorflow')),
         strides=stride,
         border_mode=border_mode,
         filter_dilation=dilation)
     if hasattr(self, 'b'):
         if self.conv.untie_biases:
             conved += K.expand_dims(self.b, 0)
         else:
             conved += K.dimshuffle(self.b,
                                    ('x', ) * (self.conv.ndim + 1) + (0, ))
     activated = self.conv.activation(conved)
     K.add_shape(activated, self.conv.input_shape)
     return activated
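The comment about output_shape is the crux: TensorFlow's transposed convolution takes the target shape as a tensor, which is why the forward layer remembers _last_input. A minimal raw-TensorFlow (TF1 graph API) sketch of the same round trip, with hypothetical shapes:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 16, 16, 8])
W = tf.get_variable('W', shape=[3, 3, 8, 32])     # maps 8 -> 32 channels
conved = tf.nn.conv2d(x, W, strides=[1, 2, 2, 1], padding='SAME')
# the same W maps back; output_shape must be a tensor when batch is dynamic
deconved = tf.nn.conv2d_transpose(conved, W,
                                  output_shape=tf.shape(x),
                                  strides=[1, 2, 2, 1],
                                  padding='SAME')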
Example #11
File: shape.py Project: imito/odin
 def _apply(self, X):
   return K.dimshuffle(X, pattern=self.pattern)
Example #12
 def _apply(self, x):
     return K.dimshuffle(x, pattern=self.pattern)
Example #13
 def _apply(self, X):
     return K.dimshuffle(X, pattern=self.pattern)
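The last three examples just forward a stored pattern. For reference, dimshuffle itself (Theano semantics, mirrored by odin's K) can be sketched in NumPy: integers permute existing axes and 'x' inserts a new size-1 broadcastable axis. The helper below is an illustration under those assumptions, not the project's implementation, and ignores the axis-dropping case:

import numpy as np

def dimshuffle(x, pattern):
    # permute the kept axes, then insert size-1 axes where 'x' appears
    kept = [p for p in pattern if p != 'x']
    out = np.transpose(x, kept)
    for i, p in enumerate(pattern):
        if p == 'x':
            out = np.expand_dims(out, axis=i)
    return out

b = np.random.rand(16)
print(dimshuffle(b, ('x', 'x', 'x', 0)).shape)               # (1, 1, 1, 16)
print(dimshuffle(np.random.rand(2, 3, 4), (1, 0, 2)).shape)  # (3, 2, 4)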