Example #1
 def _initialize(self, x):
     input_shape = K.get_shape(x)
     config = NNConfig(input_shape=input_shape[1:])
     is_bidirectional = self.direction_mode == 'bidirectional'
     # ====== check input ====== #
     if self.input_mode == 'norm':
         _init_input2hidden(self,
                            config,
                            rnn_mode=self.rnn_mode,
                            input_mode=self.input_mode,
                            W_init=self.W_init,
                            input_dims=input_shape[-1],
                            hidden_dims=self.num_units)
     # ====== create params ====== #
     layer_info = [input_shape[-1], self.num_units] + \
                  [self.num_units * (2 if is_bidirectional else 1),
                   self.num_units] * (self.num_layers - 1)
     if self.rnn_mode == 'lstm':
         from odin.backend.init import lstm as init_func
     elif self.rnn_mode == 'gru':
         from odin.backend.init import gru as init_func
     else:
         from odin.backend.init import rnn as init_func
     # initialize each parameter separately when params_split=True
     if self.params_split:
         with K.variable_scope(self.name):
             parameters = [
                 init_func(layer_info[i * 2],
                           layer_info[i * 2 + 1],
                           W_init=self.W_init,
                           b_init=self.b_init,
                           one_vector=False,
                           return_variable=True,
                           bidirectional=is_bidirectional,
                           name='layer%d' % i)
                 for i in range(self.num_layers)
             ]
         # print([(j.name, j.tag.roles) for i in parameters for j in i]); exit()
         for p in chain(*parameters):
             config.create_params(p,
                                  shape=K.get_shape(p),
                                  name=p.name.split(':')[0].split('/')[1],
                                  nnops=self,
                                  roles=PARAMETER)
     # else initialize all in 1 big vector
     else:
         parameters = np.concatenate([
             init_func(layer_info[i * 2],
                       layer_info[i * 2 + 1],
                       one_vector=True,
                       return_variable=False,
                       bidirectional=is_bidirectional)
             for i in range(self.num_layers)
         ])
         config.create_params(parameters,
                              shape=parameters.shape,
                              name='params',
                              nnops=self,
                              roles=PARAMETER)
     return config
Example #2
 def test_computational_graph3(self):
     # validate the number of updates found by ComputationGraph
     X = K.placeholder(shape=(None, 28, 28, 3))
     f = N.Sequence([
         N.Conv(32, 3, pad='same', activation=K.linear),
         N.BatchNorm(activation=K.relu),
         N.Flatten(outdim=2),
         N.Dense(16),
         N.BatchNorm(),
         N.Dense(10)
     ])
     K.set_training(True)
     y_train = f(X)
     K.set_training(False)
     y_score = f(X)
     self.assertTrue(
         K.get_shape(y_train) == K.get_shape(y_score)
         and K.get_shape(y_score) == (None, 10))
     cc_train = K.ComputationGraph(y_train)
     cc_score = K.ComputationGraph(y_score)
     self.assertTrue(len(cc_score.updates) == 0)
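     # each of the two BatchNorm layers contributes two training-time updates (running mean and inv_std)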
     self.assertTrue(len(cc_train.updates) == 4)
     # create real function
     fn_train = K.function(X, y_train)
     fn_score = K.function(X, y_score)
     shape1 = fn_train(np.random.rand(12, 28, 28, 3)).shape
     shape2 = fn_score(np.random.rand(12, 28, 28, 3)).shape
     self.assertTrue(shape1 == shape2 and shape1 == (12, 10))
Example #3
 def _apply(self, X, h0=None, mask=None, **kwargs):
     input_shape = K.get_shape(X)
     # ====== check mask ====== #
     if mask is not None and (K.ndim(mask) != K.ndim(X) - 1
                              or K.get_shape(mask)[-1] != input_shape[1]):
         raise Exception(
             'Mask must have "%d" dimensions and the time dimension '
             '(i.e. the second dimension) must equal "%d"'
             ', but the given mask has shape "%s".' %
             (K.ndim(X) - 1, input_shape[1], K.get_shape(mask)))
     # ====== initialize states ====== #
     h0 = _check_rnn_hidden_states(h0, self, input_shape, 'h0')
     # turn off repeat_states if batch_size already included
     if K.get_shape(h0)[0] != 1:
         self.repeat_states = False
     # ====== precompute input ====== #
     X = K.dot(X, self.W_in) if self.input_mode != 'skip' else X
     if self.input_mode == 'norm':
         # normalize all axes except the time dimension
         bn = BatchNorm(axes=(0, 1),
                        activation=K.linear,
                        gamma_init=self.gamma,
                        beta_init=self.beta,
                        mean_init=self.mean,
                        inv_std_init=self.inv_std)
         X = bn(X)
     out = self._rnn(X, h0=h0, mask=mask, **self.get_recurrent_info(kwargs))
     for i in out:
         K.add_shape(i, shape=tuple(input_shape[:-1]) + (self.num_units, ))
     # only care about the first state
     return out[0] if len(out) == 1 else out
Example #4
def _check_cudnn_hidden_init(s0, shape, nnops, name):
    nb_layers, batch_size, hidden_size = shape
    # ====== init s0 ====== #
    if s0 is None and hasattr(nnops, name):
        s0 = getattr(nnops, name)
    elif s0 is not None:
        if callable(s0) or K.is_trainable_variable(s0) or isinstance(
                s0, np.ndarray):
            _ = (nb_layers, 1, hidden_size) if callable(s0) or isinstance(s0, np.ndarray) \
                else K.get_shape(s0)
            s0 = nnops.configuration.create_params(s0,
                                                   shape=_,
                                                   name=name,
                                                   nnops=nnops,
                                                   roles=INITIAL_STATE)
        # ====== check s0 shape ====== #
        init_shape = K.get_shape(s0)
        if K.ndim(s0) == 2:
            if K.get_shape(s0)[-1] != hidden_size:
                raise ValueError(
                    'init state has %d dimension, but the hidden_size=%d' %
                    (init_shape[-1], hidden_size))
        elif init_shape[::2] != (nb_layers, hidden_size):
            raise ValueError('Require init states of size: %s, but '
                             'given state of size: %s' % (shape, init_shape))
        # ====== return the right shape ====== #
        setattr(nnops, name, s0)
    return s0
Example #5
        def test_func(func):
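            # the static shape from K.get_shape must match the evaluated shape, for func and for its transpose func.T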
            y = func(x)
            yT = func.T(func(x))

            self.assertEquals(K.eval(y).shape, K.get_shape(y))

            self.assertEquals(K.eval(yT).shape, (25, 8, 12))
            self.assertEquals(K.eval(yT).shape, K.get_shape(yT))
Example #6
 def _apply(self, X, h0=None, c0=None, mask=None):
     batch_size = K.get_shape(X, native=True)[0]
     is_bidirectional = self.direction_mode == 'bidirectional'
     input_mode = ('skip' if self.input_mode == 'skip'
                   or self.input_mode == 'norm' else 'linear')
     # ====== precompute input ====== #
     # linear or norm input mode
     if self.input_mode == 'norm':
         X = K.dot(X, self.W_in)
         # normalize all axes except the time dimension
         bn = BatchNorm(axes=(0, 1),
                        activation=K.linear,
                        gamma_init=self.gamma,
                        beta_init=self.beta,
                        mean_init=self.mean,
                        inv_std_init=self.inv_std)
         X = bn(X)
         # cudnnRNN doesn't support multiple inputs
         shapeX = K.get_shape(X, native=True)
         ndims = K.ndim(X)
         if 'rnn' in self.rnn_mode: N = 1
         elif self.rnn_mode == 'gru': N = 3
         else: N = 4
         newshape = [shapeX[i]
                     for i in range(ndims - 1)] + [self.num_units, N]
         X = K.mean(K.reshape(X, newshape), axis=-1)
     # ====== hidden state ====== #
     num_layers = self.num_layers * 2 if is_bidirectional else self.num_layers
     require_shape = (num_layers, batch_size, self.num_units)
     h0 = _check_cudnn_hidden_init(h0, require_shape, self, 'h0')
     c0 = _check_cudnn_hidden_init(c0, require_shape, self, 'c0')
     # ====== parameters ====== #
     if self.params_split:
         parameters = K.concatenate([
             K.flatten(i, outdim=1) for i in self.parameters
             if not has_roles(i, INITIAL_STATE)
         ])
     else:
         parameters = self.params
     # ====== return CuDNN RNN ====== #
     results = K.rnn_dnn(X,
                         hidden_size=self.num_units,
                         rnn_mode=self.rnn_mode,
                         num_layers=self.num_layers,
                         parameters=parameters,
                         h0=h0,
                         c0=c0,
                         input_mode=input_mode,
                         direction_mode=self.direction_mode,
                         dropout=self.dropout,
                         name=self.name)
     if not self.return_states:
         results = results[0]  # only get the output
     return results
Example #7
 def test_slice_ops(self):
     X = K.placeholder(shape=(None, 28, 28, 28, 3))
     f = N.Sequence([
         N.Conv(32, 3, pad='same', activation=K.linear),
         N.BatchNorm(activation=K.relu),
         N.Flatten(outdim=4)[:, 8:12, 18:25, 13:],
     ])
     y = f(X)
     fn = K.function(X, y)
     self.assertTrue(
         fn(np.random.rand(12, 28, 28, 28, 3)).shape[1:] == K.get_shape(y)[1:])
     self.assertEqual(K.get_shape(y)[1:], (4, 7, 883))
Example #8
 def _apply(self, X, h0=None, c0=None, mask=None, **kwargs):
     # check input_shape
     input_shape = K.get_shape(X)
     # ====== check mask ====== #
     if mask is not None and (K.ndim(mask) != 2
                              or K.get_shape(mask)[-1] != input_shape[1]):
         raise Exception('Mask must be a 2-D matrix and the time dimension '
                         '(i.e. the second dimension) must equal "%d"'
                         ', but the given mask has shape "%s".' %
                         (input_shape[1], K.get_shape(mask)))
     # add broadcastable dimension for mask
     if mask is not None:
         mask = K.expand_dims(mask, dim=-1)
     # ====== initialize states ====== #
     # hidden states
     h0 = _check_rnn_hidden_states(h0, self, input_shape, 'h0')
     c0 = _check_rnn_hidden_states(c0, self, input_shape, 'c0')
     # turn off repeat_states if batch_size already included
     if K.get_shape(h0)[0] != 1 and K.get_shape(c0)[0] != 1:
         self.repeat_states = False
     # ====== precompute input ====== #
     # linear or norm input mode
     if self.input_mode != 'skip':
         X = K.dot(X, self.W_in)
         if self.input_mode == 'norm':
             # normalize all axes except the time dimension
             bn = BatchNorm(axes=(0, 1),
                            activation=K.linear,
                            gamma_init=self.gamma,
                            beta_init=self.beta,
                            mean_init=self.mean,
                            inv_std_init=self.inv_std)
             X = bn(X)
     # skip input
     elif input_shape[-1] == self.num_units:
         X = K.repeat(X, 4, axes=-1)
     # ====== compute recurrent output ====== #
     out = self._rnn(X,
                     h0=h0,
                     c0=c0,
                     mask=mask,
                     **self.get_recurrent_info(kwargs))
     if not self.return_cell_memory:
         out = out[:-1]
     for i in out:
         K.add_shape(i, shape=input_shape[:-1] + (self.num_units, ))
     # only care about the first state
     return out[0] if len(out) == 1 else out
Example #9
    def test_linear_algebra_value(self):
        np.random.seed(1208)
        x = K.variable(np.random.randn(2, 4, 3))
        y = K.variable(np.random.rand(1, 2, 3, 5))

        z = K.dot(x, y)
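        # generalized dot: contracts the last axis of x with the second-to-last axis of y, giving (2, 4, 1, 2, 5)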
        self.assertEqual(K.get_shape(z), (2, 4, 1, 2, 5))
        self.assertEqual(
            repr(np.sum(K.eval(z)))[:8], "-1.0198305134529524"[:8])

        np.random.seed(1208)
        x = K.variable(np.random.randn(100, 3, 4, 5))
        y = K.variable(np.random.rand(100, 12, 5, 6))
        z = K.batched_dot(x, y)
        self.assertEqual(K.get_shape(z), K.eval(z).shape)
        self.assertEqual(repr(K.eval(z).sum())[:7], "1655.44")
Example #10
 def _initialize(self, X):
     input_shape = K.get_shape(X)
     config = NNConfig(input_shape=input_shape, num_units=self.num_units)
     # ====== check input ====== #
     _init_input2hidden(self,
                        config,
                        rnn_mode='gru',
                        input_mode=self.input_mode,
                        W_init=self.W_in_init,
                        input_dims=input_shape[-1],
                        hidden_dims=self.num_units)
     # ====== initialize inner parameters ====== #
     # W_update, W_reset, W_hidden
     config.create_params(self.W_hid_init,
                          shape=(self.num_units, self.num_units),
                          name='W_hid',
                          nnops=self,
                          roles=WEIGHT,
                          nb_params=3)
     # bias
     if self.b_init is not None:
         config.create_params(self.b_init,
                              shape=(self.num_units, ),
                              name='b',
                              nnops=self,
                              roles=BIAS,
                              nb_params=3)
     return config
Example #11
 def _initialize(self, X):
     input_shape = K.get_shape(X)
     config = NNConfig(input_shape=input_shape, num_units=self.num_units)
     # ====== initialize inner parameters ====== #
     W_init = as_tuple(self.W_init, N=2)
     _init_input2hidden(self,
                        config,
                        rnn_mode='rnn',
                        input_mode=self.input_mode,
                        W_init=W_init[0],
                        input_dims=input_shape[-1],
                        hidden_dims=self.num_units)
     # hidden connection
     config.create_params(W_init[1],
                          shape=(self.num_units, self.num_units),
                          name='W_hid',
                          nnops=self,
                          roles=WEIGHT)
     # bias
     if self.b_init is not None:
         config.create_params(self.b_init,
                              shape=(self.num_units, ),
                              name='b',
                              nnops=self,
                              roles=BIAS)
     return config
Example #12
 def _apply(self, x):
     input_shape = K.get_shape(x)
     if input_shape[self.axis] != 1:
         raise ValueError(
             'The squeeze axis=%d must be 1, but got %d instead' %
             (self.axis, input_shape[self.axis]))
     return K.squeeze(x, axis=self.axis)
Example #13
 def _apply(self, x, **kwargs):
     y = self.ops.apply(x, **kwargs)
     return_list = True
     if not isinstance(y, (tuple, list)):
         return_list = False
         y = [y]
     # apply slice and calculate the shape
     output = []
     for i in y:
         shape = K.get_shape(i)
         i = i[self.slice]
         # if the static shape is known, compute the new output shape
         if isinstance(shape, (tuple, list)):
             new_shape = []
             for dim, idx in zip(shape, self.slice):
                 if isinstance(idx, numbers.Number):
                     dim = -1
                 elif dim is not None and isinstance(idx, slice):
                     dim = idx.indices(dim)
                     dim = dim[1] - dim[0]
                 # -1 means that dimension is removed by an integer index
                 if dim is None or dim > 0:
                     new_shape.append(dim)
             # slice is not specified for all dimensions
             if len(new_shape) < K.ndim(i):
                 new_shape += shape[len(self.slice):]
             # add the new shape
             K.add_shape(i, new_shape)
         output.append(i)
     # return output
     if return_list:
         return output
     return output[0]
Example #14
    def _initialize(self, x):
        input_shape = K.get_shape(x)
        config = NNConfig(num_inputs=input_shape[-1])
        shape = (input_shape[-1], self.num_units)
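        # two weight matrices of this shape: one projects to the mean, the other to the log-sigma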

        config.create_params(self.W_init,
                             shape,
                             'W_mean',
                             nnops=self,
                             roles=WEIGHT)
        config.create_params(self.W_init,
                             shape,
                             'W_logsigma',
                             nnops=self,
                             roles=WEIGHT)
        if self.b_init is not None:
            config.create_params(self.b_init, (self.num_units, ),
                                 'b_mean',
                                 nnops=self,
                                 roles=BIAS)
            config.create_params(self.b_init, (self.num_units, ),
                                 'b_logsigma',
                                 nnops=self,
                                 roles=BIAS)
        return config
Example #15
 def sampling(self, x):
     mean, logsigma = self.get_mean_logsigma(x)
     epsilon = K.random_normal(shape=K.get_shape(mean),
                               mean=0.0,
                               std=1.0,
                               dtype=mean.dtype)
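     # reparameterization trick: z = mean + exp(logsigma) * epsilon, with epsilon ~ N(0, 1)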
     z = mean + K.exp(logsigma) * epsilon
     return z
Example #16
 def _apply(self, x):
     input_shape = K.get_shape(x)
     _validate_input_shape(input_shape)
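     # keep the last (outdim - 1) axes and collapse all leading axes into one dimension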
     other_shape = tuple([
         input_shape[i]
         for i in range(K.ndim(x) - self.outdim + 1, K.ndim(x))
     ])
     return K.reshape(x, (-1, ) + other_shape)
Example #17
    def test_conv2D(self):
        x = K.placeholder((None, 28, 28, 3))
        f1 = N.Conv(16, (3, 3), strides=(2, 2), pad='same')
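        # 'same' padding with stride 2 halves the 28x28 spatial dimensions to 14x14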
        y = f1(x)

        f = K.function(x, y)
        z = f(np.random.rand(12, 28, 28, 3))

        self.assertEquals(z.shape, (12, 14, 14, 16))
        self.assertEquals(K.get_shape(y), (None, 14, 14, 16))

        # ====== transpose convolution ====== #
        y = f1.T(y)
        f = K.function(x, y)
        z = f(np.random.rand(12, 28, 28, 3))
        self.assertEquals(z.shape, (12, 28, 28, 3))
        self.assertEquals(K.get_shape(y), (None, 28, 28, 3))
Example #18
    def test_dilatedConv(self):
        x = K.placeholder((None, 28, 28, 3))
        f1 = N.Conv(16, (3, 3), dilation=(2, 2))
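        # a 3x3 kernel dilated by 2 covers a 5x5 window, so the unpadded output here is 24x24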
        y = f1(x)

        f = K.function(x, y)
        z = f(np.random.rand(12, 28, 28, 3))

        self.assertEquals(z.shape, (12, 24, 24, 16))
        self.assertEquals(K.get_shape(y), (None, 24, 24, 16))
Example #19
    def test_conv3D(self):
        x = K.placeholder((None, 28, 28, 28, 3))
        f1 = N.Conv(16, (3, 3, 3), strides=1, pad='valid')
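        # 'valid' 3x3x3 convolution trims each spatial dimension from 28 to 26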
        y = f1(x)

        f = K.function(x, y)
        z = f(np.random.rand(12, 28, 28, 28, 3))

        self.assertEquals(z.shape, (12, 26, 26, 26, 16))
        self.assertEquals(K.get_shape(y), (None, 26, 26, 26, 16))
Example #20
 def _apply(self, x, **kwargs):
     if self.debug:
         print('**************** Sequences: %s ****************' %
               self.name)
         print('Is training:', K.is_training())
         print('First input:', K.get_shape(x))
     for op in self.ops:
         x = op(x, **_shrink_kwargs(op, kwargs))
         # print after finishing the op
         if self.debug:
             print(' ', str(op), '->',
                   [K.get_shape(i) for i in x]
                   if isinstance(x, (tuple, list)) else K.get_shape(x))
     # end debug
     if self.debug:
         print()
     return x
Example #21
        def test_func(func):
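            # static shapes from K.get_shape must match evaluated shapes for single-axis, keepdims, and multi-axis reductions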
            self.assertEquals(K.get_shape(func(x, 0)),
                              K.eval(func(x, 0)).shape)
            self.assertEquals(K.get_shape(func(x, -1)),
                              K.eval(func(x, -1)).shape)
            self.assertEquals(K.get_shape(func(x, 1, True)),
                              K.eval(func(x, 1, True)).shape)

            self.assertEquals(K.get_shape(func(x, 0)), K.get_shape(func(y, 0)))
            self.assertEquals(K.get_shape(func(x, 0, True)),
                              K.get_shape(func(y, 0, True)))

            if func != K.argmax and func != K.argmin:
                self.assertEquals(K.get_shape(func(x, (1, -1))),
                                  K.eval(func(x, (1, -1))).shape)
                self.assertEquals(K.get_shape(func(x, (0, 1))),
                                  K.eval(func(x, (0, 1))).shape)
                self.assertEquals(K.get_shape(func(x, (0, 1), True)),
                                  K.eval(func(x, (0, 1), True)).shape)
Example #22
 def set_outputs(self, *outputs):
     self._output_info = []
     self._outputs = []
     for i in outputs:
         if not K.is_placeholder(i):
             raise ValueError('Only accept input which is placeholder.')
         name, dtype, shape = i.name, i.dtype, K.get_shape(i)
         self._output_info.append((name, dtype, shape))
         self._outputs.append(i)
     return self
Example #23
 def test_flatten(self):
     x = K.placeholder(shape=(None, 8, 12, 25, 18))
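     # flatten to each outdim from 1 to 4 and check the static shape against the evaluated shape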
     for i in range(1, 5):
         y = K.flatten(x, outdim=i)
         f = K.function(x, y)
         shape1 = K.get_shape(y)
         shape2 = f(np.random.rand(16, 8, 12, 25, 18)).shape
         self.assertEqual(len(shape1), len(shape2))
         self.assertTrue(
             all(i == j for i, j in zip(shape1, shape2) if i is not None))
Example #24
    def set_inputs(self, *inputs):
        self._input_info = []
        self._inputs = []
        for i in inputs:
            if not K.is_placeholder(i):
                raise ValueError('Only accept input which is placeholder.')
            name, dtype, shape = i.name, i.dtype, K.get_shape(i)
            self._input_info.append([name, dtype, shape])
            self._inputs.append(i)
        # ====== Try to check if the inputs match the Ops ====== #
        try:
            # call this to initialize the parameters and get
            # estimated output shape (we assume training and deploying
            # mode get the same shape).
            for i in self._inputs:
                add_role(i, TRAINING)
            self._y_train = self._seq_ops(*self._inputs)

            for i in self._inputs:
                add_role(i, DEPLOYING)
            self._y_pred = self._seq_ops(*self._inputs)

            # create default output
            if len(self._output_info) == 0:
                shape = K.get_shape(self._y_train)
                self._outputs = [
                    K.placeholder(shape=shape,
                                  dtype=self._y_train.dtype,
                                  name='output1')
                ]
                self._output_info = [('output1', self._y_train.dtype, shape)]

            # reset all functions
            for i, j in self._functions.items():
                del self._functions[i]
                del j
            self._functions = {}
        except Exception as e:
            warnings.warn('Inputs do not match the Ops requirements, '
                          'error: ' + str(e))
            self._input_info = []
            self._inputs = []
Example #25
 def _apply(self, x):
     input_shape = K.get_shape(x)
     # calculate statistics
     mean, logsigma = self.get_mean_logsigma(x)
     # variational output
     output = mean
     if K.is_training():
         output = self.sampling(x)
     # set shape for output
     K.add_shape(output, input_shape[:-1] + (self.num_units, ))
     return output
Example #26
 def _apply(self, x):
     input_shape = K.get_shape(x)
     # calculate projection
     activation = K.dot(x, self.W)
     if hasattr(self, 'b') and self.b is not None:
         activation = activation + self.b
     # set shape for output
     K.add_shape(activation, input_shape[:-1] + (self.num_units, ))
     # Nonlinearity might change the shape of activation
     activation = self.activation(activation)
     return activation
Example #27
def _recurrsive_extract_shape(x):
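    # recursively collect the static shapes of all variables nested inside x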
    shape_list = []
    if not isinstance(x, (tuple, list)):
        x = [x]
    for i in x:
        if K.is_variable(i):
            shape = K.get_shape(i)
            if isinstance(shape, (tuple, list)):
                shape_list.append(shape)
        elif isinstance(i, (tuple, list)):
            shape_list += _recurrsive_extract_shape(i)
    return shape_list
Example #28
 def test_helper_ops_variables(self):
     X = K.placeholder(shape=(10, 20))
     f = N.Sequence([
         N.Dense(12),
         N.Dense(8),
         N.BatchNorm(),
         N.Dense(25, W_init=K.zeros(shape=(8, 25)))
     ])
     y = f(X)
     self.assertEqual(K.get_shape(y), (10, 25))
     self.assertEqual(len(f.variables), 10)
     self.assertEqual(len(f.parameters), 7)
     self.assertEqual(len(f.trainable_variables), 9)
Example #29
    def test_dense(self):
        x = K.placeholder((None, 10))

        f1 = N.Dense(20)
        f2 = N.Dense(30)

        y = f2(f1(x))
        y = f1.T(f2.T(y))

        f = K.function(x, y)
        x = f(np.random.rand(12, 10))

        self.assertEquals(x.shape, (12, 10))
        self.assertEquals(K.get_shape(y), (None, 10))
Example #30
    def test_seq(self):
        X = K.placeholder((None, 28, 28, 1))
        f = N.Sequence([
            N.Conv(8, (3, 3), strides=1, pad='same'),
            N.Dimshuffle(pattern=(0, 3, 1, 2)),
            N.FlattenLeft(outdim=2),
            N.Noise(level=0.3, noise_dims=None, noise_type='gaussian'),
            N.Dense(128, activation=K.relu),
            N.Dropout(level=0.3, noise_dims=None),
            N.Dense(10, activation=K.softmax)
        ])
        K.set_training(True)
        y = f(X)
        K.set_training(False)
        yT = f.T(y)
        f1 = K.function(X, y)
        f2 = K.function(X, yT)

        f = cPickle.loads(cPickle.dumps(f))
        K.set_training(True)
        y = f(X)
        K.set_training(False)
        yT = f.T(y)
        f3 = K.function(X, y)
        f4 = K.function(X, yT)

        x = np.random.rand(12, 28, 28, 1)

        self.assertEquals(f1(x).shape, (2688, 10))
        self.assertEquals(f3(x).shape, (2688, 10))
        self.assertEqual(np.round(f1(x).sum(), 4), np.round(f3(x).sum(), 4))
        self.assertEquals(K.get_shape(y), (None, 10))

        self.assertEquals(f2(x).shape, (12, 28, 28, 1))
        self.assertEquals(f4(x).shape, (12, 28, 28, 1))
        self.assertEqual(str(f2(x).sum())[:4], str(f4(x).sum())[:4])
        self.assertEquals(K.get_shape(yT), (None, 28, 28, 1))
Example #31
def convolutional_vae(X, saved_states, **kwargs):
    """ convolutional_vae

    Return
    ------
    (samples, z, qz)

    States
    ------
    [f_inference (encoder), f_generative (decoder)]

    """
    n = kwargs.get('n', 10)
    batch_size = K.get_shape(X)[0]
    if batch_size is None:
        raise ValueError("You must specify batch_size dimension for the input placeholder.")
    # ====== init ====== #
    if saved_states is None:
        # Encoder
        f_inference = N.Sequence([
            N.Reshape(shape=(-1, 28, 28, 1)),
            N.Conv(num_filters=32, filter_size=3, strides=1, pad='valid',
                   b_init=init_ops.constant_initializer(0.), activation=K.elu),
            N.Conv(num_filters=64, filter_size=5, strides=2, pad='same',
                   b_init=init_ops.constant_initializer(0.), activation=K.elu),

            N.Dropout(level=0.1),
            N.Flatten(outdim=2),

            N.Dense(num_units=n * 2, b_init=None),
            N.BatchNorm(axes=0)
        ], debug=True, name='Encoder')
        # Decoder
        f_generative = N.Sequence([
            N.Dimshuffle(pattern=(0, 'x', 'x', 1)),
            N.TransposeConv(num_filters=64, filter_size=3, strides=1, pad='valid',
                            b_init=init_ops.constant_initializer(0.), activation=K.elu),
            N.TransposeConv(num_filters=32, filter_size=5, strides=2, pad='same',
                            b_init=init_ops.constant_initializer(0.), activation=K.elu),
            N.TransposeConv(num_filters=1, filter_size=13, strides=3, pad='valid',
                            b_init=None),
            N.BatchNorm(activation=K.linear),

            N.Flatten(outdim=3)
        ], debug=True, name="Decoder")
    else:
        f_inference, f_generative = saved_states
    # ====== Perform ====== #
    # Encoder
    y_encoder = f_inference(K.cast(X, 'float32'))
    mu = y_encoder[:, :n]
    sigma = K.softplus(y_encoder[:, n:])
    qz = Normal(mu=mu, sigma=sigma, name='Normal_qz')
    # Decoder
    z = Normal(mu=K.zeros(shape=(batch_size, n)),
               sigma=K.ones(shape=(batch_size, n)), name="Normal_pz")
    logits = f_generative(z)
    X_reconstruct = Bernoulli(logits=logits)
    # inference
    params = f_inference.parameters + f_generative.parameters
    inference = ed.KLqp(latent_vars={z: qz}, data={X_reconstruct: X})
    # ====== get cost for training ====== #
    # Bind p(x, z) and q(z | x) to the same placeholder for x.
    if K.is_training():
        import tensorflow as tf
        inference.initialize()
        if True:
            optimizer = tf.train.AdamOptimizer(0.01, epsilon=1.0)
            updates = optimizer.apply_gradients(
                optimizer.compute_gradients(inference.loss, var_list=params))
            init = tf.global_variables_initializer()
            init.run()
            f_train = K.function(X, inference.loss, updates)
        else:
            optimizer = tf.train.AdamOptimizer(0.01, epsilon=1.0)
            inference.initialize(optimizer=optimizer, var_list=params)
            init = tf.global_variables_initializer()
            init.run()
            f_train = lambda x: inference.update(feed_dict={X: x})['loss']
    samples = K.sigmoid(logits)
    return (samples, z, qz), (f_inference, f_generative)
Example #32
    def test_odin_vs_lasagne(self):
        X1 = K.placeholder(shape=(None, 28, 28))
        X2 = K.placeholder(shape=(None, 784))

        def lasagne_net1():
            "FNN"
            i = lasagne.layers.InputLayer(shape=(None, 784))
            i.input_var = X2

            i = lasagne.layers.DenseLayer(i, num_units=32, W=random(784, 32), b=zeros(32),
                nonlinearity=lasagne.nonlinearities.rectify)
            i = lasagne.layers.DenseLayer(i, num_units=16, W=random(32, 16), b=zeros(16),
                nonlinearity=lasagne.nonlinearities.softmax)
            return X2, lasagne.layers.get_output(i)

        def odin_net1():
            "FNN"
            f = N.Sequence([
                N.Dense(32, W_init=random(784, 32), b_init=zeros(32),
                    activation=K.relu),
                N.Dense(16, W_init=random(32, 16), b_init=zeros(16),
                    activation=K.softmax)
            ])
            return X2, f(X2)

        def lasagne_net2():
            "CNN"
            i = lasagne.layers.InputLayer(shape=(None, 28, 28))
            i.input_var = X1

            i = lasagne.layers.DimshuffleLayer(i, (0, 'x', 1, 2))
            i = lasagne.layers.Conv2DLayer(i, 12, (3, 3), stride=(1, 1), pad='same',
                untie_biases=False,
                W=random(12, 1, 3, 3),
                nonlinearity=lasagne.nonlinearities.rectify)
            i = lasagne.layers.Pool2DLayer(i, pool_size=(2, 2), stride=None, mode='max',
                        ignore_border=True)
            i = lasagne.layers.Conv2DLayer(i, 16, (3, 3), stride=(1, 1), pad='same',
                untie_biases=False,
                W=random(16, 12, 3, 3),
                nonlinearity=lasagne.nonlinearities.sigmoid)
            return X1, lasagne.layers.get_output(i)

        def odin_net2():
            "CNN"
            f = N.Sequence([
                N.Dimshuffle((0, 1, 2, 'x')),
                N.Conv(12, (3, 3), strides=(1, 1), pad='same',
                    untie_biases=False,
                    W_init=random(3, 3, 1, 12),
                    activation=K.relu),
                N.Pool(pool_size=(2, 2), strides=None, mode='max'),
                N.Conv(16, (3, 3), strides=(1, 1), pad='same',
                    untie_biases=False,
                    W_init=random(3, 3, 12, 16),
                    activation=K.sigmoid),
                N.Dimshuffle((0, 3, 1, 2))
            ])
            return X1, f(X1)

        def lasagne_net3():
            "RNN"
            i = lasagne.layers.InputLayer(shape=(None, 28, 28))
            i.input_var = X1

            W = [random(28, 32), random(32, 32), random(32), random_bin(12, 28)]
            i = lasagne.layers.RecurrentLayer(i, num_units=32,
                W_in_to_hid=W[0],
                W_hid_to_hid=W[1],
                b=W[2],
                nonlinearity=lasagne.nonlinearities.rectify,
                hid_init=zeros(1, 32),
                backwards=False,
                learn_init=False,
                gradient_steps=-1,
                grad_clipping=0,
                unroll_scan=False,
                precompute_input=True,
                mask_input=None,
                only_return_final=False)
            return X1, lasagne.layers.get_output(i)

        def odin_net3():
            "RNN"
            W = [random(28, 32), random(32, 32), random(32), random_bin(12, 28)]
            f = N.Sequence([
                N.Dense(num_units=32, W_init=W[0], b_init=W[2],
                    activation=K.linear),
                N.RNN(num_units=32, activation=K.relu,
                    W_init=W[1])
            ])
            return X1, f(X1, hid_init=zeros(1, 32))

        func_list = [
            (lasagne_net1, odin_net1),
            # (lasagne_net2, odin_net2),
            (lasagne_net3, odin_net3)
        ]
        print()
        for i, j in func_list:
            print('Test:', i.__name__, j.__name__)
            seed = np.random.randint(10e8)
            # ====== call the function ====== #
            np.random.seed(seed)
            i = i()
            np.random.seed(seed)
            j = j()
            # ====== create theano function ====== #
            f1 = K.function(i[0], i[1])
            f2 = K.function(j[0], j[1])
            shape = K.get_shape(i[0])
            # ====== get the output ====== #
            x = np.random.rand(*[12 if s is None else s for s in shape])
            y1 = f1(x)
            y2 = f2(x)
            self.assertEqual(y1.shape, y2.shape)
            self.assertAlmostEqual(np.sum(np.abs(y1 - y2)), 0.)