Example #1
 def __setstate__(self, states):
     (
         self._seq_ops,
         self._input_info,  # list: (name, dtype, shape)
         self._output_info,
         self._path,
         self._loss,
         self._optimizer,
         self._train_args,
         self._metric,
         self._n_epoch,
         self._valid_freq,
         self._batch_size,
         self._seed,
         self._extensions) = states
     # ====== create some default values ====== #
     self._y_train = None
     self._y_pred = None
     self._functions = {}
     # ====== initialize ====== #
     self.set_inputs(*[
         K.placeholder(shape=shape, dtype=dtype, name=name)
         for name, dtype, shape in self._input_info
     ])
     self.set_outputs(*[
         K.placeholder(shape=shape, dtype=dtype, name=name)
         for name, dtype, shape in self._output_info
     ])
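
For context, a matching __getstate__ would serialize only the (name, dtype, shape)
descriptions of the placeholders rather than the tensors themselves, since the
placeholders are rebuilt on load. A minimal sketch, assuming the attribute names
used above:

 def __getstate__(self):
     return (self._seq_ops,
             self._input_info,   # (name, dtype, shape) triples
             self._output_info,
             self._path,
             self._loss,
             self._optimizer,
             self._train_args,
             self._metric,
             self._n_epoch,
             self._valid_freq,
             self._batch_size,
             self._seed,
             self._extensions)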
Example #2
 def test_simple_rnn(self):
     np.random.seed(12082518)
     x = np.random.rand(128, 8, 32)
     #
     X = K.placeholder(shape=(None, 8, 32))
     X1 = K.placeholder(shape=(None, 8, 32))
     X2 = K.placeholder(shape=(None, 8, 32))
     X3 = K.placeholder(shape=(None, 8, 33))
     f = N.RNN(32, activation=K.relu, input_mode='skip')
     #
     y = f(X, mask=K.ones(shape=(128, 8)))
     graph = K.ComputationGraph(y)
     self.assertEqual(len(graph.inputs), 1)
     f1 = K.function([X], y)
     x1 = f1(x)
     # ====== different placeholder ====== #
     y = f(X1)
     f2 = K.function([X1], y)
     x2 = f2(x)
     self.assertEqual(np.sum(x1[0] == x2[0]), np.prod(x1[0].shape))
     # ====== pickle load ====== #
     f = cPickle.loads(cPickle.dumps(f))
     y = f(X2)
     f2 = K.function([X2], y)
     x3 = f2(x)
     self.assertEqual(np.sum(x2[0] == x3[0]), np.prod(x2[0].shape))
     # ====== other input shape ====== #
     error_happen = False
     try:
         y = f(X3)
         f3 = K.function([X3], y)
         x3 = f3(np.random.rand(128, 8, 33))
     except Exception:
         error_happen = True
     self.assertTrue(error_happen)
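
The manual error_happen flag can be written more idiomatically with unittest's
assertRaises context manager; an equivalent sketch:

     with self.assertRaises(Exception):
         y = f(X3)
         f3 = K.function([X3], y)
         f3(np.random.rand(128, 8, 33))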
Example #3
    def test_ops(self):
        x = K.variable(np.random.rand(8, 12))
        y = K.variable(np.random.rand(12, 25))
        z = K.placeholder((25, 18, 13))
        w = K.placeholder((18, 18))

        # ====== dot ====== #
        t = K.dot(x, y)
        self.assertEquals(K.get_shape(t), (8, 25))
        self.assertEquals(K.get_shape(t), K.eval(t).shape)
        t = K.dot(t, K.dimshuffle(z, (1, 0, 2)))
        self.assertEquals(K.get_shape(t), (8, 18, 13))

        # ====== transpose ====== #
        self.assertEquals(K.get_shape(K.transpose(z)), (13, 18, 25))
        self.assertEquals(K.get_shape(K.transpose(t, axes=(2, 0, 1))),
                          (13, 8, 18))

        # ====== eye ====== #
        self.assertEquals(K.get_shape(K.eye(5)), K.eval(K.eye(5)).shape)
        # ====== diag ====== #
        self.assertEquals(K.get_shape(K.diag(w)), (18, ))
        # self.assertEquals(K.get_shape(K.diag(x)),
        # K.eval(K.diag(y)).shape)
        self.assertEquals(K.get_shape(K.square(x)), K.eval(K.square(x)).shape)
        self.assertEquals(K.get_shape(K.abs(x)), K.eval(K.abs(x)).shape)
        self.assertEquals(K.get_shape(K.sqrt(x)), K.eval(K.sqrt(x)).shape)
        self.assertEquals(K.get_shape(K.exp(x)), K.eval(K.exp(x)).shape)
        self.assertEquals(K.get_shape(K.log(x)), K.eval(K.log(x)).shape)
        self.assertEquals(K.get_shape(K.round(x)), K.eval(K.round(x)).shape)
        self.assertEquals(K.get_shape(K.pow(x, 2)), K.eval(K.pow(x, 2)).shape)
        self.assertEquals(K.get_shape(K.clip(x, -1, 1)),
                          K.eval(K.clip(x, -1, 1)).shape)
        self.assertEquals(K.get_shape(K.inv(x)), K.eval(K.inv(x)).shape)
Example #4
 def _initialize(self, X, y=None):
     with tf.name_scope(self.name):
         # ====== input_shape ====== #
         if self._input_shape is None:
             self._input_shape = X.shape
         elif self.input_shape[1:] != X.shape[1:]:
             raise ValueError(
                 "Initialized with input shape: %s, given tensor with shape: %s"
                 % (self.input_shape, X.shape))
         # ====== output_shape ====== #
         if self._output_shape is None:
             self._output_shape = y.shape
         elif self.output_shape[1:] != y.shape[1:]:
             raise ValueError(
                 "Initialized with output shape: %s, given tensor with shape: %s"
                 % (self.output_shape, y.shape))
         # ====== placeholder ====== #
         self._X = K.placeholder(shape=self.input_shape,
                                 dtype=self.dtype,
                                 name='input')
         self._y = K.placeholder(shape=self.output_shape,
                                 dtype=self.dtype,
                                 name='output')
         # ====== run the network ====== #
         y_pred_logits = self.network.apply(self._X)
         nb_classes = y_pred_logits.shape.as_list()[-1]
         if len(self._output_shape) == 1:
             y_true = tf.one_hot(indices=tf.cast(self._y, 'int32'),
                                 depth=nb_classes)
         elif self._output_shape[-1] != nb_classes:
             raise ValueError(
                 "Given %d classes, but output from network has %s classes"
                 % (self._output_shape[-1], nb_classes))
         self._nb_classes = nb_classes
         # ====== sigmoid or softmax ====== #
         if nb_classes == 2:
             fn_activation = tf.nn.sigmoid
             fn_loss = tf.losses.sigmoid_cross_entropy
             fn_acc = K.metrics.binary_accuracy
         else:
             fn_activation = tf.nn.softmax
             fn_loss = tf.losses.softmax_cross_entropy
             fn_acc = K.metrics.categorical_accuracy
         y_pred_proba = fn_activation(y_pred_logits)
         # ====== class weight ====== #
          if self._class_weights is None:
              class_weights = np.ones(shape=(nb_classes,), dtype=self.dtype)
          else:
              class_weights = as_tuple(self._class_weights,
                                       N=self.nb_classes, t=float)
         class_weights = tf.constant(value=class_weights,
                                     dtype=self.dtype,
                                     name="class_weights")
         weights = tf.gather(
             class_weights,
             tf.cast(self._y, 'int32')
             if self.nb_classes == 2 else tf.argmax(self._y, axis=-1))
         # ====== objectives ====== #
         cost_train = fn_loss(y_true, logits=y_pred_logits, weights=weights)
         exit()
Example #5
    def test_conv_deconv_transpose(self):
        def feval(X, y):
            f = K.function(X, y)
            shape = (np.random.randint(8, 18), ) + tuple(X.shape.as_list()[1:])
            x = np.random.rand(*shape)
            return f(x)

        prog = Progbar(target=2 * 3 * 3 * 2 * 2, print_report=True)
        for X in (K.placeholder(shape=(None, 13, 12, 25)),
                  K.placeholder(shape=(None, 13, 12, 8, 25))):
            for strides in (1, 2, 3):
                for filter_size in (3, 4, 5):
                    for num_filters in (8, 25):
                        for pad in ("same", "valid"):
                            for dilation in (1, ):
                                # ====== progress ====== #
                                prog['test'] = "#Dim:%d;Stride:%d;Filter:%d;Channel:%d;Pad:%s" % \
                                    (X.shape.ndims, strides, filter_size, num_filters, pad)
                                prog.add(1)
                                # ====== test Conv ====== #
                                f = N.Conv(num_filters=num_filters,
                                           filter_size=filter_size,
                                           pad=pad,
                                           strides=strides,
                                           activation=tf.nn.relu,
                                           dilation=dilation)
                                fT = f.T
                                y = f(X)
                                self.assertEqual(
                                    feval(X, y).shape[1:],
                                    tuple(y.shape.as_list()[1:]))
                                yT = fT(y)
                                self.assertEqual(
                                    feval(X, yT).shape[1:],
                                    tuple(yT.shape.as_list()[1:]))
                                self.assertEqual(X.shape.as_list(),
                                                 yT.shape.as_list())
                                # ====== test Transpose ====== #
                                f = N.TransposeConv(num_filters=num_filters,
                                                    filter_size=filter_size,
                                                    pad=pad,
                                                    strides=strides,
                                                    activation=K.relu,
                                                    dilation=dilation)
                                fT = f.T
                                y = f(X)
                                self.assertEqual(
                                    feval(X, y).shape[1:],
                                    tuple(y.shape.as_list()[1:]))
                                yT = fT(y)
                                self.assertEqual(
                                    feval(X, yT).shape[1:],
                                    tuple(yT.shape.as_list()[1:]))
                                self.assertEqual(X.shape.as_list(),
                                                 yT.shape.as_list())
Example #6
    def test_pool_depool(self):
        X1 = K.placeholder(shape=(None, 12, 8, 25), name='X1')
        X2 = K.placeholder(shape=(None, 12, 8, 25, 18), name='X2')
        x1 = np.random.rand(13, 12, 8, 25)
        x2 = np.random.rand(13, 12, 8, 25, 18)
        prog = Progbar(target=2 * 2 * 2 * 3, print_report=True)

        def check_shape(s1, s2):
            self.assertEqual(tuple(s1),
                             tuple(s2),
                             msg="%s != %s" % (str(s1), str(s2)))

        for pool_size in (2, 3):
            for strides in (2, 3):
                # strides > window_shape not supported due to inconsistency
                # between CPU and GPU implementations
                if pool_size < strides:
                    prog.add(1)
                    continue
                for pad in ('valid', 'same'):
                    for transpose_mode in ('nn', 'pad_margin', 'repeat'):
                        # ====== print prog ====== #
                        prog['test'] = "Size:%d,Stride:%d,Pad:%s,T:%s" % \
                            (pool_size, strides, pad, transpose_mode)
                        prog.add(1)
                        # ====== check ops 4D ====== #
                        down = N.Pool(pool_size=pool_size,
                                      strides=strides,
                                      pad=pad,
                                      mode='max',
                                      transpose_mode=transpose_mode)
                        up = down.T
                        y1 = down(X1)
                        check_shape(K.eval(y1, {X1: x1}).shape[1:],
                                    y1.shape.as_list()[1:])
                        y2 = up(y1)
                        check_shape(K.eval(y2, {X1: x1}).shape, x1.shape)
                        # ====== check ops 5D ====== #
                        down = N.Pool(pool_size=pool_size,
                                      strides=strides,
                                      pad=pad,
                                      mode='max',
                                      transpose_mode=transpose_mode)
                        up = down.T
                        y1 = down(X2)
                        check_shape(K.eval(y1, {X2: x2}).shape[1:],
                                    y1.shape.as_list()[1:])
                        y2 = up(y1)
                        check_shape(K.eval(y2, {X2: x2}).shape, x2.shape)
Example #7
    def test_batch_norm(self):
        K.set_training(True)
        x = K.placeholder((None, 8, 12))
        y = N.BatchNorm()(x)
        f = K.function(x, y)
        z = f(np.random.rand(25, 8, 12))
        self.assertEquals(z.shape, (25, 8, 12))

        # ====== Not training ====== #
        K.set_training(False)
        x = K.placeholder((None, 8, 12))
        y = N.BatchNorm()(x)
        f = K.function(x, y)
        z = f(np.random.rand(25, 8, 12))
        self.assertEquals(z.shape, (25, 8, 12))
Example #9
 def test_computational_graph3(self):
     # validate the number of updates found by ComputationGraph
     X = K.placeholder(shape=(None, 28, 28, 3))
     f = N.Sequence([
         N.Conv(32, 3, pad='same', activation=K.linear),
         N.BatchNorm(activation=K.relu),
         N.Flatten(outdim=2),
         N.Dense(16),
         N.BatchNorm(),
         N.Dense(10)
     ])
     K.set_training(True)
     y_train = f(X)
     K.set_training(False)
     y_score = f(X)
     self.assertTrue(
         K.get_shape(y_train) == K.get_shape(y_score)
         and K.get_shape(y_score) == (None, 10))
     cc_train = K.ComputationGraph(y_train)
     cc_score = K.ComputationGraph(y_score)
     self.assertTrue(len(cc_score.updates) == 0)
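      # 2 BatchNorm layers x (moving mean, moving variance) = 4 training updates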
     self.assertTrue(len(cc_train.updates) == 4)
     # create real function
     fn_train = K.function(X, y_train)
     fn_score = K.function(X, y_score)
     shape1 = fn_train(np.random.rand(12, 28, 28, 3)).shape
     shape2 = fn_score(np.random.rand(12, 28, 28, 3)).shape
     self.assertTrue(shape1 == shape2 and shape1 == (12, 10))
Example #10
    def test_seq(self):
        X = K.placeholder((None, 28, 28, 1))
        f = N.Sequence([
            N.Conv(8, (3, 3), strides=1, pad='same'),
            N.Dimshuffle(pattern=(0, 3, 1, 2)),
            N.Flatten(outdim=2),
            N.Noise(level=0.3, noise_dims=None, noise_type='gaussian'),
            N.Dense(128, activation=tf.nn.relu),
            N.Dropout(level=0.3, noise_dims=None),
            N.Dense(10, activation=tf.nn.softmax)
        ])
        y = f(X)
        yT = f.T(y)
        f1 = K.function(X, y, defaults={K.is_training(): True})
        f2 = K.function(X, yT, defaults={K.is_training(): False})

        f = cPickle.loads(cPickle.dumps(f))
        y = f(X)
        yT = f.T(y)
        f3 = K.function(X, y, defaults={K.is_training(): True})
        f4 = K.function(X, yT, defaults={K.is_training(): False})

        x = np.random.rand(12, 28, 28, 1)

        self.assertEquals(f1(x).shape, (2688, 10))
        self.assertEquals(f3(x).shape, (2688, 10))
        self.assertEqual(np.round(f1(x).sum(), 4), np.round(f3(x).sum(), 4))
        self.assertEquals(y.shape.as_list(), [None, 10])

        self.assertEquals(f2(x).shape, (12, 28, 28, 1))
        self.assertEquals(f4(x).shape, (12, 28, 28, 1))
        self.assertEqual(str(f2(x).sum())[:4], str(f4(x).sum())[:4])
        self.assertEquals(yT.shape.as_list(), [None, 28, 28, 1])
Example #11
    def test_dilatedConv(self):
        x = K.placeholder((None, 28, 28, 3))
        f1 = N.Conv(16, (3, 3), dilation=(2, 2))
        y = f1(x)

        f = K.function(x, y)
        z = f(np.random.rand(12, 28, 28, 3))

        self.assertEquals(z.shape, (12, 24, 24, 16))
        self.assertEquals(y.shape.as_list(), [None, 24, 24, 16])
Example #12
    def test_conv3D(self):
        x = K.placeholder((None, 28, 28, 28, 3))
        f1 = N.Conv(16, (3, 3, 3), strides=1, pad='valid')
        y = f1(x)

        f = K.function(x, y)
        z = f(np.random.rand(12, 28, 28, 28, 3))

        self.assertEquals(z.shape, (12, 26, 26, 26, 16))
        self.assertEquals(y.shape.as_list(), [None, 26, 26, 26, 16])
Example #13
 def test_flatten(self):
     x = K.placeholder(shape=(None, 8, 12, 25, 18))
     for i in range(1, 5):
         y = K.flatten(x, outdim=i)
         f = K.function(x, y)
         shape1 = K.get_shape(y)
         shape2 = f(np.random.rand(16, 8, 12, 25, 18)).shape
         self.assertEqual(len(shape1), len(shape2))
         self.assertTrue(
             all(i == j for i, j in zip(shape1, shape2) if i is not None))
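
K.flatten(x, outdim=i) keeps the first i - 1 axes and collapses the remaining
axes into one trailing axis. A NumPy sketch of this assumed semantic, using the
test's shapes:

import numpy as np
a = np.random.rand(16, 8, 12, 25, 18)
for outdim in range(1, 5):
    flat = a.reshape(a.shape[:outdim - 1] + (-1,))
    print(outdim, flat.shape)
# 1 (691200,)   2 (16, 43200)   3 (16, 8, 5400)   4 (16, 8, 12, 450)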
Example #14
 def _auto_create_inputs(self, X):
     if len(self._inputs) > 0:
         return
     if not isinstance(X, (tuple, list)):
         X = (X, )
     X = [
         K.placeholder(shape=(None, ) + i.shape[1:],
                       dtype=i.dtype,
                       name='input%d' % _) for _, i in enumerate(X)
     ]
     self.set_inputs(*X)
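
A hypothetical usage sketch (model stands for any object exposing this method):
passing a NumPy batch creates one placeholder per input with the batch axis left
as None, so later batches of any size are accepted:

import numpy as np
X = np.random.rand(32, 28, 28).astype('float32')
model._auto_create_inputs(X)
# equivalent to:
# model.set_inputs(K.placeholder(shape=(None, 28, 28),
#                                dtype='float32', name='input0'))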
Example #15
 def test_helper_ops_variables(self):
     X = K.placeholder(shape=(10, 20))
     f = N.Sequence([
         N.Dense(12),
         N.Dense(8),
         N.BatchNorm(),
         N.Dense(25, W_init=tf.zeros(shape=(8, 25)))
     ])
     y = f(X)
     self.assertEqual(y.shape.as_list(), [10, 25])
     self.assertEqual(len(f.variables), 10)
     self.assertEqual(len(f.parameters), 7)
     self.assertEqual(len(f.trainable_variables), 9)
Example #16
 def test_slice_ops(self):
     X = K.placeholder(shape=(None, 28, 28, 28, 3))
     f = N.Sequence([
         N.Conv(32, 3, pad='same', activation=K.linear),
         N.BatchNorm(activation=tf.nn.relu),
         N.Flatten(outdim=4)[:, 8:12, 18:25, 13:],
     ])
     y = f(X)
     fn = K.function(X, y)
     self.assertTrue(
         fn(np.random.rand(12, 28, 28, 28, 3)).shape[1:] == tuple(
             y.shape.as_list()[1:]))
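     # Conv 'same' keeps (None, 28, 28, 28, 32); Flatten(outdim=4) gives
     # (None, 28, 28, 896); slicing [8:12, 18:25, 13:] leaves (4, 7, 883)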
     self.assertEqual(y.shape.as_list()[1:], [4, 7, 883])
Example #17
 def test_cudnn_rnn_backend(self):
     if get_device() == 'cpu':
         return
     print()
     np.random.seed(1208)
     batch_size = 25
     hidden_size = 12
     X_linear = K.placeholder(shape=(None, 8, 32), name='X_linear')
     X_skip = K.placeholder(shape=(None, 8, 12), name='X_skip')
     for direction_mode in ['bidirectional', 'unidirectional']:
         for nb_layers in [1, 2, 3]:
             for rnn_mode in ['gru', 'lstm', 'rnn_tanh']:
                 for input_mode in ['linear', 'skip']:
                     if input_mode == 'linear':
                         X = X_linear
                         x = np.random.rand(batch_size, 8, 32)
                     else:
                         X = X_skip
                         x = np.random.rand(batch_size, 8, 12)
                     start = timeit.default_timer()
                     y = K.rnn_dnn(X,
                                   hidden_size=hidden_size,
                                   rnn_mode=rnn_mode,
                                   input_mode=input_mode,
                                   num_layers=nb_layers,
                                   direction_mode=direction_mode)
                     # perform function
                     f = K.function(X, y)
                     output = f(x)
                     benchmark = timeit.default_timer() - start
                     self.assertEqual([list(i.shape) for i in output], [[
                         batch_size if j is None else j
                         for j in K.get_shape(i)
                     ] for i in y])
                     print(
                         "*PASSED* [Layers]%s [Mode]%-8s [Input]%-6s [Direction]%s [Benchmark]%.4f"
                         % (nb_layers, rnn_mode, input_mode, direction_mode,
                            benchmark))
Example #18
 def inputs(self):
     """ Create list of placeholder based on footprint(shape) from previous
     inputs of this Operator
     """
     if self._configuration is None:
         raise Exception("This operators haven't initialized.")
     if id(self) in _cached_placeholder:
         return _cached_placeholder[id(self)]
     inputs = [
         K.placeholder(shape=j, name='%s_input%d' % (self.name, i))
         for i, j in enumerate(self._footprint)
     ]
     _cached_placeholder[id(self)] = inputs
     return inputs
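
The snippet relies on a module-level cache so that repeated calls return the
same placeholder objects; a minimal sketch of the assumed declaration elsewhere
in the module:

_cached_placeholder = {}  # maps id(operator) -> list of placeholders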
Example #19
    def test_dense(self):
        x = K.placeholder((None, 10))

        f1 = N.Dense(20)
        f2 = N.Dense(30)

        y = f2(f1(x))
        y = f1.T(f2.T(y))

        f = K.function(x, y)
        x = f(np.random.rand(12, 10))

        self.assertEquals(x.shape, (12, 10))
        self.assertEquals(y.shape.as_list(), [None, 10])
Example #20
    def test_noise(self):
        x = K.placeholder((2, 3))
        f1 = N.Noise(level=0.5, noise_dims=0, noise_type='gaussian')
        y = f1(x)
        f = K.function(x, y, defaults={K.is_training(): True})
        z = f(np.ones((2, 3)))
        z = z.tolist()
        self.assertTrue(all(i == z[0] for i in z))

        f1 = N.Noise(level=0.5, noise_dims=1, noise_type='gaussian')
        y = f1(x)
        f = K.function(x, y, defaults={K.is_training(): True})
        z = f(np.ones((2, 3)))
        z = z.T.tolist()
        self.assertTrue(all(i == z[0] for i in z))
Example #21
    def test_dropout(self):
        x = K.placeholder((4, 6))
        f1 = N.Dropout(level=0.5, noise_dims=0, rescale=True)
        y = f1(x)
        f = K.function(x, y, defaults={K.is_training(): True})
        z = f(np.ones((4, 6)))
        z = z.tolist()
        self.assertTrue(all(i == z[0] for i in z))

        f1 = N.Dropout(level=0.5, noise_dims=1, rescale=True)
        y = f1(x)
        f = K.function(x, y, defaults={K.is_training(): True})
        z = f(np.ones((4, 6)))
        z = z.T.tolist()
        self.assertTrue(all(i == z[0] for i in z))
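
In this dropout test and the noise test above, the assertions hold because
noise_dims pins the axis along which a single sampled value is broadcast:
noise_dims=0 makes all rows identical, noise_dims=1 all columns. A NumPy sketch
of the assumed broadcasting for the first dropout case:

import numpy as np
keep = (np.random.rand(1, 6) > 0.5).astype(float)  # one mask shared by all rows
out = np.ones((4, 6)) * keep / 0.5                 # rescale by keep probability
rows = out.tolist()
assert all(r == rows[0] for r in rows)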
Example #22
    def test_conv2D(self):
        x = K.placeholder((None, 28, 28, 3))
        f1 = N.Conv(16, (3, 3), strides=(2, 2), pad='same')
        y = f1(x)

        f = K.function(x, y)
        z = f(np.random.rand(12, 28, 28, 3))

        self.assertEquals(z.shape, (12, 14, 14, 16))
        self.assertEquals(y.shape.as_list(), [None, 14, 14, 16])

        # ====== transpose convolution ====== #
        y = f1.T(y)
        f = K.function(x, y)
        z = f(np.random.rand(12, 28, 28, 3))
        self.assertEquals(z.shape, (12, 28, 28, 3))
        self.assertEquals(y.shape.as_list(), [None, 28, 28, 3])
Example #23
    def test_computational_graph1(self):
        X = K.placeholder(shape=(None, 32), name='input')
        z = K.variable(np.random.rand(10, 10), name='z')
        f = N.Sequence(
            [N.Dense(16, activation=K.relu),
             N.Dense(8, activation=K.softmax)])
        y = f(X)
        add_auxiliary_variable(y, K.constant(10, name='aux_const'))

        tmp = K.ComputationGraph(y)
        self.assertEqual(len(tmp.placeholders), 1)
        self.assertEqual(len(tmp.trainable_variables), 4)
        self.assertEqual(len(tmp.parameters), 4)
        self.assertEqual(len(tmp.dict_of_placeholders), 1)
        self.assertEqual(len(tmp.auxiliary_variables), 1)
        tmp.intermediary_variables  # no idea how to test this
        self.assertEqual(len(tmp.updates), 1)
        self.assertEqual(K.ComputationGraph(y), tmp)
Example #24
    def test_load_save3(self):
        X = K.placeholder(shape=(None, 28, 28))
        ops = N.Sequence([
            N.Dimshuffle(pattern=(0, 1, 2, 'x')),
            N.Conv(8, (3, 3), strides=(1, 1), pad='same', activation=K.relu),
            K.pool2d,
            N.Flatten(outdim=2),
            N.Dense(64, activation=K.relu),
            N.Dense(10, activation=K.softmax)
        ])
        y = ops(X)
        f1 = K.function(X, y)

        ops_ = cPickle.loads(cPickle.dumps(ops, protocol=cPickle.HIGHEST_PROTOCOL))
        y_ = ops_(X)
        f2 = K.function(X, y_)

        x = np.random.rand(32, 28, 28)
        self.assertEqual(np.sum(f1(x) - f2(x)), 0.)
Example #25
    def set_inputs(self, *inputs):
        self._input_info = []
        self._inputs = []
        for i in inputs:
            if not K.is_placeholder(i):
                raise ValueError('Only placeholder inputs are accepted.')
            name, dtype, shape = i.name, i.dtype, K.get_shape(i)
            self._input_info.append([name, dtype, shape])
            self._inputs.append(i)
        # ====== Try to check if the inputs match the Ops ====== #
        try:
            # call this to initialize the parameters and get
            # estimated output shape (we assume training and deploying
            # mode get the same shape).
            for i in self._inputs:
                add_role(i, TRAINING)
            self._y_train = self._seq_ops(*self._inputs)

            for i in self._inputs:
                add_role(i, DEPLOYING)
            self._y_pred = self._seq_ops(*self._inputs)

            # create default output
            if len(self._output_info) == 0:
                shape = K.get_shape(self._y_train)
                self._outputs = [
                    K.placeholder(shape=shape,
                                  dtype=self._y_train.dtype,
                                  name='output1')
                ]
                self._output_info = [('output1', self._y_train.dtype, shape)]

            # reset all functions
            for i, j in self._functions.items():
                del self._functions[i]
                del j
            self._functions = {}
        except Exception as e:
            warnings.warn('Inputs do not match the Ops requirements, '
                          'error: ' + str(e))
            self._input_info = []
            self._inputs = []
Example #26
    def test_load_save2(self):
        K.set_training(True)
        X = K.placeholder((None, 1, 28, 28))

        f = N.Dense(128, activation=K.relu)
        y = f(X)
        yT = f.T(y)
        f1 = K.function(X, y)
        f2 = K.function(X, yT)

        f = cPickle.loads(cPickle.dumps(f))
        y = f(X)
        yT = f.T(y)
        f3 = K.function(X, y)
        f4 = K.function(X, yT)

        x = np.random.rand(12, 1, 28, 28)

        self.assertEqual(f1(x).sum(), f3(x).sum())
        self.assertEqual(f2(x).sum(), f4(x).sum())
Example #29
    def test_computational_graph2(self):
        np.random.seed(1208)

        X = K.variable(np.zeros((8, 12)), name='X')
        Y = K.variable(np.random.rand(12, 8), name='Y')
        Z = K.placeholder(shape=(8, 8), name='Z')
        a = K.dot(X, Y)
        add_roles(a, Auxiliary)
        a = a + Z
        g1 = K.ComputationGraph(a)

        self.assertEqual(len(g1.trainable_variables), 2)
        self.assertEqual(len(g1.placeholders), 1)
        self.assertEqual(len(g1.updates), 1)
        self.assertEqual(len(g1.auxiliary_variables), 1)

        f = K.function(Z, [a] + g1.auxiliary_variables)

        output = f(np.random.rand(8, 8))
        self.assertEqual(repr(np.sum(output[0]))[:5], "32.20")
        self.assertEqual(np.sum(output[1]), 0)
        self.assertEqual(np.unique(K.eval(X)).tolist(), [12.])
Example #30
    def test_auto_infer_shape(self):
        x = K.variable(np.random.rand(8, 25, 12))
        y = K.placeholder((None, 25, 12))

        def test_func(func):
            self.assertEquals(K.get_shape(func(x, 0)),
                              K.eval(func(x, 0)).shape)
            self.assertEquals(K.get_shape(func(x, -1)),
                              K.eval(func(x, -1)).shape)
            self.assertEquals(K.get_shape(func(x, 1, True)),
                              K.eval(func(x, 1, True)).shape)

            self.assertEquals(K.get_shape(func(x, 0)), K.get_shape(func(y, 0)))
            self.assertEquals(K.get_shape(func(x, 0, True)),
                              K.get_shape(func(y, 0, True)))

            if func != K.argmax and func != K.argmin:
                self.assertEquals(K.get_shape(func(x, (1, -1))),
                                  K.eval(func(x, (1, -1))).shape)
                self.assertEquals(K.get_shape(func(x, (0, 1))),
                                  K.eval(func(x, (0, 1))).shape)
                self.assertEquals(K.get_shape(func(x, (0, 1), True)),
                                  K.eval(func(x, (0, 1), True)).shape)

        test_func(K.var)
        test_func(K.max)
        test_func(K.min)
        test_func(K.any)
        test_func(K.sum)
        test_func(K.prod)
        test_func(K.mean)
        test_func(K.std)
        test_func(K.argmax)
        test_func(K.argmin)

        self.assertEquals(K.get_shape(K.argsort(x)),
                          K.eval(K.argsort(x)).shape)
Example #31
    def test_load_save1(self):
        K.set_training(True)
        X = K.placeholder((None, 1, 28, 28))
        f = N.Dense(128, activation=K.relu)
        y = f(X)
        W, b = [K.get_value(p).sum() for p in K.ComputationGraph(y).parameters]
        num_units = f.num_units
        W_init = f.W_init
        b_init = f.b_init
        activation = f.activation

        f = cPickle.loads(cPickle.dumps(f))
        W1, b1 = [K.get_value(p).sum() for p in f.parameters]
        num_units1 = f.num_units
        W_init1 = f.W_init
        b_init1 = f.b_init
        activation1 = f.activation

        self.assertEqual(W1, W)
        self.assertEqual(b1, b)
        self.assertEqual(num_units1, num_units)
        self.assertEqual(W_init1.__name__, W_init.__name__)
        self.assertEqual(b_init.__name__, b_init1.__name__)
        self.assertEqual(activation1, activation)
Example #33
NB_EPOCH = 10
LEARNING_RATE = 0.001
# ===========================================================================
# Load dataset
# ===========================================================================
ds = F.CIFAR10.get_dataset()
nb_labels = 10
print(ds)
X_train = ds['X_train'][:].astype('float32') / 255.
y_train = one_hot(ds['y_train'][:], nb_classes=nb_labels)
X_test = ds['X_test'][:].astype('float32') / 255.
y_test = one_hot(ds['y_test'][:], nb_classes=nb_labels)
# ===========================================================================
# Create network
# ===========================================================================
inputs = [K.placeholder(shape=(None,) + X_train.shape[1:], name='X', dtype='float32'),
          K.placeholder(shape=(None, nb_labels), name='y', dtype='float32')]
print("Inputs:", inputs)
model = N.Lambda.search(MODEL_NAME, prefix='models_cifar')
outputs = model(*inputs)
# ====== create losses ====== #
ce = tf.losses.softmax_cross_entropy(inputs[-1], outputs['logit'])
acc = K.metrics.categorical_accuracy(outputs['prob'], inputs[-1])
cm = K.metrics.confusion_matrix(y_pred=outputs['prob'],
                                y_true=inputs[-1],
                                labels=nb_labels)
# ====== create optimizer ====== #
optz = K.optimizers.Adam(lr=LEARNING_RATE)
parameters = model.parameters
print("#Parameters:", len(parameters))
updates = optz(ce, parameters)
Example #34
  X_valid, y_valid = X_train[40000:], y_train[40000:]
  X_train, y_train = X_train[:40000], y_train[:40000]
  # normalize value to [0, 1]
  X_train = X_train / 255.
  X_valid = X_valid / 255.
  X_test = X_test / 255.
print(ds)
# ====== others ====== #
X_samples, y_samples = X_train[:25], y_train[:25]
input_shape = ds['X_train'].shape
input_ndim = len(input_shape)
print("Train shape:", ctext(X_train.shape, 'cyan'))
print("Valid shape:", ctext(X_valid.shape, 'cyan'))
print("Test  shape:", ctext(X_test.shape, 'cyan'))
# ====== create basic tensor ====== #
X = K.placeholder(shape=(None,) + input_shape[1:], name='X_input')
y = K.placeholder(shape=(None,), name='y_input')
# ===========================================================================
# Create the network
# ===========================================================================
LATENT_DROPOUT = 0.3
if args.cnn:
  with N.args_scope(([N.Conv, N.Dense], dict(b_init=None, activation=K.linear)),
                    (N.BatchNorm, dict(activation=tf.nn.elu)),
                    (N.Pool, dict(mode='max', pool_size=2))):
    f_encoder = N.Sequence([
        N.Dropout(level=0.5),
        N.Dimshuffle((0, 2, 3, 1)) if is_cifar10 else N.Dimshuffle((0, 1, 2, 'x')),

        N.Conv(num_filters=32, filter_size=3, pad='valid'),
        N.Pool(),
Example #35
    def test_odin_vs_lasagne(self):
        X1 = K.placeholder(shape=(None, 28, 28))
        X2 = K.placeholder(shape=(None, 784))

        def lasagne_net1():
            "FNN"
            i = lasagne.layers.InputLayer(shape=(None, 784))
            i.input_var = X2

            i = lasagne.layers.DenseLayer(i, num_units=32, W=random(784, 32), b=zeros(32),
                nonlinearity=lasagne.nonlinearities.rectify)
            i = lasagne.layers.DenseLayer(i, num_units=16, W=random(32, 16), b=zeros(16),
                nonlinearity=lasagne.nonlinearities.softmax)
            return X2, lasagne.layers.get_output(i)

        def odin_net1():
            "FNN"
            f = N.Sequence([
                N.Dense(32, W_init=random(784, 32), b_init=zeros(32),
                    activation=K.relu),
                N.Dense(16, W_init=random(32, 16), b_init=zeros(16),
                    activation=K.softmax)
            ])
            return X2, f(X2)

        def lasagne_net2():
            "CNN"
            i = lasagne.layers.InputLayer(shape=(None, 28, 28))
            i.input_var = X1

            i = lasagne.layers.DimshuffleLayer(i, (0, 'x', 1, 2))
            i = lasagne.layers.Conv2DLayer(i, 12, (3, 3), stride=(1, 1), pad='same',
                untie_biases=False,
                W=random(12, 1, 3, 3),
                nonlinearity=lasagne.nonlinearities.rectify)
            i = lasagne.layers.Pool2DLayer(i, pool_size=(2, 2), stride=None, mode='max',
                        ignore_border=True)
            i = lasagne.layers.Conv2DLayer(i, 16, (3, 3), stride=(1, 1), pad='same',
                untie_biases=False,
                W=random(16, 12, 3, 3),
                nonlinearity=lasagne.nonlinearities.sigmoid)
            return X1, lasagne.layers.get_output(i)

        def odin_net2():
            "CNN"
            f = N.Sequence([
                N.Dimshuffle((0, 1, 2, 'x')),
                N.Conv(12, (3, 3), strides=(1, 1), pad='same',
                    untie_biases=False,
                    W_init=random(3, 3, 1, 12),
                    activation=K.relu),
                N.Pool(pool_size=(2, 2), strides=None, mode='max'),
                N.Conv(16, (3, 3), strides=(1, 1), pad='same',
                    untie_biases=False,
                    W_init=random(3, 3, 12, 16),
                    activation=K.sigmoid),
                N.Dimshuffle((0, 3, 1, 2))
            ])
            return X1, f(X1)

        def lasagne_net3():
            "RNN"
            i = lasagne.layers.InputLayer(shape=(None, 28, 28))
            i.input_var = X1

            W = [random(28, 32), random(32, 32), random(32), random_bin(12, 28)]
            i = lasagne.layers.RecurrentLayer(i, num_units=32,
                W_in_to_hid=W[0],
                W_hid_to_hid=W[1],
                b=W[2],
                nonlinearity=lasagne.nonlinearities.rectify,
                hid_init=zeros(1, 32),
                backwards=False,
                learn_init=False,
                gradient_steps=-1,
                grad_clipping=0,
                unroll_scan=False,
                precompute_input=True,
                mask_input=None,
                only_return_final=False)
            return X1, lasagne.layers.get_output(i)

        def odin_net3():
            "RNN"
            W = [random(28, 32), random(32, 32), random(32), random_bin(12, 28)]
            f = N.Sequence([
                N.Dense(num_units=32, W_init=W[0], b_init=W[2],
                    activation=K.linear),
                N.RNN(num_units=32, activation=K.relu,
                    W_init=W[1])
            ])
            return X1, f(X1, hid_init=zeros(1, 32))

        func_list = [
            (lasagne_net1, odin_net1),
            # (lasagne_net2, odin_net2),
            (lasagne_net3, odin_net3)
        ]
        print()
        for i, j in func_list:
            print('Test:', i.__name__, j.__name__)
            seed = np.random.randint(10e8)
            # ====== call the function ====== #
            np.random.seed(seed)
            i = i()
            np.random.seed(seed)
            j = j()
            # ====== create theano function ====== #
            f1 = K.function(i[0], i[1])
            f2 = K.function(j[0], j[1])
            shape = K.get_shape(i[0])
            # ====== get the output ====== #
            x = np.random.rand(*[12 if s is None else s for s in shape])
            y1 = f1(x)
            y2 = f2(x)
            self.assertEqual(y1.shape, y2.shape)
            self.assertAlmostEqual(np.sum(np.abs(y1 - y2)), 0.)
Example #36
                parallel_iterations=32, back_prop=True,
                swap_memory=False, infer_shape=True,
                name=name)
    # return format consistent with theano's scan
    if nb_outputs == 1:
        outputs = outputs[0]
    return outputs


# ====== simulate data ====== #
def doit(_, x, y, z):
    z += K.sum(x + y) + K.sum(K.pow(_, 2))
    return z

sequences = [
    K.placeholder(shape=(600, None)),
    K.variable(np.arange(0, 1200).reshape(-1, 2)),
    K.variable(np.arange(1200, 2400).reshape(-1, 2))
]

outputs_info = K.zeros(shape=(1200,))

X = np.random.rand(600, 3000)
# ====== tf.scan ====== #
y = Scan2(doit,
          sequences=sequences,
          outputs_info=outputs_info,
          n_steps=None,
          backwards=True,
          name=None)
print('Scan:')
Example #37
y_train = y[:int(0.8 * n)]
X_valid = X[int(0.8 * n):]
y_valid = y[int(0.8 * n):]

print('X:', X.shape, 'y:', y.shape)
print('X_train:', X_train.shape, 'y_train:', y_train.shape)
print('X_valid:', X_valid.shape, 'y_valid:', y_valid.shape)

E = tk.embed(embedding)
# these numbers must be identical across runs
print('Tokenizer:', np.sum(E), np.sum(X_train), np.sum(y_train),
      np.sum(X_valid), np.sum(y_valid))
# ===========================================================================
# Building model
# ===========================================================================
X = K.placeholder(shape=(None, MAX_SEQ_LEN), dtype='int32', name='X')
y = K.placeholder(shape=(None, nb_labels), dtype='float32', name='y')

f = N.Sequence([
    N.Embedding(tk.nb_words, embedding_dims, W_init=E),
    N.Dimshuffle(pattern=(0, 1, 'x', 2)),

    N.Conv(num_filters=128, filter_size=(5, 1), strides=1, pad='valid',
           activation=K.relu),
    N.Pool(pool_size=(5, 1), pad='valid', mode='max'),

    N.Conv(num_filters=128, filter_size=(5, 1), strides=1, pad='valid',
           activation=K.relu),
    N.Pool(pool_size=(5, 1), pad='valid', mode='max'),

    N.Conv(num_filters=128, filter_size=(5, 1), strides=1, pad='valid',
Example #38
(EXP_DIR, MODEL_PATH, LOG_PATH,
 TRAIN_PATH, TEST_PATH) = get_model_path('xvec', args)
stdio(LOG_PATH)
# ===========================================================================
# Create data feeder
# ===========================================================================
(train, valid,
 test_ids, test_dat,
 all_speakers) = prepare_dnn_data(
    recipe=args.recipe, feat=FEAT, utt_length=args.l)
n_speakers = len(all_speakers) + 1
# ===========================================================================
# Create the network
# ===========================================================================
inputs = [K.placeholder(shape=(None,) + shape[1:],
                        dtype='float32',
                        name='input%d' % i)
          for i, shape in enumerate(as_tuple_of_shape(train.shape))]
X = inputs[0]
y = inputs[1]
print("Inputs:", ctext(inputs, 'cyan'))
# ====== the network ====== #
if os.path.exists(MODEL_PATH):
  x_vec = N.deserialize(path=MODEL_PATH, force_restore_vars=True)
else:
  TRAIN_MODEL = True
  with N.args_scope(
      ['TimeDelayedConv', dict(time_pool='none', activation=K.relu)],
      ['Dense', dict(activation=K.linear, b_init=None)],
      ['BatchNorm', dict(activation=K.relu)]
  ):
Example #40
 def _initialize(self, X):
   # ====== check inputs dimensions ====== #
   if not hasattr(X, 'shape'):
     raise ValueError("`X` must have `shape` attribute.")
   feat_dim = np.prod(X.shape[1:])
   if self._feat_dim is None:
     self._feat_dim = feat_dim
   # validate input dimension
   if feat_dim != self._feat_dim:
     raise RuntimeError("Feature dimension mismatch %d and %d" %
                        (feat_dim, self.feat_dim))
   # check if the tensorflow ops are already initialized
   if hasattr(self, '_f_train'):
     return
   # ====== binary or multi-classes ====== #
   if self.nb_classes == 2:
     out_shape = (None,)
     fn_activation = tf.nn.sigmoid
     fn_loss = tf.losses.sigmoid_cross_entropy
     fn_acc = K.metrics.binary_accuracy
   else:
     out_shape = (None, self.nb_classes)
     fn_activation = tf.nn.softmax
     fn_loss = tf.losses.softmax_cross_entropy
     fn_acc = K.metrics.categorical_accuracy
   # ====== create model ====== #
   with tf.name_scope(self.name, 'logistic_regression'):
     # inputs
     self._X = K.placeholder(shape=(None, self.feat_dim),
                             dtype=self.dtype,
                             name='%s_input' % self.name)
     self._y = K.placeholder(shape=out_shape,
                             dtype=self.dtype,
                             name='%s_output' % self.name)
     # check the bias
     if is_number(self.fit_intercept):
       b_init = float(self.fit_intercept)
      elif self.fit_intercept is False or self.fit_intercept is None:
       b_init = None
     else:
       b_init = self.fit_intercept
     # create the model and initialize
     with K.variable_dtype(dtype=self.dtype):
        self._model = N.Dense(
            num_units=self.nb_classes,
            W_init=init_ops.glorot_uniform_initializer(
                seed=self._rand_state.randint()),
            b_init=b_init,
            activation=K.linear)
       y_logits = self._model(self._X)
     y_prob = fn_activation(y_logits)
     # applying class weights
     class_weights = tf.constant(value=self._class_weight,
                                 dtype=self.dtype,
                                 name="class_weights")
     weights = tf.gather(class_weights,
                         tf.cast(self._y, 'int32') if self.nb_classes == 2 else
                         tf.argmax(self._y, axis=-1))
     # optimizer
     params = [v for v in self._model.variables
               if has_roles(v, Weight) or has_roles(v, Bias)]
     losses = fn_loss(self._y, y_logits, weights=weights)
     l1_norm = tf.norm(self._model.get('W'), ord=1) if self.l1 > 0. else 0
     l2_norm = tf.norm(self._model.get('W'), ord=2) if self.l2 > 0. else 0
     losses = losses + self.l1 * l1_norm + self.l2 * l2_norm
     acc = fn_acc(self._y, y_prob)
     updates = self._optimizer.get_updates(losses, params)
     # create function
     if self.confusion_matrix:
       cm = K.metrics.confusion_matrix(y_true=self._y, y_pred=y_prob,
                                       labels=self.nb_classes)
     metrics = [losses, acc, cm] if self.confusion_matrix else [losses, acc]
     self._f_train = K.function(inputs=(self._X, self._y),
                                outputs=metrics,
                                updates=updates,
                                training=True)
     self._f_score = K.function(inputs=(self._X, self._y),
                                outputs=metrics,
                                training=False)
     self._f_pred_prob = K.function(inputs=self._X,
                                    outputs=y_prob,
                                    training=False)
     self._f_pred_logit = K.function(inputs=self._X,
                                     outputs=y_logits,
                                     training=False)
   return self
Example #41
 def placeholder(self):
   if self.__placeholder is None:
     self.__placeholder = K.placeholder(
         shape=self.shape, dtype=self.dtype, name=self.name)
   return self.__placeholder
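
A usage sketch for this lazily created placeholder (InputDescriptor is a
hypothetical owner class holding the shape, dtype, and name attributes):

spec = InputDescriptor(shape=(None, 28, 28), dtype='float32', name='X')
assert spec.placeholder is spec.placeholder  # built once, then cached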
Example #42
).add('--rnn', 'using RNN network', False
).parse()
# ===========================================================================
# Load data
# ===========================================================================
USE_MNIST_DATA = True
if arg.ds.lower() == 'mnist':
  ds = F.MNIST_original.get_dataset()
elif arg.ds.lower() == 'fmnist':
  ds = F.FMNIST_original.get_dataset()
else:
  ds = F.CIFAR10.get_dataset()
  USE_MNIST_DATA = False
print(ds)

X = K.placeholder(shape=(None,) + ds['X_train'].shape[1:], name='X')
y = K.placeholder(shape=(None,), name='y', dtype='int32')
y_onehot = tf.one_hot(y, depth=10)
# ===========================================================================
# Build network
# ===========================================================================
if not arg.rnn:
  ops = N.Sequence([
      N.Dimshuffle((0, 1, 2, 'x')) if USE_MNIST_DATA else N.Dimshuffle((0, 2, 3, 1)),

      N.BatchNorm(axes='auto'),
      N.Conv(32, (3, 3), strides=(1, 1), pad='same',
             activation=tf.nn.relu),
      N.Pool(pool_size=(2, 2), strides=None),
      N.Conv(64, (3, 3), strides=(1, 1), pad='same',
             activation=tf.nn.relu),
Example #43
from __future__ import print_function, division, absolute_import

import os
os.environ['ODIN'] = 'float32,gpu'
import timeit
import random

import numpy as np

from odin.utils import UnitTimer, Progbar
from odin import backend as K, nnet as N

X1 = K.placeholder(shape=(10000, 1000), name='X1')
X2 = K.placeholder(shape=(10000, 1000), name='X2')

X3 = K.placeholder(shape=(10000, 2000), name='X3')

y1 = K.placeholder(shape=(1000, 2000), name='y1')
y2 = K.placeholder(shape=(2000, 3000), name='y2')
y3 = K.placeholder(shape=(3000, 4000), name='y3')
y4 = K.placeholder(shape=(4000, 5000), name='y4')

z = K.dot(X1, y1) + K.dot(X2, y1)
z = K.dot(z, y2)
z = K.dot(z, y3)
z = K.dot(z, y4)
print(z)
f = K.function([X1, X2, y1, y2, y3, y4], outputs=z)

X1 = X3[:, :1000]
X2 = X3[:, 1000:]