Example #1
    # requires: numpy as np, tensorflow as tf, and NumPyNet's to_categorical
    # (presumably from NumPyNet.utils)
    def test_to_categorical(self, size, num_labels):

        label = np.random.randint(low=0, high=num_labels, size=(size, ))

        categorical_tf = tf.keras.utils.to_categorical(label, num_classes=None)
        categorical_np = to_categorical(label)

        np.testing.assert_allclose(categorical_tf, categorical_np)
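For reference, here is a minimal NumPy sketch of a to_categorical that would satisfy this assertion for non-negative integer labels (NumPyNet's actual implementation may differ):

    import numpy as np

    def to_categorical_sketch(labels):
        # mirror tf.keras.utils.to_categorical(label, num_classes=None):
        # infer the class count from the largest observed label
        labels = np.asarray(labels, dtype=int).ravel()
        num_classes = labels.max() + 1
        one_hot = np.zeros(shape=(labels.size, num_classes), dtype=float)
        one_hot[np.arange(labels.size), labels] = 1.
        return one_hot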
Example #2
    # requires: numpy as np, tensorflow as tf, and NumPyNet's
    # to_categorical / from_categorical (presumably from NumPyNet.utils)
    def test_from_categorical(self, size, num_labels):

        # use integer labels: tf's to_categorical casts to int, so float
        # labels from uniform() would break the comparison below
        label = np.random.randint(low=0, high=num_labels, size=(size, ))

        categorical_tf = tf.keras.utils.to_categorical(label, num_classes=None)
        categorical_np = to_categorical(label)

        np.testing.assert_allclose(categorical_tf, categorical_np)

        fromlabel_tf = tf.math.argmax(categorical_tf, axis=-1)
        fromlabel_np = from_categorical(categorical_np)

        np.testing.assert_allclose(fromlabel_tf, fromlabel_np)
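The inverse is just an argmax over the last axis, matching the tf.math.argmax reference used in the test; a minimal sketch:

    import numpy as np

    def from_categorical_sketch(categoricals):
        # recover the integer label as the position of the 1 in each row
        return np.argmax(categoricals, axis=-1)

    # round trip: labels -> one-hot -> labels
    labels = np.array([1, 0, 3, 2])
    assert (from_categorical_sketch(to_categorical_sketch(labels)) == labels).all()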
Example #3
    # requires NumPyNet's to_categorical, Network, Convolutional_layer,
    # BatchNorm_layer and Maxpool_layer, plus batch and the X/y arrays
    # loaded earlier in the script
    train_size = 512
    test_size = 300

    X_train = X_train[:train_size, ...]
    y_train = y_train[:train_size]
    X_test = X_test[:test_size, ...]
    y_test = y_test[:test_size]

    ############################################

    n_train = X_train.shape[0]
    n_test = X_test.shape[0]

    # one-hot encode y (10 classes) and reshape to 4 dimensions: (n, 1, 1, 10)
    y_train = to_categorical(y_train).reshape(n_train, 1, 1, -1)
    y_test = to_categorical(y_test).reshape(n_test, 1, 1, -1)
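    # hedged mini-demo of the reshape above (hypothetical values):
    #   y = np.array([2, 0, 7])              # 3 labels, 10 classes
    #   one_hot = np.eye(10)[y]              # stand-in for to_categorical -> (3, 10)
    #   one_hot.reshape(3, 1, 1, -1).shape   # -> (3, 1, 1, 10)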

    # Create the model
    model = Network(batch=batch, input_shape=X_train.shape[1:])

    model.add(
        Convolutional_layer(size=3,
                            filters=32,
                            stride=1,
                            pad=True,
                            activation='Relu'))

    model.add(BatchNorm_layer())

    model.add(Maxpool_layer(size=2, stride=1, padding=True))
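The example stops mid-construction; a typical NumPyNet pipeline would finish with a classification head and a training call. The continuation below is a hedged sketch: Softmax_layer, Adam, and the compile/fit signatures are assumptions modeled on NumPyNet's MNIST example (both names would need importing), not verified calls.

    model.add(Softmax_layer(spatial=True))         # assumed final layer
    model.compile(optimizer=Adam())                # assumed NumPyNet optimizer
    model.summary()
    model.fit(X=X_train, y=y_train, max_iter=5)    # assumed fit signature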
Example #4
          'this was very sad earlier': False,
          'i was good and not bad earlier': True,
          'i was not good and not happy earlier': False,
#          'i am not at all bad or sad right now': True,
#          'i am not at all good or happy right now': False,
          'this was not happy and not good earlier': False,
        }

  # TODO: Up to now this works only if the number of samples is evenly divisible by the batch size!

  words = set((w for text in data.keys() for w in text.split(' ')))
  print('{:d} unique words found'.format(len(words)))

  # enumerate yields (index, word) pairs, so this maps integer index -> word
  coding = {k : v for k, v in enumerate(words)}

  one_hot_encoding = to_categorical(list(data.keys()))
  batch, size = one_hot_encoding.shape
  one_hot_encoding = one_hot_encoding.reshape(batch, 1, 1, size)
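  # NOTE (assumption): with NumPyNet's unique-value based to_categorical,
  # every distinct sentence becomes its own class, so the encoding is a
  # (batch, batch) one-hot matrix with a single 1 per row (a permuted
  # identity), reshaped to (batch, 1, 1, batch) for the 4-D layer input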

  outputs = 8

  # Model initialization
  layer = LSTM_layer(outputs, steps=3, input_shape=one_hot_encoding.shape)
  print(layer)

  layer.forward(one_hot_encoding)
  forward_out = layer.output.copy()

  layer.wf.delta = np.ones(shape=(layer.wf.out_shape), dtype=float)
  layer.wi.delta = np.ones(shape=(layer.wi.out_shape), dtype=float)
  layer.wg.delta = np.ones(shape=(layer.wg.out_shape), dtype=float)
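The snippet cuts off after three of the four LSTM gate deltas; presumably the original continues with the output gate and a backward pass. A hedged guess at that continuation (the wo attribute and the backward() signature are assumptions extrapolated from the wf/wi/wg pattern above):

  # layer.wo.delta = np.ones(shape=(layer.wo.out_shape), dtype=float)
  # layer.backward(inpt=one_hot_encoding, delta=np.zeros_like(one_hot_encoding))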