Example #1
def test_inference_conv1d_1():
    input_shape = (N, n_channels)
    X = np.random.rand(500, *input_shape)

    kmodel = tf.keras.models.Sequential()
    kmodel.add(
        tf.keras.layers.Conv1D(1,
                               kernel_size,
                               padding="same",
                               input_shape=input_shape))
    kmodel.add(tf.keras.layers.Conv1D(5, kernel_size * 4, padding="same"))
    kmodel.add(
        tf.keras.layers.Conv1D(16, kernel_size, padding="same", strides=2))
    kmodel.add(tf.keras.layers.Flatten())
    kmodel.add(tf.keras.layers.Dense(16))

    model = Model()
    model.add(Input(*input_shape))
    model.add(Conv1D(1, (kernel_size, ), padding="same"))
    model.add(Conv1D(5, (kernel_size * 4, ), padding="same"))
    model.add(Conv1D(16, (kernel_size, ), padding="same", stride=2))
    model.add(Dense(16))

    model.set_weights(kmodel.get_weights())

    p = model.predict(X)
    p_ = kmodel.predict(X)

    error = np.absolute(p - p_)
    assert np.amax(error) < 1e-4
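A note on the `model.set_weights(kmodel.get_weights())` line: it only works if the custom `Model` consumes parameters in the same order and layout that Keras emits them. A minimal sketch of that layout (this is the documented Keras convention; the custom library is assumed to follow it):

import tensorflow as tf

# Keras stores Conv1D parameters as
#   kernel: (kernel_size, input_channels, filters)
#   bias:   (filters,)
layer = tf.keras.layers.Conv1D(filters=16, kernel_size=3, padding="same")
layer.build(input_shape=(None, 100, 4))      # 4 input channels
kernel, bias = layer.get_weights()
print(kernel.shape)                          # (3, 4, 16)
print(bias.shape)                            # (16,)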
Example #2
def test_training_conv1d_with_known_weights():
    input_shape = (2, 1)

    X = np.array([[0., 1.]]).reshape(1, 2, 1)
    y = np.array([[1.]])

    weights = [
        np.array([[[.1, .4]], [[.2, .5]], [[.3, .6]]]),
        np.array([.0, .0]),
        np.array([[[.7, 1.3], [.8, 1.4]], [[.9, 1.5], [1., 1.6]],
                  [[1.1, 1.7], [1.2, 1.8]]]),
        np.array([.0, .0]),
        np.array([[1.9], [2.0], [2.1], [2.2]]),
        np.array([.0])
    ]

    c1 = KConv1D(2, 3, padding="same", input_shape=input_shape)
    c2 = KConv1D(2, 3, padding="same")

    kmodel = Sequential()
    kmodel.add(c1)
    kmodel.add(c2)
    kmodel.add(Flatten())
    kmodel.add(KDense(1))

    kmodel.compile(loss="mean_squared_error", optimizer=SGD(learning_rate=1.0))

    kmodel.set_weights(weights)

    kmodel.train_on_batch(X, y)

    model = Model()
    model.add(Input(*input_shape))
    model.add(Conv1D(2, (3, ), padding="same"))
    model.add(Conv1D(2, (3, ), padding="same"))
    model.add(Dense(1))

    model.set_weights(weights)

    # model.predict(X)

    # print(model._layers[2].neurons[0].weights)
    # print(len(model._layers[2].neurons[0].weights))

    model.fit(X,
              y,
              "mean_squared_error",
              epochs=1,
              batch_size=1,
              learning_rate=1.0)

    print(kmodel.get_weights())

    print(model.get_weights())
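The hand-crafted `weights` list above only loads cleanly because its entries match, shape for shape, what the Keras layers expect: one (kernel, bias) pair per parameterised layer. A small self-contained check of that correspondence (a sketch using plain Keras, not the library under test):

import numpy as np
import tensorflow as tf

# Parameter shapes for this architecture, in the order Keras expects them:
#   (3, 1, 2)  first Conv1D kernel  (kernel_size=3, in_channels=1, filters=2)
#   (2,)       first Conv1D bias
#   (3, 2, 2)  second Conv1D kernel (kernel_size=3, in_channels=2, filters=2)
#   (2,)       second Conv1D bias
#   (4, 1)     Dense kernel (2 steps * 2 channels flattened -> 1 unit)
#   (1,)       Dense bias
shapes = [(3, 1, 2), (2,), (3, 2, 2), (2,), (4, 1), (1,)]

kmodel = tf.keras.Sequential([
    tf.keras.layers.Conv1D(2, 3, padding="same", input_shape=(2, 1)),
    tf.keras.layers.Conv1D(2, 3, padding="same"),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(1),
])
kmodel.set_weights([np.zeros(s) for s in shapes])   # accepted: every shape matches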
Example #3
def test_same_padding_with_stride3():
    model = Model().add(Input(6)) \
                   .add(Conv1D(1, (4,), padding="same", stride=3)) \
                   .add(Dense(1, activation="identity"))

    X = np.random.rand(1, 6, 1)

    model.predict(X)

    neuron0 = model._layers[1].neurons[0]
    neuron1 = model._layers[1].neurons[1]

    assert neuron0.lower_padding == 1 and neuron0.upper_padding == 0
    assert neuron1.lower_padding == 0 and neuron1.upper_padding == 0
Example #4
def test_training_conv1d():
    loss = "mean_squared_error"
    input_shape = (100, 3)
    X = np.random.rand(500, *input_shape)
    y = np.random.rand(500, 4)

    kmodel = Sequential()
    kmodel.add(
        KConv1D(1,
                3,
                padding="same",
                input_shape=input_shape,
                activation="relu"))
    kmodel.add(Flatten())
    kmodel.add(KDense(y.shape[1]))

    kmodel.compile(loss=loss, optimizer=SGD(learning_rate=LEARNING_RATE))

    model = Model()
    model.add(Input(*input_shape))
    model.add(Conv1D(1, (3, ), padding="same", activation="relu"))
    model.add(Dense(y.shape[1]))

    model.set_weights(kmodel.get_weights())

    unfitted_weights = deepcopy(model.get_weights())

    model.fit(X,
              y,
              loss,
              epochs=EPOCHS,
              batch_size=BATCH_SIZE,
              learning_rate=LEARNING_RATE)

    kmodel.fit(X, y, epochs=EPOCHS, batch_size=BATCH_SIZE, shuffle=False)

    w = model.get_weights()
    w_ = kmodel.get_weights()

    error = [x - x_ for x, x_ in zip(w, w_)]
    update = [x - x_ for x, x_ in zip(w, unfitted_weights)]

    for u in update:
        assert np.amax(np.absolute(u)) > 0.0

    for e in error:
        e_max = np.amax(np.absolute(e))
        print(e_max)
        assert e_max < 0.1
Example #5
def test_same_padding_with_stride2():
    model = Model().add(Input(5)) \
                   .add(Conv1D(1, (9,), padding="same", stride=2)) \
                   .add(Dense(1, activation="identity"))

    X = np.random.rand(1, 5, 1)

    model.predict(X)

    neuron0 = model._layers[1].neurons[0]
    neuron1 = model._layers[1].neurons[1]
    neuron2 = model._layers[1].neurons[2]

    assert neuron0.lower_padding == 4 and neuron0.upper_padding == 0
    assert neuron1.lower_padding == 2 and neuron1.upper_padding == 2
    assert neuron2.lower_padding == 0 and neuron2.upper_padding == 4
Example #6
def test_connection():
    input_layer = Input(5)
    input_layer.label = "Input"

    conv1d_layer = Conv1D(1, (3, ))
    conv1d_layer.label = "Conv1D"

    weights, biases = conv1d_layer.generate_weights(input_layer)

    gfe.setup(input_layer.n_neurons + conv1d_layer.n_neurons)

    input_layer.init_neurons(neurons_next_layer=conv1d_layer.n_neurons)

    conv1d_layer.init_neurons(weights=weights,
                              biases=biases,
                              trainable_params=None)

    conv1d_layer.connect_incoming(input_layer, "some_partition")

    try:
        gfe.run(1)
    except:
        pass

    assert conv1d_layer.n_neurons == 3

    for neuron in conv1d_layer.neurons:
        edges = list(
            filter(
                lambda x: x.post_vertex == neuron and x.label.startswith(
                    "some_partition"),
                front_end.machine_graph().edges))

        assert len(edges) == conv1d_layer.kernel_shape[0]

        for j in range(0, conv1d_layer.kernel_shape[0]):
            edge = list(
                filter(lambda x: x.pre_vertex.id == neuron.id + j, edges))
            assert len(edge) == 1

    try:
        gfe.stop()
    except:
        pass
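The edge assertions encode a sliding-window connectivity: each Conv1D neuron receives edges from `kernel_shape[0]` consecutive presynaptic neurons, offset by its own position. A rough illustration of that pattern as a plain NumPy incidence matrix (assuming neuron ids index positions within a layer; the real graph is built by the SpiNNaker graph front end, not shown here):

import numpy as np

n_inputs, kernel_size = 5, 3
n_outputs = n_inputs - kernel_size + 1        # 3, as the test asserts

# incidence[i, j] == 1 iff conv neuron i has an incoming edge from input neuron j
incidence = np.zeros((n_outputs, n_inputs), dtype=int)
for i in range(n_outputs):
    incidence[i, i:i + kernel_size] = 1       # kernel_size consecutive inputs

print(incidence)
# [[1 1 1 0 0]
#  [0 1 1 1 0]
#  [0 0 1 1 1]]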
Example #7
def test_weight_generation():
    n_batches = 5
    batch_size = 8
    n_channels = 1
    kernel_shape = (2, )
    input_shape = (n_batches, batch_size, n_channels)

    X = tf.random.normal(input_shape)

    kconv = KConv1D(1, kernel_shape, input_shape=input_shape[1:])
    conv = Conv1D(kernel_shape, "identity")

    # generate weights
    kconv(X)
    keras_weights_and_biases = kconv.get_weights()
    weights, biases = conv.generate_weights(Input(5))

    assert keras_weights_and_biases[0].shape == weights.shape
    assert keras_weights_and_biases[1].shape == biases.shape
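The call `kconv(X)` before `get_weights()` is what actually creates the Keras parameters: Keras layers are built lazily on first use. A minimal demonstration of that behaviour:

import tensorflow as tf

layer = tf.keras.layers.Conv1D(1, 2)
print(layer.get_weights())                       # [] -- not built yet
layer(tf.zeros((1, 8, 1)))                       # first call builds the layer
print([w.shape for w in layer.get_weights()])    # [(2, 1, 1), (1,)]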
Example #8
def test_same_padding():
    model = Model().add(Input(5)) \
                   .add(Conv1D(1, (8,), padding="same")) \
                   .add(Dense(1, activation="identity"))

    X = np.random.rand(1, 5, 1)

    model.predict(X)

    neuron0 = model._layers[1].neurons[0]
    neuron1 = model._layers[1].neurons[1]
    neuron2 = model._layers[1].neurons[2]
    neuron3 = model._layers[1].neurons[3]
    neuron4 = model._layers[1].neurons[4]

    assert neuron0.lower_padding == 3 and neuron0.upper_padding == 0
    assert neuron1.lower_padding == 2 and neuron1.upper_padding == 1
    assert neuron2.lower_padding == 1 and neuron2.upper_padding == 2
    assert neuron3.lower_padding == 0 and neuron3.upper_padding == 3
    assert neuron4.lower_padding == 0 and neuron4.upper_padding == 4
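The asserted paddings follow the usual SAME-convolution arithmetic: the total padding is max((ceil(n/stride) - 1) * stride + kernel - n, 0), and each output neuron's lower/upper padding is simply how far its window hangs over the ends of the input. A sketch using the TensorFlow split, where the extra element of an odd total goes to the upper end (the strided case in Example #3 above suggests this library puts it on the lower end instead):

import math

def same_padding_per_neuron(n, kernel, stride):
    """Lower/upper padding per output neuron for a SAME 1D convolution
    (TensorFlow convention for splitting an odd total)."""
    n_out = math.ceil(n / stride)
    total = max((n_out - 1) * stride + kernel - n, 0)
    pad_lower = total // 2
    pads = []
    for i in range(n_out):
        start = i * stride - pad_lower            # first input index the window touches
        end = start + kernel - 1                  # last input index the window touches
        pads.append((max(0, -start), max(0, end - (n - 1))))
    return pads

print(same_padding_per_neuron(5, 8, 1))
# [(3, 0), (2, 1), (1, 2), (0, 3), (0, 4)] -- the values asserted above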
Example #9
def test_conv_flatten():
    assert (weights[:, :,
                    0] == np.array(
                        [[0.0, 0.1, 0.2], [0.3, 0.4, 0.5], [0.6, 0.7, 0.8]],
                        dtype=np.float32)).all()

    conv1d = Conv1D(1, (kernel_size, ))
    conv1d.n_filters = n_filters

    input = Input(2)
    input.n_filters = n_channels

    weights_, biases_ = conv1d.generate_weights(input)

    assert weights.shape == weights_.shape
    assert biases.shape == biases_.shape

    flattened_weights = np.empty((kernel_size * n_channels + 1, n_filters))

    for i in range(0, n_filters):
        filter = weights[:, :, i]
        filter = np.append(filter.flatten(), biases[i])
        flattened_weights[:, i] = filter

    flattened_weights = flattened_weights.flatten(order="F")

    flattened_weights = flattened_weights.reshape(kernel_size * n_channels + 1,
                                                  n_filters,
                                                  order="F")

    extracted_weights = np.empty((kernel_size, n_channels, n_filters))
    extracted_biases = np.empty((n_filters, ))

    for i in range(0, n_filters):
        extracted_weights[:, :, i] = flattened_weights[:-1, i].reshape(
            kernel_size, n_channels)
        extracted_biases[i] = flattened_weights[-1, i]

    assert (extracted_biases == biases).all()
    assert (extracted_weights == weights).all()
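The second half of this test checks a round trip: each filter's kernel is flattened and packed into a column together with its bias, the matrix is flattened in Fortran (column-major) order, and a reshape with the same order recovers it exactly. A compact standalone version of the same round trip:

import numpy as np

kernel_size, n_channels, n_filters = 3, 1, 2
rng = np.random.default_rng(0)
weights = rng.random((kernel_size, n_channels, n_filters)).astype(np.float32)
biases = rng.random((n_filters,)).astype(np.float32)

# One column per filter: the flattened kernel followed by the bias.
packed = np.empty((kernel_size * n_channels + 1, n_filters), dtype=np.float32)
for i in range(n_filters):
    packed[:, i] = np.append(weights[:, :, i].flatten(), biases[i])

# Column-major flatten + column-major reshape is lossless.
restored = packed.flatten(order="F").reshape(packed.shape, order="F")
assert (restored == packed).all()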
Example #10
def test_inference_conv1d_2():
    input_shape = (N, n_channels)
    X = np.random.rand(500, *input_shape)

    kmodel = tf.keras.models.Sequential()
    kmodel.add(
        tf.keras.layers.Conv1D(1,
                               7,
                               padding="same",
                               input_shape=input_shape,
                               strides=2,
                               activation="relu"))
    kmodel.add(
        tf.keras.layers.Conv1D(5,
                               kernel_size * 4,
                               padding="same",
                               strides=3,
                               activation="tanh"))
    kmodel.add(
        tf.keras.layers.Conv1D(16,
                               kernel_size,
                               padding="same",
                               strides=2,
                               activation="sigmoid"))
    kmodel.add(
        tf.keras.layers.Conv1D(16,
                               kernel_size + 3,
                               padding="same",
                               strides=5,
                               activation="softmax"))
    kmodel.add(tf.keras.layers.Conv1D(5, kernel_size + 1, strides=3))
    kmodel.add(tf.keras.layers.Flatten())
    kmodel.add(tf.keras.layers.Dense(1))

    model = Model()
    model.add(Input(*input_shape))
    model.add(Conv1D(1, (7, ), padding="same", stride=2, activation="relu"))
    model.add(
        Conv1D(5, (kernel_size * 4, ),
               padding="same",
               stride=3,
               activation="tanh"))
    model.add(
        Conv1D(16, (kernel_size, ),
               padding="same",
               stride=2,
               activation="sigmoid"))
    model.add(
        Conv1D(16, (kernel_size + 3, ),
               padding="same",
               stride=5,
               activation="softmax"))
    model.add(Conv1D(5, (kernel_size + 1, ), stride=3))
    model.add(Dense(1))

    model.set_weights(kmodel.get_weights())

    p = model.predict(X)
    p_ = kmodel.predict(X)

    error = np.absolute(p - p_)
    assert np.amax(error) < 1e-4
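Mixing strided SAME layers with the final unpadded (valid) Conv1D means the sequence length shrinks layer by layer before the Dense head. The output-length rules, per the Keras/TensorFlow convention, are small enough to state as a sketch:

import math

def conv1d_output_length(n, kernel, stride, padding):
    """Output length of a 1D convolution (Keras/TensorFlow convention)."""
    if padding == "same":
        return math.ceil(n / stride)
    # "valid": count only windows that fit entirely inside the input
    return (n - kernel) // stride + 1

# e.g. a length-100 input through a kernel of 7 with stride 2
print(conv1d_output_length(100, 7, 2, "same"))    # 50
print(conv1d_output_length(100, 7, 2, "valid"))   # 47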