def test_inference_conv1d_1():
    """Inference parity check: a 3-layer Conv1D + Dense stack must agree
    with the equivalent tf.keras model to within 1e-4 after weight copy."""
    input_shape = (N, n_channels)
    samples = np.random.rand(500, *input_shape)

    # Reference model in Keras.
    kmodel = tf.keras.models.Sequential()
    kmodel.add(
        tf.keras.layers.Conv1D(1,
                               kernel_size,
                               padding="same",
                               input_shape=input_shape))
    kmodel.add(tf.keras.layers.Conv1D(5, kernel_size * 4, padding="same"))
    kmodel.add(
        tf.keras.layers.Conv1D(16, kernel_size, padding="same", strides=2))
    kmodel.add(tf.keras.layers.Flatten())
    kmodel.add(tf.keras.layers.Dense(16))

    # Same architecture in the library under test.
    model = Model()
    model.add(Input(*input_shape))
    model.add(Conv1D(1, (kernel_size, ), padding="same"))
    model.add(Conv1D(5, (kernel_size * 4, ), padding="same"))
    model.add(Conv1D(16, (kernel_size, ), padding="same", stride=2))
    model.add(Dense(16))

    # Share weights so both models compute the same function.
    model.set_weights(kmodel.get_weights())

    prediction = model.predict(samples)
    keras_prediction = kmodel.predict(samples)
    assert np.amax(np.absolute(prediction - keras_prediction)) < 1e-4
def compare_against_keras(X, y, loss):
    """Train the same 4-layer dense network in Keras and in this library,
    starting from identical weights, and compare the results.

    Asserts that (a) training actually changed every weight tensor and
    (b) each trained weight tensor stays within 0.1 (max abs element) of
    the Keras result.

    Args:
        X: training inputs, shape (samples, features).
        y: training targets, shape (samples, outputs).
        loss: Keras loss identifier, passed to both frameworks.
    """
    kmodel = Sequential()
    kmodel.add(KDense(64, activation="relu", input_shape=(X.shape[1], )))
    kmodel.add(KDense(64, activation="tanh"))
    kmodel.add(KDense(64, activation="softmax"))
    kmodel.add(KDense(y.shape[1], activation="sigmoid"))
    kmodel.compile(loss=loss, optimizer=SGD(learning_rate=LEARNING_RATE))

    model = Model().add(Input(X.shape[1])) \
        .add(Dense(64, activation="relu")) \
        .add(Dense(64, activation="tanh")) \
        .add(Dense(64, activation="softmax")) \
        .add(Dense(y.shape[1], activation="sigmoid"))

    # Start both models from the same weights; keep a pristine copy so we
    # can verify training really updated something.
    model.set_weights(kmodel.get_weights())
    unfitted_weights = deepcopy(model.get_weights())

    model.fit(X,
              y,
              loss,
              epochs=EPOCHS,
              batch_size=BATCH_SIZE,
              learning_rate=LEARNING_RATE)
    # shuffle=False keeps the batch order identical to our fit loop.
    kmodel.fit(X, y, epochs=EPOCHS, batch_size=BATCH_SIZE, shuffle=False)

    w = model.get_weights()
    w_ = kmodel.get_weights()
    error = [x - x_ for x, x_ in zip(w, w_)]
    update = [x - x_ for x, x_ in zip(w, unfitted_weights)]
    for u in update:
        assert np.amax(np.absolute(u)) > 0.0
    for e in error:
        e_max = np.amax(np.absolute(e))
        print(e_max)
        assert e_max < 0.1
def test_training_conv1d_with_known_weights():
    """Run one SGD step on a tiny Conv1D network with hand-picked weights
    in both frameworks and print the resulting weights side by side.

    NOTE(review): this test prints but never asserts on the trained
    weights, so it cannot fail on a mismatch — confirm whether an
    assertion was intended.
    """
    input_shape = (2, 1)
    X = np.array([[0., 1.]]).reshape(1, 2, 1)
    y = np.array([[1.]])

    # Hand-picked weights: two Conv1D kernels + biases, then a Dense layer.
    weights = [
        np.array([[[.1, .4]], [[.2, .5]], [[.3, .6]]]),
        np.array([.0, .0]),
        np.array([[[.7, 1.3], [.8, 1.4]], [[.9, 1.5], [1., 1.6]],
                  [[1.1, 1.7], [1.2, 1.8]]]),
        np.array([.0, .0]),
        np.array([[1.9], [2.0], [2.1], [2.2]]),
        np.array([.0])
    ]

    conv_a = KConv1D(2, 3, padding="same", input_shape=input_shape)
    conv_b = KConv1D(2, 3, padding="same")
    kmodel = Sequential()
    kmodel.add(conv_a)
    kmodel.add(conv_b)
    kmodel.add(Flatten())
    kmodel.add(KDense(1))
    kmodel.compile(loss="mean_squared_error", optimizer=SGD(learning_rate=1.0))
    kmodel.set_weights(weights)
    kmodel.train_on_batch(X, y)

    model = Model()
    model.add(Input(*input_shape))
    model.add(Conv1D(2, (3, ), padding="same"))
    model.add(Conv1D(2, (3, ), padding="same"))
    model.add(Dense(1))
    model.set_weights(weights)
    model.fit(X,
              y,
              "mean_squared_error",
              epochs=1,
              batch_size=1,
              learning_rate=1.0)

    print(kmodel.get_weights())
    print(model.get_weights())
def test_inference():
    """Inference parity check for a deep dense stack covering every
    supported activation; outputs must match Keras to within 1e-4."""
    samples = np.random.rand(500, N)

    # (units, activation) for each layer; None means linear.
    layer_spec = [
        (50, "relu"),
        (50, "softmax"),
        (300, "tanh"),
        (50, "sigmoid"),
        (25, None),
        (17, "softmax"),
    ]

    kmodel = tf.keras.models.Sequential()
    kmodel.add(tf.keras.layers.Dense(50, activation="relu", input_shape=(N, )))
    for units, activation in layer_spec[1:]:
        if activation is None:
            kmodel.add(tf.keras.layers.Dense(units))
        else:
            kmodel.add(tf.keras.layers.Dense(units, activation=activation))

    model = Model()
    model.add(Input(N))
    for units, activation in layer_spec:
        if activation is None:
            model.add(Dense(units))
        else:
            model.add(Dense(units, activation=activation))

    model.set_weights(kmodel.get_weights())

    prediction = model.predict(samples)
    keras_prediction = kmodel.predict(samples)
    assert np.amax(np.absolute(prediction - keras_prediction)) < 1e-4
def test_same_padding_with_stride4():
    """Check the per-neuron 'same' padding split for kernel 4 over a
    length-6 input.

    NOTE(review): the name says stride4 but the layer uses stride=3 —
    confirm which was intended.
    """
    model = Model().add(Input(6)) \
        .add(Conv1D(1, (4,), padding="same", stride=3)) \
        .add(Dense(1, activation="identity"))

    # predict() triggers padding computation on the conv layer's neurons.
    model.predict(np.random.rand(1, 6, 1))

    conv_neurons = model._layers[1].neurons
    expected = [(1, 0), (0, 0)]  # (lower_padding, upper_padding) per neuron
    for neuron, (lower, upper) in zip(conv_neurons, expected):
        assert neuron.lower_padding == lower
        assert neuron.upper_padding == upper
def test_training_conv1d():
    """Train a small Conv1D + Dense model in both frameworks from shared
    initial weights; trained weights must (a) have moved and (b) stay
    within 0.1 of Keras's."""
    loss = "mean_squared_error"
    input_shape = (100, 3)
    X = np.random.rand(500, *input_shape)
    y = np.random.rand(500, 4)

    kmodel = Sequential()
    kmodel.add(
        KConv1D(1,
                3,
                padding="same",
                input_shape=input_shape,
                activation="relu"))
    kmodel.add(Flatten())
    kmodel.add(KDense(y.shape[1]))
    kmodel.compile(loss=loss, optimizer=SGD(learning_rate=LEARNING_RATE))

    model = Model()
    model.add(Input(*input_shape))
    model.add(Conv1D(1, (3, ), padding="same", activation="relu"))
    model.add(Dense(y.shape[1]))

    # Identical starting point; keep a copy to prove training moved weights.
    model.set_weights(kmodel.get_weights())
    unfitted_weights = deepcopy(model.get_weights())

    model.fit(X,
              y,
              loss,
              epochs=EPOCHS,
              batch_size=BATCH_SIZE,
              learning_rate=LEARNING_RATE)
    kmodel.fit(X, y, epochs=EPOCHS, batch_size=BATCH_SIZE, shuffle=False)

    trained = model.get_weights()
    keras_trained = kmodel.get_weights()

    for before, after in zip(unfitted_weights, trained):
        assert np.amax(np.absolute(after - before)) > 0.0
    for ours, theirs in zip(trained, keras_trained):
        deviation = np.amax(np.absolute(ours - theirs))
        print(deviation)
        assert deviation < 0.1
def test_same_padding_with_stride1():
    """Check the per-neuron 'same' padding split for kernel 9 over a
    length-5 input.

    NOTE(review): the name says stride1 but the layer uses stride=2 —
    confirm which was intended.
    """
    model = Model().add(Input(5)) \
        .add(Conv1D(1, (9,), padding="same", stride=2)) \
        .add(Dense(1, activation="identity"))

    # predict() triggers padding computation on the conv layer's neurons.
    model.predict(np.random.rand(1, 5, 1))

    conv_neurons = model._layers[1].neurons
    expected = [(4, 0), (2, 2), (0, 4)]  # (lower, upper) per neuron
    for neuron, (lower, upper) in zip(conv_neurons, expected):
        assert neuron.lower_padding == lower
        assert neuron.upper_padding == upper
def test_same_padding():
    """Check the per-neuron 'same' padding split for a kernel (8) wider
    than the input (5) at the default stride."""
    model = Model().add(Input(5)) \
        .add(Conv1D(1, (8,), padding="same")) \
        .add(Dense(1, activation="identity"))

    # predict() triggers padding computation on the conv layer's neurons.
    model.predict(np.random.rand(1, 5, 1))

    conv_neurons = model._layers[1].neurons
    expected = [  # (lower_padding, upper_padding) for each output neuron
        (3, 0),
        (2, 1),
        (1, 2),
        (0, 3),
        (0, 4),
    ]
    for neuron, (lower, upper) in zip(conv_neurons, expected):
        assert neuron.lower_padding == lower
        assert neuron.upper_padding == upper
def test_inference_conv1d_2():
    """Inference parity check: five Conv1D layers mixing paddings,
    strides and activations, plus a final Dense; must match Keras to
    within 1e-4 after weight copy."""
    input_shape = (N, n_channels)
    samples = np.random.rand(500, *input_shape)

    # Reference model in Keras.
    kmodel = tf.keras.models.Sequential()
    kmodel.add(
        tf.keras.layers.Conv1D(1,
                               7,
                               padding="same",
                               input_shape=input_shape,
                               strides=2,
                               activation="relu"))
    kmodel.add(
        tf.keras.layers.Conv1D(5,
                               kernel_size * 4,
                               padding="same",
                               strides=3,
                               activation="tanh"))
    kmodel.add(
        tf.keras.layers.Conv1D(16,
                               kernel_size,
                               padding="same",
                               strides=2,
                               activation="sigmoid"))
    kmodel.add(
        tf.keras.layers.Conv1D(16,
                               kernel_size + 3,
                               padding="same",
                               strides=5,
                               activation="softmax"))
    kmodel.add(tf.keras.layers.Conv1D(5, kernel_size + 1, strides=3))
    kmodel.add(tf.keras.layers.Flatten())
    kmodel.add(tf.keras.layers.Dense(1))

    # Mirror architecture in the library under test.
    model = Model()
    model.add(Input(*input_shape))
    model.add(Conv1D(1, (7, ), padding="same", stride=2, activation="relu"))
    model.add(
        Conv1D(5, (kernel_size * 4, ),
               padding="same",
               stride=3,
               activation="tanh"))
    model.add(
        Conv1D(16, (kernel_size, ),
               padding="same",
               stride=2,
               activation="sigmoid"))
    model.add(
        Conv1D(16, (kernel_size + 3, ),
               padding="same",
               stride=5,
               activation="softmax"))
    model.add(Conv1D(5, (kernel_size + 1, ), stride=3))
    model.add(Dense(1))

    model.set_weights(kmodel.get_weights())

    prediction = model.predict(samples)
    keras_prediction = kmodel.predict(samples)
    assert np.amax(np.absolute(prediction - keras_prediction)) < 1e-4