Example #1
def gen_batchnorm():
    model = Sequential()
    model.add(BatchNormalization(input_shape=(2, 1, 3)))
    model.compile(optimizer='sgd', loss='mse')

    params = [0] * 4

    params[0] = np.array([3, 3, 3])  # gamma
    params[1] = np.array([1, 2, -1])  # beta
    params[2] = np.array([2, 2, 2])  # moving mean
    params[3] = np.array([5, 5, 5])  # moving variance

    data = np.ndarray((4, 2, 1, 3))

    l = 0
    for b in range(0, 4):
        for h in range(0, 2):
            for w in range(0, 1):
                for c in range(0, 3):
                    l += 1
                    data[b, h, w, c] = l % 7 - 3

    model.set_weights(params)
    output = model.predict(data, batch_size=1)

    wrt = js.JSONwriter(model, "tests/test_batchnorm_model.json")
    wrt.save()

    print(output.shape)

    write("tests/test_batchnorm_output.json", output.tolist())
Example #2
def gen_avgpool_1D_stride_2():
    model = Sequential()

    model.add(AveragePooling1D(pool_size=3, strides=2, input_shape=(5, 2)))
    model.compile(optimizer='rmsprop', loss='mse')

    inp = np.ndarray((1, 5, 2))

    inp[0, 0, 0] = 0
    inp[0, 0, 1] = 1
    inp[0, 1, 0] = 2
    inp[0, 1, 1] = 1
    inp[0, 2, 0] = 0
    inp[0, 2, 1] = 0
    inp[0, 3, 0] = 2
    inp[0, 3, 1] = 1
    inp[0, 4, 0] = 2
    inp[0, 4, 1] = 1

    wrt = js.JSONwriter(model, "tests/test_avgpool_1D_2_model.json")
    wrt.save()

    output = model.predict(inp, batch_size=1)
    print(output.shape)

    write("tests/test_avgpool_1D_2_output.json", output.tolist())
Example #3
def gen_maxpool_2D_stride_1_2():
    model = Sequential()

    model.add(
        MaxPooling2D(pool_size=(2, 4), strides=(2, 1), input_shape=(4, 5, 2)))
    model.compile(optimizer='rmsprop', loss='mse')

    inp = np.ndarray((1, 4, 5, 2))

    inp[0, 0, 0, 0] = 0
    inp[0, 0, 0, 1] = 1
    inp[0, 0, 1, 0] = 2
    inp[0, 0, 1, 1] = 1
    inp[0, 0, 2, 0] = 0
    inp[0, 0, 2, 1] = 0
    inp[0, 0, 3, 0] = 2
    inp[0, 0, 3, 1] = 1
    inp[0, 0, 4, 0] = 2
    inp[0, 0, 4, 1] = 1

    inp[0, 1, 0, 0] = 0
    inp[0, 1, 0, 1] = -1
    inp[0, 1, 1, 0] = 1
    inp[0, 1, 1, 1] = -2
    inp[0, 1, 2, 0] = 3
    inp[0, 1, 2, 1] = 1
    inp[0, 1, 3, 0] = 2
    inp[0, 1, 3, 1] = 0
    inp[0, 1, 4, 0] = 2
    inp[0, 1, 4, 1] = -3

    inp[0, 2, 0, 0] = 1
    inp[0, 2, 0, 1] = 2
    inp[0, 2, 1, 0] = -2
    inp[0, 2, 1, 1] = 0
    inp[0, 2, 2, 0] = 3
    inp[0, 2, 2, 1] = -3
    inp[0, 2, 3, 0] = 2
    inp[0, 2, 3, 1] = 1
    inp[0, 2, 4, 0] = 2
    inp[0, 2, 4, 1] = 0

    inp[0, 3, 0, 0] = 1
    inp[0, 3, 0, 1] = 2
    inp[0, 3, 1, 0] = 0
    inp[0, 3, 1, 1] = -2
    inp[0, 3, 2, 0] = 3
    inp[0, 3, 2, 1] = 1
    inp[0, 3, 3, 0] = 2
    inp[0, 3, 3, 1] = 3
    inp[0, 3, 4, 0] = -3
    inp[0, 3, 4, 1] = 1

    wrt = js.JSONwriter(model, "tests/test_maxpool_2D_2_model.json")
    wrt.save()

    output = model.predict(inp, batch_size=1)
    print(output.shape)

    write("tests/test_maxpool_2D_2_output.json", output.tolist())
Example #4
def gen_conv_1D_stride_2():
    model = Sequential()

    model.add(Conv1D(3, 2, strides=2, input_shape=(6, 4)))
    model.compile(optimizer='rmsprop', loss='mse')

    inp, weight = data_generator((1, 6, 4), (2, 4, 3))
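    # Keras Conv1D kernel shape: (kernel_size, input_channels, filters) = (2, 4, 3)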

    bias = np.ndarray(3)

    bias[0] = 0.5
    bias[1] = 1.5
    bias[2] = 2.5

    w = [weight, bias]
    model.set_weights(w)

    wrt = js.JSONwriter(model, "tests/test_conv_1D_2_model.json")
    wrt.save()

    output = model.predict(inp, batch_size=1)
    print(output.shape)

    write("tests/test_conv_1D_2_input.json", inp)
    write("tests/test_conv_1D_2_output.json", output)
Example #5
def gen_tanh():
    model = Sequential()

    model.add(Flatten(input_shape=(8, 1, 1)))
    model.add(Dense(4))
    model.add(Activation('tanh'))
    model.compile(optimizer='rmsprop', loss='mse')

    inp, weight = data_generator((1, 8, 1, 1), (8, 4))

    bias = np.ndarray(4)

    bias[0] = 0.5
    bias[1] = 1.5
    bias[2] = 1.0
    bias[3] = 3.0

    w = [weight, bias]
    model.set_weights(w)

    wrt = js.JSONwriter(model, "tests/test_tanh_model.json")
    wrt.save()

    output = model.predict(inp, batch_size=1)
    print(output.shape)

    write("tests/test_tanh_input.json", inp)
    write("tests/test_tanh_output.json", output)
Example #6
def gen_flatten():
    model = Sequential()

    model.add(Flatten(input_shape=(4, 5, 2)))
    model.compile(optimizer='rmsprop', loss='mse')

    inp = np.ndarray((1, 4, 5, 2))

    inp[0, 0, 0, 0] = 0
    inp[0, 0, 0, 1] = 1
    inp[0, 0, 1, 0] = 2
    inp[0, 0, 1, 1] = 1
    inp[0, 0, 2, 0] = 0
    inp[0, 0, 2, 1] = 0
    inp[0, 0, 3, 0] = 2
    inp[0, 0, 3, 1] = 1
    inp[0, 0, 4, 0] = 2
    inp[0, 0, 4, 1] = 1

    inp[0, 1, 0, 0] = 0
    inp[0, 1, 0, 1] = -1
    inp[0, 1, 1, 0] = 1
    inp[0, 1, 1, 1] = -2
    inp[0, 1, 2, 0] = 3
    inp[0, 1, 2, 1] = 1
    inp[0, 1, 3, 0] = 2
    inp[0, 1, 3, 1] = 0
    inp[0, 1, 4, 0] = 2
    inp[0, 1, 4, 1] = -3

    inp[0, 2, 0, 0] = 1
    inp[0, 2, 0, 1] = 2
    inp[0, 2, 1, 0] = -2
    inp[0, 2, 1, 1] = 0
    inp[0, 2, 2, 0] = 3
    inp[0, 2, 2, 1] = -3
    inp[0, 2, 3, 0] = 2
    inp[0, 2, 3, 1] = 1
    inp[0, 2, 4, 0] = 2
    inp[0, 2, 4, 1] = 0

    inp[0, 3, 0, 0] = 1
    inp[0, 3, 0, 1] = 2
    inp[0, 3, 1, 0] = 0
    inp[0, 3, 1, 1] = -2
    inp[0, 3, 2, 0] = 3
    inp[0, 3, 2, 1] = 1
    inp[0, 3, 3, 0] = 2
    inp[0, 3, 3, 1] = 3
    inp[0, 3, 4, 0] = -3
    inp[0, 3, 4, 1] = 1

    wrt = js.JSONwriter(model, "tests/test_flat_model.json")
    wrt.save()

    output = model.predict(inp, batch_size=1)
    print(output.shape)

    write("tests/test_flat_output.json", output.tolist())
Example #7
def gen_gru():
    model = Sequential()
    model.add(
        GRU(2,
            activation='tanh',
            recurrent_activation='relu',
            implementation=1,
            stateful=False,
            batch_input_shape=(5, 3, 3)))
    model.compile(optimizer='sgd', loss='mse')

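    # Keras GRU weights: kernel (input_dim, 3*units), recurrent kernel (units, 3*units),
    # bias (3*units,); the three blocks correspond to the update, reset and candidate gates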
    kernel = np.ones((3, 6))
    rec_kernel = np.ones((2, 6))
    bias = np.array([1, 2, -1, 0, 3, 4]) / 10

    k = 0
    for h in range(0, 3):
        for w in range(0, 6):
            k += 1
            kernel[h, w] = (k % 5 - 2) / 10

    k = 0
    for h in range(0, 2):
        for w in range(0, 6):
            k += 1
            rec_kernel[h, w] = (k % 5 - 2) / 10

    parameters = [kernel, rec_kernel, bias]
    model.set_weights(parameters)

    data = np.ndarray((5, 3, 3))

    l = 0
    for b in range(0, 5):
        for h in range(0, 3):
            for c in range(0, 3):
                l += 1
                data[b, h, c] = (l % 5 + 1) / 10

    output = model.predict(
        data, batch_size=5)  # the batch_size has no impact on the result here

    wrt = js.JSONwriter(model, "tests/test_gru_model.json")
    wrt.save()

    print(output.shape)

    inp = np.zeros((5, 1, 3, 3))
    ou = np.zeros((5, 1, 1, 2))
    inp[:, 0, :, :] = data[:, :, :]
    ou[:, 0, 0, :] = output[:, :]
    write("tests/test_gru_input.json", inp)
    write("tests/test_gru_output.json", ou)
Example #8
def gen_simplernn():
    model = Sequential()
    model.add(
        SimpleRNN(4,
                  activation='linear',
                  stateful=False,
                  batch_input_shape=(4, 3, 3)))
    model.compile(optimizer='sgd', loss='mse')

    data = np.ndarray((4, 3, 3))
    kernel = np.ones((3, 4))
    rec_kernel = np.ones((4, 4))
    bias = np.array([1.0, -1.0, 2.0, -4.0])
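    # SimpleRNN weights: kernel (input_dim, units), recurrent kernel (units, units), bias (units,)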

    k = 0
    for h in range(0, 3):
        for w in range(0, 4):
            k += 1
            kernel[h, w] = k % 5 - 2

    k = 0
    for h in range(0, 4):
        for w in range(0, 4):
            k += 1
            rec_kernel[h, w] = k % 5 - 2

    parameters = [kernel, rec_kernel, bias]

    model.set_weights(parameters)

    l = 0
    for b in range(0, 4):
        for h in range(0, 3):
            for c in range(0, 3):
                l += 1
                data[b, h, c] = l % 5 + 1

    output = model.predict(
        data, batch_size=4)  # the batch_size has no impact on the result here

    wrt = js.JSONwriter(model, "tests/test_simplernn_model.json")
    wrt.save()

    print(output.shape)

    inp = np.zeros((4, 1, 3, 3))
    inp[:, 0, :, :] = data[:, :, :]
    ou = np.zeros((4, 1, 1, 4))
    ou[:, 0, 0, :] = output[:, :]
    write("tests/test_simplernn_input.json", inp)
    write("tests/test_simplernn_output.json", ou)
Example #9
def gen_cropping2D_tests():
    model = Sequential()

    model.add(Cropping2D(cropping=((1, 1), (1, 2)), input_shape=(4, 5, 2)))
    model.compile(optimizer='rmsprop', loss='mse')

    inp, _ = data_generator((1, 4, 5, 2), None)

    wrt = js.JSONwriter(model, "tests/test_crop_2D_model.json")
    wrt.save()

    output = model.predict(inp, batch_size=1)
    print(output.shape)

    write("tests/test_crop_2D_input.json", inp)
    write("tests/test_crop_2D_output.json", output)
Example #10
def gen_permute_tests():
    model = Sequential()

    model.add(Permute((3, 1, 2), input_shape=(2, 3, 4)))
    model.compile(optimizer='rmsprop', loss='mse')

    inp, _ = data_generator((1, 2, 3, 4), None)

    wrt = js.JSONwriter(model, "tests/test_permute_model.json")
    wrt.save()

    output = model.predict(inp, batch_size=1)
    print(output.shape)

    write("tests/test_permute_input.json", inp)
    write("tests/test_permute_output.json", output)
Example #11
def gen_globalaveragepooling2D():
    model = Sequential()

    model.add(GlobalAveragePooling2D(input_shape=(3, 3, 2)))
    model.compile(optimizer='rmsprop', loss='mse')

    inp, _ = data_generator((1, 3, 3, 2), None)

    wrt = js.JSONwriter(model, "tests/test_globalavgpool_2D_model.json")
    wrt.save()

    output = model.predict(inp, batch_size=1)
    print(output.shape)

    write("tests/test_globalavgpool_2D_input.json", inp)
    write("tests/test_globalavgpool_2D_output.json", output)
Example #12
def gen_maxpool_1D_stride_2():
    model = Sequential()

    model.add(MaxPooling1D(pool_size=3, strides=2, input_shape=(5, 2)))
    model.compile(optimizer='rmsprop', loss='mse')

    inp, _ = data_generator((1, 5, 2), None)

    wrt = js.JSONwriter(model, "tests/test_maxpool_1D_2_model.json")
    wrt.save()

    output = model.predict(inp, batch_size=1)
    print(output.shape)

    write("tests/test_maxpool_1D_2_input.json", inp)
    write("tests/test_maxpool_1D_2_output.json", output)
Example #13
def gen_flatten():
    model = Sequential()

    model.add(Flatten(input_shape=(4, 5, 2)))
    model.compile(optimizer='rmsprop', loss='mse')

    inp, _ = data_generator((1, 4, 5, 2), None)

    wrt = js.JSONwriter(model, "tests/test_flat_model.json")
    wrt.save()

    output = model.predict(inp, batch_size=1)
    print(output.shape)

    write("tests/test_flat_input.json", inp)
    write("tests/test_flat_output.json", output)
Example #14
def gen_repeatvector_tests():
    model = Sequential()

    model.add(RepeatVector(3, input_shape=(4, )))
    model.compile(optimizer='rmsprop', loss='mse')

    inp, _ = data_generator((2, 1, 1, 4), None)  #np.ndarray((2, 4))

    wrt = js.JSONwriter(model, "tests/test_repeatvector_model.json")
    wrt.save()

    output = np.zeros((2, 1, 3, 4))
    output[:, 0, :, :] = model.predict(inp[:, 0, 0, :], batch_size=1)
    print(output.shape)

    write("tests/test_repeatvector_input.json", inp)
    write("tests/test_repeatvector_output.json", output)
Example #15
def gen_avgpool_2D_stride_1_2():
    model = Sequential()

    model.add(
        AveragePooling2D(pool_size=(3, 4),
                         strides=(1, 1),
                         input_shape=(4, 5, 2)))
    model.compile(optimizer='rmsprop', loss='mse')

    inp, _ = data_generator((1, 4, 5, 2), None)

    wrt = js.JSONwriter(model, "tests/test_avgpool_2D_2_model.json")
    wrt.save()

    output = model.predict(inp, batch_size=1)
    print(output.shape)

    write("tests/test_avgpool_2D_2_input.json", inp)
    write("tests/test_avgpool_2D_2_output.json", output)
Example #16
def gen_repeatvector_tests():

    model = Sequential()

    model.add(RepeatVector(3, input_shape=(4, )))
    model.compile(optimizer='rmsprop', loss='mse')

    inp = np.ndarray((2, 4))

    for l in range(0, 4):
        inp[0, l] = l + 1
        inp[1, l] = -(l + 1)

    wrt = js.JSONwriter(model, "tests/test_repeatvector_model.json")
    wrt.save()

    output = model.predict(inp, batch_size=1)
    print(output.shape)

    write("tests/test_repeatvector_output.json", output.tolist())
Example #17
def gen_cropping1D_tests():

    model = Sequential()

    model.add(Cropping1D(cropping=(1, 2), input_shape=(5, 2)))
    model.compile(optimizer='rmsprop', loss='mse')

    inp = np.ndarray((1, 5, 2))

    for l in range(0, 5):
        inp[0, l, 0] = l + 1
        inp[0, l, 1] = -(l + 1)

    wrt = js.JSONwriter(model, "tests/test_crop_1D_model.json")
    wrt.save()

    output = model.predict(inp, batch_size=1)
    print(output.shape)

    write("tests/test_crop_1D_output.json", output.tolist())
Example #18
def gen_globalmaxpooling2D():

    model = Sequential()

    model.add(GlobalMaxPooling2D(input_shape=(3, 3, 2)))
    model.compile(optimizer='rmsprop', loss='mse')

    inp = np.ndarray((1, 3, 3, 2))

    inp[0, 0, 0, 0] = 1
    inp[0, 1, 0, 0] = 2
    inp[0, 2, 0, 0] = 0

    inp[0, 0, 1, 0] = 3
    inp[0, 1, 1, 0] = 4
    inp[0, 2, 1, 0] = 0

    inp[0, 0, 2, 0] = 2
    inp[0, 1, 2, 0] = 2
    inp[0, 2, 2, 0] = 0

    inp[0, 0, 0, 1] = 0
    inp[0, 1, 0, 1] = 3
    inp[0, 2, 0, 1] = 1

    inp[0, 0, 1, 1] = 1
    inp[0, 1, 1, 1] = 1
    inp[0, 2, 1, 1] = -1

    inp[0, 0, 2, 1] = -3
    inp[0, 1, 2, 1] = -1
    inp[0, 2, 2, 1] = 0

    wrt = js.JSONwriter(model, "tests/test_globalmaxpool_2D_model.json")
    wrt.save()

    output = model.predict(inp, batch_size=1)
    print(output.shape)

    write("tests/test_globalmaxpool_2D_output.json", output.tolist())
Example #19
def gen_cropping2D_tests():

    model = Sequential()

    model.add(Cropping2D(cropping=((1, 1), (1, 2)), input_shape=(4, 5, 2)))
    model.compile(optimizer='rmsprop', loss='mse')

    inp = np.ndarray((1, 4, 5, 2))

    l = 0
    for h in range(0, 4):
        for w in range(0, 5):
            l += 1
            inp[0, h, w, 0] = l + 1
            inp[0, h, w, 1] = -(l + 1)

    wrt = js.JSONwriter(model, "tests/test_crop_2D_model.json")
    wrt.save()

    output = model.predict(inp, batch_size=1)
    print(output.shape)

    write("tests/test_crop_2D_output.json", output.tolist())
Example #20
def gen_dropout():
    model = Sequential()
    model.add(
        Convolution2D(8, (2, 2),
                      strides=(2, 2),
                      input_shape=(4, 4, 1),
                      activation='relu'))
    model.add(Dropout(rate=0.2))
    model.add(Flatten())
    model.add(Dense(2, activation='linear'))

    model.compile(optimizer='sgd', loss='mse')

    inp, _ = data_generator((1, 4, 4, 1), None)

    wrt = js.JSONwriter(model, "tests/test_dropout_model.json")
    wrt.save()

    output = model.predict(inp, batch_size=1)
    print(output.shape)

    write("tests/test_dropout_input.json", inp)
    write("tests/test_dropout_output.json", output)
Example #21
def gen_permute_tests():

    model = Sequential()

    model.add(Permute((3, 1, 2), input_shape=(2, 3, 4)))
    model.compile(optimizer='rmsprop', loss='mse')

    inp = np.ndarray((1, 2, 3, 4))

    l = 0
    for h in range(0, 2):
        for w in range(0, 3):
            for c in range(0, 4):
                l += 1
                inp[0, h, w, c] = l + 1

    wrt = js.JSONwriter(model, "tests/test_permute_model.json")
    wrt.save()

    output = model.predict(inp, batch_size=1)
    print(output.shape)

    write("tests/test_permute_output.json", output.tolist())
Example #22
def gen_conv_2D_stride_1_1():
    model = Sequential()

    model.add(Conv2D(2, (3, 4), strides=(1, 1), input_shape=(4, 5, 2)))
    model.compile(optimizer='rmsprop', loss='mse')

    inp, weight = data_generator((1, 4, 5, 2), (3, 4, 2, 2))
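    # Keras Conv2D kernel shape: (kernel_height, kernel_width, input_channels, filters) = (3, 4, 2, 2)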

    bias = np.ndarray(2)

    bias[0] = 0.5
    bias[1] = 1.5

    w = [weight, bias]
    model.set_weights(w)

    wrt = js.JSONwriter(model, "tests/test_conv_2D_1_model.json")
    wrt.save()

    output = model.predict(inp, batch_size=1)
    print(output.shape)

    write("tests/test_conv_2D_1_input.json", inp)
    write("tests/test_conv_2D_1_output.json", output)
Example #23
def gen_batchnorm():
    model = Sequential()
    model.add(BatchNormalization(input_shape=(2, 1, 3)))
    model.compile(optimizer='sgd', loss='mse')

    params = [0] * 4

    params[0] = np.array([3, 3, 3])  # gamma
    params[1] = np.array([1, 2, -1])  # beta
    params[2] = np.array([2, 2, 2])  # moving mean
    params[3] = np.array([5, 5, 5])  # moving variance

    inp, _ = data_generator((4, 2, 1, 3), None)

    model.set_weights(params)
    output = model.predict(inp, batch_size=1)

    wrt = js.JSONwriter(model, "tests/test_batchnorm_model.json")
    wrt.save()

    print(output.shape)

    write("tests/test_batchnorm_input.json", inp)
    write("tests/test_batchnorm_output.json", output)
Example #24
def gen_conv_2D_stride_1_2():
    model = Sequential()

    model.add(Conv2D(2, (2, 4), strides=(2, 1), input_shape=(4, 5, 2)))
    model.compile(optimizer='rmsprop', loss='mse')

    weight = np.ndarray((2, 4, 2, 2))
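    # Keras Conv2D kernel shape: (kernel_height, kernel_width, input_channels, filters) = (2, 4, 2, 2)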

    weight[0, 0, 0, 0] = 0
    weight[0, 0, 0, 1] = 1.5
    weight[0, 0, 1, 0] = 2
    weight[0, 0, 1, 1] = 0.5

    weight[0, 1, 0, 0] = -1
    weight[0, 1, 0, 1] = -2
    weight[0, 1, 1, 0] = 3
    weight[0, 1, 1, 1] = 0

    weight[0, 2, 0, 0] = 1
    weight[0, 2, 0, 1] = 1
    weight[0, 2, 1, 0] = -3
    weight[0, 2, 1, 1] = 2.5

    weight[0, 3, 0, 0] = 1.5
    weight[0, 3, 0, 1] = 0.5
    weight[0, 3, 1, 0] = -2
    weight[0, 3, 1, 1] = 1.5

    weight[1, 0, 0, 0] = -0.5
    weight[1, 0, 0, 1] = 2.5
    weight[1, 0, 1, 0] = 2.5
    weight[1, 0, 1, 1] = 0.5

    weight[1, 1, 0, 0] = -1.5
    weight[1, 1, 0, 1] = -1
    weight[1, 1, 1, 0] = 3
    weight[1, 1, 1, 1] = 0.5

    weight[1, 2, 0, 0] = 1.5
    weight[1, 2, 0, 1] = 1
    weight[1, 2, 1, 0] = 0
    weight[1, 2, 1, 1] = 0

    weight[1, 3, 0, 0] = 1.5
    weight[1, 3, 0, 1] = 0
    weight[1, 3, 1, 0] = -2
    weight[1, 3, 1, 1] = 3

    bias = np.ndarray(2)

    bias[0] = 0.5
    bias[1] = 1.5

    w = [weight, bias]
    model.set_weights(w)

    inp = np.ndarray((1, 4, 5, 2))

    inp[0, 0, 0, 0] = 0
    inp[0, 0, 0, 1] = 1
    inp[0, 0, 1, 0] = 2
    inp[0, 0, 1, 1] = 1
    inp[0, 0, 2, 0] = 0
    inp[0, 0, 2, 1] = 0
    inp[0, 0, 3, 0] = 2
    inp[0, 0, 3, 1] = 1
    inp[0, 0, 4, 0] = 2
    inp[0, 0, 4, 1] = 1

    inp[0, 1, 0, 0] = 0
    inp[0, 1, 0, 1] = -1
    inp[0, 1, 1, 0] = 1
    inp[0, 1, 1, 1] = -2
    inp[0, 1, 2, 0] = 3
    inp[0, 1, 2, 1] = 1
    inp[0, 1, 3, 0] = 2
    inp[0, 1, 3, 1] = 0
    inp[0, 1, 4, 0] = 2
    inp[0, 1, 4, 1] = -3

    inp[0, 2, 0, 0] = 1
    inp[0, 2, 0, 1] = 2
    inp[0, 2, 1, 0] = -2
    inp[0, 2, 1, 1] = 0
    inp[0, 2, 2, 0] = 3
    inp[0, 2, 2, 1] = -3
    inp[0, 2, 3, 0] = 2
    inp[0, 2, 3, 1] = 1
    inp[0, 2, 4, 0] = 2
    inp[0, 2, 4, 1] = 0

    inp[0, 3, 0, 0] = 1
    inp[0, 3, 0, 1] = 2
    inp[0, 3, 1, 0] = 0
    inp[0, 3, 1, 1] = -2
    inp[0, 3, 2, 0] = 3
    inp[0, 3, 2, 1] = 1
    inp[0, 3, 3, 0] = 2
    inp[0, 3, 3, 1] = 3
    inp[0, 3, 4, 0] = -3
    inp[0, 3, 4, 1] = 1

    wrt = js.JSONwriter(model, "tests/test_conv_2D_2_model.json")
    wrt.save()

    output = model.predict(inp, batch_size=1)
    print(output.shape)

    write("tests/test_conv_2D_2_output.json", output.tolist())
Example #25
def gen_tanh():
    model = Sequential()

    model.add(Flatten(input_shape=(8, 1, 1)))
    model.add(Dense(4))
    model.add(Activation('tanh'))
    model.compile(optimizer='rmsprop', loss='mse')

    inp = np.ndarray((1, 8, 1, 1))

    inp[0, 0, 0, 0] = 1
    inp[0, 1, 0, 0] = 2
    inp[0, 2, 0, 0] = -1
    inp[0, 3, 0, 0] = 0

    inp[0, 4, 0, 0] = 3
    inp[0, 5, 0, 0] = 1
    inp[0, 6, 0, 0] = 1
    inp[0, 7, 0, 0] = 2

    weight = np.ndarray((8, 4))

    weight[0, 0] = 0
    weight[0, 1] = 1.5
    weight[0, 2] = 2
    weight[0, 3] = 0.5

    weight[1, 0] = -1
    weight[1, 1] = -2
    weight[1, 2] = 3
    weight[1, 3] = 0

    weight[2, 0] = 1
    weight[2, 1] = 1
    weight[2, 2] = -3
    weight[2, 3] = 2.5

    weight[3, 0] = 1.5
    weight[3, 1] = 0.5
    weight[3, 2] = -2
    weight[3, 3] = 1.5

    weight[4, 0] = -0.5
    weight[4, 1] = 2.5
    weight[4, 2] = 2.5
    weight[4, 3] = 0.5

    weight[5, 0] = -1.5
    weight[5, 1] = -1
    weight[5, 2] = 3
    weight[5, 3] = 0.5

    weight[6, 0] = 1.5
    weight[6, 1] = 1
    weight[6, 2] = 0
    weight[6, 3] = 0

    weight[7, 0] = 1.5
    weight[7, 1] = 0
    weight[7, 2] = -2
    weight[7, 3] = 3

    bias = np.ndarray(4)

    bias[0] = 0.5
    bias[1] = 1.5
    bias[2] = 1.0
    bias[3] = 3.0

    w = [weight, bias]
    model.set_weights(w)

    wrt = js.JSONwriter(model, "tests/test_tanh_model.json")
    wrt.save()

    output = model.predict(inp, batch_size=1)
    print(output.shape)

    write("tests/test_tanh_output.json", output.tolist())
Example #26
from keras.models import Sequential
from keras.layers import Conv2D, Dense, Flatten, Activation
from keras.optimizers import RMSprop, Adam
import KerasModeltoJSON as js
import numpy as np
import time

model = Sequential()
model.add(Conv2D(32, (8, 8), padding='valid', input_shape=(84, 84, 4), strides=(4, 4)))
model.add(Activation('relu'))
model.add(Conv2D(64, (4, 4), padding='valid', strides=(2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3), padding='valid', strides=(1, 1)))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dense(6))
      
# rmsprop = RMSprop(lr=alpha, epsilon=0.01, clipvalue=1.0, decay=0.01)
adam = Adam(lr=0.001)
model.compile(optimizer=adam, loss='mse')

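# random dummy input matching the model's (84, 84, 4) input shape, used only to time one forward pass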
inp = np.random.rand(1, 84, 84, 4)

start = time.process_time()
model.predict(inp, batch_size=1)
end = time.process_time()

print(end - start)

wrt = js.JSONwriter(model, "tests/test_cnn_model.json")
wrt.save()