Example No. 1
def build(nc, h, w, loss='categorical_crossentropy', optimizer='adam'):

    inp = Input(shape=(h, w, 3))
    enet = encoder.build(inp)
    enet = decoder.build(enet, nc=nc)

    enet = Activation('sigmoid')(enet)
    # enet = Convolution2D(3, (1, 1), activation='sigmoid')(enet)
    model = Model(inputs=inp, outputs=enet)

    model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy', 'mean_squared_error'])

    return model
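A hedged usage sketch for the builder above; `encoder` and `decoder` are project-local modules assumed to be importable, and the class count and resolution are illustrative choices, not values from the original project:

# Illustrative call only: 21 classes at 256x256 is an assumption.
model = build(nc=21, h=256, w=256)
model.summary()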
Example No. 2
def test_model_with_partial_loss():
    a = Input(shape=(3, ), name='input_a')
    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    a_3 = dp(a_2)
    model = Model(a, [a_2, a_3])

    optimizer = 'rmsprop'
    loss = {'dropout': 'mse'}
    model.compile(optimizer, loss, metrics=['mae'])

    input_a_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))

    # test train_on_batch
    out = model.train_on_batch(input_a_np, output_a_np)
    out = model.test_on_batch(input_a_np, output_a_np)
    # fit
    out = model.fit(input_a_np, [output_a_np])
    # evaluate
    out = model.evaluate(input_a_np, [output_a_np])

    # Same without dropout.
    a = Input(shape=(3, ), name='input_a')
    a_2 = Dense(4, name='dense_1')(a)
    a_3 = Dense(4, name='dense_2')(a_2)
    model = Model(a, [a_2, a_3])

    optimizer = 'rmsprop'
    loss = {'dense_2': 'mse'}
    model.compile(optimizer, loss, metrics={'dense_1': 'mae'})

    # test train_on_batch
    out = model.train_on_batch(input_a_np, output_a_np)
    out = model.test_on_batch(input_a_np, output_a_np)
    # fit
    out = model.fit(input_a_np, [output_a_np])
    # evaluate
    out = model.evaluate(input_a_np, [output_a_np])
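A condensed, self-contained version of the partial-loss idea above, written against tf.keras (an assumption; the test itself targets standalone Keras). Only the output named in the loss dict contributes to the objective:

import numpy as np
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model

inp = Input(shape=(3,), name='input_a')
h = Dense(4, name='dense_1')(inp)
out = Dense(4, name='dense_2')(h)
model = Model(inp, [h, out])

# Only 'dense_2' receives a loss; 'dense_1' is excluded from the objective.
model.compile(optimizer='rmsprop', loss={'dense_2': 'mse'})
model.fit(np.random.random((10, 3)),
          {'dense_2': np.random.random((10, 4))}, verbose=0)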
Example No. 3
def test_return_state(layer_class):
    num_states = 2 if layer_class is recurrent.LSTM else 1

    inputs = Input(batch_shape=(num_samples, timesteps, embedding_dim))
    layer = layer_class(units, return_state=True, stateful=True)
    outputs = layer(inputs)
    output, state = outputs[0], outputs[1:]
    assert len(state) == num_states
    model = Model(inputs, state[0])

    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    state = model.predict(inputs)
    np.testing.assert_allclose(K.eval(layer.states[0]), state, atol=1e-4)
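A minimal illustration of return_state, using tf.keras' LSTM (an assumption; the test above parametrizes over standalone Keras' recurrent layers). An LSTM with return_state=True returns the output plus two state tensors:

import numpy as np
from tensorflow.keras.layers import Input, LSTM
from tensorflow.keras.models import Model

x = Input(batch_shape=(2, 5, 8))  # stateful layers need a fixed batch size
out, state_h, state_c = LSTM(4, return_state=True, stateful=True)(x)
m = Model(x, state_h)
print(m.predict(np.random.random((2, 5, 8))).shape)  # (2, 4)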
Example No. 4
def merge_models(nb_classes):

    input = Input(shape=(14, ))
    #dense1 = keras.layers.Dense(nb_classes*2,name='dense_merge_2')(input)
    #dropout1 = keras.layers.Dropout(0.3,name='dense_dropout_1')(dense1)
    out = keras.layers.Dense(nb_classes,
                             activation='softmax',
                             name='dense_merge_1')(input)
    model = keras.models.Model(inputs=input, outputs=out)

    model.summary()

    return model
Example No. 5
    def build_model(self):
        data = self.data
        inputs = Input(shape=(data.seq_length, ))
        embeddings = Embedding(
            data.max_feature,
            data.embed_dim,
            weights=[data.embed_matrix],
            trainable=False)(inputs)
        x = SpatialDropout1D(self.dropout)(embeddings)
        x = Bidirectional(CuDNNGRU(100, return_sequences=True))(x)
        x = Bidirectional(CuDNNGRU(100, return_sequences=True))(x)
        # x = Bidirectional(CuDNNGRU(data.embed_dim, return_sequences=True))(x)
        x = TimeDistributed(Dense(100, activation="relu"))(x)
        attention = AttLayer()(x)
        avg_pool = GlobalAveragePooling1D()(x)
        max_pool = GlobalMaxPooling1D()(x)
        conc = concatenate([avg_pool, max_pool, attention])
        encoder = Model(inputs=inputs, outputs=conc)

        inputs2 = Input(shape=(self.max_sent, data.seq_length, ))
        x2 = TimeDistributed(encoder)(inputs2)
        x2 = Bidirectional(CuDNNGRU(100, return_sequences=True))(x2)
        x2 = Bidirectional(CuDNNGRU(100, return_sequences=True))(x2)
        # x2 = Bidirectional(CuDNNGRU(data.embed_dim, return_sequences=True))(x2)
        x2 = TimeDistributed(Dense(100, activation="relu"))(x2)
        attention2 = AttLayer()(x2)
        avg_pool2 = GlobalAveragePooling1D()(x2)
        max_pool2 = GlobalMaxPooling1D()(x2)
        conc2 = concatenate([avg_pool2, max_pool2, attention2])
        x2 = Dense(self.dense_size, activation="relu")(conc2)
        outputs2 = Dense(6, activation="sigmoid")(x2)
        model = Model(inputs=inputs2, outputs=outputs2)

        optimizer = self.get_optimizer(self.lr, self.optim_name)
        model.compile(
            loss='binary_crossentropy',
            optimizer=optimizer,
            metrics=['accuracy'])
        self.model = model
Example No. 6
def test_specify_initial_state_keras_tensor(layer_class):
    num_states = 2 if layer_class is recurrent.LSTM else 1

    # Test with Keras tensor
    inputs = Input((timesteps, embedding_dim))
    initial_state = [Input((units, )) for _ in range(num_states)]
    layer = layer_class(units)
    if len(initial_state) == 1:
        output = layer(inputs, initial_state=initial_state[0])
    else:
        output = layer(inputs, initial_state=initial_state)
    assert initial_state[0] in layer.inbound_nodes[0].input_tensors

    model = Model([inputs] + initial_state, output)
    model.compile(loss='categorical_crossentropy', optimizer='adam')

    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    initial_state = [
        np.random.random((num_samples, units)) for _ in range(num_states)
    ]
    targets = np.random.random((num_samples, units))
    model.fit([inputs] + initial_state, targets)
Example No. 7
def test_trainable_weights_count_consistency():
    """Tests the trainable weights consistency check of Model.

    This verifies that a warning is shown if model.trainable is modified
    and the model is summarized/run without a new call to .compile()

    Reproduce issue #8121
    """
    a = Input(shape=(3, ), name='input_a')
    model1 = Model(inputs=a, outputs=Dense(1)(a))

    model1.trainable = False
    b = Input(shape=(3, ), name='input_b')
    y = model1(b)
    model2 = Model(inputs=b, outputs=Dense(1)(y))

    model2.compile(optimizer='adam', loss='mse')

    model1.trainable = True

    # Should warn on .summary()
    with pytest.warns(UserWarning) as w:
        model2.summary()
    warning_raised = any(['Discrepancy' in str(w_.message) for w_ in w])
    assert warning_raised, 'No warning raised when trainable is modified without .compile.'

    # And on .fit()
    with pytest.warns(UserWarning) as w:
        model2.fit(x=np.zeros((5, 3)), y=np.zeros((5, 1)))
    warning_raised = any(['Discrepancy' in str(w_.message) for w_ in w])
    assert warning_raised, 'No warning raised when trainable is modified without .compile.'

    # And shouldn't warn if we recompile
    model2.compile(optimizer='adam', loss='mse')
    with pytest.warns(None) as w:
        model2.summary()
    assert len(w) == 0, \
        "Warning raised even when .compile() is called after modifying .trainable"
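The pattern the test enforces, in miniature: after toggling trainable, compile again so the change takes effect. This sketch assumes tf.keras and mirrors the test rather than any project code:

import numpy as np
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model

a = Input(shape=(3,))
inner = Model(a, Dense(1)(a))
inner.trainable = False

b = Input(shape=(3,))
outer = Model(b, Dense(1)(inner(b)))
outer.compile('adam', 'mse')   # inner is frozen as of this compile
inner.trainable = True
outer.compile('adam', 'mse')   # recompile, otherwise summary()/fit() warn
outer.fit(np.zeros((4, 3)), np.zeros((4, 1)), verbose=0)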
Example No. 8
    def build_network(self):
        """ Build the Policy Value Neural Net using Keras. """
        inputs = Input(shape=(4, self.size, self.size))

        # 3 common conv layers
        c_conv1 = Conv2D(filters=32,
                         kernel_size=(3, 3),
                         padding="same",
                         data_format="channels_first",
                         activation="relu",
                         kernel_regularizer=l2(self.l2_const))(inputs)
        c_conv2 = Conv2D(filters=64,
                         kernel_size=(3, 3),
                         padding="same",
                         data_format="channels_first",
                         activation="relu",
                         kernel_regularizer=l2(self.l2_const))(c_conv1)
        c_conv3 = Conv2D(filters=128,
                         kernel_size=(3, 3),
                         padding="same",
                         data_format="channels_first",
                         activation="relu",
                         kernel_regularizer=l2(self.l2_const))(c_conv2)

        # policy head
        p_conv = Conv2D(filters=4,
                        kernel_size=(1, 1),
                        data_format="channels_first",
                        activation="relu",
                        kernel_regularizer=l2(self.l2_const))(c_conv3)
        p_flat = Flatten()(p_conv)
        self.policy_net = Dense(self.size * self.size,
                                activation="softmax",
                                kernel_regularizer=l2(self.l2_const))(p_flat)

        # value head
        v_conv = Conv2D(filters=2,
                        kernel_size=(1, 1),
                        data_format="channels_first",
                        activation="relu",
                        kernel_regularizer=l2(self.l2_const))(c_conv3)
        v_flat = Flatten()(v_conv)
        v_dense = Dense(64, kernel_regularizer=l2(self.l2_const))(v_flat)
        self.value_net = Dense(1,
                               activation="tanh",
                               kernel_regularizer=l2(self.l2_const))(v_dense)

        # connect and build the model
        self.model = Model(inputs, [self.policy_net, self.value_net])
        losses = ['categorical_crossentropy', 'mean_squared_error']
        self.model.compile(optimizer=Adam(), loss=losses)
Example No. 9
def build_model(img_shape, activation='sigmoid'):
    optim = Adam(lr=0.0001)
    branch_model = get_branch_model(img_shape)

    mid = 32
    xa_inp = Input(shape=branch_model.output_shape[1:], name='hm_inp_a')
    xb_inp = Input(shape=branch_model.output_shape[1:], name='hm_inp_b')
    x1 = Lambda(lambda x: x[0] * x[1], name='lambda_1')([xa_inp, xb_inp])
    x2 = Lambda(lambda x: x[0] + x[1], name='lambda_2')([xa_inp, xb_inp])
    x3 = Lambda(lambda x: K.abs(x[0] - x[1]), name='lambda_3')([xa_inp, xb_inp])
    x4 = Lambda(lambda x: K.square(x), name='lambda_4')(x3)
    x = Concatenate(name='concat_1')([x1, x2, x3, x4])
    x = Reshape((4, branch_model.output_shape[1], 1), name='reshape1')(x)

    # Per feature NN with shared weight is implemented using CONV2D with appropriate stride.
    x = Conv2D(mid, (4, 1), activation='relu', padding='valid', name='hm_conv_2d_1')(x)
    x = Reshape((branch_model.output_shape[1], mid, 1), name='hm_reshape_2')(x)
    x = Conv2D(1, (1, mid), activation='linear', padding='valid', name='hm_conv_2d_2')(x)
    x = Flatten(name='flatten')(x)

    # Weighted sum implemented as a Dense layer.
    x = Dense(1, use_bias=True, activation=activation, name='weighted-average')(x)
    head_model = Model(inputs=[xa_inp, xb_inp], outputs=x, name='head')

    ########################
    # SIAMESE NEURAL NETWORK
    ########################
    # Complete model is constructed by calling the branch model on each input image,
    # and then the head model on the resulting 512-vectors.
    img_a = Input(shape=img_shape)
    img_b = Input(shape=img_shape)
    xa = branch_model(img_a)
    xb = branch_model(img_b)
    x = head_model([xa, xb])
    model = Model(inputs=[img_a, img_b], outputs=x, name='full_model')
    model.compile(optim, loss='binary_crossentropy', metrics=['binary_crossentropy', 'acc'])

    return model, branch_model, head_model
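The "per feature NN with shared weight" comment is worth a standalone sketch: a Conv2D with a (4, 1) kernel applies one shared 4-to-mid mapping to every feature column. tf.keras is assumed, and the shapes below are illustrative, not the original branch model's:

import numpy as np
from tensorflow.keras.layers import Input, Conv2D, Reshape
from tensorflow.keras.models import Model

n_feat, mid = 16, 32
inp = Input(shape=(4, n_feat, 1))                 # 4 comparison maps per feature
x = Conv2D(mid, (4, 1), activation='relu')(inp)   # one shared 4->mid mapping
x = Reshape((n_feat, mid, 1))(x)
x = Conv2D(1, (1, mid), activation='linear')(x)   # one shared mid->1 mapping
m = Model(inp, x)
print(m.predict(np.zeros((1, 4, n_feat, 1))).shape)  # (1, 16, 1, 1)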
Example No. 10
def test_sequential_enumerate():
    x = Input(shape=(20, ))
    dense1 = Dense(20)
    dense2 = Dense(10)
    dense3 = Dense(1)
    seq = sequential([
        dense1,
        dense2,
        dense3,
    ], ns='hello')
    seq(x)
    assert dense1.name.endswith('hello.00_dense')
    assert dense2.name.endswith('hello.01_dense')
    assert dense3.name.endswith('hello.02_dense')
Example No. 11
def test_sequential_namespace():
    x = Input(shape=(20, ))
    dense1 = Dense(20)
    dense2 = Dense(10)
    dense3 = Dense(1)
    seq = sequential([
        dense1,
        dense2,
        dense3,
    ], ns='hello')
    seq(x)
    assert dense1.name.startswith('hello.')
    assert dense2.name.startswith('hello.')
    assert dense3.name.startswith('hello.')
Example No. 12
def test_sequential_trainable():
    x = Input(shape=(20, ))
    dense1 = Dense(20)
    dense2 = Dense(10)
    dense3 = Dense(1)
    seq = sequential([
        dense1,
        dense2,
        dense3,
    ], trainable=False)
    seq(x)
    assert collect_trainable_weights(dense1) == []
    assert collect_trainable_weights(dense2) == []
    assert collect_trainable_weights(dense3) == []
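For readers without the project at hand, here is a hypothetical `sequential` helper consistent with the three tests above; the real implementation lives in the library under test and may differ. It assumes standalone Keras, where layer.name is a writable attribute:

def sequential(layers, ns=None, trainable=True):
    # Rename and freeze layers eagerly, then return a callable that chains them.
    for i, layer in enumerate(layers):
        if ns is not None:
            base = layer.name.rsplit('_', 1)[0]          # 'dense_1' -> 'dense'
            layer.name = '{}.{:02d}_{}'.format(ns, i, base)
        layer.trainable = trainable

    def call(x):
        for layer in layers:
            x = layer(x)
        return x

    return call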
Example No. 13
 def simple_model(self, layers):
     print("building simple input")
     input = x = Input(shape=(12, 8, 8))
     x = BatchNormalization(name="in_2", axis=1)(x)
     x = Conv2D(padding='same',
                filters=64,
                kernel_size=4,
                use_bias=False,
                data_format="channels_first",
                name="in_1")(x)
     x = Activation("relu", name="in_3")(x)
     for i in range(layers):
         x = self.build_residuals(x, i)
     # out1 and out2 are assumed to be defined by head-building code omitted here.
     return Model(input, [out1, out2], name="palmtree")
Example No. 14
def create_model_mtl_only_exchange_rate(horizon=1, nb_train_samples=512, batch_size=32,  feature_count=6, time_lag=6):

    x = Input(shape=(time_lag, feature_count), name="input_layer")
    conv = Conv1D(filters=5, kernel_size=1, activation='relu')(x)
    conv2 = Conv1D(filters=5, kernel_size=3, padding='causal', strides=1, activation='relu', dilation_rate=2)(conv)
    conv3 = Conv1D(filters=5, kernel_size=3, padding='causal', strides=1, activation='relu', dilation_rate=4)(conv2)

    mp = MaxPooling1D(pool_size=1)(conv3)
    # conv2 = Conv1D(filters=5, kernel_size=3, activation='relu')(mp)
    # mp = MaxPooling1D(pool_size=2)(conv2)

    lstm1 = GRU(16, return_sequences=True)(mp)
    lstm2 = GRU(32, return_sequences=True)(lstm1)

    shared_dense = Dense(64, name="shared_layer")(lstm2)

    ## sub1 is main task; units = reshape dimension multiplication
    sub1 = GRU(units=48, name="task1")(shared_dense)
    sub2 = GRU(units=16, name="task2")(shared_dense)
    sub3 = GRU(units=16, name="task3")(shared_dense)
    sub4 = GRU(units=16, name="task4")(shared_dense)
    sub5 = GRU(units=16, name="task5")(shared_dense)

    out1 = Dense(8, name="spec_out1")(sub1)
    out1 = Dense(1, name="out1")(out1)

    out2 = Dense(8, name="spec_out2")(sub2)
    out2 = Dense(1, name="out2")(out2)

    out3 = Dense(1, name="spec_out3")(sub3)
    out3 = Dense(1, name="out3")(out3)

    out4 = Dense(1, name="spec_out4")(sub4)
    out4 = Dense(1, name="out4")(out4)

    out5 = Dense(1, name="spec_out5")(sub5)
    out5 = Dense(1, name="out5")(out5)

    outputs = [out1, out2, out3, out4, out5]

    model = KerasModel(inputs=x, outputs=outputs)

    model.compile(optimizer='adam', loss='mse', metrics=['mae', 'mape', 'mse'], loss_weights=[0.5, 0.25, 0.25, 0.25, 0.25])
    # Callbacks
    # callbacks = [EarlyStopping(monitor='val_mse', patience=10)]

    model.summary()

    return model
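Note how the single loss='mse' is broadcast to all five heads, while loss_weights pairs positionally with [out1..out5], giving 0.5*mse(out1) + 0.25*(mse(out2) + ... + mse(out5)). A hedged training sketch with illustrative random data, matching the function's default shapes:

import numpy as np

model = create_model_mtl_only_exchange_rate()
X = np.random.random((512, 6, 6))                    # (samples, time_lag, features)
ys = [np.random.random((512, 1)) for _ in range(5)]  # one target per head
model.fit(X, ys, batch_size=32, epochs=1, verbose=0)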
Example No. 15
def test_gan_graph():
    z_shape = (1, 8, 8)
    z = Input(shape=z_shape, name='z')
    gen_cond = Input(shape=(1, 8, 8), name='gen_cond')

    inputs = [z, gen_cond]
    gen_input = merge(inputs, mode='concat', concat_axis=1)
    gen_output = Convolution2D(10, 2, 2, activation='relu',
                               border_mode='same')(gen_input)
    generator = Container(inputs, gen_output)

    f, r = Input(z_shape, name='f'), Input(z_shape, name='r')
    inputs = [f, r]
    dis_input = merge(inputs, mode='concat', concat_axis=1)
    dis_conv = Convolution2D(5, 2, 2, activation='relu')(dis_input)
    dis_flatten = Flatten()(dis_conv)
    dis = Dense(1, activation='sigmoid')(dis_flatten)
    discriminator = Container(inputs, gan_outputs(dis))

    gan = GAN(generator, discriminator, z_shape=z_shape, real_shape=z_shape)
    gan.build('adam', 'adam', gan_binary_crossentropy)
    gan.compile()
    gan.generate({'gen_cond': np.zeros((64,) + z_shape)}, nb_samples=64)
Example No. 16
def ENet(input_shape, n_classes):

    img_input = Input(shape=input_shape)
    enet = en_build(img_input)
    enet = de_build(enet, n_classes)
    o_shape = Model(img_input, enet).output_shape
    outputHeight = o_shape[1]
    outputWidth = o_shape[2]
    enet = Reshape((outputHeight * outputWidth, n_classes))(enet)
    enet = Activation('softmax')(enet)
    model = Model(img_input, enet)
    model.name = 'ENet'

    return model
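The Reshape-before-softmax step is the key design choice here: flattening to (H*W, n_classes) yields one class distribution per pixel, with the normalization running over the last (class) axis, and it sidesteps old Keras' restriction on applying softmax to 4D tensors. A quick check with toy shapes, assuming tf.keras:

import numpy as np
from tensorflow.keras.layers import Input, Reshape, Activation
from tensorflow.keras.models import Model

logits = Input(shape=(4, 4, 3))    # H=4, W=4, 3 classes
probs = Activation('softmax')(Reshape((16, 3))(logits))
m = Model(logits, probs)
out = m.predict(np.random.random((1, 4, 4, 3)))
print(out.sum(axis=-1))            # each of the 16 per-pixel rows sums to 1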
Example No. 17
def build_model():
    """
    builds full keras model and returns it
    """
    in_x = x = Input((1, 8, 8))

    # (batch, channels, height, width)
    x = Conv2D(filters=cnn_filter_num, kernel_size=cnn_first_filter_size, padding="same", data_format="channels_first",
               use_bias=False, kernel_regularizer=l2(l2_reg),
               name="input_conv-" + str(cnn_first_filter_size) + "-" + str(cnn_filter_num))(x)
    x = BatchNormalization(axis=1, name="input_batchnorm")(x)
    x = Activation("relu", name="input_relu")(x)

    for i in range(res_layer_num):
        x = _build_residual_block(x, i + 1)

    res_out = x

    # for policy output
    x = Conv2D(filters=2, kernel_size=1, data_format="channels_first", use_bias=False, kernel_regularizer=l2(l2_reg),
               name="policy_conv-1-2")(res_out)

    x = BatchNormalization(axis=1, name="policy_batchnorm")(x)
    x = Activation("relu", name="policy_relu")(x)
    x = Flatten(name="policy_flatten")(x)

    # no output for 'pass'
    policy_out = Dense(n_labels, kernel_regularizer=l2(l2_reg), activation="softmax", name="policy_out")(x)

    # for value output
    x = Conv2D(filters=4, kernel_size=1, data_format="channels_first", use_bias=False, kernel_regularizer=l2(l2_reg),
               name="value_conv-1-4")(res_out)

    x = BatchNormalization(axis=1, name="value_batchnorm")(x)
    x = Activation("relu", name="value_relu")(x)
    x = Flatten(name="value_flatten")(x)
    x = Dense(value_fc_size, kernel_regularizer=l2(l2_reg), activation="relu", name="value_dense")(x)

    value_out = Dense(1, kernel_regularizer=l2(l2_reg), activation="tanh", name="value_out")(x)

    model = Model(in_x, [policy_out, value_out], name="hex_model")

    sgd = optimizers.SGD(lr=learning_rate, momentum=momentum)

    losses = ['categorical_crossentropy', 'mean_squared_error']

    model.compile(loss=losses, optimizer=sgd, metrics=['accuracy', 'mae'])

    model.summary()
    return model
Example No. 18
def sequential_to_gan(generator: Sequential,
                      discriminator: Sequential,
                      nb_real=32,
                      nb_fake=96):

    fake = Input(shape=discriminator.input_shape[1:], name='fake')
    real = Input(shape=discriminator.input_shape[1:], name='real')

    dis_in = merge([fake, real],
                   concat_axis=0,
                   mode='concat',
                   name='concat_fake_real')
    dis = discriminator(dis_in)
    dis_outputs = gan_outputs(dis,
                              fake_for_gen=(0, nb_fake),
                              fake_for_dis=(nb_fake - nb_real, nb_real),
                              real=(nb_fake, nb_fake + nb_real))
    dis_container = Container([fake, real], dis_outputs)
    return GAN(generator,
               dis_container,
               z_shape=generator.input_shape[1:],
               real_shape=discriminator.input_shape[1:])
Example No. 19
def decoder_resnet(label_sizes,
                   nb_filter=16,
                   data_shape=(1, 64, 64),
                   nb_bits=12,
                   resnet_depth=(3, 4, 6, 3),
                   optimizer='adam'):
    def _bn_relu_conv(nb_filter, nb_row=3, nb_col=3, subsample=1):
        return sequential([
            BatchNormalization(mode=0, axis=1),
            ELU(),
            Convolution2D(nb_filter=nb_filter,
                          nb_row=nb_row,
                          nb_col=nb_col,
                          subsample=(subsample, subsample),
                          init="he_normal",
                          border_mode="same")
        ])

    def f(nb_filter, subsample=1):
        return sequential([
            _bn_relu_conv(nb_filter, subsample=subsample),
            _bn_relu_conv(nb_filter),
        ])

    input = Input(shape=data_shape)
    filters_by_depth = [nb_filter * 2**i for i in range(len(resnet_depth))]
    print("filters_by_depth", filters_by_depth)
    x = _bn_relu_conv(nb_filter, 3, 3, subsample=2)(input)
    for i, (n, d) in enumerate(zip(filters_by_depth, resnet_depth)):
        for di in range(d):
            if di == 0 and i != 0:
                shortcut = _bn_relu_conv(n, 1, 1, subsample=2)
                subsample = 2
            else:
                shortcut = lambda x: x
                subsample = 1
            x = merge([shortcut(x), f(n, subsample)(x)], mode='sum')

    outputs, losses = decoder_end_block(x,
                                        label_sizes,
                                        nb_bits,
                                        activation=lambda: ELU())

    model = Model(input, list(outputs.values()))
    model.compile(
        optimizer,
        loss=list(losses.values()),
        loss_weights={k: decoder_loss_weights(k)
                      for k in losses.keys()})
    return model
Example No. 20
 def _build_network(self):
     # Input_Layer
     init_x = Input((3, self._board_size, self._board_size))
     x = init_x
     # Convolutional Layer
     x = Conv2D(filters=32,
                kernel_size=(3, 3),
                strides=(1, 1),
                padding='same',
                data_format='channels_first',
                kernel_regularizer=l2(self._l2_coef))(x)
     x = BatchNormalization()(x)
     x = Activation('relu')(x)
     # Residual Layer
     x = self._residual_block(x)
     x = self._residual_block(x)
     x = self._residual_block(x)
     # Policy Head
     policy = Conv2D(filters=2,
                     kernel_size=(1, 1),
                     strides=(1, 1),
                     padding='same',
                     data_format='channels_first',
                     kernel_regularizer=l2(self._l2_coef))(x)
     policy = BatchNormalization()(policy)
     policy = Activation('relu')(policy)
     policy = Flatten()(policy)
     policy = Dense(self._board_size * self._board_size,
                    kernel_regularizer=l2(self._l2_coef))(policy)
     self._policy = Activation('softmax')(policy)
     # Value Head
     value = Conv2D(filters=1,
                    kernel_size=(1, 1),
                    strides=(1, 1),
                    padding='same',
                    data_format="channels_first",
                    kernel_regularizer=l2(self._l2_coef))(x)
     value = BatchNormalization()(value)
     value = Activation('relu')(value)
     value = Flatten()(value)
     value = Dense(32, kernel_regularizer=l2(self._l2_coef))(value)
     value = Activation('relu')(value)
     value = Dense(1, kernel_regularizer=l2(self._l2_coef))(value)
     self._value = Activation('tanh')(value)
     # Define Network
     self._model = Model(inputs=init_x, outputs=[self._policy, self._value])
     # Define the Loss Function
     opt = SGD(lr=self._lr, momentum=self._momentum, nesterov=True)
     losses_type = ['categorical_crossentropy', 'mean_squared_error']
     self._model.compile(optimizer=opt, loss=losses_type)
Example No. 21
    def build(input_shape, block_fn, repetitions, input_tensor):
        _handle_dim_ordering()
        if len(input_shape) != 3:
            raise Exception(
                "Input shape should be a tuple (nb_channels, nb_rows, nb_cols)"
            )

        # Load function from str if needed.
        block_fn = _get_block(block_fn)

        if input_tensor is None:
            img_input = Input(shape=input_shape)
        else:
            if not K.is_keras_tensor(input_tensor):
                img_input = Input(tensor=input_tensor, shape=input_shape)
            else:
                img_input = input_tensor

        conv1 = _conv_bn_relu(filters=64, kernel_size=(7, 7),
                              strides=(2, 2))(img_input)
        pool1 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2),
                             padding="same")(conv1)

        block = pool1
        filters = 64
        for i, r in enumerate(repetitions):
            block = _residual_block(block_fn,
                                    filters=filters,
                                    repetitions=r,
                                    is_first_layer=(i == 0))(block)
            filters *= 2

        # Last activation
        block = _bn_relu(block)

        model = Model(inputs=img_input, outputs=block)
        return model
Example No. 22
    def build(self):
        mc = self.config.model
        in_x = x = Input((2, 6, 7))  # [own(6x7), enemy(6x7)]

        # (batch, channels, height, width)
        x = Conv2D(filters=mc.cnn_filter_num,
                   kernel_size=mc.cnn_filter_size,
                   padding="same",
                   data_format="channels_first",
                   kernel_regularizer=l2(mc.l2_reg))(x)
        x = BatchNormalization(axis=1)(x)
        x = Activation("relu")(x)

        for _ in range(mc.res_layer_num):
            x = self._build_residual_block(x)

        res_out = x
        # for policy output
        x = Conv2D(filters=2,
                   kernel_size=1,
                   data_format="channels_first",
                   kernel_regularizer=l2(mc.l2_reg))(res_out)
        x = BatchNormalization(axis=1)(x)
        x = Activation("relu")(x)
        x = Flatten()(x)
        # no output for 'pass'
        policy_out = Dense(self.config.n_labels,
                           kernel_regularizer=l2(mc.l2_reg),
                           activation="softmax",
                           name="policy_out")(x)

        # for value output
        x = Conv2D(filters=1,
                   kernel_size=1,
                   data_format="channels_first",
                   kernel_regularizer=l2(mc.l2_reg))(res_out)
        x = BatchNormalization(axis=1)(x)
        x = Activation("relu")(x)
        x = Flatten()(x)
        x = Dense(mc.value_fc_size,
                  kernel_regularizer=l2(mc.l2_reg),
                  activation="relu")(x)
        value_out = Dense(1,
                          kernel_regularizer=l2(mc.l2_reg),
                          activation="tanh",
                          name="value_out")(x)

        self.model = Model(in_x, [policy_out, value_out],
                           name="connect4_model")
Example No. 23
def create_model_mtv_electricity(horizon=1, nb_train_samples=512, batch_size=32,  feature_count=11, time_lag=6):

    x = Input(shape=(time_lag, feature_count), name="input_layer")
    conv = Conv1D(kernel_size=1, filters=5, activation='relu')(x)
    conv2 = Conv1D(5, kernel_size=3, padding='causal', strides=1, activation='relu', dilation_rate=2)(conv)
    conv3 = Conv1D(5, kernel_size=3, padding='causal', strides=1, activation='relu', dilation_rate=4)(conv2)

    mp = MaxPooling1D(pool_size=1)(conv3)

    lstm1 = GRU(16, return_sequences=True)(mp)
    lstm2 = GRU(32, return_sequences=True)(lstm1)

    shared_dense = Dense(64, name="shared_layer")(lstm2)

    ## sub1 is main task; units = reshape dimension multiplication
    sub1 = GRU(units=72, name="task1")(shared_dense)

    out1 = Dense(8, name="spec_out1")(sub1)
    out1 = Dense(1, name="out1")(out1)

    outputs = out1

    model = KerasModel(inputs=x, outputs=outputs)

    model.compile(optimizer='adam', loss='mse', metrics=['mae', 'mape', 'mse'])
    # Callbacks
    # callbacks = [EarlyStopping(monitor='val_mse', patience=10)]

    model.summary()

    # x = Input(shape=(time_lag, 12), name='aux_input')
    #
    # out1 = Dense(8, name="spec_out1")(x)
    # out1 = Flatten()(out1)
    # out1 = Dense(1, name="out1")(out1)
    #
    # outputs = out1
    #
    # model = KerasModel(inputs=x, outputs=outputs)
    #
    #
    # model.compile(optimizer='adam', loss='mse', metrics=['mae', 'mape', 'mse'])
    # # Callbacks
    # # callbacks = [EarlyStopping(monitor='val_mse', patience=10)]
    #
    # model.summary()
    #
    return model
Example No. 24
def build_model(args):
    cnn_filter_num = args['cnn_filter_num']
    cnn_filter_size = args['cnn_filter_size']
    l2_reg = args['l2_reg']

    in_x = x = Input(args['input_dim'])

    # (batch, channels, height, width)
    x = Conv2D(filters=cnn_filter_num,
               kernel_size=cnn_filter_size,
               padding="same",
               data_format="channels_first",
               kernel_regularizer=l2(l2_reg))(x)
    x = BatchNormalization(axis=1)(x)
    x = Activation("relu")(x)

    for _ in range(args['res_layer_num']):
        x = _build_residual_block(args, x)

    res_out = x

    # for policy output
    x = Conv2D(filters=2,
               kernel_size=1,
               data_format="channels_first",
               kernel_regularizer=l2(l2_reg))(res_out)
    x = BatchNormalization(axis=1)(x)
    x = Activation("relu")(x)
    x = Flatten()(x)
    policy_out = Dense(args['policy_dim'],
                       kernel_regularizer=l2(l2_reg),
                       activation="softmax",
                       name="policy")(x)

    # for value output
    x = Conv2D(filters=1,
               kernel_size=1,
               data_format="channels_first",
               kernel_regularizer=l2(l2_reg))(res_out)
    x = BatchNormalization(axis=1)(x)
    x = Activation("relu")(x)
    x = Flatten()(x)
    x = Dense(256, kernel_regularizer=l2(l2_reg), activation="relu")(x)
    value_out = Dense(1,
                      kernel_regularizer=l2(l2_reg),
                      activation="tanh",
                      name="value")(x)

    return Model(in_x, [policy_out, value_out], name="model")
Example No. 25
def build(nc, w, h, loss='categorical_crossentropy', optimizer='adam', metrics=None, **kwargs):

    inp = Input(shape=(h, w, 3), name='image')
    enet = encoder.build(inp)
    enet = decoder.build(enet, nc=nc)
    name = 'enet_unpooling'

    output_conv = Convolution2D(nc, (1, 1), activation='sigmoid')(enet)

    model = Model(inputs=inp, outputs=output_conv)

    if metrics is None:
        metrics = ['accuracy']
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)

    return model, name
Example No. 26
def model_set(input_length, learning_rate):

    input_layer = Input(shape=(input_length, ))

    hidden_layer = Dense(64, activation='relu')(input_layer)

    output_layer = Dense(1, activation='sigmoid')(hidden_layer)

    model = Model(inputs=input_layer, outputs=output_layer)

    adam = Adam(lr=learning_rate)

    model.compile(loss='binary_crossentropy', optimizer=adam)

    return model
Example No. 27
def test_get_lighting_generator():
    a_shape = (5, 16, 16)
    b_shape = (1, 16, 16)
    c_shape = (1, 16, 16)
    n = 5
    a_input = Input(shape=a_shape)
    b_input = Input(shape=b_shape)
    c_input = Input(shape=c_shape)

    scale_black, scale_white, shift64 = get_lighting_generator(
        [a_input, b_input, c_input], n)

    model = Model([a_input, b_input, c_input],
                  [scale_black, scale_white, shift64])
    model.compile('adam', 'mse')
    bs = (64, )
    a = np.random.sample(bs + a_shape)
    b = np.random.sample(bs + b_shape)
    c = np.random.sample(bs + c_shape)

    y_scale_black = np.random.sample(bs + (1, 64, 64))
    y_scale_white = np.random.sample(bs + (1, 64, 64))
    y_shift64 = np.random.sample(bs + (1, 64, 64))
    model.train_on_batch([a, b, c], [y_scale_black, y_scale_white, y_shift64])
Example No. 28
def k_base_model(tongue_image_shape, model_name='resnet50'):

    image_input = Input(shape=tongue_image_shape)

    if model_name == 'vgg16':
        base_model = VGG16(input_tensor=image_input,
                           weights='imagenet', include_top=False, pooling='avg')
    elif model_name == 'vgg19':
        base_model = VGG19(input_tensor=image_input,
                           weights='imagenet', include_top=False, pooling='avg')
    else:
        base_model = ResNet50(input_tensor=image_input,
                              weights='imagenet', include_top=False, pooling='avg')

    return image_input, base_model
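A hypothetical call to k_base_model; it assumes keras.applications can download the ImageNet weights, and relies on VGG16's documented pooled output size:

image_input, base_model = k_base_model((224, 224, 3), model_name='vgg16')
print(base_model.output_shape)  # (None, 512) for VGG16, include_top=False, pooling='avg'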
Example No. 29
def test_specify_initial_state_non_keras_tensor(layer_class):
    num_states = 2 if layer_class is recurrent.LSTM else 1

    # Test with non-Keras tensor
    inputs = Input((timesteps, embedding_dim))
    initial_state = [K.random_normal_variable((num_samples, units), 0, 1)
                     for _ in range(num_states)]
    layer = layer_class(units)
    output = layer(inputs, initial_state=initial_state)

    model = Model(inputs, output)
    model.compile(loss='categorical_crossentropy', optimizer='adam')

    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    targets = np.random.random((num_samples, units))
    model.fit(inputs, targets)
Example No. 30
def build(nc, w, h, loss='categorical_crossentropy', optimizer='adam'):
    data_shape = w * h if None not in (w, h) else -1  # TODO: -1 or None?
    inp = Input(shape=(h, w, 3))
    enet = encoder.build(inp)
    enet = decoder.build(enet, nc=nc)
    name = 'enet_naive_upsampling'

    # enet = Reshape((data_shape, nc))(enet)  # TODO: need to remove data_shape for multi-scale training
    # with tf.name_scope('output'):
    #     enet = Activation('softmax',name="predictions")(enet)
    enet = Activation('softmax', name="segmentation_map")(enet)
    model = Model(inputs=inp, outputs=enet)

    # model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy', 'mean_squared_error', f1_score])

    return model, name