Example #1
def test_batch_size_equal_one(layer_class):
    inputs = Input(batch_shape=(1, timesteps, embedding_dim))
    layer = layer_class(units)
    outputs = layer(inputs)
    model = Model(inputs, outputs)
    model.compile('sgd', 'mse')
    x = np.random.random((1, timesteps, embedding_dim))
    y = np.random.random((1, units))
    model.train_on_batch(x, y)
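
This test, like the other recurrent-layer tests in this collection (Examples #14, #16, #23), relies on module-level fixtures from the Keras test suite that the snippet does not show. A minimal sketch of plausible definitions, with toy sizes assumed rather than taken from the original:

import numpy as np
from keras.layers import Input, recurrent
from keras.models import Model

# Assumed toy fixture values; the real test module defines its own.
num_samples, timesteps, embedding_dim, units = 2, 5, 4, 3

# layer_class is typically parametrized over the recurrent layer types:
# @pytest.mark.parametrize('layer_class',
#                          [recurrent.SimpleRNN, recurrent.GRU, recurrent.LSTM])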
Example #2
def test_sparse_placeholder_fit():
    test_inputs = [sparse.random(6, 3, density=0.25).tocsr() for _ in range(2)]
    test_outputs = [
        sparse.random(6, i, density=0.25).tocsr() for i in range(3, 5)
    ]
    in1 = Input(shape=(3, ))
    in2 = Input(shape=(3, ), sparse=True)
    out1 = Dropout(0.5, name='dropout')(in1)
    out2 = Dense(4, name='dense_1')(in2)
    model = Model([in1, in2], [out1, out2])
    model.predict(test_inputs, batch_size=2)
    model.compile('rmsprop', 'mse')
    model.fit(test_inputs,
              test_outputs,
              epochs=1,
              batch_size=2,
              validation_split=0.5)
    model.evaluate(test_inputs, test_outputs, batch_size=2)
Example #3
    def build(self):
        mc = self.config.model
        in_x = x = Input((2, 6, 7))  # two 6x7 planes: [own, enemy]

        # (batch, channels, height, width)
        x = Conv2D(
            filters=mc.cnn_filter_num,
            kernel_size=mc.cnn_filter_size,
            padding="same",
            data_format="channels_first",
            kernel_regularizer=l2(mc.l2_reg),
        )(x)
        x = BatchNormalization(axis=1)(x)
        x = Activation("relu")(x)

        for _ in range(mc.res_layer_num):
            x = self._build_residual_block(x)

        res_out = x
        # for policy output
        x = Conv2D(
            filters=2,
            kernel_size=1,
            data_format="channels_first",
            kernel_regularizer=l2(mc.l2_reg),
        )(res_out)
        x = BatchNormalization(axis=1)(x)
        x = Activation("relu")(x)
        x = Flatten()(x)
        # no output for 'pass'
        policy_out = Dense(
            self.config.n_labels,
            kernel_regularizer=l2(mc.l2_reg),
            activation="softmax",
            name="policy_out",
        )(x)

        # for value output
        x = Conv2D(
            filters=1,
            kernel_size=1,
            data_format="channels_first",
            kernel_regularizer=l2(mc.l2_reg),
        )(res_out)
        x = BatchNormalization(axis=1)(x)
        x = Activation("relu")(x)
        x = Flatten()(x)
        x = Dense(mc.value_fc_size,
                  kernel_regularizer=l2(mc.l2_reg),
                  activation="relu")(x)
        value_out = Dense(1,
                          kernel_regularizer=l2(mc.l2_reg),
                          activation="tanh",
                          name="value_out")(x)

        self.model = Model(in_x, [policy_out, value_out],
                           name="connect4_model")
Example #4
    def build(self):
        mc = self.config.model
        in_x = x = Input((14, 10, 9))  # 14 x 10 x 9

        # (batch, channels, height, width)
        x = Conv2D(filters=mc.cnn_filter_num,
                   kernel_size=mc.cnn_first_filter_size,
                   padding="same",
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="input_conv-" + str(mc.cnn_first_filter_size) + "-" +
                   str(mc.cnn_filter_num))(x)
        x = BatchNormalization(axis=1, name="input_batchnorm")(x)
        x = Activation("relu", name="input_relu")(x)

        for i in range(mc.res_layer_num):
            x = self._build_residual_block(x, i + 1)

        res_out = x

        # for policy output
        x = Conv2D(filters=4,
                   kernel_size=1,
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="policy_conv-1-2")(res_out)
        x = BatchNormalization(axis=1, name="policy_batchnorm")(x)
        x = Activation("relu", name="policy_relu")(x)
        x = Flatten(name="policy_flatten")(x)
        policy_out = Dense(self.n_labels,
                           kernel_regularizer=l2(mc.l2_reg),
                           activation="softmax",
                           name="policy_out")(x)

        # for value output
        x = Conv2D(filters=2,
                   kernel_size=1,
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="value_conv-1-4")(res_out)
        x = BatchNormalization(axis=1, name="value_batchnorm")(x)
        x = Activation("relu", name="value_relu")(x)
        x = Flatten(name="value_flatten")(x)
        x = Dense(mc.value_fc_size,
                  kernel_regularizer=l2(mc.l2_reg),
                  activation="relu",
                  name="value_dense")(x)
        value_out = Dense(1,
                          kernel_regularizer=l2(mc.l2_reg),
                          activation="tanh",
                          name="value_out")(x)

        self.model = Model(in_x, [policy_out, value_out], name="cchess_model")
        self.graph = tf.get_default_graph()
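
Capturing `tf.get_default_graph()` right after building is a TensorFlow 1.x idiom: if the model is later used from another thread (for example, self-play workers), the stored graph has to be made current again. A sketch of the usual call site, assuming a `predict` method on the same class:

    def predict(self, x):
        # Re-enter the graph captured in build(); required in TF1 when
        # predict() runs on a different thread than the one that built the model.
        with self.graph.as_default():
            return self.model.predict(x)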
Example #5
def test_mask_blending_discriminator():
    fake_shape = (10, 1, 64, 64)
    real_shape = (10, 1, 64, 64)
    fake = Input(batch_shape=fake_shape)
    real = Input(batch_shape=real_shape)
    output = mask_blending_discriminator([fake, real])
    model = Model([fake, real], output)
    model.compile('adam', 'mse')
    f = np.random.sample(fake_shape)
    r = np.random.sample(real_shape)
    y = np.random.sample((fake_shape[0], 1))
    # TODO: fix different batch sizes for input and output
    # y = np.random.sample((fake_shape[0] + real_shape[0], 1,))
    with pytest.raises(ValueError):
        model.train_on_batch([f, r], y)
Example #6
    def train(self):
        print('train model......')
        input = Input(shape=(1,))
        output = Dense(units=1)(input)
        self.model = Model(inputs=input, outputs=output)
        self.model.compile(optimizer='adam', loss='mse')
        self.model.summary()
        self.model.fit(self.train_x, self.train_y, batch_size=64, epochs=20, verbose=2,
                       validation_data=(self.test_x, self.test_y))
Example #7
    def create_policy_value_net(self):
        """create the policy value network """
        in_x = network = Input((4, self.board_width, self.board_height))

        # conv layers
        network = Conv2D(filters=32,
                         kernel_size=(3, 3),
                         padding="same",
                         data_format="channels_first",
                         activation="relu",
                         kernel_regularizer=l2(self.l2_const))(network)
        network = Conv2D(filters=64,
                         kernel_size=(3, 3),
                         padding="same",
                         data_format="channels_first",
                         activation="relu",
                         kernel_regularizer=l2(self.l2_const))(network)
        network = Conv2D(filters=128,
                         kernel_size=(3, 3),
                         padding="same",
                         data_format="channels_first",
                         activation="relu",
                         kernel_regularizer=l2(self.l2_const))(network)
        network = MaxPooling2D(pool_size=(2, 2),
                               padding="same",
                               data_format="channels_first")(network)
        # action policy layers
        policy_net = Conv2D(filters=4,
                            kernel_size=(1, 1),
                            data_format="channels_first",
                            activation="relu",
                            kernel_regularizer=l2(self.l2_const))(network)
        policy_net = Flatten()(policy_net)
        self.policy_net = Dense(self.board_width * self.board_height,
                                activation="softmax",
                                kernel_regularizer=l2(
                                    self.l2_const))(policy_net)
        # state value layers
        value_net = Conv2D(filters=2,
                           kernel_size=(1, 1),
                           data_format="channels_first",
                           activation="relu",
                           kernel_regularizer=l2(self.l2_const))(network)
        value_net = Flatten()(value_net)
        value_net = Dense(64, kernel_regularizer=l2(self.l2_const))(value_net)
        self.value_net = Dense(1,
                               activation="tanh",
                               kernel_regularizer=l2(self.l2_const))(value_net)

        self.model = Model(in_x, [self.policy_net, self.value_net])

        def policy_value(state_input):
            state_input_union = np.array(state_input)
            results = self.model.predict_on_batch(state_input_union)
            return results

        self.policy_value = policy_value
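
The `policy_value` closure simply batches encoded board states through `predict_on_batch` and returns both heads. A hypothetical call, assuming a 6x6 board and a `net` instance of this class:

states = np.random.random((2, 4, 6, 6)).astype('float32')  # two states, 4 planes each
act_probs, values = net.policy_value(states)  # shapes: (2, 36) and (2, 1)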
Example #8
def build_model(with_dropout=True):
    kwargs     = {'activation':'relu', 'padding':'same'}
    conv_drop  = 0.2
    dense_drop = 0.5
    inp        = Input(shape=img_shape)

    x = inp

    x = Conv2D(64, (9, 9), **kwargs)(x)
    x = Conv2D(64, (3, 3), **kwargs)(x)
    x = BatchNormalization()(x)
    if with_dropout: x = Dropout(conv_drop, noise_shape=(None, 1, 1, int(x.shape[-1])))(x)

    x = Conv2D(64, (2, 2), **kwargs, strides=2)(x)
    x = Conv2D(64, (3, 3), **kwargs)(x)
    x = Conv2D(64, (3, 3), **kwargs)(x)
    x = BatchNormalization()(x)
    if with_dropout: x = Dropout(conv_drop, noise_shape=(None, 1, 1, int(x.shape[-1])))(x)

    x = Conv2D(64, (2, 2), **kwargs, strides=2)(x)
    x = Conv2D(64, (3, 3), **kwargs)(x)
    x = Conv2D(64, (3, 3), **kwargs)(x)
    x = BatchNormalization()(x)
    if with_dropout: x = Dropout(conv_drop, noise_shape=(None, 1, 1, int(x.shape[-1])))(x)

    x = Conv2D(64, (2, 2), **kwargs, strides=2)(x)
    x = Conv2D(64, (3, 3), **kwargs)(x)
    x = Conv2D(64, (3, 3), **kwargs)(x)
    x = BatchNormalization()(x)
    if with_dropout: x = Dropout(conv_drop, noise_shape=(None, 1, 1, int(x.shape[-1])))(x)

    x = Conv2D(64, (2, 2), **kwargs, strides=2)(x)
    x = Conv2D(64, (3, 3), **kwargs)(x)
    x = Conv2D(64, (3, 3), **kwargs)(x)
    x = BatchNormalization()(x)
    if with_dropout: x = Dropout(conv_drop, noise_shape=(None, 1, 1, int(x.shape[-1])))(x)

    x = Conv2D(64, (2, 2), **kwargs, strides=2)(x)
    x = Conv2D(64, (3, 3), **kwargs)(x)
    x = Conv2D(64, (3, 3), **kwargs)(x)
    x = BatchNormalization()(x)
    if with_dropout: x = Dropout(conv_drop, noise_shape=(None, 1, 1, int(x.shape[-1])))(x)

    h = MaxPooling2D(pool_size=(1, int(x.shape[2])))(x)
    h = Flatten()(h)
    if with_dropout: h = Dropout(dense_drop)(h)
    h = Dense(16, activation='relu')(h)

    v = MaxPooling2D(pool_size=(int(x.shape[1]), 1))(x)
    v = Flatten()(v)
    if with_dropout: v = Dropout(dense_drop)(v)
    v = Dense(16, activation='relu')(v)

    x = Concatenate()([h,v])
    if with_dropout: x = Dropout(0.5)(x)
    x = Dense(4, activation='linear')(x)
    return Model(inp,x)
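
`img_shape` is a module-level global here, not an argument. A hedged usage sketch, assuming a single-channel input; any height/width that survives the five stride-2 convolutions works:

img_shape = (128, 128, 1)  # assumed; not part of the original snippet
model = build_model(with_dropout=True)
model.compile('adam', 'mse')
model.summary()  # ends in Dense(4, 'linear'), e.g. a 4-value regression head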
Example #9
    def build(self, res_layers):
        '''
        Constructs the ResNet model based on number of layers
        :param res_layers: number of layers in the model
        '''
        mc = self.config.model
        in_x = x = Input((2, 8, 5))  # two 8x5 planes: [own, enemy]

        # (batch, channels, height, width)
        x = Conv2D(filters=mc.cnn_filter_num,
                   kernel_size=mc.cnn_filter_size,
                   padding="same",
                   data_format="channels_first",
                   kernel_regularizer=l2(mc.l2_reg))(x)
        x = BatchNormalization(axis=1)(x)
        x = Activation("relu")(x)

        logger.debug(f"Build Model with %d Res Blocks" % res_layers)

        #for _ in range(mc.res_layer_num):
        # build with number of res blocks
        for _ in range(res_layers):
            x = self._build_residual_block(x)

        res_out = x
        # for policy output
        x = Conv2D(filters=2,
                   kernel_size=1,
                   data_format="channels_first",
                   kernel_regularizer=l2(mc.l2_reg))(res_out)
        x = BatchNormalization(axis=1)(x)
        x = Activation("relu")(x)
        x = Flatten()(x)
        # no output for 'pass'
        policy_out = Dense(self.config.n_labels,
                           kernel_regularizer=l2(mc.l2_reg),
                           activation="softmax",
                           name="policy_out")(x)

        # for value output
        x = Conv2D(filters=1,
                   kernel_size=1,
                   data_format="channels_first",
                   kernel_regularizer=l2(mc.l2_reg))(res_out)
        x = BatchNormalization(axis=1)(x)
        x = Activation("relu")(x)
        x = Flatten()(x)
        x = Dense(mc.value_fc_size,
                  kernel_regularizer=l2(mc.l2_reg),
                  activation="relu")(x)
        value_out = Dense(1,
                          kernel_regularizer=l2(mc.l2_reg),
                          activation="tanh",
                          name="value_out")(x)

        self.model = Model(in_x, [policy_out, value_out],
                           name="connect4_model")
Example #10
def test_weighted_masked_objective():
    a = Input(shape=(3, ), name='input_a')

    # weighted_masked_objective
    def mask_dummy(y_true=None, y_pred=None, weight=None):
        return K.placeholder(y_true.shape)

    weighted_function = _weighted_masked_objective(K.categorical_crossentropy)
    weighted_function(a, a, None)
Example #11
def m():
    x = Input(shape=(input_size + output_size, nb_chars))
    m_realness = sequential([
        LSTM(14),
        Dense(1, activation='sigmoid'),
    ])(x)
    m = Model([x], [m_realness])
    m.compile(Adam(), 'mse')
    return m
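
`sequential` here is not `keras.models.Sequential` but a small functional helper that chains a list of layers onto a tensor. A minimal sketch, assuming that behavior:

def sequential(layers):
    """Return a callable that applies `layers` in order to an input tensor."""
    def apply(x):
        for layer in layers:
            x = layer(x)
        return x
    return apply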
Example #12
def test_warnings():
    a = Input(shape=(3, ), name='input_a')
    b = Input(shape=(3, ), name='input_b')

    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)

    model = Model([a, b], [a_2, b_2])

    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]
    model.compile(optimizer,
                  loss,
                  metrics=[],
                  loss_weights=loss_weights,
                  sample_weight_mode=None)

    def gen_data(batch_sz):
        while True:
            yield ([
                np.random.random((batch_sz, 3)),
                np.random.random((batch_sz, 3))
            ], [
                np.random.random((batch_sz, 4)),
                np.random.random((batch_sz, 3))
            ])

    with pytest.warns(Warning) as w:
        out = model.fit_generator(gen_data(4),
                                  steps_per_epoch=10,
                                  use_multiprocessing=True,
                                  workers=2)
    warning_raised = any(['Sequence' in str(w_.message) for w_ in w])
    assert warning_raised, 'No warning raised when using generator with processes.'

    with pytest.warns(None) as w:
        out = model.fit_generator(RandomSequence(3),
                                  steps_per_epoch=4,
                                  use_multiprocessing=True,
                                  workers=2)
    assert all(['Sequence' not in str(w_.message)
                for w_ in w]), 'A warning was raised for Sequence.'
Example #13
def test_mask_generator():
    shape = (15, )
    input = Input(shape=shape)
    output = mask_generator([input])
    model = Model(input, output)
    model.compile('adam', 'mse')
    bs = (64, )
    x = np.random.sample(bs + shape)
    y = np.random.sample(bs + (1, 64, 64))
    model.train_on_batch(x, y)
Example #14
def test_state_reuse(layer_class):
    inputs = Input(batch_shape=(num_samples, timesteps, embedding_dim))
    layer = layer_class(units, return_state=True, return_sequences=True)
    outputs = layer(inputs)
    output, state = outputs[0], outputs[1:]
    output = layer_class(units)(output, initial_state=state)
    model = Model(inputs, output)

    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    outputs = model.predict(inputs)
Example #15
def build_model_pretrained(model_name, lr, img_shape=(384, 384, 3)):
    inp = Input(shape=img_shape)
    base_model = pretrained[model_name](weights='imagenet', include_top=False, input_shape=img_shape, input_tensor=inp)
    x = base_model.output
    x = GlobalMaxPooling2D()(x)
    branch_model = Model(inputs=base_model.input, outputs=x)
    optim = Adam(lr=lr)
    head_model = build_head(branch_model, activation='sigmoid')
    model = build_siamese(branch_model, head_model, optim, img_shape=img_shape)
    return model, branch_model, head_model
Example #16
def test_specify_state_with_masking(layer_class):
    ''' This test based on a previously failing issue here:
    https://github.com/keras-team/keras/issues/1567
    '''
    num_states = 2 if layer_class is recurrent.LSTM else 1

    inputs = Input((timesteps, embedding_dim))
    _ = Masking()(inputs)
    initial_state = [Input((units,)) for _ in range(num_states)]
    output = layer_class(units)(inputs, initial_state=initial_state)

    model = Model([inputs] + initial_state, output)
    model.compile(loss='categorical_crossentropy', optimizer='adam')

    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    initial_state = [np.random.random((num_samples, units))
                     for _ in range(num_states)]
    targets = np.random.random((num_samples, units))
    model.fit([inputs] + initial_state, targets)
Example #17
def test_get_mask_driver():
    batch_shape = (64, 22)
    n = 50
    input = Input(shape=batch_shape[1:])
    output = get_label_generator(input, nb_units=n, nb_output_units=n)
    model = Model(input, output)
    model.compile('adam', 'mse')
    x = np.random.sample(batch_shape)
    y = np.random.sample((64, n))
    model.train_on_batch(x, y)
Example #18
def get_merge_dense_only_model():
    inputs = Input(shape=(14, ))
    predictions = Dense(14)(inputs)
    predictions = Dense(7, activation='softmax')(predictions)

    # This creates a model that includes
    # the Input layer and two Dense layers
    model = Model(inputs=inputs, outputs=predictions)

    return model
Example #19
def create_model_mtl_only_electricity(horizon=1,
                                      nb_train_samples=512,
                                      batch_size=32,
                                      feature_count=11,
                                      time_lag=6):
    x = Input(shape=(time_lag, feature_count), name="input_layer")
    conv = Conv1D(kernel_size=1, filters=5, activation='relu')(x)
    conv2 = Conv1D(5,
                   kernel_size=3,
                   padding='causal',
                   strides=1,
                   activation='relu',
                   dilation_rate=2)(conv)
    conv3 = Conv1D(5,
                   kernel_size=3,
                   padding='causal',
                   strides=1,
                   activation='relu',
                   dilation_rate=4)(conv2)

    mp = MaxPooling1D(pool_size=1)(conv3)

    lstm1 = GRU(16, return_sequences=True)(mp)
    lstm2 = GRU(32, return_sequences=True)(lstm1)

    shared_dense = Dense(64, name="shared_layer")(lstm2)

    # sub1 is the main task; its unit count equals the product of the reshape dimensions
    sub1 = GRU(units=72, name="task1")(shared_dense)
    sub2 = GRU(units=16, name="task2")(shared_dense)
    sub3 = GRU(units=16, name="task3")(shared_dense)

    out1 = Dense(8, name="spec_out1")(sub1)
    out1 = Dense(1, name="out1")(out1)

    out2 = Dense(8, name="spec_out2")(sub2)
    out2 = Dense(1, name="out2")(out2)

    out3 = Dense(1, name="spec_out3")(sub3)
    out3 = Dense(1, name="out3")(out3)

    outputs = [out1, out2, out3]

    model = KerasModel(inputs=x, outputs=outputs)

    model.compile(optimizer='adam',
                  loss='mse',
                  metrics=['mae', 'mape', 'mse'],
                  loss_weights=[0.5, 0.25, 0.25])
    # Callbacks
    # callbacks = [EarlyStopping(monitor='val_mse', patience=10)]

    model.summary()

    return model
Example #20
    def __init__(self,
                 input_shape,
                 anchors,
                 input_layers=None,
                 prev_layers=None,
                 box_by_anchor=9,
                 image_shape=None,
                 batch_size=5,
                 is_predict=False,
                 trainable=True):
        self.__trainable = trainable
        self.__input_shape = input_shape

        inputs = Input(self.__input_shape)
        if input_layers is not None:
            inputs = input_layers

        prevs = inputs
        if prev_layers is not None:
            prevs = prev_layers

        intermediate = Conv2D(256,
                              3,
                              strides=3,
                              activation="relu",
                              padding='same',
                              trainable=self.__trainable)(prevs)

        cls_layer = Conv2D(2 * box_by_anchor,
                           1,
                           activation="relu",
                           trainable=self.__trainable)(intermediate)

        # [B, h, w, box_by_anchor * 2] -> [B, anchor boxes, 2]
        cls_logits = Reshape([-1, 2])(cls_layer)
        cls_probs = Activation('softmax')(cls_logits)

        reg_layer = Conv2D(4 * box_by_anchor,
                           1,
                           activation="relu",
                           trainable=self.__trainable)(intermediate)

        # [B, h, w, box_by_anchor * 4] -> [B, anchor boxes, 4]
        regions = Reshape([-1, 4])(reg_layer)

        prop_regs = Regionproposal(
            anchors,
            image_shape=image_shape,
            batch_size=batch_size,
            count_limit_post=1000 if is_predict else 2000)(
                [cls_probs, regions])

        outputs = [cls_probs, regions, prop_regs]
        self.__network = outputs
        self.__model = Model(inputs=[inputs], outputs=outputs)
Example #21
def get_unet():
    inputs = Input((img_rows, img_cols, depth))

    conv1 = block(inputs, 16, 32, True)

    conv2 = block(conv1, 32, 64, True)

    conv3 = block(conv2, 64, 128, True)

    conv4 = block(conv3, 128, 256, True)

    conv5 = block(conv4, 256, 512, True)

    # **** decoding ****
    xx = concatenate([
        Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5),
        conv4,
    ], axis=3)
    up1 = block(xx, 512, 128, False)

    xx = concatenate([
        Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(up1),
        conv3,
    ], axis=3)
    up2 = block(xx, 256, 64, False)

    xx = concatenate([
        Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(up2),
        conv2,
    ], axis=3)
    up3 = block(xx, 128, 32, False)

    xx = concatenate([
        Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(up3),
        conv1,
    ], axis=3)
    up4 = block(xx, 64, 16, False)

    xx = concatenate([
        Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(up4),
        inputs,
    ], axis=3)

    xx = Conv2D(32, (3, 3), activation='relu', padding='same')(xx)
    #    xx = concatenate([xx, conv1a])

    xx = Conv2D(1, (1, 1), activation='sigmoid', padding='same')(xx)

    model = Model(inputs=[inputs], outputs=[xx])

    return model
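
`block` is not defined in this snippet. Judging from the call pattern `block(inputs, 16, 32, True)` and the halving of spatial size per encoder stage, a plausible sketch of a two-convolution block whose last argument controls downsampling (an assumption, not the original helper; requires Conv2D and MaxPooling2D from keras.layers):

def block(x, f1, f2, pool):
    # Two 3x3 convolutions widening from f1 to f2 channels (assumed layout)
    x = Conv2D(f1, (3, 3), activation='relu', padding='same')(x)
    x = Conv2D(f2, (3, 3), activation='relu', padding='same')(x)
    if pool:
        x = MaxPooling2D(pool_size=(2, 2))(x)
    return x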
Example #22
    def create_policy_value_net(self):
        """create the policy value network """
        in_x = network = Input((4, self.board_width, self.board_height))

        # conv layers
        '''
        network = Conv2D(filters=32, kernel_size=(3, 3), padding="same", data_format="channels_first", activation="relu", kernel_regularizer=l2(self.l2_const))(network)
        network = Conv2D(filters=64, kernel_size=(3, 3), padding="same", data_format="channels_first", activation="relu", kernel_regularizer=l2(self.l2_const))(network)
        network = Conv2D(filters=128, kernel_size=(3, 3), padding="same", data_format="channels_first", activation="relu", kernel_regularizer=l2(self.l2_const))(network)
        '''

        layer1 = Conv2D(filters=64,
                        kernel_size=(3, 3),
                        padding="same",
                        data_format="channels_first",
                        activation="relu",
                        kernel_regularizer=l2(self.l2_const))(network)
        layer2 = Conv2D(filters=64,
                        kernel_size=(3, 3),
                        padding="same",
                        data_format="channels_first",
                        activation="relu",
                        kernel_regularizer=l2(self.l2_const))(layer1)
        network = Conv2D(filters=128,
                         kernel_size=(3, 3),
                         padding="same",
                         data_format="channels_first",
                         activation="relu",
                         kernel_regularizer=l2(self.l2_const))(layer2)

        # action policy layers
        policy_net = Conv2D(filters=4,
                            kernel_size=(1, 1),
                            data_format="channels_first",
                            activation="relu",
                            kernel_regularizer=l2(self.l2_const))(network)
        policy_net = Flatten()(policy_net)
        self.policy_net = Dense(self.board_width * self.board_height,
                                activation="softmax",
                                kernel_regularizer=l2(
                                    self.l2_const))(policy_net)
        # state value layers
        value_net = Conv2D(filters=2,
                           kernel_size=(1, 1),
                           data_format="channels_first",
                           activation="relu",
                           kernel_regularizer=l2(self.l2_const))(network)
        value_net = Flatten()(value_net)
        value_net = Dense(64, kernel_regularizer=l2(self.l2_const))(value_net)
        self.value_net = Dense(1,
                               activation="tanh",
                               kernel_regularizer=l2(self.l2_const))(value_net)

        self.model = Model(in_x, [self.policy_net, self.value_net])
Example #23
def test_initial_states_as_other_inputs(layer_class):
    num_states = 2 if layer_class is recurrent.LSTM else 1

    # Test with Keras tensor
    main_inputs = Input((timesteps, embedding_dim))
    initial_state = [Input((units,)) for _ in range(num_states)]
    inputs = [main_inputs] + initial_state

    layer = layer_class(units)
    output = layer(inputs)
    assert initial_state[0] in layer._inbound_nodes[0].input_tensors

    model = Model(inputs, output)
    model.compile(loss='categorical_crossentropy', optimizer='adam')

    main_inputs = np.random.random((num_samples, timesteps, embedding_dim))
    initial_state = [np.random.random((num_samples, units))
                     for _ in range(num_states)]
    targets = np.random.random((num_samples, units))
    model.train_on_batch([main_inputs] + initial_state, targets)
Example #24
def test_mask_generator():
    shape = (15, )
    input = Input(shape=shape)
    output = tag3d_network_dense([input])
    model = Model(input, output)
    model.compile('adam', 'mse')
    bs = (64, )
    x = np.random.sample(bs + shape)
    y_depth_map = np.random.sample(bs + (1, 16, 16))
    y_mask = np.random.sample(bs + (1, 64, 64))
    model.train_on_batch(x, [y_mask, y_depth_map])
Example #25
def get_model(input_shape=(360, 480, 3), classes=12):
    # data_shape = w * h if None not in (w, h) else -1  # TODO: -1 or None?
    inp = Input(shape=input_shape)
    enet = encoder.build(inp)
    enet = decoder.build(enet, nc=classes)
    enet = Conv2D(classes, (1, 1), activation='sigmoid')(enet)
    # enet = Reshape((input_shape[0]*input_shape[1], classes))(enet)
    # # enet = Reshape((data_shape, classes))(enet)  # TODO: need to remove data_shape for multi-scale training
    # enet = Activation('softmax')(enet)
    model = Model(inputs=inp, outputs=enet)
    return model
Example #26
def create_model(horizon=1, nb_train_samples=512, batch_size=32, feature_count=11):

    x = Input(shape=(6, feature_count), name="input_layer")
    conv = Conv1D(kernel_size=3, filters=5, activation='relu', dilation_rate=1)(x)
    conv2 = Conv1D(5, kernel_size=3, padding='causal', strides=1, activation='relu', dilation_rate=2)(conv)
    conv3 = Conv1D(5, kernel_size=3, padding='causal', strides=1, activation='relu', dilation_rate=4)(conv2)
    mp = MaxPooling1D(pool_size=2)(conv3)
    # conv2 = Conv1D(filters=5, kernel_size=3, activation='relu')(mp)
    # mp = MaxPooling1D(pool_size=2)(conv2)

    lstm1 = GRU(16, return_sequences=True)(mp)
    lstm2 = GRU(32, return_sequences=True)(lstm1)

    shared_dense = Dense(64, name="shared_layer")(lstm2)
    shared_dense = Flatten()(shared_dense)
    sub1 = Dense(16, name="task1")(shared_dense)
    # sub2 = Dense(16, name="task2")(shared_dense)
    # sub3 = Dense(16, name="task3")(shared_dense)


    # sub1 = GRU(units=16, name="task1")(shared_dense)
    # sub2 = GRU(units=16, name="task2")(shared_dense)
    # sub3 = GRU(units=16, name="task3")(shared_dense)

    # out1_gp = Dense(1, name="out1_gp")(sub1)
    out1 = Dense(1, name="out1")(sub1)
    # out2 = Dense(1, name="out2")(sub2)
    # out3 = Dense(1, name="out3")(sub3)
    # Gaussian setting
    gp_hypers = {'lik': -2.0, 'cov': [[-0.7], [0.0]]}
    gp_params = {
        'cov': 'SEiso',
        'hyp_lik': -2.0,
        'hyp_cov': [[-0.7], [0.0]],
        'opt': {'cg_maxit': 500, 'cg_tol': 1e-4},
        'grid_kwargs': {'eq': 1, 'k': 1e2},
        'update_grid': True,
    }
    gp1 = GP(gp_hypers, batch_size=batch_size, nb_train_samples=nb_train_samples)
    # gp2 = GP(gp_hypers, batch_size=batch_size, nb_train_samples=nb_train_samples)
    # gp3 = GP(gp_hypers, batch_size=batch_size, nb_train_samples=nb_train_samples)

    outputs = [gp1(out1)]

    model = Model(inputs=x, outputs=outputs)


    model.compile(optimizer='adam', loss='mse', metrics=['mae', 'mape', 'mse'])
    # Callbacks
    # callbacks = [EarlyStopping(monitor='val_mse', patience=10)]

    model.summary()

    return model
Example #27
    def setUp(self):
        kernel = 2
        input_shape = (1, 4, 6)
        output_dim = 3
        test_data = np.zeros(input_shape)
        inp = Input(shape=(None, input_shape[2]), name="input1")
        x = GatedConv1D(output_dim, kernel_size=kernel)(inp)

        self.sample_zero_model = Model(inputs=inp, outputs=x)
        self.test_data = test_data
        self.output_dim = output_dim
Example #28
def build_gan(generator, discriminator):
    # Changing this before compile is all that's needed
    discriminator.trainable = False

    noise = Input(shape=(100,))

    fake_image = generator(noise)
    p = discriminator(fake_image)

    model = Model(noise, p)
    return model
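
The usual companion to this builder compiles the discriminator on its own before stacking it with the generator; a minimal training sketch, assuming numpy as np, binary cross-entropy, and the 100-dim noise prior above:

discriminator.compile('adam', 'binary_crossentropy')  # still trainable here
gan = build_gan(generator, discriminator)
gan.compile('adam', 'binary_crossentropy')  # discriminator frozen inside the stack

noise = np.random.normal(size=(32, 100)).astype('float32')
# Train the generator to make the (frozen) discriminator output "real".
gan.train_on_batch(noise, np.ones((32, 1)))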
Example #29
def test_rotates_images():
    bs = 3
    shape = (1, 8, 8)
    img = np.zeros((bs, 1, 8, 8), dtype=K.floatx())
    angle = np.asarray([0, math.pi / 2, math.pi], dtype=K.floatx())

    img_input = Input(shape=shape)
    rot_input = Input(shape=(1, ))
    rot_layer = RotationTransformer()([img_input, rot_input])
    model = Model(inputs=[img_input, rot_input], outputs=rot_layer)

    model.compile('adam', 'mse')
    _, theta = model.predict([img, angle])
    np.testing.assert_almost_equal(theta.reshape(-1, 2, 3),
                                   np.asarray([
                                       [[1, 0, 0], [0, 1, 0]],
                                       [[0, -1, 0], [1, 0, 0]],
                                       [[-1, 0, 0], [0, -1, 0]],
                                   ]),
                                   verbose=True)
Example #30
    def build(input_shape, block_fn, repetitions, input_tensor):
        _handle_dim_ordering()
        if len(input_shape) != 3:
            raise Exception(
                "Input shape should be a tuple (nb_channels, nb_rows, nb_cols)"
            )

        # Permute dimension order if necessary
        if K.image_dim_ordering() == 'tf':
            input_shape = (input_shape[1], input_shape[2], input_shape[0])

        # Load function from str if needed.
        block_fn = _get_block(block_fn)

        if input_tensor is None:
            img_input = Input(shape=input_shape)
        else:
            if not K.is_keras_tensor(input_tensor):
                img_input = Input(tensor=input_tensor, shape=input_shape)
            else:
                img_input = input_tensor

        conv1 = _conv_bn_relu(filters=64, kernel_size=(7, 7),
                              strides=(2, 2))(img_input)
        pool1 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2),
                             padding="same")(conv1)

        block = pool1
        filters = 64
        for i, r in enumerate(repetitions):
            block = _residual_block(block_fn,
                                    filters=filters,
                                    repetitions=r,
                                    is_first_layer=(i == 0))(block)
            filters *= 2

        # Last activation
        block = _bn_relu(block)

        model = Model(inputs=img_input, outputs=block)
        return model