def get_seq_model():
  """Define three channel input shape depending on image data format."""
  if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
  else:
    input_shape = (img_width, img_height, 3)

  # Initialize CNN by creating a sequential model.
  model = Sequential()
  model.add(Conv2D(32, (3, 3), input_shape=input_shape))
  model.add(Activation('relu'))
  model.add(MaxPooling2D(pool_size=(2, 2)))

  model.add(Conv2D(32, (3, 3)))
  model.add(Activation('relu'))
  model.add(MaxPooling2D(pool_size=(2, 2)))

  model.add(Conv2D(64, (3, 3)))
  model.add(Activation('relu'))
  model.add(MaxPooling2D(pool_size=(2, 2)))

  model.add(Flatten())
  model.add(Dense(64))
  model.add(Activation('relu'))
  model.add(Dropout(0.5))
  # Two sigmoid units with binary_crossentropy expect one-hot binary labels;
  # a single sigmoid unit is the more conventional binary head.
  model.add(Dense(2))
  model.add(Activation('sigmoid'))

  model.compile(
      loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

  return model
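
This function assumes imports and image-size globals defined elsewhere in its module. A minimal sketch of the assumed surroundings (the tf.keras import path and the 150x150 size are assumptions; img_width and img_height are the globals the function reads):

from tensorflow.keras import backend as K
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Conv2D, Activation, MaxPooling2D,
                                     Flatten, Dense, Dropout)

img_width, img_height = 150, 150  # illustrative input size

model = get_seq_model()
model.summary()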
Example #2
def build_small_cnn(input_shape, output_size):
    model = Sequential([
        # conv1_*
        Convolution2D(32,
                      kernel_size=3,
                      padding="same",
                      input_shape=input_shape),
        Activation("relu"),
        Convolution2D(32, kernel_size=3, padding="same"),
        Activation("relu"),
        MaxPooling2D(pool_size=(2, 2)),

        # conv2_*
        Convolution2D(64, kernel_size=3, padding="same"),
        Activation("relu"),
        Convolution2D(64, kernel_size=3, padding="same"),
        Activation("relu"),
        MaxPooling2D(pool_size=(2, 2)),

        # Fully connected
        Flatten(),
        Dense(512),
        Activation("relu"),
        Dense(512),
        Activation("relu"),
        Dense(output_size),
        Activation("softmax")
    ])

    model.compile(loss="categorical_crossentropy",
                  optimizer="adam",
                  metrics=["accuracy"])

    return model
Example #3
    def model_fn(self, features: tf.Tensor,
                 mode: tf.estimator.ModeKeys) -> tf.Tensor:
        """
            alexnet image classification convolutional network

            :param features: input of the network
            :param mode: standard names for Estimator model modes
            :return: output of the network (except last FC that evaluates the logits)

        """

        assert_input_op = tf.debugging.assert_equal(
            features.get_shape().as_list()[1:], self.INPUT_SHAPE[1:])
        with tf.control_dependencies([assert_input_op]):
            pool5 = self._cnn(features, mode)

        flatten = Flatten()(pool5)
        drop8 = self._fcn(flatten, mode)

        assert_output_op = tf.debugging.assert_equal(
            drop8.get_shape().as_list()[1:], self.OUTPUT_SHAPE[1:])
        with tf.control_dependencies([assert_output_op]):
            drop8 = tf.identity(drop8)

        return drop8
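
The assert-inside-control_dependencies pairing used here is a TF1 graph-mode idiom: the shape check becomes a dependency of a downstream op, so it runs whenever that op runs. A standalone sketch of the pattern (names are illustrative):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.placeholder(tf.float32, shape=[None, 4])
check = tf.debugging.assert_equal(x.get_shape().as_list()[1:], [4])
with tf.control_dependencies([check]):
    y = tf.identity(x)  # y now carries the assertion as a dependency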
Example #4
def build_network(num_actions, agent_history_length, resized_width,
                  resized_height):
    with tf.device("/gpu:0"):
        state = tf.placeholder(
            "float",
            [None, agent_history_length, resized_width, resized_height])
        inputs = Input(shape=(
            agent_history_length,
            resized_width,
            resized_height,
        ))
        model = Convolution2D(filters=16,
                              kernel_size=(8, 8),
                              strides=(4, 4),
                              activation='relu',
                              padding='same')(inputs)
        model = Convolution2D(filters=32,
                              kernel_size=(4, 4),
                              strides=(2, 2),
                              activation='relu',
                              padding='same')(model)
        model = Flatten()(model)
        model = Dense(256, activation='relu')(model)
        q_values = Dense(num_actions, activation='linear')(model)
        m = Model(inputs, outputs=q_values)
    return state, m
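
A hedged usage sketch for the function above, using the common Atari DQN preprocessing values (4 stacked 84x84 frames) as illustrative arguments:

state, q_model = build_network(num_actions=4,
                               agent_history_length=4,
                               resized_width=84,
                               resized_height=84)
q_model.summary()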
Example #5
def build_fully_conv(obs_spec,
                     act_spec,
                     data_format='channels_first',
                     broadcast_non_spatial=False,
                     fc_dim=256):
    screen, screen_input = spatial_block('screen', obs_spec.spaces[0],
                                         conv_cfg(data_format, 'relu'))
    minimap, minimap_input = spatial_block('minimap', obs_spec.spaces[1],
                                           conv_cfg(data_format, 'relu'))

    non_spatial_inputs = [Input(s.shape) for s in obs_spec.spaces[2:]]

    if broadcast_non_spatial:
        non_spatial, spatial_dim = non_spatial_inputs[1], obs_spec.spaces[
            0].shape[1]
        non_spatial = tf.log(non_spatial + 1e-5)
        broadcasted_non_spatial = Broadcast2D(spatial_dim)(non_spatial)
        state = tf.concat([screen, minimap, broadcasted_non_spatial], axis=1)
    else:
        state = tf.concat([screen, minimap], axis=1)

    fc = Flatten(name="state_flat")(state)
    fc = Dense(fc_dim, **dense_cfg('relu'))(fc)

    value = Dense(1, name="value_out", **dense_cfg(scale=0.1))(fc)
    value = tf.squeeze(value, axis=-1)

    logits = []
    for space in act_spec:
        if space.is_spatial():
            logits.append(
                Conv2D(1, 1, **conv_cfg(data_format, scale=0.1))(state))
            logits[-1] = Flatten()(logits[-1])
        else:
            logits.append(Dense(space.size(), **dense_cfg(scale=0.1))(fc))

    mask_actions = Lambda(lambda x: tf.where(non_spatial_inputs[0] > 0, x,
                                             -1000 * tf.ones_like(x)),
                          name="mask_unavailable_action_ids")
    logits[0] = mask_actions(logits[0])

    return Model(inputs=[screen_input, minimap_input] + non_spatial_inputs,
                 outputs=logits + [value])
Example #6
def build_lr(input_shape, output_size):
    model = Sequential([
        Flatten(input_shape=input_shape),
        Dense(output_size),
        Activation("softmax")
    ])

    model.compile(loss="categorical_crossentropy",
                  optimizer="adam",
                  metrics=["accuracy"])

    return model
Example #7
def build_cnn(input_layer, layers, conv_cfg, dense=None, prefix=''):
    x = input_layer
    for i, (n_filters, kernel_size, stride) in enumerate(layers):
        x = Conv2D(n_filters,
                   kernel_size,
                   stride,
                   name='%sconv%02d' % (prefix, i + 1),
                   **conv_cfg)(x)

    if dense:
        x = Flatten()(x)
        x = Dense(dense)(x)

    return x
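
The layers argument above is a list of (n_filters, kernel_size, stride) tuples, and conv_cfg is splatted into every Conv2D. A hedged usage sketch (the conv_cfg contents and shapes are assumptions):

from tensorflow.keras.layers import Input

inp = Input(shape=(84, 84, 4))
out = build_cnn(inp,
                layers=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],
                conv_cfg={'activation': 'relu', 'padding': 'same'},
                dense=512,
                prefix='dqn_')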
Example #8
def model_fn_LENET_5(features,
                     activation='relu',
                     kernel_initializer=tf.keras.initializers.TruncatedNormal(
                         mean=0, stddev=0.1),
                     bias_initializer='zeros'):

    # conv1: output is [None, 28, 28, 6]
    conv1 = Conv2D(filters=6,
                   kernel_size=(5, 5),
                   strides=(1, 1),
                   padding='valid',
                   activation=activation,
                   use_bias=True,
                   kernel_initializer=kernel_initializer,
                   bias_initializer=bias_initializer)(features)

    # pool1: output is [None, 14, 14, 6]
    pool1 = MaxPool2D(pool_size=(2, 2))(conv1)

    # conv2: output is [None, 10, 10, 16]
    conv2 = Conv2D(filters=16,
                   kernel_size=(5, 5),
                   strides=(1, 1),
                   padding='valid',
                   activation=activation,
                   use_bias=True,
                   kernel_initializer=kernel_initializer,
                   bias_initializer=bias_initializer)(pool1)

    # pool2: output is [None, 5, 5, 16] -> flattened on input of FC to [None, 400]
    pool2 = MaxPool2D(pool_size=(2, 2))(conv2)
    flatten = Flatten()(pool2)

    # fc3: output is [None, 120]
    fc3 = Dense(units=120,
                activation=activation,
                use_bias=True,
                kernel_initializer=kernel_initializer,
                bias_initializer=bias_initializer)(flatten)

    # fc4: output is [None, 84]
    fc4 = Dense(units=84,
                activation=activation,
                use_bias=True,
                kernel_initializer=kernel_initializer,
                bias_initializer=bias_initializer)(fc3)

    return fc4
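
model_fn_LENET_5 stops at fc4 and leaves the classification layer to the caller. A minimal sketch of attaching a 10-class logits head (the head and the tf.keras wiring are assumptions about how the function is used; LeNet-5 expects 32x32 single-channel inputs):

inputs = tf.keras.Input(shape=(32, 32, 1))
fc4 = model_fn_LENET_5(inputs)
logits = Dense(units=10)(fc4)  # hypothetical head: one unit per digit class
model = tf.keras.Model(inputs=inputs, outputs=logits)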
Example #9
def build_model(hidden_size):
    inputs = Input(shape=(28, 28))
    x1 = Flatten()(inputs)
    x2 = Dense(hidden_size, activation=tf.nn.relu)(x1)
    x3 = Dropout(0.2)(x2)
    x4 = Dense(10, activation=tf.nn.softmax)(x3)
    model = Model(inputs=inputs, outputs=x4)

    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    # Train and evaluate the model (x_train, y_train, x_test and y_test
    # are assumed to be defined in the enclosing scope)
    model.fit(x_train, y_train, epochs=5)
    [loss, acc] = model.evaluate(x_test, y_test)
    return [model, acc]
Example #10
def build_cnn(input_shape, output_size):
    kwargs = {"kernel_size": 3, "activation": "relu", "padding": "same"}
    model = Sequential([
        # conv1_*
        Convolution2D(64, input_shape=input_shape, **kwargs),
        BatchRenormalization(),
        Convolution2D(64, **kwargs),
        BatchRenormalization(),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),

        # conv2_*
        Convolution2D(128, **kwargs),
        BatchRenormalization(),
        Convolution2D(128, **kwargs),
        BatchRenormalization(),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),

        # conv3_*
        Convolution2D(256, **kwargs),
        BatchRenormalization(),
        Convolution2D(256, **kwargs),
        BatchRenormalization(),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),

        # Fully connected
        Flatten(),
        Dense(1024),
        Activation("relu"),
        Dropout(0.5),
        Dense(512),
        Activation("relu"),
        Dropout(0.5),
        Dense(output_size),
        Activation("softmax")
    ])

    model.compile(loss="categorical_crossentropy",
                  optimizer="adam",
                  metrics=["accuracy"])

    return model
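
BatchRenormalization is not part of stock Keras; it shipped with third-party add-ons such as keras-contrib, so that package (or an equivalent layer) is assumed importable. A hedged usage sketch with CIFAR-10-shaped data:

model = build_cnn(input_shape=(32, 32, 3), output_size=10)
model.summary()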
Example #11
    def Train(self):
        # self.loadDataFeature()
        self.loadDataTxt()
        self.train_and_test_split(0.75)
        # model
        model = Sequential()

        # model.add(Dense(392, activation='relu'))
        # model.add(Dense(128, activation='relu'))
        # model.add(Dense(36, activation='softmax'))

        # CNN model

        model.add(
            Conv2D(64, (3, 3), activation='relu', input_shape=(28, 28, 1)))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D((2, 2)))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D((2, 2)))
        model.add(Flatten())
        model.add(Dense(128, activation='relu'))
        model.add(Dense(128, activation='relu'))
        model.add(Dense(36, activation='softmax'))

        # model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
        model.fit(
            self.train_data['data'],
            self.train_data['class_name'],
            batch_size=25,
            epochs=100,
            verbose=1,
            validation_data=(self.test_data['data'],
                             self.test_data['class_name']),
        )
        self.model = model
        model.save('digit_classification_model1.h5')
        # Y_pred = model.predict(self.test_data['data'])
        # self.metric(self.test_data['class_name'], Y_pred, data_type='binary')
        self.metric()
Example #12
def neural_network(input_shape):
    inputs = keras.Input(shape=input_shape)

    #Layer 1
    x = MaxPooling2D(pool_size=(2, 2), name='MaxPooling2D_1')(inputs)
    x = Conv2D(32, kernel_size=(5, 5), padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = MaxPooling2D(pool_size=(4, 4))(x)

    #Layer 2
    x = Conv2D(64, kernel_size=(5, 5), padding='same', name='Conv2D_2')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = MaxPooling2D(pool_size=(2, 2), name='MaxPooling2D_3')(x)

    x = Flatten(name='Flatten')(x)

    #Layer 3
    #model.add(Dense(256,name = 'Dense_1'))
    #model.add(BatchNormalization(name = 'BatchNormalization_2'))
    #model.add(LeakyReLU(alpha=0.1))
    #model.add(Dropout(0.5,name = 'Dropout_1'))

    #Layer 4
    x = Dense(128, name='Dense_2')(x)
    x = BatchNormalization(name='BatchNormalization_3')(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Dropout(0.5, name='Dropout_2')(x)

    #Layer 5
    x = Dense(128, name='Dense_3')(x)
    x = BatchNormalization(name='BatchNormalization_4')(x)
    x = LeakyReLU(alpha=0.1)(x)
    #model.add(Dropout(0.5,name = 'Dropout_3'))

    outputs = Dense(1, activation='sigmoid', name='Dense_4')(x)

    model = Model(inputs, outputs)
    return model
Example #13
def build_rnn(n_lstm_layers,
              lstm_layer_size,
              n_hidden_layers,
              hidden_layer_size,
              optimizer='adam',
              input_shape=None):
    '''Build the RNN architecture.'''

    # initialize a classifier
    classifier = Sequential()

    # input layer
    classifier.add(Input(shape=input_shape))

    # lstm layers
    for n in range(n_lstm_layers):
        classifier.add(CuDNNLSTM(units=lstm_layer_size, return_sequences=True))

    # flatten array to 1d vector
    classifier.add(Flatten())

    # hidden layers
    for n in range(n_hidden_layers):
        classifier.add(
            Dense(units=hidden_layer_size,
                  kernel_initializer='uniform',
                  activation='relu'))

    # output layer
    classifier.add(
        Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))

    # compile model
    classifier.compile(optimizer=optimizer,
                       loss='binary_crossentropy',
                       metrics=['accuracy'])

    return classifier
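
A hedged usage sketch (the input shape of 60 timesteps with one feature is illustrative; CuDNNLSTM additionally requires a GPU-enabled TensorFlow build):

clf = build_rnn(n_lstm_layers=2,
                lstm_layer_size=64,
                n_hidden_layers=2,
                hidden_layer_size=32,
                input_shape=(60, 1))
clf.summary()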
Example #14
def define_discriminator(in_shape=(32, 32, 3)):
    model = Sequential()
    # normal
    model.add(Conv2D(64, (3, 3), padding='same', input_shape=in_shape))
    model.add(LeakyReLU(alpha=0.2))
    # downsample
    model.add(Conv2D(128, (3, 3), strides=(2, 2), padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    # downsample
    model.add(Conv2D(128, (3, 3), strides=(2, 2), padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    # downsample
    model.add(Conv2D(256, (3, 3), strides=(2, 2), padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    # classifier
    model.add(Flatten())
    model.add(Dropout(0.4))
    model.add(Dense(1, activation='sigmoid'))
    # compile model
    opt = Adam(lr=0.0002, beta_1=0.5)
    model.compile(loss='binary_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    return model
Example #15
def create_alpha_zero_model(
    depth,
    input_shape,
    policy_output_size,
    num_filters=64,
    activation="relu",
    policy_factor=1.0,
):
    input = tf.keras.Input(shape=input_shape, name="input")
    conv = Conv2D(
        num_filters,
        kernel_size=3,
        strides=1,
        padding="same",
        kernel_initializer="he_normal",
        kernel_regularizer=l2(1e-4),
        activation=None,
    )

    x = conv(input)
    x = BatchNormalization()(x)
    x = Activation(activation)(x)

    block_output = residual_block(inputs=x, strides=1, num_filters=num_filters)

    for _ in range(depth):
        block_output = residual_block(inputs=block_output,
                                      strides=1,
                                      num_filters=num_filters)

    # TODO: consider adding an extra conv layer here and for the policy head as
    # well, see https://medium.com/oracledevs/lessons-from-alpha-zero-part-6-hyperparameter-tuning-b1cfcbe4ca9
    value_conv_output = Conv2D(
        num_filters // 2,
        kernel_size=3,
        strides=1,
        padding="same",
        kernel_initializer="he_normal",
        kernel_regularizer=l2(1e-4),
        activation=None,
    )(block_output)
    value_conv_output = BatchNormalization()(value_conv_output)
    value_conv_output = Activation(activation)(value_conv_output)

    value = Dense(
        units=1,
        kernel_regularizer=l2(1e-4),
        kernel_initializer="he_normal",
        activation="tanh",
        name="value",
    )(Flatten()(value_conv_output))

    policy_conv_output = Conv2D(
        num_filters // 2,
        kernel_size=3,
        strides=1,
        padding="same",
        kernel_initializer="he_normal",
        kernel_regularizer=l2(1e-4),
        activation=None,
    )(block_output)

    policy_conv_output = BatchNormalization()(policy_conv_output)
    policy_conv_output = Activation(activation)(policy_conv_output)

    policy = (Dense(
        units=policy_output_size,
        kernel_regularizer=l2(1e-4),
        kernel_initializer="he_normal",
        activation=None,
    )(Flatten()(policy_conv_output)) * policy_factor)
    policy = Activation("softmax", name="policy")(policy)
    # policy = tf.keras.layers.Lambda(
    #     # lambda x: x * policy_factor, name="policy"
    # )(policy)
    model = tf.keras.Model(inputs=input, outputs=[policy, value])

    return model
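
The model has two named outputs, so training needs one loss per head. A minimal compile sketch under the usual AlphaZero recipe (cross-entropy on the policy, squared error on the value); the input shape and policy size below are the chess values and purely illustrative, and residual_block is assumed to come from the same project:

model = create_alpha_zero_model(depth=5,
                                input_shape=(8, 8, 17),
                                policy_output_size=4672)
model.compile(optimizer='adam',
              loss={'policy': 'categorical_crossentropy',
                    'value': 'mean_squared_error'})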
Example #16
           activation='tanh'))
NN2.add(ZeroPadding2D(padding=(1, 1)))
NN2.add(
    Conv2D(64, (2, 2),
           use_bias=True,
           padding='valid',
           strides=(1, 1),
           activation='tanh'))
NN2.add(ZeroPadding2D(padding=(1, 1)))
NN2.add(
    Conv2D(64, (2, 2),
           use_bias=True,
           padding='valid',
           strides=(1, 1),
           activation='tanh'))
NN2.add(Flatten())
NN2.add(Dense(Nparameters, activation='linear', use_bias=True))
NN2.summary()
#NN2.compile(loss = root_relative_mean_squared_error, optimizer = "adam",metrics=["MAPE","MSE"])

#setting
NN2.compile(loss=mse_constraint(0.75),
            optimizer="adam",
            metrics=["MAPE", "MSE"])
es = EarlyStopping(monitor='val_loss',
                   mode='min',
                   verbose=1,
                   patience=50,
                   restore_best_weights=True)
#history = NN2.fit(y_train_trafo2,X_train_trafo2, batch_size=64, validation_data = (y_val_trafo2,X_val_trafo2), epochs=100, verbose = True, shuffle=1,callbacks = [es])
#NN2.save_weights("calibrationweights_vola_9x9_log.h5")#id_3283354135d44b67_data_price_norm_231046clean
Example #17
            model = Sequential()
            # Conv2D format: number of filters, kernel window (and optional
            # stride); input_shape gives the data dimensions
            model.add(Conv2D(layer_size, (3, 3),
                             input_shape=x_train.shape[1:]))
            model.add(Activation("relu"))
            model.add(MaxPooling2D(pool_size=(2, 2)))
            print('Layer 0 generated')

            for i in range(conv_layer - 1):
                print(f'Layer {i + 1} generated.')
                model.add(Conv2D(layer_size, (3, 3)))
                model.add(Activation("relu"))
                model.add(MaxPooling2D(pool_size=(2, 2)))

            model.add(Flatten())
            for l in range(dense_layer):
                model.add(Dense(layer_size))
                model.add(Activation("relu"))

            model.add(Dense(1))
            # Final activation function
            model.add(Activation('sigmoid'))

            # Binary loss function
            model.compile(loss="binary_crossentropy",
                          optimizer="adam",
                          metrics=['accuracy'])

            # Batch size: ideally 20-200, scaling with dataset size
            model.fit(x_train,
Example #18
def stack_layers(inputs, layers, kernel_initializer='glorot_uniform'):
    '''
    Builds the architecture of the network by applying each layer specified in layers to inputs.

    inputs:     a dict containing input_types and input_placeholders for each key and value pair, respectively.
                for spectralnet, this means the input_types 'Unlabeled' and 'Orthonorm'*
    layers:     a list of dicts containing all layers to be used in the network, where each dict describes
                one such layer. each dict requires the key 'type'. all other keys are dependent on the layer
                type

    kernel_initializer: initialization configuration passed to keras (see keras initializers)

    returns:    outputs, a dict formatted in much the same way as inputs. it contains input_types and
                output_tensors for each key and value pair, respectively, where output_tensors are
                the outputs of the input_placeholders in inputs after each layer in layers is applied

    * this is necessary since spectralnet takes multiple inputs and performs special computations on the
      orthonorm layer
    '''
    outputs = dict()

    for key in inputs:
        outputs[key] = inputs[key]

    for layer in layers:
        # check for l2_reg argument
        l2_reg = layer.get('l2_reg')
        if l2_reg:
            l2_reg = l2(layer['l2_reg'])

        # create the layer
        if layer['type'] == 'softplus_reg':
            l = Dense(layer['size'],
                      activation='softplus',
                      kernel_initializer=kernel_initializer,
                      kernel_regularizer=l2(0.001),
                      name=layer.get('name'))
        elif layer['type'] == 'softplus':
            l = Dense(layer['size'],
                      activation='softplus',
                      kernel_initializer=kernel_initializer,
                      kernel_regularizer=l2_reg,
                      name=layer.get('name'))
        elif layer['type'] == 'softmax':
            l = Dense(layer['size'],
                      activation='softmax',
                      kernel_initializer=kernel_initializer,
                      kernel_regularizer=l2_reg,
                      name=layer.get('name'))
        elif layer['type'] == 'tanh':
            l = Dense(layer['size'],
                      activation='tanh',
                      kernel_initializer=kernel_initializer,
                      kernel_regularizer=l2_reg,
                      name=layer.get('name'))
        elif layer['type'] == 'relu':
            l = Dense(layer['size'],
                      activation='relu',
                      kernel_initializer=kernel_initializer,
                      kernel_regularizer=l2_reg,
                      name=layer.get('name'))
        elif layer['type'] == 'selu':
            l = Dense(layer['size'],
                      activation='selu',
                      kernel_initializer=kernel_initializer,
                      kernel_regularizer=l2_reg,
                      name=layer.get('name'))
        elif layer['type'] == 'Conv2D':
            l = Conv2D(layer['channels'],
                       kernel_size=layer['kernel'],
                       activation='relu',
                       data_format='channels_last',
                       kernel_regularizer=l2_reg,
                       name=layer.get('name'))
        elif layer['type'] == 'BatchNormalization':
            l = BatchNormalization(name=layer.get('name'))
        elif layer['type'] == 'MaxPooling2D':
            l = MaxPooling2D(pool_size=layer['pool_size'],
                             data_format='channels_first',
                             name=layer.get('name'))
        elif layer['type'] == 'Dropout':
            l = Dropout(layer['rate'], name=layer.get('name'))
        elif layer['type'] == 'Flatten':
            l = Flatten(name=layer.get('name'))
        elif layer['type'] == 'Orthonorm':
            l = Orthonorm(outputs['Orthonorm'], name=layer.get('name'))
        else:
            raise ValueError("Invalid layer type '{}'".format(layer['type']))

        # apply the layer to each input in inputs
        for k in outputs:
            outputs[k] = l(outputs[k])

    return outputs
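
A hedged sketch of the inputs and layers arguments this function expects (shapes and sizes are illustrative):

x_unlabeled = Input(shape=(784,))
x_orthonorm = Input(shape=(784,))
inputs = {'Unlabeled': x_unlabeled, 'Orthonorm': x_orthonorm}
layers = [
    {'type': 'relu', 'size': 1024, 'l2_reg': 1e-4, 'name': 'dense1'},
    {'type': 'relu', 'size': 512, 'name': 'dense2'},
    {'type': 'tanh', 'size': 10, 'name': 'embedding'},
]
outputs = stack_layers(inputs, layers)  # same layer instances applied to both inputs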
Example #19
def u_dense_net_2(input_shape,
                  output_shape,
                  num_db,
                  num_channels=64,
                  growth_rate=32,
                  convs_per_db=3):
    assert len(
        input_shape
    ) == 3, f"Input shape must have 3 dimensions! Received '{input_shape}'!"
    assert (num_db > 1) and (
        num_db % 2 == 1
    ), f"Number of DenseBlocks must be an odd number greater than 1! Received '{num_db}'!"
    # In a U-shaped DenseNet with N DenseBlocks, each side has floor(N/2) DenseBlocks
    num_trans_down = num_trans_up = num_db // 2
    assert (input_shape[0] % (2**num_trans_down) == 0) and (
        input_shape[1] % (2**num_trans_down) == 0
    ), f"Dimension of the input shape {input_shape[:2]} must be a multiple of {2**num_trans_down} to preserve the tensor shape after down-scaling and up-scaling"
    assert (num_channels > 0) and (
        num_channels % 2 == 0
    ), f"Number of channels for TransitionBlock must be an even number greater than 0! Received '{num_channels}'!"

    _num_channels = num_channels
    img_in = Input(dtype="float32", shape=input_shape, name="image_input")
    x = Conv2D(_num_channels,
               kernel_size=(5, 5),
               activation="relu",
               padding="same")(img_in)
    ############################### Transition down section ###############################
    db_outputs = []
    for i in range(num_trans_down):
        x = DenseBlock(num_layers=convs_per_db, filters=growth_rate)(x)
        db_outputs.insert(0, x)
        num_channels += growth_rate * i
        num_channels //= 2
        x = TransitionBlock(filters=num_channels, trans_down=True)(x)
    #################################### Mid DenseBlock ###################################
    x_mid = x = DenseBlock(num_layers=convs_per_db, filters=growth_rate)(x)
    ################################ Transition up section ################################
    for i in range(num_trans_up):
        num_channels += growth_rate * (i + 1)
        num_channels //= 2
        x = TransitionBlock(filters=num_channels, trans_down=False)(x)
        x = Concatenate(axis=-1)([x, db_outputs[i]])
        x = DenseBlock(num_layers=convs_per_db, filters=growth_rate)(x)

    img_out = Conv2D(1,
                     kernel_size=(5, 5),
                     activation="sigmoid",
                     padding="same",
                     name="img_out")(x)

    ################################ Path vector section ################################
    x = TransitionBlock(filters=_num_channels, trans_down=True)(x)
    x = DenseBlock(num_layers=convs_per_db, filters=growth_rate)(x)
    x = TransitionBlock(filters=_num_channels, trans_down=True)(x)
    x = Flatten()(x)
    x = Dense(256, activation="relu")(x)
    x = Dense(256, activation="relu")(x)
    x = Dense(output_shape[0] * output_shape[1])(x)
    path_out = Reshape(output_shape, name="path_out")(x)

    model = Model(inputs=[img_in],
                  outputs=[img_out, path_out],
                  name="DenseNet")
    return model
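
A hedged usage sketch; the values satisfy the assertions above (three input dimensions, an odd num_db greater than 1, spatial dimensions divisible by 2**(num_db // 2)), and the DenseBlock and TransitionBlock layers are assumed to come from the same project:

model = u_dense_net_2(input_shape=(64, 64, 1),
                      output_shape=(16, 2),
                      num_db=5)
model.summary()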
Example #20
def model_fn_ALEXNET(features,
                     activation='relu',
                     kernel_initializer=tf.keras.initializers.TruncatedNormal(
                         mean=0, stddev=0.1),
                     bias_initializer='zeros'):

    # input: [None, 227, 227, 3]
    # conv1: f 96, k (11,11), s (4,4), VALID, relu --> [None, 54, 54, 96]
    with tf.control_dependencies([
            tf.debugging.assert_equal(features.get_shape().as_list()[1:],
                                      [227, 227, 3])
    ]):
        conv1 = Conv2D(filters=96,
                       kernel_size=(11, 11),
                       strides=(4, 4),
                       padding='valid',
                       activation=activation,
                       use_bias=True,
                       kernel_initializer=kernel_initializer,
                       bias_initializer=bias_initializer)(features)

    # pool1: k (3,3), s (2,2), VALID               --> [None, 26, 26, 96]
    with tf.control_dependencies([
            tf.debugging.assert_equal(conv1.get_shape().as_list()[1:],
                                      [54, 54, 96])
    ]):
        pool1 = MaxPool2D(pool_size=(3, 3), strides=(2, 2),
                          padding='valid')(conv1)

    # conv2: f 256, k (5,5), s (1,1), SAME, relu   --> [None, 26, 26, 256]
    with tf.control_dependencies([
            tf.debugging.assert_equal(pool1.get_shape().as_list()[1:],
                                      [26, 26, 96])
    ]):
        conv2 = Conv2D(filters=256,
                       kernel_size=(5, 5),
                       strides=(1, 1),
                       padding='same',
                       activation=activation,
                       use_bias=True,
                       kernel_initializer=kernel_initializer,
                       bias_initializer=bias_initializer)(pool1)

    # pool2: k (3,3), s (2,2), VALID               --> [None, 12, 12, 256]
    with tf.control_dependencies([
            tf.debugging.assert_equal(conv2.get_shape().as_list()[1:],
                                      [26, 26, 256])
    ]):
        pool2 = MaxPool2D(pool_size=(3, 3), strides=(2, 2),
                          padding='valid')(conv2)

    # conv3: f 384, k (3,3), s(1,1), SAME, relu    --> [None, 12, 12, 384]
    with tf.control_dependencies([
            tf.debugging.assert_equal(pool2.get_shape().as_list()[1:],
                                      [12, 12, 256])
    ]):
        conv3 = Conv2D(filters=384,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       padding='same',
                       activation=activation,
                       use_bias=True,
                       kernel_initializer=kernel_initializer,
                       bias_initializer=bias_initializer)(pool2)

    # conv4: f 384, k (3,3), s(1,1), SAME, relu    --> [None, 12, 12, 384]
    with tf.control_dependencies([
            tf.debugging.assert_equal(conv3.get_shape().as_list()[1:],
                                      [12, 12, 384])
    ]):
        conv4 = Conv2D(filters=384,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       padding='same',
                       activation=activation,
                       use_bias=True,
                       kernel_initializer=kernel_initializer,
                       bias_initializer=bias_initializer)(conv3)

    # conv5: f 256, k (3,3), s(1,1), SAME, relu    --> [None, 12, 12, 256]
    with tf.control_dependencies([
            tf.debugging.assert_equal(conv4.get_shape().as_list()[1:],
                                      [12, 12, 384])
    ]):
        conv5 = Conv2D(filters=256,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       padding='same',
                       activation=activation,
                       use_bias=True,
                       kernel_initializer=kernel_initializer,
                       bias_initializer=bias_initializer)(conv4)

    # pool5: k (3,3), s (2,2)                      --> [None,  5,  5, 256]
    with tf.control_dependencies([
            tf.debugging.assert_equal(conv5.get_shape().as_list()[1:],
                                      [12, 12, 256])
    ]):
        pool5 = MaxPool2D(pool_size=(3, 3), strides=(2, 2),
                          padding='valid')(conv5)

    # flatten --> [None, 6400]
    flatten = Flatten()(pool5)

    # fc6: f 4096, relu --> [None, 4096]
    with tf.control_dependencies([
            tf.debugging.assert_equal(flatten.get_shape().as_list()[1:],
                                      [6400])
    ]):
        fc6 = Dense(units=4096,
                    activation=activation,
                    use_bias=True,
                    kernel_initializer=kernel_initializer,
                    bias_initializer=bias_initializer)(flatten)

    # drop7: p 0.5      --> [None, 4096]
    drop7 = Dropout(rate=0.5)(fc6)

    # fc7: f 4096, relu --> [None, 4096]
    with tf.control_dependencies([
            tf.debugging.assert_equal(fc6.get_shape().as_list()[1:],
                                      [4096])
    ]):
        fc7 = Dense(units=4096,
                    activation=activation,
                    use_bias=True,
                    kernel_initializer=kernel_initializer,
                    bias_initializer=bias_initializer)(drop7)

    # drop8: p 0.5      --> [None, 4096]
    drop8 = Dropout(rate=0.5)(fc7)

    return drop8
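
As with the LeNet example, the function returns the network up to drop8 and leaves the logits layer to the caller. A minimal sketch of a head (1000 ImageNet classes is an assumption), intended for TF1-style graph execution where the control-dependency asserts above are meaningful:

inputs = tf.keras.Input(shape=(227, 227, 3))
drop8 = model_fn_ALEXNET(inputs)
logits = Dense(units=1000)(drop8)  # hypothetical classification head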
    padding="same",
    kernel_initializer=keras.initializers.TruncatedNormal(stddev=init_stddev),
)(headModel)
headModel = BatchNormalization(momentum=MOM)(headModel)
headModel = LeakyReLU(alpha=0.2)(headModel)
headModel = Conv2D(
    448,
    kernel_size=[5, 5],
    strides=[2, 2],
    padding="same",
    kernel_initializer=keras.initializers.TruncatedNormal(stddev=init_stddev),
)(headModel)
headModel = BatchNormalization(momentum=MOM)(headModel)
headModel = LeakyReLU(alpha=0.2)(headModel)
headModel = AveragePooling2D(pool_size=(2, 2))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(64, activation="relu")(headModel)
headModel = BatchNormalization(momentum=MOM)(headModel)
headModel = LeakyReLU(alpha=0.2)(headModel)
headModel = Dense(32, activation="relu")(headModel)
headModel = BatchNormalization(momentum=MOM)(headModel)
headModel = LeakyReLU(alpha=0.2)(headModel)
headModel = Dense(32, activation="relu")(headModel)
headModel = BatchNormalization(momentum=MOM)(headModel)
headModel = LeakyReLU(alpha=0.2)(headModel)
headModel = Dropout(DROP)(headModel)
headModel = Dense(2, activation="softmax")(headModel)

# place the head FC model on top of the base model (this will become the actual model we will train)
model = Model(inputs=baseModel.input, outputs=headModel)
model.summary()
Example #22
    def build(input_shape, num_outputs, block_fn, repetitions, reg_factor):
        """Instantiate a vanilla ResNet3D keras model.

        # Arguments
            input_shape: Tuple of input shape in the format
            (conv_dim1, conv_dim2, conv_dim3, channels) if dim_ordering='tf'
            (filter, conv_dim1, conv_dim2, conv_dim3) if dim_ordering='th'
            num_outputs: The number of outputs at the final softmax layer
            block_fn: Unit block to use {'basic_block', 'bottleneck_block'}
            repetitions: Repetitions of unit blocks
        # Returns
            model: a 3D ResNet model that takes a 5D tensor (volumetric images
            in batch) as input and returns a 1D vector (prediction) as output.
        """
        _handle_data_format()
        if len(input_shape) != 4:
            raise ValueError("Input shape should be a tuple "
                             "(conv_dim1, conv_dim2, conv_dim3, channels) "
                             "for tensorflow as backend or "
                             "(channels, conv_dim1, conv_dim2, conv_dim3) "
                             "for theano as backend")

        block_fn = _get_block(block_fn)
        input = Input(shape=input_shape)
        # first conv
        conv1 = _conv_bn_relu3D(filters=64, kernel_size=(7, 7, 7),
                                strides=(2, 2, 2),
                                kernel_regularizer=l2(reg_factor)
                                )(input)
        pool1 = MaxPooling3D(pool_size=(3, 3, 3), strides=(2, 2, 2),
                             padding="same")(conv1)

        # repeat blocks
        block = pool1
        filters = 64
        for i, r in enumerate(repetitions):
            block = _residual_block3d(block_fn, filters=filters,
                                      kernel_regularizer=l2(reg_factor),
                                      repetitions=r, is_first_layer=(i == 0)
                                      )(block)
            filters *= 2

        # last activation
        block_output = _bn_relu(block)

        # average pool and classification
        pool2 = AveragePooling3D(pool_size=(block.get_shape().as_list()[DIM1_AXIS],
                                            block.get_shape().as_list()[DIM2_AXIS],
                                            block.get_shape().as_list()[DIM3_AXIS]),
                                 strides=(1, 1, 1))(block_output)
        flatten1 = Flatten()(pool2)
        if num_outputs > 1:
            dense = Dense(units=num_outputs,
                          kernel_initializer="he_normal",
                          activation="softmax",
                          kernel_regularizer=l2(reg_factor))(flatten1)
        else:
            dense = Dense(units=num_outputs,
                          kernel_initializer="he_normal",
                          activation="sigmoid",
                          kernel_regularizer=l2(reg_factor))(flatten1)

        model = Model(inputs=input, outputs=dense)
        return model
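
A hedged usage sketch of the builder (in its original project this is a static method on a builder class; the call below assumes it is reachable as shown, and all values are illustrative):

model = build(input_shape=(64, 64, 64, 1),
              num_outputs=2,
              block_fn='basic_block',
              repetitions=[2, 2, 2, 2],
              reg_factor=1e-4)
model.compile(optimizer='adam', loss='categorical_crossentropy')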