Example #1
    def build(self):
        """
        Builds the full Keras model and stores it in self.model.
        """
        mc = self.config
        in_x = x = Input((12, 8, 8))

        # (batch, channels, height, width)
        x = Conv2D(filters=mc.cnn_filter_num,
                   kernel_size=mc.cnn_first_filter_size,
                   padding="same",
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="input_conv-" + str(mc.cnn_first_filter_size) + "-" +
                   str(mc.cnn_filter_num))(x)
        x = BatchNormalization(axis=1, name="input_batchnorm")(x)
        x = Activation("relu", name="input_relu")(x)

        for i in range(mc.res_layer_num):
            x = self._build_residual_block(x, i + 1)

        res_out = x

        # for policy output
        x = Conv2D(filters=2,
                   kernel_size=1,
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="policy_conv-1-2")(res_out)
        x = BatchNormalization(axis=1, name="policy_batchnorm")(x)
        x = Activation("relu", name="policy_relu")(x)
        x = Flatten(name="policy_flatten")(x)

        policy_out = Dense(self.config.n_labels,
                           kernel_regularizer=l2(mc.l2_reg),
                           activation="softmax",
                           name="policy_out")(x)

        # for value output
        x = Conv2D(filters=4,
                   kernel_size=1,
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="value_conv-1-4")(res_out)
        x = BatchNormalization(axis=1, name="value_batchnorm")(x)
        x = Activation("relu", name="value_relu")(x)
        x = Flatten(name="value_flatten")(x)
        x = Dense(mc.value_fc_size,
                  kernel_regularizer=l2(mc.l2_reg),
                  activation="relu",
                  name="value_dense")(x)
        value_out = Dense(1,
                          kernel_regularizer=l2(mc.l2_reg),
                          activation="tanh",
                          name="value_out")(x)

        self.model = Model(in_x, [policy_out, value_out], name="chess_model")
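For orientation, here is a minimal sketch of the config object this method reads from. The field names are taken from the code above; the concrete values are illustrative assumptions, not the source's defaults:

from types import SimpleNamespace

mc = SimpleNamespace(
    cnn_filter_num=256,        # filters in the input convolution
    cnn_first_filter_size=5,   # kernel size of the input convolution
    res_layer_num=7,           # number of residual blocks
    l2_reg=1e-4,               # L2 weight-regularization factor
    value_fc_size=256,         # width of the value head's dense layer
    n_labels=1968,             # number of policy outputs (illustrative)
)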
Example #2
def buildClassifier(input_shape=(100, 100, 3), learning_rate=0.001):
    """
    Builds the CNN classifier.
    Args:
        input_shape(tuple): shape of the input images, e.g. (100, 100, 3)
        learning_rate(float): learning rate for the Adam optimizer
    Returns:
        classifier(Sequential): the compiled sequential model.
    """
    # Initialising the CNN
    opt = Adam(learning_rate=learning_rate)
    classifier = Sequential()
    classifier.add(
        Conv2D(32,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=input_shape,
               padding='same'))
    classifier.add(MaxPooling2D(pool_size=(3, 3), padding='same'))
    classifier.add(Dropout(0.5))  # added extra Dropout layer
    classifier.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    classifier.add(Dropout(0.5))  # added extra dropout layer
    classifier.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Dropout(0.2))  # was 0.25 before
    classifier.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
    classifier.add(Conv2D(1024, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(
        Flatten())  # flatten the feature maps before the dense layers
    classifier.add(Dense(units=1024,
                         activation='relu'))  # added new dense layer
    classifier.add(Dropout(0.2))  # was 0.25 before
    classifier.add(Dense(units=1024,
                         activation='relu'))  # added new dense layer
    classifier.add(Dense(units=256,
                         activation='relu'))  # added new dense layer
    # Step 4 - Full connection
    classifier.add(Dropout(0.2))
    classifier.add(Dense(units=1, activation='sigmoid'))
    classifier.summary()

    # Compiling the CNN
    classifier.compile(optimizer=opt,
                       loss='binary_crossentropy',
                       metrics=['accuracy'])
    #plot_model(classifier, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
    return classifier
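A quick usage sketch under the defaults above; `X_train` and `y_train` stand in for your own image batch and binary labels:

classifier = buildClassifier(input_shape=(100, 100, 3), learning_rate=0.001)
# classifier.fit(X_train, y_train, batch_size=32, epochs=10, validation_split=0.1)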
Example #3
def create_dummy_classifier(window_size: int,
                            num_rows_df: int,
                            num_output_fields: int,
                            neurons_rnn: int = 10,
                            dropout: float = 0.0,
                            learning_rate: float = 0.01,
                            bidirection: bool = True,
                            return_sequences: bool = False):
                            
    lr_schedule = keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=learning_rate,
        decay_steps=10000,
        decay_rate=0.9)

    model = keras.Sequential(name='dummy_classifier')

    model.add(Input(shape=(window_size, num_rows_df), name='input'))

    if bidirection:
        model.add(Bidirectional(
            LSTM(neurons_rnn, return_sequences=return_sequences),
            name='bidirection'))
    else:
        model.add(LSTM(neurons_rnn, name="rnn",
                       return_sequences=return_sequences))
    if return_sequences:
        model.add(Flatten())
    model.add(Dropout(dropout, name='dropout'))
    model.add(Dense(num_output_fields, activation='sigmoid', name='dense_output'))

    model.summary()

    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(learning_rate=lr_schedule), metrics=['accuracy', 'binary_accuracy'])
    return model
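An illustrative call, assuming 20 timesteps of 8 features and a single binary output field:

model = create_dummy_classifier(window_size=20, num_rows_df=8,
                                num_output_fields=1, neurons_rnn=16,
                                dropout=0.2, bidirection=True)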
Example #4
def buildClassifier(input_shape=(100, 100, 3)):
    # Initialising the CNN
    classifier = Sequential()
    classifier.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape, padding='same'))
    classifier.add(MaxPooling2D(pool_size=(4, 4), padding='same'))
    classifier.add(Dropout(0.5))  # added extra Dropout layer
    classifier.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    classifier.add(Dropout(0.5))  # added extra dropout layer
    classifier.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Dropout(0.2))  # was 0.25 before
    classifier.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
    classifier.add(Conv2D(1024, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Dense(units=1024, activation='relu'))  # added new dense layer (applied per spatial position; the map is flattened below)
    classifier.add(Dropout(0.2))  # was 0.25 before
    # Step 3 - Flattening
    classifier.add(Flatten())
    classifier.add(Dense(units=1024, activation='relu'))  # added new dense layer
    classifier.add(Dense(units=256, activation='relu'))  # added new dense layer
    # Step 4 - Full connection
    classifier.add(Dropout(0.2))
    classifier.add(Dense(units=1, activation='sigmoid'))
    classifier.summary()

    # Compiling the CNN
    classifier.compile(optimizer='adam',
                       loss='binary_crossentropy',
                       metrics=['accuracy'])
    plot_model(classifier, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
    return classifier
Example #5
def initialize_model():

    model = Sequential()
    model.add(
        Conv2D(40, 11, strides=1, padding='same', input_shape=(1, 1024, 4)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))

    model.add(Conv2D(40, 11, strides=1, padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))

    model.add(MaxPooling2D(pool_size=(1, 64)))

    model.add(Flatten())

    model.add(Dense(units=500))

    model.add(Dense(units=640))

    model.add(Reshape((1, 16, 40)))

    model.add(Conv2DTranspose(40, 11, strides=(1, 64), padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))

    model.add(Conv2DTranspose(40, 11, strides=(1, 1), padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))

    model.add(Conv2D(4, 11, strides=1, padding='same', activation='sigmoid'))
    model.summary()
    model.compile(optimizer='adam', loss='mse')

    return model
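A quick smoke test with random data (illustrative only); the model is an autoencoder over channels-last tensors of shape (1, 1024, 4), so input and target are the same array:

import numpy as np

model = initialize_model()
x = np.random.rand(8, 1, 1024, 4).astype("float32")
model.fit(x, x, epochs=1, batch_size=4)  # reconstruct the input (mse loss)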
Example #6
def initialize_model():

    one_filter_keras_model = Sequential()
    one_filter_keras_model.add(
        Conv2D(filters=40,
               kernel_size=(1, 11),
               padding="same",
               input_shape=(1, 1500, 5),
               kernel_constraint=NonNeg()))
    one_filter_keras_model.add(BatchNormalization(axis=-1))
    one_filter_keras_model.add(Activation('relu'))

    one_filter_keras_model.add(MaxPooling2D(pool_size=(1, 30)))

    one_filter_keras_model.add(Flatten())
    one_filter_keras_model.add(Dense(40))
    one_filter_keras_model.add(BatchNormalization(axis=-1))
    one_filter_keras_model.add(Activation('relu'))
    one_filter_keras_model.add(Dropout(0.5))

    one_filter_keras_model.add(Dense(1))
    one_filter_keras_model.add(Activation("sigmoid"))

    one_filter_keras_model.summary()
    one_filter_keras_model.compile(optimizer='adam',
                                   loss='binary_crossentropy',
                                   metrics=[precision, recall, specificity])

    return one_filter_keras_model
Example #7
def discriminator_model():
    """Build discriminator architecture."""
    n_layers, use_sigmoid = 3, False
    inputs = Input(shape=input_shape_discriminator)

    x = Conv2D(filters=ndf, kernel_size=(4, 4), strides=2, padding='same')(inputs)
    x = LeakyReLU(0.2)(x)

    nf_mult, nf_mult_prev = 1, 1
    for n in range(n_layers):
        nf_mult_prev, nf_mult = nf_mult, min(2**n, 8)
        x = Conv2D(filters=ndf*nf_mult, kernel_size=(4, 4), strides=2, padding='same')(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.2)(x)

    nf_mult_prev, nf_mult = nf_mult, min(2**n_layers, 8)
    x = Conv2D(filters=ndf*nf_mult, kernel_size=(4, 4), strides=1, padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)

    x = Conv2D(filters=1, kernel_size=(4, 4), strides=1, padding='same')(x)
    if use_sigmoid:
        x = Activation('sigmoid')(x)

    x = Flatten()(x)
    x = Dense(1024, activation='tanh')(x)
    x = Dense(1, activation='sigmoid')(x)

    model = Model(inputs=inputs, outputs=x, name='Discriminator')
    return model
Example #8
def bbox_3D_net(input_shape=(224, 224, 3), vgg_weights=None, freeze_vgg=False, bin_num=6):
    vgg16_model = VGG16(include_top=False, weights=vgg_weights, input_shape=input_shape)

    if freeze_vgg:
        for layer in vgg16_model.layers:
            layer.trainable = False

    x = Flatten()(vgg16_model.output)

    dimension = Dense(512)(x)
    dimension = LeakyReLU(alpha=0.1)(dimension)
    dimension = Dropout(0.5)(dimension)
    dimension = Dense(3)(dimension)
    dimension = LeakyReLU(alpha=0.1, name='dimension')(dimension)

    orientation = Dense(256)(x)
    orientation = LeakyReLU(alpha=0.1)(orientation)
    orientation = Dropout(0.5)(orientation)
    orientation = Dense(bin_num * 2)(orientation)
    orientation = LeakyReLU(alpha=0.1)(orientation)
    orientation = Reshape((bin_num, -1))(orientation)
    orientation = Lambda(l2_normalize, name='orientation')(orientation)

    confidence = Dense(256)(x)
    confidence = LeakyReLU(alpha=0.1)(confidence)
    confidence = Dropout(0.5)(confidence)
    confidence = Dense(bin_num, activation='softmax', name='confidence')(confidence)

    model = Model(vgg16_model.input, outputs=[dimension, orientation, confidence])
    return model
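Since the model has three named outputs, compiling it takes one loss per head. The losses below are illustrative placeholders (3D-box methods typically use a custom angle loss for the orientation head), not taken from the source:

model = bbox_3D_net(input_shape=(224, 224, 3), vgg_weights='imagenet', bin_num=6)
model.compile(optimizer='adam',
              loss={'dimension': 'mse',
                    'orientation': 'mse',
                    'confidence': 'categorical_crossentropy'})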
Example #9
    def build(width, height, depth, classes):
        # initialize the model along with the input shape to be
        # "channels last"

        model = Sequential()
        #the image input
        inputShape = (height, width, depth)

        # if we are using "channels first", update the input shape
        if image_data_format() == "channels_first":
            inputShape = (depth, height, width)

        #Every CNN that you implement will have a build method: it accepts a
        #number of parameters, constructs the network architecture, and then
        #returns it to the calling function

        #define the first (and only) CONV => RELU layer
        #This layer will have 32 filters, each of which is 3x3; apply "same" padding
        #to ensure the size of the output of the convolution operation matches the input
        #(using same padding isn't strictly necessary for this example, but it's a good
        #habit to start forming now)
        model.add(Conv2D(32, (3, 3), padding="same", input_shape=inputShape))
        model.add(Activation("relu"))

        #softmax classifier
        model.add(Flatten())
        model.add(Dense(classes))
        model.add(Activation("softmax"))

        #return the constructed network architecture
        return model
Example #10
def compute_word_embeddings(model_dir, epoch):
    path_to_model = to_model_path(model_dir, epoch)
    # lookup model vocabulary
    vocabulary = Vocabulary.create_vocabulary_from_vocabulary_json(
        model_dir, "", use_nltk=False)
    # prepare and load model
    vinput = Input((196, 512))
    model = create_shatt_model_v2(image_features_graph=(vinput, vinput),
                                  caption_max_length=16,
                                  vocabulary_size=len(vocabulary),
                                  dropout_rate=0.,
                                  start_encoding=vocabulary.get_start_symbol(),
                                  image_features_dimensions=196,
                                  embedding_size=512,
                                  hidden_size=1024,
                                  inference_mode=True,
                                  attention_graph=None,
                                  return_attention=True,
                                  use_max_sampler=True)
    model.load_weights(path_to_model, by_name=True)
    """ establish embedding model """
    layer_name = "shatt_word_embeddings"
    layer = model.get_layer(layer_name)
    if layer == None:
        raise Exception("Cannot find layer with name " + layer_name)
    input_words = Input(shape=(1, ), name="embedding_callback_input_words")
    layer_output = layer(input_words)
    layer_output = Flatten(name="embedding_callback_flatten")(layer_output)
    embedding_model = Model(inputs=input_words, outputs=layer_output)
    """ write metadata.tsv """
    word_sequence = vocabulary.get_word_sequence(padding_symbol="<PAD>")
    store_json_to(word_sequence,
                  model_dir,
                  lookup_filename="word_sequence.json")
    """ encode sequence"""
    encoded_word_sequence = vocabulary.get_encoded_word_sequence(
        include_padding=True)

    sequence = WordSequence(encoded_word_sequence, 64)

    processed_count = 0
    expected_num_batches = sequence.get_num_batches()
    results = []
    try:
        for words in sequence.one_shot_iterator():
            words = np.expand_dims(words, axis=-1)
            word_embeddings = embedding_model.predict_on_batch(words)
            results.extend(word_embeddings)
            processed_count = processed_count + 1
            print(">> Computing word embeddings {:d}/{:d} ({:3.0f}%)".format(
                processed_count, expected_num_batches,
                processed_count / expected_num_batches * 100),
                  end="\r")
    except Exception as e:
        print("Exception: ", e)
    results = np.array(results)
    store_numpy_to(results, model_dir, lookup_file_name="word_embeddings.npy")
Example #11
 def test_fit_octave(self):
     inputs = Input(shape=(32, 3))
     high, low = OctaveConv1D(13, kernel_size=3, octave=4)(inputs)
     high, low = MaxPool1D()(high), MaxPool1D()(low)
     conv = OctaveConv1D(5, kernel_size=3, octave=4, ratio_out=0.0)([high, low])
     flatten = Flatten()(conv)
     outputs = Dense(units=2, activation='softmax')(flatten)
     model = Model(inputs=inputs, outputs=outputs)
     model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
     model.summary(line_length=200)
     self._test_fit(model)
Example #12
 def define_network(self):
     network1 = models.Sequential()
     network1.add(self.image_input_layer)
     network1.add(self.convolution2d_layer)
     network1.add(self.max_pooling2d_layer)
     network1.add(Flatten())
     network1.add(self.fully_connected_layer1)
     network1.add(self.fully_connected_layer2)
     network1.compile(metrics=['accuracy'],
                      loss='categorical_crossentropy',
                      optimizer=self.optimizer)
     return network1
Example #13
    def cnn_model(self, max_len, max_words):
        model = Sequential()
        model.add(Embedding(max_words, 50, input_length=max_len))
        model.add(Conv1D(32, 3, padding="same", activation=FLAGS.dense_layer_activation))
        model.add(MaxPooling1D())
        model.add(Flatten())
        model.add(Dense(256, activation=FLAGS.dense_layer_activation))
        model.add(Dense(1, activation=FLAGS.output_layer_activation))
        model.summary()

        input = Input(shape=[max_len])
        output = model(input)
        return Model(input, output)
Example #14
 def test_fit_channels_first(self):
     inputs = Input(shape=(3, 32, 32))
     high, low = OctaveConv2D(13, kernel_size=3, data_format='channels_first')(inputs)
     high, low = MaxPool2D(data_format='channels_first')(high), MaxPool2D(data_format='channels_first')(low)
     high, low = OctaveConv2D(7, kernel_size=3, data_format='channels_first')([high, low])
     high, low = MaxPool2D(data_format='channels_first')(high), MaxPool2D(data_format='channels_first')(low)
     conv = OctaveConv2D(5, kernel_size=3, ratio_out=0.0, data_format='channels_first')([high, low])
     flatten = Flatten()(conv)
     outputs = Dense(units=2, activation='softmax')(flatten)
     model = Model(inputs=inputs, outputs=outputs)
     model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
     model.summary(line_length=200)
     self._test_fit(model, data_format='channels_first')
Example #15
 def test_make_dual_lambda(self):
     inputs = Input(shape=(32, 32, 3))
     conv = OctaveConv2D(13, kernel_size=3)(inputs)
     pool = OctaveConvDual()(conv, lambda: MaxPool2D())
     conv = OctaveConv2D(7, kernel_size=3)(pool)
     pool = OctaveConvDual()(conv, lambda: MaxPool2D())
     conv = OctaveConv2D(5, kernel_size=3, ratio_out=0.0)(pool)
     flatten = Flatten()(conv)
     outputs = Dense(units=2, activation='softmax')(flatten)
     model = Model(inputs=inputs, outputs=outputs)
     model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
     model.summary(line_length=200)
     self._test_fit(model)
Example #16
	def build(width, height, depth, classes):
		# initialize the model along with the input shape to be
		# "channels last" and the channels dimension itself
		model = Sequential()
		inputShape = (height, width, depth)
		chanDim = -1

		# if we are using "channels first", update the input shape
		# and channels dimension
		if K.image_data_format() == "channels_first":
			inputShape = (depth, height, width)
			chanDim = 1

		# first CONV => RELU => CONV => RELU => POOL layer set
		model.add(Conv2D(32, (3, 3), padding="same",
			input_shape=inputShape))
		model.add(Activation("relu"))
		model.add(BatchNormalization(axis=chanDim))
		model.add(Conv2D(32, (3, 3), padding="same"))
		model.add(Activation("relu"))
		model.add(BatchNormalization(axis=chanDim))
		model.add(MaxPooling2D(pool_size=(2, 2)))
		model.add(Dropout(0.25))

		# second CONV => RELU => CONV => RELU => POOL layer set
		model.add(Conv2D(64, (3, 3), padding="same"))
		model.add(Activation("relu"))
		model.add(BatchNormalization(axis=chanDim))
		model.add(Conv2D(64, (3, 3), padding="same"))
		model.add(Activation("relu"))
		model.add(BatchNormalization(axis=chanDim))
		model.add(MaxPooling2D(pool_size=(2, 2)))
		model.add(Dropout(0.25))

		# first (and only) set of FC => RELU layers
		model.add(Flatten())
		model.add(Dense(512))
		model.add(Activation("relu"))
		model.add(BatchNormalization())
		model.add(Dropout(0.5))

		# softmax classifier
		model.add(Dense(classes))
		model.add(Activation("softmax"))

		# return the constructed network architecture
		return model
Example #17
    def build(height, width, depth, classes):
        # initialize the model
        model = Sequential()
        #the shape of our image inputs
        inputShape = (height, width, depth)
        #if we are using "channels first" update the input shape
        if (K.image_data_format() == "channels_first"):
            inputShape = (depth, height, width)

        #first set of 5x5 CONV => RELU layers (strided convs stand in for pooling)
        model.add(
            Conv2D(24, (5, 5),
                   strides=(2, 2),
                   padding="valid",
                   input_shape=inputShape))
        model.add(Activation('relu'))

        #second set of 5x5 CONV => RELU layers
        model.add(Conv2D(36, (5, 5), strides=(2, 2), padding="valid"))
        model.add(Activation('relu'))

        #third set of 5x5 CONV => RELU layers
        model.add(Conv2D(48, (5, 5), strides=(2, 2), padding="valid"))
        model.add(Activation('relu'))

        #first set of 3x3 CONV => RELU layers
        model.add(Conv2D(64, (3, 3), padding="valid"))
        model.add(Activation('relu'))

        #second set of 3x3 CONV => RELU layers
        model.add(Conv2D(64, (3, 3), padding="valid"))
        model.add(Activation('relu'))

        #set of fully connected layers
        model.add(Flatten())
        model.add(Dense(1164))
        model.add(Activation('relu'))
        model.add(Dense(100))
        model.add(Activation('relu'))
        model.add(Dense(10))
        model.add(Activation('relu'))

        #output
        model.add(Dense(classes))
        model.add(Activation('tanh'))

        return model
Example #18
def modelEncode(cae, filterSize, poolSize, sampSize, gpus):
    if gpus > 1:
        cae = cae.layers[-2]

    # initialize encoder
    encode = Sequential()
    encode.add(
        Convolution2D(8, (filterSize, filterSize),
                      input_shape=(3, sampSize, sampSize),
                      padding='same',
                      weights=cae.layers[0].get_weights()))
    encode.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    encode.add(Activation('relu'))
    encode.add(
        Convolution2D(16, (filterSize, filterSize),
                      padding='same',
                      weights=cae.layers[3].get_weights()))
    encode.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    encode.add(Activation('relu'))
    encode.add(
        Convolution2D(32, (filterSize, filterSize),
                      padding='same',
                      weights=cae.layers[6].get_weights()))
    encode.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    encode.add(Activation('relu'))
    encode.add(
        Convolution2D(64, (filterSize, filterSize),
                      padding='same',
                      weights=cae.layers[9].get_weights()))
    encode.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    encode.add(Activation('relu'))
    encode.add(
        Convolution2D(128, (filterSize, filterSize),
                      padding='same',
                      weights=cae.layers[12].get_weights()))
    encode.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    encode.add(Activation('relu'))
    encode.add(Flatten())
    encode.add(Dense(1024, weights=cae.layers[16].get_weights()))
    encode.add(Activation('relu'))

    if gpus > 1:
        encode = multi_gpu_model(encode, gpus=gpus)

    encode.compile(loss='mse', optimizer='adam')

    return encode
Example #19
    def build(width, height, depth, classes):
        # depth refers to the number of image channels (3 for RGB)
        # initialize the model
        model = Sequential()
        inputShape = (height, width, depth)

        # if we are using "channels first", update the input shape
        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)

        # first set of CONV => RELU => POOL layers
        model.add(Conv2D(20, (3, 3), padding="same", input_shape=inputShape))
        model.add(BatchNormalization())
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        model.add(Dropout(0.23))

        # second set of CONV => RELU => POOL layers
        model.add(Conv2D(50, (3, 3), padding="same"))
        model.add(BatchNormalization())
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        model.add(Dropout(0.23))
        #3
        model.add(Conv2D(80, (3, 3), padding="same"))
        model.add(BatchNormalization())
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        model.add(Dropout(0.23))
        #4
        model.add(Conv2D(128, (3, 3), padding="same"))
        model.add(BatchNormalization())
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        model.add(Dropout(0.23))

        # first (and only) set of FC => RELU layers
        model.add(Flatten())
        model.add(Dense(1024))
        model.add(Activation("relu"))

        # softmax classifier
        model.add(Dense(classes))
        model.add(Activation("softmax"))

        # return the constructed network architecture
        return model
Example #20
def build_model():
    inp = Input(shape=(FRAME_H, FRAME_W, 3))
    x = Conv2D(filters=8, kernel_size=(5, 5), activation='relu')(inp)
    x = MaxPooling2D((2, 2))(x)

    x = Conv2D(filters=16, kernel_size=(5, 5), activation='relu')(x)
    x = MaxPooling2D((2, 2))(x)

    x = Conv2D(filters=32, kernel_size=(5, 5), activation='relu')(x)
    x = MaxPooling2D((2, 2))(x)

    x = Flatten()(x)
    x = Dropout(0.5)(x)
    x = Dense(128, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(1, activation='tanh')(x)
    return Model(inputs=[inp], outputs=[x])
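FRAME_H and FRAME_W are module-level constants in the source; a hypothetical setup (the tanh output suggests a regression target in [-1, 1], e.g. a steering angle):

FRAME_H, FRAME_W = 64, 64  # assumed frame size, not given in the snippet

model = build_model()
model.compile(optimizer='adam', loss='mse')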
Example #21
    def build(input_shape_width, input_shape_height, classes, 
              weight_path = '', input_shape_depth = 3):
        '''
        weight_path: path to a .hdf5 weights file; if provided, the weights
        are loaded into the model.
        '''
        
        # initialize the model
        model = Sequential()
        
        input_shape = (input_shape_height, input_shape_width, 
                       input_shape_depth)
        # if we are using "channels first", update the input shape
        if K.image_data_format() == 'channels_first':
             input_shape = (input_shape_depth, input_shape_height, 
                            input_shape_width)
        
        # first Convolution + relu + pooling layer
        model.add(Conv2D(filters = 20, kernel_size = (5, 5), 
                         padding = 'same', input_shape = input_shape))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size = (2, 2), strides=(2, 2)))
        
        # second convolutional layer
        model.add(Conv2D(filters = 50, kernel_size = (5, 5), 
                         padding = 'same'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        
        # Flattening
        model.add(Flatten())

        # Full connection
        model.add(Dense(units = 500))
        model.add(Activation('relu'))

        # output layer
        model.add(Dense(units = classes))
        model.add(Activation('softmax'))

        if weight_path:
            model.load_weights(weight_path)

        # return the constructed network architecture
        return model
Example #22
 def model(self) -> Model:
     if self._lazy_model is None:
         input = Input(shape=self.input_size)
         layer = input
         kernel_size = (3, 3)
         depth = int(self.input_size[1] / 8)
         for i in range(depth):
             filters = (2**i) * self.filter_root
             params = {'kernel_size': kernel_size, 'padding': 'same'}
             layer = Conv2D(filters=filters, name=f'CV_{i}',
                            **params)(layer)
             layer = Activation(activation=self.activation,
                                name=f'Act_{i}')(layer)
         layer = Flatten()(layer)
         lung_pixel = Input(shape=(1, ))
         layer = Concatenate(axis=1)([layer, lung_pixel])
         layer = Dense(units=self.filter_root, activation='relu')(layer)
         self._lazy_model = Model(input, layer, name="Volume")
     return self._lazy_model
Example #23
    def build(width, height, depth, classes):

        # Model initialization
        model = Sequential()
        input_shape = (height, width, depth)
        chan_dim = -1

        # Data formatting
        if k.image_data_format() == "channels_first":
            input_shape = (depth, height, width)
            chan_dim = 1

        # First layer set
        model.add(Conv2D(16, (3, 3), padding="same", input_shape=input_shape))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chan_dim))
        model.add(Conv2D(16, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chan_dim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        # Second layer set
        model.add(Conv2D(32, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chan_dim))
        model.add(Conv2D(32, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chan_dim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        # Third layer set
        model.add(Flatten())
        model.add(Dense(64))
        model.add(Activation("relu"))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))

        # Softmax classification layer set
        model.add(Dense(classes))
        model.add(Activation("softmax"))

        return model
Example #24
    def network(self, weights=None):

        num_inp = Input(shape=[self.state_length])
        num_feats = Dense(70, activation='relu')(num_inp)
        num_feats = Dense(40, activation='relu')(num_feats)

        board_inp = Input(shape=[10, 10, 10])

        board_feats = Dropout(rate=0.05)(
            BatchNormalization()(Conv2D(30,
                                        kernel_size=(3, 3),
                                        strides=(1, 1),
                                        activation='relu')(board_inp)))

        board_feats = Dropout(rate=0.05)(
            BatchNormalization()(Conv2D(30,
                                        kernel_size=(3, 3),
                                        strides=(1, 1),
                                        activation='relu')(board_feats)))

        board_feats = (Conv2D(30,
                              kernel_size=(3, 3),
                              strides=(1, 1),
                              activation='relu')(board_feats))

        board_feats = Flatten()(board_feats)
        board_feats = Dropout(rate=0.05)(Dense(150,
                                               activation='relu')(board_feats))
        #board_feats = Dense(50, activation='relu')(board_feats)
        feats = Dropout(rate=0.05)(Concatenate()([num_feats, board_feats]))
        feats = Dropout(rate=0.02)(Dense(150, activation='relu')(feats))
        feats = Dense(60, activation='relu')(feats)
        output = Dense(4)(feats)

        model = Model([num_inp, board_inp], output)
        model.summary()
        opt = Adam(learning_rate=self.learning_rate)
        model.compile(loss='mse', optimizer=opt)

        if weights:
            model.load_weights(weights)
        return model
Example #25
def createModel(train_data):
    classes = [
        'battery', 'disc', 'glass', 'metals', 'paper', 'plastic_jug_bottle',
        'plastic_packaging', 'styrofoam'
    ]

    model = Sequential()
    # Add layers
    model.add(
        Conv2D(32, (3, 3),
               padding='same',
               input_shape=train_data.shape[1:],
               activation='relu',
               name='conv_1'))
    model.add(Conv2D(32, (3, 3), activation='relu', name='conv_2'))
    model.add(MaxPooling2D(pool_size=(2, 2), name='maxpool_1'))
    model.add(Dropout(0.25))

    model.add(
        Conv2D(64, (3, 3), padding='same', activation='relu', name='conv_3'))
    model.add(Conv2D(64, (3, 3), activation='relu', name='conv_4'))
    model.add(MaxPooling2D(pool_size=(2, 2), name='maxpool_2'))
    model.add(Dropout(0.25))

    model.add(
        Conv2D(128, (3, 3), padding='same', activation='relu', name='conv_5'))
    model.add(Conv2D(128, (3, 3), activation='relu', name='conv_6'))
    model.add(MaxPooling2D(pool_size=(2, 2), name='maxpool_3'))

    model.add(Flatten())
    model.add(Dense(512, activation='relu', name='dense_1'))
    model.add(Dropout(0.5))
    model.add(Dense(128, activation='relu', name='dense_2'))
    model.add(Dense(len(classes), name='output'))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['acc'])  # optimizer=RMSprop(lr=0.001)

    return model
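train_data is only read for its shape; an illustrative call with dummy 64x64 RGB data:

import numpy as np

train_data = np.zeros((100, 64, 64, 3), dtype="float32")  # shape is all that matters here
model = createModel(train_data)
model.summary()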
Example #26
def def_model():
    model = keras.Sequential([
        Conv2D(64, (3, 3), input_shape=(32, 32, 3), activation='relu'),
        Conv2D(64, (3, 3), activation='relu'),  # input_shape only needed on the first layer
        MaxPool2D((2, 2)),
        Conv2D(128, (3, 3), activation='relu'),
        Conv2D(128, (3, 3), activation='relu'),
        MaxPool2D((2, 2)),
        Conv2D(256, (3, 3), activation='relu'),
        Conv2D(256, (3, 3), activation='relu'),
        MaxPool2D((2, 2)),
        Flatten(),
        #Dense(1024, activation='relu', kernel_regularizer=keras.regularizers.L2(0.05)),
        #Dense(512, activation='relu', kernel_regularizer=keras.regularizers.L1(0.05)),
        #Dense(256, activation='relu'),
        Dense(128, activation='relu'),
        Dropout(0.5),
        Dense(64, activation='relu'),
        Dense(10, activation='softmax')
    ])
    model.compile(optimizer=optimizer, loss=loss_fn, metrics=['accuracy'])
    return model
Example #27
def get_model(data, params):
    input = Input(shape=data.x_train.shape[1:])

    x = input
    for element in params["network"]:
        if element[0] == "C2D":
            x = Conv2D(filters=element[1],
                       kernel_size=element[2],
                       padding='same')(x)
            if element[3]:
                x = BatchNormalization()(x)
        elif element[0] == "Dense":
            x = Dense(units=element[1])(x)
        elif element[0] == "A":
            x = Activation(element[1])(x)
        elif element[0] == "MaxPool2D":
            x = MaxPool2D()(x)
        elif element[0] == "Flatten":
            x = Flatten()(x)
        else:
            print("Invalid element: " + element[0])

    # There has to be a Dense layer at the end
    x = Dense(units=data.num_classes)(x)

    y_pred = Activation("softmax")(x)

    # Build the model
    model = Model(inputs=[input], outputs=[y_pred])

    model.compile(
        loss="categorical_crossentropy",
        optimizer=params["optimizer"](learning_rate=params["learning_rate"]),
        metrics=["accuracy"])

    return model
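The params["network"] spec is a list of tagged tuples; the tags and field order below are inferred from the branches above, and the concrete values are hypothetical:

from tensorflow.keras.optimizers import Adam

params = {
    "network": [
        ("C2D", 32, (3, 3), True),   # Conv2D: filters, kernel_size, batchnorm flag
        ("A", "relu"),               # Activation
        ("MaxPool2D",),
        ("C2D", 64, (3, 3), True),
        ("A", "relu"),
        ("Flatten",),
        ("Dense", 128),
        ("A", "relu"),
    ],
    "optimizer": Adam,
    "learning_rate": 1e-3,
}
model = get_model(data, params)  # `data` supplies x_train and num_classes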
Example #28
# NOTE: the snippet is truncated in the source; it begins partway through
# the keras.Sequential layer list, so the opening blocks are missing.
# myConv2D and myDense are helper factories defined elsewhere in the source.
CNN_model = keras.Sequential([
    BatchNormalization(),
    Dropout(0.1),

    myConv2D(filters=128),
    myConv2D(filters=128),
    MaxPool2D(pool_size=(2,2)),
    BatchNormalization(),
    Dropout(0.3),

    myConv2D(filters=256),
    myConv2D(filters=256),
    MaxPool2D(pool_size=(2,2)),
    BatchNormalization(),
    Dropout(0.4),

    Flatten(),
    myDense(units=100),
    Dropout(0.5),
    myDense(units=10, activation='softmax') ])
CNN_model.summary()
new_training = 0
if new_training:
    CNN_model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
    model_saver = keras.callbacks.ModelCheckpoint('models/best_CNN_model_noBN.h5',monitor='val_accuracy', save_best_only=True)
    early_stopper = keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=5)
    performance_sched = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', patience=2, factor=0.2)
    #with tf.device('/gpu:0'):
    CNN_model.fit(X_train, y_train, epochs=5, batch_size=32,
        validation_data=(X_valid, y_valid),
        callbacks=[model_saver, early_stopper, performance_sched])
CNN_model = keras.models.load_model('models/best_CNN_model_noBN.h5')
Example #29
# NOTE: the snippet is truncated in the source; it begins partway through
# an AlexNet-style Sequential CNN, so the earlier layers are missing.
model.add(BatchNormalization())
# Dropout
model.add(Dropout(0.5))

# 5th Convolutional Layer
model.add(Conv2D(filters = 256, kernel_size = (3,3), strides = (1,1), padding = 'same'))
model.add(Activation('relu'))
# Batch Normalisation
model.add(BatchNormalization())
# Pooling Layer
model.add(MaxPooling2D(pool_size = (3,3), strides = (2,2), padding = 'valid'))
# Dropout
model.add(Dropout(0.5))

# Passing it to a dense layer
model.add(Flatten())

# 1st Dense Layer
model.add(Dense(4096, input_shape = (224*224*3,)))
model.add(Activation('relu'))
# Add Dropout to prevent overfitting
model.add(Dropout(0.25))
# Batch Normalisation
model.add(BatchNormalization())

# 2nd Dense Layer
model.add(Dense(4096))
model.add(Activation('relu'))
# Add Dropout
model.add(Dropout(0.5))
# Batch Normalisation (the snippet is cut off here in the source)
Example #30
def create_model(noise=True,
                 first_kernel_size=(7, 7),
                 n_filters=64,
                 n_covul_layers=5,
                 activation='swish',
                 dense_neurons=1024,
                 dropout=0.5,
                 lr=0.0001):
    kernel = (3, 3)
    n_classes = len(classes)

    input_layer = Input(shape=(300, 300, 3))
    x = input_layer
    if noise:
        # keep a separate reference so Model() below still receives the Input
        x = GaussianNoise(0.1)(x)

    model = BatchNormalization(axis=[1, 2])(x)

    model = Conv2D(filters=n_filters,
                   kernel_size=first_kernel_size,
                   activation=activation)(model)
    model = BatchNormalization(axis=[1, 2])(model)
    model = MaxPooling2D((2, 2))(model)

    for i in range(2, n_covul_layers):
        model = Conv2D(filters=n_filters * i,
                       kernel_size=kernel,
                       activation=activation)(model)
        model = Conv2D(filters=n_filters * i,
                       kernel_size=kernel,
                       activation=activation,
                       padding='same')(model)
        model = BatchNormalization(axis=[1, 2])(model)
        model = MaxPooling2D((2, 2))(model)

    model = Conv2D(filters=n_filters * (n_covul_layers + 1),
                   kernel_size=kernel,
                   activation=activation,
                   padding='same')(model)
    model = Conv2D(filters=n_filters * (n_covul_layers + 1),
                   kernel_size=kernel,
                   activation=activation,
                   padding='same')(model)
    model = BatchNormalization(axis=[1, 2])(model)
    model = MaxPooling2D((2, 2))(model)

    model = Flatten()(model)
    model = Dense(dense_neurons, activation=activation)(model)
    model = BatchNormalization()(model)
    model = Dropout(dropout)(model)

    model = Dense(dense_neurons // 2, activation=activation)(model)  # integer unit count
    model = BatchNormalization()(model)
    model = Dropout(dropout)(model)

    output = Dense(n_classes, activation="softmax")(model)

    model = Model(input_layer, output)
    model.compile(loss="sparse_categorical_crossentropy",
                  optimizer=keras.optimizers.Adam(learning_rate=lr),
                  metrics=["accuracy"])

    return model
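classes is a module-level list in the source; an illustrative setup:

classes = ["cat", "dog", "horse"]  # assumed global, not shown in the snippet

model = create_model(noise=True, n_covul_layers=5, lr=0.0001)
model.summary()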