Example #1
import tensorflow as tf
from tensorflow.keras.layers import Input, Conv2D, MaxPool2D, Flatten, Dense
from tensorflow.keras.models import Model


def get_model(input_shape, nb_classes, learning_rate=1e-3):
    # Create a simple model.
    inputs = Input(shape=input_shape)

    model = Conv2D(20,
                   kernel_size=(5, 5),
                   activation='relu')(inputs)
    model = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(model)
    model = Conv2D(50, kernel_size=(5, 5), strides=(1, 1),
                   activation='relu')(model)
    model = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(model)
    model = Flatten()(model)
    model = Dense(512, activation='relu')(model)
    model = Dense(nb_classes, activation='softmax')(model)

    model = Model(inputs, model)
    opt = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
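A minimal usage sketch (assuming MNIST-sized 28x28 grayscale inputs and 10 classes):

model = get_model(input_shape=(28, 28, 1), nb_classes=10)
model.summary()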
Example #2
def get_model(input_shape, nb_classes):
    # Create a simple model.
    inputs = keras.Input(shape=input_shape)

    model = Conv2D(6, kernel_size=(5, 5), strides=(1, 1), activation='relu', input_shape=input_shape, padding="same")(inputs)
    model = AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(model)
    model = Conv2D(16, kernel_size=(5, 5), strides=(1, 1), activation='relu', padding='valid')(model)
    model = AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(model)
    model = Flatten()(model)
    model = Dense(120, activation='relu')(model)
    model = Dense(84, activation='relu')(model)
    model = Dense(nb_classes, activation='softmax')(model)

    model = keras.Model(inputs, model)
    # categorical cross-entropy is the standard loss for a softmax classifier
    model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])

    return model
Example #3
def build_model(input_shape=(256, 256, 4), per_class_f1=True):
    input_layer = Input(shape=input_shape)

    model = _build_stemV1(input_layer)
    for i in range(3):
        model = inceptionV1_module(model, regularizer=l2(1e-4))

    model = GlobalAveragePooling2D()(model)
    model = Flatten()(model)
    outputs = Dense(28, activation='sigmoid', kernel_regularizer=l2(1e-4))(model)

    optimizer = tf.keras.optimizers.Adam(0.001)  # tf.train.AdamOptimizer is the legacy TF1 API

    model = keras.Model(inputs=input_layer, outputs=outputs)

    model.compile(optimizer=optimizer,
                  loss='binary_crossentropy',
                  metrics=[f1_macro, 'accuracy'])

    return model
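The helpers _build_stemV1, inceptionV1_module and f1_macro come from elsewhere in the source project. As an illustrative assumption (the real module may differ), a minimal Inception-v1 style module could look like:

from tensorflow.keras.layers import Conv2D, MaxPooling2D, concatenate

def inceptionV1_module(x, regularizer=None):
    # four parallel branches, concatenated along the channel axis
    b1 = Conv2D(64, (1, 1), padding='same', activation='relu',
                kernel_regularizer=regularizer)(x)
    b2 = Conv2D(96, (1, 1), padding='same', activation='relu',
                kernel_regularizer=regularizer)(x)
    b2 = Conv2D(128, (3, 3), padding='same', activation='relu',
                kernel_regularizer=regularizer)(b2)
    b3 = Conv2D(16, (1, 1), padding='same', activation='relu',
                kernel_regularizer=regularizer)(x)
    b3 = Conv2D(32, (5, 5), padding='same', activation='relu',
                kernel_regularizer=regularizer)(b3)
    b4 = MaxPooling2D((3, 3), strides=(1, 1), padding='same')(x)
    b4 = Conv2D(32, (1, 1), padding='same', activation='relu',
                kernel_regularizer=regularizer)(b4)
    return concatenate([b1, b2, b3, b4])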
Example #4
def build_bbox_separable_model(
        input_size=(56, 56, 3),
        n_conv_blocks=3,
        base_conv_n_filters=16,
        n_dense_layers=2,
        dense_size=256,
        dropout_rate=0.25,
        loss=MeanSquaredError(),
        optimizer=Adam(),
        metrics=[MeanAbsoluteError(),
                 MeanBBoxIoU(x2y2=False)]):
    model_in = Input(shape=input_size)

    model = model_in
    for i in range(n_conv_blocks):
        model = SeparableConv2D(base_conv_n_filters * (2**i), (3, 3),
                                padding='same',
                                activation='relu',
                                name="block-{}_conv_0".format(i))(model)
        model = SeparableConv2D(base_conv_n_filters * (2**i), (3, 3),
                                padding='same',
                                activation='relu',
                                name="block-{}_conv_1".format(i))(model)
        model = MaxPooling2D((2, 2),
                             strides=(2, 2),
                             name="block-{}_pool".format(i))(model)

    model = Flatten()(model)
    for i in range(n_dense_layers):
        model = Dense(dense_size, activation='relu',
                      name="dense-{}".format(i))(model)
        model = Dropout(dropout_rate)(model)

    model_out = Dense(4, activation='sigmoid', name="output")(model)
    model = Model(model_in, model_out)
    model.compile(loss=loss, optimizer=optimizer, metrics=metrics)

    return model
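MeanBBoxIoU is a custom metric from the source project. A minimal sketch of what such a metric could look like (an assumption: boxes are normalized (x, y, w, h) when x2y2=False, corner format (x1, y1, x2, y2) otherwise):

import tensorflow as tf

class MeanBBoxIoU(tf.keras.metrics.Metric):
    def __init__(self, x2y2=True, name='mean_bbox_iou', **kwargs):
        super().__init__(name=name, **kwargs)
        self.x2y2 = x2y2
        self.total_iou = self.add_weight(name='total_iou', initializer='zeros')
        self.count = self.add_weight(name='count', initializer='zeros')

    def _to_corners(self, b):
        if self.x2y2:
            return b  # already (x1, y1, x2, y2)
        return tf.concat([b[:, :2], b[:, :2] + b[:, 2:]], axis=-1)

    def update_state(self, y_true, y_pred, sample_weight=None):
        t, p = self._to_corners(y_true), self._to_corners(y_pred)
        lt = tf.maximum(t[:, :2], p[:, :2])   # intersection top-left
        rb = tf.minimum(t[:, 2:], p[:, 2:])   # intersection bottom-right
        wh = tf.maximum(rb - lt, 0.0)
        inter = wh[:, 0] * wh[:, 1]
        area_t = (t[:, 2] - t[:, 0]) * (t[:, 3] - t[:, 1])
        area_p = (p[:, 2] - p[:, 0]) * (p[:, 3] - p[:, 1])
        iou = inter / (area_t + area_p - inter + 1e-7)
        self.total_iou.assign_add(tf.reduce_sum(iou))
        self.count.assign_add(tf.cast(tf.shape(iou)[0], tf.float32))

    def result(self):
        return self.total_iou / tf.maximum(self.count, 1.0)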
Example #5 (fragment)
# basemodel is assumed to be a pretrained backbone (e.g. MobileNetV2) loaded earlier
# in the truncated source
model = basemodel.output
model = AveragePooling2D(pool_size=(7, 7))(model)
model = Flatten(name="flatten")(model)
model = Dense(128, activation="relu")(model)
model = Dropout(0.5)(model)
model = Dense(2, activation="softmax")(model)

# placing the new model on top of the base model
model = Model(inputs=basemodel.inputs, outputs=model)

# freeze every layer in the base model so it is not updated during the first training pass
for layer in basemodel.layers:
    layer.trainable = False

# compiling the model
opt = Adam(learning_rate=INIT_LR, decay=INIT_LR / EPOCHS)  # 'lr' is the deprecated alias
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])

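# the `aug` augmenter used below is not defined in this fragment; a typical
# setup (an assumption) would be:
from tensorflow.keras.preprocessing.image import ImageDataGenerator
aug = ImageDataGenerator(rotation_range=20, zoom_range=0.15,
                         width_shift_range=0.2, height_shift_range=0.2,
                         shear_range=0.15, horizontal_flip=True, fill_mode="nearest")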
# training the nets
H = model.fit(aug.flow(trainX, trainY, batch_size=BS),
              steps_per_epoch=len(trainX) // BS,
              validation_data=(testX, testY),
              validation_steps=len(testX) // BS,
              epochs=EPOCHS)

# predictions on Test set
predictions = model.predict(testX, batch_size=BS)

# finding the index of the largest probability
predictions = np.argmax(predictions, axis=1)

# show a nicely formatted classification report
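# (the report call itself is cut off in the source; with scikit-learn, an assumption:)
from sklearn.metrics import classification_report
print(classification_report(testY.argmax(axis=1), predictions))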
Example #6
def build_autoencoder(input_size=32,
                      base_n_filters=8,
                      n_layers=1,
                      encoding_dims=128,
                      loss=MeanSquaredError(),
                      optimizer=Adam()):
    model_in = Input(shape=(input_size, input_size, 3), name="input")

    model = model_in
    for i in range(n_layers):
        model = DepthwiseConv2D(
            (5, 5),
            padding='same',
            activation='relu',
            name="encod_block_{}_depth_conv".format(i))(model)
        model = Conv2D(base_n_filters, (1, 1),
                       padding='same',
                       activation='relu',
                       name="encod_block_{}_conv".format(i))(model)
        model = MaxPooling2D((2, 2),
                             strides=(2, 2),
                             name="encod_block_{}_max_pool".format(i))(model)

    model = Flatten(name="encod_reshap")(model)
    model = Dense(encoding_dims, activation='relu', name="encod_dense")(model)
    if n_layers == 0:
        model = Dense(input_size * input_size * 3,
                      activation='relu',
                      name="decod_dense")(model)
        model_out = Reshape((input_size, input_size, 3),
                            name="decod_reshap")(model)
    else:
        model = Dense((input_size // (2**n_layers) * input_size //
                       (2**n_layers) * base_n_filters),
                      activation='relu',
                      name="decod_dense")(model)
        model = Reshape((input_size // (2**n_layers), input_size //
                         (2**n_layers), base_n_filters),
                        name="decod_reshap")(model)

    for i in range(n_layers):
        model = UpSampling2D((2, 2),
                             name="decod_block_{}_up_sampl".format(i))(model)
        model = DepthwiseConv2D(
            (5, 5),
            padding='same',
            activation='relu',
            name="decod_block_{}_depth_conv".format(i))(model)
        model = Conv2D(base_n_filters, (1, 1),
                       padding='same',
                       activation='relu',
                       name="decod_block_{}_conv".format(i))(model)

    if n_layers != 0:
        model_out = Conv2D(3, (1, 1),
                           padding='same',
                           activation='relu',
                           name="decod_final_conv")(model)

    model = Model(model_in, model_out)
    model.compile(loss=loss, optimizer=optimizer)

    return model
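A quick round-trip shape check, assuming the defaults above:

ae = build_autoencoder(input_size=32, n_layers=2)
print(ae.input_shape, ae.output_shape)  # (None, 32, 32, 3) (None, 32, 32, 3)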
Example #7
# the opening of this snippet is truncated in the source; it loads a VGG16 base,
# roughly (the input shape is an assumption):
from tensorflow.keras.applications import VGG16

vgg = VGG16(input_shape=(224, 224, 3),
            weights='imagenet',
            include_top=False)

for layer in vgg.layers:
    layer.trainable = False

from tensorflow.keras.layers import Flatten, Dense
from tensorflow.keras.models import Model

model = Flatten()(vgg.output)
final_model = Dense(2, activation='softmax')(model)

model = Model(inputs=vgg.input, outputs=final_model)

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

test_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0)

filenames = os.listdir("../working/test1")
categories = []
for filename in filenames:
    category = filename.split('.')[0]
    categories.append(category)

df1 = pd.DataFrame({'filename': filenames, 'category': categories})
df1.head(5)
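# `train_data` used below is not defined in this fragment; a plausible definition
# (an assumption, with a hypothetical ../working/train directory):
train_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0)
train_data = train_gen.flow_from_directory("../working/train",
                                           target_size=(224, 224),
                                           batch_size=32,
                                           class_mode='categorical')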

model.fit(train_data, epochs=5, steps_per_epoch=len(train_data))  # fit_generator is deprecated in TF2
Example #8 (fragment)
    if downscale:
        model = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(model)

    # dense classification head
    model = Flatten()(model)
    model = Dense(1024, activation="relu")(model)
    model = Dropout(rate=0.3)(model)
    dense = Dense(512, activation="relu")(model)

    head_root = Dense(168, activation='softmax', name='out_root')(dense)
    head_vowel = Dense(11, activation='softmax', name='out_vowel')(dense)
    head_consonant = Dense(7, activation='softmax', name='out_consonant')(dense)

    model = Model(inputs=inputs, outputs=[head_root, head_vowel, head_consonant])

    # loss_weights is assumed to be defined in the enclosing (truncated) scope
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'],
                  loss_weights=loss_weights)

    return model

class MultiOutputDataGenerator(tf.keras.preprocessing.image.ImageDataGenerator):
    def flow(self,
             x,
             y=None,
             batch_size=32,
             shuffle=True,
             sample_weight=None,
             seed=None,
             save_to_dir=None,
             save_prefix='',
             save_format='png',
             subset=None):
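        # The body is cut off in the source. A sketch of the usual pattern (an
        # assumption, not the original code; numpy assumed imported as np):
        # stack the per-head label arrays, let Keras augment, then split back.
        ordered_names = list(y.keys())
        target_lengths = {name: y[name].shape[1] for name in ordered_names}
        y_stacked = np.concatenate([y[name] for name in ordered_names], axis=1)
        for x_batch, y_batch in super().flow(x, y_stacked, batch_size=batch_size,
                                             shuffle=shuffle, seed=seed):
            out, start = {}, 0
            for name in ordered_names:
                out[name] = y_batch[:, start:start + target_lengths[name]]
                start += target_lengths[name]
            yield x_batch, out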
Example #9
import numpy as np
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical

(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()

x_train = np.reshape(x_train, (len(x_train), 28, 28, 1))
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1))
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

ip = Input(shape=(28, 28, 1))
x = Conv2D(16, (3, 3), activation='relu', padding='same')(ip)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Flatten()(x)
x = Dense(512, activation='relu')(x)
x = Dense(10, activation='softmax')(x)
model = Model(ip, x)

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train,
          y_train,
          epochs=10,
          batch_size=128,
          shuffle=True,
          verbose=1,
          validation_data=(x_test, y_test))
model.save("fashion_mnist2.h5")
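The saved model can then be restored for inference:

from tensorflow.keras.models import load_model
restored = load_model("fashion_mnist2.h5")
print(restored.evaluate(x_test, y_test, verbose=0))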
Example #10 (fragment)
# Fully-Connected-Classifier
# input_layer, model, dense_neurons, activation, learning_rate, momentum,
# train_data and test_data are assumed to be defined earlier in the truncated source
model = Flatten()(model)
model = Dense(dense_neurons, activation=activation)(model)

model = Dense(dense_neurons // 2, activation='tanh')(model)  # integer unit count

# Output Layer
output = Dense(10, activation="softmax")(model)

model = Model(input_layer, output)

# Compiling model
optimizer = keras.optimizers.SGD(learning_rate=learning_rate, momentum=momentum)
model.compile(
    loss="sparse_categorical_crossentropy",
    optimizer=optimizer,
    metrics=["accuracy"]
)
model.summary()

# Train the model
history = model.fit(
    train_data,
    epochs=13,
    validation_data=test_data
)

"""# Saving and Recreating the trained model"""

## Save the whole model
model.save('./trained_CNN/imagewoof/my_model_imagewoof.h5')
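## Recreate the trained model from the file (a minimal sketch; keras is in scope
## as in the SGD call above)
restored_model = keras.models.load_model('./trained_CNN/imagewoof/my_model_imagewoof.h5')
restored_model.summary()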