# In[ ]:
# Load the ImageNet-pretrained backbone and freeze it so that only the
# freshly added classification head receives gradient updates.
base_pretrained_model = PTModel(input_shape=t_x.shape[1:], include_top=False, weights='imagenet')
base_pretrained_model.trainable = False

# In[ ]:
from keras import models, layers
from keras.optimizers import Adam

# Binary classification head:
#   image -> gaussian noise -> frozen backbone -> batch norm
#         -> spatial dropout -> global max pool -> dense/relu
#         -> dropout -> single sigmoid unit.
img_in = layers.Input(t_x.shape[1:], name='Image_RGB_In')
img_noise = layers.GaussianNoise(GAUSSIAN_NOISE)(img_in)
pt_features = base_pretrained_model(img_noise)
# NOTE(review): pt_depth is not consumed in this cell; kept because later
# cells may read it at module level.
pt_depth = base_pretrained_model.get_output_shape_at(0)[-1]
bn_features = layers.BatchNormalization()(pt_features)
feature_dropout = layers.SpatialDropout2D(DROPOUT)(bn_features)
gmp_dr = layers.GlobalMaxPooling2D()(feature_dropout)
dr_steps = layers.Dropout(DROPOUT)(layers.Dense(DENSE_COUNT, activation='relu')(gmp_dr))
out_layer = layers.Dense(1, activation='sigmoid')(dr_steps)

ship_model = models.Model(inputs=[img_in], outputs=[out_layer], name='full_model')
# NOTE(review): `lr` is the legacy Adam keyword (renamed `learning_rate`
# in newer Keras releases) — confirm against the installed version.
ship_model.compile(
    optimizer=Adam(lr=LEARN_RATE),
    loss='binary_crossentropy',
    metrics=['binary_accuracy'],
)
# Second variant of the head: here the pretrained features arrive through a
# standalone Input tensor (shaped like the backbone's output) instead of
# chaining the backbone itself into the graph.
base_pretrained_model = PTModel(input_shape=t_x.shape[1:], include_top=False, weights='imagenet')
base_pretrained_model.trainable = False

# ## Model Supplements
# Here we add a few other layers to the model to make it better suited for
# the classification problem.

# In[ ]:
from keras.layers import (GlobalAveragePooling2D, Dense, Dropout, Flatten, Input,
                          Conv2D, multiply, LocallyConnected2D, Lambda, AvgPool2D)
from keras.models import Model
from keras.optimizers import Adam
from keras.layers import BatchNormalization

# Feed point for precomputed backbone activations.
pt_features = Input(base_pretrained_model.get_output_shape_at(0)[1:], name='feature_input')
pt_depth = base_pretrained_model.get_output_shape_at(0)[-1]
bn_features = BatchNormalization()(pt_features)

# Multi-class head: GAP -> dropout -> dense/elu -> dropout -> softmax
# over t_y.shape[1] classes.
gap = GlobalAveragePooling2D()(bn_features)
gap_dr = Dropout(DROPOUT)(gap)
dr_steps = Dropout(DROPOUT)(Dense(DENSE_COUNT, activation='elu')(gap_dr))
out_layer = Dense(t_y.shape[1], activation='softmax')(dr_steps)

attn_model = Model(inputs=[pt_features], outputs=[out_layer], name='trained_model')
attn_model.summary()