def train_classifier(fold, model_name, x_train, y_train, x_valid, y_valid, id_valid):
    print(model_name)
    x_train_act = x_train
    x_val_act = x_valid
    # x_train_act = x_train.astype(np.float32)
    # for idx in range(len(x_train_act)):
    #     x_train_act[idx] = preprocess_input(x_train_act[idx])
    # print(x_train_act.max())
    # x_val_act = x_valid.astype(np.float32)
    # for idx in range(len(x_val_act)):
    #     x_val_act[idx] = preprocess_input(x_val_act[idx])

    inputshape = x_train_act.shape[1:]
    PTModel, preprocess_input = get_model(model_name)

    # frozen ImageNet backbone with a small dense head on top
    base_pretrained_model = PTModel(input_shape=inputshape, include_top=False, weights='imagenet')
    base_pretrained_model.trainable = False

    from keras import models, layers
    from keras.optimizers import Adam

    img_in = layers.Input(inputshape, name='Image_RGB_In')
    x = base_pretrained_model(img_in)
    x = layers.Flatten(name='flatten')(x)
    x = Dense(256)(x)
    x = BatchActivate(x)
    x = Dropout(0.5)(x)
    x = Dense(64)(x)
    x = BatchActivate(x)
    x = Dropout(0.5)(x)
    out_layer = layers.Dense(1, activation='sigmoid')(x)

    class_model = models.Model(inputs=[img_in], outputs=[out_layer], name='full_model')
    class_model.compile(optimizer=Adam(lr=0.01),
                        loss='binary_crossentropy',
                        metrics=['binary_accuracy'])

    batch_size = 32
    base_name = '{}_{}'.format(model_name, fold)
    save_model_name = '../model/classifier/{}.model'.format(base_name)
    submission_file = '../result/classifier/{}.csv'.format(base_name)
    oof_file = '../result/classifier/{}_oof.csv'.format(base_name)
    print(save_model_name)
    print(submission_file)
    print(oof_file)

    board = keras.callbacks.TensorBoard(log_dir='log/classifier/{}'.format(base_name),
                                        histogram_freq=0, write_graph=True, write_images=False)
    early_stopping = EarlyStopping(monitor='val_binary_accuracy', mode='max', patience=5, verbose=1)
    model_checkpoint = ModelCheckpoint(save_model_name, monitor='val_binary_accuracy', mode='max',
                                       save_best_only=True, verbose=1)
    reduce_lr = ReduceLROnPlateau(monitor='val_binary_accuracy', mode='max', factor=0.5,
                                  patience=2, min_lr=0.00001, verbose=1)

    epochs = 200
    history = class_model.fit(x_train, y_train,
                              validation_data=(x_valid, y_valid),
                              epochs=epochs,
                              batch_size=batch_size,
                              callbacks=[board, early_stopping, reduce_lr, model_checkpoint],
                              verbose=1)

    # reload the best checkpoint and write out-of-fold predictions
    model = load_model(save_model_name)
    oof = model.predict(x_valid)
    df_oof = pd.DataFrame()
    df_oof['id'] = id_valid
    df_oof['target'] = oof.reshape(len(id_valid))  # flatten (N, 1) predictions before assigning
    df_oof.to_csv(oof_file, index=False)

    # predict on the test images and write the per-fold result file
    files = os.listdir('../input/test/images/')
    x_test = np.array([np.array(load_img("../input/test/images/{}".format(idx), grayscale=False))
                       for idx in files]).reshape(-1, img_size_target, img_size_target, 3)
    preds_test = model.predict(x_test)
    df_result = pd.DataFrame()
    df_result['id'] = files
    df_result['pre'] = preds_test.reshape(len(files))
    df_result.to_csv(submission_file, index=False)
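
# ---------------------------------------------------------------------------
# get_model() and BatchActivate() are called above but are not defined in this
# section.  The sketch below shows one plausible implementation, assuming they
# are not already defined elsewhere in the project: get_model() maps a model
# name to a (Keras application class, matching preprocess_input) pair, and
# BatchActivate() is the usual BatchNormalization + ReLU helper.  The exact
# model zoo and helper bodies are assumptions, not the original code.
# ---------------------------------------------------------------------------
def get_model(model_name):
    from keras.applications.vgg16 import VGG16, preprocess_input as vgg16_preprocess
    from keras.applications.resnet50 import ResNet50, preprocess_input as resnet50_preprocess
    from keras.applications.inception_v3 import InceptionV3, preprocess_input as inception_v3_preprocess
    model_zoo = {
        'vgg16': (VGG16, vgg16_preprocess),
        'resnet50': (ResNet50, resnet50_preprocess),
        'inception_v3': (InceptionV3, inception_v3_preprocess),
    }
    return model_zoo[model_name]


def BatchActivate(x):
    from keras.layers import Activation, BatchNormalization
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return x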
                                 y_col='has_ship_vec',
                                 target_size=IMG_SIZE,
                                 color_mode='rgb',
                                 batch_size=VALID_IMG_COUNT))  # one big batch
print(valid_x.shape, valid_y.shape)


# In[ ]:


t_x, t_y = next(train_gen)
print('x', t_x.shape, t_x.dtype, t_x.min(), t_x.max())
print('y', t_y.shape, t_y.dtype, t_y.min(), t_y.max())


# In[ ]:


base_pretrained_model = PTModel(input_shape=t_x.shape[1:], include_top=False, weights='imagenet')
base_pretrained_model.trainable = False


# In[ ]:


from keras import models, layers
from keras.optimizers import Adam

img_in = layers.Input(t_x.shape[1:], name='Image_RGB_In')
img_noise = layers.GaussianNoise(GAUSSIAN_NOISE)(img_in)
pt_features = base_pretrained_model(img_noise)
pt_depth = base_pretrained_model.get_output_shape_at(0)[-1]
bn_features = layers.BatchNormalization()(pt_features)
feature_dropout = layers.SpatialDropout2D(DROPOUT)(bn_features)
gmp_dr = layers.GlobalMaxPooling2D()(feature_dropout)
dr_steps = layers.Dropout(DROPOUT)(layers.Dense(DENSE_COUNT,
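
# ---------------------------------------------------------------------------
# Illustrative sketch (an assumption, not the original code): a self-contained
# version of the head being assembled above -- GaussianNoise on the input, a
# frozen ImageNet backbone, BatchNormalization, SpatialDropout2D,
# GlobalMaxPooling2D and a small dense classifier.  The backbone choice
# (VGG16) and the constants (input size, noise level, dropout rate, dense
# width) are placeholder values standing in for PTModel, GAUSSIAN_NOISE,
# DROPOUT and DENSE_COUNT.
# ---------------------------------------------------------------------------
def build_frozen_backbone_classifier(input_shape=(256, 256, 3),
                                     gaussian_noise=0.1,
                                     dropout=0.5,
                                     dense_count=128):
    from keras import models, layers
    from keras.applications.vgg16 import VGG16
    from keras.optimizers import Adam

    backbone = VGG16(input_shape=input_shape, include_top=False, weights='imagenet')
    backbone.trainable = False

    img_in = layers.Input(input_shape, name='Image_RGB_In')
    x = layers.GaussianNoise(gaussian_noise)(img_in)
    x = backbone(x)
    x = layers.BatchNormalization()(x)
    x = layers.SpatialDropout2D(dropout)(x)
    x = layers.GlobalMaxPooling2D()(x)
    x = layers.Dropout(dropout)(layers.Dense(dense_count, activation='relu')(x))
    out = layers.Dense(1, activation='sigmoid')(x)

    model = models.Model(inputs=[img_in], outputs=[out], name='frozen_backbone_classifier')
    model.compile(optimizer=Adam(lr=1e-3), loss='binary_crossentropy',
                  metrics=['binary_accuracy'])
    return model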
lrate = LearningRateScheduler(step_decay)
callbacks_list = [qwk, lrate]

# from keras.applications.inception_v3 import InceptionV3 as PTModel
# from keras.applications.inception_resnet_v2 import InceptionResNetV2 as PTModel
# from keras.applications.resnet import ResNet50 as PTModel
# from keras.applications.vgg19 import VGG19 as PTModel
from keras.applications.vgg16 import VGG16 as PTModel
from keras.layers import GlobalAveragePooling2D, Dense, Dropout, Flatten, Input, Conv2D, multiply, LocallyConnected2D, Lambda
from keras.layers import BatchNormalization

input_shape = (512, 512, 3)
in_lay = Input(shape=input_shape)
base_pretrained_model = PTModel(input_tensor=in_lay, include_top=False, weights='imagenet')
base_pretrained_model.trainable = False
# pt_depth = base_pretrained_model.get_output_shape_at(0)[-1]
pt_features = base_pretrained_model(in_lay)
bn_features = BatchNormalization()(pt_features)

# # here we do an attention mechanism to turn pixels in the GAP on and off
# attn_layer = Conv2D(64, kernel_size=(1, 1), padding='same', activation='relu')(Dropout(0.5)(bn_features))
# attn_layer = Conv2D(16, kernel_size=(1, 1), padding='same', activation='relu')(attn_layer)
# attn_layer = Conv2D(8, kernel_size=(1, 1), padding='same', activation='relu')(attn_layer)
# attn_layer = Conv2D(1, kernel_size=(1, 1), padding='valid', activation='sigmoid')(attn_layer)
# # fan it out to all of the channels
# up_c2_w = np.ones((1, 1, 1, pt_depth))
# up_c2 = Conv2D(pt_depth, kernel_size=(1, 1), padding='same', activation='linear', use_bias=False, weights=[up_c2_w])
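
# ---------------------------------------------------------------------------
# Illustrative sketch (an assumption, not the original code): the commented-out
# block above describes an attention-weighted global average pooling head -- a
# small 1x1-conv stack turns the backbone features into a single-channel
# sigmoid mask, a fixed 1x1 conv of ones broadcasts that mask to every feature
# channel, and the masked features are average-pooled and renormalised by the
# pooled mask.  The helper name, layer widths and the single sigmoid output
# below are placeholders.
# ---------------------------------------------------------------------------
import numpy as np
from keras.models import Model
from keras.layers import (Input, Conv2D, Dropout, Dense, multiply, Lambda,
                          GlobalAveragePooling2D, BatchNormalization)
from keras.applications.vgg16 import VGG16


def build_attention_gap_head(input_shape=(512, 512, 3)):
    in_lay = Input(shape=input_shape)
    backbone = VGG16(input_shape=input_shape, include_top=False, weights='imagenet')
    backbone.trainable = False
    pt_depth = backbone.output_shape[-1]  # number of backbone feature channels

    bn_features = BatchNormalization()(backbone(in_lay))

    # attention mask: squeeze the feature map down to one sigmoid channel
    attn_layer = Conv2D(64, kernel_size=(1, 1), padding='same', activation='relu')(Dropout(0.5)(bn_features))
    attn_layer = Conv2D(16, kernel_size=(1, 1), padding='same', activation='relu')(attn_layer)
    attn_layer = Conv2D(1, kernel_size=(1, 1), padding='valid', activation='sigmoid')(attn_layer)

    # fan the single-channel mask out to all feature channels with fixed weights of one
    up_c2_w = np.ones((1, 1, 1, pt_depth))
    up_c2 = Conv2D(pt_depth, kernel_size=(1, 1), padding='same', activation='linear',
                   use_bias=False, weights=[up_c2_w])
    up_c2.trainable = False
    attn_layer = up_c2(attn_layer)

    # masked global average pooling, renormalised by the pooled mask
    mask_features = multiply([attn_layer, bn_features])
    gap_features = GlobalAveragePooling2D()(mask_features)
    gap_mask = GlobalAveragePooling2D()(attn_layer)
    gap = Lambda(lambda x: x[0] / x[1], name='RescaleGAP')([gap_features, gap_mask])

    out = Dense(1, activation='sigmoid')(Dropout(0.25)(gap))
    return Model(inputs=[in_lay], outputs=[out])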