def train_top_model(): """VGGのボトルネック特徴量を入力とし、Dog vs Catの正解を出力とするFCネットワークを訓練""" # 訓練データをロード # ジェネレータではshuffle=Falseなので最初の1000枚がcats、次の1000枚がdogs train_data = np.load( os.path.join(result_dir, 'bottleneck_features_train.npy')) train_labels = np.array([0] * int(nb_train_samples / 2) + [1] * int(nb_train_samples / 2)) # (2000, 4, 4, 512) print(train_data.shape) # バリデーションデータをロード validation_data = np.load( os.path.join(result_dir, 'bottleneck_features_validation.npy')) validation_labels = np.array([0] * int(nb_validation_samples / 2) + [1] * int(nb_validation_samples / 2)) # (800, 4, 4, 512) print(validation_data.shape) # FCネットワークを構築 model = Sequential() model.add(Flatten(input_shape=train_data.shape[1:])) model.add(Dense(256, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer=optimizers.SGD(lr=1e-4, momentum=0.9), metrics=['accuracy']) history = model.fit(train_data, train_labels, nb_epoch=nb_epoch, batch_size=32, validation_data=(validation_data, validation_labels)) model.save_weights(os.path.join(result_dir, 'bottleneck_fc_model.h5')) save_history(history, os.path.join(result_dir, 'history_extractor.txt'))
def train_top_model(): """VGGのボトルネック特徴量を入力とし、正解を出力とするFCネットワークを訓練""" # 訓練データをロード # ジェネレータではshuffle=Falseなのでクラスは順番に出てくる # one-hot vector表現へ変換が必要 train_data = np.load( os.path.join(result_dir, 'bottleneck_features_train.npy')) train_labels = [i // nb_samples_per_class for i in range(nb_train_samples)] train_labels = np_utils.to_categorical(train_labels, nb_classes) # バリデーションデータをロード validation_data = np.load( os.path.join(result_dir, 'bottleneck_features_validation.npy')) validation_labels = [ i // nb_samples_per_class for i in range(nb_val_samples) ] validation_labels = np_utils.to_categorical(validation_labels, nb_classes) # FCネットワークを構築 model = Sequential() model.add(Flatten(input_shape=train_data.shape[1:])) model.add(Dense(256, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(nb_classes, activation='softmax')) model.summary() model.compile(loss='categorical_crossentropy', optimizer=optimizers.SGD(lr=1e-4, momentum=0.9), metrics=['accuracy']) history = model.fit(train_data, train_labels, nb_epoch=nb_epoch, batch_size=batch_size, validation_data=(validation_data, validation_labels)) model.save_weights(os.path.join(result_dir, 'bottleneck_fc_model.h5')) save_history(history, os.path.join(result_dir, 'history_extractor.txt'))
    preprocessing_function=preprocess_input)

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_rows, img_cols),
    color_mode='rgb',
    classes=classes,
    class_mode='categorical',
    batch_size=batch_size,
    shuffle=True)

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_rows, img_cols),
    color_mode='rgb',
    classes=classes,
    class_mode='categorical',
    batch_size=batch_size,
    shuffle=True)

# Fine-tuning
history = model.fit_generator(
    train_generator,
    samples_per_epoch=nb_train_samples,
    nb_epoch=nb_epoch,
    validation_data=validation_generator,
    nb_val_samples=nb_val_samples)

model.save_weights(os.path.join(result_dir, 'finetuning.h5'))
save_history(history, os.path.join(result_dir, 'history_finetuning.txt'))
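The fragment above starts in the middle of the ImageDataGenerator setup and assumes the fine-tuning model has already been assembled. A rough sketch of how that assembly typically looks in this post's Keras 1.x style follows; the layer cut-off (freezing everything before the last conv block) and the reuse of bottleneck_fc_model.h5 are assumptions, not statements from this section:

import os
from keras.applications.vgg16 import VGG16
from keras.models import Sequential, Model
from keras.layers import Input, Flatten, Dense, Dropout
from keras import optimizers

# Convolutional base of VGG16 with ImageNet weights (no FC top).
input_tensor = Input(shape=(img_rows, img_cols, 3))
vgg16 = VGG16(include_top=False, weights='imagenet', input_tensor=input_tensor)

# Rebuild the FC classifier from train_top_model() and initialize it from
# the weights saved there (assumption: bottleneck_fc_model.h5 is reused).
top_model = Sequential()
top_model.add(Flatten(input_shape=vgg16.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(nb_classes, activation='softmax'))
top_model.load_weights(os.path.join(result_dir, 'bottleneck_fc_model.h5'))

# Chain the conv base and the classifier into a single trainable model.
model = Model(input=vgg16.input, output=top_model(vgg16.output))

# Freeze everything before the last conv block so that only block5 and the
# classifier are updated during fine-tuning (the index 15 is an assumption).
for layer in model.layers[:15]:
    layer.trainable = False

model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
              metrics=['accuracy'])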
test_datagen = ImageDataGenerator(rescale=1.0 / 255)

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_rows, img_cols),
    color_mode='rgb',
    classes=classes,
    class_mode='categorical',
    batch_size=batch_size,
    shuffle=True)

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_rows, img_cols),
    color_mode='rgb',
    classes=classes,
    class_mode='categorical',
    batch_size=batch_size,
    shuffle=True)

# Train the model.
history = model.fit_generator(
    train_generator,
    samples_per_epoch=nb_train_samples,
    nb_epoch=nb_epoch,
    validation_data=validation_generator,
    nb_val_samples=nb_val_samples)

model.save_weights(os.path.join(result_dir, 'vgg_scratch.h5'))
save_history(history, os.path.join(result_dir, 'history_vgg_scratch.txt'))