import logging
import os

import numpy as np
import pandas as pd
from keras.models import Model
from keras.layers import Flatten, Dense
from keras.optimizers import SGD
from keras.callbacks import LearningRateScheduler, ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
from keras_vggface.vggface import VGGFace

from utils import load_data, mk_dir  # repo-local helpers
from mixup_generator import MixupGenerator
from random_eraser import get_random_eraser


def main():
    args = get_args()
    input_path = args.input
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    depth = args.depth
    k = args.width
    validation_split = args.validation_split
    use_augmentation = args.aug

    logging.debug("Loading data...")
    image, gender, age, _, image_size, _ = load_data(input_path)
    X_data = image
    y_data_g = np_utils.to_categorical(gender, 2)  # gender labels; referenced below but missing in the original
    y_data_a = np_utils.to_categorical(age, 101)

    # custom parameters
    hidden_dim = 512

    vgg_model = VGGFace(include_top=False, input_shape=(224, 224, 3))
    last_layer = vgg_model.get_layer('pool5').output
    x = Flatten(name='flatten')(last_layer)
    x = Dense(hidden_dim, activation='relu', name='fc6')(x)
    x = Dense(hidden_dim, activation='relu', name='fc7')(x)
    # two heads (gender: 2 classes, age: 101 classes) to match the two target
    # arrays fed to fit() below; the original had a single 2-class output
    out_g = Dense(2, activation='softmax', name='fc8_g')(x)
    out_a = Dense(101, activation='softmax', name='fc8_a')(x)
    model = Model(vgg_model.input, [out_g, out_a])

    sgd = SGD(lr=0.1, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss=["categorical_crossentropy", "categorical_crossentropy"],
                  metrics=['accuracy'])

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    logging.debug("Saving model...")
    mk_dir("models")
    with open(os.path.join("models", "vgg_{}_{}.json".format(depth, k)), "w") as f:
        f.write(model.to_json())

    mk_dir("checkpoints")
    callbacks = [LearningRateScheduler(schedule=Schedule(nb_epochs)),
                 ModelCheckpoint("checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                                 monitor="val_loss",
                                 verbose=1,
                                 save_best_only=True,
                                 mode="auto")]

    logging.debug("Running training...")

    # shuffle once, then split into train/validation sets
    data_num = len(X_data)
    indexes = np.arange(data_num)
    np.random.shuffle(indexes)
    X_data = X_data[indexes]
    y_data_g = y_data_g[indexes]
    y_data_a = y_data_a[indexes]
    train_num = int(data_num * (1 - validation_split))
    X_train = X_data[:train_num]
    X_test = X_data[train_num:]
    y_train_g = y_data_g[:train_num]
    y_test_g = y_data_g[train_num:]
    y_train_a = y_data_a[:train_num]
    y_test_a = y_data_a[train_num:]

    if use_augmentation:
        # random shifts/flips plus random erasing, wrapped by a mixup generator
        datagen = ImageDataGenerator(
            width_shift_range=0.1,
            height_shift_range=0.1,
            horizontal_flip=True,
            preprocessing_function=get_random_eraser(v_l=0, v_h=255))
        training_generator = MixupGenerator(X_train, [y_train_g, y_train_a],
                                            batch_size=batch_size,
                                            alpha=0.2, datagen=datagen)()
        hist = model.fit_generator(generator=training_generator,
                                   steps_per_epoch=train_num // batch_size,
                                   validation_data=(X_test, [y_test_g, y_test_a]),
                                   epochs=nb_epochs, verbose=1,
                                   callbacks=callbacks)
    else:
        hist = model.fit(X_train, [y_train_g, y_train_a],
                         batch_size=batch_size, epochs=nb_epochs,
                         callbacks=callbacks,
                         validation_data=(X_test, [y_test_g, y_test_a]))

    logging.debug("Saving weights...")
    model.save_weights(os.path.join("models", "vgg_{}_{}.h5".format(depth, k)), overwrite=True)
    pd.DataFrame(hist.history).to_hdf(
        os.path.join("models", "history_{}_{}.h5".format(depth, k)), "history")
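The script above calls `get_args` and `Schedule` without defining them. A minimal sketch of both, assuming an argparse front end that exposes exactly the attributes read at the top of `main()` and a simple step-decay learning-rate schedule; the default values and decay breakpoints here are assumptions, not the original settings:

import argparse


def get_args():
    # argparse front end matching the attributes read in main(); defaults are assumed
    parser = argparse.ArgumentParser(description="Train a VGGFace-based age/gender model")
    parser.add_argument("--input", "-i", type=str, required=True,
                        help="path to the input database file")
    parser.add_argument("--batch_size", type=int, default=32)
    parser.add_argument("--nb_epochs", type=int, default=30)
    parser.add_argument("--depth", type=int, default=16)
    parser.add_argument("--width", type=int, default=8)
    parser.add_argument("--validation_split", type=float, default=0.1)
    parser.add_argument("--aug", action="store_true", help="use data augmentation")
    return parser.parse_args()


class Schedule:
    # step-decay schedule for LearningRateScheduler; rates/breakpoints are assumed
    def __init__(self, nb_epochs):
        self.epochs = nb_epochs

    def __call__(self, epoch_idx):
        # return the learning rate for the given (0-based) epoch index
        if epoch_idx < self.epochs * 0.25:
            return 0.1
        elif epoch_idx < self.epochs * 0.5:
            return 0.02
        elif epoch_idx < self.epochs * 0.75:
            return 0.004
        return 0.0008

`LearningRateScheduler` calls the schedule with the current epoch index and expects a float back, which is why `Schedule` is written as a callable class that captures the total epoch count.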
import keras
from keras import optimizers
from keras.datasets import cifar10
from keras.layers import Input
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator

input_shape = (32, 32, 3)

# training hyperparameters; values were not given in the original snippet,
# so typical SGD defaults are assumed here
lrf = 0.1
lr_decay = 1e-6

if __name__ == "__main__":
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train, x_test = normalize(x_train, x_test)  # normalization helper defined elsewhere in the repo
    y_train = keras.utils.to_categorical(y_train, num_classes=10)
    y_test = keras.utils.to_categorical(y_test, num_classes=10)

    # MUNET model (architecture defined elsewhere in the repo)
    inputs = Input(input_shape)
    prediction = MUNET(inputs)
    model_star = Model(inputs=[inputs], outputs=[prediction])
    model_star.summary()
    params = model_star.count_params()
    params /= 1E6
    print()
    print("Number of params of model: %f M" % params)

    # optimization details
    sgd = optimizers.SGD(lr=lrf, decay=lr_decay, momentum=0.9, nesterov=True)
    model_star.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

    # data augmentation
    datagen = ImageDataGenerator(
        width_shift_range=4. / 32,   # randomly shift images horizontally (fraction of total width)
        height_shift_range=4. / 32)  # randomly shift images vertically (fraction of total height)
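The snippet breaks off after constructing the augmentation pipeline. A minimal sketch of how training would typically proceed from here, assuming a batch size, epoch count, and output path that do not appear in the original; `datagen.flow` feeds augmented batches into the standard Keras 2 `fit_generator` loop:

    # assumed hyperparameters; not part of the original snippet
    batch_size = 128
    nb_epochs = 200

    hist = model_star.fit_generator(
        datagen.flow(x_train, y_train, batch_size=batch_size),
        steps_per_epoch=len(x_train) // batch_size,
        epochs=nb_epochs,
        validation_data=(x_test, y_test),
        verbose=1)

    # hypothetical output path for the trained weights
    model_star.save_weights('munet_cifar10.h5')

Because the augmentation here uses only random shifts (no featurewise statistics), no `datagen.fit(x_train)` call is needed before `flow`.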