def main(model, train_file, valid_file, ckpt_folder, optimizer, batch_size,
         max_steps, lr, l2, fine_tuning):
    """Build the requested model trunk and run training/evaluation.

    Args:
        model: Model identifier, 'VGG16Trunk' or 'XceptionTrunk'.
        train_file: Path to the training data file.
        valid_file: Path to the validation data file.
        ckpt_folder: Directory where checkpoints are written.
        optimizer: Optimizer name; 'Adam' is instantiated with `lr`, any
            other value is passed through unchanged (e.g. a Keras
            optimizer identifier string).
        batch_size: Training batch size.
        max_steps: Maximum number of training steps.
        lr: Learning rate used when `optimizer == 'Adam'`.
        l2: L2 regularization factor; a regularizer is built when > 0.
        fine_tuning: Whether to fine-tune the pretrained trunk.

    Raises:
        ValueError: If `model` is not a recognized identifier.
    """
    if optimizer == 'Adam':
        optimizer = Adam(lr=lr)

    # NOTE(review): `regularizer` is built but never passed to the model
    # constructors below — confirm whether it should be wired through.
    if l2 > 0:
        regularizer = regularizers.l2(l2)
    else:
        regularizer = None

    metrics = ['categorical_accuracy']
    loss = 'categorical_crossentropy'

    if model == 'VGG16Trunk':
        model_fn = VGG16Trunk(IMAGE_SHAPE, INPUT_NAME, optimizer, loss,
                              metrics, fine_tuning)
    elif model == 'XceptionTrunk':
        # NOTE(review): this branch calls Xception, not XceptionTrunk —
        # verify the asymmetry with the VGG16Trunk branch is intentional.
        model_fn = Xception(IMAGE_SHAPE, INPUT_NAME, optimizer, loss,
                            metrics, fine_tuning)
    else:
        # Previously an unrecognized model fell through and crashed later
        # with a NameError on model_fn; fail fast with a clear message.
        raise ValueError("Unknown model: %r" % (model,))

    model_fn.summary()

    # Start training
    my_train_and_evaluate(model_fn=model_fn, train_file=train_file,
                          valid_file=valid_file, ckpt_folder=ckpt_folder,
                          batch_size=batch_size, max_steps=max_steps)
def process_images_thru_tl(batch_size=32, input1=1024, input2=1024, model_name="vgg"):
    """Transfer-learn a 128-class softmax head on a pretrained CNN trunk.

    Loads an ImageNet-pretrained base model (weights frozen), appends a
    pooling/flatten stage, up to two ReLU dense layers, and a 128-way
    softmax head; resumes from a previously saved weights file when one
    exists, trains with the project's data generators (logging to
    log.csv), and saves the weights back out.

    Args:
        batch_size: Training batch size for the train generator.
        input1: Units in the first dense layer; 0 skips the layer.
        input2: Units in the second dense layer; 0 skips the layer.
        model_name: One of "vgg", "inceptionv3", "resnet50",
            "mobilenet", or "xception".
    """
    with tf.device("/device:GPU:1"):
        if model_name == "vgg":
            model = VGG16(weights="imagenet", include_top=False,
                          input_shape=[224, 224, 3])
            size = 224
        elif model_name == "inceptionv3":
            model = InceptionV3(weights="imagenet", include_top=False,
                                input_shape=[299, 299, 3])
            size = 299
        elif model_name == "resnet50":
            model = ResNet50(weights="imagenet", include_top=False,
                             input_shape=[224, 224, 3])
            size = 224
        elif model_name == "mobilenet":
            # NOTE(review): this branch loads ResNet50, not MobileNet —
            # almost certainly a copy-paste bug, but MobileNet is not
            # visibly imported in this file; confirm and switch it.
            model = ResNet50(weights="imagenet", include_top=False,
                             input_shape=[224, 224, 3])
            size = 224
        elif model_name == "xception":
            model = Xception(weights="imagenet", include_top=False)
            size = 299

    print("%s %d %d %d" % (model_name, input1, input2, batch_size))
    model.summary()

    output_ = model.output
    with tf.device("/device:GPU:1"):
        # Fully-convolutional trunks get global pooling; the others flatten.
        if model_name == "inceptionv3" or model_name == "xception":
            x = GlobalAveragePooling2D(name='avg_pool')(output_)
        else:
            x = Flatten(name='flatten')(output_)
        if input1 != 0:
            x = Dense(input1, activation='relu', name='fc1')(x)
        if input2 != 0:
            x = Dense(input2, activation='relu', name='fc2')(x)
        x = Dense(128, activation='softmax', name='predictions')(x)

    # Freeze the pretrained trunk; only the new head layers are trained.
    for layer in model.layers:
        layer.trainable = False

    # BUG FIX: the new model's input must be the base model's input
    # tensor; the original passed the base model's *output* tensor
    # (Model(inputs=output_, ...)), which is not a valid graph input.
    my_model = Model(inputs=model.input, outputs=x)
    my_model.summary()

    # One canonical weights path, used for resume and for the final save.
    weights_file = "weights_%s_%d_%d_%d.h5" % (model_name, input1, input2, batch_size)
    if os.path.exists(weights_file):
        my_model.load_weights(weights_file)

    my_model.compile(loss='categorical_crossentropy', optimizer='adam',
                     metrics=['accuracy'])

    train_generator = get_train_data.train_generator(img_size=size,
                                                     batch_size=batch_size)
    valid_generator = get_train_data.valid_generator(img_size=size,
                                                     batch_size=8)
    csv_logger = CSVLogger('log.csv', append=True, separator=',')
    my_model.fit_generator(
        train_generator,
        steps_per_epoch=2000,
        epochs=10,
        validation_data=valid_generator,
        validation_steps=200,
        callbacks=[csv_logger])
    my_model.save_weights(weights_file)