def main():
    # model = double_stream_model_12()
    # model.load_weights("checkpoints/" + "doublestream" + "_model_weights.h5")
    from keras.applications.mobilenet import preprocess_input

    base_model = MobileNet(weights='imagenet', include_top=False, input_shape=(HEIGHT, WIDTH, 3))
    preprocessing_function = preprocess_input

    class_list_file = "checkpoints/MobileNet_class_list.txt"
    class_list = utils.load_class_list(class_list_file)

    model = utils.build_finetune_model(base_model,
                                       dropout=1e-3,
                                       num_classes=len(class_list),
                                       fc_layers=[1024, 1024])
    model.load_weights("checkpoints/MobileNet_model_weights.h5")

    path = "/Users/tangxi/Downloads/Compressed/deepfake_baselinev1_1/test_videos"
    videos = getvideos(path)

    predictor = dlib.shape_predictor("/Users/tangxi/Downloads/Compressed/deepfake_baselinev1_1/shape_predictor_68_face_landmarks.dat")
    detector = dlib.get_frontal_face_detector()

    predictions = []
    for each in videos:
        p_each_video = 0.0
        vc = cv2.VideoCapture(each)
        rval, frame = vc.read()

        # Video FPS
        fps = vc.get(cv2.CAP_PROP_FPS)
        # Total number of frames in the video
        frame_all = vc.get(cv2.CAP_PROP_FRAME_COUNT)
        print("[INFO] video FPS: {}".format(fps))
        print("[INFO] total frames: {}".format(frame_all))
        print("[INFO] video duration: {}s".format(frame_all / fps))

        fake_count = 0
        total_count = 0
        while True:
            ret, frame = vc.read()
            if not ret:
                break
            total_count += 1

            img = frame.copy()
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            dets = detector(gray, 0)
            # Only keep frames where exactly one face is detected
            if len(dets) != 1:
                continue

            d = dets[0]
            top = max(d.top(), 0)
            bottom = max(d.bottom(), 0)
            left = max(d.left(), 0)
            right = max(d.right(), 0)
            # Crop the face with a small margin, clamped so the slice never goes negative
            face = img[max(top - 25, 0):bottom + 10, max(left - 8, 0):right + 8]
            print(face.shape)

            p_fake = predict_single_image(model, face)
            if p_fake > 0.5:
                fake_count += 1

        if total_count > 0 and fake_count / float(total_count) > 0.5:
            p_each_video = fake_count / float(total_count)
        else:
            p_each_video = 0.5
        predictions.append(p_each_video)

    submission_df = pd.DataFrame({"filename": videos, "label": predictions})
    submission_df.to_csv("submission.csv", index=False)
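# main() above calls a predict_single_image(model, face) helper that is not defined in
# this file. The sketch below is one plausible implementation, assuming the model outputs
# a softmax over class_list and that the "fake" class sits at index 0 (fake_index is a
# hypothetical parameter, not part of the original code).
def predict_single_image(model, face, fake_index=0):
    """Resize a BGR face crop to the network input size and return the fake probability."""
    import numpy as np
    from keras.applications.mobilenet import preprocess_input
    face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)            # cv2 crops are BGR
    face = cv2.resize(face, (WIDTH, HEIGHT)).astype(np.float32)
    face = preprocess_input(face)                           # MobileNet scaling to [-1, 1]
    face = np.expand_dims(face, axis=0)                     # add batch dimension
    probs = model.predict(face)[0]
    return float(probs[fake_index])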
def build_non_bottleneck_top_model(base_model, class_list):
    finetune_model = utils.build_finetune_model(base_model,
                                                dropout=args.dropout,
                                                fc_layers=FC_LAYERS,
                                                num_classes=len(class_list))
    if args.continue_training:
        finetune_model.load_weights(WEIGHTS_PATH)

    adam = Adam(lr=0.00001)
    finetune_model.compile(adam, loss='categorical_crossentropy', metrics=['accuracy'])
    return finetune_model
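# utils.build_finetune_model is referenced throughout this code but not defined in this
# file. The sketch below illustrates the usual pattern for such a helper, assuming it
# freezes the convolutional base and stacks fully connected layers with dropout on top;
# the real helper may differ (e.g. it may use Flatten() instead of global pooling).
def build_finetune_model_sketch(base_model, dropout, fc_layers, num_classes):
    from keras.layers import Dense, Dropout, GlobalAveragePooling2D
    from keras.models import Model

    # Freeze the pretrained base so only the new top layers are trained initially
    for layer in base_model.layers:
        layer.trainable = False

    x = GlobalAveragePooling2D()(base_model.output)
    for fc in fc_layers:
        x = Dense(fc, activation='relu')(x)
        x = Dropout(dropout)(x)
    predictions = Dense(num_classes, activation='softmax')(x)

    return Model(inputs=base_model.input, outputs=predictions)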
                                   preprocessing_function=preprocessing_function)

train_generator = train_datagen.flow_from_directory(TRAIN_DIR,
                                                    target_size=(HEIGHT, WIDTH),
                                                    batch_size=BATCH_SIZE)
validation_generator = val_datagen.flow_from_directory(VAL_DIR,
                                                       target_size=(HEIGHT, WIDTH),
                                                       batch_size=BATCH_SIZE)

# Save the list of classes for prediction mode later
class_list = utils.get_subfolders(TRAIN_DIR)
utils.save_class_list(class_list, model_name=args.model, dataset_name="")

finetune_model = utils.build_finetune_model(base_model,
                                            dropout=args.dropout,
                                            fc_layers=FC_LAYERS,
                                            num_classes=len(class_list))

if args.continue_training:
    finetune_model.load_weights("./checkpoints/" + args.model + "_model_weights.h5")
    print("load success!")

adam = Adam(lr=0.00001)
finetune_model.compile(adam, loss='categorical_crossentropy', metrics=['accuracy'])

num_train_images = utils.get_num_files(TRAIN_DIR)
num_val_images = utils.get_num_files(VAL_DIR)
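# The generators and image counts above feed a standard Keras training call. The sketch
# below shows one typical wiring; NUM_EPOCHS and the checkpoint configuration are
# assumptions, not values taken from this file.
from keras.callbacks import ModelCheckpoint

NUM_EPOCHS = 10  # assumed hyperparameter
checkpoint = ModelCheckpoint("./checkpoints/" + args.model + "_model_weights.h5",
                             monitor='val_acc',
                             save_best_only=True,
                             save_weights_only=True)
history = finetune_model.fit_generator(train_generator,
                                       epochs=NUM_EPOCHS,
                                       steps_per_epoch=num_train_images // BATCH_SIZE,
                                       validation_data=validation_generator,
                                       validation_steps=num_val_images // BATCH_SIZE,
                                       callbacks=[checkpoint],
                                       shuffle=True)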
    base_model = MobileNet(weights='imagenet', include_top=False, input_shape=(HEIGHT, WIDTH, 3))
elif model == "ResNet50":
    HEIGHT = 224
    WIDTH = 224
    from keras.applications.resnet50 import preprocess_input
    preprocessing_function = preprocess_input
    base_model = ResNet50(weights='imagenet', include_top=False, input_shape=(HEIGHT, WIDTH, 3))

class_list_file = "./class_list.txt"
class_list = utils.load_class_list(class_list_file)

finetune_model = utils.build_finetune_model(base_model,
                                            dropout=DROPOUT,
                                            fc_layers=FC_LAYERS,
                                            num_classes=len(class_list))
finetune_model.load_weights("./" + model + "_model_weights.h5")


def classify(image):
    global finetune_model
    try:
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    except cv2.error:
        print("ERROR classify: could not convert image into color", image.shape)
        return 0
    try:
        image = np.float32(cv2.resize(
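# The classify() helper above is cut off mid-statement. The standalone sketch below shows
# one plausible way the remaining steps could look (resize to the network input, apply the
# model-specific preprocessing, predict, and map the argmax back to a class name); the
# actual continuation of the original function is not shown in this file.
def classify_sketch(image):
    image = np.float32(cv2.resize(image, (WIDTH, HEIGHT)))
    batch = preprocessing_function(image.reshape(1, HEIGHT, WIDTH, 3))
    probs = finetune_model.predict(batch)[0]
    class_index = int(np.argmax(probs))
    return class_list[class_index], float(probs[class_index])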
    class_list,
    model_name=args.model,
    dataset_name=os.path.basename(args.dataset),
)

optim = eval(args.optimizer)(lr=args.lr)

if args.continue_training is not None:
    finetune_model = load_model(args.continue_training)
    if args.transfer_strategy == "finetune":
        utils.set_trainable(finetune_model, True)
else:
    finetune_model = utils.build_finetune_model(
        base_model,
        dropout=args.dropout,
        fc_layers=FC_LAYERS,
        num_classes=len(class_list),
        as_fixed_feature_extractor=(args.transfer_strategy == "fixed"),
        skip_interval=args.skip_interval,
    )

finetune_model.compile(optim, loss="categorical_crossentropy", metrics=["accuracy"])

if args.summarize_model:
    finetune_model.summary()

num_train_images = utils.get_num_files(TRAIN_DIR)
num_val_images = utils.get_num_files(VAL_DIR)


def lr_decay(epoch):
    if epoch % 20 == 0 and epoch != 0:
        lr = K.get_value(finetune_model.optimizer.lr)
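# The truncated lr_decay above is the start of a step-decay schedule. A minimal sketch of
# how such a schedule is typically wired into Keras training is shown below; the halving
# interval and the LearningRateScheduler wiring are assumptions, not part of this file.
from keras import backend as K
from keras.callbacks import LearningRateScheduler


def lr_step_decay(epoch):
    # Halve the current learning rate every 20 epochs, otherwise keep it unchanged.
    lr = K.get_value(finetune_model.optimizer.lr)
    if epoch % 20 == 0 and epoch != 0:
        lr = lr / 2.0
    return lr


lr_scheduler = LearningRateScheduler(lr_step_decay)
# Later passed to training, e.g. callbacks=[lr_scheduler] in fit_generator.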