Example #1
# Imports needed by this fragment; HEIGHT, WIDTH, getvideos and
# predict_single_image are defined elsewhere in the same module.
import cv2
import dlib
import pandas as pd
import utils
from keras.applications.mobilenet import MobileNet


def main():
    #model = double_stream_model_12()
    #model.load_weights("checkpoints/"+"doublestream"+"_model_weights.h5")
    from keras.applications.mobilenet import preprocess_input
    base_model = MobileNet(weights='imagenet', include_top=False,
                           input_shape=(HEIGHT, WIDTH, 3))
    preprocessing_function = preprocess_input
    class_list_file = "checkpoints/MobileNet_class_list.txt"
    class_list = utils.load_class_list(class_list_file)
    model = utils.build_finetune_model(base_model, dropout=1e-3,
                                       num_classes=len(class_list),
                                       fc_layers=[1024, 1024])
    model.load_weights("checkpoints/MobileNet_model_weights.h5")
    path = "/Users/tangxi/Downloads/Compressed/deepfake_baselinev1_1/test_videos"
    videos = getvideos(path)
    predictor = dlib.shape_predictor("/Users/tangxi/Downloads/Compressed/deepfake_baselinev1_1/shape_predictor_68_face_landmarks.dat")
    detector = dlib.get_frontal_face_detector()
    predictions = []
    for each in videos:
        p_each_video = 0.0
        vc = cv2.VideoCapture(each)
        rval, frame = vc.read()
        # get the video FPS
        fps = vc.get(cv2.CAP_PROP_FPS)
        # get the total number of frames
        frame_all = vc.get(cv2.CAP_PROP_FRAME_COUNT)
        print("[INFO] video FPS: {}".format(fps))
        print("[INFO] total frames: {}".format(frame_all))
        print("[INFO] duration: {}s".format(frame_all / fps))
        fake_count = 0
        total_count = 0
        while True:
            ret, frame = vc.read()
            if not ret:
                break
            total_count += 1
            img = frame.copy()
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            dets = detector(gray, 0)
            if len(dets) != 1:
                continue
            d = dets[0]
            # dlib rectangle -> clamped face box (top/bottom are rows, left/right are columns)
            y1 = max(d.top(), 0)
            y2 = max(d.bottom(), 0)
            x1 = max(d.left(), 0)
            x2 = max(d.right(), 0)
            # crop with a small margin, clamped so the slice start never goes negative
            face = img[max(y1 - 25, 0):y2 + 10, max(x1 - 8, 0):x2 + 8]
            print(face.shape)
            p_fake = predict_single_image(model, face)
            if p_fake > 0.5:
                fake_count += 1
        vc.release()
        # guard against videos where no frames were read
        if total_count > 0 and fake_count / float(total_count) > 0.5:
            p_each_video = fake_count / float(total_count)
        else:
            p_each_video = 0.5
        # append inside the loop so every video gets a prediction
        predictions.append(p_each_video)
    submission_df = pd.DataFrame({"filename": videos, "label": predictions})
    submission_df.to_csv("submission.csv", index=False)
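
Example #1 relies on two helpers that are not shown, getvideos and predict_single_image. The following is a minimal sketch of what they might look like, assuming getvideos simply lists video files under a directory and predict_single_image resizes the BGR face crop to the MobileNet input size (224x224 here, an assumption) and returns the probability of the fake class:

import os
import cv2
import numpy as np
from keras.applications.mobilenet import preprocess_input


def getvideos(path):
    # hypothetical helper: collect video files from the test directory
    exts = ('.mp4', '.avi', '.mov')
    return sorted(os.path.join(path, f) for f in os.listdir(path)
                  if f.lower().endswith(exts))


def predict_single_image(model, face, height=224, width=224):
    # hypothetical helper: resize the face crop, apply the same preprocessing
    # as training, and run one forward pass; treating the last output unit as
    # the "fake" class is an assumption about the class ordering
    face = cv2.resize(face, (width, height)).astype(np.float32)
    batch = preprocess_input(face.reshape(1, height, width, 3))
    out = model.predict(batch)
    return float(out[0][-1])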
Example #2
        os.makedirs("Predictions")

    # Read in your image
    image = cv2.imread(args.image)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    save_image = image
    image = np.float32(cv2.resize(image, (HEIGHT, WIDTH)))
    image = preprocessing_function(image.reshape(1, HEIGHT, WIDTH, 3))

    class_list_file = "./checkpoints/" + args.model + "_class_list.txt"

    class_list = utils.load_class_list(class_list_file)

    finetune_model = utils.build_finetune_model(base_model,
                                                num_classes=len(class_list),
                                                dropout=args.dropout,
                                                fc_layers=FC_LAYERS)

    finetune_model.load_weights("./checkpoints/" + args.model +
                                "_model_weights.h5")

    # Run the classifier and print results
    st = time.time()

    out = finetune_model.predict(image)

    confidence = out[0]
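
Example #2 ends with the raw prediction vector. The helper below is one possible way to turn that vector into a readable result; it assumes class_list is a flat list of class names (as loaded by utils.load_class_list) and reuses the out and st variables from the example:

import time
import numpy as np


def report_prediction(out, class_list, st):
    # hypothetical helper: map the prediction vector back to a class name
    probs = np.asarray(out[0], dtype=np.float32)
    idx = int(np.argmax(probs))              # most confident class index
    print("Predicted class:", class_list[idx])
    print("Confidence: %.4f" % probs[idx])
    print("Run time: %.3fs" % (time.time() - st))

It would be called as report_prediction(out, class_list, st) right after the predict call above.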
Example #3
        raise ValueError("You must pass an image path when using prediction mode.")

    images = []

    if args.image_dir is None:
        images.append(args.image)
    else:
        for file in next(os.walk(args.image_dir))[2]:
            if not file.lower().endswith(
                ('.png', '.jpg', '.bmp', '.ppm', '.tif')):
                continue
            images.append(os.path.join(args.image_dir, file))

    images.sort()

    class_list = utils.load_class_list(model_name=args.model,
                                       dataset_name=DATASET_NAME)

    #final_model = load_model(FINAL_MODEL_PATH)
    from keras.models import model_from_json
    with open(FINAL_MODEL_PATH, "r") as json_file:
        loaded_model_json = json_file.read()
    final_model = model_from_json(loaded_model_json)
    final_model.load_weights(FINAL_WEIGHTS_PATH)

    # Run the classifier and print results
    st = time.time()

    # Read in images
    for file in images:
        print("File: ", file)