Code Example #1
def main():
    args = parser.parse_args()
    landmark = 'mouth'
    subfolders = ['real', 'fake']

    model = LipNet()
    model = model.to(args.device)
    # DataParallel wrapper for multi-GPU runs; the code below loads weights into and uses `model` directly
    net = nn.DataParallel(model).to(args.device)

    pretrained_dict = torch.load(
        args.pretrained, map_location=torch.device(args.device))
    model_dict = model.state_dict()
    # keep only the pretrained weights whose names and shapes match the current model
    pretrained_dict = {k: v for k, v in pretrained_dict.items()
                       if k in model_dict and v.size() == model_dict[k].size()}
    missed_params = [k for k in model_dict if k not in pretrained_dict]
    # print('loaded params/tot params:{}/{}'.format(len(pretrained_dict), len(model_dict)))
    # print('miss matched params:{}'.format(missed_params))
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)

    for subfolder in subfolders:
        file_list = os.listdir(os.path.join(args.input, subfolder))
        for video_id in file_list:
            input_path = os.path.join(
                args.input, subfolder, video_id, landmark)
            output_path = create_directory(
                os.path.join(args.output, subfolder, video_id))
            # the assumption is that this input path contains a mouth folder
            get_lipnet_features(model, input_path, output_path,
                                video_id, seq_size=args.seq_size, device=args.device)
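
The `parser` referenced in this `main()` is not shown. Below is a minimal sketch of what it might declare, inferred only from the attributes the code reads (`input`, `output`, `pretrained`, `seq_size`, `device`); the flag names, defaults, and help strings are assumptions:

import argparse

# Hypothetical parser definition; every default and help string below is an assumption.
parser = argparse.ArgumentParser(description='Extract LipNet features from mouth crops')
parser.add_argument('--input', required=True, help='root folder containing real/ and fake/ subfolders')
parser.add_argument('--output', required=True, help='root folder for the extracted features')
parser.add_argument('--pretrained', required=True, help='path to the pretrained LipNet weights')
parser.add_argument('--seq_size', type=int, default=29, help='number of frames per feature sequence')
parser.add_argument('--device', default='cuda', help="torch device string, e.g. 'cuda' or 'cpu'")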
Code Example #2
def main():
    args = parser.parse_args()
    landmarks = ['mouth', 'nose', 'both-eyes']
    subfolders = ['real', 'fake']
    for subfolder in subfolders:
        file_list = os.listdir(os.path.join(args.input, subfolder))
        for video_id in file_list:
            input_path = os.path.join(args.input, subfolder, video_id)
            # the assumption is that this input path contains 3 folders: mouth, nose, and both-eyes
            for landmark in landmarks:
                landmark_output_path = create_directory(
                    os.path.join(args.output, subfolder, video_id, landmark))
                landmark_dct_seq(input_path, video_id, landmark,
                                 landmark_output_path, args.seq_size)
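
`create_directory` is a project helper that is not shown; from the way its return value is used as an output path, it presumably creates the directory if needed and returns the path. A minimal sketch under that assumption:

import os

def create_directory(path):
    # Assumed behavior: create the directory (and any parents) if missing, then return the path.
    os.makedirs(path, exist_ok=True)
    return path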
Code Example #3
def process_faces(fa, input_path, video_id, save_path):
    '''
    Top-level method that takes all of the faces in a directory, performs an
    affine transformation, and extracts the mouth, nose, and eyes.
    '''
    list_dir_landmarks, faces_array, labels = get_landmarks_from_directory(
        os.path.join(input_path, video_id), fa)
    front256 = get_position(256)
    count = 0

    create_directory(os.path.join(save_path, 'mouth'))
    create_directory(os.path.join(save_path, 'both-eyes'))
    create_directory(os.path.join(save_path, 'nose'))
    # create_directory(os.path.join(save_path, 'left-eye'))
    # create_directory(os.path.join(save_path, 'right-eye'))

    for frame, preds, face in zip(labels, list_dir_landmarks, faces_array):
        if preds is not None:
            # get the list of landmarks
            # shape = preds[0]  # works on my computer, but not on Lewis
            # shape = preds[0][0]  # works on Lewis, but not on my computer
            # print(preds)

            shape = np.array(preds[0])
            shape = shape[17:]  # disregard the face-contour (jawline) points

            M = transformation_from_points(
                np.matrix(shape), np.matrix(front256))  # transform the face

            img = cv2.warpAffine(face, M[:2], (256, 256))
            mouth, nose, eyes = landmark_boundaries(front256, img)

            # mouth = cv2.resize(mouth, (256, 128))
            # nose = cv2.resize(nose, (128, 128))
            # eye1 = cv2.resize(eye1, (128, 128))
            # eye2 = cv2.resize(eye2, (128, 128))
            # eyes = cv2.resize(eyes, (256, 128))

            cv2.imwrite(f'{save_path}/mouth/{frame}.jpg', mouth)
            cv2.imwrite(f'{save_path}/nose/{frame}.jpg', nose)
            # cv2.imwrite(f'{save_path}/left-eye/{frame}.jpg', eye1)
            # cv2.imwrite(f'{save_path}/right-eye/{frame}.jpg', eye2)
            cv2.imwrite(f'{save_path}/both-eyes/{frame}.jpg', eyes)

        else:
            count += 1
            print('No Preds:', count)
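
The `fa` argument is a facial-landmark detector; the 68-point layout implied by `shape[17:]` matches detectors such as the `face_alignment` package, although the snippet does not say which one is used. A hedged usage sketch, assuming that library and placeholder paths:

import face_alignment

# Assumption: a face_alignment detector; newer releases spell the enum LandmarksType.TWO_D.
fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=False, device='cuda')
process_faces(fa, input_path='frames/real', video_id='video_0001', save_path='crops/real/video_0001')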
Code Example #4
def main():
    start = time.time()
    args = parser.parse_args()
    model = DeepSpeech.load_model(args.pretrained)
    model.to(args.device)

    # spectrogram parameters (defined here for reference; not used directly in this snippet)
    window_stride = .01
    window_size = .02
    sample_rate = 16000
    window = 'hamming'  # SpectConfig.window.value

    subfolders = ['real', 'fake']
    for subfolder in subfolders:
        file_list = os.listdir(os.path.join(args.input, subfolder))
        for video_id in file_list:
            input_path = os.path.join(args.input, subfolder, video_id)
            output_path = create_directory(
                os.path.join(args.output, subfolder, video_id))
            save_deepspeech2_features(model, input_path, output_path, video_id)

    print(time.time() - start)
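
`save_deepspeech2_features` is not shown, but the window parameters defined above are the usual DeepSpeech2-style spectrogram settings. A sketch of the log-magnitude STFT those values describe, using librosa rather than the project's own audio loader:

import librosa
import numpy as np

def log_spectrogram(wav_path, sample_rate=16000, window_size=.02, window_stride=.01, window='hamming'):
    # Sketch of the typical deepspeech.pytorch-style preprocessing; the real
    # save_deepspeech2_features implementation is not part of this snippet.
    y, _ = librosa.load(wav_path, sr=sample_rate)
    n_fft = int(sample_rate * window_size)          # 320 samples = 20 ms
    hop_length = int(sample_rate * window_stride)   # 160 samples = 10 ms
    spect = librosa.stft(y, n_fft=n_fft, hop_length=hop_length,
                         win_length=n_fft, window=window)
    return np.log1p(np.abs(spect))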
Code Example #5
def process_video(fa, subfolder, vid):
    # relies on the module-level `args` namespace and a global processed-video counter
    global count_processed
    output_path = create_directory(os.path.join(args.output, subfolder, vid))
    process_faces(fa, os.path.join(args.input, subfolder), vid, output_path)
    count_processed += 1
    return f'Finished processing video {vid}'
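
`process_video` reads the module-level `args` and a global counter and returns a status string, which suggests it is mapped over every video in the dataset. A minimal sequential driver under those assumptions (the detector construction is hypothetical):

if __name__ == '__main__':
    args = parser.parse_args()
    count_processed = 0
    # assumed landmark detector, as in the sketch after Code Example #3
    fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, device=args.device)
    for subfolder in ['real', 'fake']:
        for vid in os.listdir(os.path.join(args.input, subfolder)):
            print(process_video(fa, subfolder, vid))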