Example #1
def sample_image_pair(iPart, video_real_path, video_fake_path):
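    # Sample line-scan images along facial feature lines from an aligned
    # real/fake video pair. For each feature set, up to 100 images are drawn
    # per video and the stacked batches are saved as paired .npy files.
    # Assumes numpy as np and the project helpers (read_video,
    # find_two_consistent_faces, get_feature_lines, ...) are in scope.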

    W = 128

    video_real = read_video(video_real_path, 32)

    faces = find_two_consistent_faces(video_real)

    invalid = (faces[0] is None) or (faces[1] is None)

    if invalid:
        return

    video_fake = read_video(video_fake_path, 32)

    x_max = video_real.shape[2]
    y_max = video_real.shape[1]

    anFeatureSet = get_feature_sets()

    for l_feature_set in anFeatureSet:
        l_image_real = []
        l_image_fake = []

        for i in range(100):

            anLines = get_feature_lines(x_max, y_max, faces, l_feature_set, W,
                                        5)

            if anLines is None:
                continue

            anImageReal = sample_feature_image(anLines, video_real)
            l_image_real.append(anImageReal)

            anImageFake = sample_feature_image(anLines, video_fake)
            l_image_fake.append(anImageFake)

        if (len(l_image_real) > 0) and (len(l_image_fake) > 0):

            anImageSetReal = np.stack(l_image_real)
            anImageSetFake = np.stack(l_image_fake)

            zFilenameReal = f"IMG_p_{iPart}_{video_real_path.name}_{video_fake_path.name}_{l_feature_set[0]}_{l_feature_set[1]}_real"
            zFilenameFake = f"IMG_p_{iPart}_{video_real_path.name}_{video_fake_path.name}_{l_feature_set[0]}_{l_feature_set[1]}_fake"

            np.save(get_output_dir() / zFilenameReal, anImageSetReal)
            np.save(get_output_dir() / zFilenameFake, anImageSetFake)
Example #2
def sample_video_predict(mtcnn_detector, path):
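    # Predict-time sampling for a single video: crop a face from the first
    # frame and draw 500 line samples from the crop, returned as a pandas
    # DataFrame. The 'fake' column is filled with True for every row.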

    l_line = []
    l_fake = []
    l_angle = []
    l_pix = []

    n_target_size = 100
    outputsize = 128 + 64
    orig_video = read_video(path, 0)

    z_max = orig_video.shape[0]
    y_max = orig_video.shape[1]
    x_max = orig_video.shape[2]


    d_faces = _get_face_boxes(mtcnn_detector, orig_video, [0])

    bb_min, bb_max = get_random_face_box_from_z(d_faces, 0, x_max, y_max, z_max)

    real_image, _ = cut_frame(bb_min, bb_max, orig_video, orig_video, 0, outputsize, False)

    for i in range(500):
        rAngle, rPix, test_line = sample(real_image, n_target_size)

        l_line.append(test_line)
        l_fake.append(True)
        l_angle.append(rAngle)
        l_pix.append(rPix)  

    df = pd.DataFrame({'fake': l_fake, 'angle': l_angle, 'pix': l_pix, 'data': l_line})
    
    return df
Example #3
def sample_pair(mtcnn_detector, video_real_path, video_fake_path):
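    # Sample line 'cubes' from an aligned real/fake video pair. Each output
    # row holds a feature id followed by the real samples and then the fake
    # samples for the same line, keeping the two videos pixel-aligned.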

    num_frames = 32
    v_max = 9

    video_real = read_video(video_real_path, num_frames)
    video_fake = read_video(video_fake_path, num_frames)

    (face0, face1) = find_two_consistent_faces(mtcnn_detector, video_real)

    invalid = (face0 is None) or (face1 is None)

    if invalid:
        return get_error_line()

    z_max = video_real.shape[0]
    x_max = video_real.shape[2]
    y_max = video_real.shape[1]

    max_permutations = v_max * v_max * v_max * v_max
    num_samples = int(0.4 * max_permutations)

    lines = get_video_lines(x_max, y_max, z_max, face0, face1, v_max,
                            num_samples)

    l_data = []

    d_f = get_feature_converter()

    for zFeature in list(lines.keys()):

        real_samples = sample_cube(video_real, lines[zFeature]).reshape(
            -1, num_frames * 3)
        fake_samples = sample_cube(video_fake, lines[zFeature]).reshape(
            -1, num_frames * 3)

        num = real_samples.shape[0]
        iF = d_f[zFeature]

        anF = np.full((num, 1), iF, dtype=np.uint8)

        combined_samples = np.hstack([anF, real_samples, fake_samples])
        l_data.append(combined_samples)

    anData = np.concatenate(l_data)
    return anData
Example #4
def sample_pair(video_real_path, video_fake_path):
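    # Variant of sample_pair above without the v_max / num_samples knobs;
    # the row layout is the same: feature id, real samples, fake samples.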


    num_frames = 32

    video_real = read_video(video_real_path, num_frames)
    video_fake = read_video(video_fake_path, num_frames)

    (face0, face1) = find_two_consistent_faces(video_real)

    invalid = (face0 is None) or (face1 is None)

    if invalid:
        return None

    z_max = video_real.shape[0]
    x_max = video_real.shape[2]
    y_max = video_real.shape[1]

    lines = get_video_lines(x_max, y_max, z_max, face0, face1)

    l_data = []

    d_f = get_feature_converter()
    

    for zFeature in list(lines.keys()):
   
        # each line spans all num_frames frames, three channels per pixel
        real_samples = sample_cube(video_real, lines[zFeature]).reshape(-1, num_frames * 3)
        fake_samples = sample_cube(video_fake, lines[zFeature]).reshape(-1, num_frames * 3)

        num = real_samples.shape[0]
        iF = d_f[zFeature]

        anF = np.full((num, 1), iF, dtype=np.uint8)

        combined_samples = np.hstack([anF, real_samples, fake_samples])
        l_data.append(combined_samples)


    anData = np.concatenate(l_data)
    return anData
Example #5
def run_one():
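    # One-off smoke test: run the feature-line sampling on a single known
    # video from part 0 and straighten the resulting sample strip.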
    input_dir = get_part_dir(0)
    mtcnn_detector = MTCNNDetector()

    l_files = list(sorted(input_dir.iterdir()))

    l_files = [x for x in l_files if x.suffix == '.mp4']

    video_path = input_dir / "nrdnytturz.mp4"

    assert video_path.is_file()

    #video_path = l_files[126]

    video_size = 32

    W = 256
    H = 1

    video = read_video(video_path, video_size)

    x_max = video.shape[2]
    y_max = video.shape[1]
    z_max = video.shape[0]

    faces = find_two_consistent_faces(mtcnn_detector, video)

    featureset = ['l_mouth', 'r_mouth']

    anSample = sample_feature(video, faces, featureset, W, H, True)

    l_feature0 = np.array((*_get_integer_coords_single_feature(
        x_max, y_max, faces[0], featureset[0]), 0))
    r_feature0 = np.array((*_get_integer_coords_single_feature(
        x_max, y_max, faces[0], featureset[1]), 0))

    vector = r_feature0 - l_feature0

    length_vector = np.sqrt(vector.dot(vector))

    anSample = straighten_sample(anSample, length_vector)

    anSample = anSample.reshape(-1)
Example #6
def sample_image_single(iPart, video_path, isFake):
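    # Single-video counterpart of sample_image_pair: draw up to 100
    # feature-line images per feature set and save one .npy batch per set,
    # labelled 'real' or 'fake' according to isFake.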

    W = 128

    video = read_video(video_path, 32)

    faces = find_two_consistent_faces(video)

    invalid = (faces[0] is None) or (faces[1] is None)

    if invalid:
        return

    x_max = video.shape[2]
    y_max = video.shape[1]

    anFeatureSet = get_feature_sets()

    for l_feature_set in anFeatureSet:
        l_image = []

        for i in range(100):

            anLines = get_feature_lines(x_max, y_max, faces, l_feature_set, W,
                                        5)

            if anLines is None:
                continue

            anImage = sample_feature_image(anLines, video)
            l_image.append(anImage)

        if len(l_image) > 0:

            zClass = 'fake' if isFake else 'real'

            anImageSet = np.stack(l_image)
            zFilename = f"IMG_p_{iPart}_{video_path.name}_{l_feature_set[0]}_{l_feature_set[1]}_{zClass}"
            np.save(get_output_dir() / zFilename, anImageSet)
Example #7
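# Snippet: given a fake video and its original (looked up in the metadata
# DataFrame df), read both and pull the face boxes from the fake video's
# middle frame. df, file, original, iCluster and part are defined by the
# surrounding context.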
l_video_set = list(df[df.orig == original].file)


num_frames = 32


print(f"Video: {file} Cluster: {iCluster} Original: {original} Part: {part}")


input_dir = get_part_dir(part)

assert (input_dir / file).is_file()
assert (input_dir / original).is_file()

video_real = read_video(input_dir / original, num_frames)
video_fake = read_video(input_dir / file, num_frames)

x_max = video_fake.shape[2]
y_max = video_fake.shape[1]

mtcnn_detector = MTCNNDetector()


l_faces_fake = _get_face_boxes(mtcnn_detector, video_fake, [num_frames//2])

if len(l_faces_fake) == 0:
    raise RuntimeError("No face found in the fake video")

l_faces_fake = l_faces_fake[num_frames // 2]
Example #8
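# Snippet: set up a real/fake pair from part 7 and locate the face box in
# the middle frame of the real video. entry comes from the surrounding
# context; face boxes appear to be stored as (y, x), hence the swapped
# indices below.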
input_dir = get_part_dir(7)
mtcnn_detector = MTCNNDetector()

real_path = input_dir / entry[0]
fake_path = input_dir / entry[1][1]

assert real_path.is_file()
assert fake_path.is_file()

video_size = 32

W = 256
H = 1

real_video = read_video(real_path, video_size)
fake_video = read_video(fake_path, video_size)

x_max = real_video.shape[2]
y_max = real_video.shape[1]
z_max = real_video.shape[0]

face = find_middle_face_box(mtcnn_detector, real_video)

x0 = face[0][1]
x1 = face[1][1]

y0 = face[0][0]
y1 = face[1][0]

real_image = real_video[z_max // 2]
Example #9
def sample_video_set(mtcnn_detector, entry):
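    # Build a labelled line-sample DataFrame for one original video and all
    # of its fakes: for each matching-format pair, cut the same face crop
    # from both videos and draw line samples from each, labelled fake=True
    # for the test video and fake=False for the original.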

    outputsize = 128 + 64
    n_target_size = 100
    num_fake_samples_per_frame = 500

    l_line = []
    l_fake = []
    l_angle = []
    l_pix = []

    orig_path = entry[0]

    try:
        orig_video = read_video(orig_path, 0)
    except Exception as err:
        print(err)
        return

    z_max = orig_video.shape[0]
    y_max = orig_video.shape[1]
    x_max = orig_video.shape[2]

    l_all = entry[1]

    for test_path in l_all:

        print ("     " + str(test_path))

        try:
            test_video = read_video(test_path, 0)
        except Exception as err:
            print(err)
            continue

        is_identical_format = (test_video.shape[0] == z_max) and (test_video.shape[1] == y_max) and (test_video.shape[2] == x_max)

        if not is_identical_format:
            print("Not identical formats")
            continue

        d_faces = find_spaced_out_faces_boxes(mtcnn_detector, test_video, 30)

        frame_min = np.array(list(d_faces.keys())).min()
        frame_max = np.array(list(d_faces.keys())).max()

        if frame_max == frame_min:
            print("No faces found")
            continue

        for _ in range(50):

            z_sample = np.random.choice(range(frame_min, frame_max))

            bb_min, bb_max = get_random_face_box_from_z(d_faces, z_sample, x_max, y_max, z_max)

            real_image, test_image = cut_frame(bb_min, bb_max, orig_video, test_video, z_sample, outputsize, False)
                
            for _ in range(num_fake_samples_per_frame):
                rAngle, rPix, test_line = sample(test_image, n_target_size)

                l_line.append(test_line)
                l_fake.append(True)
                l_angle.append(rAngle)
                l_pix.append(rPix)

            # draw the same number of samples from the original frame
            for _ in range(num_fake_samples_per_frame):
                rAngle, rPix, test_line = sample(real_image, n_target_size)

                l_line.append(test_line)
                l_fake.append(False)
                l_angle.append(rAngle)
                l_pix.append(rPix)

    df = pd.DataFrame({'fake': l_fake, 'angle': l_angle, 'pix': l_pix, 'data': l_line})
    
    return df
Example #10
def sample_video(mtcnn_detector, video_path, isDraw):
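    # Sample one strip per feature set from a single video, straighten each
    # strip using the pixel distance between its two features, and return
    # the flattened strips stacked into one array.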

    video_size = 32

    W = 256
    H = 1

    print(f"{video_path}")

    video = read_video(video_path, video_size)

    if video is None:
        return None

    if video.shape[0] != video_size:
        return None

    x_max = video.shape[2]
    y_max = video.shape[1]
    z_max = video.shape[0]

    faces = find_two_consistent_faces(mtcnn_detector, video)

    invalid = (faces[0] is None) or (faces[1] is None)

    if invalid:
        return None

    l_featuresets = [['l_mouth', 'r_mouth'], ['l_eye', 'r_eye'],
                     ['bb_min', 'bb_max'], ['c_nose', 'r_eye'],
                     ['l_mouth', 'c_nose'], ['f_min', 'f_max'],
                     ['l_eye', 'r_mouth']]

    l_sample = []

    for featureset in l_featuresets:
        anSample = sample_feature(video, faces, featureset, W, H, isDraw)

        l_feature0 = np.array((*_get_integer_coords_single_feature(
            x_max, y_max, faces[0], featureset[0]), 0))
        r_feature0 = np.array((*_get_integer_coords_single_feature(
            x_max, y_max, faces[0], featureset[1]), 0))

        vector = r_feature0 - l_feature0

        length_vector = np.sqrt(vector.dot(vector))

        if isDraw:
            plt.imshow(anSample)
            plt.show()

        anSample = straighten_sample(anSample, length_vector)

        if isDraw:
            plt.imshow(anSample)
            plt.show()

        anSample = anSample.reshape(-1)
        l_sample.append(anSample)

    assert len(l_sample) == len(l_featuresets)

    anData = np.stack(l_sample)

    return anData
Example #11
def process_part(iCluster):
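    # Write training crops for one cluster: for every original/fake pair,
    # cut matching face crops at random frames and save test/real/mask PNG
    # triplets into the cluster's output directory.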

    isDraw = False

    assert get_ready_data_dir().is_dir()

    output_dir = get_ready_data_dir() / f"c2_{iCluster}"

    if not output_dir.is_dir():
        output_dir.mkdir()

    assert output_dir.is_dir()

    v = VideoManager.VideoManager()

    l_d = v.get_cluster_metadata(iCluster)

    outputsize = 128 + 64

    mtcnn_detector = MTCNNDetector()

    for entry in l_d:

        orig_path = entry[0]

        print(str(orig_path))

        try:
            orig_video = read_video(orig_path, 0)
        except Exception as err:
            print(err)
            continue

        z_max = orig_video.shape[0]
        y_max = orig_video.shape[1]
        x_max = orig_video.shape[2]

        l_all = entry[1]
        l_all.append(orig_path)

        for test_path in l_all:

            print("     " + str(test_path))

            iSample = 0
            filename_base = f"{test_path.stem}"

            try:
                test_video = read_video(test_path, 0)
            except Exception as err:
                print(err)
                continue

            is_identical_format = (test_video.shape[0] == z_max) and (
                test_video.shape[1] == y_max) and (test_video.shape[2]
                                                   == x_max)

            if not is_identical_format:
                print("Not identical formats")
                continue

            d_faces = find_spaced_out_faces_boxes(mtcnn_detector, test_video,
                                                  30)

            for i in range(10):

                z_sample = np.random.choice(range(0, z_max))

                bb_min, bb_max = get_random_face_box_from_z(
                    d_faces, z_sample, x_max, y_max, z_max)

                im_mask, im_real, im_test = cut_frame(bb_min, bb_max,
                                                      orig_video, test_video,
                                                      z_sample, -1, False)

                filename = filename_base + f"_{iSample:03d}"
                im_test.save(output_dir / (filename + "_t.png"))
                im_real.save(output_dir / (filename + "_r.png"))
                im_mask.save(output_dir / (filename + "_m.png"))
                iSample += 1
Example #12
def get_sampling_cubes_for_part(iPart, output_dir):
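    # For each original video in a part, compute sampling cubes against all
    # of its fakes and pickle the combined DataFrame (one file per
    # original), skipping originals that already have a saved DataFrame.
    # Assumes cv2, numpy as np and pandas as pd are in scope.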

    l_d = read_metadata(iPart)
    part_dir = get_part_dir(iPart)

    num_videos = len(l_d)

    print(
        f"p_{iPart}: Fake detection on part {iPart}. {len(l_d)} original video(s)."
    )

    for idx_key in range(num_videos):

        current = l_d[idx_key]

        x_real = current[0]

        isCompleted = dataframe_exists(iPart, x_real)

        if isCompleted:
            # print(f"p_{iPart}_{x_real}: Already done.")
            continue
        else:
            print(f"p_{iPart}_{x_real}: Starting. {idx_key +1} of {len(l_d)}")

        x_real = part_dir / x_real
        assert x_real.is_file(), "Error: Original not found"

        vidcap = cv2.VideoCapture(str(x_real))

        video_real = read_video(vidcap)

        vidcap.release()

        num_frames = video_real.shape[0]

        l_fakes = current[1]

        l_df_video = []

        for x_fake in l_fakes:
            x_fake = part_dir / x_fake

            if not x_fake.is_file():
                print(
                    f"   WARNING: p_{iPart}_{x_real.stem}: Not a file: {x_fake}. Situation handled."
                )
                continue

            print(f"   p_{iPart}_{x_real.stem}: Processing {str(x_fake.stem)}")

            vidcap = cv2.VideoCapture(str(x_fake))

            video_fake = read_video(vidcap)

            vidcap.release()

            df_video = get_sampling_cubes(video_real, video_fake)

            df_video = df_video.assign(fake=str(x_fake.stem))

            l_df_video.append(df_video)

        if len(l_df_video) > 0:
            df_video = pd.concat(l_df_video, axis=0)
            df_video = df_video.assign(original=str(x_real.stem))
            df_video = df_video.assign(part=iPart)

            df_video.to_pickle(output_dir / f"p_{iPart}_{x_real.stem}_.pkl")
            print(f"p_{iPart}_{x_real.stem}: Complete.")

        else:
            print(
                f"p_{iPart}_{x_real.stem}: WARNING: No fakes found. No sampling cubes produced for video."
            )

    return []
Example #13
def sample_single(mtcnn_detector, video_path, rSampleSpace, isShowFaces):
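    # Single-video version of sample_pair: sample a fraction rSampleSpace
    # of the possible line permutations and return rows of feature id plus
    # sample values; optionally show the detected faces on the first and
    # last frames.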

    assert 0 < rSampleSpace <= 1.0

    num_frames = 32
    v_max = 9

    video = read_video(video_path, num_frames)

    if video is None:
        return get_error_line()

    if video.shape[0] == 0:
        return get_error_line()

    (face0, face1) = find_two_consistent_faces(mtcnn_detector, video)

    invalid = (face0 is None) or (face1 is None)

    if invalid:
        return get_error_line()

    if isShowFaces:
        print(f"{str(video_path)}")

        image0 = video[0].copy()
        image31 = video[num_frames - 1].copy()

        mtcnn_detector.draw(image0, [face0])
        mtcnn_detector.draw(image31, [face1])

        plt.imshow(image0)
        plt.show()

        plt.imshow(image31)
        plt.show()

    z_max = video.shape[0]
    x_max = video.shape[2]
    y_max = video.shape[1]

    max_permutations = v_max * v_max * v_max * v_max
    num_samples = int(rSampleSpace * max_permutations)

    lines = get_video_lines(x_max, y_max, z_max, face0, face1, v_max,
                            num_samples)

    l_data = []

    d_f = get_feature_converter()

    for zFeature in list(lines.keys()):
        samples = sample_cube(video,
                              lines[zFeature]).reshape(-1, num_frames * 3)

        num = samples.shape[0]
        iF = d_f[zFeature]

        anF = np.full((num, 1), iF, dtype=np.uint8)

        combined_samples = np.hstack([anF, samples])
        l_data.append(combined_samples)

    anData = np.concatenate(l_data)
    return anData