Example #1
    def __init__(self, zFile):
        # Requires: from datetime import datetime
        output_dir = get_meta_dir()
        assert output_dir.is_dir()

        # Timestamp the log file name so repeated runs never overwrite each other.
        zTime = datetime.now().strftime("%Y%m%d-%H%M%S")

        self._log_path = output_dir / (zFile + "_" + zTime + ".txt")
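This and the later examples rely on helpers such as get_meta_dir(), get_model_dir() and get_part_dir() that are not shown. A minimal sketch of what they might look like, assuming data lives under a local ./data tree (the layout is an assumption, not part of the original code):

from pathlib import Path

# Assumed layout; the real project may differ.
_DATA_ROOT = Path("data")

def get_meta_dir():
    # Metadata, logs and pickled dataframes.
    return _DATA_ROOT / "meta"

def get_model_dir():
    # Saved models and checkpoints.
    return _DATA_ROOT / "models"

def get_part_dir(i_part, assert_exists=True):
    # Files of one dataset part; optionally require the directory to exist.
    path = _DATA_ROOT / f"part_{i_part}"
    if assert_exists:
        assert path.is_dir()
    return path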
Example #2
    def __init__(self):
        # Requires: import pandas as pd and import numpy as np
        df_c = pd.read_feather(get_meta_dir() / "face_clusters.feather")

        l_parts = []

        # Collect the dataset parts that are actually present on disk.
        for x in range(50):
            path = get_part_dir(x, False)
            if path.is_dir():
                l_parts.append(x)

        l_orig = []
        l_file = []
        l_part = []

        for iPart in l_parts:
            df_meta = read_metadata(iPart)

            # Each record is expected to be (original, list_of_fakes): one row per
            # file, remembering its original and the part it came from.
            for x in df_meta:
                num_fakes = len(x[1])
                l_orig.extend([x[0]] * (num_fakes + 1))
                l_file.append(x[0])
                l_file.extend(x[1])
                l_part.extend([iPart] * (num_fakes + 1))


        df = pd.DataFrame({'orig': l_orig, 'file': l_file, 'part': l_part})

        # Attach each original's cluster assignment, then drop the merge keys.
        df = df.merge(df_c, left_on='orig', right_on='video')
        df = df.drop(['video', 'chunk'], axis=1)

        # Flag which of the listed files actually exist on disk.
        l_file_tuple = list(zip(df.file, df.part))

        l_exists = []

        for x in l_file_tuple:
            filepath = get_part_dir(x[1]) / x[0]
            l_exists.append(filepath.is_file())

        df = df.assign(exists=l_exists)

        num_files = df.shape[0]
        num_originals = np.unique(df.orig).shape[0]
        num_clusters = np.unique(df.cluster).shape[0]

        # print(f"num_files = {num_files}, num_originals = {num_originals}, num_clusters = {num_clusters}")

        self._df = df
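The loop in Example #2 consumes df_meta as (original, list_of_fakes) pairs. A minimal sketch of a compatible read_metadata(), assuming each part ships a metadata.json that maps fake files to their original (this layout is an assumption):

import json

def read_metadata(i_part):
    # Assumed format: {"fake.mp4": {"label": "FAKE", "original": "orig.mp4"}, ...}
    with open(get_part_dir(i_part) / "metadata.json") as f:
        meta = json.load(f)

    d = {}
    for filename, info in meta.items():
        if info.get("label") == "FAKE":
            d.setdefault(info["original"], []).append(filename)
        else:
            d.setdefault(filename, [])

    # (original, [fakes]) pairs, matching how the loop above consumes df_meta.
    return list(d.items())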
Example #3
num_originals = azOriginal.shape[0]

# Hold out roughly rValidationSplit of the originals; the rest is used for training.
num_valid = int(1 + (rValidationSplit * num_originals))
num_train = num_originals - num_valid

azTest = azOriginal[:num_valid]
azTrain = azOriginal[num_valid:]

m_train = df.original.isin(azTrain)
m_test = df.original.isin(azTest)

# Every file must fall into exactly one of the two splits.
assert (m_train ^ m_test).all()

df = df.assign(m_train=m_train, m_test=m_test)

df.to_pickle(get_meta_dir() / "df_tgs.pkl")

idx_train = np.where(m_train)[0]

# Todo: seed
np.random.shuffle(idx_train)

# Cap each training run at num_max_files_per_run files by splitting the
# shuffled indices into roughly equal chunks.
num_max_files_per_run = 7000

num_splits = int(1 + idx_train.shape[0] / num_max_files_per_run)

l_idx_train = np.array_split(idx_train, num_splits)

z_model_name = "my_keras"
checkpoint_path = str(get_model_dir() / f"{z_model_name}.model")
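The shuffle above is still unseeded (note the Todo). A small sketch of a reproducible alternative using NumPy's Generator API, which also verifies that no chunk exceeds num_max_files_per_run files:

import numpy as np

# Seeded, reproducible replacement for np.random.shuffle(idx_train).
rng = np.random.default_rng(42)
rng.shuffle(idx_train)

l_idx_train = np.array_split(idx_train, num_splits)

# Sanity check: no training run sees more than num_max_files_per_run files.
for idx_chunk in l_idx_train:
    assert idx_chunk.shape[0] <= num_max_files_per_run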
Example #4
    c("qrbjzz")
    c("lssyhe")
    c("qhqtm")
    c("jqgia")
    c("xjabn")

    c("hpeya")
    c("ggoq")
    c("rfwek")
    c("hmfc")
    c("wervs")
    c("dhwgib")
    c("rzpsy")


input_dir = get_meta_dir()

df_all = pd.read_pickle(input_dir / "all_files.pkl")

l_original = []
l_part = []


def c(txt):
    # Match originals whose filename starts with the given prefix.
    m = df_all.original.str.startswith(txt)

    nOriginals = df_all.original[m].unique().shape[0]

    # The prefix must be unambiguous: exactly one original may match.
    assert nOriginals == 1

    l_original.append(df_all.original[m].iloc[0])
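A self-contained toy illustration of what a single c(...) call does; the filenames and column values below are made up purely for illustration:

import pandas as pd

df_all = pd.DataFrame({"original": ["qrbjzzab.mp4", "lssyhexy.mp4", "qhqtmpqr.mp4"]})
l_original = []

def c(txt):
    m = df_all.original.str.startswith(txt)
    assert df_all.original[m].unique().shape[0] == 1   # prefix must be unambiguous
    l_original.append(df_all.original[m].iloc[0])

c("qrbjzz")
print(l_original)   # ['qrbjzzab.mp4']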
Example #5
import pandas as pd
from skimage.transform import resize            # assumed source of resize()
from tensorflow.keras.models import load_model  # assumed Keras; the project may import it differently
# bce_dice_loss and my_iou_metric must come from the project's own loss/metric definitions.

img_size_target = 128
img_size_ori = 128


def upsample(img):
    # Resize to the model's input size; no-op when the sizes already match.
    if img_size_ori == img_size_target:
        return img
    return resize(img, (img_size_target, img_size_target), mode='constant', preserve_range=True)
    #res = np.zeros((img_size_target, img_size_target), dtype=img.dtype)
    #res[:img_size_ori, :img_size_ori] = img
    #return res



model = load_model(get_meta_dir() / "model_2", custom_objects={'bce_dice_loss': bce_dice_loss, 'my_iou_metric': my_iou_metric})

df = pd.read_pickle(get_meta_dir() / "df_tgs.pkl")

# Evaluate on the held-out split only.
df = df[df.m_test]

# A file counts as fake when its stem does not match its original.
m_fake = (df.original != df.file_stem)

df = df.assign(fake=m_fake)


# Group all files (and their masks) that belong to the same original.
g = df.groupby('file_stem')

s_files = g.file.apply(list)
s_masks = g.file_mask.apply(list)
s_target = g.fake.first()
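Since the three grouped series share the file_stem index, they can be collected into one per-stem frame for downstream evaluation, e.g.:

# One row per original file_stem: its files, its masks, and whether it is fake.
df_groups = pd.DataFrame({'files': s_files, 'masks': s_masks, 'fake': s_target})
print(df_groups.head())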