Example #1
def add_mode(old_path, new_path, out_path):
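    # Merge two image modalities: rescale each new-mode frame to 64x64 and concatenate it with the old mode along axis 1.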
    old_modes = imgs.read_seqs(old_path)
    new_modes = imgs.read_seqs(new_path)

    def add_helper(name_i):
        old_i = old_modes[name_i]
        new_i = new_modes[name_i]
        new_i = preproc.rescale.scale(new_i, 64, 64)
        return np.concatenate([old_i, new_i], axis=1)

    unified = {name_i: add_helper(name_i) for name_i in new_modes}
    imgs.save_seqs(unified, out_path)
Example #2
File: auto.py Project: tjacek/res_ensemble
def extract_feats(in_path, model_path, out_path=None):
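    # Default the output to an 'ae_feats' directory beside the input, then save per-frame features.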
    if (not out_path):
        out_path = os.path.split(in_path)[0] + '/ae_feats'
    model = load_model(model_path)
    seq_dict = imgs.read_seqs(in_path)
    feat_dict = extract.frame_features(seq_dict, model)
    extract.save_seqs(feat_dict, out_path)
Example #3
File: __init__.py Project: tjacek/sim_nn
def extract(model_path, out_path, frame_path):
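    # Build an extractor that returns the activations of the model's "hidden" layer for every frame.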
    frames = imgs.read_seqs(frame_path)
    model = load_model(model_path)
    extractor = Model(inputs=model.input,
                      outputs=model.get_layer("hidden").output)
    feat_dict = single.extractor_template(frames, extractor)
    single.save_frame_feats(feat_dict, out_path)
Example #4
File: data.py Project: tjacek/res_ensemble
def make_dataset(in_path, frames=True):
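    # frames=True returns frame-level train/test datasets; frames=False returns sequence-level ones.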
    img_seqs = imgs.read_seqs(in_path)
    train, test = split(img_seqs.keys())
    if (frames):
        return to_dataset(train, img_seqs), to_dataset(test, img_seqs)
    else:
        return to_seq_dataset(train, img_seqs), to_seq_dataset(test, img_seqs)
Example #5
File: data.py Project: tjacek/sim_nn
def make_dataset(in_path, frames=True, full=False):
    img_seqs = imgs.read_seqs(in_path)
    fmt = to_dataset if (frames) else to_seq_dataset
    if (full):
        return fmt(img_seqs.keys(), img_seqs)
    else:
        train, test = split(img_seqs.keys())
        return fmt(train, img_seqs), fmt(test, img_seqs)
Example #6
File: local.py Project: tjacek/preproc
def compute(in_path, out_path, upsample=False):
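    # Write one comma-separated feature matrix (.txt) per input sequence.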
    seq_dict = imgs.read_seqs(in_path)
    files.make_dir(out_path)
    for name_i, seq_i in seq_dict.items():
        feat_seq_i = extract(seq_i)
        name_i = name_i.split('.')[0] + '.txt'
        out_i = out_path + '/' + name_i
        np.savetxt(out_i, feat_seq_i, delimiter=',')
Example #7
def binary_extract(frame_path, model_path, seq_path):
    # Extract features with each binary model in turn, freeing memory between models.
    files.make_dir(seq_path)
    paths = model_path if isinstance(model_path, list) else files.top_files(model_path)
    frame_dict = imgs.read_seqs(frame_path)
    for i, in_i in enumerate(paths):
        print(i)
        out_i = seq_path + '/' + in_i.split('/')[-1]
        extract.extract_features(frame_dict, in_i, out_i)
        gc.collect()
Example #8
File: check.py Project: tjacek/sim_nn
def compare_lenght(in_path):
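    # Print the mean sequence length per category for the test and train splits.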
    seq_dict = imgs.read_seqs(in_path)
    len_dict = get_len_dict(seq_dict)
    train, test = data.split(len_dict.keys())
    train, test = by_cat(train), by_cat(test)
    for cat_i in train.keys():
        train_i = np.mean([len_dict[name_i] for name_i in train[cat_i]])
        test_i = np.mean([len_dict[name_i] for name_i in test[cat_i]])
        print("%d,%.2f,%.2f" % (cat_i, test_i, train_i))
Example #9
def compute(in_path, out_path, upsample=False):
    seq_dict = imgs.read_seqs(in_path)
    extract = Extractor()
    files.make_dir(out_path)
    for name_i, seq_i in seq_dict.items():
        feat_seq_i = np.array([extract(frame_i) for frame_i in seq_i])
        name_i = name_i.split('.')[0] + '.txt'
        out_i = out_path + '/' + name_i
        if (upsample):
            feat_seq_i = upsampling(feat_seq_i)
        np.savetxt(out_i, feat_seq_i, delimiter=',')
Example #10
File: new.py Project: tjacek/sim_nn
def __call__(self, in_path, out_path, n_epochs, cat_i):
    frames = imgs.read_seqs(in_path)
    train, test = data.split_dict(frames)
    X, y = data.to_seq_dataset(train)
    X, y = self.gen(X, y, cat_i)
    n_channels = X[0].shape[-1]
    params = {"input_shape": (X[0].shape[1], X[0].shape[2], n_channels)}
    sim_metric, model = sim.build_siamese(params, self.make_model)
    sim_metric.fit(X, y, epochs=n_epochs, batch_size=100)
    if (out_path):
        model.save(out_path)
Example #11
def person_model(in_path, out_path, n_epochs=100):
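    # One-hot person labels are parsed from the sequence names; the model itself is fit on the frame dataset.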
    seq_dict = imgs.read_seqs(in_path)
    train, test = data.split(seq_dict.keys())
    persons = [data.parse_name(name_i)[1] - 1 for name_i in train]
    persons = keras.utils.to_categorical(persons)
    X, y = to_dataset(train, seq_dict)
    n_cats, n_channels = y.shape[1], X.shape[-1]
    model = models.make_exp(n_cats, n_channels)
    model.summary()
    model.fit(X, y, epochs=n_epochs, batch_size=256)
    model.save(out_path)
Example #12
def extract_person(frame_path, model_path, out_path, upsample=False):
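    # Predict features for each formatted sequence, optionally upsampling the result before saving.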
    seq_dict = imgs.read_seqs(frame_path)
    model = load_model(model_path)
    feat_dict = {}
    for name_i, seq_i in seq_dict.items():
        seq_i = data.format_frames(seq_i)
        seq_i = model.predict(seq_i)
        if (upsample):
            seq_i = local.upsampling(seq_i)
        feat_dict[name_i] = seq_i
    extract.save_seqs(feat_dict, out_path)
Example #13
def make_global(seq_path, frame_path, out_path):
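    # Join the sequence's global image to every frame along axis 0.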
    glob_dict = clean_dict(get_global_img(seq_path))
    seq_dict = clean_dict(imgs.read_seqs(frame_path))

    def frame_fun(frame_j, name_i):
        return np.concatenate([frame_j, glob_dict[name_i]], axis=0)

    new_seqs = {
        name_i: [frame_fun(frame_j, name_i) for frame_j in seq_i]
        for name_i, seq_i in seq_dict.items()
    }
    imgs.save_seqs(new_seqs, out_path)
Example #14
def make_sim_template(in_path, out_path, n_epochs, gen_pairs):
    frames = imgs.read_seqs(in_path)
    train, test = data.split_dict(frames)
    X, y = data.to_seq_dataset(train)
    X, y = gen_pairs(X, y)
    #    X,y=gen.binary_data(X,y,cat_i,n_samples)
    n_channels = X[0].shape[-1]
    params = {"input_shape": (X[0].shape[1], X[0].shape[2], n_channels)}
    sim_metric, model = sim.build_siamese(params, make_five)
    sim_metric.fit(X, y, epochs=n_epochs, batch_size=100)
    if (out_path):
        model.save(out_path)
Example #15
def action_imgs(in_path, out_path="action_imgs"):
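    # Collapse each sequence into a single summary image and write it as a PNG.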
    action_dict = imgs.read_seqs(in_path)
    #    action_dict=imgs.transform(preproc,action_dict)
    files.make_dir(out_path)
    action_dict = {
        name_i: diff_helper(frames_i)
        for name_i, frames_i in action_dict.items()
    }
    for name_i, frames_i in action_dict.items():
        out_i = out_path + '/' + name_i.split(".")[0] + ".png"
        frames_i = [scale(img_i) for img_i in frames_i]
        cv2.imwrite(out_i, sum_imgs(frames_i))
Example #16
def extract_feats(frame_path, model_path, out_path=None):
    extractor = load_model(model_path)
    img_seqs = imgs.read_seqs(frame_path)
    feats_seq = {
        name_i: data.format_frames(seq_i)
        for name_i, seq_i in img_seqs.items()
    }
    feat_dict = {
        name_i: extractor.predict(seq_i)
        for name_i, seq_i in feats_seq.items()
    }
    extract.save_seqs(feat_dict, out_path)
Example #17
def unify_agum(paths, ae_model, out_path):
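    # Merge augmented copies of training sequences from the extra paths into the base set, tagging names with the source index.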
    img_dict = [imgs.read_seqs(path_i) for path_i in paths]
    img_dict = [files.clean_dict(dict_i) for dict_i in img_dict]
    agum_set = img_dict[0]
    for i, dict_i in enumerate(img_dict[1:]):
        for name_j, seq_j in dict_i.items():
            if (in_train(name_j)):
                name_j = "%s_%d" % (name_j, i)
                agum_set[name_j] = seq_j
    files.make_dir(out_path)
    seq_path = "%s/%s" % (out_path, "frames")
    imgs.save_seqs(agum_set, seq_path)
    simple_agum(out_path, ae_model)
Example #18
File: ae.py Project: tjacek/sim_nn
def reconstruct(in_path, model_path, out_path=None, diff=False):
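    # Run every frame through the autoencoder and save the reconstructed sequences.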
    frames = imgs.read_seqs(in_path)
    model = load_model(model_path)
    frames = {
        name_i: data.format_frames(seq_i)
        for name_i, seq_i in frames.items()
    }
    rec_frames = {}
    for name_i, seq_i in frames.items():
        rec_seq_i = model.predict(seq_i)
        rec_seq_i = [np.vstack(frame_j.T) for frame_j in rec_seq_i]
        rec_frames[name_i] = rec_seq_i
    imgs.save_seqs(rec_frames, out_path)
Example #19
File: agum.py Project: tjacek/res_ensemble
def agum_template(raw_path, agum_path, agum, n_iters=10):
    raw_data = imgs.read_seqs(raw_path)
    train, test = data.split(raw_data.keys())
    train_data = {name_i: raw_data[name_i] for name_i in train}
    agum_dict = {}
    for name_i, seq_i in list(train_data.items()):
        for j in range(n_iters):
            # Re-run the (stochastic) augmenter each iteration so the copies differ.
            agum_seq_i = agum(images=seq_i)
            new_name_i = name_i + '_' + str(j)
            print(new_name_i)
            agum_dict[new_name_i] = agum_seq_i
    new_dict = {**raw_data, **agum_dict}
    imgs.save_seqs(new_dict, agum_path)
Example #20
File: vae.py Project: tjacek/sim_nn
def make_model(in_path, out_path=None, n_epochs=1000, recon=True):
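    # Note: fit() is called with X only, which assumes the model defines its own (VAE-style) loss internally.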
    frames = imgs.read_seqs(in_path)
    train, test = data.split_dict(frames)
    X, y = data.to_frame_dataset(train)
    X = np.array(X)
    #    add_noise(X)
    params = {'n_channels': X.shape[-1]}
    model = make_autoencoder(params)

    original_dim = 64 * 64

    model.summary()
    model.fit(X, epochs=n_epochs, batch_size=64)
    model.save(out_path)
Example #21
File: __init__.py Project: tjacek/sim_nn
def train_model(in_path, out_path, n_epochs=5, cat_i=0):
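    # An int cat_i turns this into a binary one-vs-rest task; any other value keeps all categories.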
    frames = imgs.read_seqs(in_path)
    train, test = data.split_dict(frames)
    X, y = data.to_frame_dataset(train)
    X = np.array(X)
    if (type(cat_i) == int):
        y = [int(y_i == cat_i) for y_i in y]
    y = keras.utils.to_categorical(y)
    n_cats = 2 if (type(cat_i) == int) else y.shape[-1]
    n_channels = X[0].shape[-1]
    model = make_model(n_cats, n_channels)
    model.fit(X, y, epochs=n_epochs, batch_size=32)
    if (out_path):
        model.save(out_path)
Example #22
File: ae.py Project: tjacek/sim_nn
def make_model(frames, out_path=None, n_epochs=1000, recon=True):
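    # Accepts either a path or a preloaded frame dict; trains the autoencoder on subsampled frames.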
    if (type(frames) == str):
        frames = imgs.read_seqs(frames)
    train, test = data.split_dict(frames)
    X, y = data.to_frame_dataset(train)
    X = sub_sample(X)
    X = np.array(X)
    #    add_noise(X)
    params = {
        'n_channels': X.shape[-1],
        "dim": (X.shape[1], X.shape[2]),
        'scale': (4, 4)
    }
    model, auto = make_autoencoder(params)
    model.summary()
    model.fit(X, X, epochs=n_epochs, batch_size=16)
    auto.save(out_path)
    if (recon):
        model.save(out_path + "_recon")
Example #23
def extract_features(frame_path, model_path, out_path):
    seq_dict = imgs.read_seqs(frame_path) if isinstance(frame_path, str) else frame_path
    extractor = make_extractor(load_model(model_path))
    feat_dict = frame_features(seq_dict, extractor)
    save_seqs(feat_dict, out_path)
Example #24
def unify_datasets(in_path, agum_path, out_path):  # for data augmentation
    data1, data2 = imgs.read_seqs(in_path), imgs.read_seqs(agum_path)
    train, test = data.split(data2.keys())
    new_data = {name_i + "_1": data2[name_i] for name_i in train}
    unified = {**data1, **new_data}
    imgs.save_seqs(unified, out_path)
Example #25
File: ae.py Project: tjacek/sim_nn
def extract(seq_dict, model_path, out_path=None):
    model = load_model(model_path)
    if (type(seq_dict) == str):
        seq_dict = imgs.read_seqs(seq_dict)
    feat_dict = single.extractor_template(seq_dict, model)
    single.save_frame_feats(feat_dict, out_path)
Example #26
File: audit.py Project: tjacek/preproc
def seq_len(in_path):
    img_dict = imgs.read_seqs(in_path)
    for seq_i in img_dict.values():
        print(seq_i[0].shape)
Example #27
File: check.py Project: tjacek/sim_nn
def get_proportion(in_path):
    img_seq = imgs.read_seqs(in_path)
    dims = [seq_i[0].shape for seq_i in img_seq.values()]
    prop = [dim_i[0] / dim_i[1] for dim_i in dims]
    print("mean%s median%s" % (np.mean(prop), np.median(prop)))
Example #28
File: check.py Project: tjacek/sim_nn
def count_frames(in_path):
    img_seq = imgs.read_seqs(in_path)
    seq_len = [len(seq_i) for seq_i in img_seq.values()]
    return sum(seq_len)
Example #29
def extract_frame_feats(frame_path, model_path, out_path=None):
    extractor = load_model(model_path)
    img_seqs = imgs.read_seqs(frame_path)
    feat_dict = extractor_template(img_seqs, extractor)
    save_frame_feats(feat_dict, out_path)
Example #30
def outliner_transform(in_path, out_path):
    seqs = imgs.read_seqs(in_path)
    seqs = {name_i: outliner(seq_i) for name_i, seq_i in seqs.items()}
    imgs.save_seqs(seqs, out_path)