Example No. 1
def test():
    cfg = get_config()
    inpts = tf.placeholder(tf.float32,
                           [cfg['batch'], cfg['prev_leng'], cfg['dims']])
    outs = tf.placeholder(
        tf.float32,
        [cfg['batch'], cfg['pred_ext'] + cfg['fut_leng'], cfg['dims']])
    #mod=model.rec_model(cfg)
    mod = model.rec_model(cfg)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg['GPU'])
    loader = utils.Loader(cfg)
    optimizer = tf.train.AdamOptimizer(cfg['lr'])

    # mini=opti.minimize(mod.loss)
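    # Clip every gradient to [-1, 1] before applying the update.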
    gvs = optimizer.compute_gradients(mod.loss)
    capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]
    mini = optimizer.apply_gradients(capped_gvs)
    init = tf.initializers.global_variables()
    saver = tf.train.Saver()
    newp = str(time.time())[-4:] + "_test_" + cfg["load_path"].split("/")[-3]
    os.mkdir(newp)
    os.mkdir(newp + "/model")
    os.mkdir(newp + "/data")
    total = []
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        if (cfg['load'] == "True"):
            saver.restore(sess, cfg['load_path'])
        else:
            print("TESTING must have load=True")
            exit()

        print("OK")
        for i in range(0, 200):
            x, gt, f, box, info, img = loader.serve_test()

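            # Target sequence: the last pred_ext input steps followed by the ground-truth future.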
            tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
            o = sess.run(mod.out,
                         feed_dict={
                             inpts: x,
                             mod.image: img,
                             outs: tot,
                             mod.target: tot,
                             mod.box: box,
                             mod.inputs: x,
                             mod.feats: f
                         })
            for k in range(0, len(info)):
                total.append(" ".join(str(info[k])))
                # Disabled: frame, names, l and df are not defined in this function.
                # with open(newp + "/data/test" + frame + names[l] + ".json",
                #           "w") as out_f:
                #     json.dump(df, out_f, sort_keys=True)
                #im = drawer.draw_points(x[k], o[k], gt[k], cfg, info[k])

        f = open(newp + "/dat", "w")
        f.write(str(set(total)))
        f.close()
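
These TensorFlow snippets all read their hyperparameters from get_config(), whose implementation is not part of the listing. A minimal sketch of the kind of dictionary it is assumed to return; only the keys mirror what the snippets actually read, and every value here is just a placeholder:

def get_config():
    # Hypothetical stand-in for the real config.yaml-backed loader.
    # Only the key names are taken from the snippets; all values are placeholders.
    return {
        'batch': 16, 'prev_leng': 10, 'pred_ext': 5, 'fut_leng': 20, 'dims': 2,
        'lr': 1e-4, 'd_lr': 1e-4, 'im_d_lr': 1e-4, 'wei': 0.1,
        'units': 128, 'lat_size': 64, 'type': 3, 'prefix': 'run',
        'clipping': True, 'disc_runs_per_gen_runs': 1,
        'GPU': 0, 'epochs': 10, 'load': "False", 'load_path': "",
    }
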
Example No. 2
def eval():
    # config
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    # dataset
    # train_dataset = rec_dataset(train_data_path, uid2idx_path, feature_dict_path)
    test_dataset = rec_dataset(test_data_path, uid2idx_path, feature_dict_path)
    # train_dataloader = Data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
    test_dataloader = Data.DataLoader(dataset=test_dataset,
                                      batch_size=batch_size,
                                      shuffle=True,
                                      num_workers=4)
    print("Building dataloader finished")
    # print("Total train length: {} test length: {}".format(len(train_dataset), len(test_dataset)))

    # model and evaluation
    model = rec_model(node_size=total_node,
                      node_dim=voc_dim,
                      neighbors_num=neighbors_num)
    model.to(device)
    global_step = 0
    checkpoint = torch.load(check_point_path)
    model.load_state_dict(checkpoint['net'])
    # optimizer.load_state_dict(checkpoint['optimizer'])
    global_step = checkpoint['global_step']

    model.eval()
    predict = []
    ground_truth = []
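    # Score every test batch and collect predictions and labels for the metrics below.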
    for i, data in tqdm(enumerate(test_dataloader),
                        total=len(test_dataloader)):
        user, target, neighborA, neighborB, label = data
        user = user.to(device)
        target = target.to(device)
        neighborA = neighborA.to(device)
        neighborB = neighborB.to(device)
        label = label.to(device)

        # Forward pass
        score = model(user, target, neighborA, neighborB)
        predict.append(score.cpu().detach().numpy())
        ground_truth.append(label.cpu().detach().numpy())
    predict = np.concatenate(predict, axis=0).flatten()
    ground_truth = np.concatenate(ground_truth, axis=0).flatten()
    np.savetxt("mymodel_result.txt", (predict, ground_truth))
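    # Threshold the scores at 0.5 for the classification report; ROC AUC uses the raw scores.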
    print(
        classification_report(ground_truth,
                              np.where(predict > 0.5, 1, 0),
                              digits=5))
    print(roc_auc_score(ground_truth, predict))
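
Since np.savetxt above is handed the pair (predict, ground_truth), the result file holds two rows: the predicted scores first, the labels second. A small sketch of reading it back, assuming the file written by eval():

import numpy as np
from sklearn.metrics import roc_auc_score

# Row 0 holds the predicted scores, row 1 the ground-truth labels.
predict, ground_truth = np.loadtxt("mymodel_result.txt")
print(roc_auc_score(ground_truth, predict))
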
Example No. 3
def train():
    cfg = get_config()
    inpts = tf.placeholder(tf.float32,
                           [cfg['batch'], cfg['prev_leng'], cfg['dims']])
    outs = tf.placeholder(
        tf.float32,
        [cfg['batch'], cfg['pred_ext'] + cfg['fut_leng'], cfg['dims']])

    #mod=model.rec_model(cfg)
    mod = model.rec_model(cfg)
    # opti= tf.train.RMSPropOptimizer(cfg['lr'], decay=0.9, momentum=0.5)
    opti = tf.train.AdamOptimizer(cfg['lr'])
    os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg['GPU'])
    if (cfg['clipping']):
        gvs = opti.compute_gradients(mod.loss)
        capped_gvs = [(ClipIfNotNone(grad), var) for grad, var in gvs]
        mini = opti.apply_gradients(capped_gvs)
    else:
        mini = opti.minimize(mod.loss)

    loader = utils.Loader(cfg)

    init = tf.initializers.global_variables()
    saver = tf.train.Saver()

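    # Build a unique run directory name from the timestamp, prefix, model type and key hyperparameters.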
    if (cfg['type'] != 3):
        newp = (str(time.time()).split(".")[0][-5:] + "_" + cfg['prefix'] +
                "_" + "-".join(nms[i] for i in range(cfg['type'] + 1)) +
                str(cfg['prev_leng']) + "_" + str(cfg['fut_leng']) + "_" +
                str(cfg['units']) + "_" + str(cfg['lat_size']) + "_" +
                "_".join(name_generator.get_combo()))
    else:
        newp = (str(time.time()).split(".")[0][-5:] + "_" + cfg['prefix'] +
                "_" + nms[cfg['type']] + str(cfg['prev_leng']) + "_" +
                str(cfg['fut_leng']) + "_" + str(cfg['units']) + "_" +
                str(cfg['lat_size']) + "_" + "_".join(name_generator.get_combo()))

    os.mkdir(newp)
    os.mkdir(newp + "/model")
    os.mkdir(newp + "/data")
    shutil.copy("config.yaml", newp + "/data/" + "config.yaml")
    tf.summary.scalar("loss", mod.loss)
    tf.summary.scalar("leng_loss", mod.leng_loss)
    tf.summary.scalar("dirs_loss", mod.dirs_loss)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    merge = tf.summary.merge_all()
    with tf.Session(config=config) as sess:
        if (cfg['load'] == "True"):
            saver.restore(sess, cfg['load_path'])
            print("LOADED MODEL at " + cfg['load_path'])
        else:
            sess.run(init)

        train_writer = tf.summary.FileWriter("logs/" + newp, sess.graph)
        #test_writer = tf.summary.FileWriter(newp + "/data", sess.graph)
        print(newp)
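        # Each epoch: log one held-out batch first, then run minibatch updates over the full training set.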
        for e in range(cfg['epochs']):
            x, gt, f, box, info, img = loader.serve_test()
            tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
            summary, ls, o = sess.run(
                [merge, mod.loss, mod.out],
                feed_dict={
                    inpts: x,
                    outs: tot,
                    mod.target: tot,
                    mod.inputs: x,
                    mod.drop: 1.0,
                    mod.feats: f,
                    mod.box: box,
                    mod.image: img
                })
            print(info[0])

            # for k in range(min(16,len(info))):
            #     im=drawer.draw_points(o[k],x[k],gt[k],cfg,info[k])
            #     im.save(newp+"/"+str(e)+"_"+str(k)+".png")
            for i in range(0, loader.total_data // cfg['batch']):
                # print(str(e)+" _ "+str(i))
                x, gt, f, box, info, img = loader.serve()
                tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
                ls, _, o = sess.run(
                    [mod.loss, mini, mod.out],
                    feed_dict={
                        inpts: x,
                        outs: tot,
                        mod.target: tot,
                        mod.image: img,
                        mod.inputs: x,
                        mod.box: box,
                        mod.feats: f,
                        mod.drop: 0.7
                    })
                if (i % 400 == 0):
                    print("TRAIN ", ls)
                if (i % 400 == 0):
                    summ = 0
                    for tst in range(0, 20):
                        x, gt, f, box, info, img = loader.serve_test()

                        tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
                        summary, ls, o = sess.run(
                            [merge, mod.loss, mod.out],
                            feed_dict={
                                inpts: x,
                                mod.image: img,
                                mod.drop: 1.0,
                                outs: tot,
                                mod.target: tot,
                                mod.box: box,
                                mod.inputs: x,
                                mod.feats: f
                            })
                        summ += ls
                        for k in range(4):
                            im = drawer.draw_points(o[k], x[k], gt[k], cfg,
                                                    info[k], box[k])
                            im.save(newp + "/" + str(e) + "_" + str(tst) +
                                    "_" + str(k) + ".png")
                        # print(x[0])
                    train_writer.add_summary(summary,
                                             (loader.total_data * e) + i)
                    print(
                        str(summ / 20.0) + " iteration " + str(i) + " of " +
                        str(loader.total_data // cfg['batch']) +
                        ", at epoch " + str(e) + " of " + str(cfg['epochs']))
                    # x, gt, f, box, info, img = loader.serve_test()
                    #
                    # tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
                    # summary, ls, o = sess.run([merge, mod.loss, mod.out],
                    #                           feed_dict={inpts: x, mod.image: img, outs: tot, mod.target: tot,
                    #                                      mod.inputs: x, mod.feats: f, mod.box: box})
                    # print(info[0])

                    #drawer.points_alone(o[k],x[k],gt[k],k,newp)
                    if (i % 4000 == 0):
                        print("SAVING " + newp)
                        saver.save(sess, newp + "/model/model.ckpt")

            # if(e%3==0):
            #     x, gt, f, box,info,img = loader.serve_test()
            #
            #     tot = np.concatenate([x[:,-cfg['pred_ext']:], gt], -2)
            #     summary, ls, o = sess.run([merge, mod.loss, mod.out],
            #                               feed_dict={inpts: x,mod.image:img, outs: tot, mod.target: tot, mod.inputs: x, mod.feats: f,mod.box:box})
            #     print(info[0])
            #     for k in range(min(16,len(info))):
            #         im=drawer.draw_points(o[k],x[k],gt[k],cfg,info[k])
            #
            #         im.save(newp+"/"+str(e)+"_"+str(k)+".png")#"#str("_".join(info[i]))+".png")
            print("SAVING " + newp)
            saver.save(sess, newp + "/model/model.ckpt")
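
Examples No. 3 and No. 4 call a ClipIfNotNone helper that is not shown in the listing. A minimal sketch, assuming it mirrors the explicit clipping used in Example No. 1 (clip to [-1, 1]) and simply passes None gradients through:

import tensorflow as tf

def ClipIfNotNone(grad):
    # Hypothetical helper: variables not reached by the loss get a None gradient,
    # which tf.clip_by_value cannot handle, so those are passed through unchanged.
    if grad is None:
        return grad
    return tf.clip_by_value(grad, -1.0, 1.0)
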
Example No. 4
def train_GAN():
    cfg = get_config()
    os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg['GPU'])
    inpts = tf.placeholder(tf.float32,
                           [cfg['batch'], cfg['prev_leng'], cfg['dims']])
    outs = tf.placeholder(
        tf.float32,
        [cfg['batch'], cfg['pred_ext'] + cfg['fut_leng'], cfg['dims']])
    real_imgs = tf.placeholder(tf.float32, shape=[cfg['batch'], 128, 256, 5])

    # mod=model.rec_model(cfg)
    mod = model.rec_model(cfg)
    optimizer = tf.train.AdamOptimizer(cfg['lr'])

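    # Discriminator logits on real target trajectories and on generated ones (the second call reuses the weights).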
    r_logits = mod.discrim(inpts, outs, real_imgs)
    f_logits = mod.discrim(inpts, mod.out, real_imgs, reuse=True)

    r_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=r_logits,
                                                labels=tf.ones_like(r_logits)))
    f_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=f_logits, labels=tf.zeros_like(f_logits)))
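    # Discriminator loss with one-sided label smoothing: real labels 0.9, fake labels 0.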
    disc_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=r_logits, labels=tf.ones_like(r_logits) * 0.9) +
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=f_logits, labels=tf.zeros_like(f_logits)))
    d_opti = tf.train.AdamOptimizer(cfg['d_lr'])
    dim_opti = tf.train.AdamOptimizer(cfg['im_d_lr'])
    gen_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=f_logits,
                                                labels=tf.ones_like(f_logits) *
                                                0.9))
    wei = cfg['wei']
    alpha = tf.placeholder(dtype=tf.float32)
    d_step = d_opti.minimize(disc_loss,
                             var_list=tf.get_collection(
                                 tf.GraphKeys.GLOBAL_VARIABLES, scope='DISCR'))
    dim_step = dim_opti.minimize(disc_loss,
                                 var_list=tf.get_collection(
                                     tf.GraphKeys.GLOBAL_VARIABLES,
                                     scope='DIM'))
    gvs = optimizer.compute_gradients(mod.loss + gen_loss * alpha,
                                      var_list=tf.get_collection(
                                          tf.GraphKeys.GLOBAL_VARIABLES,
                                          scope='GEN'))
    print(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='GEN'))
    print("============================================================")
    print(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='DISCR'))
    capped_gvs = [(ClipIfNotNone(grad), var) for grad, var in gvs]
    mini = optimizer.apply_gradients(capped_gvs)
    loader = utils.Loader(cfg)
    init = tf.initializers.global_variables()
    saver = tf.train.Saver()

    newp = (str(time.time()).split(".")[0][-4:] + "_" + cfg['prefix'] + "_" +
            "-".join(nms[i] for i in range(cfg['type'] + 1)) +
            str(cfg['prev_leng']) + "_" + str(cfg['fut_leng']) + "_" +
            "_".join(name_generator.get_combo()))

    os.mkdir(newp)
    os.mkdir(newp + "/model")
    os.mkdir(newp + "/data")
    shutil.copy("config.yaml", newp + "/data/" + "config.yaml")
    tf.summary.scalar("loss", mod.loss)
    tf.summary.scalar("leng_loss", mod.leng_loss)
    tf.summary.scalar("dirs_loss", mod.dirs_loss)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    merge = tf.summary.merge_all()
    with tf.Session(config=config) as sess:
        if (cfg['load'] == "True"):
            saver.restore(sess, cfg['load_path'])
            print("LOADED MODEL at " + cfg['load_path'])
        else:
            sess.run(init)
        train_writer = tf.summary.FileWriter("logs/" + newp, sess.graph)
        # test_writer = tf.summary.FileWriter(newp + "/data", sess.graph)
        print("OK")
        for e in range(cfg['epochs']):
            wei = max(wei + 0.1, 1.0)
            # x, gt, f, box, info, img = loader.serve_test()
            # tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
            # summary, ls, o = sess.run([merge, mod.loss, mod.out],
            #                           feed_dict={inpts: x, outs: tot, mod.target: tot, mod.inputs: x, mod.feats: f,
            #                                      mod.box: box, mod.image: img})
            # print(info[0])
            #
            # for k in range(min(16, len(info))):
            #     im = drawer.draw_points(o[k], x[k], gt[k], cfg, info[k])
            #     im.save(newp + "/" + str(e) + "_" + str(k) + ".png")
            for i in range(0, loader.total_data // cfg['batch']):
                # print(str(e)+" _ "+str(i))

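                # Take the configured number of discriminator (and image-discriminator) steps per generator step.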
                for k in range(cfg["disc_runs_per_gen_runs"]):
                    x, gt, f, box, info, img = loader.serve()
                    tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
                    sess.run(
                        [d_step, dim_step],
                        feed_dict={
                            inpts: x,
                            outs: tot,
                            mod.target: tot,
                            mod.image: img,
                            mod.inputs: x,
                            mod.box: box,
                            mod.feats: f,
                            real_imgs: img
                        })
                ls, _, o = sess.run(
                    [mod.loss, mini, mod.out],
                    feed_dict={
                        inpts: x,
                        outs: tot,
                        mod.target: tot,
                        mod.image: img,
                        mod.inputs: x,
                        mod.box: box,
                        mod.feats: f,
                        real_imgs: img,
                        alpha: wei
                    })
                if (i % 200 == 0):
                    summ = 0
                    d_summ = 0
                    fake_loss = 0
                    real_loss = 0
                    g_l = 0
                    for tst in range(0, 20):
                        x, gt, f, box, info, img = loader.serve_test()
                        tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
                        summary, ls, o, gl = sess.run(
                            [merge, mod.loss, mod.out, gen_loss],
                            feed_dict={
                                inpts: x,
                                mod.image: img,
                                outs: tot,
                                mod.target: tot,
                                mod.box: box,
                                mod.inputs: x,
                                mod.feats: f,
                                real_imgs: img
                            })
                        rl, fl, dls, r_l, f_l = sess.run(
                            [r_logits, f_logits, disc_loss, r_loss, f_loss],
                            feed_dict={
                                inpts: x,
                                outs: tot,
                                mod.target: tot,
                                mod.image: img,
                                mod.inputs: x,
                                mod.box: box,
                                mod.feats: f,
                                real_imgs: img
                            })

                        summ += ls + gl
                        g_l += gl
                        d_summ += dls
                        fake_loss += f_l
                        real_loss += r_l
                        # print(x[0])
                    train_writer.add_summary(summary,
                                             (loader.total_data * e) + i)
                    print("fake: " + str(fake_loss / 20.0) + " real: " +
                          str(real_loss / 20.0))
                    print("GEN_TOTAL: " + str(summ / 20.0) + " DISC: " +
                          str(d_summ / 20.0) + " GEN_ADVERSARIAL: " +
                          str(g_l / 20.0) + " iteration " + str(i) + " of " +
                          str(loader.total_data // cfg['batch']) +
                          ", at epoch " + str(e) + " of " + str(cfg['epochs']))
                if (i % 200 == 0):
                    x, gt, f, box, info, img = loader.serve_test()

                    tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
                    summary, ls, o = sess.run(
                        [merge, mod.loss, mod.out],
                        feed_dict={
                            inpts: x,
                            mod.image: img,
                            outs: tot,
                            mod.target: tot,
                            mod.inputs: x,
                            mod.feats: f,
                            mod.box: box,
                            real_imgs: img
                        })
                    for k in range(min(16, len(info))):
                        im = drawer.draw_points(o[k], x[k], gt[k], cfg,
                                                info[k])
                        im.save(newp + "/" + str(e) + "_" + str(k) +
                                ".png")  # "#str("_".join(info[i]))+".png")
                if (i % 2000 == 0):
                    saver.save(sess, newp + "/model/model.ckpt")
Example No. 5
            loss = loss_function(tag_rank, target)
            if i_batch % 20 == 0:
                writer.add_scalar('data/loss', loss, i_batch * 20)
                print(loss)

            loss_all += loss
            loss.backward()
            optimizer.step()
        print('Epoch {}:\t loss:{}'.format(epoch, loss_all))
    writer.export_scalars_to_json("./test.json")
    writer.close()


if __name__ == '__main__':
    model = rec_model(user_max_dict=user_max_dict,
                      movie_max_dict=movie_max_dict,
                      convParams=convParams)
    print(device)
    model = model.to(device)

    # train model
    #train(model=model,num_epochs=1)
    #torch.save(model.state_dict(), 'Params/model_params.pkl')

    # get user and movie feature
    model.load_state_dict(torch.load('Params/model_params.pkl'))
    from recInterface import saveMovieAndUserFeature
    saveMovieAndUserFeature(model=model)

    # test recsys
    # from recInterface import getKNNitem,getUserMostLike
Example No. 6
def test():
    cfg = get_config()
    filtered = True
    cfg['batch'] = 1
    inpts = tf.placeholder(tf.float32,
                           [cfg['batch'], cfg['prev_leng'], cfg['dims']])
    outs = tf.placeholder(
        tf.float32,
        [cfg['batch'], cfg['pred_ext'] + cfg['fut_leng'], cfg['dims']])
    mod = model.rec_model(cfg)

    # loader = utils.Loader(cfg)
    optimizer = tf.train.AdamOptimizer(cfg['lr'])

    # mini=opti.minimize(mod.loss)
    gvs = optimizer.compute_gradients(mod.loss)
    capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]
    mini = optimizer.apply_gradients(capped_gvs)
    init = tf.initializers.global_variables()
    saver = tf.train.Saver()

    newp = str(time.time()).split(
        ".")[0][-4:] + "_test_" + cfg["load_path"].split("/")[-3]
    if (filtered):
        newp += "_FILTERED"
    os.mkdir(newp)
    os.mkdir(newp + "/model")
    os.mkdir(newp + "/data")
    total = []
    test_paths = [d for d in os.listdir("../kitti_rev2/training/")]
    with tf.Session() as sess:
        if (cfg['load'] == "True"):
            saver.restore(sess, cfg['load_path'])
        else:
            print("TESTING must have load=True")
            exit()

        print("OK")
        jsons, tot, file_names = loader(test_paths)

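        # Walk every sequence, frame and tracked object; only tracks long enough for the configured past/future horizons are evaluated.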
        for i, d in enumerate(jsons):
            print(i)
            df = {}
            f_keys = d.keys()
            for frame in f_keys:
                df[frame] = {}
                img, sx, sy = get_segm(
                    "../kitti_rev2/training/" + file_names[i], frame)

                for object in d[frame]:
                    df[frame][object] = {}
                    cls = d[frame][object]["track_class_name"]

                    if (len(d[frame][object]["future"]) >=
                            cfg['fut_leng']) and (len(d[frame][object]["past"])
                                                  >= cfg['prev_leng'] - 1):

                        gt = np.clip(
                            np.array(
                                d[frame][object]["future"][0:cfg['fut_leng']]),
                            -1000, 3000)
                        past = np.clip(
                            np.array(
                                d[frame][object]["past"][-cfg['prev_leng'] +
                                                         1:]), -1000, 3000)

                        if (np.sqrt(np.sum(np.square(gt[-1] - past[0]))) >
                                80) or not filtered:
                            pres = np.array(d[frame][object]["present"])
                            X = np.concatenate((past, np.expand_dims(pres, 0)),
                                               0)
                            tot = smooth(np.concatenate((X, gt), 0))
                            xo = tot[0:cfg['prev_leng']]
                            x = xo / (sx / 4.0, sy)
                            gto = tot[cfg['prev_leng']:]
                            gt = gto / (sx / 4.0, sy)
                            x = np.expand_dims(x, 0)
                            gt = np.expand_dims(gt, 0)
                            tot = np.concatenate([x[:, -cfg['pred_ext']:], gt],
                                                 -2)

                            imx = np.expand_dims(img, 0)

                            o = sess.run(mod.out,
                                         feed_dict={
                                             inpts: x,
                                             mod.image: imx,
                                             outs: tot,
                                             mod.target: tot,
                                             mod.inputs: x
                                         })
                            o_scaled = scale_up(o[0], sx, sy)
                            gt_scaled = gto
                            df[frame][object]["past"] = xo.tolist()
                            df[frame][object]["gt"] = gt_scaled.tolist()
                            df[frame][object]["pred"] = o_scaled.tolist()
                            df[frame][object]["class"] = cls
            with open(newp + "/data/" + file_names[i] + ".json", "w") as out_f:
                json.dump(df, out_f, sort_keys=True)
        f = open(newp + "/dat", "w")
        f.write(str(set(total)))
        f.close()
Example No. 7
def test(cf):
    cfg = cf
    print("TEST_")
    filtered=True
    cfg['batch']=1
    cfg['old']=False
    cfg['inverted']=False
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"#str(cfg['GPU'])
    inpts = tf.placeholder(tf.float32, [cfg['batch'], cfg['prev_leng'], cfg['dims']])
    outs = tf.placeholder(tf.float32, [cfg['batch'], cfg['pred_ext'] + cfg['fut_leng'], cfg['dims']])
    mod = model.rec_model(cfg)
    saver = tf.train.Saver()
    newp = str(time.time()).split(".")[0][-4:] + "_test_" + cfg["load_path"].split("/")[-3]
    if (filtered):
        newp+="_FILTERED"
    os.mkdir(newp)
    os.mkdir(newp + "/model")
    os.mkdir(newp + "/data")
    os.mkdir(newp + "/imgs")
    total = []
    test_paths=[d for d in os.listdir("../kitti_rev2/training/")]
    for t in test_paths:
        os.mkdir(newp+"/imgs/"+t)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        print(cfg['load'],cfg['load_path'] )
        if (cfg['load'] == True):
            saver = tf.train.import_meta_graph("/".join(cfg['load_path'].split("/")[0:-2]) + "/model/model.ckpt.meta")

            graph = tf.get_default_graph()
            print_tensors_in_checkpoint_file(cfg['load_path'], all_tensors=False, tensor_name='DEC/DEC/gru_cell/candidate/bias',all_tensor_names=False)
            #saver.restore(sess, cfg['load_path'])
            for key in graph.get_all_collection_keys():
                for var in graph.get_collection(key):
                    print(var)
            saver.restore(sess, cfg['load_path'])

            print("MODEL LOADED")
        else:
            print("TESTING must have load=True")
            exit()

        jsons, tot, file_names,vmf = loader(test_paths)

        for i, d in enumerate(jsons):
            print(i)
            df={}
            f_keys = d.keys()
            for frame in f_keys:
                df[frame]={}
                img, sx, sy = get_segm_new("../kitti_rev2/training/"+str(file_names[i])+"/deeplab_cache", frame)

                for object in d[frame]:
                    df[frame][object]={}
                    cls = d[frame][object]["track_class_name"]

                    if (len(d[frame][object]["future"]) >= cfg['fut_leng']) and (
                            len(d[frame][object]["past"]) >= cfg['prev_leng'] - 1):

                        gt = np.clip(np.array(d[frame][object]["future"][0:cfg['fut_leng']]), -1000, 3000)
                        past = np.clip(np.array(d[frame][object]["past"][-cfg['prev_leng'] + 1:]), -1000, 3000)

                        if (np.sqrt(np.sum(np.square(gt[-1]-past[0])))>80) or not filtered:
                            pres = np.array(d[frame][object]["present"])
                            bbox = d[frame][object]["box"]
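                            # Normalize the box corners to roughly [-1, 1] horizontally and [-0.5, 0.5] vertically.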
                            bbox[0] = (bbox[0] / (sx / 2.0)) - 1.0
                            bbox[1] = (bbox[1] / float(sy)) - 0.5
                            bbox[2] = (bbox[2] / (sx / 2.0)) - 1.0
                            bbox[3] = (bbox[3] / float(sy)) - 0.5
                            bbox=np.array(bbox)
                            o_bbox=bbox
                            bbox=np.expand_dims(bbox,0)
                            X = np.concatenate((past, np.expand_dims(pres, 0)), 0)
                            conc = np.concatenate((X, gt), 0) / np.array(((sx / 2.0), float(sy)))
                            conc = conc - np.array([1.0, 0.5])
                            tot = smooth(conc)
                            xo=tot[0:cfg['prev_leng']]
                            x = xo
                            gto=tot[cfg['prev_leng']:]
                            old=gto
                            old_x=x
                            gt = gto
                            x=np.expand_dims(x,0)
                            gt = np.expand_dims(gt, 0)
                            tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)

                            imx=np.expand_dims(img,0)

                            o = sess.run(mod.out,
                                         feed_dict={inpts: x, mod.image: imx, outs: tot, mod.target: tot,mod.box:bbox,
                                                    mod.inputs: x,mod.drop:1.0})
                            poins = o[0] + np.array([1.0, 0.5])
                            o_scaled = poins * (sx / 2.0, float(sy))
                            gto = gto + np.array([1.0, 0.5])
                            gt_scaled=gto* (sx / 2.0, float(sy))
                            xo = xo + np.array([1.0, 0.5])
                            xo=xo* (sx / 2.0, sy)
                            df[frame][object]["past"] = xo.tolist()
                            df[frame][object]["gt"] = gt_scaled.tolist()
                            df[frame][object]["pred"] = o_scaled.tolist()
                            df[frame][object]["class"] = cls

                            im = drawer.draw_points(o[0], old_x, old, cfg,["../kitti_rev2/training/"+str(file_names[i]),frame,"0000",sx,sy,"test"],o_bbox)
                            im.save(newp+"/imgs/"+file_names[i]+"/"+frame+".png")

            with open(newp + "/data/"+file_names[i]+".json","w") as out_f:
                json.dump(df,out_f,sort_keys=True)
        f = open(newp + "/dat", "w")
        f.write(str(set(total)))
        f.close()
Example No. 8
def _test(cf):
    cfg = cf
    print("TEST_")
    filtered=True
    cfg['batch']=1
    os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg['GPU'])
    inpts = tf.placeholder(tf.float32, [cfg['batch'], cfg['prev_leng'], cfg['dims']])
    outs = tf.placeholder(tf.float32, [cfg['batch'], cfg['pred_ext'] + cfg['fut_leng'], cfg['dims']])
    mod = model.rec_model(cfg)
    saver = tf.train.Saver()
    newp = str(time.time()).split(".")[0][-4:] + "_test_" + cfg["load_path"].split("/")[-3]
    if (filtered):
        newp+="_FILTERED"
    os.mkdir(newp)
    os.mkdir(newp + "/model")
    os.mkdir(newp + "/data")
    os.mkdir(newp + "/imgs")
    total = []
    test_paths=[d for d in os.listdir("../kitti_rev2/training/")]
    for t in test_paths:
        os.mkdir(newp+"/imgs/"+t)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        print(cfg['load'],cfg['load_path'] )
        if (cfg['load'] == True):
            saver.restore(sess, cfg['load_path'])
            print("MODEL LOADED")
        else:
            print("TESTING must have load=True")
            exit()

        jsons, tot, file_names = loader(test_paths)

        for i, d in enumerate(jsons):
            print(i)
            df={}
            f_keys = d.keys()
            for frame in f_keys:
                df[frame]={}
                img, sx, sy = get_segm_new("../kitti_rev2/training/"+str(file_names[i])+"/deeplab_cache", frame)

                for object in d[frame]:
                    df[frame][object]={}
                    cls = d[frame][object]["track_class_name"]

                    if (len(d[frame][object]["future"]) >= cfg['fut_leng']) and (
                            len(d[frame][object]["past"]) >= cfg['prev_leng'] - 1):

                        gt = np.clip(np.array(d[frame][object]["future"][0:cfg['fut_leng']]), -1000, 3000)
                        past = np.clip(np.array(d[frame][object]["past"][-cfg['prev_leng'] + 1:]), -1000, 3000)

                        if (np.sqrt(np.sum(np.square(gt[-1]-past[0])))>80) or not filtered:
                            pres = np.array(d[frame][object]["present"])
                            bbox = d[frame][object]["box"]
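                            # Normalize all four box coordinates by the image height sy.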
                            bbox[0] = bbox[0] / float(sy)
                            bbox[1] = bbox[1] / float(sy)
                            bbox[2] = bbox[2] / float(sy)
                            bbox[3] = bbox[3] / float(sy)
                            bbox=np.array(bbox)
                            bbox=np.expand_dims(bbox,0)
                            X = np.concatenate((past, np.expand_dims(pres, 0)), 0)
                            tot = smooth(np.concatenate((X, gt), 0))
                            xo=tot[0:cfg['prev_leng']]
                            x = xo /(float(sy),float(sy))
                            gto=tot[cfg['prev_leng']:]
                            gt = gto /(float(sy),float(sy))
                            x=np.expand_dims(x,0)
                            gt = np.expand_dims(gt, 0)
                            tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)

                            imx=np.expand_dims(img,0)

                            o = sess.run(mod.out,
                                         feed_dict={inpts: x, mod.image: imx, outs: tot, mod.target: tot,mod.box:bbox,
                                                    mod.inputs: x})
                            o_scaled=scale_up(o[0],sy,sy)
                            gt_scaled=gto
                            df[frame][object]["past"] = xo.tolist()
                            df[frame][object]["gt"] = gt_scaled.tolist()
                            df[frame][object]["pred"] = o_scaled.tolist()
                            df[frame][object]["class"] = cls
                            ########
                            # assumed intent: directory part of file_names[i]
                            ot = newp + "/imgs/" + "/".join(file_names[i].split("/")[0:-1])
                            #[pt, frame, object, sx, sy, cls]
                            im = drawer.draw_points(o_scaled, xo, gt_scaled, cfg,[ot,file_names[i][-1],"0000",sx,sy,"test"],bbox)
                            im.save(newp+"/imgs/"+file_names[i])
                            ########
            with open(newp + "/data/"+file_names[i]+".json","w") as out_f:
                json.dump(df,out_f,sort_keys=True)
        f = open(newp + "/dat", "w")
        f.write(str(set(total)))
        f.close()
Example No. 9
def test():
    cfg = get_config()
    filtered = False
    cfg['batch'] = 1
    inpts = tf.placeholder(tf.float32,
                           [cfg['batch'], cfg['prev_leng'], cfg['dims']])
    mod = model.rec_model(cfg)

    init = tf.initializers.global_variables()
    saver = tf.train.Saver()

    newp = str(time.time()).split(
        ".")[0][-4:] + "_test_" + cfg["load_path"].split("/")[-3]
    if (filtered):
        newp += "_FILTERED"
    os.mkdir(newp)
    os.mkdir(newp + "/model")
    os.mkdir(newp + "/data")
    total = []
    test_paths = [d for d in os.listdir("../kitti_rev2/training/")]

    with tf.Session() as sess:
        if (cfg['load'] == "True"):
            saver.restore(sess, cfg['load_path'])
        else:
            print("TESTING must have load=True")
            exit()

        print("OK")
        #jsn = json.load(open("../modified_context/trajectories.json",'r'))
        vid = 12
        frame = "frame_000012"
        object = 'object_81'
        path = "12_12_MOD_CONTEXT_TEST"
        jsn = json.load(
            open("../kitti_rev2/training/" + str(vid) + "/trajectories.json",
                 'r'))
        #jsons, tot, file_names = loader(test_paths)

        ori_img, ori_sx, ori_sy = get_segm_new(path, "original.npz")
        mod_img, mod_sx, mod_sy = get_segm_new(path, "modified.npz")
        names = ["original", "modified"]

        d = jsn
        df = {}
        df[frame] = {}
        print("There are " + str(len(jsn[frame])) + " objects")
        l = 0
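        # Run the same past trajectories against both the original and the modified context map.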
        for ctx in [ori_img, mod_img]:
            img = ctx
            sx = ori_sx
            sy = ori_sy
            for object in jsn[frame]:
                print(object)
                df[frame][object] = {}
                print(d[frame][object]["past"])
                if len(d[frame][object]["past"]) >= cfg['prev_leng'] - 1:
                    print(object + " has minimum length")
                    #gt = np.clip(np.array(d[frame][object]["future"][0:cfg['fut_leng']]), -1000, 3000)
                    past = np.clip(
                        np.array(d[frame][object]["past"][-cfg['prev_leng'] +
                                                          1:]), -1000, 3000)

                    if (np.sqrt(np.sum(np.square(past[-1] - past[0]))) >
                            80) or not filtered:
                        pres = np.array(d[frame][object]["present"])
                        X = np.concatenate((past, np.expand_dims(pres, 0)), 0)
                        tot = smooth(X)
                        xo = tot[0:cfg['prev_leng']]
                        x = xo / (sx / 3.0, sy)
                        #gto=tot[cfg['prev_leng']:]
                        #gt = gto / (sx / 3.0, sy)
                        x = np.expand_dims(x, 0)
                        #gt = np.expand_dims(gt, 0)
                        #tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)

                        imx = np.expand_dims(img, 0)

                        o = sess.run(mod.out,
                                     feed_dict={
                                         inpts: x,
                                         mod.image: imx,
                                         mod.inputs: x
                                     })
                        o_scaled = scale_up(o[0], sx, sy)
                        #gt_scaled=gto
                        df[frame][object]["past"] = xo.tolist()
                        #df[frame][object]["gt"] = gt_scaled.tolist()
                        df[frame][object]["pred"] = o_scaled.tolist()
                        print(o_scaled.tolist())

            with open(newp + "/data/test" + frame + names[l] + ".json",
                      "w") as out_f:
                json.dump(df, out_f, sort_keys=True)
                f = open(newp + "/dat", "w")
                f.write(str(set(total)))
                f.close()
            l += 1