def train():
    cfg = get_config()

    inpts = tf.placeholder(tf.float32,
                           [cfg['batch'], cfg['prev_leng'], cfg['dims']])
    outs = tf.placeholder(
        tf.float32,
        [cfg['batch'], cfg['pred_ext'] + cfg['fut_leng'], cfg['dims']])

    mod = model.rec_model(cfg)

    # Alternative optimizer, kept for reference:
    # opti = tf.train.RMSPropOptimizer(cfg['lr'], decay=0.9, momentum=0.5)
    opti = tf.train.AdamOptimizer(cfg['lr'])
    os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg['GPU'])

    if cfg['clipping']:
        gvs = opti.compute_gradients(mod.loss)
        capped_gvs = [(ClipIfNotNone(grad), var) for grad, var in gvs]
        mini = opti.apply_gradients(capped_gvs)
    else:
        mini = opti.minimize(mod.loss)

    loader = utils.Loader(cfg)
    init = tf.initializers.global_variables()
    saver = tf.train.Saver()

    # Build a unique run directory name from a timestamp, the config values and
    # a random name combo.
    if cfg['type'] != 3:
        newp = (str(time.time()).split(".")[0][-5:] + "_" + cfg['prefix'] + "_"
                + "-".join(nms[i] for i in range(cfg['type'] + 1))
                + str(cfg['prev_leng']) + "_" + str(cfg['fut_leng']) + "_"
                + str(cfg['units']) + "_" + str(cfg['lat_size']) + "_"
                + "_".join(name_generator.get_combo()))
    else:
        newp = (str(time.time()).split(".")[0][-5:] + "_" + cfg['prefix'] + "_"
                + nms[cfg['type']] + str(cfg['prev_leng']) + "_"
                + str(cfg['fut_leng']) + "_" + str(cfg['units']) + "_"
                + str(cfg['lat_size']) + "_"
                + "_".join(name_generator.get_combo()))

    os.mkdir(newp)
    os.mkdir(newp + "/model")
    os.mkdir(newp + "/data")
    shutil.copy("config.yaml", newp + "/data/" + "config.yaml")

    tf.summary.scalar("loss", mod.loss)
    tf.summary.scalar("leng_loss", mod.leng_loss)
    tf.summary.scalar("dirs_loss", mod.dirs_loss)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    merge = tf.summary.merge_all()

    with tf.Session(config=config) as sess:
        if cfg['load'] == "True":
            saver.restore(sess, cfg['load_path'])
            print("LOADED MODEL at " + cfg['load_path'])
        else:
            sess.run(init)

        train_writer = tf.summary.FileWriter("logs/" + newp, sess.graph)
        print(newp)

        for e in range(cfg['epochs']):
            # Sanity-check batch from the test split at the start of each epoch.
            x, gt, f, box, info, img = loader.serve_test()
            tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
            summary, ls, o = sess.run(
                [merge, mod.loss, mod.out],
                feed_dict={
                    inpts: x,
                    outs: tot,
                    mod.target: tot,
                    mod.inputs: x,
                    mod.drop: 1.0,
                    mod.feats: f,
                    mod.box: box,
                    mod.image: img
                })
            print(info[0])

            for i in range(0, loader.total_data // cfg['batch']):
                x, gt, f, box, info, img = loader.serve()
                tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
                ls, _, o = sess.run(
                    [mod.loss, mini, mod.out],
                    feed_dict={
                        inpts: x,
                        outs: tot,
                        mod.target: tot,
                        mod.image: img,
                        mod.inputs: x,
                        mod.box: box,
                        mod.feats: f,
                        mod.drop: 0.7
                    })

                if i % 400 == 0:
                    print("TRAIN ", ls)
                    # Evaluate on 20 test batches and dump a few visualisations.
                    summ = 0
                    for tst in range(0, 20):
                        x, gt, f, box, info, img = loader.serve_test()
                        tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
                        summary, ls, o = sess.run(
                            [merge, mod.loss, mod.out],
                            feed_dict={
                                inpts: x,
                                mod.image: img,
                                mod.drop: 1.0,
                                outs: tot,
                                mod.target: tot,
                                mod.box: box,
                                mod.inputs: x,
                                mod.feats: f
                            })
                        summ += ls
                        for k in range(4):
                            im = drawer.draw_points(o[k], x[k], gt[k], cfg,
                                                    info[k], box[k])
                            im.save(newp + "/" + str(e) + "_" + str(tst) + "_"
                                    + str(k) + ".png")

                    train_writer.add_summary(summary,
                                             (loader.total_data * e) + i)
                    print(str(summ / 20.0) + " iteration " + str(i) + " of "
                          + str(loader.total_data // cfg['batch'])
                          + ", at epoch " + str(e) + " of "
                          + str(cfg['epochs']))

                if i % 4000 == 0:
                    print("SAVING " + newp)
                    saver.save(sess, newp + "/model/model.ckpt")

        print("SAVING " + newp)
        saver.save(sess, newp + "/model/model.ckpt")
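
# Editor's note: ClipIfNotNone() is used by train() and train_GAN() above but
# is defined elsewhere in this module. A minimal sketch, assuming the same
# per-element clipping to [-1, 1] that test_gan() applies inline:
#
#     def ClipIfNotNone(grad):
#         # compute_gradients() returns None for variables the loss does not
#         # depend on; those entries must be passed through untouched.
#         return tf.clip_by_value(grad, -1., 1.) if grad is not None else grad
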
def train_GAN():
    cfg = get_config()
    os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg['GPU'])

    inpts = tf.placeholder(tf.float32,
                           [cfg['batch'], cfg['prev_leng'], cfg['dims']])
    outs = tf.placeholder(
        tf.float32,
        [cfg['batch'], cfg['pred_ext'] + cfg['fut_leng'], cfg['dims']])
    real_imgs = tf.placeholder(tf.float32, shape=[cfg['batch'], 128, 256, 5])

    mod = model.rec_model(cfg)
    optimizer = tf.train.AdamOptimizer(cfg['lr'])

    # Discriminator logits for real (ground-truth) and fake (predicted) futures.
    r_logits = mod.discrim(inpts, outs, real_imgs)
    f_logits = mod.discrim(inpts, mod.out, real_imgs, reuse=True)

    r_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=r_logits,
                                                labels=tf.ones_like(r_logits)))
    f_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=f_logits,
                                                labels=tf.zeros_like(f_logits)))

    # Discriminator loss with one-sided label smoothing (real labels = 0.9).
    disc_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=r_logits, labels=tf.ones_like(r_logits) * 0.9) +
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=f_logits, labels=tf.zeros_like(f_logits)))

    d_opti = tf.train.AdamOptimizer(cfg['d_lr'])
    dim_opti = tf.train.AdamOptimizer(cfg['im_d_lr'])

    gen_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=f_logits, labels=tf.ones_like(f_logits) * 0.9))

    wei = cfg['wei']
    alpha = tf.placeholder(dtype=tf.float32)

    d_step = d_opti.minimize(
        disc_loss,
        var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='DISCR'))
    dim_step = dim_opti.minimize(
        disc_loss,
        var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='DIM'))

    gvs = optimizer.compute_gradients(
        mod.loss + gen_loss * alpha,
        var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='GEN'))
    print(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='GEN'))
    print("============================================================")
    print(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='DISCR'))

    capped_gvs = [(ClipIfNotNone(grad), var) for grad, var in gvs]
    mini = optimizer.apply_gradients(capped_gvs)

    loader = utils.Loader(cfg)
    init = tf.initializers.global_variables()
    saver = tf.train.Saver()

    newp = (str(time.time()).split(".")[0][-4:] + "_" + cfg['prefix'] + "_"
            + "-".join(nms[i] for i in range(cfg['type'] + 1))
            + str(cfg['prev_leng']) + "_" + str(cfg['fut_leng']) + "_"
            + "_".join(name_generator.get_combo()))
    os.mkdir(newp)
    os.mkdir(newp + "/model")
    os.mkdir(newp + "/data")
    shutil.copy("config.yaml", newp + "/data/" + "config.yaml")

    tf.summary.scalar("loss", mod.loss)
    tf.summary.scalar("leng_loss", mod.leng_loss)
    tf.summary.scalar("dirs_loss", mod.dirs_loss)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    merge = tf.summary.merge_all()

    with tf.Session(config=config) as sess:
        if cfg['load'] == "True":
            saver.restore(sess, cfg['load_path'])
            print("LOADED MODEL at " + cfg['load_path'])
        else:
            sess.run(init)

        train_writer = tf.summary.FileWriter("logs/" + newp, sess.graph)
        print("OK")

        for e in range(cfg['epochs']):
            # Ramp the adversarial weight up by 0.1 per epoch, capped at 1.0
            # (the original used max(), which defeats the cap).
            wei = min(wei + 0.1, 1.0)

            for i in range(0, loader.total_data // cfg['batch']):
                # Several discriminator updates per generator update.
                for k in range(cfg["disc_runs_per_gen_runs"]):
                    x, gt, f, box, info, img = loader.serve()
                    tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
                    sess.run(
                        [d_step, dim_step],
                        feed_dict={
                            inpts: x,
                            outs: tot,
                            mod.target: tot,
                            mod.image: img,
                            mod.inputs: x,
                            mod.box: box,
                            mod.feats: f,
                            real_imgs: img
                        })

                # Generator update: regression loss plus alpha-weighted
                # adversarial loss, on the last batch served above.
                ls, _, o = sess.run(
                    [mod.loss, mini, mod.out],
                    feed_dict={
                        inpts: x,
                        outs: tot,
                        mod.target: tot,
                        mod.image: img,
                        mod.inputs: x,
                        mod.box: box,
                        mod.feats: f,
                        real_imgs: img,
                        alpha: wei
                    })

                if i % 200 == 0:
                    summ = 0
                    d_summ = 0
                    fake_loss = 0
                    real_loss = 0
                    g_l = 0
                    for tst in range(0, 20):
                        x, gt, f, box, info, img = loader.serve_test()
                        tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
                        summary, ls, o, gl = sess.run(
                            [merge, mod.loss, mod.out, gen_loss],
                            feed_dict={
                                inpts: x,
                                mod.image: img,
                                outs: tot,
                                mod.target: tot,
                                mod.box: box,
                                mod.inputs: x,
                                mod.feats: f,
                                real_imgs: img
                            })
                        rl, fl, dls, r_l, f_l = sess.run(
                            [r_logits, f_logits, disc_loss, r_loss, f_loss],
                            feed_dict={
                                inpts: x,
                                outs: tot,
                                mod.target: tot,
                                mod.image: img,
                                mod.inputs: x,
                                mod.box: box,
                                mod.feats: f,
                                real_imgs: img
                            })
                        summ += ls + gl
                        g_l += gl
                        d_summ += dls
                        fake_loss += f_l
                        real_loss += r_l

                    train_writer.add_summary(summary,
                                             (loader.total_data * e) + i)
                    print("fake: " + str(fake_loss / 20.0) + " real: "
                          + str(real_loss / 20.0))
                    print("GEN_TOTAL: " + str(summ / 20.0) + " DISC: "
                          + str(d_summ / 20.0) + " GEN_ADVERSARIAL: "
                          + str(g_l / 20.0) + " iteration " + str(i) + " of "
                          + str(loader.total_data // cfg['batch'])
                          + ", at epoch " + str(e) + " of "
                          + str(cfg['epochs']))

                if i % 200 == 0:
                    x, gt, f, box, info, img = loader.serve_test()
                    tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
                    summary, ls, o = sess.run(
                        [merge, mod.loss, mod.out],
                        feed_dict={
                            inpts: x,
                            mod.image: img,
                            outs: tot,
                            mod.target: tot,
                            mod.inputs: x,
                            mod.feats: f,
                            mod.box: box,
                            real_imgs: img
                        })
                    for k in range(min(16, len(info))):
                        im = drawer.draw_points(o[k], x[k], gt[k], cfg, info[k])
                        im.save(newp + "/" + str(e) + "_" + str(k) + ".png")

                if i % 2000 == 0:
                    saver.save(sess, newp + "/model/model.ckpt")
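
# Editor's summary of the adversarial training scheme above (descriptive only,
# derived from the code): the discriminator and the image-branch discriminator
# ('DISCR' and 'DIM' scopes) are updated cfg["disc_runs_per_gen_runs"] times per
# generator step, using one-sided label smoothing (real labels 0.9, fake 0.0).
# The generator ('GEN' scope) then minimizes mod.loss + alpha * gen_loss, where
# gen_loss pushes fake logits towards the smoothed real label and alpha ramps up
# from cfg['wei'] by 0.1 per epoch to at most 1.0.
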
def test_gan():
    cfg = get_config()
    filtered = True
    cfg['batch'] = 1

    inpts = tf.placeholder(tf.float32,
                           [cfg['batch'], cfg['prev_leng'], cfg['dims']])
    outs = tf.placeholder(
        tf.float32,
        [cfg['batch'], cfg['pred_ext'] + cfg['fut_leng'], cfg['dims']])

    mod = model.rec_model(cfg)
    optimizer = tf.train.AdamOptimizer(cfg['lr'])
    gvs = optimizer.compute_gradients(mod.loss)
    capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]
    mini = optimizer.apply_gradients(capped_gvs)

    init = tf.initializers.global_variables()
    saver = tf.train.Saver()

    newp = (str(time.time()).split(".")[0][-4:] + "_test_"
            + cfg["load_path"].split("/")[-3])
    os.mkdir(newp)
    os.mkdir(newp + "/model")
    os.mkdir(newp + "/data")
    os.mkdir(newp + "/imgs")

    total = []
    test_paths = [d for d in os.listdir("../kitti_rev2/training/")]
    for t in test_paths:
        os.mkdir(newp + "/imgs/" + t)

    with tf.Session() as sess:
        if cfg['load'] == "True":
            saver.restore(sess, cfg['load_path'])
        else:
            print("TESTING must have load=True")
            exit()
        print("OK")

        jsons, tot, file_names = loader(test_paths)
        for i, d in enumerate(jsons):
            df = {}
            f_keys = d.keys()
            for frame in f_keys:
                df[frame] = {}
                img, sx, sy = get_segm("../kitti_rev2/training/" + file_names[i],
                                       frame)
                for object in d[frame]:
                    df[frame][object] = {}
                    cls = d[frame][object]["track_class_name"]
                    if (len(d[frame][object]["future"]) >= cfg['fut_leng']) and (
                            len(d[frame][object]["past"]) >= cfg['prev_leng'] - 1):
                        gt = np.clip(
                            np.array(d[frame][object]["future"][0:cfg['fut_leng']]),
                            -1000, 3000)
                        past = np.clip(
                            np.array(d[frame][object]["past"][-cfg['prev_leng'] + 1:]),
                            -1000, 3000)
                        if (np.sqrt(np.sum(np.square(gt[-1] - past[0]))) > 80) or not filtered:
                            pres = np.array(d[frame][object]["present"])
                            bbox = d[frame][object]["box"]
                            bbox[0] = bbox[0] / float(sy)
                            bbox[1] = bbox[1] / float(sy)
                            bbox[2] = bbox[2] / float(sy)
                            bbox[3] = bbox[3] / float(sy)
                            X = np.concatenate((past, np.expand_dims(pres, 0)), 0)
                            tot = smooth(np.concatenate((X, gt), 0))
                            xo = tot[0:cfg['prev_leng']]
                            x = xo / (float(sy), float(sy))
                            gto = tot[cfg['prev_leng']:]
                            gt = gto / (float(sy), float(sy))
                            x = np.expand_dims(x, 0)
                            gt = np.expand_dims(gt, 0)
                            tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
                            imx = np.expand_dims(img, 0)
                            o = sess.run(mod.out,
                                         feed_dict={inpts: x, mod.image: imx,
                                                    outs: tot, mod.target: tot,
                                                    mod.box: bbox, mod.inputs: x})
                            o_scaled = scale_up(o, sy, sy)
                            gt_scaled = gto
                            df[frame][object]["past"] = xo.tolist()
                            df[frame][object]["gt"] = gt_scaled.tolist()
                            df[frame][object]["pred"] = o_scaled.tolist()
                            df[frame][object]["class"] = cls
                            # The original expression here was broken
                            # (file_names[i].split("/"[0:-1])); the likely intent
                            # is the directory part of the file name.
                            ot = (newp + "/imgs/"
                                  + "/".join(file_names[i].split("/")[0:-1]))
                            # draw_points expects [path, frame, object, sx, sy, cls]
                            im = drawer.draw_points(
                                o_scaled, xo, gt_scaled, cfg,
                                [ot, file_names[i][-1], "0000", sx, sy, "test"],
                                bbox)
                            im.save(newp + "/imgs/" + file_names[i])
            with open(newp + "/data/" + file_names[i] + ".json", "w") as out_f:
                json.dump(df, out_f, sort_keys=True)

    f = open(newp + "/dat", "w")
    f.write(str(set(total)))
    f.close()
def test_broken(cf):
    cfg = cf
    print("TEST_")
    filtered = True
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"

    inpts = tf.placeholder(tf.float32,
                           [cfg['batch'], cfg['prev_leng'], cfg['dims']])
    outs = tf.placeholder(
        tf.float32,
        [cfg['batch'], cfg['pred_ext'] + cfg['fut_leng'], cfg['dims']])
    cfg['old'] = True

    newp = (str(time.time()).split(".")[0][-4:] + "_test_"
            + cfg["load_path"].split("/")[-3])
    if filtered:
        newp += "_FILTERED"
    os.mkdir(newp)
    os.mkdir(newp + "/model")
    os.mkdir(newp + "/data")
    os.mkdir(newp + "/imgs")
    cfg['center'] = False

    total = []
    test_paths = [d for d in os.listdir("../kitti_rev2/training/")]
    for t in test_paths:
        os.mkdir(newp + "/imgs/" + t)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        print(cfg['load'], cfg['load_path'])
        if cfg['load'] == True:
            # Rebuild the graph from the saved meta file instead of calling
            # model.rec_model(cfg), then fetch the needed tensors by name.
            saver = tf.train.import_meta_graph(
                "/".join(cfg['load_path'].split("/")[0:-2]) + "/model/model.ckpt.meta")
            saver.restore(sess, tf.train.latest_checkpoint(
                "/".join(cfg['load_path'].split("/")[0:-2]) + "/model/"))
            graph = tf.get_default_graph()
            graph_def = sess.graph.as_graph_def(add_shapes=True)
            coll = graph.get_all_collection_keys()
            mod = graph
            c = 0
            tensors_per_node = [node.values() for node in graph.get_operations()]
            tensor_names = [(tensor.name, tensor)
                            for tensors in tensors_per_node
                            for tensor in tensors
                            if tensor.name.startswith("GEN/DEC/dense")]
            # Placeholder tensors found in the restored graph (for reference):
            #   Placeholder:0     (1, 10, 2)    Placeholder_1:0    (1, 31, 2)
            #   Placeholder_2:0   (32, 10, 2)   Placeholder_1_1:0  (32, 31, 2)
            #   Placeholder_2_1:0 (32, 10, 2)   Placeholder_3:0    (32, 31, 2)
            #   Placeholder_4:0   (32, 4)       Placeholder_5:0    (32, 128)
            #   Placeholder_6:0   (32,)         Placeholder_7:0    (32,)
            #   Placeholder_8:0   (32,)         Placeholder_9:0    (32, 128, 256)
            for t in tensor_names:
                print(t)
            out = graph.get_tensor_by_name("GEN/DEC/dense/Tensordot:0")
            b_out = graph.get_tensor_by_name("GEN/DEC/dense/BiasAdd:0")
            inpts = graph.get_tensor_by_name("Placeholder:0")
            targ = graph.get_tensor_by_name("Placeholder_2_1:0")
            images = graph.get_tensor_by_name("Placeholder_9:0")
            outs = graph.get_tensor_by_name("Placeholder_1:0")
            boc = graph.get_tensor_by_name("Placeholder_4:0")
        else:
            print("TESTING must have load=True")
            exit()

        jsons, tot, file_names, vmf = loader(test_paths)
        for i, d in enumerate(jsons):
            print(i)
            df = {}
            f_keys = d.keys()
            for frame in f_keys:
                df[frame] = {}
                img, sx, sy = get_segm(
                    "../kitti_rev2/training/" + str(file_names[i]), frame)
                for object in d[frame]:
                    df[frame][object] = {}
                    cls = d[frame][object]["track_class_name"]
                    if (len(d[frame][object]["future"]) >= cfg['fut_leng']) and (
                            len(d[frame][object]["past"]) >= cfg['prev_leng'] - 1):
                        gt = np.clip(
                            np.array(d[frame][object]["future"][0:cfg['fut_leng']]),
                            -1000, 3000)
                        past = np.clip(
                            np.array(d[frame][object]["past"][-cfg['prev_leng'] + 1:]),
                            -1000, 3000)
                        fact = 4.0
                        if (np.sqrt(np.sum(np.square(gt[-1] - past[0]))) > 60) or not filtered:
                            pres = np.array(d[frame][object]["present"])
                            bbox = d[frame][object]["box"]
                            bbox[0] = bbox[0] / (sx / fact)
                            bbox[1] = bbox[1] / float(sy)
                            bbox[2] = bbox[2] / (sx / fact)
                            bbox[3] = bbox[3] / float(sy)
                            bbox = np.array(bbox)
                            o_bbox = bbox
                            bbox = np.expand_dims(bbox, 0)
                            X = np.concatenate((past, np.expand_dims(pres, 0)), 0)
                            conc = np.concatenate((X, gt), 0) / np.array((sx / 4.0, sy))
                            tot = smooth(conc)
                            xo = tot[0:cfg['prev_leng']]
                            x = xo
                            gto = tot[cfg['prev_leng']:]
                            old = gto
                            old_x = x
                            gt = gto
                            x = np.expand_dims(x, 0)
                            gt = np.expand_dims(gt, 0)
                            tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
                            imx = np.expand_dims(img, 0)

                            # Tile the single sample up to the batch size that is
                            # baked into the restored graph.
                            x = np.tile(x, [cfg['batch'], 1, 1])
                            imx = np.tile(imx, [cfg['batch'], 1, 1])
                            tot = np.tile(tot, [cfg['batch'], 1, 1])
                            bbox = np.tile(bbox, [cfg['batch'], 1])

                            o, bias = sess.run([out, b_out],
                                               feed_dict={inpts: x, images: imx,
                                                          targ: x, outs: tot,
                                                          boc: bbox})
                            # Use the bias-added output of the decoder dense layer.
                            o = bias
                            poins = o[0]
                            o_scaled = poins * (sx / fact, float(sy))
                            gt_scaled = gto * (sx / fact, float(sy))
                            xo = xo * (sx / fact, float(sy))
                            df[frame][object]["past"] = xo.tolist()
                            df[frame][object]["gt"] = gt_scaled.tolist()
                            df[frame][object]["pred"] = o_scaled.tolist()
                            df[frame][object]["class"] = cls
                            im = drawer.draw_points(
                                o[0], old_x, old, cfg,
                                ["../kitti_rev2/training/" + str(file_names[i]),
                                 frame, "0000", sx, sy, "test"], o_bbox)
                            im.save(newp + "/imgs/" + file_names[i] + "/"
                                    + frame + ".png")
            with open(newp + "/data/" + file_names[i] + ".json", "w") as out_f:
                json.dump(df, out_f, sort_keys=True)

    f = open(newp + "/dat", "w")
    f.write(str(set(total)))
    f.close()
def test(cf):
    cfg = cf
    print("TEST_")
    filtered = True
    cfg['batch'] = 1
    cfg['old'] = False
    cfg['inverted'] = False
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"  # str(cfg['GPU'])

    inpts = tf.placeholder(tf.float32,
                           [cfg['batch'], cfg['prev_leng'], cfg['dims']])
    outs = tf.placeholder(
        tf.float32,
        [cfg['batch'], cfg['pred_ext'] + cfg['fut_leng'], cfg['dims']])

    mod = model.rec_model(cfg)
    saver = tf.train.Saver()

    newp = (str(time.time()).split(".")[0][-4:] + "_test_"
            + cfg["load_path"].split("/")[-3])
    if filtered:
        newp += "_FILTERED"
    os.mkdir(newp)
    os.mkdir(newp + "/model")
    os.mkdir(newp + "/data")
    os.mkdir(newp + "/imgs")

    total = []
    test_paths = [d for d in os.listdir("../kitti_rev2/training/")]
    for t in test_paths:
        os.mkdir(newp + "/imgs/" + t)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        print(cfg['load'], cfg['load_path'])
        if cfg['load'] == True:
            saver = tf.train.import_meta_graph(
                "/".join(cfg['load_path'].split("/")[0:-2]) + "/model/model.ckpt.meta")
            graph = tf.get_default_graph()
            print_tensors_in_checkpoint_file(
                cfg['load_path'],
                all_tensors=False,
                tensor_name='DEC/DEC/gru_cell/candidate/bias',
                all_tensor_names=False)
            for key in graph.get_all_collection_keys():
                for v in graph.get_collection(key):
                    print(v)
            saver.restore(sess, cfg['load_path'])
            print("MODEL LOADED")
        else:
            print("TESTING must have load=True")
            exit()

        jsons, tot, file_names, vmf = loader(test_paths)
        for i, d in enumerate(jsons):
            print(i)
            df = {}
            f_keys = d.keys()
            for frame in f_keys:
                df[frame] = {}
                img, sx, sy = get_segm_new(
                    "../kitti_rev2/training/" + str(file_names[i]) + "/deeplab_cache",
                    frame)
                for object in d[frame]:
                    df[frame][object] = {}
                    cls = d[frame][object]["track_class_name"]
                    if (len(d[frame][object]["future"]) >= cfg['fut_leng']) and (
                            len(d[frame][object]["past"]) >= cfg['prev_leng'] - 1):
                        gt = np.clip(
                            np.array(d[frame][object]["future"][0:cfg['fut_leng']]),
                            -1000, 3000)
                        past = np.clip(
                            np.array(d[frame][object]["past"][-cfg['prev_leng'] + 1:]),
                            -1000, 3000)
                        if (np.sqrt(np.sum(np.square(gt[-1] - past[0]))) > 80) or not filtered:
                            pres = np.array(d[frame][object]["present"])
                            # Normalise the box to roughly [-1, 1] in x and
                            # [-0.5, 0.5] in y.
                            bbox = d[frame][object]["box"]
                            bbox[0] = (bbox[0] / (sx / 2.0)) - 1.0
                            bbox[1] = (bbox[1] / float(sy)) - 0.5
                            bbox[2] = (bbox[2] / (sx / 2.0)) - 1.0
                            bbox[3] = (bbox[3] / float(sy)) - 0.5
                            bbox = np.array(bbox)
                            o_bbox = bbox
                            bbox = np.expand_dims(bbox, 0)
                            X = np.concatenate((past, np.expand_dims(pres, 0)), 0)
                            conc = np.concatenate((X, gt), 0) / np.array(
                                ((sx / 2.0), float(sy)))
                            conc = conc - np.array([1.0, 0.5])
                            tot = smooth(conc)
                            xo = tot[0:cfg['prev_leng']]
                            x = xo
                            gto = tot[cfg['prev_leng']:]
                            old = gto
                            old_x = x
                            gt = gto
                            x = np.expand_dims(x, 0)
                            gt = np.expand_dims(gt, 0)
                            tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
                            imx = np.expand_dims(img, 0)
                            o = sess.run(mod.out,
                                         feed_dict={inpts: x, mod.image: imx,
                                                    outs: tot, mod.target: tot,
                                                    mod.box: bbox, mod.inputs: x,
                                                    mod.drop: 1.0})
                            # Undo the normalisation before saving and plotting.
                            poins = o[0] + np.array([1.0, 0.5])
                            o_scaled = poins * (sx / 2.0, float(sy))
                            gto = gto + np.array([1.0, 0.5])
                            gt_scaled = gto * (sx / 2.0, float(sy))
                            xo = xo + np.array([1.0, 0.5])
                            xo = xo * (sx / 2.0, sy)
                            df[frame][object]["past"] = xo.tolist()
                            df[frame][object]["gt"] = gt_scaled.tolist()
                            df[frame][object]["pred"] = o_scaled.tolist()
                            df[frame][object]["class"] = cls
                            im = drawer.draw_points(
                                o[0], old_x, old, cfg,
                                ["../kitti_rev2/training/" + str(file_names[i]),
                                 frame, "0000", sx, sy, "test"], o_bbox)
                            im.save(newp + "/imgs/" + file_names[i] + "/"
                                    + frame + ".png")
            with open(newp + "/data/" + file_names[i] + ".json", "w") as out_f:
                json.dump(df, out_f, sort_keys=True)

    f = open(newp + "/dat", "w")
    f.write(str(set(total)))
    f.close()
def _test(cf):
    cfg = cf
    print("TEST_")
    filtered = True
    cfg['batch'] = 1
    os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg['GPU'])

    inpts = tf.placeholder(tf.float32,
                           [cfg['batch'], cfg['prev_leng'], cfg['dims']])
    outs = tf.placeholder(
        tf.float32,
        [cfg['batch'], cfg['pred_ext'] + cfg['fut_leng'], cfg['dims']])

    mod = model.rec_model(cfg)
    saver = tf.train.Saver()

    newp = (str(time.time()).split(".")[0][-4:] + "_test_"
            + cfg["load_path"].split("/")[-3])
    if filtered:
        newp += "_FILTERED"
    os.mkdir(newp)
    os.mkdir(newp + "/model")
    os.mkdir(newp + "/data")
    os.mkdir(newp + "/imgs")

    total = []
    test_paths = [d for d in os.listdir("../kitti_rev2/training/")]
    for t in test_paths:
        os.mkdir(newp + "/imgs/" + t)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        print(cfg['load'], cfg['load_path'])
        if cfg['load'] == True:
            saver.restore(sess, cfg['load_path'])
            print("MODEL LOADED")
        else:
            print("TESTING must have load=True")
            exit()

        jsons, tot, file_names = loader(test_paths)
        for i, d in enumerate(jsons):
            print(i)
            df = {}
            f_keys = d.keys()
            for frame in f_keys:
                df[frame] = {}
                img, sx, sy = get_segm_new(
                    "../kitti_rev2/training/" + str(file_names[i]) + "/deeplab_cache",
                    frame)
                for object in d[frame]:
                    df[frame][object] = {}
                    cls = d[frame][object]["track_class_name"]
                    if (len(d[frame][object]["future"]) >= cfg['fut_leng']) and (
                            len(d[frame][object]["past"]) >= cfg['prev_leng'] - 1):
                        gt = np.clip(
                            np.array(d[frame][object]["future"][0:cfg['fut_leng']]),
                            -1000, 3000)
                        past = np.clip(
                            np.array(d[frame][object]["past"][-cfg['prev_leng'] + 1:]),
                            -1000, 3000)
                        if (np.sqrt(np.sum(np.square(gt[-1] - past[0]))) > 80) or not filtered:
                            pres = np.array(d[frame][object]["present"])
                            bbox = d[frame][object]["box"]
                            bbox[0] = bbox[0] / float(sy)
                            bbox[1] = bbox[1] / float(sy)
                            bbox[2] = bbox[2] / float(sy)
                            bbox[3] = bbox[3] / float(sy)
                            bbox = np.array(bbox)
                            bbox = np.expand_dims(bbox, 0)
                            X = np.concatenate((past, np.expand_dims(pres, 0)), 0)
                            tot = smooth(np.concatenate((X, gt), 0))
                            xo = tot[0:cfg['prev_leng']]
                            x = xo / (float(sy), float(sy))
                            gto = tot[cfg['prev_leng']:]
                            gt = gto / (float(sy), float(sy))
                            x = np.expand_dims(x, 0)
                            gt = np.expand_dims(gt, 0)
                            tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
                            imx = np.expand_dims(img, 0)
                            o = sess.run(mod.out,
                                         feed_dict={inpts: x, mod.image: imx,
                                                    outs: tot, mod.target: tot,
                                                    mod.box: bbox, mod.inputs: x})
                            o_scaled = scale_up(o[0], sy, sy)
                            gt_scaled = gto
                            df[frame][object]["past"] = xo.tolist()
                            df[frame][object]["gt"] = gt_scaled.tolist()
                            df[frame][object]["pred"] = o_scaled.tolist()
                            df[frame][object]["class"] = cls
                            # The original expression here was broken
                            # (file_names[i].split("/"[0:-1])); the likely intent
                            # is the directory part of the file name.
                            ot = (newp + "/imgs/"
                                  + "/".join(file_names[i].split("/")[0:-1]))
                            # draw_points expects [path, frame, object, sx, sy, cls]
                            im = drawer.draw_points(
                                o_scaled, xo, gt_scaled, cfg,
                                [ot, file_names[i][-1], "0000", sx, sy, "test"],
                                bbox)
                            im.save(newp + "/imgs/" + file_names[i])
            with open(newp + "/data/" + file_names[i] + ".json", "w") as out_f:
                json.dump(df, out_f, sort_keys=True)

    f = open(newp + "/dat", "w")
    f.write(str(set(total)))
    f.close()
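
# Editor's note: the module's real entry point is not part of this section.
# A hypothetical dispatcher (the 'mode' config key is an assumption, not part
# of the original config.yaml) might look like:
#
#     if __name__ == "__main__":
#         cfg = get_config()
#         mode = cfg.get('mode', 'train')
#         if mode == 'gan':
#             train_GAN()
#         elif mode == 'test':
#             test(cfg)
#         else:
#             train()
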