def detect(self, image):
    pred_annolist = al.AnnoList()
    # Drop any alpha channel and resize to the network input resolution.
    orig_img = image[:, :, :3]
    img = imresize(orig_img, (self.H["image_height"], self.H["image_width"]))
    feed = {self.x_in: img}
    (np_pred_boxes, np_pred_confidences) = self.sess.run(
        [self.pred_boxes, self.pred_confidences], feed_dict=feed)
    pred_anno = al.Annotation()
    pred_anno.imageName = "test"
    new_img, rects = add_rectangles(self.H, [img], np_pred_confidences, np_pred_boxes,
                                    use_stitching=True, rnn_len=self.H['rnn_len'],
                                    min_conf=0.2, tau=0.25, show_suppressed=False)
    pred_anno.rects = rects
    pred_anno.imagePath = "none"
    # Map the boxes back from network resolution to the original image size.
    pred_anno = rescale_boxes((self.H["image_height"], self.H["image_width"]),
                              pred_anno, orig_img.shape[0], orig_img.shape[1])
    pred_annolist.append(pred_anno)
    predictions = []
    for pred in pred_annolist:
        if pred.rects:  # guard against frames with no detections
            predictions.append([pred.rects[0].x1, pred.rects[0].y1,
                                pred.rects[0].x2, pred.rects[0].y2])
    misc.imsave("test.jpg", new_img)
    return predictions
def get_results(args, H, data_dir):
    tf.reset_default_graph()
    H["grid_width"] = H["image_width"] / H["region_size"]
    H["grid_height"] = H["image_height"] / H["region_size"]
    if args.frozen_graph:
        graph = load_frozen_graph(args.graphfile)
    else:
        new_saver = tf.train.import_meta_graph(args.graphfile)
    NUM_THREADS = 8
    with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=NUM_THREADS),
                    graph=graph if args.frozen_graph else None) as sess:
        sess.run(tf.global_variables_initializer())
        if args.frozen_graph:
            x_in = graph.get_tensor_by_name('x_in:0')
            pred_boxes = graph.get_tensor_by_name('add:0')
            pred_confidences = graph.get_tensor_by_name('Reshape_2:0')
        else:
            new_saver.restore(sess, args.weights)
            x_in = tf.get_collection('placeholders')[0]
            pred_boxes, pred_confidences = tf.get_collection('vars')
        # freeze_graph.freeze_graph("overfeat.pb", "", False, args.weights, "add,Reshape_2",
        #                           "save/restore_all", "save/Const:0", "overfeat_frozen.pb", False, '')
        pred_annolist = al.AnnoList()
        included_extensions = ['jpg', 'bmp', 'png', 'gif']
        image_names = [fn for fn in os.listdir(args.datadir)
                       if any(fn.lower().endswith(ext) for ext in included_extensions)]
        image_dir = get_image_dir(args)
        subprocess.call('mkdir -p %s' % image_dir, shell=True)
        for i in range(len(image_names)):
            image_name = image_names[i]
            if H['grayscale']:
                # Read from data_dir, as in the colour branch below.
                orig_img = cv2.imread('%s/%s' % (data_dir, image_name))
                orig_img = cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB)
                if len(orig_img.shape) < 3:
                    orig_img = cv2.cvtColor(orig_img, cv2.COLOR_GRAY2RGB)
            else:
                orig_img = cv2.imread('%s/%s' % (data_dir, image_name))
                orig_img = cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB)
            img = cv2.resize(orig_img, (H["image_width"], H["image_height"]),
                             interpolation=cv2.INTER_CUBIC)
            feed = {x_in: img}
            start_time = time()
            (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences],
                                                            feed_dict=feed)
            pred_anno = al.Annotation()
            pred_anno.imageName = image_name
            new_img, rects = add_rectangles(H, [img], np_pred_confidences, np_pred_boxes,
                                            use_stitching=True, rnn_len=H['rnn_len'],
                                            min_conf=args.min_conf, tau=args.tau,
                                            show_suppressed=args.show_suppressed)
            print(time() - start_time)
            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]), pred_anno,
                                      orig_img.shape[0], orig_img.shape[1], test=True)
            pred_annolist.append(pred_anno)
            imname = '%s/%s' % (image_dir, os.path.basename(image_name))
            misc.imsave(imname, new_img)
            if i % 25 == 0:
                print(i)
    return pred_annolist
def eval(self, weights, test_boxes, min_conf, tau, show_suppressed, expname):
    self.H["grid_width"] = self.H["image_width"] / self.H["region_size"]
    self.H["grid_height"] = self.H["image_height"] / self.H["region_size"]
    x_in = tf.placeholder(tf.float32, name='x_in',
                          shape=[self.H['image_height'], self.H['image_width'], 3])
    if self.H['use_rezoom']:
        pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = \
            self.build_forward(tf.expand_dims(x_in, 0), 'test', reuse=None)
        grid_area = self.H['grid_height'] * self.H['grid_width']
        pred_confidences = tf.reshape(
            tf.nn.softmax(tf.reshape(pred_confs_deltas, [grid_area * self.H['rnn_len'], 2])),
            [grid_area, self.H['rnn_len'], 2])
        if self.H['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = self.build_forward(
            tf.expand_dims(x_in, 0), 'test', reuse=None)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, weights)
        pred_annolist = al.AnnoList()
        true_annolist = al.parse(test_boxes)
        data_dir = os.path.dirname(test_boxes)
        image_dir = self.get_image_dir(weights, expname, test_boxes)
        subprocess.call('mkdir -p %s' % image_dir, shell=True)
        for i in range(len(true_annolist)):
            true_anno = true_annolist[i]
            print(true_anno.imageName)
            orig_img = cv2.imread(true_anno.imageName)[:, :, :3]
            # cvtColor returns a new array; the result must be assigned.
            orig_img = cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB)
            img = cv2.resize(orig_img, (self.H["image_width"], self.H["image_height"]))
            feed = {x_in: img}
            (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences],
                                                            feed_dict=feed)
            pred_anno = al.Annotation()
            pred_anno.imageName = true_anno.imageName
            new_img, rects = add_rectangles(self.H, [img], np_pred_confidences, np_pred_boxes,
                                            use_stitching=True, rnn_len=self.H['rnn_len'],
                                            min_conf=min_conf, tau=tau,
                                            show_suppressed=show_suppressed)
            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes((self.H["image_height"], self.H["image_width"]),
                                      pred_anno, orig_img.shape[0], orig_img.shape[1])
            pred_annolist.append(pred_anno)
            imname = '%s/%s' % (image_dir, os.path.basename(true_anno.imageName))
            cv2.imwrite(imname, new_img)
            if i % 25 == 0:
                print(i)
    return pred_annolist, true_annolist
def get_results(args, H):
    tf.reset_default_graph()
    x_in = tf.placeholder(tf.float32, name='x_in',
                          shape=[H['image_height'], H['image_width'], 3])
    H["grid_width"] = H["image_width"] / H["region_size"]
    H["grid_height"] = H["image_height"] / H["region_size"]
    if H['use_rezoom']:
        pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = \
            build_forward(H, tf.expand_dims(x_in, 0), 'test', reuse=None)
        grid_area = H['grid_height'] * H['grid_width']
        pred_confidences = tf.reshape(
            tf.nn.softmax(tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])),
            [grid_area, H['rnn_len'], 2])
        if H['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, args.weights)
        pred_annolist = al.AnnoList()
        image_dir = args.logdir
        i = 0
        subprocess.call('mkdir -p %s' % image_dir, shell=True)
        for img_list in os.listdir(args.input_images):
            img_path = os.path.join(args.input_images, img_list)
            orig_img = imread(img_path)
            # img = orig_img[0:480, 640:1280]
            img = imresize(orig_img, (H["image_height"], H["image_width"]), interp='cubic')
            feed = {x_in: img}
            (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences],
                                                            feed_dict=feed)
            pred_anno = al.Annotation()
            pred_anno.imageName = img_list
            new_img, rects = add_rectangles(H, [img], np_pred_confidences, np_pred_boxes,
                                            use_stitching=True, rnn_len=H['rnn_len'],
                                            min_conf=args.min_conf, tau=args.tau,
                                            show_suppressed=args.show_suppressed)
            pred_anno.rects = rects
            pred_anno.imagePath = args.input_images
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]), pred_anno,
                                      orig_img.shape[0], orig_img.shape[1])
            pred_annolist.append(pred_anno)
            imname = '%s/%s' % (args.logdir, img_list)
            misc.imsave(imname, new_img)
            i += 1
            if i % 25 == 0:
                print(i)
    return pred_annolist
def get_results(args, H): tf.reset_default_graph() x_in = tf.placeholder(tf.float32, name='x_in', shape=[H['image_height'], H['image_width'], 3]) if H['use_rezoom']: pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = build_forward(H, tf.expand_dims(x_in, 0), 'test', reuse=None) grid_area = H['grid_height'] * H['grid_width'] pred_confidences = tf.reshape(tf.nn.softmax(tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])), [grid_area, H['rnn_len'], 2]) if H['reregress']: pred_boxes = pred_boxes + pred_boxes_deltas else: pred_boxes, pred_logits, pred_confidences = build_forward(H, tf.expand_dims(x_in, 0), 'test', reuse=None) saver = tf.train.Saver() with tf.Session() as sess: # sess.run(tf.initialize_all_variables()) sess.run(tf.global_variables_initializer()) # saver.restore(sess, './output/lstm_rezoom_lung_2017_01_17_18.24/save.ckpt-1000000') print('args.weights: %s' % (args.weights,)) saver.restore(sess, args.weights) print('run') pred_annolist = al.AnnoList() true_annolist = al.parse(args.test_boxes) data_dir = os.path.dirname(args.test_boxes) image_dir = get_image_dir(args) subprocess.call('mkdir -p %s' % image_dir, shell=True) for i in range(len(true_annolist)): true_anno = true_annolist[i] orig_img = imread('%s/%s' % (data_dir, true_anno.imageName))[:,:,:3] img = imresize(orig_img, (H["image_height"], H["image_width"]), interp='cubic') feed = {x_in: img} (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences], feed_dict=feed) pred_anno = al.Annotation() pred_anno.imageName = true_anno.imageName new_img, rects = add_rectangles(H, [img], np_pred_confidences, np_pred_boxes, use_stitching=True, rnn_len=H['rnn_len'], min_conf=0.2, tau=args.tau) pred_anno.rects = rects pred_anno.imagePath = os.path.abspath(data_dir) pred_anno = rescale_boxes((H["image_height"], H["image_width"]), pred_anno, orig_img.shape[0], orig_img.shape[1]) pred_annolist.append(pred_anno) imname = '%s/%s' % (image_dir, os.path.basename(true_anno.imageName)) misc.imsave(imname, new_img) if i % 25 == 0: print(i) return pred_annolist, true_annolist
def get_results(args, H):
    tf.reset_default_graph()
    # The exact input format of the image is unclear.
    x_in = tf.placeholder(tf.float32, name='x_in',
                          shape=[H['image_height'], H['image_width'], 3])
    if H['use_rezoom']:
        pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = \
            build_forward(H, tf.expand_dims(x_in, 0), 'test', reuse=None)
        grid_area = H['grid_height'] * H['grid_width']
        pred_confidences = tf.reshape(
            tf.nn.softmax(tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])),
            [grid_area, H['rnn_len'], 2])
        if H['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, args.weights)
        pred_annolist = al.AnnoList()
        true_annolist = al.parse(args.test_boxes)
        data_dir = os.path.dirname(args.test_boxes)
        image_dir = get_image_dir(args)
        subprocess.call('mkdir -p %s' % image_dir, shell=True)
        for i in range(len(true_annolist)):
            true_anno = true_annolist[i]
            orig_img = imread('%s/%s' % (data_dir, true_anno.imageName))[:, :, :3]
            # Resize the image to the network input resolution.
            img = imresize(orig_img, (H["image_height"], H["image_width"]), interp='cubic')
            # One image per step.
            feed = {x_in: img}
            (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences],
                                                            feed_dict=feed)
            pred_anno = al.Annotation()
            pred_anno.imageName = true_anno.imageName
            # Draw the predicted rectangles on the image.
            new_img, rects = add_rectangles(H, [img], np_pred_confidences, np_pred_boxes,
                                            use_stitching=True, rnn_len=H['rnn_len'],
                                            min_conf=args.min_conf, tau=args.tau,
                                            show_suppressed=args.show_suppressed)
            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            # Rescale the predicted box coordinates back to the original image size.
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]), pred_anno,
                                      orig_img.shape[0], orig_img.shape[1])
            pred_annolist.append(pred_anno)
            imname = '%s/%s' % (image_dir, os.path.basename(true_anno.imageName))
            misc.imsave(imname, new_img)
            if i % 25 == 0:
                print(i)
    # pred_annolist: predictions (all confidence scores plus the predicted x1, y1, x2, y2).
    # true_annolist: x1, y1, x2, y2 exactly as given in the eval file.
    return pred_annolist, true_annolist
def test(config): """ Takes the config, run test program """ data_mean = load_data_mean(config["data"]["idl_mean"], config["net"]["img_width"], config["net"]["img_height"], image_scaling=1.0) num_test_images = 599 # Warning: load_idl returns an infinite generator. Calling list() before islice() will hang. test_list = list( itertools.islice( load_idl(config["data"]["test_idl"], data_mean, config["net"], False), 0, num_test_images)) img = np.copy(test_list[-1]["raw"]) # plt.imshow(img) net = apollocaffe.ApolloNet() net.phase = 'test' forward(net, test_list[0], config["net"], True) net.load("data/snapshot/reinspect_hcs_800000.h5") annolist = al.AnnoList() net_config = config["net"] pix_per_w = net_config["img_width"] / net_config["grid_width"] pix_per_h = net_config["img_height"] / net_config["grid_height"] if config.has_key("conf_th"): conf_th = config["conf_th"] else: conf_th = 0.6 mae = 0. for i in range(num_test_images): inputs = test_list[i] bbox_list, conf_list = forward(net, inputs, net_config, True) img = np.copy(inputs["raw"]) all_rects = [[[] for x in range(net_config["grid_width"])] for y in range(net_config["grid_height"])] for n in range(len(bbox_list)): for k in range(net_config["grid_height"] * net_config["grid_width"]): y = int(k / net_config["grid_width"]) x = int(k % net_config["grid_width"]) bbox = bbox_list[n][k] conf = conf_list[n][k, 1].flatten()[0] # notice the output rect [cx, cy, w, h] # cx means center x-cord abs_cx = pix_per_w / 2 + pix_per_w * x + int(bbox[0, 0, 0]) abs_cy = pix_per_h / 2 + pix_per_h * y + int(bbox[1, 0, 0]) w = bbox[2, 0, 0] h = bbox[3, 0, 0] all_rects[y][x].append(Rect(abs_cx, abs_cy, w, h, conf)) acc_rects = stitch_rects(all_rects) display = True if display: for rect in acc_rects: if rect.true_confidence < conf_th: continue cv2.rectangle(img, (rect.cx - int(rect.width / 2), rect.cy - int(rect.height / 2)), (rect.cx + int(rect.width / 2), rect.cy + int(rect.height / 2)), (255, 0, 0), 2) # cv2.circle(img, # (rect.cx, rect.cy), # ((rect.width + rect.height)/4), # (255,0,0), # 2) img_name = './data/tmp/%05d.jpg' % i plt.imsave(img_name, img) plt.figure(figsize=(15, 10)) plt.imshow(img) anno = al.Annotation() anno.imageName = inputs["imname"] # count number = 0 for rect in acc_rects: r = al.AnnoRect() r.x1 = rect.cx - rect.width / 2. r.x2 = rect.cx + rect.width / 2. r.y1 = rect.cy - rect.height / 2. r.y2 = rect.cy + rect.height / 2. r.score = rect.true_confidence anno.rects.append(r) if r.score > conf_th: number += 1 annolist.append(anno) mae += abs(number - len(inputs["rects"])) print anno.imageName, number, len( inputs["rects"]), abs(number - len(inputs["rects"])) print mae / num_test_images
def get_results(args, H):
    tf.reset_default_graph()
    x_in = tf.placeholder(tf.float32, name='x_in',
                          shape=[H['image_height'], H['image_width'], 3])
    if H['use_rezoom']:
        pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = \
            build_forward(H, tf.expand_dims(x_in, 0), 'test', reuse=None)
        grid_area = H['grid_height'] * H['grid_width']
        pred_confidences = tf.reshape(
            tf.nn.softmax(tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])),
            [grid_area, H['rnn_len'], 2])
        if H['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        saver.restore(sess, args.weights)
        pred_annolist = al.AnnoList()
        true_annolist = al.parse(args.test_boxes)
        data_dir = os.path.dirname(args.test_boxes)
        image_dir = get_image_dir(args)

        # Frames are read from shared-memory segments written by an external process;
        # a second segment carries the current road curvature.
        memory = sysv_ipc.SharedMemory(123463)
        memory2 = sysv_ipc.SharedMemory(123464)
        size = 768, 1024, 3
        pedal = PyMouse()
        pedal.press(1)
        road_center = 320
        while True:
            cv2.waitKey(1)
            frameCount = bytearray(memory.read())
            curve = bytearray(memory2.read())
            curve = str(struct.unpack('i', curve)[0])
            m = np.array(frameCount, dtype=np.uint8)
            orig_img = m.reshape(size)
            img = imresize(orig_img, (H["image_height"], H["image_width"]), interp='cubic')
            feed = {x_in: img}
            (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences],
                                                            feed_dict=feed)
            pred_anno = al.Annotation()
            new_img, rects = add_rectangles(H, [img], np_pred_confidences, np_pred_boxes,
                                            use_stitching=True, rnn_len=H['rnn_len'],
                                            min_conf=args.min_conf, tau=args.tau,
                                            show_suppressed=args.show_suppressed)
            # Brake if any sufficiently wide detection overlaps the road centre.
            flag = 0
            road_center = 320 + int(curve)
            print(road_center)
            for rect in rects:
                print(rect.x1, rect.x2, rect.y2)
                if (rect.x1 < road_center and rect.x2 > road_center and rect.y2 > 200) \
                        and (rect.x2 - rect.x1 > 30):
                    flag = 1
            if flag == 1:
                pedal.press(2)
                print("brake!")
            else:
                pedal.release(2)
                pedal.press(1)
                print("acceleration!")
            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]), pred_anno,
                                      orig_img.shape[0], orig_img.shape[1])
            pred_annolist.append(pred_anno)
            cv2.imshow('.jpg', new_img)

        # Note: the loop above never terminates, so the evaluation pass below is unreachable.
        for i in range(len(true_annolist)):
            true_anno = true_annolist[i]
            orig_img = imread('/home/caucse/images/1.jpg')
            img = imresize(orig_img, (H["image_height"], H["image_width"]), interp='cubic')
            feed = {x_in: img}
            (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences],
                                                            feed_dict=feed)
            pred_anno = al.Annotation()
            pred_anno.imageName = true_anno.imageName
            new_img, rects = add_rectangles(H, [img], np_pred_confidences, np_pred_boxes,
                                            use_stitching=True, rnn_len=H['rnn_len'],
                                            min_conf=args.min_conf, tau=args.tau,
                                            show_suppressed=args.show_suppressed)
            for rect in rects:
                print(rect.x1, rect.y1, rect.x2, rect.y2)
            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]), pred_anno,
                                      orig_img.shape[0], orig_img.shape[1])
            pred_annolist.append(pred_anno)
            imname = '/home/caucse/images/_1.jpg'
            misc.imsave(imname, new_img)
            if i % 25 == 0:
                print(i)
    return pred_annolist, true_annolist
def pal2al(_annolist):
    annotations = AnnotationLib.AnnoList()
    for adesc in _annolist.attribute_desc:
        annotations.attribute_desc[adesc.name] = adesc
        print("attribute: ", adesc.name, adesc.id)
        for valdesc in adesc.val_to_str:
            annotations.add_attribute_val(adesc.name, valdesc.s, valdesc.id)
    attribute_name_from_id = {adesc.id: aname
                              for aname, adesc in annotations.attribute_desc.items()}
    attribute_dtype_from_id = {adesc.id: adesc.dtype
                               for aname, adesc in annotations.attribute_desc.items()}
    for _a in _annolist.annotation:
        anno = AnnotationLib.Annotation()
        anno.imageName = _a.imageName
        anno.rects = []
        for _r in _a.rect:
            rect = AnnotationLib.AnnoRect()
            rect.x1 = _r.x1
            rect.x2 = _r.x2
            rect.y1 = _r.y1
            rect.y2 = _r.y2
            if _r.HasField("id"):
                rect.id = _r.id
            if _r.HasField("track_id"):
                rect.track_id = _r.track_id
            if _r.HasField("score"):
                rect.score = _r.score
            for _at in _r.attribute:
                try:
                    cur_aname = attribute_name_from_id[_at.id]
                    cur_dtype = attribute_dtype_from_id[_at.id]
                except KeyError as e:
                    print("attribute: ", _at.id)
                    print(e)
                    assert False
                if cur_dtype == AnnotationLib.AnnoList.TYPE_INT32:
                    rect.at[cur_aname] = _at.val
                elif cur_dtype == AnnotationLib.AnnoList.TYPE_FLOAT:
                    rect.at[cur_aname] = _at.fval
                elif cur_dtype == AnnotationLib.AnnoList.TYPE_STRING:
                    rect.at[cur_aname] = _at.strval
                else:
                    assert False
            anno.rects.append(rect)
        annotations.append(anno)
    return annotations
def get_results(hypes, sess, image_pl, decoded_logits, validation=True): if hypes['use_rezoom']: pred_boxes = decoded_logits['pred_boxes_new'] else: pred_boxes = decoded_logits['pred_boxes'] pred_confidences = decoded_logits['pred_confidences'] # Build Placeholder shape = [hypes['image_height'], hypes['image_width'], 3] if validation: kitti_txt = os.path.join(hypes['dirs']['data_dir'], hypes['data']['val_file']) else: kitti_txt = os.path.join(hypes['dirs']['data_dir'], hypes['data']['train_file']) # true_annolist = AnnLib.parse(test_idl) val_dir = make_val_dir(hypes, validation) img_dir = make_img_dir(hypes) image_list = [] pred_annolist = AnnLib.AnnoList() files = [line.rstrip() for line in open(kitti_txt)] base_path = os.path.realpath(os.path.dirname(kitti_txt)) for i, file in enumerate(files): image_file = file.split(" ")[0] if not validation and random.random() > 0.2: continue image_file = os.path.join(base_path, image_file) orig_img = scp.misc.imread(image_file)[:, :, :3] img = scp.misc.imresize(orig_img, (hypes["image_height"], hypes["image_width"]), interp='cubic') feed = {image_pl: img} (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences], feed_dict=feed) pred_anno = AnnLib.Annotation() pred_anno.imageName = image_file new_img, rects = utils.train_utils.add_rectangles( hypes, [img], np_pred_confidences, np_pred_boxes, show_removed=False, use_stitching=True, rnn_len=hypes['rnn_len'], min_conf=0.50, tau=hypes['tau'], color_acc=(0, 255, 0)) if validation and i % 15 == 0: image_name = os.path.basename(pred_anno.imageName) image_name = os.path.join(img_dir, image_name) scp.misc.imsave(image_name, new_img) if validation: image_name = os.path.basename(pred_anno.imageName) image_list.append((image_name, new_img)) # get name of file to write to image_name = os.path.basename(image_file) val_file_name = image_name.split('.')[0] + '.txt' val_file = os.path.join(val_dir, val_file_name) # write rects to file pred_anno.rects = rects pred_anno = utils.train_utils.rescale_boxes( (hypes["image_height"], hypes["image_width"]), pred_anno, orig_img.shape[0], orig_img.shape[1]) write_rects(rects, val_file) pred_annolist.append(pred_anno) start_time = time.time() for i in range(100): (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences], feed_dict=feed) dt = (time.time() - start_time) / 100 start_time = time.time() for i in range(100): utils.train_utils.compute_rectangels(hypes, np_pred_confidences, np_pred_boxes, show_removed=False, use_stitching=True, rnn_len=hypes['rnn_len'], min_conf=0.001, tau=hypes['tau']) dt2 = (time.time() - start_time) / 100 return pred_annolist, image_list, dt, dt2
def get_results(args, H): tf.reset_default_graph() x_in = tf.placeholder(tf.float32, name='x_in', shape=[H['image_height'], H['image_width'], 3]) if H['use_rezoom']: pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = build_forward( H, tf.expand_dims(x_in, 0), 'test', reuse=None) grid_area = H['grid_height'] * H['grid_width'] pred_confidences = tf.reshape( tf.nn.softmax( tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])), [grid_area, H['rnn_len'], 2]) if H['reregress']: pred_boxes = pred_boxes + pred_boxes_deltas else: pred_boxes, pred_logits, pred_confidences = build_forward( H, tf.expand_dims(x_in, 0), 'test', reuse=None) saver = tf.train.Saver() with tf.Session() as sess: sess.run(tf.initialize_all_variables()) saver.restore(sess, args.weights) pred_annolist = al.AnnoList() true_annolist = al.parse(args.test_boxes) data_dir = os.path.dirname(args.test_boxes) image_dir = get_image_dir(args) subprocess.call('mkdir -p %s' % image_dir, shell=True) for i in range(len(true_annolist)): true_anno = true_annolist[i] #orig_img = imread('%s/%s' % (data_dir, true_anno.imageName))[:,:,:3] orig_img = imread('%s' % (true_anno.imageName)) img = imresize(orig_img, (H["image_height"], H["image_width"]), interp='cubic') x = np.reshape(img, (H['image_height'], H['image_width'], 1)) new_x = np.zeros((H['image_height'], H['image_width'], 3)) for first in range(0, H['image_height']): for second in range(0, H['image_width']): new_x[first][second] = [ x[first][second], x[first][second], x[first][second] ] feed = {x_in: new_x} (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences], feed_dict=feed) pred_anno = al.Annotation() pred_anno.imageName = true_anno.imageName new_img, rects = add_rectangles( H, [img], np_pred_confidences, np_pred_boxes, use_stitching=True, rnn_len=H['rnn_len'], min_conf=args.min_conf, tau=args.tau, show_suppressed=args.show_suppressed) pred_anno.rects = rects pred_anno.imagePath = os.path.abspath(data_dir) pred_anno = rescale_boxes((H["image_height"], H["image_width"]), pred_anno, orig_img.shape[0], orig_img.shape[1]) pred_annolist.append(pred_anno) imname = '%s/%s' % (image_dir, os.path.basename( true_anno.imageName)) misc.imsave(imname, new_img) for i in range(0, len(rects)): if rects[i].score > 0.1: print pred_anno.imageName print "%s %s %s %s %s" % (rects[i].x1, rects[i].x2, rects[i].y1, rects[i].y2, rects[i].score) # print r.writeJSON(rects[i]) #if i % 25 == 0: # print(i) return pred_annolist, true_annolist
def still_image_TENSORBOX_multiclass(frames_list,path_video_folder,hypes_file,weights_file,pred_idl): from train import build_forward print("Starting DET Phase") if not os.path.exists(path_video_folder+'/'+folder_path_det_frames): os.makedirs(path_video_folder+'/'+folder_path_det_frames) print("Created Folder: %s"%path_video_folder+'/'+folder_path_det_frames) if not os.path.exists(path_video_folder+'/'+folder_path_det_result): os.makedirs(path_video_folder+'/'+folder_path_det_result) print("Created Folder: %s"% path_video_folder+'/'+folder_path_det_result) det_frames_list=[] #### START TENSORBOX CODE ### idl_filename=path_video_folder+'/'+path_video_folder+'.idl' ### Opening Hypes file for parameters with open(hypes_file, 'r') as f: H = json.load(f) ### Building Network tf.reset_default_graph() googlenet = googlenet_load.init(H) x_in = tf.placeholder(tf.float32, name='x_in', shape=[H['image_height'], H['image_width'], 3]) if H['use_rezoom']: pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = build_forward(H, tf.expand_dims(x_in, 0), googlenet, 'test', reuse=None) grid_area = H['grid_height'] * H['grid_width'] pred_confidences = tf.reshape(tf.nn.softmax(tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], H['num_classes']])), [grid_area, H['rnn_len'], H['num_classes']]) pred_logits = tf.reshape(tf.nn.softmax(tf.reshape(pred_logits, [grid_area * H['rnn_len'], H['num_classes']])), [grid_area, H['rnn_len'], H['num_classes']]) if H['reregress']: pred_boxes = pred_boxes + pred_boxes_deltas else: pred_boxes, pred_logits, pred_confidences = build_forward(H, tf.expand_dims(x_in, 0), googlenet, 'test', reuse=None) saver = tf.train.Saver() with tf.Session() as sess: sess.run(tf.initialize_all_variables()) saver.restore(sess, weights_file )##### Restore a Session of the Model to get weights and everything working annolist = al.AnnoList() #### Starting Evaluating the images lenght=int(len(frames_list)) print("%d Frames to DET"%len(frames_list)) progress = progressbar.ProgressBar(widgets=[progressbar.Bar('=', '[', ']'), ' ',progressbar.Percentage(), ' ',progressbar.ETA()]) frameNr=0 skipped=0 for i in progress(range(0, len(frames_list))): if Utils_Image.isnotBlack(frames_list[i]) & Utils_Image.check_image_with_pil(frames_list[i]): img = imread(frames_list[i]) feed = {x_in: img} (np_pred_boxes,np_pred_logits, np_pred_confidences) = sess.run([pred_boxes,pred_logits, pred_confidences], feed_dict=feed) # print_logits(np_pred_confidences) pred_anno = al.Annotation() #pred_anno.imageName = test_anno.imageName # print "np_pred_confidences shape" + str(np_pred_confidences.shape) # print "np_pred_boxes shape" + str(np_pred_boxes.shape) # for i in range(0, np_pred_confidences.shape[0]): # print np_pred_confidences[i] # for j in range(0, np_pred_confidences.shape[2]): # print np_pred_confidences[i][0][j] rects = get_multiclass_rectangles(H, np_pred_confidences, np_pred_boxes, rnn_len=H['rnn_len']) pred_anno.rects = rects pred_anno.imageName = frames_list[i] pred_anno.frameNr = frameNr frameNr=frameNr+1 det_frames_list.append(frames_list[i]) pick = NMS(rects) draw_rectangles(frames_list[i],frames_list[i], pick) annolist.append(pred_anno) else: skipped=skipped+1 saveTextResults(idl_filename,annolist) annolist.save(pred_idl) print("Skipped %d Black Frames"%skipped) #### END TENSORBOX CODE ### return det_frames_list
def get_results(args, H): tf.reset_default_graph() x_in = tf.placeholder(tf.float32, name='x_in', shape=[H['image_height'], H['image_width'], 3]) if H['use_rezoom']: pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = build_forward( H, tf.expand_dims(x_in, 0), 'test', reuse=None) grid_area = H['grid_height'] * H['grid_width'] pred_confidences = tf.reshape( tf.nn.softmax( tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])), [grid_area, H['rnn_len'], 2]) if H['reregress']: pred_boxes = pred_boxes + pred_boxes_deltas else: pred_boxes, pred_logits, pred_confidences = build_forward( H, tf.expand_dims(x_in, 0), 'test', reuse=None) saver = tf.train.Saver() with tf.Session() as sess: sess.run(tf.global_variables_initializer()) saver.restore(sess, args.weights) pred_annolist = al.AnnoList() true_annolist = al.parse(args.test_boxes) data_dir = os.path.dirname(args.test_boxes) image_dir = get_image_dir(args) os.makedirs(image_dir, exist_ok=True) print('Outputs will be stored in {}'.format(image_dir)) for i in range(len(true_annolist)): try: true_anno = true_annolist[i] orig_img = imread('%s/%s' % (data_dir, true_anno.imageName))[:, :, :3] img = imresize(orig_img, (H["image_height"], H["image_width"]), interp='cubic') feed = {x_in: img} (np_pred_boxes, np_pred_confidences) = sess.run( [pred_boxes, pred_confidences], feed_dict=feed) pred_anno = al.Annotation() pred_anno.imageName = true_anno.imageName new_img, rects = add_rectangles( H, [img], np_pred_confidences, np_pred_boxes, use_stitching=True, rnn_len=H['rnn_len'], min_conf=args.min_conf, tau=args.tau, show_suppressed=args.show_suppressed) rects = [r for r in rects if r.x1 < r.x2 and r.y1 < r.y2] pred_anno.rects = rects pred_anno.imagePath = os.path.abspath(data_dir) pred_anno = rescale_boxes( (H["image_height"], H["image_width"]), pred_anno, orig_img.shape[0], orig_img.shape[1], test=True) pred_annolist.append(pred_anno) imname = '%s/%s' % (image_dir, os.path.basename(true_anno.imageName)) misc.imsave(imname, new_img) except FileNotFoundError: pass if i % 25 == 0: print(i) return pred_annolist, true_annolist
def still_image_TENSORBOX(idl_filename, frames_list, folder_path_det_frames,
                          folder_path_det_result, folder_path_frames, path_video_folder,
                          hypes_file, weights_file, pred_idl):
    print("Starting DET Phase")
    if not os.path.exists(path_video_folder + '/' + folder_path_det_frames):
        os.makedirs(path_video_folder + '/' + folder_path_det_frames)
        print("Created Folder: %s" % (path_video_folder + '/' + folder_path_det_frames))
    if not os.path.exists(path_video_folder + '/' + folder_path_det_result):
        os.makedirs(path_video_folder + '/' + folder_path_det_result)
        print("Created Folder: %s" % (path_video_folder + '/' + folder_path_det_result))
    det_frames_list = []

    #### START TENSORBOX CODE ###

    ### Opening Hypes file for parameters
    with open(hypes_file, 'r') as f:
        H = json.load(f)

    ### Get annotation list of all the images to test
    test_annos = al.parse(idl_filename)

    ### Building network
    tf.reset_default_graph()
    googlenet = googlenet_load.init(H)
    x_in = tf.placeholder(tf.float32, name='x_in',
                          shape=[H['arch']['image_height'], H['arch']['image_width'], 3])
    if H['arch']['use_rezoom']:
        pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = \
            build_forward(H, tf.expand_dims(x_in, 0), googlenet, 'test', reuse=None)
        grid_area = H['arch']['grid_height'] * H['arch']['grid_width']
        pred_confidences = tf.reshape(
            tf.nn.softmax(tf.reshape(pred_confs_deltas, [grid_area * H['arch']['rnn_len'], 2])),
            [grid_area, H['arch']['rnn_len'], 2])
        if H['arch']['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(
            H, tf.expand_dims(x_in, 0), googlenet, 'test', reuse=None)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        # Restore a session of the model to get the trained weights.
        saver.restore(sess, weights_file)
        annolist = al.AnnoList()
        import time
        t = time.time()

        #### Start evaluating the images
        print("%d Frames to DET" % len(frames_list))
        progress = progressbar.ProgressBar(widgets=[progressbar.Bar('=', '[', ']'), ' ',
                                                    progressbar.Percentage(), ' ',
                                                    progressbar.ETA()])
        for i in progress(range(0, len(frames_list) - 1)):
            img = imread(frames_list[i])
            feed = {x_in: img}
            (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences],
                                                            feed_dict=feed)
            pred_anno = al.Annotation()
            new_img, rects = add_rectangles(H, [img], np_pred_confidences, np_pred_boxes,
                                            H["arch"], use_stitching=True,
                                            rnn_len=H['arch']['rnn_len'], min_conf=0.5)
            pred_anno.rects = rects
            bb_img = Image.open(frames_list[i])
            for bb_rect in rects:
                ################ Adding Rectangle ###################
                dr = ImageDraw.Draw(bb_img)
                cor = (bb_rect.x1, bb_rect.y1, bb_rect.x2, bb_rect.y2)  # to verify; attempt 2: (x1, y1, x2, y2)
                # cor = (bb_rect.left(), bb_rect.right(), bb_rect.bottom(), bb_rect.top())  # attempt 1
                dr.rectangle(cor, outline="red")
            bb_img_det_name = frames_list[i].replace(folder_path_frames, folder_path_det_frames)
            bb_img.save(bb_img_det_name)
            det_frames_list.append(bb_img_det_name)
            annolist.append(pred_anno)
    annolist.save(pred_idl)

    #### END TENSORBOX CODE ###
    return det_frames_list
def get_results(args, H):
    tf.reset_default_graph()
    H["grid_width"] = H["image_width"] / H["region_size"]
    H["grid_height"] = H["image_height"] / H["region_size"]
    x_in = tf.placeholder(tf.float32, name='x_in',
                          shape=[H['image_height'], H['image_width'], 3])
    if H['use_rezoom']:
        pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = \
            build_forward(H, tf.expand_dims(x_in, 0), 'test', reuse=None)
        grid_area = H['grid_height'] * H['grid_width']
        pred_confidences = tf.reshape(
            tf.nn.softmax(tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])),
            [grid_area, H['rnn_len'], 2])
        if H['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
    saver = tf.train.Saver()
    all_predictions = []
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, args.weights)
        pred_annolist = al.AnnoList()
        true_annolist = al.parse(args.test_boxes)
        data_dir = os.path.join(os.path.dirname(args.test_boxes))
        false_positives, false_negatives, true_positives = 0, 0, 0
        total_time = 0.0
        image_dir = get_image_dir(args)
        subprocess.call('mkdir -p %s' % image_dir, shell=True)
        for i in range(len(true_annolist)):
            true_anno = true_annolist[i]
            orig_img = imread('%s/%s' % (data_dir, true_anno.imageName))[:, :, :3]
            img = imresize(orig_img, (H["image_height"], H["image_width"]), interp='cubic')
            feed = {x_in: img}
            t0 = time.time()
            (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences],
                                                            feed_dict=feed)
            total_time += time.time() - t0
            pred_anno = al.Annotation()
            pred_anno.imageName = true_anno.imageName
            new_img, rects, all_rects = add_rectangles(H, [img], np_pred_confidences,
                                                       np_pred_boxes, use_stitching=True,
                                                       rnn_len=H['rnn_len'],
                                                       min_conf=args.min_conf, tau=args.tau,
                                                       show_suppressed=args.show_suppressed)
            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]), pred_anno,
                                      orig_img.shape[0], orig_img.shape[1])
            pred_annolist.append(pred_anno)
            all_predictions.extend([[r.x1, r.y1, r.x2, r.y2, r.score, i] for r in all_rects])
            prediction = np.array([[r.x1, r.y1, r.x2, r.y2, r.score] for r in rects])
            targets = np.array([[r.x1, r.y1, r.x2, r.y2] for r in true_anno.rects])
            fp, fn, tp, jaccard = get_metrics(targets, prediction)
            false_positives += fp
            false_negatives += fn
            true_positives += tp
            precision = np.float64(true_positives) / (true_positives + false_positives)
            recall = np.float64(true_positives) / (true_positives + false_negatives)
            print('[%d/%d]: False positives: %d, False negatives: %d, True positives: %d, '
                  'Precision: %f, Recall: %f'
                  % (i, len(true_annolist), false_positives, false_negatives,
                     true_positives, precision, recall))
    df = pandas.DataFrame(all_predictions)
    df.columns = ['x1', 'y1', 'x2', 'y2', 'score', 'image_id']
    print('Total time: %.4f seconds, per image: %.4f'
          % (total_time, total_time / len(true_annolist)))
    return df
def get_results(args, H): tf.reset_default_graph() x_in = tf.placeholder(tf.float32, name='x_in', shape=[H['image_height'], H['image_width'], 3]) p1_x_in = tf.placeholder(tf.float32, name='p1_x_in', shape=[H['image_height'], H['image_width'], 3]) p2_x_in = tf.placeholder(tf.float32, name='p2_x_in', shape=[H['image_height'], H['image_width'], 3]) p3_x_in = tf.placeholder(tf.float32, name='p3_x_in', shape=[H['image_height'], H['image_width'], 3]) p4_x_in = tf.placeholder(tf.float32, name='p4_x_in', shape=[H['image_height'], H['image_width'], 3]) p5_x_in = tf.placeholder(tf.float32, name='p5_x_in', shape=[H['image_height'], H['image_width'], 3]) p6_x_in = tf.placeholder(tf.float32, name='p6_x_in', shape=[H['image_height'], H['image_width'], 3]) p7_x_in = tf.placeholder(tf.float32, name='p7_x_in', shape=[H['image_height'], H['image_width'], 3]) p8_x_in = tf.placeholder(tf.float32, name='p8_x_in', shape=[H['image_height'], H['image_width'], 3]) f_x_in = tf.placeholder(tf.float32, name='f_x_in', shape=[H['image_height'], H['image_width'], 3]) if H['use_rezoom']: pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = build_forward( H, tf.expand_dims(x_in, 0), tf.expand_dims(p1_x_in, 0), tf.expand_dims(p2_x_in, 0), tf.expand_dims(p3_x_in, 0), tf.expand_dims(p4_x_in, 0), tf.expand_dims(p5_x_in, 0), tf.expand_dims(p6_x_in, 0), tf.expand_dims(p7_x_in, 0), tf.expand_dims(p8_x_in, 0), tf.expand_dims(f_x_in, 0), 'test', reuse=None) grid_area = H['grid_height'] * H['grid_width'] pred_confidences = tf.reshape( tf.nn.softmax( tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])), [grid_area, H['rnn_len'], 2]) if H['reregress']: pred_boxes = pred_boxes + pred_boxes_deltas else: pred_boxes, pred_logits, pred_confidences = build_forward( H, tf.expand_dims(x_in, 0), 'test', reuse=None) saver = tf.train.Saver() with tf.Session() as sess: sess.run(tf.initialize_all_variables()) saver.restore(sess, args.weights) pred_annolist = al.AnnoList() true_annolist = al.parse(args.test_boxes) data_dir = os.path.dirname(args.test_boxes) image_dir = get_image_dir(args) subprocess.call('mkdir -p %s' % image_dir, shell=True) count_error = list() for i in range(20): count_error.append(0) for i in range(len(true_annolist)): true_anno = true_annolist[i] orig_img = imread('%s/%s' % (data_dir, true_anno.imageName))[:, :, :3] dir_path = os.path.dirname(true_anno.imageName) file_name = true_anno.imageName.split('/')[-1] (shotname, extension) = os.path.splitext(file_name) p1_image_path = data_dir + "/" + dir_path + "/" + ( str(int(shotname) - 1)).zfill(4) + ".png" p2_image_path = data_dir + "/" + dir_path + "/" + ( str(int(shotname) - 2)).zfill(4) + ".png" p3_image_path = data_dir + "/" + dir_path + "/" + ( str(int(shotname) - 3)).zfill(4) + ".png" p4_image_path = data_dir + "/" + dir_path + "/" + ( str(int(shotname) - 4)).zfill(4) + ".png" p5_image_path = data_dir + "/" + dir_path + "/" + ( str(int(shotname) - 5)).zfill(4) + ".png" p6_image_path = data_dir + "/" + dir_path + "/" + ( str(int(shotname) - 6)).zfill(4) + ".png" p7_image_path = data_dir + "/" + dir_path + "/" + ( str(int(shotname) - 7)).zfill(4) + ".png" p8_image_path = data_dir + "/" + dir_path + "/" + ( str(int(shotname) - 8)).zfill(4) + ".png" f_image_path = data_dir + "/" + dir_path + "/" + ( str(int(shotname) + 1)).zfill(4) + ".png" if not os.path.exists(p1_image_path): print "File not exists: %s" % p1_image_path exit() if not os.path.exists(p2_image_path): print "File not exists: %s" % p2_image_path exit() if not 
os.path.exists(f_image_path): print "File not exists: %s" % f_image_path exit() p1_img = imread(p1_image_path) p2_img = imread(p2_image_path) p3_img = imread(p3_image_path) p4_img = imread(p4_image_path) p5_img = imread(p5_image_path) p6_img = imread(p6_image_path) p7_img = imread(p7_image_path) p8_img = imread(p8_image_path) f_img = imread(f_image_path) img = imresize(orig_img, (H["image_height"], H["image_width"]), interp='cubic') feed = { x_in: img, p1_x_in: p1_img, p2_x_in: p2_img, p3_x_in: p3_img, p4_x_in: p4_img, p5_x_in: p5_img, p6_x_in: p6_img, p7_x_in: p7_img, p8_x_in: p8_img, f_x_in: f_img } (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences], feed_dict=feed) pred_anno = al.Annotation() pred_anno.imageName = true_anno.imageName true_count = len(true_anno.rects) # print true_count for j in range(20): min_confidence = (j * 1.0) / 20.0 new_img, rects, count = add_rectangles( H, [img], np_pred_confidences, np_pred_boxes, use_stitching=True, rnn_len=H['rnn_len'], min_conf=min_confidence, tau=args.tau, show_suppressed=args.show_suppressed) count_error[j] += abs(count - true_count) pred_anno.rects = rects pred_anno.imagePath = os.path.abspath(data_dir) pred_anno = rescale_boxes((H["image_height"], H["image_width"]), pred_anno, orig_img.shape[0], orig_img.shape[1]) pred_annolist.append(pred_anno) # imname = '%s/%s' % (image_dir, os.path.basename(true_anno.imageName)) # misc.imsave(imname, new_img) if i % 25 == 0: print(i) print "Count error: %s" % str(min(count_error) / 350.0) return pred_annolist, true_annolist
def pred(self, weights, test_boxes, min_conf, tau, show_suppressed, expname):
    self.H["grid_width"] = self.H["image_width"] / self.H["region_size"]
    self.H["grid_height"] = self.H["image_height"] / self.H["region_size"]
    x_in = tf.placeholder(tf.float32, name='x_in',
                          shape=[self.H['image_height'], self.H['image_width'], 3])
    if self.H['use_rezoom']:
        pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = \
            self.build_forward(tf.expand_dims(x_in, 0), 'test', reuse=None)
        grid_area = self.H['grid_height'] * self.H['grid_width']
        pred_confidences = tf.reshape(
            tf.nn.softmax(tf.reshape(pred_confs_deltas, [grid_area * self.H['rnn_len'], 2])),
            [grid_area, self.H['rnn_len'], 2])
        if self.H['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = self.build_forward(
            tf.expand_dims(x_in, 0), 'test', reuse=None)
    rect_list = []
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, weights)
        pred_annolist = al.AnnoList()
        true_annolist = al.parse(test_boxes)
        data_dir = os.path.dirname(test_boxes)
        image_dir = self.get_image_dir(weights, expname, test_boxes)
        subprocess.call('mkdir -p %s' % image_dir, shell=True)
        for i in range(len(true_annolist)):
            true_anno = true_annolist[i]
            orig_img = imread('%s/%s' % (data_dir, true_anno.imageName))[:, :, :3]
            img = imresize(orig_img, (self.H["image_height"], self.H["image_width"]),
                           interp='cubic')
            feed = {x_in: img}
            (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences],
                                                            feed_dict=feed)
            pred_anno = al.Annotation()
            pred_anno.imageName = true_anno.imageName
            new_img, rects = add_rectangles(self.H, [img], np_pred_confidences, np_pred_boxes,
                                            use_stitching=True, rnn_len=self.H['rnn_len'],
                                            min_conf=min_conf, tau=tau,
                                            show_suppressed=show_suppressed)
            print('tb model', len(rects))
            # pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes((self.H["image_height"], self.H["image_width"]),
                                      pred_anno, orig_img.shape[0], orig_img.shape[1])
            pred_annolist.append(pred_anno)
            imname = 'box_sample.jpg'
            misc.imsave(imname, new_img)
            if i % 25 == 0:
                print(i)
            # Normalise box coordinates by the 640x480 network input size.
            for rect_i in range(0, len(rects)):
                rect_list.append({})
                rect_list[-1]['x_min'] = rects[rect_i].left() / 640.
                rect_list[-1]['x_max'] = rects[rect_i].right() / 640.
                rect_list[-1]['y_min'] = rects[rect_i].top() / 480.
                rect_list[-1]['y_max'] = rects[rect_i].bottom() / 480.
    return rect_list
def run_eval(H, checkpoint_dir, hypes_file, output_path): """Do Evaluation with full epoche of data. Args: H: Hypes checkpoint_dir: directory with checkpoint files output_path: path to save results """ #Load GT true_idl = H['data']['test_idl'] true_annos = al.parse(true_idl) # define output files pred_file = 'val_%s.idl' % os.path.basename(hypes_file).replace( '.json', '') pred_idl = os.path.join(output_path, pred_file) true_file = 'true_%s.idl' % os.path.basename(hypes_file).replace( '.json', '') true_idl_scaled = os.path.join(output_path, true_file) data_folder = os.path.dirname(os.path.realpath(true_idl)) #Load Graph Model tf.reset_default_graph() googlenet = googlenet_load.init(H) x_in = tf.placeholder(tf.float32, name='x_in') if H['arch']['use_lstm']: lstm_forward = build_lstm_forward(H, tf.expand_dims(x_in, 0), googlenet, 'test', reuse=None) pred_boxes, pred_logits, pred_confidences = lstm_forward else: overfeat_forward = build_overfeat_forward(H, tf.expand_dims(x_in, 0), googlenet, 'test') pred_boxes, pred_logits, pred_confidences = overfeat_forward start_time = time.time() saver = tf.train.Saver() with tf.Session() as sess: logging.info("Starting Evaluation") sess.run(tf.initialize_all_variables()) # Restore Checkpoints ckpt = tf.train.get_checkpoint_state(checkpoint_dir) if ckpt and ckpt.model_checkpoint_path: logging.info(ckpt.model_checkpoint_path) saver.restore(sess, ckpt.model_checkpoint_path) annolist = al.AnnoList() trueanno = al.AnnoList() #shuffle true_annos to randomize plottet Images shuffle(true_annos) for i in range(len(true_annos)): true_anno = true_annos[i] img = imread(os.path.join(data_folder, true_anno.imageName)) # Rescale Boxes trueanno.append( rescale_boxes(img.shape, true_annos[i], H["arch"]["image_height"], H["arch"]["image_width"])) # Rescale Images img = imresize( img, (H["arch"]["image_height"], H["arch"]["image_width"]), interp='cubic') feed = {x_in: img} (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences], feed_dict=feed) pred_anno = al.Annotation() pred_anno.imageName = true_anno.imageName new_img, rects = add_rectangles([img], np_pred_confidences, np_pred_boxes, H["arch"], use_stitching=True, rnn_len=H['arch']['rnn_len'], min_conf=0.3) pred_anno.rects = rects annolist.append(pred_anno) if i % 20 == 0: # Draw every 20th Image; # plotted Image is randomized due to shuffling duration = time.time() - start_time duration = float(duration) * 1000 / 20 out_img = os.path.join(output_path, 'test_%i.png' % i) scp.misc.imsave(out_img, new_img) logging.info('Step %d: Duration %.3f ms' % (i, duration)) start_time = time.time() annolist.save(pred_idl) trueanno.save(true_idl_scaled) # write results to disk iou_threshold = 0.5 rpc_cmd = './utils/annolist/doRPC.py --minOverlap %f %s %s' % ( iou_threshold, true_idl_scaled, pred_idl) rpc_output = subprocess.check_output(rpc_cmd, shell=True) txt_file = [line for line in rpc_output.split('\n') if line.strip()][-1] output_png = os.path.join(output_path, "roc.png") plot_cmd = './utils/annolist/plotSimple.py %s --output %s' % (txt_file, output_png) plot_output = subprocess.check_output(plot_cmd, shell=True)
def evaluate(H, valids, param_path, thr = 0.7, l = 60000, r = 120010, sep = 10000, with_anno = True): true_annos = al.parse(valids) L = range(l, r, sep) for iteration in L: tf.reset_default_graph() # print(H['batch_size']) x_in = tf.placeholder(tf.float32, name='x_in', shape=[H['image_height'], H['image_width'], 3]) if H['use_rezoom']: pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = build_forward( H, tf.expand_dims(x_in, 0), 'test', reuse=None) grid_area = H['grid_height'] * H['grid_width'] pred_confidences = tf.reshape(tf.nn.softmax( tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])), [grid_area, H['rnn_len'], 2]) if H['reregress']: pred_boxes = pred_boxes + pred_boxes_deltas else: pred_boxes, pred_logits, pred_confidences = build_forward(H, tf.expand_dims(x_in, 0), 'test', reuse=None) saver = tf.train.Saver() gpu_options = tf.GPUOptions() gpu_options.allow_growth=True config = tf.ConfigProto(gpu_options=gpu_options) with tf.Session(config = config) as sess: sess.run(tf.global_variables_initializer()) print('load from ' + (param_path + 'save.ckpt-%d' % iteration)) saver.restore(sess, param_path + 'save.ckpt-%d' % iteration) annolist = al.AnnoList() rslt = [] t = time.time() if not os.path.exists(param_path + 'val'): os.makedirs(param_path + 'val') for i in range(len(true_annos)): true_anno = true_annos[i] img = imread(SAMPLE_DIR + true_anno.imageName) feed = {x_in: img} (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences], feed_dict=feed) pred_anno = al.Annotation() pred_anno.imageName = true_anno.imageName new_img, rects = add_rectangles(H, [img], np_pred_confidences, np_pred_boxes, use_stitching=True, rnn_len=H['rnn_len'], min_conf=thr, show_suppressed=False) pred_anno.rects = rects annolist.append(pred_anno) fname = true_anno.imageName if with_anno: imwrite(param_path + 'val/' + fname[fname.rindex('/') + 1 : -4] + '_' + str(iteration) + '_pred.jpg', new_img) shutil.copy(SAMPLE_DIR + true_anno.imageName[:-4] + '_gt.bmp', param_path + 'val/' + fname[fname.rindex('/') + 1 : -4] + '_gt.bmp') box_confs = trans(np_pred_boxes, H, np_pred_confidences, thr) ret = { 'file' : fname, 'box' : box_confs.tolist() } rslt.append(ret) avg_time = (time.time() - t) / (i + 1) print('%f images/sec' % (1. / avg_time)) rrslt = [] for it in rslt: it['box'] = filterBoxes(it['box'], 0.1) with open(param_path + 'result_' + str(iteration) + '.json', 'w') as f: json.dump(rslt, f) generate_result(TRUNK_DIR, rslt, param_path + 'csv_' + str(iteration) + '.csv')
def get_results(hypes, sess, image_pl, calib_pl, xy_scale_pl, decoded_logits, validation=True, encoder_out=None): pred_encoder = None if encoder_out is not None: pred_encoder = encoder_out['deep_feat'] pred_boxes = decoded_logits['pred_boxes_new'] #pred_boxes = decoded_logits['pred_bbox_proj'] pred_depths = decoded_logits['pred_depths_new'] pred_locations = decoded_logits['pred_locations_new'] pred_confidences = decoded_logits['pred_confidences'] pred_corners = decoded_logits['pred_corners'] refined_global_corners = decoded_logits['refined_global_corners'] # Build Placeholder shape = [hypes['image_height'], hypes['image_width'], 3] if validation: kitti_txt = os.path.join(hypes['dirs']['data_dir'], hypes['data']['val_file']) else: kitti_txt = os.path.join(hypes['dirs']['data_dir'], hypes['data']['train_file']) # true_annolist = AnnLib.parse(test_idl) val_dir = make_val_dir(hypes, validation) img_dir = make_img_dir(hypes) feature_dir = make_feature_dir(hypes) image_list = [] pred_annolist = AnnLib.AnnoList() files = [line.rstrip() for line in open(kitti_txt)] base_path = os.path.realpath(os.path.dirname(kitti_txt)) for i, file in tqdm.tqdm(enumerate(files), total=len(files)): image_file = file.split(" ")[0] if not validation and random.random() > 0.2: continue image_file_split = image_file.split('/') image_file = os.path.join(base_path, image_file) index = image_file_split[-1].split('.')[0] calib_file = os.path.join(base_path, image_file_split[0], 'calib', index + '.txt') orig_img = scp.misc.imread(image_file)[:, :, :3] xy_scale = np.reshape([ hypes['image_width'] * 1.0 / orig_img.shape[1], hypes['image_height'] * 1.0 / orig_img.shape[0] ], (1, 1, 1, 2)).astype(np.float32) xy_scale = np.repeat(xy_scale, hypes['grid_height'], axis=1) xy_scale = np.repeat(xy_scale, hypes['grid_width'], axis=2) img = scp.misc.imresize(orig_img, (hypes["image_height"], hypes["image_width"]), interp='cubic') calibs = [line.rstrip().split(' ') for line in open(calib_file)] assert calibs[2][0] == 'P2:' calib = np.reshape(calibs[2][1:], (1, 1, 1, 3, 4)).astype(np.float32) calib = np.repeat(calib, hypes['grid_height'], axis=1) calib = np.repeat(calib, hypes['grid_width'], axis=2) feed = {image_pl: img, calib_pl: calib, xy_scale_pl: xy_scale} ## Connor if encoder_out is not None: (np_pred_boxes, np_pred_confidences, np_refined_global_corners, encoded_features) = sess.run([ pred_boxes, pred_confidences, refined_global_corners, pred_encoder ], feed_dict=feed) feature_name = os.path.basename(image_file).split('.')[0] feature_filepath = os.path.join(feature_dir, feature_name) np.save(feature_filepath, encoded_features) else: (np_pred_boxes, np_pred_confidences, np_refined_global_corners) = sess.run( [pred_boxes, pred_confidences, refined_global_corners], feed_dict=feed) """ depth_map = np.reshape(np_pred_depths, (12, 39)) depth_map = depth_map / np.amax(depth_map) depth_map[depth_map<0] = 0 depth_map = (depth_map * 255).astype(np.uint8) depth_map = scp.misc.imresize(depth_map, (120, 390)) #scp.misc.imsave('./visualize/kittiBox/{}_depth_map.png'.format(index), depth_map) plt.figure(figsize=(12, 4)) plt.imshow(depth_map, cmap='winter') plt.savefig('./visualize/kittiBox/{}_depth_map_pred.png'.format(index)) plt.close() depth_map_gt = tf.get_collection('depth_map_gt') np_depth_map_gt = sess.run(depth_map_gt, feed_dict=feed)[0] depth_map = np.reshape(np_depth_map_gt, (12, 39)) depth_map = depth_map / np.amax(depth_map) depth_map[depth_map<0] = 0 depth_map = (depth_map * 255).astype(np.uint8) depth_map = 
scp.misc.imresize(depth_map, (120, 390)) #scp.misc.imsave('./visualize/kittiBox/{}_depth_map.png'.format(index), depth_map) plt.figure(figsize=(12, 4)) plt.imshow(depth_map, cmap='winter') plt.savefig('./visualize/kittiBox/{}_depth_map_true.png'.format(index)) plt.close() """ outer_size = hypes['grid_width'] * hypes['grid_height'] * 1 np_refined_corners = np_refined_global_corners.reshape( (outer_size, 3, 8)) np_pred_x = np.mean(np_refined_corners[:, 0, :], axis=-1, keepdims=True) np_pred_y = np.mean(np_refined_corners[:, 1, :4], axis=-1, keepdims=True) np_pred_depths = np.mean(np_refined_corners[:, 2, :], axis=-1, keepdims=True) np_pred_locations = np.concatenate( [np_pred_x, np_pred_y, np_pred_depths], axis=1) np_pred_corners = np.reshape( np_refined_corners - np_pred_locations.reshape(outer_size, 3, 1), (outer_size, 24)) pred_anno = AnnLib.Annotation() pred_anno.imageName = image_file new_img, rects = utils.train_utils.add_rectangles( hypes, [img], np_pred_confidences, np_pred_boxes, np_pred_depths, np_pred_locations, np_pred_corners, show_removed=False, use_stitching=True, rnn_len=hypes['rnn_len'], min_conf=0.50, tau=hypes['tau'], color_acc=(0, 255, 0)) if validation and i % 30 == 0: image_name = os.path.basename(pred_anno.imageName) image_name = os.path.join(img_dir, image_name) scp.misc.imsave(image_name, new_img) if validation: image_name = os.path.basename(pred_anno.imageName) image_list.append((image_name, new_img)) # get name of file to write to image_name = os.path.basename(image_file) val_file_name = image_name.split('.')[0] + '.txt' val_file = os.path.join(val_dir, val_file_name) # write rects to file for rect in rects: rect.calib = calib pred_anno.rects = rects pred_anno = utils.train_utils.rescale_boxes( (hypes["image_height"], hypes["image_width"]), pred_anno, orig_img.shape[0], orig_img.shape[1]) write_rects(rects, val_file) pred_annolist.append(pred_anno) start_time = time.time() for i in xrange(100): (np_pred_boxes, np_pred_confidence, np_pred_depths, np_pred_locations) = \ sess.run([pred_boxes, pred_confidences, pred_depths, pred_locations], feed_dict=feed) dt = (time.time() - start_time) / 100 start_time = time.time() for i in xrange(100): utils.train_utils.compute_rectangels(hypes, np_pred_confidences, np_pred_boxes, np_pred_depths, np_pred_locations, show_removed=False, use_stitching=True, rnn_len=hypes['rnn_len'], min_conf=0.001, tau=hypes['tau']) dt2 = (time.time() - start_time) / 100 return pred_annolist, image_list, dt, dt2
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        saver.restore(
            sess,
            '/home/craig/Desktop/tensorbox/output/lstm_rezoom_2016_12_26_00.02/save.ckpt-30000')
        annolist = al.AnnoList()
        import time
        t = time.time()
        for i in range(0, len(true_annos)):
            true_anno = true_annos[i]
            img = imread('./new_labels/%s' % true_anno.imageName)
            feed = {x_in: img}
            (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences],
                                                            feed_dict=feed)
            pred_anno = al.Annotation()
            pred_anno.imageName = true_anno.imageName
            new_img, rects = add_rectangles(H, [img], np_pred_confidences, np_pred_boxes,
                                            use_stitching=True,
def get_results(args, H):
    tf.reset_default_graph()
    H["grid_width"] = H["image_width"] / H["region_size"]
    H["grid_height"] = H["image_height"] / H["region_size"]
    x_in = tf.placeholder(tf.float32, name='x_in',
                          shape=[H['image_height'], H['image_width'], 3])
    if H['use_rezoom']:
        (pred_boxes, pred_logits, pred_confidences, pred_confs_deltas,
         pred_boxes_deltas) = build_forward(H, tf.expand_dims(x_in, 0),
                                            'test', reuse=None)
        grid_area = H['grid_height'] * H['grid_width']
        pred_confidences = tf.reshape(
            tf.nn.softmax(
                tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])),
            [grid_area, H['rnn_len'], 2])
        if H['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, args.weights)

        pred_annolist = al.AnnoList()
        true_annolist = al.parse(args.test_boxes)
        data_dir = os.path.dirname(args.test_boxes)
        image_dir = get_image_dir(args)
        subprocess.call('mkdir -p %s' % image_dir, shell=True)
        for i in range(len(true_annolist)):
            true_anno = true_annolist[i]
            orig_img = cv2.imread(
                os.path.join(data_dir, true_anno.imageName))[:, :, :3]
            img = cv2.resize(orig_img, (H["image_width"], H["image_height"]),
                             interpolation=cv2.INTER_CUBIC)
            feed = {x_in: img}
            (np_pred_boxes, np_pred_confidences) = sess.run(
                [pred_boxes, pred_confidences], feed_dict=feed)
            pred_anno = al.Annotation()
            pred_anno.imageName = true_anno.imageName
            new_img, rects = add_rectangles(
                H, [img], np_pred_confidences, np_pred_boxes,
                use_stitching=True, rnn_len=H['rnn_len'],
                min_conf=args.min_conf, tau=args.tau,
                show_suppressed=args.show_suppressed)
            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]),
                                      pred_anno, orig_img.shape[0],
                                      orig_img.shape[1])
            pred_annolist.append(pred_anno)
            imname = os.path.join(image_dir,
                                  os.path.basename(true_anno.imageName))
            cv2.imwrite(imname, new_img)
            if i % 25 == 0:
                print(i)
    return pred_annolist, true_annolist
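# Hedged sketch of a driver for the get_results(args, H) variant above. The
# flag names simply mirror the attributes the function reads from `args`; the
# hypes.json location next to the checkpoint is an assumption, and
# get_image_dir(args) may read additional attributes not modelled here.
import argparse
import json
import os

parser = argparse.ArgumentParser()
parser.add_argument('--weights', required=True)
parser.add_argument('--test_boxes', required=True)
parser.add_argument('--expname', default='')
parser.add_argument('--min_conf', type=float, default=0.2)
parser.add_argument('--tau', type=float, default=0.25)
parser.add_argument('--show_suppressed', action='store_true')
args = parser.parse_args()

hypes_file = os.path.join(os.path.dirname(args.weights), 'hypes.json')
with open(hypes_file) as f:
    H = json.load(f)

pred_annolist, true_annolist = get_results(args, H)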
def main(args, logger):
    # setup
    logger.info(args)
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = args.gpu_fraction

    # path
    path_hypes_file = '{}/hypes.json'.format(os.path.dirname(args.weights))
    with open(path_hypes_file, 'r') as f:
        H = json.load(f)
    expname = args.expname + '_' if args.expname else ''

    # graph
    tf.reset_default_graph()
    H['grid_width'] = H['image_width'] / H['region_size']
    H['grid_height'] = H['image_height'] / H['region_size']
    X = tf.placeholder(tf.float32, name='input',
                       shape=(H['image_height'], H['image_width'], 3))
    if H['use_rezoom']:
        (pred_boxes, pred_logits, pred_confidences, pred_confs_deltas,
         pred_boxes_deltas) = build_forward(H, tf.expand_dims(X, 0), 'test',
                                            reuse=None)
        grid_area = H['grid_height'] * H['grid_width']
        reshape_shape = [grid_area * H['rnn_len'], 2]
        pred_confidences = tf.reshape(
            tf.nn.softmax(tf.reshape(pred_confs_deltas, reshape_shape)),
            reshape_shape)
        pred_boxes = pred_boxes + pred_boxes_deltas if H['reregress'] \
            else pred_boxes
    else:
        (pred_boxes, pred_logits, pred_confidences) = build_forward(
            H, tf.expand_dims(X, 0), 'test', reuse=None)

    # load checkpoint
    saver = tf.train.Saver()
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, args.weights)

        pred_annolist = al.AnnoList()

        # get all video candidates
        video_paths = glob(
            os.path.join(args.video_root, '*.{}'.format(args.video_type)))
        for v in video_paths:
            video_fullname = '.'.join(v.split('.')[:-1])
            video_name = video_fullname.split('/')[-1]
            txtname = video_fullname + '_detection.txt'
            txtname = '/'.join(
                [args.outputdir, video_name, txtname.split('/')[-1]])
            if os.path.isfile(txtname):
                logger.info('{} existed, pass'.format(txtname))
                continue
            if not os.path.exists(os.path.dirname(txtname)):
                os.makedirs(os.path.dirname(txtname))
            logger.info('Predicting {}'.format(os.path.basename(v)))

            # video operation
            cap = cv2.VideoCapture(v)
            total_frame = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            resolution = tuple(map(int, (cap.get(3), cap.get(4))))
            filename = 'detected_{}'.format(os.path.basename(v))

            # output video
            if args.output_video:
                outputdir = os.path.join(
                    args.outputdir, '{}-skip-{}-count-{}'.format(
                        datetime.now().strftime('%Y%m%d'), args.skip_nframe,
                        args.frame_count or 'all'))
                if not os.path.exists(outputdir):
                    os.makedirs(outputdir)
                out = cv2.VideoWriter(os.path.join(outputdir, filename),
                                      fourcc, 15, resolution)

            data = []
            logger.info('total {} skip {}'.format(total_frame,
                                                  args.skip_nframe))
            for frame_idx in tqdm(range(0, total_frame, args.skip_nframe)):
                if args.frame_count and len(data) > args.frame_count:
                    break
                if not cap.isOpened():
                    logger.error('{} is close'.format(os.path.basename(v)))
                ok, frame = cap.read()
                if ok:
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    image = cv2.resize(frame,
                                       (H['image_width'], H['image_height']))
                    (np_pred_boxes, np_pred_confidences) = sess.run(
                        [pred_boxes, pred_confidences], feed_dict={X: image})
                    pred_anno = al.Annotation()
                    new_img, rects = add_rectangles(
                        H, [image], np_pred_confidences, np_pred_boxes,
                        use_stitching=True, rnn_len=H['rnn_len'],
                        min_conf=args.min_conf, tau=args.tau,
                        show_suppressed=args.suppressed)
                    pred_anno.rects = rects
                    pred_anno = rescale_boxes(
                        (H["image_height"], H["image_width"]), pred_anno,
                        frame.shape[0], frame.shape[1])
                    results = []
                    for r in pred_anno.rects:
                        results.append([
                            max(r.y1, 0), max(r.x1, 0), max(r.y2, 0),
                            max(r.x2, 0), r.score
                        ])
                    data.append(str([frame_idx + 1, results]) + '\n')
                    pred_annolist.append(pred_anno)
                    if args.output_video:
                        out.write(new_img)
                else:
                    logger.warning('cannot read frame {}'.format(frame_idx))
            cap.release()
            if args.output_video:
                out.release()
            with open(txtname, 'w+') as f:
                f.writelines(data)
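# Hedged sketch: reading back one '<video>_detection.txt' written by main()
# above. Each line is the str() of [frame_number, [[y1, x1, y2, x2, score], ...]],
# so ast.literal_eval can recover the nested lists, assuming the coordinates
# and scores serialize as plain numeric literals. The helper name is ours.
import ast

def read_detections(txt_path):
    detections = {}
    with open(txt_path) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            frame_number, rects = ast.literal_eval(line)
            detections[frame_number] = rects
    return detections

# Example: dets = read_detections('some_video_detection.txt')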
def get_results(args, H):
    tf.reset_default_graph()
    x_in = tf.placeholder(tf.float32, name='x_in',
                          shape=[H['image_height'], H['image_width'], 3])
    if H['use_rezoom']:
        (pred_boxes, pred_logits, pred_confidences, pred_confs_deltas,
         pred_boxes_deltas) = build_forward(H, tf.expand_dims(x_in, 0),
                                            'test', reuse=None)
        grid_area = H['grid_height'] * H['grid_width']
        pred_confidences = tf.reshape(
            tf.nn.softmax(
                tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])),
            [grid_area, H['rnn_len'], 2])
        if H['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        saver.restore(sess, args.weights)

        pred_annolist = al.AnnoList()
        data_dir = os.path.dirname(args.test_boxes)
        image_dir = get_image_dir(args)
        subprocess.call('mkdir -p %s' % image_dir, shell=True)

        # shared-memory frames and steering curve from the simulator
        memory = sysv_ipc.SharedMemory(123463)
        memory2 = sysv_ipc.SharedMemory(123464)
        size = 768, 1024, 3
        pedal = PyMouse()
        pedal.press(1)
        road_center = 320

        while True:
            cv2.waitKey(1)
            frameCount = bytearray(memory.read())
            curve = bytearray(memory2.read())
            curve = str(struct.unpack('i', curve)[0])
            m = np.array(frameCount, dtype=np.uint8)
            orig_img = m.reshape(size)
            img = imresize(orig_img, (H["image_height"], H["image_width"]),
                           interp='cubic')
            feed = {x_in: img}
            (np_pred_boxes, np_pred_confidences) = sess.run(
                [pred_boxes, pred_confidences], feed_dict=feed)
            pred_anno = al.Annotation()
            new_img, rects = add_rectangles(
                H, [img], np_pred_confidences, np_pred_boxes,
                use_stitching=True, rnn_len=H['rnn_len'],
                min_conf=args.min_conf, tau=args.tau,
                show_suppressed=args.show_suppressed)

            flag = 0
            road_center = 320 + int(curve)
            print(road_center)
            for rect in rects:
                print(rect.x1, rect.x2, rect.y2)
                if (rect.x1 < road_center and rect.x2 > road_center
                        and rect.y2 > 200) and (rect.x2 - rect.x1 > 30):
                    flag = 1
            if flag == 1:
                pedal.press(2)
                print("brake!")
            else:
                pedal.release(2)
                pedal.press(1)
                print("acceleration!")

            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]),
                                      pred_anno, orig_img.shape[0],
                                      orig_img.shape[1])
            pred_annolist.append(pred_anno)
            cv2.imshow('.jpg', new_img)
    return None
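# Hedged sketch: the braking heuristic from the loop above factored into a
# pure function, so the thresholds (y2 > 200, box width > 30, box straddling
# the curve-shifted road center) can be unit-tested without shared memory or
# a live session. The function itself is illustrative, not part of the source.
def should_brake(rects, curve_offset, base_center=320, min_y2=200, min_width=30):
    road_center = base_center + int(curve_offset)
    for rect in rects:
        spans_center = rect.x1 < road_center < rect.x2
        close_enough = rect.y2 > min_y2
        wide_enough = (rect.x2 - rect.x1) > min_width
        if spans_center and close_enough and wide_enough:
            return True
    return False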
def get_results(hypes, sess, image_pl, calib_pl, xy_scale_pl, decoded_logits,
                validation=True):
    pred_boxes = decoded_logits['pred_boxes_new']
    # pred_boxes = decoded_logits['pred_bbox_proj']
    pred_depths = decoded_logits['pred_depths_new']
    pred_locations = decoded_logits['pred_locations_new']
    pred_confidences = decoded_logits['pred_confidences']
    pred_corners = decoded_logits['pred_corners']
    refined_global_corners = decoded_logits['refined_global_corners']

    # Build Placeholder
    shape = [hypes['image_height'], hypes['image_width'], 3]

    if validation:
        kitti_txt = os.path.join(hypes['dirs']['data_dir'],
                                 hypes['data']['val_file'])
    else:
        kitti_txt = os.path.join(hypes['dirs']['data_dir'],
                                 hypes['data']['train_file'])

    # true_annolist = AnnLib.parse(test_idl)

    val_dir = make_val_dir(hypes, validation)
    img_dir = make_img_dir(hypes)

    image_list = []
    pred_annolist = AnnLib.AnnoList()

    files = [line.rstrip() for line in open(kitti_txt)]
    base_path = os.path.realpath(os.path.dirname(kitti_txt))

    for i, file in enumerate(files):
        sys.stdout.write('\r>> Converting %d/%d\n' % (i + 1, len(files)))
        sys.stdout.flush()
        image_file = file.split(" ")[0]
        if not validation and random.random() > 0.2:
            continue
        image_file_split = image_file.split('/')
        image_file = os.path.join(base_path, image_file)
        index = image_file_split[-1].split('.')[0]
        calib_file = os.path.join(base_path, image_file_split[0], 'calib',
                                  index + '.txt')

        orig_img = scp.misc.imread(image_file)[:, :, :3]
        xy_scale = np.reshape([
            hypes['image_width'] * 1.0 / orig_img.shape[1],
            hypes['image_height'] * 1.0 / orig_img.shape[0]
        ], (1, 1, 1, 2)).astype(np.float32)
        xy_scale = np.repeat(xy_scale, hypes['grid_height'], axis=1)
        xy_scale = np.repeat(xy_scale, hypes['grid_width'], axis=2)

        img = scp.misc.imresize(
            orig_img, (hypes["image_height"], hypes["image_width"]),
            interp='cubic')

        calibs = [line.rstrip().split(' ') for line in open(calib_file)]
        assert calibs[2][0] == 'P2:'
        calib = np.reshape(calibs[2][1:], (1, 1, 1, 3, 4)).astype(np.float32)
        calib = np.repeat(calib, hypes['grid_height'], axis=1)
        calib = np.repeat(calib, hypes['grid_width'], axis=2)

        feed = {image_pl: img, calib_pl: calib, xy_scale_pl: xy_scale}

        (np_pred_boxes, np_pred_confidences,
         np_refined_global_corners) = sess.run(
             [pred_boxes, pred_confidences, refined_global_corners],
             feed_dict=feed)

        outer_size = hypes['grid_width'] * hypes['grid_height'] * 1
        np_refined_corners = np_refined_global_corners.reshape(
            (outer_size, 3, 8))
        np_pred_x = np.mean(np_refined_corners[:, 0, :], axis=-1,
                            keepdims=True)
        np_pred_y = np.mean(np_refined_corners[:, 1, :4], axis=-1,
                            keepdims=True)
        np_pred_depths = np.mean(np_refined_corners[:, 2, :], axis=-1,
                                 keepdims=True)
        np_pred_locations = np.concatenate(
            [np_pred_x, np_pred_y, np_pred_depths], axis=1)
        np_pred_corners = np.reshape(
            np_refined_corners - np_pred_locations.reshape(outer_size, 3, 1),
            (outer_size, 24))

        pred_anno = AnnLib.Annotation()
        pred_anno.imageName = image_file
        new_img, rects = utils.train_utils.add_rectangles(
            hypes, [img], np_pred_confidences, np_pred_boxes, np_pred_depths,
            np_pred_locations, np_pred_corners, show_removed=False,
            use_stitching=True, rnn_len=hypes['rnn_len'], min_conf=0.50,
            tau=hypes['tau'], color_acc=(0, 255, 0))

        if validation and i % 15 == 0:
            image_name = os.path.basename(pred_anno.imageName)
            image_name = os.path.join(img_dir, image_name)
            # scp.misc.imsave(image_name, new_img)
        if validation:
            image_name = os.path.basename(pred_anno.imageName)
            image_list.append((image_name, new_img))

        # get name of file to write to
        image_name = os.path.basename(image_file)
        val_file_name = image_name.split('.')[0] + '.txt'
        val_file = os.path.join(val_dir, val_file_name)

        # write rects to file
        for rect in rects:
            rect.calib = calib
        pred_anno.rects = rects
        pred_anno = utils.train_utils.rescale_boxes(
            (hypes["image_height"], hypes["image_width"]), pred_anno,
            orig_img.shape[0], orig_img.shape[1])
        write_rects(rects, val_file)
        pred_annolist.append(pred_anno)

    start_time = time.time()
    for i in xrange(100):
        (np_pred_boxes, np_pred_confidence, np_pred_depths,
         np_pred_locations) = \
            sess.run([pred_boxes, pred_confidences, pred_depths,
                      pred_locations], feed_dict=feed)
    dt = (time.time() - start_time) / 100

    start_time = time.time()
    for i in xrange(100):
        utils.train_utils.compute_rectangels(
            hypes, np_pred_confidences, np_pred_boxes, np_pred_depths,
            np_pred_locations, show_removed=False, use_stitching=True,
            rnn_len=hypes['rnn_len'], min_conf=0.001, tau=hypes['tau'])
    dt2 = (time.time() - start_time) / 100

    return pred_annolist, image_list, dt, dt2
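# Hedged sketch of the calibration handling in the two KITTI variants above:
# parse the P2 row of a calib file into a 3x4 projection matrix and tile it
# once per grid cell, matching the (1, grid_h, grid_w, 3, 4) layout fed to
# calib_pl. The file layout (P2 on the third line) mirrors the assert in
# get_results; grid_height/grid_width stand in for hypes['grid_*'].
import numpy as np

def load_p2_per_cell(calib_path, grid_height, grid_width):
    with open(calib_path) as f:
        rows = [line.rstrip().split(' ') for line in f]
    assert rows[2][0] == 'P2:'
    calib = np.reshape(rows[2][1:], (1, 1, 1, 3, 4)).astype(np.float32)
    calib = np.repeat(calib, grid_height, axis=1)
    calib = np.repeat(calib, grid_width, axis=2)
    return calib  # shape: (1, grid_height, grid_width, 3, 4)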