def load_idl_tf(idlfile, H, jitter):
    """Take the idlfile and net configuration and create a generator
    that outputs a jittered version of a random image from the annolist
    that is mean corrected."""

    annolist = al.parse(idlfile)
    annos = []
    for anno in annolist:
        anno.imageName = os.path.join(
            os.path.dirname(os.path.realpath(idlfile)), anno.imageName)
        annos.append(anno)
    random.seed(0)
    if H['data']['truncate_data']:
        annos = annos[:10]
    for epoch in itertools.count():
        random.shuffle(annos)
        for origin_anno in annos:
            tiles = preprocess_image(deepcopy(origin_anno), H)
            for I, anno in tiles:
                if jitter:
                    jitter_scale_min = 0.9
                    jitter_scale_max = 1.1
                    jitter_offset = 16
                    I, anno = annotation_jitter(
                        I, anno, target_width=H["image_width"],
                        target_height=H["image_height"],
                        jitter_scale_min=jitter_scale_min,
                        jitter_scale_max=jitter_scale_max,
                        jitter_offset=jitter_offset)
                boxes, flags = annotation_to_h5(H, anno)
                yield {"image": I, "boxes": boxes, "flags": flags}
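# A minimal usage sketch for the generator above. The hypes values and the
# .idl path are hypothetical; a real H comes from a TensorBox-style hypes
# JSON and also carries whatever keys preprocess_image and annotation_to_h5
# expect.
H = {
    "image_width": 640,
    "image_height": 480,
    "data": {"truncate_data": True},
}
gen = load_idl_tf("data/train_boxes.idl", H, jitter=True)
batch = next(gen)  # one jittered, grid-encoded training example
print(batch["image"].shape, batch["boxes"].shape, batch["flags"].shape)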
def get_results(args, H):
    tf.reset_default_graph()
    x_in = tf.placeholder(tf.float32, name="x_in",
                          shape=[H["image_height"], H["image_width"], 3])
    googlenet = googlenet_load.init(H)
    if H["use_rezoom"]:
        (pred_boxes, pred_logits, pred_confidences,
         pred_confs_deltas, pred_boxes_deltas) = build_forward(
            H, tf.expand_dims(x_in, 0), googlenet, "test", reuse=None)
        grid_area = H["grid_height"] * H["grid_width"]
        pred_confidences = tf.reshape(
            tf.nn.softmax(
                tf.reshape(pred_confs_deltas, [grid_area * H["rnn_len"], 2])),
            [grid_area, H["rnn_len"], 2])
        if H["reregress"]:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(
            H, tf.expand_dims(x_in, 0), googlenet, "test", reuse=None)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        saver.restore(sess, args.weights)
        pred_annolist = al.AnnoList()
        true_annolist = al.parse(args.test_idl)
        data_dir = os.path.dirname(args.test_idl)
        image_dir = get_image_dir(args)
        subprocess.call("mkdir -p %s" % image_dir, shell=True)
        for i in range(len(true_annolist)):
            true_anno = true_annolist[i]
            orig_img = imread("%s/%s" % (data_dir, true_anno.imageName))[:, :, :3]
            img = imresize(orig_img, (H["image_height"], H["image_width"]),
                           interp="cubic")
            feed = {x_in: img}
            (np_pred_boxes, np_pred_confidences) = sess.run(
                [pred_boxes, pred_confidences], feed_dict=feed)
            pred_anno = al.Annotation()
            pred_anno.imageName = true_anno.imageName
            new_img, rects = add_rectangles(
                H, [img], np_pred_confidences, np_pred_boxes,
                use_stitching=True, rnn_len=H["rnn_len"],
                min_conf=0.2, tau=args.tau)
            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes(
                (H["image_height"], H["image_width"]), pred_anno,
                orig_img.shape[0], orig_img.shape[1])
            pred_annolist.append(pred_anno)
            imname = "%s/%s" % (image_dir, os.path.basename(true_anno.imageName))
            misc.imsave(imname, new_img)
            if i % 25 == 0:
                print(i)
    return pred_annolist, true_annolist
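# A hedged usage sketch for get_results above. The args mirror what an
# evaluate-style script would parse; every path, the tau value, and any
# extra attributes that get_image_dir reads are hypothetical.
import argparse
import json

with open("hypes/overfeat_rezoom.json") as f:  # hypothetical hypes file
    H = json.load(f)
args = argparse.Namespace(
    weights="output/save.ckpt-100000",  # hypothetical checkpoint
    test_idl="data/val_boxes.idl",
    tau=0.25,
)
pred_annolist, true_annolist = get_results(args, H)
pred_annolist.save("val_predictions.idl")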
def load_idl(idlfile, data_mean, net_config, jitter=True):
    """Take the idlfile, data mean and net configuration and create a
    generator that outputs a jittered version of a random image from the
    annolist that is mean corrected."""

    annolist = al.parse(idlfile)
    annos = [x for x in annolist]
    for anno in annos:
        anno.imageName = os.path.join(
            os.path.dirname(os.path.realpath(idlfile)), anno.imageName)
    while True:
        random.shuffle(annos)
        for anno in annos:
            if jitter:
                jit_image, jit_anno = annotation_jitter(
                    anno, target_width=net_config["img_width"],
                    target_height=net_config["img_height"])
            else:
                jit_image = imread(anno.imageName)
                jit_anno = anno
            image = image_to_h5(jit_image, data_mean, image_scaling=1.0)
            boxes, box_flags = annotation_to_h5(
                jit_anno, net_config["grid_width"], net_config["grid_height"],
                net_config["region_size"], net_config["max_len"])
            yield {"imname": anno.imageName, "raw": jit_image, "image": image,
                   "boxes": boxes, "box_flags": box_flags, "anno": jit_anno}
def load_idl_tf(idlfile, H, jitter):
    """Take the idlfile and net configuration and create a generator
    that outputs a jittered version of a random image from the annolist
    that is mean corrected."""

    annolist = al.parse(idlfile)
    annos = []
    for anno in annolist:
        anno.imageName = os.path.join(
            os.path.dirname(os.path.realpath(idlfile)), anno.imageName)
        annos.append(anno)
    random.seed(0)
    if H['data']['truncate_data']:
        annos = annos[:10]
    for epoch in itertools.count():
        random.shuffle(annos)
        for anno in annos:
            I = imread(anno.imageName)
            # Skip greyscale images
            if len(I.shape) < 3:
                continue
            if I.shape[2] == 4:
                I = I[:, :, :3]
            if I.shape[0] != H["image_height"] or I.shape[1] != H["image_width"]:
                if epoch == 0:
                    anno = rescale_boxes(I.shape, anno,
                                         H["image_height"], H["image_width"])
                I = imresize(I, (H["image_height"], H["image_width"]),
                             interp='cubic')
            if jitter:
                jitter_scale_min = 0.9
                jitter_scale_max = 1.1
                jitter_offset = 16
                I, anno = annotation_jitter(
                    I, anno, target_width=H["image_width"],
                    target_height=H["image_height"],
                    jitter_scale_min=jitter_scale_min,
                    jitter_scale_max=jitter_scale_max,
                    jitter_offset=jitter_offset)
            boxes, flags = annotation_to_h5(H, anno, H["grid_width"],
                                            H["grid_height"], H["rnn_len"])
            yield {"image": I, "boxes": boxes, "flags": flags}
def test_IDL(idl_filename):
    print("Starting Testing Dataset... May take a while")
    progress = progressbar.ProgressBar(widgets=[
        progressbar.Bar('=', '[', ']'), ' ',
        progressbar.Percentage(), ' ', progressbar.ETA()])
    test_annos = al.parse(idl_filename)
    for test_anno in progress(test_annos):
        bb_img = Image.open(test_anno.imageName)
        orig_img = cv2.imread(test_anno.imageName, 0)
        cv2.imshow('Original Image', orig_img)
        cv2.waitKey(2)
        for test_rect in test_anno.rects:
            dr = ImageDraw.Draw(bb_img)
            # PIL's ImageDraw.rectangle expects (x1, y1, x2, y2)
            cor = (test_rect.x1, test_rect.y1, test_rect.x2, test_rect.y2)
            dr.rectangle(cor, outline="green")
        image_name, image_ext = os.path.splitext(test_anno.imageName)
        bb_img.save(image_name + '_copy' + image_ext)
        bb_img = cv2.imread(image_name + '_copy' + image_ext, 0)
        cv2.imshow('My Rectangle detection', bb_img)
        cv2.waitKey(2)
        os.remove(image_name + '_copy' + image_ext)
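# Why the coordinate order above matters: PIL draws nothing useful (or raises,
# depending on the version) when the corners are swapped. A minimal,
# self-contained check on a hypothetical 100x100 canvas:
from PIL import Image, ImageDraw

img = Image.new("RGB", (100, 100), "white")
dr = ImageDraw.Draw(img)
dr.rectangle((10, 20, 60, 80), outline="green")  # (x1, y1, x2, y2) with x1 <= x2, y1 <= y2
img.save("rect_check.png")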
def load_idl_tf(idlfile, H, jitter):
    """Take the idlfile and net configuration and create a generator
    that outputs a jittered version of a random image from the annolist
    that is mean corrected."""

    arch = H['arch']
    annolist = al.parse(idlfile)
    annos = [x for x in annolist]
    for anno in annos:
        anno.imageName = os.path.join(
            os.path.dirname(os.path.realpath(idlfile)), anno.imageName)
    random.seed(0)
    if H['data']['truncate_data']:
        annos = annos[:10]
    while True:
        random.shuffle(annos)
        for anno in annos:
            if arch["image_width"] != 640 or arch["image_height"] != 480:
                rescale_boxes(anno, arch["image_width"], arch["image_height"])
            I = imread(anno.imageName)
            if jitter:
                jit_image, jit_anno = annotation_jitter(
                    I, anno, target_width=arch["image_width"],
                    target_height=arch["image_height"])
            else:
                try:
                    jit_image, jit_anno = annotation_jitter(
                        I, anno, target_width=arch["image_width"],
                        target_height=arch["image_height"],
                        jitter_scale_min=1.0, jitter_scale_max=1.0,
                        jitter_offset=0)
                except Exception:
                    import traceback
                    print(traceback.format_exc())
                    continue
            boxes, box_flags = annotation_to_h5(
                jit_anno, arch["grid_width"], arch["grid_height"],
                arch["rnn_len"])
            yield {"imname": anno.imageName, "raw": [], "image": jit_image,
                   "boxes": boxes, "box_flags": box_flags}
def load_idl_tf(idlfile, H, jitter):
    """Take the idlfile and net configuration and create a generator
    that outputs a jittered version of a random image from the annolist
    that is mean corrected."""

    annolist = al.parse(idlfile)
    annos = [x for x in annolist]
    for anno in annos:
        anno.imageName = os.path.join(
            os.path.dirname(os.path.realpath(idlfile)), anno.imageName)
    random.seed(0)
    if H['data']['truncate_data']:
        annos = annos[:10]
    while True:
        random.shuffle(annos)
        for anno in annos:
            I = imread(anno.imageName)
            if (I.shape[0] != H["arch"]["image_height"]
                    or I.shape[1] != H["arch"]["image_width"]):
                I, anno = rescale_boxes(I, anno, H["arch"]["image_height"],
                                        H["arch"]["image_width"])
            if jitter:
                jitter_scale_min = 0.9
                jitter_scale_max = 1.1
                jitter_offset = 16
                I, anno = annotation_jitter(
                    I, anno, target_width=H["arch"]["image_width"],
                    target_height=H["arch"]["image_height"],
                    jitter_scale_min=jitter_scale_min,
                    jitter_scale_max=jitter_scale_max,
                    jitter_offset=jitter_offset)
            boxes, flags = annotation_to_h5(anno, H["arch"]["grid_width"],
                                            H["arch"]["grid_height"],
                                            H["arch"]["rnn_len"])
            yield {"image": I, "boxes": boxes, "flags": flags}
def pred(self, weights, test_boxes, min_conf, tau, show_suppressed, expname):
    # Use floor division so the grid dimensions stay ints under Python 3.
    self.H["grid_width"] = self.H["image_width"] // self.H["region_size"]
    self.H["grid_height"] = self.H["image_height"] // self.H["region_size"]
    x_in = tf.placeholder(
        tf.float32, name='x_in',
        shape=[self.H['image_height'], self.H['image_width'], 3])
    if self.H['use_rezoom']:
        (pred_boxes, pred_logits, pred_confidences,
         pred_confs_deltas, pred_boxes_deltas) = self.build_forward(
            tf.expand_dims(x_in, 0), 'test', reuse=None)
        grid_area = self.H['grid_height'] * self.H['grid_width']
        pred_confidences = tf.reshape(
            tf.nn.softmax(
                tf.reshape(pred_confs_deltas,
                           [grid_area * self.H['rnn_len'], 2])),
            [grid_area, self.H['rnn_len'], 2])
        if self.H['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = self.build_forward(
            tf.expand_dims(x_in, 0), 'test', reuse=None)
    rect_list = []
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, weights)
        pred_annolist = al.AnnoList()
        true_annolist = al.parse(test_boxes)
        data_dir = os.path.dirname(test_boxes)
        image_dir = self.get_image_dir(weights, expname, test_boxes)
        subprocess.call('mkdir -p %s' % image_dir, shell=True)
        for i in range(len(true_annolist)):
            true_anno = true_annolist[i]
            orig_img = imread('%s/%s' % (data_dir, true_anno.imageName))[:, :, :3]
            img = imresize(orig_img,
                           (self.H["image_height"], self.H["image_width"]),
                           interp='cubic')
            feed = {x_in: img}
            (np_pred_boxes, np_pred_confidences) = sess.run(
                [pred_boxes, pred_confidences], feed_dict=feed)
            pred_anno = al.Annotation()
            pred_anno.imageName = true_anno.imageName
            new_img, rects = add_rectangles(
                self.H, [img], np_pred_confidences, np_pred_boxes,
                use_stitching=True, rnn_len=self.H['rnn_len'],
                min_conf=min_conf, tau=tau, show_suppressed=show_suppressed)
            print('tb model %d' % len(rects))
            # pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes(
                (self.H["image_height"], self.H["image_width"]), pred_anno,
                orig_img.shape[0], orig_img.shape[1])
            pred_annolist.append(pred_anno)
            imname = 'box_sample.jpg'
            misc.imsave(imname, new_img)
            if i % 25 == 0:
                print(i)
            # print(pred_anno.imageName)
            for rect_i in range(len(rects)):
                rect_list.append({
                    'x_min': rects[rect_i].left() / 640.,
                    'x_max': rects[rect_i].right() / 640.,
                    'y_min': rects[rect_i].top() / 480.,
                    'y_max': rects[rect_i].bottom() / 480.,
                })
    return rect_list
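# Quick check of the grid arithmetic used above (illustrative values, not
# from the source): with true division the grid dimensions become floats,
# which TensorFlow shape and reshape arguments reject, so floor division
# is required.
image_width, region_size = 640, 32
grid_width = image_width // region_size  # 20, an int
assert isinstance(grid_width, int) and grid_width == 20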
def get_results(args, H):
    tf.reset_default_graph()
    H["grid_width"] = H["image_width"] // H["region_size"]
    H["grid_height"] = H["image_height"] // H["region_size"]
    x_in = tf.placeholder(tf.float32, name='x_in',
                          shape=[H['image_height'], H['image_width'], 3])
    if H['use_rezoom']:
        (pred_boxes, pred_logits, pred_confidences,
         pred_confs_deltas, pred_boxes_deltas) = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
        grid_area = H['grid_height'] * H['grid_width']
        pred_confidences = tf.reshape(
            tf.nn.softmax(
                tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])),
            [grid_area, H['rnn_len'], 2])
        if H['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
    saver = tf.train.Saver()
    all_predictions = []
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, args.weights)
        pred_annolist = al.AnnoList()
        true_annolist = al.parse(args.test_boxes)
        data_dir = os.path.dirname(args.test_boxes)
        false_positives, false_negatives, true_positives = 0, 0, 0
        total_time = 0.0
        image_dir = get_image_dir(args)
        subprocess.call('mkdir -p %s' % image_dir, shell=True)
        for i in range(len(true_annolist)):
            true_anno = true_annolist[i]
            orig_img = imread('%s/%s' % (data_dir, true_anno.imageName))[:, :, :3]
            img = imresize(orig_img, (H["image_height"], H["image_width"]),
                           interp='cubic')
            feed = {x_in: img}
            t0 = time.time()
            (np_pred_boxes, np_pred_confidences) = sess.run(
                [pred_boxes, pred_confidences], feed_dict=feed)
            total_time += time.time() - t0
            pred_anno = al.Annotation()
            pred_anno.imageName = true_anno.imageName
            new_img, rects, all_rects = add_rectangles(
                H, [img], np_pred_confidences, np_pred_boxes,
                use_stitching=True, rnn_len=H['rnn_len'],
                min_conf=args.min_conf, tau=args.tau,
                show_suppressed=args.show_suppressed)
            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]),
                                      pred_anno, orig_img.shape[0],
                                      orig_img.shape[1])
            pred_annolist.append(pred_anno)
            all_predictions.extend(
                [[r.x1, r.y1, r.x2, r.y2, r.score, i] for r in all_rects])
            prediction = np.array(
                [[r.x1, r.y1, r.x2, r.y2, r.score] for r in rects])
            targets = np.array(
                [[r.x1, r.y1, r.x2, r.y2] for r in true_anno.rects])
            fp, fn, tp, jaccard = get_metrics(targets, prediction)
            false_positives += fp
            false_negatives += fn
            true_positives += tp
            precision = np.float64(true_positives) / (true_positives
                                                      + false_positives)
            recall = np.float64(true_positives) / (true_positives
                                                   + false_negatives)
            print('[%d/%d]: False positives: %d, False negatives: %d, '
                  'True positives: %d, Precision: %f, Recall: %f'
                  % (i, len(true_annolist), false_positives, false_negatives,
                     true_positives, precision, recall))
    df = pandas.DataFrame(all_predictions)
    df.columns = ['x1', 'y1', 'x2', 'y2', 'score', 'image_id']
    print('Total time: %.4f seconds, per image: %.4f'
          % (total_time, total_time / len(true_annolist)))
    return df
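# The running precision/recall above evaluates to nan (with a NumPy warning)
# until at least one box has been matched. A hedged guard, with a name of my
# own (safe_pr is not part of the source code):
def safe_pr(tp, fp, fn):
    """Return (precision, recall), treating 0/0 as 0.0."""
    precision = float(tp) / (tp + fp) if tp + fp else 0.0
    recall = float(tp) / (tp + fn) if tp + fn else 0.0
    return precision, recall

print(safe_pr(0, 0, 3))  # (0.0, 0.0) rather than nan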
def run_eval(H, checkpoint_dir, hypes_file, output_path):
    """Do evaluation with a full epoch of data.

    Args:
      H: Hypes
      checkpoint_dir: directory with checkpoint files
      output_path: path to save results
    """
    # Load GT
    true_idl = H['data']['test_idl']
    true_annos = al.parse(true_idl)

    # Define output files
    pred_file = 'val_%s.idl' % os.path.basename(hypes_file).replace('.json', '')
    pred_idl = os.path.join(output_path, pred_file)
    true_file = 'true_%s.idl' % os.path.basename(hypes_file).replace('.json', '')
    true_idl_scaled = os.path.join(output_path, true_file)
    data_folder = os.path.dirname(os.path.realpath(true_idl))

    # Load graph model
    tf.reset_default_graph()
    googlenet = googlenet_load.init(H)
    x_in = tf.placeholder(tf.float32, name='x_in')
    if H['arch']['use_lstm']:
        lstm_forward = build_lstm_forward(H, tf.expand_dims(x_in, 0),
                                          googlenet, 'test', reuse=None)
        pred_boxes, pred_logits, pred_confidences = lstm_forward
    else:
        overfeat_forward = build_overfeat_forward(H, tf.expand_dims(x_in, 0),
                                                  googlenet, 'test')
        pred_boxes, pred_logits, pred_confidences = overfeat_forward

    start_time = time.time()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        logging.info("Starting Evaluation")
        sess.run(tf.initialize_all_variables())

        # Restore checkpoints
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            logging.info(ckpt.model_checkpoint_path)
            saver.restore(sess, ckpt.model_checkpoint_path)

        annolist = al.AnnoList()
        trueanno = al.AnnoList()

        # Shuffle true_annos to randomize the plotted images
        shuffle(true_annos)
        for i in range(len(true_annos)):
            true_anno = true_annos[i]
            img = imread(os.path.join(data_folder, true_anno.imageName))
            # Rescale boxes
            trueanno.append(rescale_boxes(img.shape, true_annos[i],
                                          H["arch"]["image_height"],
                                          H["arch"]["image_width"]))
            # Rescale images
            img = imresize(img, (H["arch"]["image_height"],
                                 H["arch"]["image_width"]), interp='cubic')
            feed = {x_in: img}
            (np_pred_boxes, np_pred_confidences) = sess.run(
                [pred_boxes, pred_confidences], feed_dict=feed)
            pred_anno = al.Annotation()
            pred_anno.imageName = true_anno.imageName
            new_img, rects = add_rectangles([img], np_pred_confidences,
                                            np_pred_boxes, H["arch"],
                                            use_stitching=True,
                                            rnn_len=H['arch']['rnn_len'],
                                            min_conf=0.3)
            pred_anno.rects = rects
            annolist.append(pred_anno)

            if i % 20 == 0:
                # Draw every 20th image; the plotted image is randomized
                # due to the shuffling above.
                duration = time.time() - start_time
                duration = float(duration) * 1000 / 20
                out_img = os.path.join(output_path, 'test_%i.png' % i)
                scp.misc.imsave(out_img, new_img)
                logging.info('Step %d: Duration %.3f ms' % (i, duration))
                start_time = time.time()

    annolist.save(pred_idl)
    trueanno.save(true_idl_scaled)

    # Write results to disk
    iou_threshold = 0.5
    rpc_cmd = './utils/annolist/doRPC.py --minOverlap %f %s %s' % (
        iou_threshold, true_idl_scaled, pred_idl)
    rpc_output = subprocess.check_output(rpc_cmd, shell=True)
    txt_file = [line for line in rpc_output.split('\n') if line.strip()][-1]
    output_png = os.path.join(output_path, "roc.png")
    plot_cmd = './utils/annolist/plotSimple.py %s --output %s' % (txt_file,
                                                                  output_png)
    plot_output = subprocess.check_output(plot_cmd, shell=True)
def evaluate(H, valids, param_path, thr=0.7, l=60000, r=120010, sep=10000,
             with_anno=True):
    true_annos = al.parse(valids)
    for iteration in range(l, r, sep):
        tf.reset_default_graph()
        # print(H['batch_size'])
        x_in = tf.placeholder(tf.float32, name='x_in',
                              shape=[H['image_height'], H['image_width'], 3])
        if H['use_rezoom']:
            (pred_boxes, pred_logits, pred_confidences,
             pred_confs_deltas, pred_boxes_deltas) = build_forward(
                H, tf.expand_dims(x_in, 0), 'test', reuse=None)
            grid_area = H['grid_height'] * H['grid_width']
            pred_confidences = tf.reshape(
                tf.nn.softmax(
                    tf.reshape(pred_confs_deltas,
                               [grid_area * H['rnn_len'], 2])),
                [grid_area, H['rnn_len'], 2])
            if H['reregress']:
                pred_boxes = pred_boxes + pred_boxes_deltas
        else:
            pred_boxes, pred_logits, pred_confidences = build_forward(
                H, tf.expand_dims(x_in, 0), 'test', reuse=None)
        saver = tf.train.Saver()
        gpu_options = tf.GPUOptions()
        gpu_options.allow_growth = True
        config = tf.ConfigProto(gpu_options=gpu_options)
        with tf.Session(config=config) as sess:
            sess.run(tf.global_variables_initializer())
            print('load from ' + (param_path + 'save.ckpt-%d' % iteration))
            saver.restore(sess, param_path + 'save.ckpt-%d' % iteration)
            annolist = al.AnnoList()
            rslt = []
            t = time.time()
            if not os.path.exists(param_path + 'val'):
                os.makedirs(param_path + 'val')
            for i in range(len(true_annos)):
                true_anno = true_annos[i]
                img = imread(SAMPLE_DIR + true_anno.imageName)
                feed = {x_in: img}
                (np_pred_boxes, np_pred_confidences) = sess.run(
                    [pred_boxes, pred_confidences], feed_dict=feed)
                pred_anno = al.Annotation()
                pred_anno.imageName = true_anno.imageName
                new_img, rects = add_rectangles(
                    H, [img], np_pred_confidences, np_pred_boxes,
                    use_stitching=True, rnn_len=H['rnn_len'], min_conf=thr,
                    show_suppressed=False)
                pred_anno.rects = rects
                annolist.append(pred_anno)
                fname = true_anno.imageName
                if with_anno:
                    imwrite(param_path + 'val/'
                            + fname[fname.rindex('/') + 1:-4]
                            + '_' + str(iteration) + '_pred.jpg', new_img)
                    shutil.copy(SAMPLE_DIR + true_anno.imageName[:-4] + '_gt.bmp',
                                param_path + 'val/'
                                + fname[fname.rindex('/') + 1:-4] + '_gt.bmp')
                box_confs = trans(np_pred_boxes, H, np_pred_confidences, thr)
                rslt.append({'file': fname, 'box': box_confs.tolist()})
            avg_time = (time.time() - t) / (i + 1)
            print('%f images/sec' % (1. / avg_time))
        for it in rslt:
            it['box'] = filterBoxes(it['box'], 0.1)
        with open(param_path + 'result_' + str(iteration) + '.json', 'w') as f:
            json.dump(rslt, f)
        generate_result(TRUNK_DIR, rslt,
                        param_path + 'csv_' + str(iteration) + '.csv')
def still_image_TENSORBOX(idl_filename, frames_list, folder_path_det_frames,
                          folder_path_det_result, folder_path_frames,
                          path_video_folder, hypes_file, weights_file,
                          pred_idl):
    print("Starting DET Phase")
    if not os.path.exists(path_video_folder + '/' + folder_path_det_frames):
        os.makedirs(path_video_folder + '/' + folder_path_det_frames)
        print("Created Folder: %s" % (path_video_folder + '/'
                                      + folder_path_det_frames))
    if not os.path.exists(path_video_folder + '/' + folder_path_det_result):
        os.makedirs(path_video_folder + '/' + folder_path_det_result)
        print("Created Folder: %s" % (path_video_folder + '/'
                                      + folder_path_det_result))
    det_frames_list = []

    #### START TENSORBOX CODE ###

    ### Opening hypes file for parameters
    with open(hypes_file, 'r') as f:
        H = json.load(f)

    ### Get annotation list of all the images to test
    test_annos = al.parse(idl_filename)

    ### Building network
    tf.reset_default_graph()
    googlenet = googlenet_load.init(H)
    x_in = tf.placeholder(tf.float32, name='x_in',
                          shape=[H['arch']['image_height'],
                                 H['arch']['image_width'], 3])
    if H['arch']['use_rezoom']:
        (pred_boxes, pred_logits, pred_confidences,
         pred_confs_deltas, pred_boxes_deltas) = build_forward(
            H, tf.expand_dims(x_in, 0), googlenet, 'test', reuse=None)
        grid_area = H['arch']['grid_height'] * H['arch']['grid_width']
        pred_confidences = tf.reshape(
            tf.nn.softmax(
                tf.reshape(pred_confs_deltas,
                           [grid_area * H['arch']['rnn_len'], 2])),
            [grid_area, H['arch']['rnn_len'], 2])
        if H['arch']['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(
            H, tf.expand_dims(x_in, 0), googlenet, 'test', reuse=None)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        # Restore a session of the model to get weights and everything working
        saver.restore(sess, weights_file)

        annolist = al.AnnoList()
        import time
        t = time.time()

        #### Start evaluating the images
        print("%d Frames to DET" % len(frames_list))
        progress = progressbar.ProgressBar(widgets=[
            progressbar.Bar('=', '[', ']'), ' ',
            progressbar.Percentage(), ' ', progressbar.ETA()])
        for i in progress(range(0, len(frames_list) - 1)):
            img = imread(frames_list[i])
            feed = {x_in: img}
            (np_pred_boxes, np_pred_confidences) = sess.run(
                [pred_boxes, pred_confidences], feed_dict=feed)
            pred_anno = al.Annotation()
            # pred_anno.imageName = test_anno.imageName
            new_img, rects = add_rectangles(H, [img], np_pred_confidences,
                                            np_pred_boxes, H["arch"],
                                            use_stitching=True,
                                            rnn_len=H['arch']['rnn_len'],
                                            min_conf=0.5)
            pred_anno.rects = rects
            bb_img = Image.open(frames_list[i])
            for bb_rect in rects:
                ################ Adding rectangle ###################
                dr = ImageDraw.Draw(bb_img)
                # PIL's ImageDraw.rectangle expects (x1, y1, x2, y2)
                cor = (bb_rect.x1, bb_rect.y1, bb_rect.x2, bb_rect.y2)
                dr.rectangle(cor, outline="red")
            bb_img_det_name = frames_list[i].replace(folder_path_frames,
                                                     folder_path_det_frames)
            bb_img.save(bb_img_det_name)
            det_frames_list.append(bb_img_det_name)
            annolist.append(pred_anno)

    annolist.save(pred_idl)

    #### END TENSORBOX CODE ###
    return det_frames_list
def get_results(args, H):
    tf.reset_default_graph()
    x_in = tf.placeholder(tf.float32, name='x_in',
                          shape=[H['image_height'], H['image_width'], 3])
    if H['use_rezoom']:
        (pred_boxes, pred_logits, pred_confidences,
         pred_confs_deltas, pred_boxes_deltas) = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
        grid_area = H['grid_height'] * H['grid_width']
        pred_confidences = tf.reshape(
            tf.nn.softmax(
                tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])),
            [grid_area, H['rnn_len'], 2])
        if H['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        saver.restore(sess, args.weights)
        pred_annolist = al.AnnoList()
        true_annolist = al.parse(args.test_boxes)
        data_dir = os.path.dirname(args.test_boxes)
        image_dir = get_image_dir(args)
        subprocess.call('mkdir -p %s' % image_dir, shell=True)
        for i in range(len(true_annolist)):
            true_anno = true_annolist[i]
            orig_img = imread('%s/%s' % (data_dir, true_anno.imageName))[:, :, :3]
            img = imresize(orig_img, (H["image_height"], H["image_width"]),
                           interp='cubic')
            feed = {x_in: img}
            (np_pred_boxes, np_pred_confidences) = sess.run(
                [pred_boxes, pred_confidences], feed_dict=feed)
            pred_anno = al.Annotation()
            pred_anno.imageName = true_anno.imageName
            new_img, rects = add_rectangles(
                H, [img], np_pred_confidences, np_pred_boxes,
                use_stitching=True, rnn_len=H['rnn_len'],
                min_conf=args.min_conf, tau=args.tau,
                show_suppressed=args.show_suppressed)
            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]),
                                      pred_anno, orig_img.shape[0],
                                      orig_img.shape[1])
            pred_annolist.append(pred_anno)
            imname = '%s/%s' % (image_dir,
                                os.path.basename(true_anno.imageName))
            misc.imsave(imname, new_img)
            if i % 25 == 0:
                print(i)
    return pred_annolist, true_annolist
def load_idl_tf(idlfile, H, jitter):
    """Take the idlfile and net configuration and create a generator
    that outputs a jittered version of a random image from the annolist
    that is mean corrected."""

    annolist = al.parse(idlfile)
    annos = []
    for anno in annolist:
        anno.imageName = os.path.join(
            os.path.dirname(os.path.realpath(idlfile)), anno.imageName)
        annos.append(anno)
    random.seed(0)
    if H['data']['truncate_data']:
        annos = annos[:10]
    for epoch in itertools.count():
        random.shuffle(annos)
        for anno in annos:
            try:
                # With some probability the image is read in grayscale and
                # converted to shape HxWx3 so it fits the model's input
                # shape; without a 'grayscale_prob' key, images are simply
                # read in RGB mode. (The old `len(I.shape) < 3` check ran
                # before I was created, so it is disabled below.)
                if 'grayscale' in H and 'grayscale_prob' in H:
                    I = imread(
                        anno.imageName,
                        mode='RGB' if random.random() < H['grayscale_prob']
                        else 'L')
                    if len(I.shape) < 3:
                        I = cv2.cvtColor(I, cv2.COLOR_GRAY2RGB)
                else:
                    I = imread(anno.imageName, mode='RGB')
                    # if len(I.shape) < 3:
                    #     continue
                if (I.shape[0] != H["image_height"]
                        or I.shape[1] != H["image_width"]):
                    if epoch == 0:
                        anno = rescale_boxes(I.shape, anno,
                                             H["image_height"],
                                             H["image_width"])
                    I = imresize(I, (H["image_height"], H["image_width"]),
                                 interp='cubic')
                if jitter:
                    jitter_scale_min = 0.9
                    jitter_scale_max = 1.1
                    jitter_offset = 16
                    I, anno = annotation_jitter(
                        I, anno, target_width=H["image_width"],
                        target_height=H["image_height"],
                        jitter_scale_min=jitter_scale_min,
                        jitter_scale_max=jitter_scale_max,
                        jitter_offset=jitter_offset)
                boxes, flags = annotation_to_h5(H, anno, H["grid_width"],
                                                H["grid_height"],
                                                H["rnn_len"])
                yield {"image": I, "boxes": boxes, "flags": flags}
            except Exception as exc:
                print(exc)
def get_results(args, H):
    tf.reset_default_graph()
    shape = [H['image_height'], H['image_width'], 3]
    x_in = tf.placeholder(tf.float32, name='x_in', shape=shape)
    # Eight previous frames and one future frame as additional inputs.
    p_x_ins = [tf.placeholder(tf.float32, name='p%d_x_in' % k, shape=shape)
               for k in range(1, 9)]
    f_x_in = tf.placeholder(tf.float32, name='f_x_in', shape=shape)
    if H['use_rezoom']:
        (pred_boxes, pred_logits, pred_confidences,
         pred_confs_deltas, pred_boxes_deltas) = build_forward(
            H, tf.expand_dims(x_in, 0),
            *([tf.expand_dims(p, 0) for p in p_x_ins]
              + [tf.expand_dims(f_x_in, 0), 'test']),
            reuse=None)
        grid_area = H['grid_height'] * H['grid_width']
        pred_confidences = tf.reshape(
            tf.nn.softmax(
                tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])),
            [grid_area, H['rnn_len'], 2])
        if H['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        saver.restore(sess, args.weights)
        pred_annolist = al.AnnoList()
        true_annolist = al.parse(args.test_boxes)
        data_dir = os.path.dirname(args.test_boxes)
        image_dir = get_image_dir(args)
        subprocess.call('mkdir -p %s' % image_dir, shell=True)
        count_error = [0] * 20
        for i in range(len(true_annolist)):
            true_anno = true_annolist[i]
            orig_img = imread('%s/%s' % (data_dir, true_anno.imageName))[:, :, :3]
            dir_path = os.path.dirname(true_anno.imageName)
            file_name = true_anno.imageName.split('/')[-1]
            (shotname, extension) = os.path.splitext(file_name)

            def frame_path(offset):
                return (data_dir + "/" + dir_path + "/"
                        + str(int(shotname) + offset).zfill(4) + ".png")

            p_image_paths = [frame_path(-k) for k in range(1, 9)]
            f_image_path = frame_path(1)
            for path in (p_image_paths[0], p_image_paths[1], f_image_path):
                if not os.path.exists(path):
                    print("File does not exist: %s" % path)
                    exit()
            p_imgs = [imread(p) for p in p_image_paths]
            f_img = imread(f_image_path)
            img = imresize(orig_img, (H["image_height"], H["image_width"]),
                           interp='cubic')
            feed = {x_in: img, f_x_in: f_img}
            feed.update(dict(zip(p_x_ins, p_imgs)))
            (np_pred_boxes, np_pred_confidences) = sess.run(
                [pred_boxes, pred_confidences], feed_dict=feed)
            pred_anno = al.Annotation()
            pred_anno.imageName = true_anno.imageName
            true_count = len(true_anno.rects)
            # print(true_count)
            for j in range(20):
                min_confidence = (j * 1.0) / 20.0
                new_img, rects, count = add_rectangles(
                    H, [img], np_pred_confidences, np_pred_boxes,
                    use_stitching=True, rnn_len=H['rnn_len'],
                    min_conf=min_confidence, tau=args.tau,
                    show_suppressed=args.show_suppressed)
                count_error[j] += abs(count - true_count)
            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]),
                                      pred_anno, orig_img.shape[0],
                                      orig_img.shape[1])
            pred_annolist.append(pred_anno)
            # imname = '%s/%s' % (image_dir, os.path.basename(true_anno.imageName))
            # misc.imsave(imname, new_img)
            if i % 25 == 0:
                print(i)
    print("Count error: %s" % str(min(count_error) / 350.0))
    return pred_annolist, true_annolist
def get_results(hypes, sess, image_pl, decoded_logits, validation=True):
    if hypes['use_rezoom']:
        pred_boxes = decoded_logits['pred_boxes_new']
    else:
        pred_boxes = decoded_logits['pred_boxes']
    pred_confidences = decoded_logits['pred_confidences']

    # Build placeholder shape
    shape = [hypes['image_height'], hypes['image_width'], 3]

    pred_annolist = AnnLib.AnnoList()
    if validation:
        test_idl = os.path.join(hypes['dirs']['data_dir'],
                                hypes['data']['val_idl'])
    else:
        test_idl = os.path.join(hypes['dirs']['data_dir'],
                                hypes['data']['train_idl'])
    true_annolist = AnnLib.parse(test_idl)
    data_dir = os.path.dirname(test_idl)
    val_dir = make_val_dir(hypes, validation)
    img_dir = make_img_dir(hypes)
    image_list = []
    for i in range(len(true_annolist)):
        if not validation and random.random() > 0.2:
            continue
        true_anno = true_annolist[i]
        orig_img = scp.misc.imread(
            '%s/%s' % (data_dir, true_anno.imageName))[:, :, :3]
        img = scp.misc.imresize(
            orig_img, (hypes["image_height"], hypes["image_width"]),
            interp='cubic')
        feed = {image_pl: img}
        (np_pred_boxes, np_pred_confidences) = sess.run(
            [pred_boxes, pred_confidences], feed_dict=feed)
        pred_anno = AnnLib.Annotation()
        pred_anno.imageName = true_anno.imageName
        new_img, rects = utils.train_utils.add_rectangles(
            hypes, [img], np_pred_confidences, np_pred_boxes,
            show_removed=False, use_stitching=True,
            rnn_len=hypes['rnn_len'], min_conf=0.50, tau=hypes['tau'])

        if validation and i % 15 == 0:
            image_name = os.path.basename(pred_anno.imageName)
            image_name = os.path.join(img_dir, image_name)
            scp.misc.imsave(image_name, new_img)

        # Get name of file to write to
        image_name = os.path.basename(true_anno.imageName)
        val_file_name = image_name.split('.')[0] + '.txt'
        val_file = os.path.join(val_dir, val_file_name)

        # Write rects to file
        pred_anno.rects = rects
        pred_anno.imagePath = os.path.abspath(data_dir)
        pred_anno = utils.train_utils.rescale_boxes(
            (hypes["image_height"], hypes["image_width"]), pred_anno,
            orig_img.shape[0], orig_img.shape[1])
        write_rects(rects, val_file)
        pred_annolist.append(pred_anno)

    # Time the forward pass and the rectangle post-processing separately,
    # reusing the last feed dict from the loop above.
    start_time = time.time()
    for i in range(100):
        (np_pred_boxes, np_pred_confidences) = sess.run(
            [pred_boxes, pred_confidences], feed_dict=feed)
    dt = (time.time() - start_time) / 100

    start_time = time.time()
    for i in range(100):
        utils.train_utils.compute_rectangels(
            hypes, np_pred_confidences, np_pred_boxes, show_removed=False,
            use_stitching=True, rnn_len=hypes['rnn_len'], min_conf=0.001,
            tau=hypes['tau'])
    dt2 = (time.time() - start_time) / 100
    return pred_annolist, true_annolist, image_list, dt, dt2
def get_results(args, H):
    tf.reset_default_graph()
    x_in = tf.placeholder(tf.float32, name='x_in',
                          shape=[H['image_height'], H['image_width'], 3])
    if H['use_rezoom']:
        (pred_boxes, pred_logits, pred_confidences,
         pred_confs_deltas, pred_boxes_deltas) = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
        grid_area = H['grid_height'] * H['grid_width']
        pred_confidences = tf.reshape(
            tf.nn.softmax(
                tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])),
            [grid_area, H['rnn_len'], 2])
        if H['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        saver.restore(sess, args.weights)
        pred_annolist = al.AnnoList()
        true_annolist = al.parse(args.test_boxes)
        data_dir = os.path.dirname(args.test_boxes)
        image_dir = get_image_dir(args)
        # subprocess.call('mkdir -p %s' % image_dir, shell=True)
        # vc = cv2.VideoCapture('/home/caucse/images/ets.mp4')
        # c = 1
        # if vc.isOpened():
        #     rval, frame = vc.read()
        # else:
        #     rval = False

        # Frames and road curvature arrive via SysV shared memory segments.
        memory = sysv_ipc.SharedMemory(123463)
        memory2 = sysv_ipc.SharedMemory(123464)
        size = 768, 1024, 3
        pedal = PyMouse()
        pedal.press(1)
        road_center = 320
        while True:
            # rval, frame = vc.read()
            # c = c + 1
            # if c % 6 == 0:
            #     c = c + 1
            # time.sleep(0.5)
            cv2.waitKey(1)
            frameCount = bytearray(memory.read())
            curve = bytearray(memory2.read())
            curve = str(struct.unpack('i', curve)[0])
            m = np.array(frameCount, dtype=np.uint8)
            orig_img = m.reshape(size)
            # print(orig_img[0])
            # cv2.imshow('1', m)
            # true_anno = true_annolist[i]
            # orig_img = imread('%s/%s' % (data_dir, true_anno.imageName))[:, :, :3]
            # orig_img = imread('/home/caucse/images/1.jpg')
            # orig_img = m
            img = imresize(orig_img, (H["image_height"], H["image_width"]),
                           interp='cubic')
            feed = {x_in: img}
            (np_pred_boxes, np_pred_confidences) = sess.run(
                [pred_boxes, pred_confidences], feed_dict=feed)
            pred_anno = al.Annotation()
            # pred_anno.imageName = true_anno.imageName
            new_img, rects = add_rectangles(
                H, [img], np_pred_confidences, np_pred_boxes,
                use_stitching=True, rnn_len=H['rnn_len'],
                min_conf=args.min_conf, tau=args.tau,
                show_suppressed=args.show_suppressed)
            flag = 0
            road_center = 320 + int(curve)
            print(road_center)
            for rect in rects:
                print(rect.x1, rect.x2, rect.y2)
                if (rect.x1 < road_center and rect.x2 > road_center
                        and rect.y2 > 200) and (rect.x2 - rect.x1 > 30):
                    flag = 1
            if flag == 1:
                pedal.press(2)
                print("brake!")
            else:
                pedal.release(2)
                pedal.press(1)
                print("acceleration!")
            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]),
                                      pred_anno, orig_img.shape[0],
                                      orig_img.shape[1])
            pred_annolist.append(pred_anno)
            # imname = '%s/%s' % (image_dir, os.path.basename(true_anno.imageName))
            # imname = '/home/caucse/images/_%s.jpg' % (c)
            cv2.imshow('.jpg', new_img)
            # misc.imsave(imname, new_img)
            # if c % 25 == 0:
            #     print(c)

        # NOTE: unreachable while the loop above runs forever.
        for i in range(len(true_annolist)):
            true_anno = true_annolist[i]
            # orig_img = imread('%s/%s' % (data_dir, true_anno.imageName))[:, :, :3]
            orig_img = imread('/home/caucse/images/1.jpg')
            img = imresize(orig_img, (H["image_height"], H["image_width"]),
                           interp='cubic')
            feed = {x_in: img}
            (np_pred_boxes, np_pred_confidences) = sess.run(
                [pred_boxes, pred_confidences], feed_dict=feed)
            pred_anno = al.Annotation()
            pred_anno.imageName = true_anno.imageName
            new_img, rects = add_rectangles(
                H, [img], np_pred_confidences, np_pred_boxes,
                use_stitching=True, rnn_len=H['rnn_len'],
                min_conf=args.min_conf, tau=args.tau,
                show_suppressed=args.show_suppressed)
            for rect in rects:
                print(rect.x1, rect.y1, rect.x2, rect.y2)
            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]),
                                      pred_anno, orig_img.shape[0],
                                      orig_img.shape[1])
            pred_annolist.append(pred_anno)
            # imname = '%s/%s' % (image_dir, os.path.basename(true_anno.imageName))
            imname = '/home/caucse/images/_1.jpg'
            misc.imsave(imname, new_img)
            if i % 25 == 0:
                print(i)
    return pred_annolist, true_annolist
# The stdlib/third-party imports below were missing from this snippet; the
# TensorBox-style locations of `build_forward` and `al` are assumptions.
import json
import os

import cv2
import tensorflow as tf

from train import build_forward                  # assumed location
from utils.annolist import AnnotationLib as al   # assumed location
from utils.rect import Rect
from utils.stitch_wrapper import stitch_rects
from utils.train_utils import add_rectangles
from evaluate import add_rectangles  # intentionally shadows the import above

hypes_file = './hypes/lstm_rezoom.json'
iteration = 10000
with open(hypes_file, 'r') as f:
    H = json.load(f)
true_json = './new_labels/alb.json'
pred_json = './output/%d_val_%s.json' % (
    iteration, os.path.basename(hypes_file).replace('.json', ''))
true_annos = al.parse(true_json)

tf.reset_default_graph()
x_in = tf.placeholder(tf.float32, name='x_in',
                      shape=[H['image_height'], H['image_width'], 3])
if H['use_rezoom']:
    (pred_boxes, pred_logits, pred_confidences,
     pred_confs_deltas, pred_boxes_deltas) = build_forward(
        H, tf.expand_dims(x_in, 0), 'test', reuse=None)
    grid_area = H['grid_height'] * H['grid_width']
    pred_confidences = tf.reshape(
        tf.nn.softmax(
            tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])),
        [grid_area, H['rnn_len'], 2])
    if H['reregress']:
        pred_boxes = pred_boxes + pred_boxes_deltas