def create_test():
    print("Loading testing data...")
    pair1 = {}
    pair2 = []
    frame_dir = FLAGS.test_dir
    poi_dir = "poi"
    query_list = ["search", "streetview_clean", "aerial_clean"]
    query = {}
    # index every query image (search / street view / aerial) by its GPS string
    for query_dir in query_list:
        query[query_dir] = {}
        for img_name in os.listdir(query_dir):
            gps = img_name.replace(".jpg", "").replace(".png", "")
            query[query_dir][gps] = cv2.imread(
                osp.join(query_dir, img_name), cv2.IMREAD_COLOR)
    for img_name in os.listdir(frame_dir):
        im = cv2.imread(osp.join(frame_dir, img_name), cv2.IMREAD_COLOR)
        img_name = img_name.replace(".jpg", "").replace(".png", "")
        with open(osp.join(poi_dir, img_name + ".txt"), 'r') as f:
            for line in f:
                token = line.strip().split("\t")
                gps_pos = [0, 0]
                [name, gps_pos[0], gps_pos[1], google_type, img_ref, gt] = token
                query_name = str(gps_pos[0]) + '_' + str(gps_pos[1])
                gt = [int(i) for i in gt.split(',')]
                # crop the annotated ground-truth box as the positive sample
                positive = im[gt[1]:gt[3], gt[0]:gt[2]]
                for query_dir in query_list:
                    if query_dir not in pair1:
                        pair1[query_dir] = []
                    pair1[query_dir].append(
                        data.transform_img(query[query_dir][query_name]))
                pair2.append(data.transform_img(positive))
    for query_dir in query_list:
        pair1[query_dir] = np.array(pair1[query_dir])
    test_data = [pair1, np.array(pair2)]
    print("\nFinish loading... size of testing data: {}".format(
        len(test_data[1])))
    return test_data
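# --- Hedged usage sketch (illustrative, not from the original source): a
# quick sanity check on create_test()'s output, assuming the poi/ and query
# directories exist. Each pair1 branch is aligned index-for-index with pair2.
def _check_test_data():
    pair1, pair2 = create_test()
    for query_dir, anchors in pair1.items():
        assert len(anchors) == len(pair2)  # one query crop per positive crop
        print("{}: {} pairs of shape {}".format(
            query_dir, len(anchors), anchors.shape[1:]))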
with tf.Graph().as_default(), tf.Session(config=config) as sess:
    img_input = tf.placeholder('float32', shape=(None, 227, 227, 3))
    feature = model.inference(img_input, 1, FLAGS.feature, False)
    norm_cross_pred = model.feature_normalize([feature])
    pred = norm_cross_pred[0]
    saver = tf.train.Saver()
    saver.restore(sess, FLAGS.model_dir)

    img_name = FLAGS.file.replace(".jpg", "").replace(".png", "")
    print("Load image: {}".format(img_name))
    img_name = osp.basename(img_name)
    img = cv2.imread(FLAGS.file, cv2.IMREAD_COLOR)
    img = transform_img(img, 227, 227)
    for layer in layer_list:
        output_layer = osp.join(output_root, layer)
        if not osp.exists(output_layer):
            os.makedirs(output_layer)
        # extract the normalized feature for the single input image
        out = sess.run(pred, feed_dict={img_input: [img]})
        out = np.array(out[0])
        with open(os.path.join(output_layer, img_name + '.pkl'), 'wb') as ff:
            pickle.dump(out, ff)
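# --- Hedged round-trip helper (illustrative, not part of the original
# source): reads back a feature written by the loop above, assuming the
# <output_root>/<layer>/<img_name>.pkl layout it produces and the module's
# existing osp/pickle/np imports.
def load_feature(output_root, layer, img_name):
    with open(osp.join(output_root, layer, img_name + '.pkl'), 'rb') as f:
        return np.asarray(pickle.load(f))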
def run(model, img, write=False, path=None, use_gpu=False):
    """
    Args:
        model (Task2Models): object which runs models on an image depending on task.
        img (PIL Image): loaded image to classify and localize.
        write (bool): if True, save CAM overlays to disk; otherwise return them.
        path (str): source image path, used to derive the CAM output path.
        use_gpu (bool): run the transform and model on GPU.
    """
    img_tensor = transform_img(img, use_gpu=use_gpu)
    img = un_normalize(img_tensor.squeeze(0), use_gpu=use_gpu)
    # move the RGB channel axis to the last dimension (CHW -> HWC)
    img = np.moveaxis(img, 0, 2)
    _, channels, height, width = img_tensor.shape
    all_task2prob_cam = {}
    task2cam_path = {}
    for tasks in model:
        print(tasks)
        start = time.time()
        task2prob_cam = model.infer(img_tensor, tasks)
        print(f"Loading+Inference time: {time.time() - start}")
        if write:
            for task, (task_prob, _) in task2prob_cam.items():
                if path is not None:
                    cam_path = path.replace(
                        "/data3/CXR-CHEST/DATA/images/", "").replace(
                        "valid", "/data3/xray4all/valid_results/valid_cam")
                    cam_dir = os.path.dirname(cam_path)
                    cam_basename = os.path.basename(cam_path)
                    cam_path = os.path.join(cam_dir, task + "_" + cam_basename)
                    if not os.path.exists(cam_dir):
                        os.makedirs(cam_dir)
                    task2cam_path[task] = cam_path
                    with open("/data3/xray4all/valid_results/probs.csv", 'a') as f:
                        print(task2cam_path[task] + "," + str(task_prob), file=f)
                else:
                    cam_path = f'cams/CAM_{task}_{task_prob:.3f}.jpg'
                    task2cam_path[task] = cam_path
        for task, (task_prob, task_cam) in task2prob_cam.items():
            # upsample the CAM to the input resolution and overlay it
            resized_cam = cv2.resize(task_cam, (height, width))
            img_with_cam = add_heat_map(img, resized_cam, normalize=False)
            if write:
                scipy.misc.imsave(task2cam_path[task], img_with_cam)
            else:
                all_task2prob_cam[task] = (task_prob, img_with_cam)
    return all_task2prob_cam
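# --- Hedged driver sketch (illustrative; the model construction and image
# path are assumptions, run() itself is defined above):
def classify_one(model, img_path, use_gpu=False):
    from PIL import Image
    img = Image.open(img_path).convert('RGB')  # run() expects a PIL image
    # write=False returns {task: (probability, CAM overlay)} instead of saving
    return run(model, img, write=False, path=img_path, use_gpu=use_gpu)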
def create_triplet_drone():
    print("Loading training data...")
    train_data = []
    frame_dir = FLAGS.train_dir
    poi_dir = "poi"
    bb_dir = "faster_bb"
    query_list = ["search", "streetview_clean", "aerial_clean"]
    temp_dir = osp.join("temp", parameter_name)
    negative_threshold = 0.3
    query = {}
    if not os.path.exists(temp_dir):
        os.makedirs(temp_dir)
    # cache every transformed query image (search / street view / aerial)
    for query_dir in query_list:
        query_path = osp.join(temp_dir, query_dir)
        if not os.path.exists(query_path):
            os.makedirs(query_path)
        for img_name in os.listdir(query_dir):
            gps = img_name.replace(".jpg", "").replace(".png", "")
            query[gps] = cv2.imread(osp.join(query_dir, img_name),
                                    cv2.IMREAD_COLOR)
            cv2.imwrite(osp.join(query_path, img_name),
                        data.transform_img(query[gps]))
    for img_name in os.listdir(frame_dir):
        im = cv2.imread(osp.join(frame_dir, img_name), cv2.IMREAD_COLOR)
        img_name = img_name.replace(".jpg", "").replace(".png", "")
        negative_dir = osp.join(temp_dir, img_name, "negative_dir")
        if not os.path.exists(negative_dir):
            os.makedirs(negative_dir)
        # write every detector proposal first; low-IoU ones become negatives
        with open(osp.join(bb_dir, img_name + ".txt"), 'r') as ff:
            for linee in ff:
                token = linee.strip().split()
                bb = [int(float(token[0])), int(float(token[1])),
                      int(float(token[2])), int(float(token[3]))]
                cv2.imwrite(
                    osp.join(negative_dir,
                             "{}_{}_{}_{}.jpg".format(bb[0], bb[1], bb[2], bb[3])),
                    data.transform_img(im[bb[1]:bb[3], bb[0]:bb[2]]))
        with open(osp.join(poi_dir, img_name + ".txt"), 'r') as f:
            for line in f:
                token = line.strip().split("\t")
                gps_pos = [0, 0]
                [name, gps_pos[0], gps_pos[1], google_type, img_ref, gt] = token
                query_name = str(gps_pos[0]) + '_' + str(gps_pos[1])
                gt = [int(i) for i in gt.split(',')]
                positive = im[gt[1]:gt[3], gt[0]:gt[2]]
                positive_dir = "{}_{}_{}_{}".format(gt[0], gt[1], gt[2], gt[3])
                negative_list = []
                with open(osp.join(bb_dir, img_name + ".txt"), 'r') as ff:
                    for linee in ff:
                        token = linee.strip().split()
                        bb = [int(float(token[0])), int(float(token[1])),
                              int(float(token[2])), int(float(token[3]))]
                        # proposals barely overlapping the ground truth are negatives
                        if similarity.iou(gt, bb) < negative_threshold:
                            negative_list.append(bb)
                # cap at the available negatives so random.sample stays valid
                positive_num = negative_num = min(200, len(negative_list))
                bb = data.proposal_enlarge(im, gt, FLAGS.enlarge)
                positive_path = osp.join(temp_dir, img_name, positive_dir)
                if not os.path.exists(positive_path):
                    os.makedirs(positive_path)
                gt_path = osp.join(positive_path, 'gt.jpg')
                cv2.imwrite(gt_path, positive)
                if FLAGS.da:
                    # drop augmented crops left over from a previous run
                    # (keep gt.jpg, which was just written above)
                    for stale in os.listdir(positive_path):
                        if stale != 'gt.jpg':
                            os.remove(osp.join(positive_path, stale))
                    data.img_augmentation(im[bb[1]:bb[3], bb[0]:bb[2]],
                                          positive_num - 1, positive_path)
                    positive_list = [osp.join(positive_path, p)
                                     for p in os.listdir(positive_path)]
                else:
                    positive_list = [gt_path] * negative_num
                random.shuffle(positive_list)
                anchor = {}
                for query_dir in query_list:
                    anchor[query_dir] = osp.join(temp_dir, query_dir,
                                                 query_name + ".jpg")
                for index in random.sample(range(len(negative_list)), negative_num):
                    negative_bb = negative_list[index]
                    negative = osp.join(
                        negative_dir,
                        "{}_{}_{}_{}.jpg".format(negative_bb[0], negative_bb[1],
                                                 negative_bb[2], negative_bb[3]))
                    train_data.append(
                        TripletData(anchor, positive_list.pop(), negative))
                sys.stdout.write("\r{:6d}".format(len(train_data)))
                sys.stdout.flush()
    random.shuffle(train_data)
    print("\nFinish loading... size of training data: {}".format(len(train_data)))
    # hold out a small slice of the triplets for validation on training data
    val_ratio = 1. / 100
    split_index = int(len(train_data) * val_ratio)
    return Dataset(train_data[split_index:]), Dataset(train_data[:split_index])
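# --- Standalone illustration of the 1/100 validation split used above
# (pure Python, no repo dependencies; the sample count is only an example):
def _split_example():
    samples = list(range(1000))
    split_index = int(len(samples) * (1. / 100))
    train, val = samples[split_index:], samples[:split_index]
    assert (len(train), len(val)) == (990, 10)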
pkl_list = {}
if True:
    output_dir = os.path.join(output_root, "cross")
    for img_name in os.listdir(query_dir[0]):
        if img_name.find('.jpg') == -1:  # not a .jpg file (e.g. a directory)
            continue
        img_name = img_name.replace(".jpg", "").replace(".png", "")
        print(img_name)
        img = {}
        for query in query_dir:
            img[query] = cv2.imread(os.path.join(query, img_name + '.jpg'),
                                    cv2.IMREAD_COLOR)
            img[query] = transform_img(img[query], 227, 227)
        # queue one (image dict, output path) job per layer
        for layer in layer_list:
            output_layer = os.path.join(output_dir, layer)
            if layer not in pkl_list:
                pkl_list[layer] = []
            if not os.path.exists(output_layer):
                os.makedirs(output_layer)
            pkl_list[layer].append(
                [img, os.path.join(output_layer, img_name + ".pkl")])
    for layer in layer_list:
        write_cross_pkl(pkl_list[layer], sess, cross_pred)
if True: