def extract_gt_features(dataset_name):
    """Extract Faster R-CNN features for every ground-truth box of a dataset.

    For each image returned by ``get_dataset_info``, the ground-truth boxes
    are reordered into the detector's expected layout, run through a
    pre-trained Faster R-CNN feature extractor, and the per-image class lists
    plus feature arrays are pickled to ``../pkls/new_vot_<dataset_name>_gt.pkl``.
    """
    sys.path.append('faster_rcnn_pytorch')
    from extract_feature_from_bb import extractfeatures, build_extractor

    # model_file = 'faster_rcnn_pytorch/models/VGGnet_fast_rcnn_iter_70000.h5'
    model_file = '../faster_rcnn_voc/models/saved_vot2007/faster_rcnn_270000.pth.tar'
    extractor = build_extractor(model_file)

    img_paths, gts = get_dataset_info(dataset_name)
    classes_gt = []
    features_gt = []
    for img_path, gt in zip(img_paths, gts):
        classes = [entry[0] for entry in gt]
        # GT boxes appear to be stored (y1, x1, y2, x2); the extractor wants
        # rows of [batch_idx, x1, y1, x2, y2], hence the reordering and the
        # leading 0. — TODO confirm against get_dataset_info's box layout.
        dets = np.array(
            [[0., entry[1][1], entry[1][0], entry[1][3], entry[1][2]]
             for entry in gt],
            dtype='float32')
        # dets = np.array([[0.] + list(g[1]) for g in gt], dtype='float32')

        DEBUG = False
        if DEBUG:
            # Visual sanity check: draw every detection rectangle on the image.
            img = cv2.imread(img_path)
            for det in dets:
                cv2.rectangle(img,
                              (int(det[1]), int(det[2])),
                              (int(det[3]), int(det[4])),
                              (0, 204, 0), 2)
            cv2.imshow('img', img)
            cv2.waitKey(0)

        feature_gt = extractfeatures(img_path, extractor, dets)
        classes_gt.append(classes)
        features_gt.append(feature_gt.data.cpu().numpy())

    gt_pkl = '../pkls/new_vot_{}_gt.pkl'.format(dataset_name)
    with open(gt_pkl, 'wb') as f:
        pkl.dump([classes_gt, features_gt], f)
def load_info_test(dataset_name, target_classname):
    """Load per-image GT features and boxes of *target_classname* for testing.

    Reads the feature pickle produced by ``extract_gt_features`` and, for
    every image containing at least one object of *target_classname*, keeps
    ALL matching objects (unlike the train loader, which keeps only the first).

    Args:
        dataset_name: dataset split name used to locate the pickle file.
        target_classname: class label to filter ground-truth objects by.

    Returns:
        Tuple of three index-aligned lists (one entry per kept image):
        - features_gt_use: lists of L2-normalized feature vectors,
        - img_paths_use:   image paths,
        - bboxs_gt_use:    lists of ground-truth boxes.
    """
    img_paths, bboxs_gts = get_dataset_info(dataset_name)
    gt_info = pkl.load(
        open('../pkls/new_vot_' + dataset_name + '_gt.pkl', 'rb'))
    classes_gt = gt_info[0]
    features_gt = gt_info[1]
    features_gt_use = []
    img_paths_use = []
    bboxs_gt_use = []
    for img_path, class_info, feature_gt, bboxs_gt in zip(
            img_paths, classes_gt, features_gt, bboxs_gts):
        exist_this_class = False
        features_gt_list = []
        bboxs_gt_list = []
        for classname, f_gt, bbox_gt in zip(class_info, feature_gt, bboxs_gt):
            bbox_gt = bbox_gt[1]  # entry looks like (classname, bbox); keep the bbox
            if classname == target_classname:
                # L2-normalize so downstream similarity is a plain dot product.
                features_gt_list.append(f_gt / norm(f_gt))
                bboxs_gt_list.append(bbox_gt)
                exist_this_class = True
        if exist_this_class:
            # BUG FIX: the original appended bboxs_gt_list here, so the
            # "features" output actually contained bounding boxes and the
            # normalized features computed above were silently discarded.
            features_gt_use.append(features_gt_list)
            img_paths_use.append(img_path)
            bboxs_gt_use.append(bboxs_gt_list)
    return features_gt_use, img_paths_use, bboxs_gt_use
def load_info_train(dataset_name, target_classname):
    """Load one GT feature/box per image of *target_classname* for training.

    Reads the feature pickle produced by ``extract_gt_features`` and keeps
    only the FIRST matching ground-truth object from each image.

    Returns:
        Tuple of three index-aligned lists:
        (L2-normalized feature vectors, image paths, ground-truth boxes).
    """
    img_paths, bboxs_gts = get_dataset_info(dataset_name)
    gt_info = pkl.load(
        open('../pkls/new_vot_' + dataset_name + '_gt.pkl', 'rb'))
    classes_gt, features_gt = gt_info[0], gt_info[1]

    features_use, paths_use, boxes_use = [], [], []
    for img_path, class_info, feature_gt, bboxs_gt in zip(
            img_paths, classes_gt, features_gt, bboxs_gts):
        for classname, f_gt, bbox_entry in zip(class_info, feature_gt, bboxs_gt):
            if classname != target_classname:
                continue
            # L2-normalize the feature so similarity reduces to a dot product.
            features_use.append(f_gt / norm(f_gt))
            paths_use.append(img_path)
            boxes_use.append(bbox_entry[1])  # entry is (classname, bbox)
            # Only the first matching object per image is stored.
            break
    return features_use, paths_use, boxes_use
# coding: utf-8 import cv2 import os import numpy as np import pickle as pkl from preprocess import get_dataset_info import sys from edge_boxes_with_python.edge_boxes import get_windows import argparse if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--dataset-name', default='trainval') args = parser.parse_args() dataset_name = args.dataset_name img_paths, gts = get_dataset_info(dataset_name) bbslist_pkl = '../pkls/vot_{}_bbslist.pkl'.format(dataset_name) if os.path.exists(bbslist_pkl): print('{} exists'.format(bbslist_pkl)) else: img_paths = [os.path.abspath(img_path) for img_path in img_paths] bbslist = [] step = 0 gap = 1000 img_no = len(img_paths) while step < img_no: try: bbslist.extend( get_windows(img_paths[step:min(img_no, step + gap)])) except: