def test_universal():
    """Round-trip cvb.dump/cvb.load through strings, filenames and file objects."""
    test_obj = [{'a': 'abc', 'b': 1}, 2, 'c']

    # dump as a string for every supported format
    for fmt in ('json', 'yaml', 'pickle'):
        cvb.dump(test_obj, format=fmt)

    # the format can be neither omitted ...
    with pytest.raises(ValueError):
        cvb.dump(test_obj)
    # ... nor inferred from an unrecognized extension
    with pytest.raises(TypeError):
        cvb.dump(test_obj, 'tmp.txt')

    # load/dump with a filename; the format is inferred from the suffix
    for fmt in ('json', 'yaml', 'pkl'):
        tmp_path = '.cvbase_test.tmp.' + fmt
        cvb.dump(test_obj, tmp_path)
        assert path.isfile(tmp_path)
        loaded = cvb.load(tmp_path)
        assert loaded == test_obj
        remove(tmp_path)

    # load/dump with an open file object; the format is passed explicitly
    for fmt in ('json', 'yaml', 'pkl'):
        tmp_path = '.cvbase_test.tmp.' + fmt
        binary = fmt == 'pkl'
        with open(tmp_path, 'wb' if binary else 'w') as fp:
            cvb.dump(test_obj, fp, format=fmt)
        assert path.isfile(tmp_path)
        with open(tmp_path, 'rb' if binary else 'r') as fp:
            loaded = cvb.load(fp, format=fmt)
        assert loaded == test_obj
        remove(tmp_path)
def run(thread_index, thread_num, result, args): cls_list = ['_' for _ in range(81)] # datasets val = TrainDataset(args.base_path, args.img_list, args.use_mode, cls_list, phase='test') num = len(val) New2Old = cvb.load('/data/luqi/coco-master/PythonAPI/Newlabel.pkl') multilabel = cvb.load('/data/luqi/dataset/pytorch_data/multilabel.pkl') thread_result = [] for i in range(num): if i % thread_num != thread_index: continue start_time = time.time() all_class_box_feature, all_class_box_box, all_class_box_score, all_class_box_label, all_class_box_weight, all_class_box_origin_score, all_class_box_origin_box, unique_class, unique_class_len, image_id, phase_np = val[ i] bboxes = [] for cls_index in range(80): if unique_class[cls_index] == 0: continue img_name = str(int(image_id)).zfill(12) class_score = float(multilabel[img_name][cls_index]) start = int(unique_class_len[cls_index]) end = int(unique_class_len[cls_index + 1]) for index in range(start, end): # if(all_class_box_label[index]==0): # continue x1, y1, x2, y2 = all_class_box_origin_box[index, 0:4] # score = all_class_box_origin_score[index, 0] * class_score score = all_class_box_origin_score[index, 0] category_id = New2Old[str(cls_index + 1)][1] bboxes.append({ 'bbox': [ int(x1), int(y1), int(x2) - int(x1) + 1, int(y2) - int(y1) + 1 ], 'score': float(score), 'category_id': category_id, 'image_id': int(image_id) }) # count += 1 thread_result.extend(bboxes) end_time = time.time() print_time = float(end_time - start_time) print('thread_index:{}, index:{}, image_id:{}, cost:{}'.format( thread_index, i, image_id, print_time)) result.extend(thread_result)
def __init__(self, annot_fn):
    """Load an annotation file and build a flat (annotation, region) index."""
    data = cvb.load(annot_fn)
    self.images_info = data['images']
    self.annot_info = data['annotations']
    # one entry per region: (index of annotation, index of region within it)
    self.indexing = [
        (ann_idx, region_idx)
        for ann_idx, ann in enumerate(self.annot_info)
        for region_idx in range(len(ann['regions']))
    ]
def __init__(self, dataset, annot_fn):
    """Load COCO-style annotations and build per-image lookup tables."""
    self.dataset = dataset
    data = cvb.load(annot_fn)
    self.images_info = data['images']
    self.annot_info = data['annotations']
    self.category_info = data['categories']
    # index image metadata by image id
    self.imgfn_dict = {info['id']: info['file_name'] for info in self.images_info}
    self.size_dict = {info['id']: (info['width'], info['height'])
                      for info in self.images_info}
    self.anns_dict = self.make_dict()
    self.img_ids = list(self.anns_dict.keys())
def from_file(filename):
    """Build a Config from a file.

    Supported inputs: a ``.py`` module whose public top-level names become
    config entries, or a ``.yaml``/``.json`` file loaded via ``cvb.load``.

    Raises:
        IOError: if the filename has an unsupported extension.
    """
    if filename.endswith('.py'):
        sys.path.append(osp.dirname(filename))
        # BUGFIX: the original used .rstrip('.py'), which strips any trailing
        # run of the characters '.', 'p', 'y' — e.g. 'happy.py' -> 'ha'.
        # splitext removes exactly the extension.
        module_name = osp.splitext(osp.basename(filename))[0]
        cfg = import_module(module_name)
        # keep only public module-level names as config entries
        config_dict = {
            name: value
            for name, value in cfg.__dict__.items()
            if not name.startswith('_')
        }
    elif filename.endswith(('.yaml', '.json')):
        config_dict = cvb.load(filename)
    else:
        raise IOError(
            'only py/yaml/json type are supported as config files')
    return Config(config_dict, filename=filename)
def test_solver(model, data_loader, output_dir):
    """Run the re-ranking model over a data loader and dump COCO-style results.

    Args:
        model: network to evaluate; weights are loaded from output_dir[0].
        data_loader: yields per-image tensors (features, scores, boxes, ...).
        output_dir: pair of (checkpoint path, output/log directory).

    Writes ``result.json`` into output_dir[1]. Requires CUDA.
    """
    # load checkpoint
    load_checkpoint(model, output_dir[0])
    # NOTE(review): hard-coded absolute path — only valid on the original cluster.
    New2Old = cvb.load('/mnt/lustre/liushu1/mask_rcnn/coco-master/PythonAPI/Newlabel.pkl')
    result_path = os.path.join(output_dir[1], 'result.json')
    log_dir = output_dir[1]
    count = 0
    # timestamped log file per run
    logger = solver_log(os.path.join(log_dir, 'test_' + time.strftime('%Y%m%d_%H%M%S', time.localtime()) + '.log'))
    # logger = solver_log(os.path.join(log_dir, 'test1.log'))
    results = []
    for box_feature, rank_score, box_box, box_label, box_score_origin, box_box_origin, image_id, box_keep_np in data_loader:
        # print(image_id)
        image_id = int(image_id.numpy())
        bboxes = []
        start = time.time()
        box_feature_variable = Variable(box_feature).cuda()
        box_score_variable = Variable(rank_score).cuda()
        box_label_variable = Variable(box_label).cuda()  # NOTE(review): unused below
        box_box_variable = Variable(box_box).cuda()
        # forward pass; `test` is presumably the project's inference helper
        output = test(box_feature_variable, box_score_variable, box_box_variable, model)
        # keep = list(np.where(output==1)[0])
        box_score_origin = box_score_origin.cpu().numpy().astype(np.float)
        box_keep_np = box_keep_np.cpu().numpy().astype(np.int)
        # rescale original detector scores by the network output
        final_score = box_score_origin * output
        # for index in keep:
        for index in range(final_score.shape[0]):
            # cls_index = np.argmax(box_score_origin[index, :])
            # classes flagged as kept for this proposal
            cls_all_index = np.where(box_keep_np[index, :] == 1)[0]
            for cls_index in cls_all_index:
                # per-class boxes are packed 4 columns per class
                x1, y1, x2, y2 = box_box_origin[index, cls_index*4:cls_index*4+4]
                score = final_score[index, cls_index]
                # score = box_score_origin[index, cls_index]
                # map contiguous 1..80 index to the sparse COCO category id
                category_id = New2Old[str(cls_index+1)][1]
                # corner coords -> COCO [x, y, w, h]
                bboxes.append({'bbox': [int(x1), int(y1), int(x2)-int(x1)+1, int(y2)-int(y1)+1],
                               'score': float(score),
                               'category_id': category_id,
                               'image_id': int(image_id)})
        count += 1
        end = time.time()
        print_time = float(end-start)
        results.extend(bboxes)
        logger.info('index:{}, image_id:{}, cost:{}'.format(count, image_id, print_time))
    cvb.dump(results, result_path)
def run(thread_index, thread_num, result, args):
    """Worker: run (soft-)NMS over this worker's share of images and extend
    the shared `result` list with COCO-style detection dicts.

    Samples are assigned round-robin: ``index % thread_num == thread_index``.
    """
    txt_path = os.path.join(args.base_path, args.img_list)
    # NOTE(review): hard-coded absolute path — only valid on the original cluster.
    New2Old = cvb.load(
        '/mnt/lustre/liushu1/mask_rcnn/coco-master/PythonAPI/Newlabel.pkl')
    proposal_base_path = os.path.join(args.base_path, 'info/')
    thread_result = []
    val_list = cvb.list_from_file(txt_path)
    val_num = len(val_list)  # NOTE(review): unused
    for index, img_name in enumerate(val_list):
        if index % thread_num != thread_index:
            continue
        proposal_path = os.path.join(proposal_base_path, img_name + '.pkl')
        # box_feature, box_box, box_score = cvb.load(proposal_path)
        # latin-1 encoding: pickles were written by Python 2
        box_feature, box_box, box_score = pkl.load(open(
            os.path.join(proposal_path), 'rb'), encoding='iso-8859-1')
        box_feature = box_feature.astype(np.float)
        box_box = box_box.astype(np.float)
        box_score = box_score.astype(np.float)
        # alternative post-processing strategies tried by the author:
        # bbox = verify_nms(box_box, box_score, img_name, New2Old, iou_thr=0.5, score_thr=0.1)
        # bbox = verify_nms_with_box_voting(box_box, box_score, img_name, New2Old, iou_thr=0.5, score_thr=0.01, bv_method='ID')
        bbox = verify_soft_nms(box_box, box_score, img_name, New2Old, iou_thr=0.5, score_thr=0.01)
        # bbox = verify_nms_with_limit(box_box, box_score, img_name, New2Old, iou_thr=0.5)
        # cls_num = 0
        # for ii in range(1, 81):
        #     valid_index = np.where(box_score[:,ii]>=0.01)[0]
        #     valid_num = len(valid_index)
        #     cls_num += valid_num
        thread_result.extend(bbox)
        print('img_index:{}, bbox_len:{}'.format(img_name, len(bbox)))
        # print('thread_index:{}, index:{}'.format(thread_index, index))
    result.extend(thread_result)
def __getitem__(self, idx):
    """Assemble one training/test sample of per-class proposal tensors.

    Loads cached proposal features/boxes/scores and ground truth for image
    ``self.image_index[idx]``, groups proposals per class (cumulative-sum
    layout in ``unique_class_len``), filters them by a stage-1 score, and
    compacts everything into contiguous "*_final" arrays.

    Returns (phase-dependent): the final feature/box/score/label/weight
    arrays plus class bookkeeping; the 'test' phase additionally returns
    origin boxes and the image id.
    """
    # logger1 = solver_log(os.path.join('/mnt/lustre/liushu1/', 'load_dataset.log'))
    info_pkl_path = os.path.join(self.info_path, self.image_index[idx] + '.pkl')
    score_pkl_path = os.path.join(self.score_path, self.image_index[idx] + '.pkl')
    img_id = int(float(self.image_index[idx]))
    img_path = os.path.join(self.img_path, self.image_index[idx] + '.jpg')
    # image is read only to obtain its height/width for box normalization
    img = cv2.imread(img_path)
    height, width = img.shape[0:2]
    # latin-1 encoding: pickles were written by Python 2
    box_feature, box_box, box_score = pkl.load(open(
        os.path.join(info_pkl_path), 'rb'), encoding='iso-8859-1')
    box_feature = box_feature.astype(np.float)
    box_box = box_box.astype(np.float)
    box_score = box_score.astype(np.float)
    # gts_info = pkl.load(open(os.path.join(self.gt_path, self.image_index[idx] + '.pkl'), 'rb'), encoding='iso-8859-1')
    gts_info = pkl.load(
        open(os.path.join(self.gt_path, self.image_index[idx] + '.pkl'),
             'rb'))
    gts_box = np.zeros((len(gts_info), 5))
    for index, gt in enumerate(gts_info):
        gts_box[index, :] = gt['bbox']
    # earlier label-assignment strategies kept for reference:
    # box_label, box_weight = stage1_before_assign_weight_slack_has_class(box_feature, box_box, box_score, gts_box, weight=self.weight)
    # box_label, box_weight = stage1_before_no_gt_assign_weight_slack_has_class(box_feature, box_box, box_score, gts_box, weight=self.weight)
    box_label, box_weight = stage_full_assign_weight_slack_has_class_v5(
        box_feature, box_box, box_score, gts_box, weight=self.weight)
    # drop the background column/coords (index 0 / first 4 box columns)
    box_score = box_score[:, 1:]
    # box_feature = proposals_feature
    box_box = box_box[:, 4:]
    unique_class = np.zeros(80).astype(np.float)
    unique_class_len = np.zeros(81).astype(np.float)
    # cumulative sum: boxes of class ii occupy rows
    # [unique_class_len[ii], unique_class_len[ii+1]) in the packed arrays
    cls_num = 0
    cls_list = []
    label_num = 0
    for ii in range(80):
        valid_index = np.where(box_score[:, ii] >= self.score_thresh)[0]
        cls_list.append(valid_index)
        valid_num = len(valid_index)
        cls_num += valid_num
        if not valid_num == 0:
            unique_class[ii] = 1
            label_num += len(np.where(box_label[valid_index, ii] == 1)[0])
        unique_class_len[ii + 1] = valid_num + unique_class_len[ii]
    # keep originals before normalizing boxes in-place
    box_box_origin = box_box.copy()
    box_score_origin = box_score.copy()
    # log-normalize coords by image size (+0.5 keeps the argument positive)
    box_box[:, 0::2] = np.log(box_box[:, 0::2] / float(width) + 0.5)
    box_box[:, 1::2] = np.log(box_box[:, 1::2] / float(height) + 0.5)
    # score_info columns: [stage-1 score, original detector score]
    score_info = cvb.load(score_pkl_path)
    save_score_stage1 = score_info[:, 0:1]
    save_score_origin = score_info[:, 1:2]
    save_score_final = save_score_stage1 * save_score_origin
    all_class_box_feature = np.zeros((cls_num, 1024))
    all_class_box_weight = np.ones((cls_num, 1))
    all_class_box_label = np.zeros((cls_num, 1))
    all_class_box_box = np.zeros((cls_num, 32))
    all_class_box_score = np.zeros((cls_num, 32))
    all_class_box_origin_box = np.zeros((cls_num, 4))
    all_class_box_origin_score = np.zeros((cls_num, 32))
    # per-class count of proposals surviving the stage-1 filter
    unique_class_len_sta = np.zeros(80)
    for ii in range(80):
        if unique_class[ii] == 0:
            continue
        start = int(unique_class_len[ii])
        end = int(unique_class_len[ii + 1])
        valid_index = cls_list[ii]
        # sort this class's proposals by detector score, descending
        origin_temp = box_score[valid_index, ii].argsort()[::-1]
        valid_sort = valid_index[origin_temp]
        # final_sort = valid_index[origin_temp]
        # final_valid = np.where(save_score_final[start:end, 0] >= 0)[0]
        # keep proposals whose combined stage-1 score is <= 0.2
        final_valid = np.where(save_score_final[start:end, 0] <= 0.2)[0]
        # (other candidate orderings are kept commented in the history)
        final_sort = valid_sort[final_valid]
        # final_temp = save_score_final[start:end, 0].argsort()[::-1]
        # final_sort = valid_sort[final_temp]
        filter_num = len(final_sort)
        # print('{}/{}'.format(filter_num, end-start+1))
        unique_class_len_sta[ii] = filter_num
        all_class_box_weight[start:start + filter_num, 0] = box_weight[final_sort, ii]
        all_class_box_label[start:start + filter_num, 0] = box_label[final_sort, ii]
        # tile the 4 class coords to fill the 32-wide slot (8 copies)
        all_class_box_box[start:start + filter_num, :] = np.tile(
            box_box[final_sort, ii * 4:(ii + 1) * 4], 8)
        # broadcast the filtered stage-1 score across all 32 columns
        all_class_box_origin_score[start:start + filter_num, :] = np.tile(
            save_score_final[start:end, 0:1][final_valid, 0:1], 32)
        # all_class_box_origin_score[start:start+filter_num, :] = np.tile(save_score_origin[start:end, 0:1][final_valid, 0:1], 32)
        # all_class_box_origin_score[start:start+filter_num, :] = np.tile(save_score_final[start:end, 0:1][final_temp, 0:1], 32)
        # rank-encoding lookup table — presumably positional encodings; TODO confirm
        all_class_box_score[
            start:start + filter_num, :] = self.search_table[0:filter_num, 0:32]
        all_class_box_feature[start:start + filter_num, :] = box_feature[final_sort, :]
        all_class_box_origin_box[
            start:start + filter_num, :] = box_box_origin[final_sort,
                                                          ii * 4:(ii + 1) * 4]
    # final box: compact filtered rows into contiguous "*_final" arrays
    all_class_num = int(np.sum(unique_class_len_sta))
    unique_class_final = np.zeros(80).astype(np.float)
    unique_class_len_final = np.zeros(81).astype(np.float)
    all_class_box_feature_final = np.zeros((all_class_num, 1024))
    all_class_box_weight_final = np.ones((all_class_num, 1))
    all_class_box_label_final = np.zeros((all_class_num, 1))
    all_class_box_box_final = np.zeros((all_class_num, 32))
    all_class_box_origin_box_final = np.zeros((all_class_num, 4))
    all_class_box_origin_score_final = np.zeros((all_class_num, 32))
    all_class_box_score_final = np.zeros((all_class_num, 32))
    for ii in range(80):
        if unique_class[ii] == 0:
            unique_class_final[ii] = 0
            unique_class_len_final[ii + 1] = unique_class_len_final[ii]
            continue
        start_old = int(unique_class_len[ii])
        filter_num = int(unique_class_len_sta[ii])
        if filter_num > 0:
            unique_class_final[ii] = 1
            unique_class_len_final[
                ii + 1] = unique_class_len_final[ii] + filter_num
        else:
            # class had candidates but none survived the stage-1 filter
            unique_class_final[ii] = 0
            unique_class_len_final[ii + 1] = unique_class_len_final[ii]
            continue
        start_new = int(unique_class_len_final[ii])
        end_new = int(unique_class_len_final[ii + 1])
        end_old = start_old + filter_num
        all_class_box_weight_final[
            start_new:end_new, 0] = all_class_box_weight[start_old:end_old, 0]
        all_class_box_label_final[
            start_new:end_new, 0] = all_class_box_label[start_old:end_old, 0]
        all_class_box_box_final[start_new:end_new, :] = all_class_box_box[
            start_old:end_old, :]
        all_class_box_origin_score_final[
            start_new:end_new, :] = all_class_box_origin_score[
                start_old:end_old, :]
        all_class_box_score_final[
            start_new:end_new, :] = all_class_box_score[
                start_old:end_old, :]
        all_class_box_feature_final[
            start_new:end_new, :] = all_class_box_feature[
                start_old:end_old, :]
        all_class_box_origin_box_final[
            start_new:end_new, :] = all_class_box_origin_box[
                start_old:end_old, :]
    # (author's commented-out consistency-check/diagnostic prints omitted)
    # phase flag encoded as a 1-element array: 1=train, 2='bug', 0 otherwise
    phase_np = np.zeros(1)
    if self.phase == 'train':
        phase_np[0] = 1
    if self.phase == 'bug':
        phase_np[0] = 2
    # class_split_score_feature or box_feature
    if self.phase == 'train':
        return all_class_box_feature_final, all_class_box_box_final, all_class_box_score_final, all_class_box_label_final, all_class_box_weight_final, all_class_box_origin_score_final, unique_class_final, unique_class_len_final, phase_np
    # return all_class_box_feature, all_class_box_box, all_class_box_score, all_class_box_label, all_class_box_weight, all_class_box_origin_score, unique_class, unique_class_len, phase_np
    elif self.phase == 'test':
        return all_class_box_feature_final, all_class_box_box_final, all_class_box_score_final, all_class_box_label_final, all_class_box_weight_final, all_class_box_origin_score_final, all_class_box_origin_box_final, unique_class_final, unique_class_len_final, img_id, phase_np
def test_solver(model, dataset, output_dir, thread_index, thread_num):
    """Evaluate the re-ranking model on this worker's share of the dataset.

    Args:
        model: network to evaluate; weights are loaded from output_dir[0].
        dataset: indexable dataset returning per-image numpy tuples.
        output_dir: pair of (checkpoint path, log directory).
        thread_index/thread_num: round-robin work split across workers.

    Returns:
        List of COCO-style detection dicts. Requires CUDA.
    """
    # load checkpoint
    load_checkpoint(model, output_dir[0])
    # NOTE(review): hard-coded absolute path — only valid on the original cluster.
    New2Old = cvb.load(
        '/mnt/lustre/liushu1/mask_rcnn/coco-master/PythonAPI/Newlabel.pkl')
    # result_path = os.path.join(output_dir[1], 'result.json')
    np.set_printoptions(formatter={'float': '{: 0.4f}'.format})
    log_dir = output_dir[1]
    # count = 0
    logger = solver_log(
        os.path.join(
            log_dir,
            'test_' + time.strftime('%Y%m%d_%H%M%S', time.localtime()) +
            '.log'))
    # logger = solver_log(os.path.join(log_dir, 'test1.log'))
    results = []
    data_num = len(dataset)
    for count in range(data_num):
        # round-robin assignment of samples to workers
        if count % thread_num != thread_index:
            continue
        data_np = dataset[count]
        # unpack the dataset tuple (see the dataset's 'test'-phase return)
        all_class_box_feature, all_class_box_box, all_class_box_score = torch.FloatTensor(
            data_np[0]), torch.FloatTensor(data_np[1]), torch.FloatTensor(
                data_np[2])
        all_class_box_label = data_np[3]
        all_class_box_weight = data_np[4]
        # skip images with no surviving proposals
        if all_class_box_weight.shape[0] == 0:
            continue
        all_class_box_origin_score, all_class_box_origin_box = torch.FloatTensor(
            data_np[5]), data_np[6]
        unique_class, unique_class_len = torch.FloatTensor(
            data_np[7]), torch.FloatTensor(data_np[8])
        unique_class_np, unique_class_len_np = data_np[7], data_np[8]
        image_id = int(data_np[9])
        # if data_np[1].shape[0]==0:
        #     results.extend(image_id)
        bboxes = []
        start = time.time()
        # all_class_box_label_variable = Variable(all_class_box_label).cuda()
        all_class_box_score_variable = Variable(all_class_box_score).cuda()
        all_class_box_box_variable = Variable(all_class_box_box).cuda()
        all_class_box_feature_variable = Variable(all_class_box_feature).cuda()
        all_class_box_origin_score_variable = Variable(
            all_class_box_origin_score).cuda()
        unique_class_cuda = unique_class.cuda()
        unique_class_len_cuda = unique_class_len.cuda()
        # forward pass; `test` is presumably the project's inference helper
        output = test(all_class_box_feature_variable,
                      all_class_box_box_variable,
                      all_class_box_score_variable,
                      all_class_box_origin_score_variable, unique_class_cuda,
                      unique_class_len_cuda, model)
        # column 0 of the tiled origin-score block holds the per-box score
        box_score_origin = all_class_box_origin_score_variable.data.cpu(
        ).numpy().astype(np.float)[:, 0:1].reshape(-1, 1)
        # score-combination variants tried by the author:
        # final_score = box_score_origin
        # final_score = (box_score_origin + output) / 2
        final_score = box_score_origin * output
        # final_score = output
        for cls_index in range(80):
            if unique_class_np[cls_index] == 0:
                continue
            # cumulative-sum layout: class rows are [start_, end_)
            start_ = int(unique_class_len_np[cls_index])
            end_ = int(unique_class_len_np[cls_index + 1])
            # (author's commented DataFrame debugging dump omitted)
            for index in range(start_, end_):
                x1, y1, x2, y2 = all_class_box_origin_box[index, 0:4]
                score = final_score[index, 0]
                # if(score<0.01):
                #     continue
                # map contiguous 1..80 index to the sparse COCO category id
                category_id = New2Old[str(cls_index + 1)][1]
                bboxes.append({
                    'bbox':
                    # corner coords -> COCO [x, y, w, h]
                    [int(x1), int(y1),
                     int(x2 - x1 + 1),
                     int(y2 - y1 + 1)],
                    'score': float(score),
                    'category_id': category_id,
                    'image_id': int(image_id)
                })
        end = time.time()
        print_time = float(end - start)
        results.extend(bboxes)
        # if count==20:
        #     break
        logger.info('thread_index:{}, index:{}, image_id:{}, cost:{}'.format(
            thread_index, count, image_id, print_time))
    return results
return aa if __name__ == '__main__': base_path = '/data/luqi/dataset/pytorch_data/' img_list = 'val.txt' use_mode = 'unique' cls_list = ['_' for _ in range(81)] train = TrainDataset(base_path, img_list, use_mode, cls_list, weight=2, phase='test') num = len(train) New2Old = cvb.load('/data/luqi/coco-master/PythonAPI/Newlabel.pkl') np.set_printoptions(formatter={'float': '{: 0.8f}'.format}) results = [] for i in range(num): all_class_box_feature, all_class_box_box, all_class_box_score, all_class_box_label, all_class_box_weight, all_class_box_origin_score, all_class_box_origin_box, unique_class, unique_class_len, img_id, phase_np = train[ i] # if i==20: # break image_id = int(img_id) bboxes = [] for ii in range(80): if unique_class[ii] == 0: continue start = int(unique_class_len[ii]) end = int(unique_class_len[ii + 1]) for index in range(start, end):
num = len(ann_dict) # x1, y1, x2, y2, score, label ann_np = np.zeros((num, 6), dtype=np.float) for index, ann in enumerate(ann_dict): x1, y1, width, height = ann['bbox'] x2 = x1 + width - 1 y2 = y1 + height - 1 score = ann['score'] cls_index = int(Old2New[str(ann['category_id'])][1]) ann_np[index, :] = x1, y1, x2, y2, score, cls_index return ann_np if __name__ == '__main__': args = parse_args() New2Old = cvb.load('/data/luqi/coco-master/PythonAPI/Newlabel.pkl') Old2New = cvb.load('/data/luqi/coco-master/PythonAPI/Oldlabel.pkl') all_anns = cvb.load(args.output_dir) anns_dict = make_json_dict(all_anns) txt_path = os.path.join(args.base_path, args.img_list) all_index = cvb.list_from_file(txt_path) results = [] for count, img_index in enumerate(all_index): gts_info = pkl.load( open(os.path.join(args.base_path, 'gt/' + img_index + '.pkl'), 'rb')) gts_box = np.zeros((len(gts_info), 5)) for index, gt in enumerate(gts_info): gts_box[index, :] = gt['bbox'] # if gts_box[index, 4]>=100: # gts_box[index, 4] -= 100
return aa if __name__ == '__main__': base_path = '/mnt/lustre/liushu1/qilu_ex/dataset/coco/pytorch_data/' img_list = 'val.txt' use_mode = 'unique' cls_list = ['_' for _ in range(81)] train = TrainDataset(base_path, img_list, use_mode, cls_list, weight=2, phase='test') num = len(train) New2Old = cvb.load( '/mnt/lustre/liushu1/mask_rcnn/coco-master/PythonAPI/Newlabel.pkl') np.set_printoptions(formatter={'float': '{: 0.8f}'.format}) results = [] for i in range(num): all_class_box_feature, all_class_box_box, all_class_box_score, all_class_box_label, all_class_box_weight, all_class_box_origin_score, all_class_box_origin_box, unique_class, unique_class_len, img_id, phase_np = train[ i] # if i==20: # break image_id = int(img_id) bboxes = [] for ii in range(80): if unique_class[ii] == 0: continue start = int(unique_class_len[ii]) end = int(unique_class_len[ii + 1]) for index in range(start, end):
def test_solver(model, dataset, output_dir, thread_index, thread_num):
    """Visualization variant of the evaluation loop: draws the surviving
    detections on each image and writes the result to a fixed directory.

    Only the first 100 dataset indices are processed (`count >= 100` break).
    Returns `results`, which stays empty here — the extend call is commented
    out. Requires CUDA and the hard-coded cluster paths.
    """
    # load checkpoint
    load_checkpoint(model, output_dir[0])
    # NOTE(review): hard-coded absolute path — only valid on the original cluster.
    New2Old = cvb.load('/mnt/lustre/liushu1/mask_rcnn/coco-master/PythonAPI/Newlabel.pkl')
    # result_path = os.path.join(output_dir[1], 'result.json')
    np.set_printoptions(formatter={'float': '{: 0.4f}'.format})
    log_dir = output_dir[1]
    # per-class BGR colors for drawing
    color_map = _get_voc_color_map()
    # count = 0
    logger = solver_log(os.path.join(log_dir, 'test_' + time.strftime('%Y%m%d_%H%M%S', time.localtime()) + '.log'))
    # logger = solver_log(os.path.join(log_dir, 'test1.log'))
    results = []
    data_num = len(dataset)
    for count in range(data_num):
        # round-robin assignment of samples to workers
        if count % thread_num != thread_index:
            continue
        # visualize only the first 100 samples
        if count >= 100:
            break
        data_np = dataset[count]
        # unpack the dataset tuple (see the dataset's 'test'-phase return)
        # box_feature, rank_score, box_box = torch.FloatTensor(data_np[0]), torch.FloatTensor(data_np[1]), torch.FloatTensor(data_np[2])
        all_class_box_feature, all_class_box_box, all_class_box_score = torch.FloatTensor(data_np[0]), torch.FloatTensor(data_np[1]), torch.FloatTensor(data_np[2])
        all_class_box_label = data_np[3]
        all_class_box_weight = data_np[4]
        all_class_box_origin_score, all_class_box_origin_box = torch.FloatTensor(data_np[5]), data_np[6]
        unique_class, unique_class_len = torch.FloatTensor(data_np[7]), torch.FloatTensor(data_np[8])
        unique_class_np, unique_class_len_np = data_np[7], data_np[8]
        image_id = int(data_np[9])
        # COCO image filenames are the id zero-padded to 12 digits
        img_name = str(image_id).zfill(12)
        im_file = '/mnt/lustre/liushu1/qilu_ex/dataset/coco/fpn_bn_base/img/' + img_name + '.jpg'
        im = cv2.imread(im_file)
        bboxes = []
        start = time.time()
        # all_class_box_label_variable = Variable(all_class_box_label).cuda()
        all_class_box_score_variable = Variable(all_class_box_score).cuda()
        all_class_box_box_variable = Variable(all_class_box_box).cuda()
        all_class_box_feature_variable = Variable(all_class_box_feature).cuda()
        all_class_box_origin_score_variable = Variable(all_class_box_origin_score).cuda()
        unique_class_cuda = unique_class.cuda()
        unique_class_len_cuda = unique_class_len.cuda()
        # forward pass; `test` is presumably the project's inference helper
        output = test(all_class_box_feature_variable, all_class_box_box_variable, all_class_box_score_variable, all_class_box_origin_score_variable, unique_class_cuda, unique_class_len_cuda, model)
        # column 0 of the tiled origin-score block holds the per-box score
        box_score_origin = all_class_box_origin_score_variable.data.cpu().numpy().astype(np.float)[:, 0:1].reshape(-1, 1)
        # final_score = box_score_origin
        # final_score = (box_score_origin + output) / 2
        final_score = box_score_origin * output
        for cls_index in range(80):
            if unique_class_np[cls_index] == 0:
                continue
            # cumulative-sum layout: class rows are [start_, end_)
            start_ = int(unique_class_len_np[cls_index])
            end_ = int(unique_class_len_np[cls_index + 1])
            # (author's commented DataFrame debugging dump omitted)
            for index in range(start_, end_):
                x1, y1, x2, y2 = all_class_box_origin_box[index, 0:4]
                score = final_score[index, 0]
                # if(score<0.05):
                #     continue
                # NOTE(review): unlike the non-visual variant, this uses the
                # contiguous index + 1 directly, not the New2Old mapping
                category_id = cls_index + 1
                bboxes.append({'bbox': [int(x1), int(y1), int(x2 - x1 + 1), int(y2 - y1 + 1)],
                               'score': float(score),
                               'category_id': category_id,
                               'image_id': int(image_id)})
        # draw boxes above the score threshold onto the image
        for bbox_single in bboxes:
            cls_indx = int(bbox_single['category_id'])
            x1, y1, w, h = bbox_single['bbox']
            score = bbox_single['score']
            if score < 0.01:
                continue
            cv2.rectangle(im, (int(x1), int(y1)), (int(x1 + w - 1), int(y1 + h - 1)), tuple(color_map[cls_indx, :]), 2)
        # count += 1
        save_path = os.path.join('/mnt/lustre/liushu1/qilu_ex/Post_vis/', img_name + '.jpg')
        # save_gt_path = os.path.join(args.output_dir, img_name+'.jpg')
        # cv2.imwrite('/data/luqi/000000156500_proposal.png', im_proposal)
        cv2.imwrite(save_path, im)
        end = time.time()
        print_time = float(end - start)
        # results.extend(bboxes)
        # if count==20:
        #     break
        logger.info('thread_index:{}, index:{}, image_id:{}, cost:{}'.format(thread_index, count, image_id, print_time))
    return results
    # cvb.dump(results, result_path)
node_ymax.text = '%s' % bbox_ymax node_polygon = SubElement(node_object, 'rle') node_polygon.text = '%s' % rle node_inst_id = SubElement(node_object, 'inst_id') node_inst_id.text = '%s' % int(inst_id) count += 1 has_objects = True xml = tostring(node_root, pretty_print=True) dom = parseString(xml) save_xml = os.path.join(save_dir, img_name + '.xml') with open(save_xml, 'wb') as f: f.write(xml) if __name__ == "__main__": print('Loading', os.path.join(root, 'annotations', 'instances_train.json')) anns = cvb.load(train_json_path) # print(anns) imgs_dict, anns_dict = make_json_dict(anns["images"], anns["annotations"]) count = 0 save_dir = os.path.join(root, '') if not os.path.exists(save_dir): os.mkdir(save_dir) for img_id in tqdm(anns_dict.keys()): img_name = imgs_dict[img_id] # print(img_name) anns = anns_dict[img_id] # print(anns) run_one_image(img_id, anns, img_name) count += 1
def test_solver(model, dataset, output_dir, thread_index, thread_num):
    """Score this worker's share of the dataset using only the original
    detector scores (the network forward pass is commented out) and return
    COCO-style detection dicts.

    Args:
        model: network; weights are loaded from output_dir[0] (unused below
            while the forward pass is disabled).
        dataset: indexable dataset returning per-image numpy tuples.
        output_dir: pair of (checkpoint path, log directory).
        thread_index/thread_num: round-robin work split across workers.

    Returns:
        List of COCO-style detection dicts. Requires CUDA.

    BUGFIX: the original body contained a bare tuple expression listing ten
    undefined names and three ``Variable(...)`` constructions over undefined
    variables (``box_feature``, ``rank_score``, ``box_box``); each raised
    NameError on the first iteration. All four dead statements are removed —
    none of their results were used.
    """
    # load checkpoint
    load_checkpoint(model, output_dir[0])
    # NOTE(review): hard-coded absolute path — only valid on the original machine.
    New2Old = cvb.load('/data/luqi/coco-master/PythonAPI/Newlabel.pkl')
    # result_path = os.path.join(output_dir[1], 'result.json')
    log_dir = output_dir[1]
    logger = solver_log(os.path.join(log_dir, 'test_' + time.strftime('%Y%m%d_%H%M%S', time.localtime()) + '.log'))
    # logger = solver_log(os.path.join(log_dir, 'test1.log'))
    results = []
    data_num = len(dataset)
    for count in range(data_num):
        # round-robin assignment of samples to workers
        if count % thread_num != thread_index:
            continue
        data_np = dataset[count]
        all_class_box_feature, all_class_box_box, all_class_box_score = torch.FloatTensor(data_np[0]), torch.FloatTensor(data_np[1]), torch.FloatTensor(data_np[2])
        all_class_box_label = data_np[3]
        all_class_box_class, all_class_box_origin_score, all_class_box_origin_box = data_np[7], torch.FloatTensor(data_np[8]), data_np[9]
        unique_class, unique_class_len = torch.FloatTensor(data_np[10]), torch.FloatTensor(data_np[11])
        image_id = int(data_np[12])
        # all_class_box_single = all_class_box_origin_score[:, 0:1].numpy().copy()
        bboxes = []
        start = time.time()
        # all_class_box_label_variable = Variable(all_class_box_label).cuda()
        all_class_box_score_variable = Variable(all_class_box_score).cuda()
        all_class_box_box_variable = Variable(all_class_box_box).cuda()
        all_class_box_feature_variable = Variable(all_class_box_feature).cuda()
        all_class_box_origin_score_variable = Variable(all_class_box_origin_score).cuda()
        unique_class_cuda = unique_class.cuda()
        unique_class_len_cuda = unique_class_len.cuda()
        # forward pass currently disabled — scores pass through unchanged:
        # output = test(box_feature_variable, box_score_variable, box_box_variable, all_class_box_feature_variable, all_class_box_score_variable, all_class_box_box_variable, all_class_box_origin_score_variable, unique_class_cuda, unique_class_len_cuda, model)
        # column 0 of the tiled origin-score block holds the per-box score
        box_score_origin = all_class_box_origin_score_variable.data.cpu().numpy().astype(np.float)[:, 0:1].reshape(-1, 1)
        final_score = box_score_origin
        # final_score = box_score_origin * output
        # np.set_printoptions(formatter={'float': '{: 0.3f}'.format})
        # print(np.concatenate((all_class_box_class, all_class_box_label, box_score_origin, output, final_score), axis=1))
        # if count==20:
        #     break
        for index in range(final_score.shape[0]):
            cls_index = int(all_class_box_class[index, 0])
            x1, y1, x2, y2 = all_class_box_origin_box[index, 0:4]
            score = final_score[index, 0]
            # map contiguous 1..80 index to the sparse COCO category id
            category_id = New2Old[str(cls_index + 1)][1]
            # corner coords -> COCO [x, y, w, h]
            bboxes.append({'bbox': [int(x1), int(y1), int(x2) - int(x1) + 1, int(y2) - int(y1) + 1],
                           'score': float(score),
                           'category_id': category_id,
                           'image_id': int(image_id)})
        end = time.time()
        print_time = float(end - start)
        results.extend(bboxes)
        logger.info('thread_index:{}, index:{}, image_id:{}, cost:{}'.format(thread_index, count, image_id, print_time))
    return results
    # cvb.dump(results, result_path)
if is_train: base_img_path = "../data_object_image_2/training/image_2/" base_ann_path = "./update_train_2020.json" else: base_img_path = "../data_object_image_2/testing/image_2/" base_ann_path = "./update_test_2020.json" coco = COCO(base_ann_path) cats = coco.loadCats(coco.getCatIds()) cats_nms = [cat['name'] for cat in cats] # print('cats: \n{}\n'.format(' '.join(cats_nms))) catIds = coco.getCatIds(catNms=cats_nms) catDic = dict(zip(catIds, cats_nms)) anns = cvb.load(base_ann_path) imgs_info = anns['images'] anns_info = anns["annotations"] imgs_dict, anns_dict = make_json_dict(imgs_info, anns_info) img_file_dict = dict(zip(cats_nms, [[] for i in range(len(cats_nms))])) for img_id in anns_dict.keys(): img_name = imgs_dict[img_id] img_path = os.path.join(base_img_path, img_name) img = cv2.imread(img_path, cv2.IMREAD_COLOR) height, width, _ = img.shape anns = anns_dict[img_id]
def __getitem__(self, idx):
    """Build one per-image sample for the class-aware rescoring network.

    Loads cached proposal features/boxes/scores and ground-truth boxes for
    image ``self.image_index[idx]``, assigns labels/weights, filters out
    degenerate per-class boxes, then regroups the survivors class-by-class
    into fixed-width arrays ordered by the saved stage-1*origin score.

    Returns (phase-dependent):
        all_class_box_feature (cls_num, 1024), all_class_box_box (cls_num, 32),
        all_class_box_score (cls_num, 32), all_class_box_label (cls_num, 1),
        all_class_box_weight (cls_num, 1), all_class_box_origin_score
        (cls_num, 32), all_class_box_origin_box (cls_num, 4),
        unique_class (80,) 0/1 per-class presence mask,
        unique_class_len (81,) cumulative per-class offsets,
        img_id (only when phase == 'test'), phase_np (1,) phase flag.
    """
    # logger1 = solver_log(os.path.join('/mnt/lustre/liushu1/', 'load_dataset.log'))
    # Cached per-image pickle paths: proposal info and saved scores.
    info_pkl_path = os.path.join(self.info_path, self.image_index[idx] + '.pkl')
    score_pkl_path = os.path.join(self.score_path, self.image_index[idx] + '.pkl')
    img_id = int(float(self.image_index[idx]))
    img_path = os.path.join(self.img_path, self.image_index[idx] + '.jpg')
    # Image is read only for its height/width (box normalisation below).
    img = cv2.imread(img_path)
    height, width = img.shape[0:2]
    # iso-8859-1 decoding: the pickles were written by a Python 2 process.
    box_feature, box_box, box_score = pkl.load(open(
        os.path.join(info_pkl_path), 'rb'), encoding='iso-8859-1')
    # NOTE(review): np.float is a deprecated alias of builtin float
    # (removed in NumPy 1.24); astype(float) is the exact equivalent.
    box_feature = box_feature.astype(np.float)
    box_box = box_box.astype(np.float)
    box_score = box_score.astype(np.float)
    # gts_info = pkl.load(open(os.path.join(self.gt_path, self.image_index[idx] + '.pkl'), 'rb'), encoding='iso-8859-1')
    gts_info = pkl.load(
        open(os.path.join(self.gt_path, self.image_index[idx] + '.pkl'),
             'rb'))
    # Ground-truth boxes, one row per annotation (5 columns per gt['bbox']).
    gts_box = np.zeros((len(gts_info), 5))
    for index, gt in enumerate(gts_info):
        gts_box[index, :] = gt['bbox']
    # box_label, box_weight = stage1_before_assign_weight_slack_has_class(box_feature, box_box, box_score, gts_box, weight=self.weight)
    # box_label, box_weight = stage1_before_no_gt_assign_weight_slack_has_class(box_feature, box_box, box_score, gts_box, weight=self.weight)
    # Assign per-(proposal, class) labels and loss weights against the gts.
    box_label, box_weight = stage_full_assign_weight_slack_has_class_v5(
        box_feature, box_box, box_score, gts_box, weight=self.weight)
    # Drop the background column/coords: keep the 80 foreground classes only.
    box_score = box_score[:, 1:]
    # box_feature = proposals_feature
    box_box = box_box[:, 4:]
    unique_class = np.zeros(80).astype(np.float)
    unique_class_len = np.zeros(81).astype(np.float)  # cumulative sum of per-class counts
    cls_num = 0
    cls_list = []
    label_num = 0
    for ii in range(80):
        # A proposal is degenerate for class ii when x1 == x2 or y1 == y2;
        # keep rows where both coordinate pairs differ.
        equal_row_index = np.where(
            box_box[:, ii * 4:ii * 4 + 1] != box_box[:, ii * 4 + 2:ii * 4 + 3])[0]
        # NOTE(review): the right-hand slice has no stop index, so for
        # ii < 79 this compares y1 against *all* trailing columns (broadcast),
        # not just y2; ii * 4 + 3:ii * 4 + 4 looks intended -- confirm.
        equal_col_index = np.where(
            box_box[:, ii * 4 + 1:ii * 4 + 2] != box_box[:, ii * 4 + 3:])[0]
        equal_index = np.intersect1d(equal_row_index, equal_col_index)
        # Of the non-degenerate rows, keep those scoring above the threshold.
        valid_index = np.where(
            box_score[equal_index, ii] >= self.score_thresh)[0]
        cls_list.append(equal_index[valid_index])
        valid_num = len(equal_index[valid_index])
        cls_num += valid_num
        if not valid_num == 0:
            unique_class[ii] = 1
            label_num += len(
                np.where(box_label[equal_index[valid_index], ii] == 1)[0])
        # Running offset: class ii occupies rows [len[ii], len[ii+1]).
        unique_class_len[ii + 1] = valid_num + unique_class_len[ii]
    # Keep unnormalised copies before log-scaling the coordinates.
    box_box_origin = box_box.copy()
    box_score_origin = box_score.copy()
    # Normalise x (even) and y (odd) coordinates by image size, then log.
    box_box[:, 0::2] = np.log(box_box[:, 0::2] / float(width) + 0.5)
    box_box[:, 1::2] = np.log(box_box[:, 1::2] / float(height) + 0.5)
    # num_box = box_feature.shape[0]
    # num_label = len(np.where(box_label==1)[0])
    # num_all = num_box * 80
    # num_label_proposal = len(np.where(np.max(box_label, axis=1)==1)[0])
    # num_label_class = len(np.where(np.max(box_label, axis=0)==1)[0])
    # print('label_num/num_label/all_num:{}/{}/{}, num_label_class/num_class:{}/{}'.format(label_num, num_label, cls_num, num_label_class, np.sum(unique_class)))
    # ranks = np.empty_like(box_score)
    # search_index = np.zeros((cls_num, 1))
    # Saved scores: column 0 = stage-1 score, column 1 = origin score.
    score_info = cvb.load(score_pkl_path)
    save_score_stage1 = score_info[:, 0:1]
    save_score_origin = score_info[:, 1:2]
    save_score_final = save_score_stage1 * save_score_origin
    # Flat, class-grouped output buffers (cls_num rows in total).
    all_class_box_feature = np.zeros((cls_num, 1024))
    all_class_box_weight = np.ones((cls_num, 1))
    all_class_box_label = np.zeros((cls_num, 1))
    all_class_box_box = np.zeros((cls_num, 32))
    all_class_box_origin_box = np.zeros((cls_num, 4))
    all_class_box_origin_score = np.zeros((cls_num, 32))
    all_class_box_score = np.zeros((cls_num, 32))
    # print(cls_num)
    # print(save_score_final.shape[0])
    for ii in range(80):
        if unique_class[ii] == 0:
            continue
        start = int(unique_class_len[ii])
        end = int(unique_class_len[ii + 1])
        valid_index = cls_list[ii]
        # First order this class's proposals by detector score (descending),
        # then reorder that by the saved final score (descending) so rows
        # line up with save_score_final[start:end].
        origin_temp = box_score[valid_index, ii].argsort()[::-1]
        valid_sort = valid_index[origin_temp]
        # final_sort = valid_index[origin_temp]
        final_temp = save_score_final[start:end, 0].argsort()[::-1]
        final_sort = valid_sort[final_temp]
        all_class_box_weight[start:end, 0] = box_weight[final_sort, ii]
        all_class_box_label[start:end, 0] = box_label[final_sort, ii]
        # Broadcast the 4 class coords / 1 score across the 32-wide buffers.
        all_class_box_box[start:end, :] = np.tile(
            box_box[final_sort, ii * 4:(ii + 1) * 4], 8)
        all_class_box_origin_score[start:end, :] = np.tile(
            save_score_final[start:end, 0:1][final_temp, 0:1], 32)
        # all_class_box_origin_score[start:end, :] = np.tile(box_score_origin[final_sort, ii:ii+1], 32)
        # Rank-encoding lookup table, one row per rank position.
        all_class_box_score[start:end, :] = self.search_table[0:end - start, 0:32]
        all_class_box_feature[start:end, :] = box_feature[final_sort, :]
        all_class_box_origin_box[start:end, :] = box_box_origin[final_sort,
                                                                ii * 4:(ii + 1) * 4]
    # verify_score = np.concatenate((save_score_origin, all_class_box_origin_score[:, 0:1]), 1)
    # print(verify_score)
    # Phase flag: 0 = test/default, 1 = train, 2 = bug (debug).
    phase_np = np.zeros(1)
    if self.phase == 'train':
        phase_np[0] = 1
    if self.phase == 'bug':
        phase_np[0] = 2
    # class_split_score_feature or box_feature
    if self.phase == 'train':
        return all_class_box_feature, all_class_box_box, all_class_box_score, all_class_box_label, all_class_box_weight, all_class_box_origin_score, all_class_box_origin_box, unique_class, unique_class_len, phase_np
    elif self.phase == 'test':
        return all_class_box_feature, all_class_box_box, all_class_box_score, all_class_box_label, all_class_box_weight, all_class_box_origin_score, all_class_box_origin_box, unique_class, unique_class_len, img_id, phase_np
def test_solver(model, dataset, output_dir, thread_index, thread_num):
    """Run rescoring inference over this worker's share of ``dataset``.

    Loads the checkpoint from ``output_dir[0]``, evaluates every sample whose
    index is congruent to ``thread_index`` modulo ``thread_num``, and turns
    each kept (proposal, class) pair into a COCO-style detection dict.

    Args:
        model: network to evaluate; weights are loaded in place.
        dataset: indexable dataset yielding per-image numpy tuples.
        output_dir: (checkpoint_path, log_dir) pair.
        thread_index: which strided slice this worker handles.
        thread_num: total number of workers.

    Returns:
        List of dicts with keys 'bbox' ([x, y, w, h]), 'score',
        'category_id' and 'image_id'.
    """
    load_checkpoint(model, output_dir[0])
    # Maps contiguous class indices (as strings) back to COCO category ids.
    New2Old = cvb.load(
        '/mnt/lustre/liushu1/mask_rcnn/coco-master/PythonAPI/Newlabel.pkl')
    log_dir = output_dir[1]
    logger = solver_log(
        os.path.join(
            log_dir,
            'test_' + time.strftime('%Y%m%d_%H%M%S', time.localtime()) +
            '.log'))
    results = []
    data_num = len(dataset)
    # Strided sharding: this worker sees thread_index, thread_index + thread_num, ...
    for count in range(thread_index, data_num, thread_num):
        data_np = dataset[count]
        box_feature = torch.FloatTensor(data_np[0])
        rank_score = torch.FloatTensor(data_np[1])
        box_box = torch.FloatTensor(data_np[2])
        # data_np[3] (labels) is unused at test time.
        box_score_origin = torch.FloatTensor(data_np[4])
        box_box_origin = torch.FloatTensor(data_np[5])
        image_id = int(data_np[6])
        box_keep_np = torch.FloatTensor(data_np[7])
        bboxes = []
        start = time.time()
        box_feature_variable = Variable(box_feature).cuda()
        box_score_variable = Variable(rank_score).cuda()
        box_box_variable = Variable(box_box).cuda()
        output = test(box_feature_variable, box_score_variable,
                      box_box_variable, model)
        # np.float / np.int were deprecated aliases of the builtins (removed
        # in NumPy 1.24); float/int are exact behavioural equivalents.
        box_score_origin = box_score_origin.cpu().numpy().astype(float)
        box_keep_np = box_keep_np.cpu().numpy().astype(int)
        # Rescored confidence: network output modulates the detector score.
        final_score = box_score_origin * output
        for index in range(final_score.shape[0]):
            # Decode only the classes flagged as kept for this proposal.
            cls_all_index = np.where(box_keep_np[index, :] == 1)[0]
            for cls_index in cls_all_index:
                x1, y1, x2, y2 = box_box_origin[index,
                                                cls_index * 4:cls_index * 4 + 4]
                score = final_score[index, cls_index]
                category_id = New2Old[str(cls_index + 1)][1]
                # COCO bbox format: [x, y, w, h] with inclusive pixel extents.
                bboxes.append({
                    'bbox': [
                        int(x1), int(y1),
                        int(x2) - int(x1) + 1,
                        int(y2) - int(y1) + 1
                    ],
                    'score': float(score),
                    'category_id': category_id,
                    'image_id': int(image_id)
                })
        print_time = float(time.time() - start)
        results.extend(bboxes)
        logger.info('thread_index:{}, index:{}, image_id:{}, cost:{}'.format(
            thread_index, count, image_id, print_time))
    return results
if not image_id in anns_dict: anns_dict[image_id] = [] anns_dict[image_id].append(ann) else: anns_dict[image_id].append(ann) for img in imgs: image_id = img['id'] imgs_dict[image_id] = img['file_name'] return imgs_dict, anns_dict if __name__ == '__main__': src_img_path = "/home/qilu/Downloads/image/training/image_2" src_gt7_path = "/home/qilu/Amodal-Instance-Segmentation-through-KINS-Dataset/instances_train_1.json" anns = cvb.load(src_gt7_path) imgs_info = anns['images'] anns_info = anns["annotations"] imgs_dict, anns_dict = make_json_dict(imgs_info, anns_info) count = 0 for img_id in anns_dict.keys(): img_name = imgs_dict[img_id] img_path = os.path.join(src_img_path, img_name) img = cv2.imread(img_path, cv2.IMREAD_COLOR) height, width, _ = img.shape anns = anns_dict[img_id]
def test_solver(model, dataset, output_dir, thread_index, thread_num):
    """Run two-stage rescoring inference over this worker's share of ``dataset``.

    The model returns both a pre-stage and a post-stage output; the
    ``pre_flag`` switch below selects which stage's scores/boxes/offsets are
    decoded into COCO-style detection dicts (post-stage is hard-wired on).

    Returns a list of dicts with keys 'bbox' ([x, y, w, h]), 'score',
    'category_id' and 'image_id'.

    NOTE(review): unlike the sibling test_solver variants, this one does not
    call load_checkpoint -- presumably the caller loads weights; confirm.
    """
    # Maps contiguous class indices (as strings) back to COCO category ids.
    New2Old = cvb.load('/mnt/lustre/liushu1/mask_rcnn/coco-master/PythonAPI/Newlabel.pkl')
    # result_path = os.path.join(output_dir[1], 'result.json')
    np.set_printoptions(formatter={'float': '{: 0.4f}'.format})
    log_dir = output_dir[1]
    # count = 0
    logger = solver_log(os.path.join(log_dir, 'test_' + time.strftime('%Y%m%d_%H%M%S', time.localtime()) + '.log'))
    # logger = solver_log(os.path.join(log_dir, 'test1.log'))
    results = []
    data_num = len(dataset)
    for count in range(data_num):
        # Strided sharding: each worker handles every thread_num-th sample.
        if count % thread_num != thread_index:
            continue
        data_np = dataset[count]
        # Unpack the dataset tuple; torch copies for the network, raw numpy
        # kept where only host-side bookkeeping needs them.
        all_class_box_feature, all_class_box_box, all_class_box_score = torch.FloatTensor(data_np[0]), torch.FloatTensor(data_np[1]), torch.FloatTensor(data_np[2])
        all_class_box_label = data_np[3]
        all_class_box_weight = data_np[4]
        all_class_box_origin_score, all_class_box_origin_box = torch.FloatTensor(data_np[5]), torch.FloatTensor(data_np[6])
        gts_box = torch.FloatTensor(data_np[7])
        unique_class, unique_class_len = torch.FloatTensor(data_np[8]), torch.FloatTensor(data_np[9])
        pre_unique_class, pre_unique_class_len = data_np[8], data_np[9]
        image_id = int(data_np[10])
        # if data_np[1].shape[0]==0:
        #     results.extend(image_id)
        bboxes = []
        start = time.time()
        # all_class_box_label_variable = Variable(all_class_box_label).cuda()
        all_class_box_score_variable = Variable(all_class_box_score).cuda()
        all_class_box_box_variable = Variable(all_class_box_box).cuda()
        all_class_box_feature_variable = Variable(all_class_box_feature).cuda()
        all_class_box_origin_score_variable = Variable(all_class_box_origin_score).cuda()
        all_class_box_origin_box_variable = Variable(all_class_box_origin_box).cuda()
        gts_box_tensor = gts_box.cuda()
        unique_class_cuda = unique_class.cuda()
        unique_class_len_cuda = unique_class_len.cuda()
        # Forward pass: the model emits the pre-stage output plus the full
        # post-stage tuple (output, labels, weights, scores, boxes, offsets).
        pre_stage_output, post_stage_output, post_stage_label, post_stage_weight, post_stage_box_origin_score_variable, post_stage_box_origin_box_tensor, post_unique_class, post_unique_class_len = model(all_class_box_feature_variable, all_class_box_box_variable, all_class_box_score_variable, all_class_box_origin_score_variable, all_class_box_origin_box_variable, gts_box_tensor, unique_class_cuda, unique_class_len_cuda)
        # output_record = model(box_feature_variable, box_score_variable, box_box_variable, all_class_box_feature_variable, all_class_box_score_variable, all_class_box_box_variable, unique_class, unique_class_len)
        # Pre-stage tensors back to host numpy.
        # NOTE(review): np.float is a deprecated alias of builtin float
        # (removed in NumPy 1.24); astype(float) is the exact equivalent.
        pre_output = pre_stage_output.data.cpu().numpy().reshape(-1, 1).astype(np.float)
        # output = test(all_class_box_feature_variable, all_class_box_box_variable, all_class_box_score_variable, all_class_box_origin_score_variable, all_class_box_origin_box_variable, gts_box_tensor, unique_class_cuda, unique_class_len_cuda, model)
        pre_score = all_class_box_origin_score_variable.data.cpu().numpy().astype(np.float)[:, 0:1].reshape(-1, 1)
        pre_box = all_class_box_origin_box_variable.data.cpu().numpy()
        pre_label = all_class_box_label
        # Post-stage tensors back to host numpy.
        post_score = post_stage_box_origin_score_variable.data.cpu().numpy().astype(np.float)[:, 0:1].reshape(-1, 1)
        post_box = post_stage_box_origin_box_tensor.cpu().numpy()
        post_output = post_stage_output.data.cpu().numpy().reshape(-1, 1).astype(np.float)
        post_unique_class_np = post_unique_class.cpu().numpy()
        post_unique_class_len_np = post_unique_class_len.cpu().numpy()
        post_label = post_stage_label.data.cpu().numpy()
        torch.cuda.empty_cache()
        # final_score = box_score_origin
        # final_score = (box_score_origin + output) / 2
        # Debug switch: decode pre-stage instead of post-stage results.
        pre_flag = False
        if pre_flag:
            final_score = pre_score * pre_output
            unique_class_np = pre_unique_class
            unique_class_len_np = pre_unique_class_len
            final_box = pre_box
            final_label = pre_label
        else:
            final_score = post_score * post_output
            unique_class_np = post_unique_class_np
            unique_class_len_np = post_unique_class_len_np
            final_box = post_box
            final_label = post_label
        # final_score = output
        for cls_index in range(80):
            if unique_class_np[cls_index] == 0:
                continue
            # Class cls_index occupies rows [start_, end_) of the flat arrays.
            start_ = int(unique_class_len_np[cls_index])
            end_ = int(unique_class_len_np[cls_index + 1])
            for index in range(start_, end_):
                x1, y1, x2, y2 = final_box[index, 0:4]
                score = final_score[index, 0]
                # if final_label[index, 0]==0:
                #     continue
                # if(score<0.01):
                #     continue
                category_id = New2Old[str(cls_index + 1)][1]
                # COCO bbox format: [x, y, w, h] with inclusive pixel extents.
                bboxes.append({'bbox': [int(x1), int(y1), int(x2 - x1 + 1), int(y2 - y1 + 1)], 'score': float(score), 'category_id': category_id, 'image_id': int(image_id)})
        end = time.time()
        print_time = float(end - start)
        results.extend(bboxes)
        logger.info('thread_index:{}, index:{}, image_id:{}, cost:{}'.format(thread_index, count, image_id, print_time))
    return results
    # cvb.dump(results, result_path)
def test_solver(model, dataset, output_dir, thread_index, thread_num):
    """Dump per-image rescoring outputs for this worker's share of ``dataset``.

    Loads the checkpoint from ``output_dir[0]``, runs the network on every
    sample whose index is congruent to ``thread_index`` modulo ``thread_num``,
    and pickles the [network score, origin score] column pair per image under
    a fixed score directory; the box decoding happens in a later pass that
    reads those pickles.

    Args:
        model: network to evaluate; weights are loaded in place.
        dataset: indexable dataset yielding per-image numpy tuples.
        output_dir: (checkpoint_path, log_dir) pair.
        thread_index: which strided slice this worker handles.
        thread_num: total number of workers.

    Returns:
        An empty list (kept for interface parity with the sibling
        test_solver variants, whose callers extend a shared result list).
    """
    load_checkpoint(model, output_dir[0])
    # Class-index -> COCO category-id map; only needed by the decode pass,
    # loaded here to keep parity with the sibling variants.
    New2Old = cvb.load(
        '/mnt/lustre/liushu1/mask_rcnn/coco-master/PythonAPI/Newlabel.pkl')
    np.set_printoptions(formatter={'float': '{: 0.4f}'.format})
    log_dir = output_dir[1]
    logger = solver_log(
        os.path.join(
            log_dir,
            'test_' + time.strftime('%Y%m%d_%H%M%S', time.localtime()) +
            '.log'))
    results = []
    data_num = len(dataset)
    # Strided sharding: this worker sees thread_index, thread_index + thread_num, ...
    for count in range(data_num):
        if count % thread_num != thread_index:
            continue
        data_np = dataset[count]
        all_class_box_feature = torch.FloatTensor(data_np[0])
        all_class_box_box = torch.FloatTensor(data_np[1])
        all_class_box_score = torch.FloatTensor(data_np[2])
        all_class_box_label = data_np[3]
        # Images with no surviving proposals have nothing to score.
        if all_class_box_label.shape[0] == 0:
            continue
        # data_np[4] (weights) and data_np[6] (origin boxes) are only used by
        # the offline decode pass, not here.
        all_class_box_origin_score = torch.FloatTensor(data_np[5])
        unique_class = torch.FloatTensor(data_np[7])
        unique_class_len = torch.FloatTensor(data_np[8])
        image_id = int(data_np[9])
        start = time.time()
        all_class_box_score_variable = Variable(all_class_box_score).cuda()
        all_class_box_box_variable = Variable(all_class_box_box).cuda()
        all_class_box_feature_variable = Variable(all_class_box_feature).cuda()
        all_class_box_origin_score_variable = Variable(
            all_class_box_origin_score).cuda()
        unique_class_cuda = unique_class.cuda()
        unique_class_len_cuda = unique_class_len.cuda()
        output = test(all_class_box_feature_variable,
                      all_class_box_box_variable, all_class_box_score_variable,
                      all_class_box_origin_score_variable, unique_class_cuda,
                      unique_class_len_cuda, model)
        # np.float was a deprecated alias of the builtin float (removed in
        # NumPy 1.24); float is the exact behavioural equivalent.
        box_score_origin = all_class_box_origin_score_variable.data.cpu(
        ).numpy().astype(float)[:, 0:1].reshape(-1, 1)
        # Persist [network score, origin score] columns for the decode pass.
        save_score = np.concatenate((output, box_score_origin), 1)
        save_path = '/mnt/lustre/liushu1/qilu_ex/dataset/test_dev/panet/score/' + str(
            image_id).zfill(12) + '.pkl'
        cvb.dump(save_score, save_path)
        print_time = float(time.time() - start)
        logger.info('thread_index:{}, index:{}, image_id:{}, cost:{}'.format(
            thread_index, count, image_id, print_time))
    return results