def read_worker(q_in, q_out):
    """Worker loop for paired samples.

    Pulls ``(sample_a, sample_b, sample_label)`` triples from ``q_in``,
    builds an image blob via ``get_image_blob`` plus a float32 label blob,
    and pushes ``{'data': ..., 'labels': ...}`` dicts onto ``q_out``.

    Exits when a ``None`` sentinel is dequeued.  When ``q_out`` is backed
    up (>= 40 items) the worker still enqueues the blob (no data loss) but
    sleeps with exponential backoff to throttle production.
    """
    backoff = 0.1
    while True:
        deq = q_in.get()
        if deq is None:  # sentinel: shut this worker down
            break
        sample_a, sample_b, sample_label = deq
        try:
            im_blob, _ = get_image_blob(sample_a, sample_b)
        except Exception as e:
            # Corrupt/unreadable sample: report and skip, keep the worker alive.
            # (Fixed from Python 2 `except Exception, e` / print statements,
            # which are syntax errors under Python 3.)
            print(e)
            print('bad data: ')  # sample_a, sample_b
            continue
        labels_blob = np.array(sample_label, dtype='float32')
        blobs = {'data': im_blob, 'labels': labels_blob}
        if q_out.qsize() < 40:
            q_out.put(blobs)
            backoff = 0.1  # consumer caught up: reset the throttle
        else:
            # Consumer is behind: still enqueue, then back off exponentially.
            # Sleep is capped so a long stall cannot grow the wait unboundedly.
            q_out.put(blobs)
            time.sleep(min(backoff, 5.0))
            backoff *= 2
def read_worker(q_in, q_out):
    """Worker loop for single samples.

    Pulls one sample at a time from ``q_in``, builds image and label blobs
    via ``get_image_blob``, and pushes ``{'data': ..., 'labels': ...}``
    dicts onto ``q_out``.  Exits on a ``None`` sentinel.

    When ``q_out`` is backed up (>= 40 items) the worker still enqueues the
    blob and sleeps with capped exponential backoff to throttle production.
    """
    backoff = 0.1
    while True:
        deq = q_in.get()
        if deq is None:  # sentinel: shut this worker down
            break
        sample = deq
        try:
            im_blob, labels_blob = get_image_blob(sample)
        except Exception as e:
            # Narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit); report and skip the sample.
            print('bad data: ', sample)
            print(e)
            continue
        blobs = {'data': im_blob, 'labels': labels_blob}
        if q_out.qsize() < 40:
            q_out.put(blobs)
            backoff = 0.1  # consumer caught up: reset the throttle
        else:
            # BUG FIX: the original dropped the blob entirely when the queue
            # was backed up.  Match the paired-sample worker: enqueue, then
            # back off exponentially (sleep capped).
            q_out.put(blobs)
            time.sleep(min(backoff, 5.0))
            backoff *= 2
# NOTE(review): this flat run of statements duplicates the body of
# extract_feature() below and relies on names (N_CLASSES, args, device,
# get_image_blob, tqdm, cv2) defined elsewhere -- likely a leftover copy;
# confirm whether it should be deleted in favor of the function.

# Build the 101-layer ResNet Faster R-CNN head and load trained weights.
fasterRCNN = resnet(N_CLASSES, 101, pretrained=False)
fasterRCNN.create_architecture()
fasterRCNN.load_state_dict(torch.load(args.model_file))
fasterRCNN.to(device)
fasterRCNN.eval()  # inference mode: disable dropout/batch-norm updates
print('Model is loaded.')

# Load images.
imglist = os.listdir(args.image_dir)
num_images = len(imglist)
print('Number of images: {}.'.format(num_images))

# Extract features.
for im_file in tqdm(imglist):
    im = cv2.imread(os.path.join(args.image_dir, im_file))
    blobs, im_scales = get_image_blob(im)
    assert len(im_scales) == 1, 'Only single-image batch is implemented'
    # NHWC numpy blob -> NCHW torch tensor for the network.
    im_data = torch.from_numpy(blobs).permute(0, 3, 1, 2).to(device)
    # im_info row: (height, width, scale) -- assumes blobs is NHWC;
    # TODO confirm against get_image_blob.
    im_info = torch.tensor([[blobs.shape[1], blobs.shape[2],
                             im_scales[0]]]).to(device)
    gt_boxes = torch.zeros(1, 1, 5).to(device)  # no ground truth at test time
    num_boxes = torch.zeros(1).to(device)
    with torch.set_grad_enabled(False):
        rois, cls_prob, _, _, _, _, _, _, \
            pooled_feat = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)
    # RoI columns 1:5 are (x1, y1, x2, y2); rescale to original image coords.
    boxes = rois.data.cpu().numpy()[:, :, 1:5].squeeze()
    boxes /= im_scales[0]
    cls_prob = cls_prob.data.cpu().numpy().squeeze()
    pooled_feat = pooled_feat.data.cpu().numpy()
def extract_feature():
    """Extract per-region features for every image in ``args.image_dir``
    and save one ``.npy`` file per image in ``args.output_dir``.

    Pipeline: load a 101-layer ResNet Faster R-CNN, run it on each image,
    run per-class NMS over the class scores, keep between MIN_BOXES and
    MAX_BOXES regions ranked by max class confidence, and dump the pooled
    features (and optionally the boxes) via ``save_features``.

    Requires CUDA (asserts on availability).
    """
    MIN_BOXES = 10        # lower bound on regions kept per image
    MAX_BOXES = 100       # upper bound on regions kept per image
    N_CLASSES = 1601      # detector class count incl. background -- TODO confirm
    CONF_THRESH = 0.2     # minimum max-class score for a region to survive

    args = parse_args()
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    os.makedirs(args.output_dir, exist_ok=True)

    use_cuda = torch.cuda.is_available()
    assert use_cuda, 'Works only with CUDA'
    device = torch.device('cuda') if use_cuda else torch.device('cpu')
    cfg.CUDA = use_cuda
    np.random.seed(cfg.RNG_SEED)

    # Load the model.
    fasterRCNN = resnet(N_CLASSES, 101, pretrained=False)
    fasterRCNN.create_architecture()
    fasterRCNN.load_state_dict(torch.load(args.model_file))
    fasterRCNN.to(device)
    fasterRCNN.eval()  # inference mode: freeze dropout/batch-norm
    print('Model is loaded.')

    # Load images.
    imglist = os.listdir(args.image_dir)
    num_images = len(imglist)
    print('Number of images: {}.'.format(num_images))

    # Extract features.
    for im_file in tqdm(imglist):
        im = cv2.imread(os.path.join(args.image_dir, im_file))
        if im is None:
            # Unreadable / non-image file in the directory: skip instead
            # of crashing inside get_image_blob.
            print('Skipping unreadable file: {}'.format(im_file))
            continue
        blobs, im_scales = get_image_blob(im)
        assert len(im_scales) == 1, 'Only single-image batch is implemented'

        # NHWC numpy blob -> NCHW torch tensor for the network.
        im_data = torch.from_numpy(blobs).permute(0, 3, 1, 2).to(device)
        # im_info row: (height, width, scale) -- assumes blobs is NHWC;
        # TODO confirm against get_image_blob.
        im_info = torch.tensor(
            [[blobs.shape[1], blobs.shape[2], im_scales[0]]]).to(device)
        gt_boxes = torch.zeros(1, 1, 5).to(device)  # no GT at test time
        num_boxes = torch.zeros(1).to(device)

        with torch.no_grad():  # equivalent to set_grad_enabled(False), idiomatic
            rois, cls_prob, _, _, _, _, _, _, \
                pooled_feat = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)

        # RoI columns 1:5 are (x1, y1, x2, y2); rescale to original coords.
        boxes = rois.data.cpu().numpy()[:, :, 1:5].squeeze()
        boxes /= im_scales[0]
        cls_prob = cls_prob.data.cpu().numpy().squeeze()
        pooled_feat = pooled_feat.data.cpu().numpy()

        # Keep only the best detections: for each non-background class run
        # NMS, and record the highest surviving score per region.
        max_conf = np.zeros((boxes.shape[0]))
        for cls_ind in range(1, cls_prob.shape[1]):  # skip background (0)
            cls_scores = cls_prob[:, cls_ind]
            dets = np.hstack(
                (boxes, cls_scores[:, np.newaxis])).astype(np.float32)
            keep = np.array(cpu_nms(dets, cfg.TEST.NMS))
            max_conf[keep] = np.where(cls_scores[keep] > max_conf[keep],
                                      cls_scores[keep], max_conf[keep])

        # Clamp the kept-region count into [MIN_BOXES, MAX_BOXES].
        keep_boxes = np.where(max_conf >= CONF_THRESH)[0]
        if len(keep_boxes) < MIN_BOXES:
            keep_boxes = np.argsort(max_conf)[::-1][:MIN_BOXES]
        elif len(keep_boxes) > MAX_BOXES:
            keep_boxes = np.argsort(max_conf)[::-1][:MAX_BOXES]

        image_feat = pooled_feat[keep_boxes]
        image_bboxes = boxes[keep_boxes] if args.save_boxes else None

        # splitext is robust to filenames containing extra dots, unlike
        # the original im_file.split('.')[0].
        output_file = os.path.join(
            args.output_dir, os.path.splitext(im_file)[0] + '.npy')
        save_features(output_file, image_feat, image_bboxes)