cfg.TRAIN.USE_JPG_AUG = False
_, valroidb = combined_roidb(args.imdbval_name)
print('{:d} validation roidb entries'.format(len(valroidb)))
cfg.TRAIN.USE_FLIPPED = orgflip
cfg.TRAIN.USE_NOISE_AUG = orgnoise
cfg.TRAIN.USE_JPG_AUG = orgjpg

# load network
if args.net == 'inception_v3':
    net = []
elif args.net == 'vgg16':
    net = vgg16(batch_size=cfg.TRAIN.IMS_PER_BATCH)
elif args.net == 'vgg16_noise':
    net = vgg16_noise(batch_size=cfg.TRAIN.IMS_PER_BATCH)
elif args.net == 'res50':
    net = resnetv1(batch_size=cfg.TRAIN.IMS_PER_BATCH, num_layers=50)
elif args.net == 'res50_noise':
    net = resnet_noise(batch_size=cfg.TRAIN.IMS_PER_BATCH, num_layers=50)
elif args.net == 'res101':
    net = resnetv1(batch_size=cfg.TRAIN.IMS_PER_BATCH, num_layers=101)
elif args.net == 'res101_noise':
    net = resnet_noise(batch_size=cfg.TRAIN.IMS_PER_BATCH, num_layers=101)
elif args.net == 'res101_noise_init':
    net = resnet_noise_init(batch_size=cfg.TRAIN.IMS_PER_BATCH, num_layers=101)
elif args.net == 'res101_fusion':
    net = resnet_fusion(batch_size=cfg.TRAIN.IMS_PER_BATCH, num_layers=101)
elif args.net == 'res101_fusion_2rpn':
    net = resnet_fusion_2rpn(batch_size=cfg.TRAIN.IMS_PER_BATCH, num_layers=101)
elif args.net == 'res101_fusion_2rpn_sep':
    net = resnet_fusion_2rpn_sep(batch_size=cfg.TRAIN.IMS_PER_BATCH, num_layers=101)
elif args.net == 'res101_fusion_late_fusion':
if not os.path.isfile(tfmodel + '.meta'):
    raise IOError(('{:s} not found.\nDid you download the proper networks from '
                   'our server and place them properly?').format(tfmodel + '.meta'))

# set config
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True

# init session
sess = tf.Session(config=tfconfig)

# load network
if demonet == 'vgg16':
    net = vgg16(batch_size=1)
elif demonet == 'res101':
    net = resnetv1(batch_size=1, num_layers=101)
elif demonet == 'res50':
    net = resnetv1(batch_size=1, num_layers=50)
else:
    raise NotImplementedError
net.create_architecture(sess, "TEST", 20, tag='default', anchor_scales=[8, 16, 32])

saver = tf.train.Saver()
saver.restore(sess, tfmodel)
print('Loaded network {:s}'.format(tfmodel))

CONF_THRESH = 0.0
    cap = cv2.VideoCapture(int(args.demo_file))
else:
    raise AssertionError('type is not correct')

prior_mask = pickle.load(open(cfg.DATA_DIR + '/' + 'prior_mask.pkl', "rb"), encoding='iso-8859-1')
Action_dic = json.load(open(cfg.DATA_DIR + '/' + 'action_index.json'))
Action_dic_inv = {y: x for x, y in Action_dic.items()}

# load detection model
detection_model = 'output/res50_faster_rcnn_iter_1190000.pth'
if not os.path.isfile(detection_model):
    raise IOError(('{:s} not found.\nDid you download the proper networks from '
                   'our server and place them properly?').format(detection_model))

detection_net = resnetv1(num_layers=50)
detection_net.create_architecture(81, tag='default',
                                  anchor_scales=[4, 8, 16, 32],
                                  anchor_ratios=[0.5, 1, 2])
detection_net.load_state_dict(
    torch.load(detection_model, map_location=lambda storage, loc: storage))

# load hoi_detection model
hoi_model = 'output/HOI_iter_250000.pth'
if not os.path.isfile(hoi_model):
    raise IOError(('{:s} not found.\nDid you download the proper networks from '
                   'our server and place them properly?').format(hoi_model))

# load network
tag = tag if tag else 'default'
filename = tag + '/' + filename

imdb = get_imdb(args.imdb_name)
imdb.competition_mode(args.comp_mode)

tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True

# init session
sess = tf.Session(config=tfconfig)

# load network
if args.net == 'vgg16':
    net = vgg16()
elif args.net == 'res50':
    net = resnetv1(num_layers=50)
elif args.net == 'res101':
    net = resnetv1(num_layers=101)
elif args.net == 'res152':
    net = resnetv1(num_layers=152)
elif args.net == 'mobile':
    net = mobilenetv1()
else:
    raise NotImplementedError

# load model
net.create_architecture("TEST", imdb.num_classes, tag='default',
                        anchor_scales=cfg.ANCHOR_SCALES,
                        anchor_ratios=cfg.ANCHOR_RATIOS)
print('Output will be saved to `{:s}`'.format(output_dir))

# tensorboard directory where the summaries are saved during training
tb_dir = get_output_tb_dir(imdb, args.tag)
print('TensorFlow summaries will be saved to `{:s}`'.format(tb_dir))

# also add the validation set, but with no flipped images
orgflip = cfg.TRAIN.USE_FLIPPED
cfg.TRAIN.USE_FLIPPED = False
_, valroidb = combined_roidb(args.imdbval_name)
print('{:d} validation roidb entries'.format(len(valroidb)))
cfg.TRAIN.USE_FLIPPED = orgflip

# load network
if args.net == 'vgg16':
    net = vgg16(batch_size=cfg.TRAIN.IMS_PER_BATCH)
elif args.net == 'res50':
    net = resnetv1(batch_size=cfg.TRAIN.IMS_PER_BATCH, num_layers=50)
elif args.net == 'res101':
    net = resnetv1(batch_size=cfg.TRAIN.IMS_PER_BATCH, num_layers=101)
elif args.net == 'res152':
    net = resnetv1(batch_size=cfg.TRAIN.IMS_PER_BATCH, num_layers=152)
elif args.net == 'mobile':
    net = mobilenetv1(batch_size=cfg.TRAIN.IMS_PER_BATCH)
else:
    raise NotImplementedError

train_net(net, imdb, roidb, valroidb, output_dir, tb_dir,
          pretrained_model=args.weight,
          max_iters=args.max_iters)
def __init__(self):
    ## Object of predictNearCollision class will be created here
    # self.predNet = predictNearCollision()
    self.nstream = MultiStreamNearCollision().cuda()
    self.nstream.eval()
    # self.nstream.load_state_dict(torch.load('../../../data/model_files/4Image6s_004'))
    self.nstream.load_state_dict(
        torch.load('../../../data/model_files/6Image6s_027'))
    self.bridge = CvBridge()
    self.image_sub = rospy.Subscriber("/zed/zed_node/rgb/image_rect_color",
                                      Image, self.callback)

    cfg.TEST.HAS_RPN = True  # use RPN for proposals
    args = self.parse_args()

    # model path
    demonet = args.demo_net
    dataset = args.dataset
    tfmodel = os.path.join('../../../output', demonet, DATASETS[dataset][0],
                           'default', NETS[demonet][0])
    if not os.path.isfile(tfmodel + '.meta'):
        raise IOError(('{:s} not found.\nDid you download the proper networks from '
                       'our server and place them properly?').format(tfmodel + '.meta'))

    # set config
    tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True

    # init session
    self.sess = tf.Session(config=tfconfig)

    # load network
    if demonet == 'vgg16':
        self.net = vgg16()
    elif demonet == 'res101':
        self.net = resnetv1(num_layers=101)
    else:
        raise NotImplementedError
    self.net.create_architecture("TEST", 21, tag='default', anchor_scales=[8, 16, 32])

    saver = tf.train.Saver()
    saver.restore(self.sess, tfmodel)
    print('Loaded network {:s}'.format(tfmodel))

    self.counter = 0  ## initializing a counter; alternatively a queue could be used
    self.stack_imgs = deque(maxlen=6)  ## buffer of the last 6 frames

    ## To check the frequency
    # self.image_pub = rospy.Publisher("image_topic_2", Image)
    self.time_pub = rospy.Publisher('near_collision_time', String, queue_size=10)
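# A minimal launch sketch for the ROS node defined above. The class name
# `NearCollisionNode` and the node name are illustrative assumptions, not taken
# from the original source; substitute the real class and node names.
if __name__ == '__main__':
    rospy.init_node('near_collision_node', anonymous=True)  # hypothetical node name
    node = NearCollisionNode()  # hypothetical name for the class whose __init__ is shown above
    try:
        rospy.spin()  # hand control to ROS; the image callback fires as frames arrive
    except KeyboardInterrupt:
        print('Shutting down near-collision node')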
def __init__(self, net_name, model_path, cfg_file, num_classes=2,
             max_object_per_image=15, conf_thresh=0.3, nms_thresh=0.5,
             iou_thresh=0.5):
    self.net_name = net_name
    # self.sess = sess
    self.model_path = model_path
    self.cfg_file = cfg_file
    self.num_images = 1
    self.num_classes = num_classes
    self.conf_thresh = conf_thresh
    self.nms_thresh = nms_thresh
    self.iou_thresh = iou_thresh
    self.max_object_per_image = max_object_per_image

    # set config
    tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True
    # tfconfig = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)

    # init session
    self.sess = tf.Session(config=tfconfig)

    if not os.path.isfile(self.model_path + '.meta'):
        raise IOError(('{:s} not found.\nDid you download the proper networks from '
                       'our server and place them properly?').format(self.model_path + '.meta'))

    # load network configuration
    cfg_from_file(self.cfg_file)
    # pprint.pprint(cfg)

    # load network
    if self.net_name == 'vgg16':
        self.net = vgg16(batch_size=1)
    elif self.net_name == 'res50':
        self.net = resnetv1(batch_size=1, num_layers=50)
    elif self.net_name == 'res101':
        self.net = resnetv1(batch_size=1, num_layers=101)
    elif self.net_name == 'res152':
        self.net = resnetv1(batch_size=1, num_layers=152)
    elif self.net_name == 'mobile':
        self.net = mobilenetv1(batch_size=1)
    else:
        raise NotImplementedError

    with self.sess.as_default():
        self.net.create_architecture(self.sess, "TEST", self.num_classes,
                                     tag='default',
                                     anchor_scales=cfg.ANCHOR_SCALES,
                                     anchor_ratios=cfg.ANCHOR_RATIOS)
        saver = tf.train.Saver()
        saver.restore(self.sess, self.model_path)
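# A minimal usage sketch for the wrapper above, assuming the enclosing class is
# named something like `FasterRCNNDetector`. The class name, checkpoint path, and
# config path below are illustrative placeholders, not taken from the original source.
if __name__ == '__main__':
    detector = FasterRCNNDetector(net_name='res101',
                                  model_path='output/res101_faster_rcnn_iter_110000.ckpt',
                                  cfg_file='experiments/cfgs/res101.yml',
                                  num_classes=2,
                                  conf_thresh=0.3,
                                  nms_thresh=0.5)
    # After construction, the graph and checkpoint live in detector.sess / detector.net,
    # ready for whatever detection method the class exposes.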
if not os.path.isfile(tfmodel + '.meta'):
    raise IOError(('{:s} not found.\nDid you download the proper networks from '
                   'our server and place them properly?').format(tfmodel + '.meta'))

# set config
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True

# init session
sess = tf.Session(config=tfconfig)

# load network
if demonet == 'vgg16':
    net = vgg16(batch_size=1)
elif demonet == 'res101':
    net = resnetv1(batch_size=1, num_layers=101)
else:
    raise NotImplementedError
net.create_architecture(sess, "TEST", 21, tag='default', anchor_scales=[8, 16, 32])

saver = tf.train.Saver()
saver.restore(sess, tfmodel)
print('Loaded network {:s}'.format(tfmodel))

im_names = ['000456.jpg', '000542.jpg', '001150.jpg', '001763.jpg', '004545.jpg']
for im_name in im_names:
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
    print('Demo for data/demo/{}'.format(im_name))
    demo(sess, net, im_name)
def sgv_test(sess, dataset, demonet, checkpoint_file, tfmodel, result_path, config=None):
    """Test one sequence.

    Args:
        dataset: Reference to a Dataset object instance
        checkpoint_file: Path of the checkpoint to use for the evaluation
        result_path: Path to save the output images
        config: Reference to a Configuration object used in the creation of a Session
    Returns:
    """
    if config is None:
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        # config.log_device_placement = True
        config.allow_soft_placement = True
    tf.logging.set_verbosity(tf.logging.INFO)

    # Input data
    batch_size = 1
    input_image = tf.placeholder(tf.float32, [batch_size, None, None, 3])

    # Create the cnn
    with slim.arg_scope(osvos.osvos_arg_scope()):
        net, end_points = osvos.osvos(input_image)
    probabilities = tf.nn.sigmoid(net)
    # global_step = tf.Variable(0, name='global_step', trainable=False)

    # Create a saver to load the network
    saver = tf.train.Saver([v for v in tf.global_variables()
                            if '-up' not in v.name and '-cr' not in v.name])

    # with g.as_default():
    #     with tf.device('/gpu:' + str(gpu_id)):
    #         with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(osvos.interp_surgery(tf.global_variables()))
    saver.restore(sess, checkpoint_file)

    if not os.path.exists(result_path):
        os.makedirs(result_path)

    # run osvos on all the frames
    for frame in range(0, dataset.get_test_size()):
        img, curr_img = dataset.next_batch(batch_size, 'test')
        curr_frame = curr_img[0].split('/')[-1].split('.')[0] + '.png'

        # test - osvos
        image = osvos.preprocess_img(img[0])
        res = sess.run(probabilities, feed_dict={input_image: image})
        res_np = res.astype(np.float32)[0, :, :, 0] > 162.0 / 255.0
        scipy.misc.imsave(os.path.join(result_path, curr_frame),
                          res_np.astype(np.float32))
        # mask = res_np
        # fig, ax = plt.subplots(figsize=(12, 12))
        # vis_masks(img[0], mask, ax)
        # plt.imsave(os.path.join("output", "mask_" + curr_frame), mask)

    # run faster-rcnn on all the frames
    if demonet == 'vgg16':
        net_rcnn = vgg16(batch_size=1)
    elif demonet == 'res101':
        net_rcnn = resnetv1(batch_size=1, num_layers=101)
    else:
        raise NotImplementedError
    net_rcnn.create_architecture(sess, "TEST", 21, tag='default', anchor_scales=[8, 16, 32])

    vlist = [v for v in tf.global_variables()
             if 'osvos' not in v.name.split('/')[0] and 'global_step' not in v.name]
    saver = tf.train.Saver(vlist)
    saver.restore(sess, tfmodel)
    # saver = tf.train.Saver()
    # saver.restore(sess, tfmodel)
    print('Loaded network {:s}'.format(tfmodel))

    dataset.reset_iter()
    for frame in range(0, dataset.get_test_size()):
        img, curr_img = dataset.next_batch(batch_size, 'test')
        curr_frame = curr_img[0].split('/')[-1].split('.')[0] + '.png'

        # load mask
        mask = scipy.misc.imread(os.path.join(result_path, curr_frame))
        # mask = plt.imread(os.path.join("output", "mask_" + curr_frame))

        # convert image rgb --> bgr
        image = img[0][..., (2, 1, 0)]

        # test - faster rcnn
        timer = Timer()
        timer.tic()
        scores, boxes = im_detect(sess, net_rcnn, image)
        timer.toc()
        print('Detection took {:.3f}s for {:d} object proposals'.format(
            timer.total_time, boxes.shape[0]))

        CONF_THRESH = 0.8
        NMS_THRESH = 0.3
        classify_fg_bk(sess, mask, boxes)

        # save the mask + detections overlay
        fig, ax = plt.subplots(figsize=(12, 12))
        # vis_masks(img[0], mask, ax)
        for cls_ind, cls in enumerate(CLASSES[1:]):
            cls_ind += 1  # because we skipped background
            cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
            cls_scores = scores[:, cls_ind]
            dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
            keep = nms(dets, NMS_THRESH)
            dets = dets[keep, :]
            vis_detections_masks(image, mask, ax, curr_frame, cls, dets, thresh=CONF_THRESH)

        # image = osvos.preprocess_img(img[0])
        # res = sess.run(probabilities, feed_dict={input_image: image})
        # res_np = res.astype(np.float32)[0, :, :, 0] > 162.0/255.0
        # scipy.misc.imsave(os.path.join(result_path, curr_frame), res_np.astype(np.float32))
        outputpath = 'output'
        plt.savefig(os.path.join(outputpath, curr_frame))
        print('Saving ' + os.path.join(result_path, curr_frame))
def main():
    global img
    net = resnetv1(num_layers=50)
    net.create_architecture(41, tag='default', anchor_scales=[8, 16, 32])
    saved_model = '/home/zhbli/Project/fast-rcnn/output/res50/voc_2007_trainval/default/res50_faster_rcnn_iter_70000.pth'
    net.load_state_dict(torch.load(saved_model))
    net.eval()
    net.cuda()

    # v4.0
    # hook the feature extractor
    finalconv_name = 'resnet'
    features_blobs = [-1]  # shape should be [2048, 7, 7]

    def hook_feature(module, input, output):
        features_blobs[0] = output.data.cpu().numpy()

    net._modules.get(finalconv_name)._modules.get('layer4').register_forward_hook(hook_feature)

    # get the softmax weight
    params = list(net.parameters())
    weight_softmax = np.squeeze(params[-4].data.cpu().numpy())  # shape = [41, 2048]

    def returnCAM(feature_conv, weight_softmax, class_idx):
        # generate the class activation maps upsampled to 256x256
        size_upsample = (256, 256)
        bz, nc, h, w = feature_conv.shape
        output_cam = []
        for idx in class_idx:
            cam = weight_softmax[idx].dot(feature_conv.reshape((nc, h * w)))
            cam = cam.reshape(h, w)
            cam = cam - np.min(cam)
            cam_img = cam / np.max(cam)
            cam_img = np.uint8(255 * cam_img)
            output_cam.append(cv2.resize(cam_img, size_upsample))
        return output_cam
    # v4.0

    """loop"""
    while 1:
        img = cv2.imread('/data/zhbli/VOCdevkit/VOC2007/JPEGImages/000698.jpg')
        assert img is not None, "fail to load img"
        cv2.namedWindow('image')
        cv2.setMouseCallback('image', on_mouse)
        cv2.imshow('image', img)
        cv2.waitKey(5000)
        print('got rectangle')
        cv2.destroyAllWindows()

        print('Loaded network {:s}'.format(saved_model))
        scores, boxes = im_detect(net, img)
        CLASSES = ('__background__',
                   'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
                   'bus', 'car', 'cat', 'chair', 'cow',
                   'diningtable', 'dog', 'horse', 'motorbike', 'person',
                   'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor',
                   'aeroplane_truncated', 'bicycle_truncated', 'bird_truncated',
                   'boat_truncated', 'bottle_truncated', 'bus_truncated',
                   'car_truncated', 'cat_truncated', 'chair_truncated',
                   'cow_truncated', 'diningtable_truncated', 'dog_truncated',
                   'horse_truncated', 'motorbike_truncated', 'person_truncated',
                   'pottedplant_truncated', 'sheep_truncated', 'sofa_truncated',
                   'train_truncated', 'tvmonitor_truncated')
        idx = np.argmax(scores, 1).squeeze()
        box = boxes[:, 4 * idx:4 * (idx + 1)][0]
        cls = CLASSES[idx]

        # v4.0
        CAMs = returnCAM(features_blobs[0], weight_softmax, [idx])
        heatmap = cv2.applyColorMap(
            cv2.resize(CAMs[0], (roi[2] - roi[0], roi[3] - roi[1])), cv2.COLORMAP_JET)
        result = heatmap * 0.3 + img[roi[1]:roi[3], roi[0]:roi[2], :] * 0.5
        cv2.imwrite('CAM.jpg', result)
        # v4.0

        im = img[:, :, (2, 1, 0)]
        fig, ax = plt.subplots(figsize=(12, 12))
        ax.imshow(im, aspect='equal')
        bbox = box
        score = np.max(scores)
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False, edgecolor='red', linewidth=3.5))
        ax.text(bbox[0], bbox[1] - 2,
                '{:s} {:.3f}'.format(cls, score),
                bbox=dict(facecolor='blue', alpha=0.5),
                fontsize=14, color='white')
        plt.axis('off')
        plt.tight_layout()
        plt.draw()
        plt.show()
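# Entry-point guard, assuming the script is run directly; the original
# invocation is not shown above, so this is a conventional sketch.
if __name__ == '__main__':
    main()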
print('Called with args:')
print(args)

if args.cfg_file is not None:
    cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
    cfg_from_list(args.set_cfgs)

print('Using config:')
pprint.pprint(cfg)

np.random.seed(cfg.RNG_SEED)

# test set
orgflip = cfg.TRAIN.USE_FLIPPED
cfg.TRAIN.USE_FLIPPED = False
imdb, roidb = combined_roidb(args.imdbtest_name)
print('{:d} test roidb entries'.format(len(roidb)))
cfg.TRAIN.USE_FLIPPED = orgflip

cfg.TRAIN.SNAPSHOT_PREFIX = ""
cfg.TRAIN.SNAPSHOT_LOAD_PREFIX = ""

net = resnetv1(imdb.nof_ent_classes, imdb.nof_rel_classes, num_layers=101)
train_net(net, imdb, [], roidb, "", "",
          pretrained_model=args.model,
          max_iters=1,
          just_test=True)
def launch_train(self, conf):
    ''' '''
    args = {}
    args['cfg_file'] = conf.frcnn_cfg
    args['weight'] = conf.starting_weights
    args['imdb_name'] = conf.train_set
    args['imdbval_name'] = conf.valid_set
    args['max_iters'] = conf.iters
    args['tag'] = conf.frcnn_tag
    args['net'] = conf.frcnn_net
    args['set_cfgs'] = None

    print('Called with args:')
    print(args)

    if args['cfg_file'] is not None:
        cfg_from_file(args['cfg_file'])
    if args['set_cfgs'] is not None:
        cfg_from_list(args['set_cfgs'])

    print('Using config:')
    pprint.pprint(cfg)

    np.random.seed(cfg.RNG_SEED)

    # train set
    imdb, roidb = combined_roidb(args['imdb_name'], conf)
    print('{:d} roidb entries'.format(len(roidb)))

    # output directory where the models are saved
    output_dir = conf.backup_folder  # get_output_dir(imdb, args.tag)
    print('Output will be saved to `{:s}`'.format(output_dir))

    # tensorboard directory where the summaries are saved during training
    tb_dir = conf.backup_folder  # get_output_tb_dir(imdb, args.tag)
    print('TensorFlow summaries will be saved to `{:s}`'.format(tb_dir))

    # also add the validation set, but with no flipped images
    orgflip = cfg.TRAIN.USE_FLIPPED
    cfg.TRAIN.USE_FLIPPED = False
    _, valroidb = combined_roidb(args['imdbval_name'], conf)
    print('{:d} validation roidb entries'.format(len(valroidb)))
    cfg.TRAIN.USE_FLIPPED = orgflip

    # load network
    if args['net'] == 'vgg16':
        net = vgg16(batch_size=cfg.TRAIN.IMS_PER_BATCH)
    elif args['net'] == 'res50':
        net = resnetv1(batch_size=cfg.TRAIN.IMS_PER_BATCH, num_layers=50)
    elif args['net'] == 'res101':
        net = resnetv1(batch_size=cfg.TRAIN.IMS_PER_BATCH, num_layers=101)
    elif args['net'] == 'res152':
        net = resnetv1(batch_size=cfg.TRAIN.IMS_PER_BATCH, num_layers=152)
    elif args['net'] == 'mobile':
        net = mobilenetv1(batch_size=cfg.TRAIN.IMS_PER_BATCH)
    else:
        raise NotImplementedError

    train_net(net, imdb, roidb, valroidb, output_dir, tb_dir,
              pretrained_model=args['weight'],
              max_iters=args['max_iters'])
def launch_test(self, conf, hash_model):
    ''' '''
    args = {}
    args['cfg_file'] = conf.frcnn_cfg
    args['weight'] = conf.starting_weights
    args['model'] = hash_model
    args['imdb_name'] = conf.valid_set
    args['comp_mode'] = False
    args['tag'] = conf.frcnn_tag
    args['net'] = conf.frcnn_net
    args['set_cfgs'] = None
    args['max_per_image'] = 5

    print('Called with args:')
    print(args)

    if args['cfg_file'] is not None:
        cfg_from_file(args['cfg_file'])
    if args['set_cfgs'] is not None:
        cfg_from_list(args['set_cfgs'])

    print('Using config:')
    pprint.pprint(cfg)

    # if there is a model, get the filename from it;
    # if there is not, just use the initialization weights
    if args['model']:
        filename = os.path.splitext(os.path.basename(args['model']))[0]
    else:
        filename = os.path.splitext(os.path.basename(args['weight']))[0]

    tag = args['tag']
    tag = tag if tag else 'default'
    filename = tag + '/' + filename

    # TODO This is really bad but it works, I'm sincerely sorry
    conf_copy = copy.deepcopy(conf)
    conf_copy.train_set = conf_copy.valid_set
    imdb = get_imdb(args['imdb_name'], conf_copy)
    print(args['imdb_name'])
    imdb.competition_mode(args['comp_mode'])

    tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True

    # init session
    sess = tf.Session(config=tfconfig)

    # load network
    if args['net'] == 'vgg16':
        net = vgg16(batch_size=1)
    elif args['net'] == 'res50':
        net = resnetv1(batch_size=1, num_layers=50)
    elif args['net'] == 'res101':
        net = resnetv1(batch_size=1, num_layers=101)
    elif args['net'] == 'res152':
        net = resnetv1(batch_size=1, num_layers=152)
    elif args['net'] == 'mobile':
        net = mobilenetv1(batch_size=1)
    else:
        raise NotImplementedError

    # load model
    net.create_architecture(sess, "TEST", imdb.num_classes, tag='default',
                            anchor_scales=cfg.ANCHOR_SCALES,
                            anchor_ratios=cfg.ANCHOR_RATIOS)

    if args['model']:
        print(('Loading model checkpoint from {:s}').format(args['model']))
        saver = tf.train.Saver()
        saver.restore(sess, args['model'])
        print('Loaded.')
    else:
        print(('Loading initial weights from {:s}').format(args['weight']))
        sess.run(tf.global_variables_initializer())
        print('Loaded.')

    test_net(sess, net, imdb, filename, max_per_image=args['max_per_image'])
    sess.close()
if not os.path.isfile(tfmodel + '.meta'):
    raise IOError(('{:s} not found.\nDid you download the proper networks from '
                   'our server and place them properly?').format(tfmodel + '.meta'))

# set config
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True

# init session
sess = tf.Session(config=tfconfig)

# load network
if demonet == 'res101':
    net = resnetv1(num_layers=101)
else:
    raise NotImplementedError
net.create_architecture("TEST", len(CLASSES), tag='default', anchor_scales=[4, 8, 16, 32])

saver = tf.train.Saver()
saver.restore(sess, tfmodel)
print('Loaded network {:s}'.format(tfmodel))

if not os.path.exists(OUTPUT_DIR):
    os.mkdir(OUTPUT_DIR)
def print_detection(im_file, im, class_name, dets, img_shape, thresh=0.5):
    """Draw detected bounding boxes."""
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return

    contents = []
    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]
        cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 255), 2)
        font = cv2.FONT_HERSHEY_SIMPLEX
        score = ("%.3f" % float(score))
        cv2.putText(im, str(class_name) + ":" + str(score),
                    (int(bbox[0]), int(bbox[1] - 2)), font, 0.45, (255, 0, 0), 1)
        print(("{} detections with p({} | box) >= {:.1f}").format(class_name, class_name, thresh))
        element = []
        element.append(int(bbox[0]))
        element.append(int(bbox[1]))
        element.append(int(bbox[2]))
        element.append(int(bbox[3]))
        element.append(score)
        element.append(class_name)
        contents.append(element)

    im_file = im_file.split('/')[-1]
    if WRITE_IMG:
        cv2.imwrite(os.path.join(output_img_path, im_file), im)
    if WRITE_XML:
        xml_write(test_img_path, output_xml_path, im_file, img_shape, contents, with_score=False)


def demo(sess, net, image_name):
    """Detect object classes in an image using pre-computed object proposals."""
    # Load the demo image
    im_file = os.path.join(cfg.DATA_DIR, 'demo/test_imgs', image_name)
    if not os.path.exists(im_file):
        print("Please check where the test images exist!\n")
    im = cv2.imread(im_file)
    shape = im.shape

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(sess, net, im)
    timer.toc()
    print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1  # because we skipped background
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        # vis_detections(im, cls, dets, thresh=CONF_THRESH)
        print_detection(im_file, im, cls, dets, shape, thresh=CONF_THRESH)


def parse_args():
    """Parse input arguments."""
    parser = argparse.ArgumentParser(description='Tensorflow Faster R-CNN demo')
    parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16 res101]',
                        choices=NETS.keys(), default='res101')
    parser.add_argument('--dataset', dest='dataset', help='Trained dataset [pascal_voc pascal_voc_0712]',
                        choices=DATASETS.keys(), default='pascal_voc')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    cfg.TEST.HAS_RPN = True  # Use RPN for proposals
    args = parse_args()

    # model path
    demonet = args.demo_net
    dataset = args.dataset
    tfmodel = os.path.join('output', demonet, DATASETS[dataset][0], 'default',
                           'res101_faster_rcnn_iter_50000.ckpt')
    if not os.path.isfile(tfmodel + '.meta'):
        raise IOError(('{:s} not found.\nDid you download the proper networks from '
                       'our server and place them properly?').format(tfmodel + '.meta'))

    # set config
    tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True

    # init session
    sess = tf.Session(config=tfconfig)

    # load network
    if demonet == 'vgg16':
        net = vgg16()
    elif demonet == 'res101':
        net = resnetv1(num_layers=101)
    else:
        raise NotImplementedError
    net.create_architecture("TEST", 6, tag='default', anchor_scales=[8, 16, 32])

    saver = tf.train.Saver()
    saver.restore(sess, tfmodel)
    print('Loaded network {:s}'.format(tfmodel))

    start_time = cv2.getTickCount()
    im_names = []
    for temp_file in os.listdir(test_img_path):
        im_names.append(temp_file)
    for im_name in im_names:
        print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
        print('Demo for {}{}'.format(test_img_path, im_name))
        demo(sess, net, im_name)

    total_time = (cv2.getTickCount() - start_time) / float(cv2.getTickFrequency())
    num_test_img = len(im_names)
    time_per_img = float(total_time) / num_test_img
    print("per image costs %f s on average!" % time_per_img)
tag = tag if tag else 'default'
filename = tag + '/' + filename

imdb = get_imdb(args.imdb_name)
imdb.competition_mode(args.comp_mode)

tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True

# init session
sess = tf.Session(config=tfconfig)

# load network
if args.net == 'vgg16':
    net = vgg16()
elif args.net == 'res50':
    net = resnetv1(num_layers=50)
elif args.net == 'res101':
    net = resnetv1(num_layers=101)
elif args.net == 'res152':
    net = resnetv1(num_layers=152)
elif args.net == 'mobile':
    net = mobilenetv1()
else:
    raise NotImplementedError

# load model
net.create_architecture("TEST", imdb.num_classes, tag='default',
                        anchor_scales=cfg.ANCHOR_SCALES,
                        anchor_ratios=cfg.ANCHOR_RATIOS)

if args.model:
tag = tag if tag else 'default'
filename = tag + '/' + filename

imdb = get_imdb(args.imdb_name)
imdb.competition_mode(args.comp_mode)

tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True

# init session
sess = tf.Session(config=tfconfig)

# load network
if args.net == 'res41':
    net = resnetv1_41(num_layers=41)
elif args.net == 'res50':
    net = resnetv1(num_layers=50)
elif args.net == 'mobile':
    net = mobilenetv1()
elif args.net == 'mobileenh':
    net = mobilenetv1enh()
else:
    raise NotImplementedError

# load model
net.create_architecture("TEST", imdb.num_classes, tag='default',
                        anchor_scales=cfg.ANCHOR_SCALES,
                        anchor_ratios=cfg.ANCHOR_RATIOS)

if args.model:
# model path
demonet = args.demo_net
dataset = args.dataset
saved_model = os.path.join('output', demonet, DATASETS[dataset][0], 'default',
                           NETS[demonet][0] % (70000 if dataset == 'pascal_voc' else 110000))
if not os.path.isfile(saved_model):
    raise IOError(('{:s} not found.\nDid you download the proper networks from '
                   'our server and place them properly?').format(saved_model))

# load network
if demonet == 'vgg16':
    net = vgg16()
elif demonet == 'res101':
    net = resnetv1(num_layers=101)
else:
    raise NotImplementedError
net.create_architecture(21, tag='default', anchor_scales=[8, 16, 32])

net.load_state_dict(torch.load(saved_model))
net.eval()
net.cuda()
print('Loaded network {:s}'.format(saved_model))

im_names = [i for i in os.listdir('data/demo/')  # Pull in all jpgs
            if i.lower().endswith(".jpg")]
for im_name in im_names:
def testing(imdbval_name, classes, cfg_file, model, weights, tag, net, max_per_image):
    __sets = {}
    for split in ['train', 'val', 'trainval', 'test']:
        name = imdbval_name.split('_')[0] + '_{}'.format(split)
        __sets[name] = (lambda split=split: dataset(split, classes, name.split('_')[0]))

    if cfg_file is not None:
        cfg_from_file(cfg_file)

    print('Using config:')
    pprint.pprint(cfg)

    # if there is a model, get the filename from it;
    # if there is not, just use the initialization weights
    if model:
        filename = os.path.splitext(os.path.basename(model))[0]
    else:
        filename = os.path.splitext(os.path.basename(weights))[0]

    tag = tag if tag else 'default'
    filename = tag + '/' + filename

    imdb = get_imdb(imdbval_name, __sets)

    tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True

    # init session
    sess = tf.Session(config=tfconfig)

    # load network
    if net == 'vgg16':
        net = vgg16(batch_size=1)
    elif net == 'res50':
        net = resnetv1(batch_size=1, num_layers=50)
    elif net == 'res101':
        net = resnetv1(batch_size=1, num_layers=101)
    elif net == 'res152':
        net = resnetv1(batch_size=1, num_layers=152)
    else:
        raise NotImplementedError

    # load model
    net.create_architecture(sess, "TEST", imdb.num_classes, tag='default',
                            anchor_scales=cfg.ANCHOR_SCALES,
                            anchor_ratios=cfg.ANCHOR_RATIOS)

    if model:
        print(('Loading model checkpoint from {:s}').format(model))
        saver = tf.train.Saver()
        saver.restore(sess, model)
        print('Loaded.')
    else:
        print(('Loading initial weights from {:s}').format(weights))
        sess.run(tf.global_variables_initializer())
        print('Loaded.')

    test_net(sess, net, imdb, filename, max_per_image=max_per_image)
    sess.close()
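# A minimal invocation sketch for testing(); every argument value below is an
# illustrative placeholder (dataset name, class tuple, config and checkpoint
# paths), not taken from the original source.
if __name__ == '__main__':
    testing(imdbval_name='mydata_val',
            classes=('__background__', 'object'),
            cfg_file='experiments/cfgs/res101.yml',
            model='output/res101_faster_rcnn_iter_70000.ckpt',
            weights='data/imagenet_weights/res101.ckpt',
            tag='default',
            net='res101',
            max_per_image=100)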