def main(args_list):
    """Entry point: parse CLI args, configure caffe from the global cfg,
    load the snapshot, and run test_net over the requested imdb."""
    args = parse_args(args_list)

    print('Called with args:')
    print(args)

    # Merge optional config file / key=value overrides into the global cfg.
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    cfg.GPU_ID = args.gpu_id

    print('Using config:')
    pprint.pprint(cfg)

    # Optionally block until training (possibly still running elsewhere)
    # has produced the snapshot file.
    while not os.path.exists(args.caffemodel) and args.wait:
        print('Waiting for {} to exist...'.format(args.caffemodel))
        time.sleep(10)

    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)
    net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
    # Name the net after the snapshot file so outputs are attributable.
    net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]

    imdb = get_imdb(args.imdb_name)
    imdb.competition_mode(args.comp_mode)
    # Without an RPN, proposals must come from a precomputed method.
    if not cfg.TEST.HAS_RPN:
        imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)

    test_net(net, imdb, max_per_image=args.max_per_image, vis=args.vis)
def train_model(self, max_iters):
    """Network training loop.

    Steps the solver one SGD iteration at a time up to *max_iters*,
    printing speed every ``10 * display`` iterations, snapshotting every
    ``cfg.TRAIN.SNAPSHOT_ITERS`` iterations, and (optionally) evaluating
    each snapshot on a validation imdb.  Returns the list of snapshot
    file paths produced.
    """
    last_snapshot_iter = -1
    timer = Timer()
    model_paths = []
    while self.solver.iter < max_iters:
        # Make one SGD update
        timer.tic()
        self.solver.step(1)
        timer.toc()
        if self.solver.iter % (10 * self.solver_param.display) == 0:
            print 'speed: {:.3f}s / iter'.format(timer.average_time)

        if self.solver.iter % cfg.TRAIN.SNAPSHOT_ITERS == 0:
            last_snapshot_iter = self.solver.iter
            model_paths.append(self.snapshot())
            # Optional mid-training validation on the snapshot just written.
            # validation_cfg is (prototxt, imdb) -- see the tuple unpack below.
            if self.validation_cfg is not None:
                val_caffe_model = str(model_paths[-1])
                val_prototxt = self.validation_cfg[0]
                val_imdb = self.validation_cfg[1]
                val_net = caffe.Net(val_prototxt, val_caffe_model, caffe.TEST)
                val_net.name = os.path.splitext(
                    os.path.basename(val_caffe_model))[0]
                test_net(val_net, val_imdb, max_per_image=10000)

    # Make sure the final state is snapshotted even if max_iters is not a
    # multiple of SNAPSHOT_ITERS.
    if last_snapshot_iter != self.solver.iter:
        model_paths.append(self.snapshot())
    return model_paths
def getPrediction(image_folder, image_set, annotation_path, image_set_file_name, net):
    """Run the detector over the USTS image set named by
    *image_set_file_name* and return its random-cover detections.

    Note: image_folder, image_set and annotation_path are accepted for
    interface compatibility but not used in the body.
    """
    dataset = usts(image_set_file_name)  # what should I use here?
    if not cfg.TEST.HAS_RPN:
        dataset.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
    test_net(net, dataset)
    return obtain_detections_random_cover(image_set_file_name)
def evaluate_faster_rcnn(conf_thresh, nms_thresh): """Evaluate a Faster R-CNN network on a image database.""" # Set prototxt prototxt = osp.join(cfg.MODELS_DIR, cfg.DATASET_NAME, cfg.METHOD_NAME, cfg.MODEL_NAME, 'test.prototxt') check_if_exist('Prototxt', prototxt) # Get most recent model test_model = get_model_path(cfg.OUTPUT_DIR, '.caffemodel', '_iter_') if test_model is None: print('No model found in `{:s}`.'.format(cfg.OUTPUT_DIR)) sys.exit() caffe.set_mode_gpu() caffe.set_device(cfg.GPU_ID) net = caffe.Net(prototxt, caffe.TEST, weights=test_model) net.name = osp.splitext(osp.basename(test_model))[0] # Get imdb imdb_name = '{:s}_val'.format(cfg.DATASET_NAME) imdb = get_imdb(imdb_name) # results_dir = osp.join(cfg.OUTPUT_DIR, 'results') # imdb._do_pascal_voc_eval(results_dir) if not cfg.TEST.HAS_RPN: imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD) test_net(net, imdb, conf_thresh, nms_thresh)
def test(classes, cfg_file, test_prototxt, test_model, comp_mode,
         max_per_image, vis, imdb_name='voc_2007_test', set_cfgs=None):
    """Evaluate a detector snapshot on *imdb_name* restricted to *classes*.

    Waits (polling every 10 s) until *test_model* exists, optionally
    patches the test prototxt class count, then runs test_net.
    """
    import time  # used by the snapshot wait loop below

    if cfg_file is not None:
        cfg_from_file(cfg_file)
    if set_cfgs is not None:
        cfg_from_list(set_cfgs)

    print('Using config:')
    pprint.pprint(cfg)

    # BUGFIX: the original loop printed continuously with no sleep,
    # busy-spinning the CPU while waiting for the snapshot to appear.
    while not os.path.exists(test_model):
        print('{} is not existed...'.format(test_model))
        time.sleep(10)

    if test_prototxt is not None:
        # add 1 -- should consider background
        aux_tools.change_test_prototxt(test_prototxt, len(classes) + 1)

    net = caffe.Net(test_prototxt, test_model, caffe.TEST)
    net.name = os.path.splitext(os.path.basename(test_model))[0]

    imdb = get_imdb_class(imdb_name, classes)
    imdb.competition_mode(comp_mode)
    print(imdb.classes)
    if not cfg.TEST.HAS_RPN:
        imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)

    test_net(net, imdb, max_per_image=max_per_image, vis=vis)
def runTestNet(self, test_name):
    """Evaluate self.net (a .caffemodel path) with self.prototxt on the
    imdb named *test_name*, using the config file self.cfg_file and GPU 0.
    """
    cfg_from_file(self.cfg_file)
    print('Using config:')
    pprint.pprint(cfg)
    caffe.set_mode_gpu()
    caffe.set_device(0)
    net = caffe.Net(self.prototxt, self.net, caffe.TEST)
    print 'prototxt ', self.prototxt
    print 'caffemodel ', self.net
    print 'test ', caffe.TEST
    net.name = os.path.splitext(os.path.basename(self.net))[0]
    imdb = get_imdb(test_name)
    if not cfg.TEST.HAS_RPN:
        imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
    test_net(net, imdb)
def test_imdb(net_def, caffemodel, imdb):
    """Load the Fast R-CNN detector described by *net_def*/*caffemodel*
    on the GPU and evaluate it over every image in *imdb*."""
    caffe.set_mode_gpu()
    model = caffe.Net(net_def, caffemodel, caffe.TEST)
    model_name, _ = os.path.splitext(os.path.basename(caffemodel))
    model.name = model_name
    test_net(model, imdb)
def test_controller(faster_rcnn_exp):
    """Evaluate the given faster-rcnn experiment object and return (mAP, aps)."""
    init_new_dataset(faster_rcnn_exp)
    test_proto_file = faster_rcnn_exp.test_proto_file
    faster_rcnn_cfg = faster_rcnn_exp.cfg_file
    cfg_from_file(faster_rcnn_cfg)
    GPU_ID = faster_rcnn_exp.misc.gpu_id
    # Either the weights produced by this experiment's training run, or a
    # fixed external weights file.
    if faster_rcnn_exp.use_trained_weights_test:
        caffemodel = faster_rcnn_exp.trained_model_path
    else:
        caffemodel = faster_rcnn_exp.weights_file_test
    caffe.set_mode_gpu()
    caffe.set_device(GPU_ID)
    print test_proto_file
    print caffemodel
    net = caffe.Net(test_proto_file, str(caffemodel), caffe.TEST)
    net.name = os.path.splitext(os.path.basename(caffemodel))[0]
    # Imdb names follow '<experiment_name>_<split>'.
    imdb_name = faster_rcnn_exp.experiment_name + '_' + faster_rcnn_exp.test_split
    imdb = get_imdb(imdb_name)
    if not cfg.TEST.HAS_RPN:
        imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
    mAP, aps = test_net(net, imdb)
    return mAP, aps
def final_predict(self):
    """Full prediction pipeline: segment the lung, detect candidate
    boxes with test_net, filter them by the lung mask, then score each
    candidate crop with the false-positive-reduction model.

    The concatenated per-candidate scores are stored in ``self.probs``.
    """
    #self.lungmask = self.seg_lung()
    self.seg_lung()
    self.candis_thresh = 0.98
    all_bbx = test_net(net=self.net, vol=self.data)
    # Keep only candidates that lie within the lung mask.
    candis = self.cdd_filter_lungmask(thresh_r=10, candis=all_bbx)
    self.gen_list(candis)
    self.preprocess_data()
    config_dict = get_config(self.pkl_candis, self.vol_candis,
                             batchsize=32, crop_shape=[64, 64, 64])
    valid_generator = config_dict['valid_generator']
    nb_valid_samples = config_dict['nb_valid_samples']
    model = fp_init.init(self.modelDir)
    probs = []
    # try:
    nb_seen_samples = 0
    while nb_seen_samples < nb_valid_samples:
        samples = next(valid_generator)  #valid_generator.next()#
        probs.append(model.predict_on_batch(samples))
        # NOTE(review): assumes len(samples) equals the batch size --
        # confirm the generator yields a sized batch, not an
        # (inputs, targets) tuple (whose len would be 2).
        nb_seen_samples += len(samples)
    assert nb_seen_samples == nb_valid_samples
    probs = np.concatenate(probs, axis=0)
    # write_pkl_file(result_saveto, probs)
    # except KeyboardInterrupt:
    # pass
    self.probs = probs
def test_imdb(rpn_net, fast_rcnn_net, imdb, anchors):
    """ Test Faster-rcnn model on a image dataset.

    Stage 1 runs the RPN to produce per-image proposal boxes; stage 2
    wraps those boxes as the imdb's roidb and runs the Fast R-CNN
    detector over them.
    """
    print 'Run RPN model, get the proposalboxes...'
    proposal_boxes = test_rpn_net.test_imdb(rpn_net, imdb, anchors)
    print 'done!'
    print 'Append proposal boxes into imdb'
    roidb = []
    # Minimal roidb entry per image: box coordinates plus all-zero
    # (unknown/background) class labels, as Fast R-CNN ignores them at test.
    for box in proposal_boxes:
        roidb.append({'boxes': box[:, 0:4],
                      'gt_classes' : np.zeros((len(box),), dtype=np.int32)})
    imdb.roidb = roidb
    print 'done!'
    print 'Run Fast-RCNN model to detect...'
    fast_rcnn_test.test_net(fast_rcnn_net, imdb)
    print 'done!'
def test_imdb(rpn_net, fast_rcnn_net, imdb, anchors):
    """ Test Faster-rcnn model on a image dataset.

    Same two-stage flow as the sibling implementation: RPN proposals are
    installed as the imdb roidb, then Fast R-CNN detects over them.
    """
    print 'Run RPN model, get the proposalboxes...'
    proposal_boxes = test_rpn_net.test_imdb(rpn_net, imdb, anchors)
    print 'done!'
    print 'Append proposal boxes into imdb'
    roidb = []
    # One roidb entry per image; gt_classes are placeholder zeros.
    for box in proposal_boxes:
        roidb.append({
            'boxes': box[:, 0:4],
            'gt_classes': np.zeros((len(box), ), dtype=np.int32)
        })
    imdb.roidb = roidb
    print 'done!'
    print 'Run Fast-RCNN model to detect...'
    fast_rcnn_test.test_net(fast_rcnn_net, imdb)
    print 'done!'
def test_model_mp(gpu_id, test_prototxt, test_model, cfg, imdb_name,
                  num_dets=100, comp=False, vis=False):
    ''' Test the snapshot using a separate process.

    NOTE(review): the `cfg` parameter shadows the usual module-level
    config object; callers must pass the same config the helpers expect.
    '''
    print 'Testing model: {}'.format(test_model)
    print 'Using cfgs: '
    pprint.pprint(cfg)
    # Imported inside the function so caffe is initialised in the child
    # process, not the parent.
    import caffe
    caffe.set_mode_gpu()
    caffe.set_device(gpu_id)
    net = caffe.Net(test_prototxt, test_model, caffe.TEST)
    net.name = os.path.splitext(os.path.basename(test_model))[0]
    imdb = get_imdb(imdb_name)
    imdb.competition_mode(comp)
    if not cfg.TEST.HAS_RPN:
        imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
    test_net(net, imdb, max_per_image=num_dets, vis=vis)
# Path of model is given by user, use this model_paths = [args.caffe_model_path] print "Model path(s)" print model_paths performance_list = [] iterations_list = [] #print sorted(os.listdir(args.caffe_model_path)) for model_file in model_paths: print "caffemodel:" print model_file print "modelpath:" print os.path.join(args.caffe_model_path,model_file) print "Iterations" iterations = os.path.splitext(os.path.basename(model_file))[0].split("_") print iterations[-1] iterations_list += [iterations[-1]] print iterations_list net = caffe.Net(args.prototxt, model_file, caffe.TEST) net.name = os.path.splitext(os.path.basename(model_file))[0] (classes, performance) = test_net(net, imdb, max_per_image=args.max_per_image, thresh=args.thresh_detect, vis=args.vis) performance_list += [performance['aps']] if(args.eval_iters == True): pass # make plot make_figure(classes, iterations_list, performance_list)
# Script chunk: standard py-faster-rcnn test driver -- configure, wait for
# the snapshot, load the net, and evaluate on the requested imdb.
print('Called with args:')
print(args)
if args.cfg_file is not None:
    cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
    cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
print('Using config:')
pprint.pprint(cfg)
# Optionally block until training has produced the snapshot.
while not os.path.exists(args.caffemodel) and args.wait:
    print('Waiting for {} to exist...'.format(args.caffemodel))
    time.sleep(10)
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
imdb = get_imdb(args.imdb_name)
#import ipdb; ipdb.set_trace()
imdb.competition_mode(args.comp_mode)
if not cfg.TEST.HAS_RPN:
    imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
test_net(net, imdb, max_per_image=args.max_per_image, vis=args.vis)
print "IMDB: " print imdb imdb.competition_mode(args.comp_mode) if not cfg.TEST.HAS_RPN: imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD) print "Model path:" # print args.caffemodel # do one detection and save the detections.pkl net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST) net.name = os.path.splitext(os.path.basename(args.caffemodel))[0] output_dir = get_output_dir(imdb, net) if DEBUG: (classes, performance) = readCache(output_dir) else: (classes, performance) = test_net(net, imdb, max_per_image=args.max_per_image, thresh=0.) recss = performance['recs'] precss = performance['precs'] tprss = performance['tprs'] fprss = performance['fprs'] ROC_Vis(classes, tprss, fprss) PR_Vis(classes, recss, precss)
# Script chunk: build the (possibly multi-label) imdb, then dispatch to
# visualization, score-file evaluation, or a full test run.
print('Using config:')
pprint.pprint(cfg)
cfg.GPU_ID = args.gpu_id
config = tf.ConfigProto()
config.allow_soft_placement = True
# NOTE(review): `config` is constructed but never passed to a session in
# this chunk -- confirm the called helpers build their own session.
if args.multi_label == 1:
    imdb = get_imdb_multi(args.roidb, args.imdb, args.rpndb, args.data_dir,
                          split=args.split, num_im=args.test_size)
else:
    imdb = get_imdb(args.roidb, args.imdb, args.rpndb, args.data_dir,
                    split=args.split, num_im=args.test_size)
if args.test_mode == 'viz_cls' or args.test_mode == 'viz_det':
    # visualize result
    viz_net2(args.network_name, args.model, imdb, args.dump_file,
             args.test_mode)
elif args.load_score is not None:
    # Evaluate previously dumped scores without running the network.
    eval_net(args.load_score, imdb, args.test_mode, args.write_rel_f)
else:
    test_net(args.network_name, args.model, imdb, args.test_mode)
# Script chunk: evaluate one imdb per (elevation, azimuth) rendering
# viewpoint, then assemble a LaTeX table of per-viewpoint results.
if not cfg.TEST.HAS_RPN:
    imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
imdb_names = []
el_range = range(0, 61, 30)
az_range = range(90, 271, 45)
for el in el_range:
    for az in az_range:
        imdb_names.append('unrealcv_%d_%d' % (el, az))
vis = True
for imdb_name in imdb_names:
    print imdb_name
    imdb = get_imdb(imdb_name)
    imdb.competition_mode(comp_mode)
    test_net(net, imdb, max_per_image=max_per_image, vis=vis)
cls = 'sofa'
# LaTeX table: rows are elevations, columns are azimuths.
table = ''
header = '%30s & ' % '\diagbox{Elevation}{Azimuth}' + ' & '.join(
    ['%9.3f' % az for az in az_range])
table += header + '\\\\ \n' + '\hline\n'
for el in el_range:
    row = '%30d' % el
    for az in az_range:
        imdb_name = 'unrealcv_%d_%d' % (el, az)
        imdb = get_imdb(imdb_name)
        imdb.competition_mode(comp_mode)
        output_dir = get_output_dir(imdb, net)
        # NOTE(review): this chunk is truncated here -- the body of the
        # `with` block continues outside this view.
        with open(os.path.join(output_dir, cls + '_pr.pkl')) as f:
# Script chunk: per-snapshot evaluation loop (variant of the chunk above,
# already split across lines in its source file).
performance_list = []
iterations_list = []
#print sorted(os.listdir(args.caffe_model_path))
for model_file in model_paths:
    print "caffemodel:"
    print model_file
    print "modelpath:"
    print os.path.join(args.caffe_model_path, model_file)
    print "Iterations"
    # Iteration count is the trailing '_'-separated token of the basename.
    iterations = os.path.splitext(
        os.path.basename(model_file))[0].split("_")
    print iterations[-1]
    iterations_list += [iterations[-1]]
    print iterations_list
    net = caffe.Net(args.prototxt, model_file, caffe.TEST)
    net.name = os.path.splitext(os.path.basename(model_file))[0]
    (classes, performance) = test_net(net, imdb,
                                      max_per_image=args.max_per_image,
                                      thresh=args.thresh_detect,
                                      vis=args.vis)
    performance_list += [performance['aps']]
    if (args.eval_iters == True):
        pass
# make plot
make_figure(classes, iterations_list, performance_list)
#ckpt = tf.train.get_checkpoint_state(checkpoint_dir) #print "ckpt: ", ckpt #print "ckpt model_checkpoint_path: ", ckpt.model_checkpoint_path #if ckpt is None or ckpt.model_checkpoint_path is None: # raise RuntimeError('Waiting for checkpoint in directory {} to exist...'.format(checkpoint_dir)) device_name = '/{}:{:d}'.format(args.device,args.device_id) print device_name network = get_network(args.network_name) print 'Use network `{:s}` in training'.format(args.network_name) if args.device == 'gpu': cfg.USE_GPU_NMS = True cfg.GPU_ID = args.device_id else: cfg.USE_GPU_NMS = False # start a session saver = tf.train.Saver() sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) #saver.restore(sess, args.model) #print ('Loading model weights from {:s}').format(args.model) saver.restore(sess, "/home/ubuntu/Tensorflow-Faster-RCNN/output/faster_rcnn_end2end/voc_2007_trainval/VGGnet_fast_rcnn_iter_70000.ckpt") #print ('Loading model weights from {:s}').format(ckpt.model_checkpoint_path) test_net(sess, network, imdb, "VGGnet_fast_rcnn_iter_70000")
# Script chunk: apply optional test-scale overrides (each recorded in
# `postfix` so the output directory name encodes the settings), then test.
if args.max_size != -1:
    cfg.TEST.MAX_SIZE = args.max_size
    postfix += '_max{}'.format(args.max_size)
if args.min_size != -1:
    cfg.TEST.SCALES = [
        args.min_size,
    ]
    postfix += '_min{}'.format(args.min_size)
if args.orig_scale > 0:
    postfix += '_orig_{}'.format(int(args.orig_scale * 10))
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = net_name
postfix = net_name + '_' + postfix
if args.single:
    # Debug mode: single-image outputs go to a fixed local directory.
    output_dir = './debug/single/'
    ensure_dir(output_dir)
else:
    output_dir = get_output_dir(imdb, postfix=postfix)
test_net(net, imdb, vis=args.vis, matlab_val=args.matlab_eval,
         postfix=postfix, single_img=args.single, shuffle=args.shuffle,
         fddb_pascal_path=args.fddb_pascal_path)
# Result aggregation is skipped for shuffled (partial/debug) runs.
if not args.shuffle:
    gather_results_csv()
# time.sleep(10) weights_filename = os.path.splitext(os.path.basename(args.model))[0] imdb = get_imdb(args.imdb_name) imdb.competition_mode(args.comp_mode) network = get_network(args.network_name) print('Use network {:s} in training'.format(args.network_name)) if args.device == 'gpu': cfg.USE_GPU_NMS = True cfg.GPU_ID = args.device_id device_name = '/{}:{:d}'.format(args.device, args.device_id) print(device_name) else: cfg.USE_GPU_NMS = False # start a session saver = tf.train.Saver() sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) saver.restore(sess, args.model) print(('Loading model weights from {:s}').format(args.model)) test_net(sess, network, imdb, weights_filename, thresh=args.thresh, force=args.force)
# Script chunk: test driver with visualization forced on.
if args.cfg_file is not None:
    cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
    cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
print('Using config:')
pprint.pprint(cfg)
while not os.path.exists(args.caffemodel) and args.wait:
    print('Waiting for {} to exist...'.format(args.caffemodel))
    time.sleep(10)
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
imdb = get_imdb(args.imdb_name)
imdb.competition_mode(args.comp_mode)
if not cfg.TEST.HAS_RPN:
    imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
# NOTE(review): vis is hard-coded to True here, ignoring any CLI flag.
test_net(net, imdb, max_per_image=args.max_per_image, vis=True,
         thresh=args.thresh)
# Script chunk: standard test driver -- configure, wait for the snapshot,
# load the net, and evaluate on the requested imdb.
print('Called with args:')
print(args)
if args.cfg_file is not None:
    cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
    cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
print('Using config:')
pprint.pprint(cfg)
# Optionally block until training has produced the snapshot.
while not os.path.exists(args.caffemodel) and args.wait:
    print('Waiting for {} to exist...'.format(args.caffemodel))
    time.sleep(10)
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
imdb = get_imdb(args.imdb_name)
imdb.competition_mode(args.comp_mode)
if not cfg.TEST.HAS_RPN:
    imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
# BUGFIX: removed a stray `import pdb; pdb.set_trace()` left in before
# test_net -- it halts every run at an interactive breakpoint (and hangs
# non-interactive ones).
test_net(net, imdb)
def locate_backdoor(net, test_images, verification_images):
    """
    net: caffe net
    test_images: list of strings with the names of the images you want to test
    verification_images: list of images to perform the 20 image check on
    returns average_cpos

    For each test image: place random covers over its signs, find class
    transitions caused by the covers, extract the covered patch as a
    candidate backdoor trigger, paste it onto the 20 verification images,
    and re-run detection; if more than 15 verification images change class,
    the candidate (cpos, image) is returned.

    NOTE(review): this body was reconstructed from a flattened (single-line)
    source; the nesting shown is inferred from data flow -- verify against
    the original file before relying on exact loop boundaries.
    """
    imdb = usts("verify_20")
    if not cfg.TEST.HAS_RPN:
        imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
    test_net(net, imdb)
    verify_detections = obtain_detections_random_cover("verify_20")
    # For each image in the list of images
    for i, image in enumerate(test_images):
        #Write the current image onto single_image_detection.txt
        with open("datasets/usts/ImageSets/single_image_detection.txt", "w") as f:
            f.write("{}".format(image))
        # Perform inference on the image
        imdb = usts("single_image_detection")
        if not cfg.TEST.HAS_RPN:
            imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
        test_net(net, imdb)
        # Obtain detections
        detections = obtain_detections("single_image_detection")
        # Obtain annotations of the original image
        with open("datasets/usts/Annotations/{}.txt".format(image), "r") as f:
            annot = [line.strip().split(',') for line in f.readlines()]
        # Place random covers on the image
        print "Generating random covers for image {}, detections: {}".format(
            i, detections)
        cpos_dict = generate_random_covers(image, annot)
        # Perform inference on the covered images
        print "Completed generation, detecting now"
        imdb = usts("random_covers")
        if not cfg.TEST.HAS_RPN:
            imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
        test_net(net, imdb)
        # Obtain detections on these images
        random_covers_detections = obtain_detections_random_cover(
            "random_covers")
        # Create a transition dictionary -> transitions[original-class][new-class]: list of images (random_cover)
        transition = defaultdict(
            lambda: defaultdict(lambda: defaultdict(list)))
        # Loop through random_cover dictionary
        for im, detection_list in random_covers_detections.iteritems():
            # Loop through detections (list of lists) of the original image
            for orig_idx, orig_detection in enumerate(detections):
                # Loop through the list obtained from random_cover dictionary
                for new_detection in detection_list:
                    # If iou > 0 && there is change in transition, append
                    if iou(orig_detection[2:], new_detection[2:]
                           ) > 0 and orig_detection[0] != new_detection[0]:
                        if float(new_detection[1]) > 0.9:
                            transition[orig_idx][orig_detection[0]][
                                new_detection[0]].append(im)
        for orig_idx, transition_dict in transition.iteritems():
            # Loop through each of the original class
            for from_type, sub_dict in transition_dict.iteritems():
                # If detection from the original image matches an annotation, let the coordinates be the annotations
                obtained_coord = False
                for detection in detections:
                    if detection[0] == from_type:
                        for anno in annot:
                            if iou(detection[2:], anno[1:5]) > 0:
                                a = int(float(anno[1]))
                                b = int(float(anno[2]))
                                c = int(float(anno[3]))
                                d = int(float(anno[4]))
                                obtained_coord = True
                if not obtained_coord:
                    continue
                # Loop through each of the new class
                for to_type, im_list in sub_dict.iteritems():
                    # Obtain the average cpos
                    average_cpos_a = 0
                    average_cpos_b = 0
                    for im in im_list:
                        average_cpos_a += cpos_dict[im][0]
                        average_cpos_b += cpos_dict[im][1]
                    average_cpos_a /= len(im_list)
                    average_cpos_b /= len(im_list)
                    # Read image, obtain potential trigger
                    im_cv2 = cv2.imread(
                        "datasets/usts/Images/{}.png".format(image), -1)
                    x1 = min(a, c)
                    x2 = max(a, c)
                    y1 = min(b, d)
                    y2 = max(b, d)
                    w, h = x2 - x1, y2 - y1
                    size = (0.1, 0.1)
                    bw = max(int(w * size[0]), 1)
                    bh = max(int(h * size[1]), 1)
                    cpos = (average_cpos_a, average_cpos_b)
                    # Trigger box centred at the average cover position,
                    # clamped to the image bounds.
                    bx1 = min(int(x1 + w * (cpos[0] - size[0] / 2.)),
                              im_cv2.shape[1] - 1)
                    bx2 = min(bx1 + bw, im_cv2.shape[1])
                    by1 = min(int(y1 + h * (cpos[1] - size[1] / 2.)),
                              im_cv2.shape[0] - 1)
                    by2 = min(by1 + bh, im_cv2.shape[0])
                    # Expand the crop 25% on every side.
                    bx1_new = int(bx1 - (bx2 - bx1) * 0.25)
                    bx2_new = int(bx2 + (bx2 - bx1) * 0.25)
                    by1_new = int(by1 - (by2 - by1) * 0.25)
                    by2_new = int(by2 + (by2 - by1) * 0.25)
                    img_esq = im_cv2[by1_new:by2_new, bx1_new:bx2_new]
                    with open("datasets/usts/ImageSets/verify_20_temp.txt", "w") as f:
                        # Paste the candidate trigger onto each of the 20
                        # verification images at the same relative position.
                        for verify_im, verify_detection in verify_detections.iteritems(
                        ):
                            with open(
                                    "datasets/usts/Annotations/{}.txt".format(
                                        verify_im), "r") as g:
                                verify_detection = [
                                    line.strip().split(',')
                                    for line in g.readlines()
                                ]
                            verify_image = cv2.imread(
                                "datasets/usts/Images/{}.png".format(
                                    verify_im), -1)
                            for num, each_det in enumerate(verify_detection):
                                va = int(float(each_det[1]))
                                vb = int(float(each_det[2]))
                                vc = int(float(each_det[3]))
                                vd = int(float(each_det[4]))
                                vx1 = min(va, vc)
                                vx2 = max(va, vc)
                                vy1 = min(vb, vd)
                                vy2 = max(vb, vd)
                                vw, vh = vx2 - vx1, vy2 - vy1
                                vbw = max(int(vw * size[0]), 1)
                                vbh = max(int(vh * size[1]), 1)
                                vbx1 = min(
                                    int(vx1 + vw * (cpos[0] - size[0] / 2.)),
                                    verify_image.shape[1] - 1)
                                vbx2 = min(vbx1 + vbw, verify_image.shape[1])
                                vby1 = min(
                                    int(vy1 + vh * (cpos[1] - size[1] / 2.)),
                                    verify_image.shape[0] - 1)
                                vby2 = min(vby1 + vbh, verify_image.shape[0])
                                vbx1_new = int(vbx1 - (vbx2 - vbx1) * 0.25)
                                vbx2_new = int(vbx2 + (vbx2 - vbx1) * 0.25)
                                vby1_new = int(vby1 - (vby2 - vby1) * 0.25)
                                vby2_new = int(vby2 + (vby2 - vby1) * 0.25)
                                vbw_new, vbh_new = vbx2_new - vbx1_new, vby2_new - vby1_new
                                backdoor = cv2.resize(
                                    img_esq, (vbw_new, vbh_new),
                                    interpolation=cv2.INTER_CUBIC)
                                verify_image[vby1_new:vby2_new,
                                             vbx1_new:vbx2_new] = backdoor
                                # Modified copies use a '1' in the second
                                # character of the image id.
                                cv2.imwrite(
                                    "datasets/usts/Images/{}.png".format(
                                        verify_im[0] + "1" + verify_im[2:]),
                                    verify_image)
                            f.write("{}\n".format(verify_im[0] + "1" +
                                                  verify_im[2:]))
                    imdb = usts("verify_20_temp")
                    if not cfg.TEST.HAS_RPN:
                        imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
                    test_net(net, imdb)
                    new_verify = obtain_detections_random_cover(
                        "verify_20_temp")
                    transitions = detect_transitions(verify_detections,
                                                     new_verify)
                    print "Transitions: " + str(transitions)
                    print "Number of images contributing to average_cpos: " + str(
                        len(im_list))
                    # More than 15 of 20 verification images flipped class:
                    # candidate trigger confirmed.
                    if transitions > 15:
                        return cpos, image
    return None
# Script chunk: standard test driver with visualization disabled.
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
    cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
    cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
print('Using config:')
pprint.pprint(cfg)
while not os.path.exists(args.caffemodel) and args.wait:
    print('Waiting for {} to exist...'.format(args.caffemodel))
    time.sleep(10)
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
imdb = get_imdb(args.imdb_name)
imdb.competition_mode(args.comp_mode)
if not cfg.TEST.HAS_RPN:
    imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
# NOTE(review): visualization is forced off here regardless of CLI flags.
test_net(net, imdb, max_per_image=args.max_per_image, vis=False)
# NOTE(review): `return args` below is the tail of a parse_args()
# definition whose header lies outside this chunk.
return args


if __name__ == '__main__':
    args = parse_args()
    print('Called with args:')
    print(args)
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    cfg.GPU_ID = args.gpu_id
    print('Using config:')
    pprint.pprint(cfg)
    while not os.path.exists(args.caffemodel) and args.wait:
        print('Waiting for {} to exist...'.format(args.caffemodel))
        time.sleep(10)
    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)
    # Three sub-networks all load weights from the same .caffemodel.
    feature_net = caffe.Net(args.feature_prototxt, caffe.TEST,
                            weights=args.caffemodel)
    embed_net = caffe.Net(args.embed_prototxt, caffe.TEST,
                          weights=args.caffemodel)
    recurrent_net = caffe.Net(args.recurrent_prototxt, caffe.TEST,
                              weights=args.caffemodel)
    feature_net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
    imdb = get_imdb(args.imdb_name)
    #print args.max_per_image
    test_net(feature_net, embed_net, recurrent_net, imdb, \
             vis=args.vis, use_box_at=args.use_box_at)
# Script chunk: three-net (feature/embed/recurrent) captioning test driver.
print(args)
if args.cfg_file is not None:
    cfg_from_file(args.cfg_file)
cfg.GPU_ID = args.gpu_id
print('Using config:')
pprint.pprint(cfg)
while not os.path.exists(args.caffemodel) and args.wait:
    print('Waiting for {} to exist...'.format(args.caffemodel))
    time.sleep(10)
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
# All three sub-networks share the same .caffemodel weights file.
feature_net = caffe.Net(args.feature_prototxt, caffe.TEST,
                        weights=args.caffemodel)
embed_net = caffe.Net(args.embed_prototxt, caffe.TEST,
                      weights=args.caffemodel)
recurrent_net = caffe.Net(args.recurrent_prototxt, caffe.TEST,
                          weights=args.caffemodel)
feature_net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
imdb = get_imdb(args.imdb_name)
#print args.max_per_image
test_net(feature_net, embed_net, recurrent_net, imdb, \
         vis=args.vis, use_box_at=args.use_box_at)
if __name__ == '__main__':
    # Test driver with optional evaluation on horizontally flipped images.
    args = parse_args()
    print('Called with args:')
    print(args)
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    print('Using config:')
    pprint.pprint(cfg)
    while not os.path.exists(args.caffemodel) and args.wait:
        print('Waiting for {} to exist...'.format(args.caffemodel))
        time.sleep(10)
    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)
    net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
    net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
    imdb = get_imdb(args.imdb_name)
    if args.use_flip:
        # Also evaluate on horizontally flipped copies of each image.
        imdb.append_flipped_images()
    imdb.competition_mode(args.comp_mode)
    test_net(net, imdb, args)
# Script chunk: TF driver that dispatches to either RPN-only or full
# detection testing depending on cfg flags.
print('Using config:')
pprint.pprint(cfg)
while not os.path.exists(args.model) and args.wait:
    print('Waiting for {} to exist...'.format(args.model))
    time.sleep(10)
weights_filename = os.path.splitext(os.path.basename(args.model))[0]
imdb = get_imdb(args.imdb_name)
imdb.competition_mode(args.comp_mode)
device_name = '/gpu:{:d}'.format(args.gpu_id)
print device_name
network = get_network(args.network_name)
print 'Use network `{:s}` in training'.format(args.network_name)
cfg.GPU_ID = args.gpu_id
# start a session
saver = tf.train.Saver()
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
saver.restore(sess, args.model)
# NOTE(review): Python 2 print-statement form; under Python 3 `print(...)`
# returns None and the chained .format() would raise AttributeError.
print ('Loading model weights from {:s}').format(args.model)
if cfg.IS_RPN == True and cfg.IS_MULTISCALE == False:
    test_rpn_msr_net(sess, network, imdb, weights_filename)
else:
    test_net(sess, network, imdb, weights_filename)
# NOTE(review): fragment of a larger recursive-retraining loop --
# `boxes`, `scores`, `gt_boxes`, `roidb`, `im_ind`, `hard_negs`, `iters`,
# `threshold`, `caffemodel`, `output_dir`, `log_file`, `total_proposal`,
# `total_hardNeg`, `net` and the enclosing loop exited by `break` are all
# defined outside this chunk.
for box_ind, box in enumerate(boxes):
    # Count proposals that qualify as hard negatives for this image.
    if hardNeg_learning(scores[box_ind], box, gt_boxes, roidb[im_ind]):
        total_hardNeg = total_hardNeg + 1
hard_negs.append(total_hardNeg)
print 'Total proposal: ', total_proposal
print 'Total Hard Negative', total_hardNeg
# Append this recursion's statistics and test result to the log file.
with open(log_file, 'a') as f:
    f.write('---------------------------\n')
    f.write(str(iters) + ' time recursion\n')
    f.write('Total proposal: ' + str(total_proposal) + '\n')
    f.write('Total Hard Negtive: ' + str(total_hardNeg) + '\n')
    f.write('Test Result:\n')
    test_imdb = get_imdb(args.test_imdb)
    f.write(str(test_net(net, test_imdb)) + '\n')
    f.write('---------------------------\n')
# Stop recursing once few enough hard negatives remain.
if total_hardNeg < threshold:
    print 'Done'
    break
print 'Train recursively...'
# print len(roidb)
caffemodel = train_net(args.solver, roidb, output_dir,
                       pretrained_model=str(caffemodel[-1]),
                       max_iters=args.max_iters)
iters = iters + 1
# NOTE(review): the two lines below are the tail of a parse_args()
# definition whose header lies outside this chunk.
args = parser.parse_args()
return args


if __name__ == '__main__':
    args = parse_args()
    print('Called with args:')
    print(args)
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    print('Using config:')
    pprint.pprint(cfg)
    while not os.path.exists(args.caffemodel) and args.wait:
        print('Waiting for {} to exist...'.format(args.caffemodel))
        time.sleep(10)
    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)
    net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
    net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
    imdb = get_imdb(args.imdb_name)
    imdb.competition_mode(args.comp_mode)
    output_dir = os.path.dirname(args.caffemodel)
    # Evaluate both the RGB and the depth-jet image modalities.
    images_subdir = ['ImagesQhd','DepthJetQhd']
    test_net(net, imdb, output_dir, images_subdir)
# Script chunk: derive the class count from the imdb name prefix, rebuild
# the network with it, restore TF weights, and evaluate.
print device_name
if args.imdb_name.startswith('sz_veh') or args.imdb_name.startswith('sz_cyc') or args.imdb_name.startswith('sz_ped') or args.imdb_name.startswith('sz_lights'):
    n_classes = 2
elif args.imdb_name.startswith('ksz_veh') or args.imdb_name.startswith('ksz_cyc') or args.imdb_name.startswith('ksz_ped') or args.imdb_name.startswith('ksz_lights'):
    n_classes = 2
elif args.imdb_name.startswith('sz'):
    n_classes = 5
elif args.imdb_name.startswith('voc'):
    n_classes = 21
else:
    raise Exception('Give me the correct n_classes of %s' % (args.imdb_name))
assert len(imdb._classes) == n_classes
network = get_network(args.network_name, n_classes)
print 'Use network `{:s}` in training'.format(args.network_name)
cfg.GPU_ID = args.gpu_id
# Bbox regression normalization statistics are stored in the checkpoint
# under the 'custom' scope, so the variables must exist before restore.
with tf.variable_scope('custom', reuse=False):
    bbox_means = tf.get_variable("bbox_means", shape=(n_classes * 4, ),
                                 trainable=False)
    bbox_stds = tf.get_variable("bbox_stds", shape=(n_classes * 4, ),
                                trainable=False)
saver = tf.train.Saver()
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
saver.restore(sess, args.model)
# NOTE(review): Python 2 print-statement form; breaks under Python 3.
print ('Loading model weights from {:s}').format(args.model)
test_net(sess, network, imdb, weights_filename, vis=args.vis)
# Script chunk: rewrite the test prototxt so the classifier heads match
# this imdb's class count, evaluate, then iterate detections per image.
net_proto = caffe.proto.caffe_pb2.NetParameter()
text_format.Merge(all_content, net_proto)
all_finded = [layer for layer in net_proto.layer \
              if layer.name == 'cls_score']
assert len(all_finded) == 1
all_finded[0].inner_product_param.num_output = len(imdb.classes)
all_finded = [layer for layer in net_proto.layer \
              if layer.name == 'bbox_pred']
assert len(all_finded) == 1
# Four bbox-regression outputs per class.
all_finded[0].inner_product_param.num_output = 4 * len(imdb.classes)
with open(proto_file + '.out', 'w') as fp:
    fp.write(str(net_proto))
net = caffe.Net(proto_file + '.out', args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
dets = test_net(net, imdb)
imdb.evaluate_detections(dets)
result = []
if 1:
    classes = imdb.classes
    idx_image = 0
    # dets is indexed [class][image].
    for j, index in enumerate(imdb.image_index):
        curr_boxes = []
        for i, cls in enumerate(classes):
            if i == 0:
                continue  # class 0 is background
            box_info = dets[i][j]
            if type(box_info) is list and len(box_info) == 0:
                continue
            assert type(box_info) == np.ndarray, (box_info, type(box_info))
            # NOTE(review): chunk is truncated here -- the loop body
            # continues outside this view.
            im = cv2.imread(imdb.image_path_at(j))
# Script chunk: TF test driver -- wait for the checkpoint, restore it,
# and evaluate on the requested imdb.
pprint.pprint(cfg)
while not os.path.exists(args.model) and args.wait:
    print('Waiting for {} to exist...'.format(args.model))
    time.sleep(10)
weights_filename = os.path.splitext(os.path.basename(args.model))[0]
imdb = get_imdb(args.imdb_name)
imdb.competition_mode(args.comp_mode)
device_name = '/{}:{:d}'.format(args.device, args.device_id)
print(device_name)
network = get_network(args.network_name)
print('Use network `{:s}` in training'.format(args.network_name))
if args.device == 'gpu':
    cfg.USE_GPU_NMS = True
    cfg.GPU_ID = args.device_id
else:
    cfg.USE_GPU_NMS = False
# start a session
saver = tf.train.Saver()
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
saver.restore(sess, args.model)
# BUGFIX: the original `print('...').format(args.model)` called .format()
# on print()'s return value (None), raising AttributeError under Python 3
# (which this chunk's print-function style targets). Format first, then print.
print('Loading model weights from {:s}'.format(args.model))
test_net(sess, network, imdb, weights_filename)
# Script chunk: test driver with a caller-supplied detection threshold.
if args.cfg_file is not None:
    cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
    cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
print('Using config:')
pprint.pprint(cfg)
while not os.path.exists(args.caffemodel) and args.wait:
    print('Waiting for {} to exist...'.format(args.caffemodel))
    time.sleep(10)
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
imdb = get_imdb(args.imdb_name)
# print("DEBUG: imdb = {} ({}); Comp_mode: {}".format(args.imdb_name, imdb, args.comp_mode))
imdb.competition_mode(args.comp_mode)
if not cfg.TEST.HAS_RPN:
    imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
#print("DEBUG: vis=", args.vis)
test_net(net, imdb, max_per_image=args.max_per_image, vis=args.vis,
         thresh=args.threshold)
# Script chunk: test driver that first seeds the config with the
# fast-rcnn defaults before applying file/list overrides.
print('Called with args:')
print(args)
cfg_basic_generation(cfg_fast_rcnn)
if args.cfg_file is not None:
    cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
    cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
print('Using config:')
pprint.pprint(cfg)
while not os.path.exists(args.caffemodel) and args.wait:
    print('Waiting for {} to exist...'.format(args.caffemodel))
    time.sleep(10)
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
imdb = get_imdb(args.imdb_name)
imdb.competition_mode(args.comp_mode)
if not cfg.TEST.HAS_RPN:
    imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
test_net(net, imdb, max_per_image=args.max_per_image, vis=args.vis)
# Script chunk: test driver with extra write/threshold/IoU options
# forwarded to test_net.
if args.set_cfgs is not None:
    cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
print('Using config:')
pprint.pprint(cfg)
while not os.path.exists(args.caffemodel) and args.wait:
    print('Waiting for {} to exist...'.format(args.caffemodel))
    time.sleep(10)
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
imdb = get_imdb(args.imdb_name)
imdb.competition_mode(args.comp_mode)
if not cfg.TEST.HAS_RPN:
    imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
test_net(net, imdb, max_per_image=args.max_per_image, vis=args.vis,
         wrt=args.wrt, thresh=args.thresh, iou=args.iou)
#test_rpn(net, imdb, max_per_image=args.max_per_image, vis=args.vis, wrt=args.wrt)