Example #1
def test_net(sess,
             net,
             imdb,
             weights_filename,
             max_per_image=100,
             thresh=0.05):
    """Test a Fast R-CNN network on an image database."""
    np.random.seed(cfg.RNG_SEED)
    num_images = len(imdb.image_index)
    # all detections are collected into:
    #  all_boxes[cls][image] = N x 5 array of detections in
    #  (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in range(num_images)]
                 for _ in range(imdb.num_classes)]

    output_dir = get_output_dir(imdb, weights_filename)
    # timers
    _t = {'im_detect': Timer(), 'misc': Timer()}

    for i in range(num_images):
        im = cv2.imread(imdb.image_path_at(i))

        _t['im_detect'].tic()
        scores, boxes = im_detect(sess, net, im)
        _t['im_detect'].toc()

        _t['misc'].tic()

        # skip j = 0, because it's the background class
        for j in range(1, imdb.num_classes):
            inds = np.where(scores[:, j] > thresh)[0]
            cls_scores = scores[inds, j]
            cls_boxes = boxes[inds, j * 4:(j + 1) * 4]
            cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
              .astype(np.float32, copy=False)
            keep = nms(cls_dets, cfg.TEST.NMS)
            cls_dets = cls_dets[keep, :]
            all_boxes[j][i] = cls_dets

        # Limit to max_per_image detections *over all classes*
        if max_per_image > 0:
            image_scores = np.hstack(
                [all_boxes[j][i][:, -1] for j in range(1, imdb.num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in range(1, imdb.num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]
        _t['misc'].toc()

        print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
            .format(i + 1, num_images, _t['im_detect'].average_time,
                _t['misc'].average_time))

    det_file = os.path.join(output_dir, 'detections.pkl')
    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)

    print('Evaluating detections')
    imdb.evaluate_detections(all_boxes, output_dir)
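
Note: the max_per_image cap above keeps only the highest-scoring detections across all foreground classes for each image. The following self-contained sketch (with made-up boxes and scores, not part of the original code) shows that thresholding step in isolation:

import numpy as np

max_per_image = 3
# fake per-class detections; each row is (x1, y1, x2, y2, score)
dets_per_class = [
    np.array([[0, 0, 10, 10, 0.9], [5, 5, 20, 20, 0.4]]),  # class 1
    np.array([[2, 2, 12, 12, 0.8], [6, 6, 22, 22, 0.7]]),  # class 2
]
image_scores = np.hstack([d[:, -1] for d in dets_per_class])
if len(image_scores) > max_per_image:
    # the score of the max_per_image-th best detection becomes the per-image threshold
    image_thresh = np.sort(image_scores)[-max_per_image]
    dets_per_class = [d[d[:, -1] >= image_thresh] for d in dets_per_class]
print([d[:, -1] for d in dets_per_class])  # kept scores: [0.9] and [0.8, 0.7]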
Example #2
def extract_net(sess,
                net,
                imdb,
                weights_filename,
                roidb,
                max_per_image=100,
                thresh=0.05):
    # Adapted from test_net; this function extracts conv5 feature maps from VGG16.
    """Extract conv5 features of VGG16 over an image database."""
    np.random.seed(cfg.RNG_SEED)
    num_images = len(imdb.image_index)
    # all detections are collected into:
    #  all_boxes[cls][image] = N x 5 array of detections in
    #  (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in range(num_images)]
                 for _ in range(imdb.num_classes)]

    output_dir = get_output_dir(imdb, weights_filename)
    print("output_dir is:", output_dir)
    print("\n")
    # timers
    _t = {'im_detect': Timer(), 'misc': Timer()}

    data_input = []
    # N_save: number of processed images accumulated before each output file is written
    N_save = 100
    for i in range(1000):  # note: only the first 1000 images are processed here
        im = cv2.imread(imdb.image_path_at(i))

        _t['im_detect'].tic()
        feature_maps = im_extract(sess, net, im)

        fea = np.squeeze(feature_maps)
        data_temp = {}
        data_temp['img_path'] = imdb.image_path_at(i)
        data_temp['box'] = roidb[i]['boxes']
        data_temp['img_shape'] = np.shape(im)
        data_temp['fea'] = fea
        data_input.append(data_temp)
        if (i + 1) % N_save == 0:
            file_name_of_input = '/home/yangxu/project/rd/input/input' + format(
                int((i + 1) / N_save), '03') + '.npz'
            np.savez(file_name_of_input, data_input=data_input)
            data_input = []

        _t['im_detect'].toc()

        _t['misc'].tic()


        print('im_extract: {:d}/{:d} {:.3f}s {:.3f}s' \
            .format(i + 1, num_images, _t['im_detect'].average_time,
                _t['misc'].average_time))
        print(
            "i is: {0}, the path of image is {1}, shape of the image is {2}, the shape of the feature map is{3}\n"
            .format(i, imdb.image_path_at(i), np.shape(im), np.shape(fea)))
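
Note: each file written by extract_net stores the accumulated list of dicts under the key 'data_input'. A minimal sketch of reading one of those .npz files back (the file name is hypothetical; allow_pickle is required because the entries are Python objects):

import numpy as np

archive = np.load('input001.npz', allow_pickle=True)  # hypothetical file name
for entry in archive['data_input']:
    # each entry mirrors data_temp above: image path, gt boxes, image shape, conv5 features
    print(entry['img_path'], entry['img_shape'], np.shape(entry['box']), np.shape(entry['fea']))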
Example #3
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    print('Using config:')
    pprint.pprint(cfg)

    np.random.seed(cfg.RNG_SEED)

    # train set
    imdb, roidb = combined_roidb(args.imdb_name)
    print('{:d} roidb entries'.format(len(roidb)))

    # output directory where the models are saved
    output_dir = get_output_dir(imdb, args.tag)
    print('Output will be saved to `{:s}`'.format(output_dir))

    # tensorboard directory where the summaries are saved during training
    tb_dir = get_output_tb_dir(imdb, args.tag)
    print('TensorFlow summaries will be saved to `{:s}`'.format(tb_dir))

    # also add the validation set, but with no flipping images
    orgflip = cfg.TRAIN.USE_FLIPPED
    cfg.TRAIN.USE_FLIPPED = False
    _, valroidb = combined_roidb(args.imdbval_name)
    print('{:d} validation roidb entries'.format(len(valroidb)))
    cfg.TRAIN.USE_FLIPPED = orgflip

    # load network
    if args.net == 'vgg16':
Example #4
    pre_train_weight = project_path + "/data/pre_train_weight/" + arg_net + ".ckpt"

    cfg_from_file(project_path + "/experiments/cfgs/" + arg_net +
                  ".yml")  # load config from file
    cfg_from_list(set_cfgs)  # override config entries
    print('Using config:')
    pprint.pprint(cfg)

    # roidb: gt_boxes for all training images
    # imdb: metadata for the training dataset: class list, image-name indices, dataset name, etc.
    imdb, roidb = combined_roidb("gridsum_car_train")
    print(roidb[0]['boxes'])
    print(roidb[0])
    print('{:d} roidb entries'.format(len(roidb)))
    # output directory where the models are saved
    output_dir = get_output_dir(imdb, "")
    print('Output will be saved to `{:s}`'.format(output_dir))
    # tensorboard directory where the summaries are saved during training
    tb_dir = get_output_tb_dir(imdb, "")
    print('TensorFlow summaries will be saved to `{:s}`'.format(tb_dir))
    # load the val dataset the same way
    _, valroidb = combined_roidb("gridsum_car_train")
    print('{:d} validation roidb entries'.format(len(valroidb)))

    if arg_net == 'vgg16':
        net = vgg16()
    elif arg_net == 'res50':
        net = resnetv1(num_layers=50)
    elif arg_net == 'res101':
        net = resnetv1(num_layers=101)
    elif arg_net == 'res152':
        net = resnetv1(num_layers=152)

    # train set
    imdb, roidb = get_roidb('voc_2007_trainval')  # training set

    #print('{:d} roidb entries'.format(len(roidb)))
    pretrained_model = 'E:/fasterwrite/data/imagenet_weights/vgg16.ckpt'
    '''
  print(len(roidb))
  print (type(roidb[0]['image']))
  im=cv2.imread(roidb[0]['image']) 
  print(im)
  im=cv2.imread(roidb[6000]['image']) 
  print(im)
  '''

    weights_filename = 'default'
    output_dir = get_output_dir(imdb, weights_filename)
    print('Output will be saved to `{:s}`'.format(output_dir))
    # tb_dir = get_output_tb_dir(imdb,weights_filename)

    net = vgg16()
    orgflip = cfg.TRAIN.USE_FLIPPED
    cfg.TRAIN.USE_FLIPPED = False
    valimdb, valroidb = get_roidb('voc_2007_test')  # load the test-set data
    # sanity-check the validation data
    #im=cv2.imread(valroidb[6000]['image'])
    #print(im)

    print('{:d} validation roidb entries'.format(len(valroidb)))
    cfg.TRAIN.USE_FLIPPED = orgflip
    # all checks above pass so far
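
Note: the pieces assembled above (net, imdb, roidb, valroidb, output_dir, pretrained_model) are normally handed to the training loop next. A hedged sketch, assuming the train_net helper from the tf-faster-rcnn codebase; the import path, signature, and iteration count are assumptions, so verify them against your checkout:

from model.train_val import train_net  # assumed import path (tf-faster-rcnn style)

tb_dir = get_output_tb_dir(imdb, weights_filename)  # mirrors the commented-out line above
train_net(net, imdb, roidb, valroidb, output_dir, tb_dir,
          pretrained_model=pretrained_model,  # the vgg16.ckpt path defined above
          max_iters=70000)  # example value; pick to suit your schedule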