Example #1
def ssearch_fast(filename, seg_dir='/home/sai/Documents/selective_search/VOC2007/segments/', save_name=None):
    ''' Helper function for calling ssearch in fast mode '''
    a = time.time()
    blob_array = []
    priority = []
    img = plt.imread(filename)
    cc = convert_colorspace(img, ['hsv', 'LAB'])

    # Pre-computed segmentation masks, one per (colorspace, k) pair.
    seg_filename = [seg_dir + 'HSV/50/' + filename[-10:-4] + '.mat',
                    seg_dir + 'HSV/100/' + filename[-10:-4] + '.mat',
                    seg_dir + 'LAB/50/' + filename[-10:-4] + '.mat',
                    seg_dir + 'LAB/100/' + filename[-10:-4] + '.mat']

    for i, _file in enumerate(seg_filename):
        _img = cc[i // 2]  # two segmentation scales per colorspace
        # Selective search with the full feature set (color, texture, size, fill) ...
        _blob_array = _ssearch(_img, load_segment_mask(_file),
                               sim_feats=[sf.color_hist_sim(), sf.texture_hist_sim(),
                                          sf.size_sim(img.shape), sf.fill_sim(img.shape)])
        blob_array.append(_blob_array)
        priority.append(np.arange(len(_blob_array), 0, -1).clip(0, (len(_blob_array) + 1) // 2))

        # ... and again without the color histogram.
        _blob_array = _ssearch(_img, load_segment_mask(_file),
                               sim_feats=[sf.texture_hist_sim(), sf.size_sim(img.shape), sf.fill_sim(img.shape)])
        blob_array.append(_blob_array)
        priority.append(np.arange(len(_blob_array), 0, -1).clip(0, (len(_blob_array) + 1) // 2))

    bboxes = remove_duplicate(blob_array, priority)
    print('Computed %d proposals' % (len(bboxes)))
    if save_name is not None:
        np.savetxt(save_name + '.txt', bboxes)
    print('Time taken: %f' % (time.time() - a))
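A hypothetical driver for ssearch_fast (the VOC paths and output prefix below are illustrative, not from the original repo; it assumes the module-level helpers used above, _ssearch, load_segment_mask, sf and convert_colorspace, plus time, numpy and matplotlib, are already imported):

import glob
import os

# Run the fast selective search over a few VOC2007 images and write one
# proposal file per image. Paths are made up for illustration.
os.makedirs('proposals', exist_ok=True)
for img_path in sorted(glob.glob('VOCdevkit/VOC2007/JPEGImages/*.jpg'))[:5]:
    out_name = os.path.splitext(os.path.basename(img_path))[0]
    ssearch_fast(img_path, save_name='proposals/' + out_name)
Example #2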
def demo(image_name, color_space_list=None, ks=None, sim_feats_list=None, net='vgg16', cpu_mode=True):
    ''' Object Recognition Demo : Selective Search + RCNN

    Parameters
    ----------
    image_name : filename (without extension) of an image stored in 'Data/img'

    color_space_list : list of colorspaces to use. Refer to color_utils for the list of supported colorspaces.
    Default : ['HSV', 'LAB']

    ks : list of felzenszwalb scale/threshold values, each also used as the minimum segment size.
    Default : [50, 100]

    sim_feats_list : list of similarity-feature sets to run selective search with.
    Default : one set with color, texture, size and fill, and one with the color histogram dropped.
    '''

    blob_array = []
    priority = []
    img = plt.imread('Data/img/' + image_name + '.jpg')
    seg_dir = 'Data/segments/'
    if color_space_list is None:
        color_space_list = ['HSV', 'LAB']
    if ks is None:
        ks = [50, 100]
    if sim_feats_list is None:
        sim_feats_list = [[sf.color_hist_sim(), sf.texture_hist_sim(), sf.size_sim(img.shape), sf.fill_sim(img.shape)],
                          [sf.texture_hist_sim(), sf.size_sim(img.shape), sf.fill_sim(img.shape)]]

    cc = convert_colorspace(img, color_space_list)

    for i in range(len(color_space_list)):
        for j in range(len(ks)):
            for k in range(len(sim_feats_list)):
                _img = cc[i]
                # Reuse a cached segmentation mask if one exists, otherwise compute and save it.
                _file = "%s%s/%d/%s.mat" % (seg_dir, color_space_list[i].upper(), ks[j], image_name)
                if not os.path.exists(_file):
                    segment_mask = felzenszwalb(_img, scale=ks[j], sigma=0.8, min_size=ks[j])
                    _temp_dict = dict()
                    _temp_dict['blobIndIm'] = segment_mask + 1  # segment labels shifted to start at 1
                    scipy.io.savemat(_file, _temp_dict)
                _blob_array = ssearch._ssearch(_img, ssearch.load_segment_mask(_file), sim_feats=sim_feats_list[k])
                blob_array.append(_blob_array)
                priority.append(np.arange(len(_blob_array), 0, -1).clip(0, (len(_blob_array) + 1) // 2))

    bboxes = ssearch.remove_duplicate(blob_array, priority)
    bbox_dict = {}
    # Select and reorder the box-coordinate columns before saving for the R-CNN stage.
    bbox_dict['boxes'] = np.vstack([np.asarray(bboxes)[:, 2], np.asarray(bboxes)[:, 1],
                                    np.asarray(bboxes)[:, 4], np.asarray(bboxes)[:, 3]]).T
    print('\nComputed %d proposals' % (len(bboxes)))
    scipy.io.savemat('Data/Boxes/' + image_name + '.mat', bbox_dict)
    rcnn.rcnn_demo(image_name, net=net, cpu_mode=cpu_mode)
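A hypothetical invocation (the image name is made up; it assumes the image exists under 'Data/img/', the segment and box directories exist, and the R-CNN models used by rcnn.rcnn_demo are already set up):

# Illustrative only: '000015' stands in for any image in Data/img/.
demo('000015')                                        # defaults: HSV + LAB, k in {50, 100}
demo('000015', ks=[100], net='vgg16', cpu_mode=True)  # single segmentation scale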
Example #3
def ssearch_fast(filename, seg_dir="/home/sai/Documents/selective_search/VOC2007/segments/", save_name=None):

    """ Helper function for calling ssearch in fast mode """
    a = time.time()
    blob_array = []
    priority = []
    img = plt.imread(filename)
    cc = convert_colorspace(img, ["hsv", "LAB"])

    seg_dir = "/home/sai/Documents/selective_search/VOC2007/segments/"
    seg_filename = [
        seg_dir + "HSV/50/" + filename[-10:-4] + ".mat",
        seg_dir + "HSV/100/" + filename[-10:-4] + ".mat",
        seg_dir + "LAB/50/" + filename[-10:-4] + ".mat",
        seg_dir + "LAB/100/" + filename[-10:-4] + ".mat",
    ]

    for i, _file in enumerate(seg_filename):
        _img = cc[i // 2]  # two segmentation scales (50, 100) per colorspace
        _blob_array = _ssearch(
            _img,
            load_segment_mask(_file),
            sim_feats=[sf.color_hist_sim(), sf.texture_hist_sim(), sf.size_sim(img.shape), sf.fill_sim(img.shape)],
        )
        blob_array.append(_blob_array)
        priority.append(np.arange(len(_blob_array), 0, -1).clip(0, (len(_blob_array) + 1) // 2))

        _blob_array = _ssearch(
            _img,
            load_segment_mask(_file),
            sim_feats=[sf.texture_hist_sim(), sf.size_sim(img.shape), sf.fill_sim(img.shape)],
        )
        blob_array.append(_blob_array)
        priority.append(np.arange(len(_blob_array), 0, -1).clip(0, (len(_blob_array) + 1) // 2))

    bboxes = remove_duplicate(blob_array, priority)
    print("Computed %d proposals" % (len(bboxes)))
    if save_name is not None:
        np.savetxt(save_name + ".txt", bboxes)
    print("Time taken: %f" % (time.time() - a))
Example #4
def segment_image(image, color_space_list=None, ks=None):
    if color_space_list is None:
        color_space_list = ['HSV', 'LAB']
    if ks is None:
        ks = [50, 100]

    blob_array = []
    priority = []
    seg_masks = []  # segmentation masks are collected here but not returned
    converted_images = convert_colorspace(image, color_space_list)
    sim_feats_list = [sf.color_hist_sim(), sf.texture_hist_sim(),
                      sf.size_sim(image.shape), sf.fill_sim(image.shape)]
    for img in converted_images:
        for j in ks:
            print("segmenting", j)
            segmented_mask = felzenszwalb(img, scale=j, sigma=0.8, min_size=j)
            print("blobbing", j)
            blobs = ssearch._ssearch(img, segmented_mask, sim_feats=sim_feats_list)
            blob_array.append(blobs)
            priority.append(np.arange(len(blobs), 0, -1).clip(0, (len(blobs) + 1) // 2))
            seg_masks.append(segmented_mask)
    blob_array = ssearch.remove_duplicate(blob_array, priority)
    return blob_array
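A minimal driver for segment_image (the image path is hypothetical; it assumes convert_colorspace, sf, ssearch and felzenszwalb are imported at module level, as the example implies):

import matplotlib.pyplot as plt

# Hypothetical input path; any RGB image will do.
image = plt.imread('Data/img/000015.jpg')
proposals = segment_image(image)        # HSV + LAB, k in {50, 100}
print('%d proposals after de-duplication' % len(proposals))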
Example #5
def demo(image_name,
         color_space_list=None,
         ks=None,
         sim_feats_list=None,
         net='vgg16',
         cpu_mode=True):
    ''' Object Recognition Demo : Selective Search + RCNN

    Parameters
    ----------
    image_name : filename (without extension) of an image stored in 'Data/img'

    color_space_list : list of colorspaces to use. Refer to color_utils for the list of supported colorspaces.
    Default : ['HSV', 'LAB']

    ks : list of felzenszwalb scale/threshold values, each also used as the minimum segment size.
    Default : [50, 100]

    sim_feats_list : list of similarity-feature sets to run selective search with.
    Default : one set with color, texture, size and fill, and one with the color histogram dropped.
    '''

    blob_array = []
    priority = []
    img = plt.imread('Data/img/' + image_name + '.jpg')
    seg_dir = 'Data/segments/'
    if color_space_list is None: color_space_list = ['HSV', 'LAB']
    if ks is None: ks = [50, 100]
    if sim_feats_list is None:
        sim_feats_list = [[sf.color_hist_sim(), sf.texture_hist_sim(), sf.size_sim(img.shape), sf.fill_sim(img.shape)],
                          [sf.texture_hist_sim(), sf.size_sim(img.shape), sf.fill_sim(img.shape)]]

    cc = convert_colorspace(img, color_space_list)

    for i in range(len(color_space_list)):
        for j in range(len(ks)):
            for k in range(len(sim_feats_list)):
                _img = cc[i]
                _file = "%s%s/%d/%s.mat" % (
                    seg_dir, color_space_list[i].upper(), ks[j], image_name)
                if not os.path.exists(_file):
                    segment_mask = felzenszwalb(_img,
                                                scale=ks[j],
                                                sigma=0.8,
                                                min_size=ks[j])
                    _temp_dict = dict()
                    _temp_dict['blobIndIm'] = segment_mask + 1
                    scipy.io.savemat(_file, _temp_dict)
                _blob_array = ssearch._ssearch(
                    _img,
                    ssearch.load_segment_mask(_file),
                    sim_feats=sim_feats_list[k])
                blob_array.append(_blob_array)
                priority.append(
                    np.arange(len(_blob_array), 0, -1).clip(0, (len(_blob_array) + 1) // 2))

    bboxes = ssearch.remove_duplicate(blob_array, priority)
    bbox_dict = {}
    # Select and reorder the box-coordinate columns before saving for the R-CNN stage.
    bbox_dict['boxes'] = np.vstack([np.asarray(bboxes)[:, 2], np.asarray(bboxes)[:, 1],
                                    np.asarray(bboxes)[:, 4], np.asarray(bboxes)[:, 3]]).T
    print('\nComputed %d proposals' % (len(bboxes)))
    scipy.io.savemat('Data/Boxes/' + image_name + '.mat', bbox_dict)
    rcnn.rcnn_demo(image_name, net=net, cpu_mode=cpu_mode)
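The demo hands its proposals to the R-CNN stage via a MATLAB file; a short sketch of inspecting that output (the image name is illustrative, and reading the reordered columns as [x1, y1, x2, y2] is an assumption based on the column swap above, not something the original code states):

import scipy.io

boxes = scipy.io.loadmat('Data/Boxes/000015.mat')['boxes']
print(boxes.shape)      # (num_proposals, 4)
print(boxes[:3])        # first few proposals, assumed [x1, y1, x2, y2]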