import os

import cv2
import numpy as np
import pyprind
import scipy.stats
import skimage.measure
import skimage.morphology

import caffe

# The remaining names used below (settings, utils, cub_utils, rects,
# datastore, CNN_Features_CAFFE_REFERENCE, Part, Parts and the
# CUB_200_2011 dataset classes) come from this repository's own modules.


def main(model_name, iteration, storage_name):
    # Cache fc/conv-layer features of the named Caffe snapshot for every
    # cropped CUB image into the given datastore.
    name = '%s-%s' % (model_name, iteration)
    print settings.model(name), settings.pretrained(name)

    safe = datastore(settings.storage(storage_name))
    safe.super_name = 'features'
    safe.sub_name = name

    layer_names = ['fc7', 'fc6', 'pool5', 'conv5', 'conv4', 'conv3']
    layer_dims = [4096, 4096, 9216, 43264, 64896, 64896]

    net = caffe.Classifier(settings.model(name), settings.pretrained(name),
                           mean=np.load(settings.ILSVRC_MEAN),
                           channel_swap=(2, 1, 0), raw_scale=255)
    net.set_mode_gpu()
    net.set_phase_test()

    cub = CUB_200_2011(settings.CUB_ROOT)
    dataset_size = sum(1 for _ in cub.get_all_images())

    # One (dataset_size x dim) feature matrix per layer.
    instance = {}
    for layer, dim in zip(layer_names, layer_dims):
        instance[layer] = np.zeros((dataset_size, dim))
        print instance[layer].shape

    for i, info in enumerate(cub.get_all_images(cropped=True)):
        print info['img_id']
        img = caffe.io.load_image(info['img_file'])
        net.predict([img], oversample=False)
        for layer in layer_names:
            instance[layer][i, :] = net.blobs[layer].data[0].flatten()

    for layer in layer_names:
        safe.save_large_instance(
            safe.get_instance_path(safe.super_name, safe.sub_name,
                                   'feat_cache_%s' % layer),
            instance[layer], 4)
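
# For context, a minimal sketch of how this entry point might be driven;
# the snapshot name, iteration count and storage key are hypothetical
# placeholders, not names taken from this repository:
#
#     main('cub_ft', 100000, 'cub_features')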

def main(out_path, part, random_state, pgs, net_name):
    # Crop an estimated part rectangle out of every CUB image using the
    # random-forest rect generator and write the crops under out_path.
    utils.ensure_dir(out_path)
    cub = CUB_200_2011(settings.CUB_ROOT)
    lfrg = rects.BerkeleyRG(settings.BERKELEY_ANNOTATION_BASE_PATH, cub, part)
    RG = rects.RandomForestRG(
        datastore(settings.storage('rf')), lfrg,
        cub_utils.DeepHelper.get_custom_net(settings.model(net_name),
                                            settings.pretrained(net_name)),
        net_name, cub, random_state=random_state, point_gen_strategy=pgs,
        use_seg=True, pt_n_part=20, pt_n_bg=100)
    RG.setup()

    for i, image in enumerate(cub.get_all_images()):
        print i
        image_path = image['img_file']
        img_id = int(image['img_id'])
        rel_image_path = image['img_file_rel']

        o_image = cv2.imread(image_path)
        rect = RG.generate(img_id)
        t_img_part = rect.get_rect(o_image)

        out_image_path = os.path.join(out_path, rel_image_path)
        utils.ensure_dir(os.path.dirname(out_image_path))
        cv2.imwrite(out_image_path, t_img_part)

    print 'Done'
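
# A plausible invocation, sketched with placeholder values for the output
# directory, part name, random seed, point-generation strategy and net
# name (none of these are repository defaults):
#
#     main('/tmp/cub_head_crops', 'head', 313, 'uniform', 'cub_ft-100000')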

def compute_estimated_part_data(model_name, shape, IDS, part_names_to_filter,
                                add_noise, noise_std_c, noise_std_d):
    # Extract feat_layer features from the (optionally jittered) part
    # rectangle of each image. Relies on module-level globals: to_oracle,
    # all_parts_cub, estimated_test_parts, all_image_infos and feat_layer.
    net = caffe.Classifier(settings.model(model_name),
                           settings.pretrained(model_name),
                           mean=np.load(settings.ILSVRC_MEAN),
                           channel_swap=(2, 1, 0), raw_scale=255)
    net.set_phase_test()
    net.set_mode_gpu()

    # Compute estimated part data.
    new_Xtest_part = np.zeros(shape)
    for i, t_id in enumerate(IDS):
        if to_oracle:
            t_parts = all_parts_cub.for_image(t_id)
        else:
            t_parts = estimated_test_parts.for_image(t_id)

        t_img_addr = all_image_infos[t_id]
        t_img = caffe.io.load_image(t_img_addr)

        t_parts_part = t_parts.filter_by_name(part_names_to_filter)
        t_img_part = t_parts_part.get_rect(t_img, add_noise=add_noise,
                                           noise_std_c=noise_std_c,
                                           noise_std_d=noise_std_d)
        try:
            net.predict([t_img_part], oversample=False)
        except Exception:
            # Dump diagnostics, then re-raise with the original traceback.
            print '------', t_id, '----------'
            print part_names_to_filter
            print t_img_addr
            print '------------'
            print t_img.shape
            print t_parts
            print '------------'
            print t_img_part.shape
            print t_parts_part
            raise
        new_Xtest_part[i, :] = net.blobs[feat_layer].data[0].flatten()

    return new_Xtest_part
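
# The add_noise / noise_std_c / noise_std_d arguments above are forwarded
# to get_rect, whose implementation is not shown here. A plausible reading
# is Gaussian jitter on the rectangle's centre (noise_std_c) and on its
# width/height (noise_std_d); the self-contained sketch below illustrates
# that interpretation only and is not the repository's actual get_rect
# logic.
def jitter_rect_sketch(xmin, ymin, xmax, ymax, noise_std_c, noise_std_d,
                       rng=np.random):
    # Perturb the box centre with N(0, noise_std_c) and the box
    # dimensions with N(0, noise_std_d), clamping to a minimum size.
    cx, cy = (xmin + xmax) / 2.0, (ymin + ymax) / 2.0
    w = max(1.0, (xmax - xmin) + rng.normal(0, noise_std_d))
    h = max(1.0, (ymax - ymin) + rng.normal(0, noise_std_d))
    cx += rng.normal(0, noise_std_c)
    cy += rng.normal(0, noise_std_c)
    return (cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2)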

def compute_estimated_part_data(model_name, shape, IDS, model_rf):
    # Localize the part with a random-forest pixel classifier, crop the
    # detected rectangle and extract feat_layer features. Relies on
    # module-level globals: dh (DeepHelper), dense_points,
    # all_image_infos and feat_layer.
    net = caffe.Classifier(settings.model(model_name),
                           settings.pretrained(model_name),
                           mean=np.load(settings.ILSVRC_MEAN),
                           channel_swap=(2, 1, 0), raw_scale=255)
    net.set_phase_test()
    net.set_mode_gpu()

    # Compute estimated part data.
    new_Xtest_part = np.zeros(shape)
    for i, t_id in enumerate(IDS):
        print i
        img = caffe.io.load_image(all_image_infos[t_id])
        dh.init_with_image(img)
        X = dh.features(dense_points)

        # Per-pixel part probability over the 227x227 network input grid.
        preds_prob = model_rf.predict_proba(X)
        max_prob = np.max(preds_prob[:, 1])
        preds_prob = preds_prob[:, 1].reshape((227, 227)).T

        # Threshold at half the peak probability, then clean the mask up.
        preds = preds_prob >= (max_prob / 2)
        preds = skimage.morphology.closing(preds,
                                           skimage.morphology.square(10))
        preds = skimage.morphology.remove_small_objects(preds, min_size=10,
                                                        connectivity=1)

        # Keep the largest connected component (this skimage version labels
        # background pixels -1) and take its bounding box.
        L, N = skimage.measure.label(preds, return_num=True, background=0)
        L_no_bg = L[L != -1].flatten()
        vals, counts = scipy.stats.mode(L_no_bg)
        part_label = int(vals[0])
        indices = np.where(L == part_label)
        xmin, xmax = indices[0].min(), indices[0].max()
        ymin, ymax = indices[1].min(), indices[1].max()

        # Map the 227x227 box back to the original image size and crop.
        pmin = Part(-1, '?', -1, xmin, ymin, 1)
        pmax = Part(-1, '?', -1, xmax, ymax, 1)
        rect_parts = Parts(parts=[pmin, pmax])
        rect_parts.denorm_for_size(img.shape[0], img.shape[1], size=227)
        rect_info = (rect_parts[0].x, rect_parts[1].x,
                     rect_parts[0].y, rect_parts[1].y)
        t_img_part = Parts().get_rect(img, rect_info=rect_info)

        try:
            net.predict([t_img_part], oversample=False)
        except Exception:
            # Note: on failure the blob still holds the previous image's
            # features, which are stored below.
            print '------', t_id, '----------'
        new_Xtest_part[i, :] = net.blobs[feat_layer].data[0].flatten()

    return new_Xtest_part
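
# The probability-map post-processing above (threshold at half the peak,
# morphological closing, small-object removal, largest connected
# component, bounding box) can be exercised in isolation. This demo runs
# it on synthetic data; it assumes a skimage version where background
# pixels are labelled 0 (the code above targets an older release that
# used -1) and picks the largest component with np.bincount instead of
# scipy.stats.mode.
def _demo_heatmap_to_bbox():
    heat = 0.05 * np.ones((227, 227))
    heat[60:120, 80:150] = 0.9  # synthetic "part" response blob

    mask = heat >= heat.max() / 2
    mask = skimage.morphology.closing(mask, skimage.morphology.square(10))
    mask = skimage.morphology.remove_small_objects(mask, min_size=10,
                                                   connectivity=1)

    labels = skimage.measure.label(mask, background=0)
    fg = labels[labels > 0]
    part_label = np.bincount(fg).argmax()  # most frequent = largest blob
    rows, cols = np.where(labels == part_label)
    return rows.min(), rows.max(), cols.min(), cols.max()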

def main(sname, iteration, cropped, full, flipped, force, dataset,
         storage_name):
    # Extract reference-CNN features for the chosen CUB variant, with a
    # progress bar over the whole dataset.
    new_name = '%s-%d' % (sname, iteration)

    if dataset == 'segmented':
        cub = CUB_200_2011_Segmented(settings.CUB_ROOT, full=full)
    elif dataset == 'part-head':
        cub = CUB_200_2011_Parts_Head(settings.CUB_ROOT, full=full)
    elif dataset == 'part-body':
        cub = CUB_200_2011_Parts_Body(settings.CUB_ROOT, full=full)
    elif dataset == 'part-head-rf-new':
        cub = CUB_200_2011(settings.CUB_ROOT, 'images_head_rf_new')
    elif dataset == 'part-body-rf-new':
        cub = CUB_200_2011(settings.CUB_ROOT, 'images_body_rf_new')
    else:
        cub = CUB_200_2011(settings.CUB_ROOT, images_folder_name=dataset,
                           full=full)

    if not storage_name:
        ft_storage = datastore(settings.storage(new_name))
    else:
        ft_storage = datastore(settings.storage(storage_name))

    ft_extractor = CNN_Features_CAFFE_REFERENCE(
        ft_storage, model_file=settings.model(new_name),
        pretrained_file=settings.pretrained(new_name), full=full,
        crop_index=0)

    number_of_images_in_dataset = sum(1 for _ in cub.get_all_images())
    bar = pyprind.ProgBar(number_of_images_in_dataset, width=80)
    for t, des in ft_extractor.extract_all(cub.get_all_images(),
                                           flip=flipped, crop=cropped,
                                           bbox=cub.get_bbox(), force=force):
        bar.update()

    print 'DONE'
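
# A plausible invocation, again with placeholder values: cropped,
# un-flipped features for the segmented CUB variant using the 'cub_ft'
# snapshot at iteration 100000. With a falsy storage_name, the features
# land under the 'cub_ft-100000' storage key.
#
#     main('cub_ft', 100000, True, False, False, False, 'segmented', '')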