def main():
    from common import get_train_imgpaths
    import jsk_apc2015_common
    rospy.init_node('extract_color_histogram')
    all_objects = jsk_apc2015_common.get_object_list()
    color_space_param = rospy.get_param('~color_space', 'lab')
    if color_space_param == 'rgb':
        colors = ['red', 'green', 'blue']
    elif color_space_param == 'lab':
        colors = ['l']
    else:
        raise ValueError('Unknown color space: {}'.format(color_space_param))
    rospy.loginfo('color space: {c}'.format(c=color_space_param))
    # default to the string 'all' so that the split below always works
    object_param = rospy.get_param('~object', 'all')
    object_nms = object_param.split(',')
    if len(object_nms) == 1 and object_nms[0] == 'all':
        object_nms = all_objects
    rospy.loginfo('objects: {obj}'.format(obj=object_nms))
    for object_nm in object_nms:
        if object_nm not in all_objects:
            rospy.logwarn('Unknown object, skipping: {}'.format(object_nm))
        else:
            imgpaths = get_train_imgpaths(object_nm)
            raw_paths, mask_paths = zip(*imgpaths)
            for color in colors:
                e = ExtractColorHistogram(
                    object_nm=object_nm, color=color,
                    raw_paths=raw_paths, mask_paths=mask_paths)
                e.extract_and_save()
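# A hedged sketch (assumption; ExtractColorHistogram's internals are not shown
# here) of computing a single-channel color histogram restricted to an object
# mask with OpenCV, roughly the kind of feature the node above extracts.
import cv2
import numpy as np


def masked_channel_histogram(raw_path, mask_path, channel=0, bins=256):
    img = cv2.imread(raw_path)
    mask = cv2.imread(mask_path, 0)
    mask = (mask > 127).astype(np.uint8) * 255  # OpenCV expects an 8-bit mask
    hist = cv2.calcHist([img], [channel], mask, [bins], [0, 256])
    return hist.ravel() / hist.sum()  # normalize to a probability distribution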
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("bof_histogram")
    args = parser.parse_args(sys.argv[1:])
    print("loading bof histogram")
    with gzip.open(args.bof_histogram, "rb") as f:
        obj_hists = pickle.load(f)
    target_names = jsk_apc2015_common.get_object_list()
    # create train and test data
    X, y = [], []
    for i, obj_name in enumerate(target_names):
        X.append(obj_hists[obj_name])
        y += [i] * len(obj_hists[obj_name])
    X = np.vstack(X)
    normalize(X, copy=False)
    y = np.array(y)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, random_state=np.random.randint(1234))
    # train and test
    lgr = LogisticRegression()
    print("fitting LogisticRegression")
    lgr.fit(X_train, y_train)
    with gzip.open("lgr.pkl.gz", "wb") as f:
        pickle.dump(lgr, f)
    y_pred = lgr.predict(X_test)
    print("score lgr: {}".format(accuracy_score(y_test, y_pred)))
    print(classification_report(y_test, y_pred, target_names=target_names))
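# A minimal sketch (not part of the original scripts) of how the classifier
# saved above as 'lgr.pkl.gz' could be loaded and applied to a new BoF
# histogram; the function name and variables here are illustrative assumptions.
import gzip
import pickle

import numpy as np
from sklearn.preprocessing import normalize


def classify_bof_histogram(hist, lgr_path='lgr.pkl.gz'):
    """Return the predicted class index for a single BoF histogram."""
    with gzip.open(lgr_path, 'rb') as f:
        lgr = pickle.load(f)
    # same L2 normalization as applied at training time
    X = normalize(np.atleast_2d(hist).astype(float))
    return int(lgr.predict(X)[0])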
def get_object_sizes(data_dir):
    cache_file = 'object_sizes.pkl'
    if osp.exists(cache_file):
        return pickle.load(open(cache_file, 'rb'))
    img_shape = None
    objects = jsk_apc2015_common.get_object_list()
    df = []
    for obj in objects:
        mask_files = os.listdir(osp.join(data_dir, obj, 'masks'))
        for f in mask_files:
            if f.startswith('NP'):
                continue
            mask = cv2.imread(osp.join(data_dir, obj, 'masks', f), 0)
            if img_shape is None:
                img_shape = mask.shape
            else:
                assert img_shape == mask.shape
            mask = (mask > 127).astype(int)
            size = mask.sum()
            df.append([objects.index(obj), obj, f, size])
    df = pd.DataFrame(df)
    df.columns = ['object_index', 'object_name', 'fname', 'size']
    pickle.dump(df, open(cache_file, 'wb'))
    return df
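# A hedged usage sketch (assumption, not from the original code): summarize the
# per-mask pixel counts returned by get_object_sizes() into a mean size per
# object; 'data' is an assumed directory layout.
sizes = get_object_sizes(data_dir='data')
mean_sizes = sizes.groupby('object_name')['size'].mean().sort_values()
print(mean_sizes)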
def _predict(self, img_msg, label_msg):
    # convert image
    bridge = cv_bridge.CvBridge()
    input_image = bridge.imgmsg_to_cv2(img_msg, 'rgb8')
    input_label = bridge.imgmsg_to_cv2(label_msg)
    # predict
    region_imgs = []
    for l in np.unique(input_label):
        if l == 0:  # bg_label
            continue
        mask = (input_label == l)
        region = jsk_recognition_utils.bounding_rect_of_mask(
            input_image, mask)
        region_imgs.append(region)
    y_proba = self.estimator.predict(region_imgs)
    target_names = np.array(jsk_apc2015_common.get_object_list())
    y_pred = np.argmax(y_proba, axis=-1)
    label_proba = [p[i] for p, i in zip(y_proba, y_pred)]
    # prepare message
    res = ClassificationResult()
    res.header = img_msg.header
    res.labels = y_pred
    res.label_names = target_names[y_pred]
    res.label_proba = label_proba
    res.probabilities = y_proba.reshape(-1)
    res.classifier = '<jsk_2015_05_baxter_apc.ColorHistogramFeatures>'
    res.target_names = target_names
    self._pub.publish(res)
def main():
    rospy.init_node('extract_sift')
    obj_names = jsk_apc2015_common.get_object_list()
    for obj_name in obj_names:
        if load_siftdata(obj_name, dry_run=True):
            continue  # already extracted
        extract_sift(obj_name)
def visualize_stow_contents(work_order):
    from jsk_apc2015_common.util import rescale
    tote_img = cv2.imread(osp.join(PKG_PATH, 'models/tote/image.jpg'))
    object_list = jsk_apc2015_common.get_object_list()
    object_imgs = {}
    for obj in object_list:
        img_path = osp.join(
            OLD_PKG_PATH, 'models/{obj}/image.jpg'.format(obj=obj))
        img = cv2.imread(img_path)
        h, w = img.shape[:2]
        if h > w:
            img = np.rollaxis(img, 1)
        object_imgs[obj] = img
    # draw object images on tote image
    tote_region = [[190, 230], [1080, 790]]
    region_h = tote_region[1][1] - tote_region[0][1]
    region_w = tote_region[1][0] - tote_region[0][0]
    max_obj_h, max_obj_w = region_h / 3, region_w / 4
    tote_x_min, tote_y_min = tote_region[0][0], tote_region[0][1]
    x_min, y_min = tote_x_min, tote_y_min
    for obj in work_order:
        obj_img = object_imgs[obj]
        scale_h = 1. * max_obj_h / obj_img.shape[0]
        scale_w = 1. * max_obj_w / obj_img.shape[1]
        scale = min([scale_h, scale_w])
        obj_img = rescale(obj_img, scale)
        obj_h, obj_w = obj_img.shape[:2]
        x_max, y_max = x_min + obj_w, y_min + obj_h
        tote_img[y_min:y_max, x_min:x_max] = obj_img
        x_min += max_obj_w
        if x_max >= region_w:
            x_min = tote_x_min
            y_min += max_obj_h
    return tote_img
def spin_once(self):
    if self.bof_data is None or self.cfeature is None:
        return
    stamp, bof_objects_proba = self.bof_data
    stamp, cfeature_objects_proba = self.cfeature
    weight = self.weight
    target_bin = rospy.get_param('target_bin', None)
    object_list = jsk_apc2015_common.get_object_list()
    all_proba = [
        (o, (weight[o]['bof'] * bof_objects_proba[o]) +
            (weight[o]['color'] * cfeature_objects_proba[o]))
        for o in object_list
    ]
    # verification result for debug
    candidates = self.bin_contents.get(target_bin, None)
    if candidates is None:
        candidates = object_list
    matched = sorted(all_proba, key=lambda x: x[1])[-1][0]
    # compose msg
    msg = ObjectRecognition()
    msg.header.stamp = stamp
    msg.matched = matched
    msg.probability = dict(all_proba)[matched] / sum(dict(all_proba).values())
    msg.candidates = candidates
    msg.probabilities = np.array([dict(all_proba)[c] for c in candidates])
    msg.probabilities /= msg.probabilities.sum()
    self.pub_debug.publish(msg)
    # verification result with json target
    if target_bin is None or target_bin == '':
        return
    proba = [
        (c, (weight[c]['bof'] * bof_objects_proba[c]) +
            (weight[c]['color'] * cfeature_objects_proba[c]))
        for c in candidates
    ]
    matched = sorted(proba, key=lambda x: x[1])[-1][0]
    # compose msg
    msg = ObjectRecognition()
    msg.header.stamp = stamp
    msg.matched = matched
    msg.probability = dict(proba)[matched] / sum(dict(proba).values())
    msg.candidates = candidates
    msg.probabilities = np.array([dict(proba)[c] for c in candidates])
    msg.probabilities /= msg.probabilities.sum()
    self.pub.publish(msg)
def get_sift_descriptors(n_imgs=None, data_dir=None):
    objects = jsk_apc2015_common.get_object_list()
    obj_descs = []
    for obj in objects:
        descs = load_siftdata(obj_name=obj, return_pos=False,
                              data_dir=data_dir)
        if descs is None:
            continue
        if n_imgs is None:
            n_imgs = len(descs)
        p = np.random.randint(0, len(descs), size=n_imgs)
        descs = np.array(map(lambda x: x.astype('float16'), descs))
        obj_descs.append((obj, descs[p]))
    return obj_descs
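# A hedged sketch (assumption, not the project's actual pipeline code) of how
# the (object, descriptors) pairs returned above could be pooled to train a
# bag-of-features codebook with scikit-learn's MiniBatchKMeans; it assumes each
# sampled entry holds an (n_keypoints, 128) SIFT descriptor array.
import numpy as np
from sklearn.cluster import MiniBatchKMeans

obj_descs = get_sift_descriptors(n_imgs=5)
all_descs = np.vstack(
    [np.vstack(list(descs)) for _, descs in obj_descs]).astype(np.float32)
# 128 visual words is an arbitrary choice for this sketch
kmeans = MiniBatchKMeans(n_clusters=128, batch_size=1000)
kmeans.fit(all_descs)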
def spin_once(self):
    if self.bof_data is None or self.cfeature is None:
        return
    stamp, bof_objects_proba = self.bof_data
    stamp, cfeature_objects_proba = self.cfeature
    weight = self.weight
    target_bin = rospy.get_param('target_bin', None)
    object_list = jsk_apc2015_common.get_object_list()
    all_proba = [(o, (weight[o]['bof'] * bof_objects_proba[o]) +
                     (weight[o]['color'] * cfeature_objects_proba[o]))
                 for o in object_list]
    # verification result for debug
    candidates = self.bin_contents.get(target_bin, None)
    if candidates is None:
        candidates = object_list
    matched = sorted(all_proba, key=lambda x: x[1])[-1][0]
    # compose msg
    msg = ObjectRecognition()
    msg.header.stamp = stamp
    msg.matched = matched
    msg.probability = dict(all_proba)[matched] / sum(
        dict(all_proba).values())
    msg.candidates = candidates
    msg.probabilities = np.array([dict(all_proba)[c] for c in candidates])
    msg.probabilities /= msg.probabilities.sum()
    self.pub_debug.publish(msg)
    # verification result with json target
    if target_bin is None or target_bin == '':
        return
    proba = [(c, (weight[c]['bof'] * bof_objects_proba[c]) +
                 (weight[c]['color'] * cfeature_objects_proba[c]))
             for c in candidates]
    matched = sorted(proba, key=lambda x: x[1])[-1][0]
    # compose msg
    msg = ObjectRecognition()
    msg.header.stamp = stamp
    msg.matched = matched
    msg.probability = dict(proba)[matched] / sum(dict(proba).values())
    msg.candidates = candidates
    msg.probabilities = np.array([dict(proba)[c] for c in candidates])
    msg.probabilities /= msg.probabilities.sum()
    self.pub.publish(msg)
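# A standalone sketch (illustrative assumption, not the node's code) of the
# weighted fusion performed in spin_once() above: per-object probabilities from
# the BoF and color-histogram classifiers are combined with per-object weights
# and renormalized over the candidate set.
import numpy as np


def fuse_proba(bof_proba, color_proba, weight, candidates):
    """Return (matched_object, normalized_probabilities) for the candidates."""
    fused = np.array([
        weight[c]['bof'] * bof_proba[c] + weight[c]['color'] * color_proba[c]
        for c in candidates])
    matched = candidates[int(fused.argmax())]
    return matched, fused / fused.sum()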
def visualize_stow_contents(work_order):
    """Visualize stow contents with passed work order.

    Args:
        work_order (list): objects in the stow.

    Returns:
        tote_img (~numpy.ndarray): image of objects over the tote.
    """
    from jsk_apc2015_common.util import rescale
    rp = rospkg.RosPack()
    pkg_path = rp.get_path(PKG)
    tote_img = cv2.imread(osp.join(pkg_path, 'models/tote/image.jpg'))
    object_list = jsk_apc2015_common.get_object_list()
    object_imgs = {}
    pkg_path = rp.get_path('jsk_apc2015_common')
    for obj in object_list:
        img_path = osp.join(
            pkg_path, 'models/{obj}/image.jpg'.format(obj=obj))
        img = cv2.imread(img_path)
        h, w = img.shape[:2]
        if h > w:
            img = np.rollaxis(img, 1)
        object_imgs[obj] = img
    # draw object images on tote image
    tote_region = [[190, 230], [1080, 790]]
    region_h = tote_region[1][1] - tote_region[0][1]
    region_w = tote_region[1][0] - tote_region[0][0]
    max_obj_h, max_obj_w = region_h / 3, region_w / 4
    tote_x_min, tote_y_min = tote_region[0][0], tote_region[0][1]
    x_min, y_min = tote_x_min, tote_y_min
    for obj in work_order:
        obj_img = object_imgs[obj]
        scale_h = 1. * max_obj_h / obj_img.shape[0]
        scale_w = 1. * max_obj_w / obj_img.shape[1]
        scale = min([scale_h, scale_w])
        obj_img = rescale(obj_img, scale)
        obj_h, obj_w = obj_img.shape[:2]
        x_max, y_max = x_min + obj_w, y_min + obj_h
        tote_img[y_min:y_max, x_min:x_max] = obj_img
        x_min += max_obj_w
        if x_max >= region_w:
            x_min = tote_x_min
            y_min += max_obj_h
    return tote_img
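# A hedged usage sketch: the work order below is just an example slice of
# get_object_list(); the call itself follows the function above, and the output
# filename is an arbitrary assumption.
work_order = jsk_apc2015_common.get_object_list()[:6]
viz = visualize_stow_contents(work_order)
cv2.imwrite('stow_contents_viz.jpg', viz)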
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--year', type=int, default=2016)
    args = parser.parse_args()
    if args.year == 2015:
        import jsk_apc2015_common
        cls_names = ['background'] + jsk_apc2015_common.get_object_list()
    elif args.year == 2016:
        import jsk_apc2016_common
        data = jsk_apc2016_common.get_object_data()
        cls_names = ['background'] + [d['name'] for d in data]
    else:
        raise ValueError('Unsupported year: {}'.format(args.year))
    text = []
    for cls_id, cls_name in enumerate(cls_names):
        text.append('{:2}: {}'.format(cls_id, cls_name))
    print('\n'.join(text))
parser = argparse.ArgumentParser()
parser.add_argument('json_id')
args = parser.parse_args()
json_id = args.json_id

N = 2
target_bin = 'h'
abandon_objects = [
    'genuine_joe_plastic_stir_sticks',
    'cheezit_big_original',
    'rolodex_jumbo_pencil_cup',
    'champion_copper_plus_spark_plug',
    'oreo_mega_stuf',
]
objects = jsk_apc2015_common.get_object_list()

target_obj = None
while (target_obj is None) or (target_obj in abandon_objects):
    candidates = []
    for i in xrange(N):
        i_obj = np.random.randint(0, len(objects))
        candidates.append(objects[i_obj])
    i_target = np.random.randint(0, len(candidates))
    target_obj = candidates[i_target]

json_data = {
    'bin_contents': {
        'bin_{0}'.format(target_bin.upper()): candidates,
    },
    'work_order': [
def __init__(self):
    SiftMatcher.__init__(self)
    ObjectMatcher.__init__(self, '/semi/sift_matcher')
    self.object_list = jsk_apc2015_common.get_object_list()
    self.siftdata_cache = {}
def test_get_object_list():
    objects = jsk_apc2015_common.get_object_list()
    assert_equal(25, len(objects))
def __init__(self):
    self.file_name = 'rgb'
    self.object_names = jsk_apc2015_common.get_object_list()
    self.cfeatures = []
    self.labels = []