def _action_handle(function, params, image):
    """Dispatch the vision action named `function` on `image`.

    Checks the FEATURE_FUN, SEARCH_FUN and CLASSIFY_FUN registries in
    turn, then special-cases the texton, color-naming and face-detection
    actions.

    Args:
        function: Action name, e.g. 'see/texton' or a registry key.
        params: Dict of keyword arguments (used only by feature actions).
        image: Image array (BGR/OpenCV convention assumed — TODO confirm).

    Returns:
        JSON-serializable dict of results; {} when `function` is unknown.
    """
    print('Action[%s]' % function)
    # Use .get() for registry lookups: the previous try/except KeyError
    # around the whole handler call also swallowed KeyErrors raised
    # *inside* the handler (e.g. a missing key in `params`), silently
    # falling through to the next dispatch case.
    ff = FEATURE_FUN.get(function)
    if ff is not None:
        image = imfeat.resize_image_max_side(image, 320)  # TODO: Expose this
        return {'feature': ff(**params)(image).tolist()}
    sf = SEARCH_FUN.get(function)
    if sf is not None:
        image = imfeat.resize_image_max_side(image, 320)  # TODO: Expose this
        print(sf.feature)
        out = {'results': sf.analyze_cropped(image)}
        if function == 'see/search/masks':
            out['classes'] = CLASS_COLORS
        return out
    cf = CLASSIFY_FUN.get(function)
    if cf is not None:
        image = imfeat.resize_image_max_side(image, 320)  # TODO: Expose this
        return {'results': cf(image)}
    if function == 'see/texton' or function == 'see/texton_ilp':
        # Force width 320, keeping aspect ratio; TEXTON needs a contiguous array.
        image = cv2.resize(image, (320, int(image.shape[0] * 320. / image.shape[1])))
        image = np.ascontiguousarray(image)
        semantic_masks = TEXTON(image)
        if function == 'see/texton_ilp':
            ilp_pred = CLASSIFY_FUN['see/classify/indoor'](imfeat.resize_image_max_side(image, 320))
            # First bin whose edge is >= the prediction; fall back past the
            # last table column when the prediction exceeds every bin edge.
            try:
                bin_index = [x for x, y in enumerate(ILP_WEIGHTS['bins']) if y >= ilp_pred][0]
            except IndexError:
                bin_index = ILP_WEIGHTS['ilp_tables'].shape[1]
            if bin_index != 0:
                bin_index -= 1
            ilp_weights = ILP_WEIGHTS['ilp_tables'][:, bin_index]
            print('ILP Pred[%s] Weights[%s]' % (ilp_pred, ilp_weights))
            semantic_masks *= ilp_weights
        texton_argmax2 = np.argmax(semantic_masks, 2)
        image_string = imfeat.image_tostring(COLORS_BGR[texton_argmax2], 'png')
        out = {'argmax_pngb64': base64.b64encode(image_string)}
        out['classes'] = CLASS_COLORS
        return out
    if function == 'see/colors':
        image = cv2.resize(image, (320, int(image.shape[0] * 320. / image.shape[1])))
        image = np.ascontiguousarray(image)
        masks = COLOR_NAMING.make_feature_mask(image)
        mask_argmax = np.argmax(masks, 2)
        image_string = imfeat.image_tostring(COLOR_NAMING.color_values[mask_argmax], 'png')
        return {'argmax_pngb64': base64.b64encode(image_string)}
    if function == 'see/faces':
        results = [map(float, x) for x in FACES._detect_faces(image)]
        return {'faces': [{'tl_x': x[0], 'tl_y': x[1], 'width': x[2], 'height': x[3]}
                          for x in results]}
    return {}
Example #2
0
    def _feature(self, image):
        """Compute self.feature on `image` bounded to a 160px max side,
        printing the elapsed wall-clock time for profiling."""
        import time

        st = time.time()
        out = self.feature(imfeat.resize_image_max_side(image, 160))
        # Parenthesized so this works under both Python 2 and 3; the rest
        # of the file already uses print(...) call syntax.
        print(time.time() - st)
        return out
Example #3
0
def image():
    """Return the request's image, bounded to a 320px max side, as a
    base64-encoded JPEG in a JSON-serializable dict."""
    print_request()
    # TODO(Cleanup)
    img, params = _get_image()
    resized = imfeat.resize_image_max_side(img, 320)
    return {'jpgb64': base64.b64encode(imfeat.image_tostring(resized, 'jpg'))}
Example #4
0
 def feature(self, image):
     """Detect points on a max-side-bounded copy of `image`, optionally
     subsampling down to at most self.num_points points."""
     resized = imfeat.resize_image_max_side(image, self.max_side)
     points = self._feature(resized)
     if self.num_points is None:
         return points
     keep = min(self.num_points, len(points))
     return np.ascontiguousarray(random.sample(points, keep))
def image():
    """Serve the current request's image as a base64-encoded JPEG dict,
    resized so its longest side is at most 320 pixels."""
    print_request()
    # TODO(Cleanup)
    picture, params = _get_image()
    picture = imfeat.resize_image_max_side(picture, 320)
    encoded = imfeat.image_tostring(picture, 'jpg')
    return {'jpgb64': base64.b64encode(encoded)}
Example #6
0
 def load(self, proto_data, load_feature=True, load_hasher=True, load_index=True):
     """Populate this search index from a serialized SearchIndex protobuf.

     Args:
         proto_data: Serialized picarus.api.SearchIndex bytes.
         load_feature: If True, rebuild self.feature from the stored blob.
         load_hasher: If True, rebuild self.hasher from the stored blob.
         load_index: If True, rebuild self.index from the stored blob.

     Returns:
         self, to allow chained construction.
     """
     si = picarus.api.SearchIndex()
     si.ParseFromString(proto_data)
     # SECURITY: pickle.loads executes arbitrary code from proto_data —
     # only load indexes from trusted sources.
     loader = lambda x, y: pickle.loads(y) if x == si.PICKLE else call_import(json.loads(y))
     self.metadata = np.array(si.metadata)
     if load_index:
         self.index = loader(si.index_format, si.index)
     if load_hasher:
         self.hasher = loader(si.hash_format, si.hash)
     if load_feature:
         f = loader(si.feature_format, si.feature)
         # Wrap the raw feature so inputs are bounded to self.max_side first.
         self.feature = lambda y: f(imfeat.resize_image_max_side(y, self.max_side))
     return self
Example #7
0
File: _meta.py Project: bwhite/imfeat
 def __call__(self, image):
     """Apply every wrapped feature to `image` (optionally bounded to
     self._max_side) and return the normalized, stacked result vector."""
     if self._max_side is not None:
         image = imfeat.resize_image_max_side(image, self._max_side)
     normalizers = {
         None: lambda x: x,
         'dims': lambda x: x / float(x.size),
         'l1': lambda x: x / np.sum(x),
         'l2': lambda x: x / np.linalg.norm(x),
     }
     try:
         norm = normalizers[self._norm]
     except KeyError:
         raise ValueError('Unknown value for norm=%s' % self._norm)
     return np.hstack([norm(f(image)) for f in self._features])
Example #8
0
 def __call__(self, image):
     """Concatenate the normalized outputs of all wrapped features.

     Resizes first when self._max_side is set; the normalization scheme
     is selected by self._norm (None, 'dims', 'l1' or 'l2').
     """
     if self._max_side is not None:
         image = imfeat.resize_image_max_side(image, self._max_side)
     norm_name = self._norm
     if norm_name is None:
         def norm(x):
             return x
     elif norm_name == 'dims':
         def norm(x):
             return x / float(x.size)
     elif norm_name == 'l1':
         def norm(x):
             return x / np.sum(x)
     elif norm_name == 'l2':
         def norm(x):
             return x / np.linalg.norm(x)
     else:
         raise ValueError('Unknown value for norm=%s' % norm_name)
     parts = [norm(feature(image)) for feature in self._features]
     return np.hstack(parts)
 def map(self, name, image_data):
     """Decode, size-filter, bound, re-filter and re-encode one image.

     Yields (name, jpeg_bytes), or nothing when the image fails to
     decode or is smaller than self.filter_side on its short edge.
     """
     try:
         image = imfeat.image_fromstring(image_data)
     except:  # best-effort: count undecodable images and skip them
         hadoopy.counter('DATA_ERRORS', 'ImageLoadError')
         return
     min_side = min(image.shape[0], image.shape[1])
     if self.filter_side is not None and min_side < self.filter_side:
         hadoopy.counter('DATA_ERRORS', 'ImageTooSmallPre')
         return
     if self.max_side is not None:
         image = imfeat.resize_image_max_side(image, self.max_side)
         # Re-check after the resize may have shrunk the short edge.
         min_side = min(image.shape[0], image.shape[1])
     if self.filter_side is not None and min_side < self.filter_side:
         hadoopy.counter('DATA_ERRORS', 'ImageTooSmallPost')
         return
     yield name, imfeat.image_tostring(image, 'jpg')
 def map(self, name, image_data):
     """Normalize one (name, image_bytes) record: decode, filter tiny
     images, bound the max side, re-filter, and emit as JPEG bytes."""
     try:
         image = imfeat.image_fromstring(image_data)
     except:  # deliberate best-effort: bad records are counted, not fatal
         hadoopy.counter('DATA_ERRORS', 'ImageLoadError')
         return
     too_small = lambda img: (self.filter_side is not None and
                              min(img.shape[0], img.shape[1]) < self.filter_side)
     if too_small(image):
         hadoopy.counter('DATA_ERRORS', 'ImageTooSmallPre')
         return
     if self.max_side is not None:
         image = imfeat.resize_image_max_side(image, self.max_side)
     if too_small(image):
         hadoopy.counter('DATA_ERRORS', 'ImageTooSmallPost')
         return
     yield name, imfeat.image_tostring(image, 'jpg')
Example #11
0
 def load(self,
          proto_data,
          load_feature=True,
          load_hasher=True,
          load_index=True):
     """Deserialize a SearchIndex protobuf into this object and return self.

     SECURITY NOTE: pickled payloads execute arbitrary code when loaded —
     only pass trusted proto_data.
     """
     si = picarus.api.SearchIndex()
     si.ParseFromString(proto_data)

     def loader(fmt, data):
         # Each component is stored either as a pickle blob or as an
         # importable JSON spec.
         if fmt == si.PICKLE:
             return pickle.loads(data)
         return call_import(json.loads(data))

     self.metadata = np.array(si.metadata)
     if load_index:
         self.index = loader(si.index_format, si.index)
     if load_hasher:
         self.hasher = loader(si.hash_format, si.hash)
     if load_feature:
         raw_feature = loader(si.feature_format, si.feature)
         # Bound inputs to self.max_side before handing off to the feature.
         self.feature = lambda img: raw_feature(
             imfeat.resize_image_max_side(img, self.max_side))
     return self
Example #12
0
 def compute_dense(self, image):
     """Extract features over a pyramid of halved max-side resizes.

     Starting at the image's own longest side, resize so the longest side
     is `max_side`, extract points with self._feature, then halve
     `max_side`, for up to self.num_sizes levels (stopping once it hits
     0). If self.num_points is set, randomly subsamples to at most that
     many rows.

     Returns:
         A contiguous array of stacked points (empty array if none found).
     """
     points = []
     max_side = np.max(image.shape[:2])
     for x in range(self.num_sizes):
         if max_side <= 0:
             break
         image = imfeat.resize_image_max_side(image, max_side)
         cur_points = self._feature(image)
         if cur_points.size:
             points.append(cur_points)
         max_side = int(max_side / 2)
     if points:
         points = np.vstack(points)
     else:
         points = np.array([])
     if self.num_points is not None:
         points = random.sample(points, min(self.num_points, len(points)))
     # Removed a leftover debug print of points.shape that polluted stdout.
     return np.ascontiguousarray(points)
 def _map(self, row, image_binary):
     """Emit (row, jpeg_bytes) with the image bounded to self.max_side;
     undecodable images are counted and skipped."""
     try:
         decoded = imfeat.image_fromstring(image_binary)
         resized = imfeat.resize_image_max_side(decoded, self.max_side)
         yield row, imfeat.image_tostring(resized, 'jpg')
     except:  # best-effort pipeline stage: count the failure, drop the record
         hadoopy.counter('DATA_ERRORS', 'ImageLoadError')
Example #14
0
def _action_handle(function, params, image):
    """Dispatch the vision action named `function` on `image`.

    Tries the FEATURE_FUN, SEARCH_FUN and CLASSIFY_FUN registries in
    turn, then special-cases the texton, color-naming and face-detection
    actions.

    Args:
        function: Action name, e.g. 'see/texton' or a registry key.
        params: Dict of keyword arguments (used only by feature actions).
        image: Image array (BGR/OpenCV convention assumed — TODO confirm).

    Returns:
        JSON-serializable dict of results; {} when `function` is unknown.
    """
    print('Action[%s]' % function)
    # NOTE(review): each except KeyError wraps the whole handler call, so a
    # KeyError raised *inside* the handler (e.g. a missing key in `params`)
    # is also swallowed and dispatch silently falls through.
    try:
        ff = FEATURE_FUN[function]
        image = imfeat.resize_image_max_side(image, 320)  # TODO: Expose this
        return {'feature': ff(**params)(image).tolist()}
    except KeyError:
        pass
    try:
        sf = SEARCH_FUN[function]
        image = imfeat.resize_image_max_side(image, 320)  # TODO: Expose this
        print(sf.feature)
        out = {'results': sf.analyze_cropped(image)}
        if function == 'see/search/masks':
            out['classes'] = CLASS_COLORS
        return out
    except KeyError:
        pass
    try:
        cf = CLASSIFY_FUN[function]
        image = imfeat.resize_image_max_side(image, 320)  # TODO: Expose this
        return {'results': cf(image)}
    except KeyError:
        pass
    if function == 'see/texton' or function == 'see/texton_ilp':
        # Force width 320 keeping aspect ratio; TEXTON needs a contiguous array.
        image = cv2.resize(image,
                           (320, int(image.shape[0] * 320. / image.shape[1])))
        image = np.ascontiguousarray(image)
        semantic_masks = TEXTON(image)
        if function == 'see/texton_ilp':
            ilp_pred = CLASSIFY_FUN['see/classify/indoor'](
                imfeat.resize_image_max_side(image, 320))
            # First bin whose edge is >= the prediction; fall back past the
            # last table column when the prediction exceeds every bin edge.
            try:
                bin_index = [
                    x for x, y in enumerate(ILP_WEIGHTS['bins'])
                    if y >= ilp_pred
                ][0]
            except IndexError:
                bin_index = ILP_WEIGHTS['ilp_tables'].shape[1]
            if bin_index != 0:
                bin_index -= 1
            ilp_weights = ILP_WEIGHTS['ilp_tables'][:, bin_index]
            print('ILP Pred[%s] Weights[%s]' % (ilp_pred, ilp_weights))
            semantic_masks *= ilp_weights
        #min_probability = float(params.get('min_probability', 0.5))
        #semantic_masks = np.dstack([semantic_masks, np.ones_like(semantic_masks[:, :, 0]) * min_probability])
        texton_argmax2 = np.argmax(semantic_masks, 2)
        image_string = imfeat.image_tostring(COLORS_BGR[texton_argmax2], 'png')
        out = {'argmax_pngb64': base64.b64encode(image_string)}
        out['classes'] = CLASS_COLORS
        return out
    if function == 'see/colors':
        image = cv2.resize(image,
                           (320, int(image.shape[0] * 320. / image.shape[1])))
        image = np.ascontiguousarray(image)
        masks = COLOR_NAMING.make_feature_mask(image)
        mask_argmax = np.argmax(masks, 2)
        image_string = imfeat.image_tostring(
            COLOR_NAMING.color_values[mask_argmax], 'png')
        return {'argmax_pngb64': base64.b64encode(image_string)}
    if function == 'see/faces':
        # Each detection is (tl_x, tl_y, width, height); coerce to floats.
        results = [map(float, x) for x in FACES._detect_faces(image)]
        return {
            'faces': [{
                'tl_x': x[0],
                'tl_y': x[1],
                'width': x[2],
                'height': x[3]
            } for x in results]
        }
    return {}
Example #15
0
 def feature(self, image):
     """Detect points on `image` bounded to self.max_side, subsampling to
     at most self.num_points points when that limit is set."""
     shrunk = imfeat.resize_image_max_side(image, self.max_side)
     points = self._feature(shrunk)
     if self.num_points is None:
         return points
     sampled = random.sample(points, min(self.num_points, len(points)))
     return np.ascontiguousarray(sampled)
Example #16
0
File: _gray.py Project: bwhite/vidfeat
 def _feature(self, image):
     """Return color statistics of `image` after bounding its longest
     side to 128 pixels."""
     return color_stats(imfeat.resize_image_max_side(image, 128))