    def get_x_y(self, tag_set, reduction_factor=0):
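        """Collect per-segment features (x) and matching labels (y) for all tags in tag_set."""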
        x = None
        y = None
        for t in tqdm(tag_set):
            use_sample = True
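            # If reduction is enabled, randomly skip roughly one in every
            # `reduction_factor` samples.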
            if reduction_factor > 1:
                if not np.random.randint(0, reduction_factor):
                    use_sample = False

            if use_sample:
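                # Segment the image, extract dense features, aggregate them per
                # segment, and build the matching per-segment labels.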
                x_img = t.load_x()
                segments = generate_segments(x_img, self.opt)
                x_img, _ = self.get_features(x_img)
                h_img, w_img = x_img.shape[:2]
                x_img = get_features_for_segments(
                    x_img, segments, self.opt["feature_aggregation"])
                y_img = t.load_y([h_img, w_img])
                y_img = get_y_for_segments(y_img, segments)
                assert x_img.shape[0] == y_img.shape[0], \
                    "Unequal number of segments: {} vs {}".format(
                        x_img.shape, y_img.shape)

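                # Stack this image's segment features and labels onto the
                # accumulated training arrays.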
                if x is None:
                    x = x_img
                    y = y_img
                else:
                    x = np.append(x, x_img, axis=0)
                    y = np.append(y, y_img, axis=0)
        return x, y

    def inference(self, x_input, interpolation="nearest"):
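        """Predict per-segment class probabilities for x_input and return them as a
        dense map stacked behind the (resized) pass-through feature channels."""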
        segments = generate_segments(x_input, self.opt)
        x_img, x_pass = self.get_features(x_input)
        o_height, o_width = x_pass.shape[:2]
        x_height, x_width = x_img.shape[:2]
        x_img = get_features_for_segments(x_img, segments,
                                          self.opt["feature_aggregation"])
        y_pred = self.clf.predict_proba(x_img)
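        # Resize the segment map to the feature resolution, then paint each
        # segment with its predicted class probabilities.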
        segments = resize(segments,
                          width=x_width,
                          height=x_height,
                          interpolation="nearest")
        y_img = map_segments(segments, y_pred)
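        # Bring the probability map (and the pass-through features) back to the
        # output resolution of the pass-through branch.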
        x_img_pass = resize(x_pass,
                            width=o_width,
                            height=o_height,
                            interpolation="nearest")
        y_img = resize(y_img,
                       width=o_width,
                       height=o_height,
                       interpolation=interpolation)

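        # Ensure both arrays have an explicit channel axis before concatenating.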
        if len(x_img_pass.shape) < 3:
            x_img_pass = np.expand_dims(x_img_pass, axis=2)
        if len(y_img.shape) < 3:
            y_img = np.expand_dims(y_img, axis=2)
        y_img = np.concatenate([x_img_pass, y_img], axis=2)
        return y_img

    def predict(self, tag):
        """Predict a hard label map for a single tag at the output resolution."""
        x_input = tag.load_x()
        segments = generate_segments(x_input, self.opt)
        x_img, x_pass = self.get_features(x_input)
        o_height, o_width = x_pass.shape[:2]
        x_height, x_width = x_img.shape[:2]
        x_img = get_features_for_segments(x_img, segments,
                                          self.opt["feature_aggregation"])
        # Predict one class per segment and map the labels back onto the image.
        y_img = self.clf.predict(x_img)
        segments = resize(segments,
                          width=x_width,
                          height=x_height,
                          interpolation="nearest")
        y_img = map_segments(segments, y_img)
        y_img = resize(y_img,
                       width=o_width,
                       height=o_height,
                       interpolation="nearest")
        return y_img
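
    # Usage sketch (hypothetical, not part of the original code): the surrounding
    # class is assumed to hold an sklearn-style classifier in `self.clf` (the
    # methods above call predict/predict_proba; fit is an assumption for training),
    # and tags are assumed to expose load_x/load_y.
    #
    #   x, y = model.get_x_y(train_tags)
    #   model.clf.fit(x, y)
    #   prob_map = model.inference(image)       # pass-through features + probabilities
    #   label_map = model.predict(test_tag)     # hard label map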