Code Example #1
File: renderer.py Project: d33jiang/dj-experiments
def _render_block(data_block: np.ndarray, tile_shape: TileShape):
    if np.shape(data_block) != tile_shape.shape:
        data_block = np.copy(data_block)
        data_block.resize(tile_shape.shape)

    code_point = tile_shape.encode_function(data_block)
    return chr(code_point)
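A note on the `resize` call above: `ndarray.resize` works in place and, when the new shape is larger, fills the missing entries with zeros, which is why the block is copied first. Below is a minimal sketch contrasting it with `np.resize`, which returns a new array and repeats the data instead; the values are illustrative only.

import numpy as np

block = np.array([[1, 2], [3, 4]])

padded = np.copy(block)
padded.resize((3, 3))             # in-place; new cells are zero-filled
# padded -> [[1, 2, 3], [4, 0, 0], [0, 0, 0]]

tiled = np.resize(block, (3, 3))  # new array; the original data is repeated
# tiled  -> [[1, 2, 3], [4, 1, 2], [3, 4, 1]]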
Code Example #2
    def _detect_tip_feature(
            self,
            tip_area: np.ndarray,
            rejected: np.ndarray = None) -> Union[np.ndarray, None]:
        """
        tries to find the tip via features from a crop, and returns it position in the crop

        :param tip_area: is the crop where should be the tip
        :param rejected: if is not `None` is used to returns the rejected features
        """
        hsv = cv.cvtColor(tip_area, cv.COLOR_BGR2HSV)
        mask = cv.inRange(hsv, np.array([0, 0, 0]), np.array([180, 255, 50]))
        mask = cv.bitwise_not(mask)
        mask_rgb = cv.cvtColor(mask, cv.COLOR_GRAY2RGB)  # needed by bitwise_or
        cv.bitwise_or(
            mask_rgb, tip_area, tip_area
        )  # replace everything brighter than V = 50 with pure white

        tip_area_gray = cv.cvtColor(
            tip_area, cv.COLOR_BGR2GRAY)  # needed by shi-tomasi detector
        features = cv.goodFeaturesToTrack(
            tip_area_gray, 3, .3,
            5)  # top 3 features are returned as an array of [x,y] points

        # if no feature is detected return None
        if features is None:
            return None

        features = np.squeeze(features, axis=1)

        kernel = cv.getStructuringElement(
            cv.MORPH_RECT, (11, 11))  # basically an 11x11 matrix of 1s
        filtered_area = cv.filter2D(mask, cv.CV_16S, kernel, anchor=(5, 0))
        # brightest -> less dark around the candidate -> high likelihood of being the tip
        # normalize the results from 0 to 255
        filtered_area = cv.normalize(filtered_area,
                                     filtered_area,
                                     alpha=0,
                                     beta=255,
                                     norm_type=cv.NORM_MINMAX,
                                     dtype=cv.CV_8U)

        # sort by brightness in the filtered area
        features = np.array(
            sorted(features,
                   key=lambda feature: filtered_area[feature[1].astype(
                       np.uint), feature[0].astype(np.uint)],
                   reverse=True))

        # if rejected elements are required put them in the array
        if rejected is not None and features.shape[0] > 1:
            rej_feat = features[1:]
            rejected.resize(rej_feat.shape, refcheck=False)
            np.copyto(rejected, rej_feat, casting="unsafe")

        # adds a static offset on the tip position
        tip = features[0] + np.array([0, 2])

        return tip
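The `rejected` argument above is an out-parameter: the caller's array is resized in place (`refcheck=False` skips NumPy's reference-count check) and then overwritten with `np.copyto`. Below is a minimal, self-contained sketch of that pattern; `fill_out` and `buf` are illustrative names, not part of the original code.

import numpy as np

def fill_out(out: np.ndarray) -> None:
    data = np.array([[1.0, 2.0], [3.0, 4.0]])
    out.resize(data.shape, refcheck=False)  # reshape the caller's array in place
    np.copyto(out, data, casting="unsafe")  # then write the result into it

buf = np.empty(0)
fill_out(buf)
# buf now has shape (2, 2) and holds the data written inside fill_out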
Code Example #3
    def forward(self, input: np.ndarray):
        if self.verbose:
            print("Net forward input", input, "Input shape", input.shape)

        input.resize((input.size, 1))
        for l in self.layers:
            input = l.forward(input, self.verbose)
        return input
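Note that `input.resize((input.size, 1))` reshapes the caller's array in place and raises a ValueError if that array is referenced elsewhere. The short sketch below shows the behaviour, together with the non-mutating `reshape` alternative (illustrative only, not part of the original network code).

import numpy as np

x = np.arange(4, dtype=float)
x.resize((x.size, 1))         # in place: x now has shape (4, 1)

y = np.arange(4, dtype=float)
view = y[:2]                  # an extra reference into y's buffer
# y.resize((y.size, 1))       # would raise ValueError because of `view`
col = y.reshape(-1, 1)        # non-mutating alternative: a (4, 1) view of y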
Code Example #4
File: histogram.py Project: yqbgq/task
def histogram_gray_cal(image: np.ndarray, np_opt=True):
    image = image.astype(int)  # np.int was removed from NumPy; the builtin int is equivalent
    height = image.shape[0]
    width = image.shape[1]
    if np_opt:
        image.resize(image.shape[0] * image.shape[1])
        his = np.bincount(image, minlength=256) / (height * width)
        his_list = [(his[2 * i] + his[2 * i + 1]) / 2 for i in range(128)]
        return np.array(his_list)
    result = [0 for _ in range(256)]

    for i in range(height):
        for j in range(width):
            result[image[i, j]] += 1
    return np.array(result) / (height * width)
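A quick usage sketch for `histogram_gray_cal`, assuming the function above is importable; the random image is illustrative. Note that the `np_opt` branch averages adjacent bins and returns 128 values, while the pure-Python branch returns all 256 bins.

import numpy as np

gray = np.random.randint(0, 256, size=(64, 64), dtype=np.uint8)

his_fast = histogram_gray_cal(gray, np_opt=True)    # shape (128,)
his_slow = histogram_gray_cal(gray, np_opt=False)   # shape (256,)
assert abs(his_slow.sum() - 1.0) < 1e-9             # normalized by pixel count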
Code Example #5
    def preprocess_ob(self, ob: np.ndarray) -> Tensor:
        """Preprocess observation:\n
        - shrink the image to 100x100\n
        - transform it to black and white\n
        - transform it into a Tensor\n

        Args:
            ob (np.ndarray): Observation to preprocess

        Returns:
            Tensor: Preprocessed observation
        """
        # shrink image
        ob = Image.fromarray(ob)
        ob = ob.resize((100, 100))
        ob = np.asarray(ob)

        # grayscale image
        ob = rgb2grayscale(ob)
        ob[ob != ob[0][0]] = 1
        ob[ob == ob[0][0]] = 0

        # Tensor definition
        ob = torch.from_numpy(ob).float().to(torch.device(device))

        return ob
Code Example #6
File: images.py Project: epoch8/cv-pipeliner
def thumbnail_image(image: np.ndarray,
                    size: Tuple[int, int],
                    resample: Optional[int] = None) -> np.ndarray:
    image = Image.fromarray(image)
    new_width, new_height = get_thumbnail_resize(image, size)
    image = image.resize((new_width, new_height), resample=resample)
    image = np.array(image)
    return image
Code Example #7
File: image_util.py Project: stfnwong/lernomatic
def scale_width(img: np.ndarray,
                target_w: int,
                interp_method=Image.BICUBIC) -> np.ndarray:
    ow, oh = img.size
    if (ow == target_w):
        return img
    w = target_w
    h = int(target_w * oh / ow)

    return img.resize((w, h), interp_method)
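Despite the `np.ndarray` annotation, `scale_width` uses the PIL `Image` API (`.size`, `.resize`), so it expects a PIL image. A usage sketch under that assumption:

from PIL import Image

img = Image.new('RGB', (400, 300))      # PIL size is (width, height)
out = scale_width(img, target_w=200)    # width scaled to 200, aspect ratio kept
assert out.size == (200, 150)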
Code Example #8
 def imshow(self, image: np.ndarray) -> None:
     self._last_image = image
     was_none = self.window is None
     image = Image.fromarray(image)
     image = image.resize((self._resolution, self._resolution))
     image = np.array(image)
     super().imshow(image)
     if was_none:
         self.window.event(self.on_key_press)
         self.window.push_handlers(self._keys)
Code Example #9
File: prototype.py Project: hpnok/plumbingg2
 def __init__(self, pts: np.ndarray, holes: List[np.ndarray] = None):
     n = len(pts)
     if n < 3:
         raise ValueError('A polygon must contain at least 3 points')
     pts.resize((n, 2))
     s = [(i, (i + 1) % n) for i in range(n)]
     self.hole_pts = []
     for hole in (holes or []):  # holes defaults to None
         s, n, hole_pt = self._add_hole(n, s, hole)
         self.hole_pts.append(hole_pt)
     s = np.array(s)
     poly_dict = dict(vertices=pts, segments=s)
     if holes:
         pts = np.concatenate([pts] + holes)
         poly_dict['vertices'] = pts
         poly_dict['holes'] = np.array(self.hole_pts)
     tri_dict = tr.triangulate(poly_dict, 'p')
     self._plane_triangles = tri_dict['triangles']
     self._contour = pts
     self._winding = self._compute_winding(pts)
Code Example #10
File: granularity.py Project: YourThomasLee/PRSer
 def get_partition(self, data: np.ndarray):
     # takes a two-dimensional array as input
     # returns an array with the same distribution
     # for example
     #   in_array:   output:
     #     s 1 s     1
     #     1 2 1     2
     #     1 2 1     2
     #     2 2 1     3
     # output domain: [1, +oo)
     if len(data.shape) == 1:
         data.resize(data.shape[0], 1)
     rows = data.shape[0]
     cols = data.shape[1]
     partition = np.array([0 for i in range(rows)])
     value2dstr = dict()
     valnum = 1
     for i in range(
             rows):  # map each row (a list of cols values) to an equivalence class id
         tmp = value2dstr
         for j in range(0, cols):
             if tmp.get(data[i][j]) is None:  # new equivalence class
                 partition[i] = valnum
                 #return a num
                 for k in range(j, cols - 1):
                     tmp[data[i][k]] = dict()
                     tmp = tmp[data[i][k]]
                 try:
                     tmp[data[i][cols - 1]] = valnum
                 except IndexError:
                     print('Error happened in Reducer.get_partition()',
                           type(data[i]), data[i])
                 valnum += 1
                 break
             else:
                 if j + 1 == cols:  #last node
                     partition[i] = tmp[data[i][j]]
                 else:  #next node
                     tmp = tmp[data[i][j]]
     return partition
Code Example #11
def _tile_chw(arr: np.ndarray, nrows: int, ncols: int) -> np.ndarray:
    """
    Args:
        arr: chw tensor
        nrows: number of tiled rows
        ncols: number of tiled columns

    Returns:
        np.ndarray: tiled array
    """
    c, h, w = arr.shape
    assert c <= nrows * ncols

    if c < nrows * ncols:
        arr = arr.reshape(-1).copy()
        arr.resize(nrows * ncols * h * w)

    return (
        arr.reshape(nrows, ncols, h, w)
        .swapaxes(1, 2)
        .reshape(nrows * h, ncols * w)
    )
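A small illustration of how `_tile_chw` lays out the channels (values are illustrative): three 2x2 channel maps tiled into a 2x2 grid, with the missing fourth tile zero-filled by `ndarray.resize`.

import numpy as np

arr = np.arange(12).reshape(3, 2, 2)        # 3 channels of 2x2
tiled = _tile_chw(arr, nrows=2, ncols=2)    # -> shape (4, 4)
# rows 0-1: channels 0 and 1 side by side; rows 2-3: channel 2 and a zero tile
print(tiled)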
Code Example #12
def PIL_resize(img: np.ndarray, size: Tuple[int, int]) -> np.ndarray:
    """
    Args:
        img: Array representing an image
        size: Tuple representing new desired (width, height)

    Returns:
        img
    """
    img = numpy_arr_to_PIL_image(img, scale_to_255=True)
    img = img.resize(size)
    img = PIL_image_to_numpy_arr(img)
    return img
Code Example #13
    def _bilinear_resample(self, x: np.ndarray, n: int = 120) -> np.array:
        dtype = x.dtype
        assert len(x.shape) == 2
        if (x.shape[0] == n) and (x.shape[1] == n):
            return x
        else:
            x = x.astype(float)  # np.float was removed from NumPy; the builtin float is equivalent
            x = Image.fromarray(x)
            x = x.resize((n, n), Image.BILINEAR)
            x = np.array(x)
            x = x.astype(dtype)

        return x
Code Example #14
def PIL_resize(img: np.ndarray, ratio: Tuple[float, float]) -> np.ndarray:
    """
    Args:
    - img: Array representing an image
    - size: Tuple representing new desired (width, height)

    Returns:
    - img
    """
    H, W, _ = img.shape
    img = numpy_arr_to_PIL_image(img, scale_to_255=True)
    img = img.resize((int(W * ratio[1]), int(H * ratio[0])), PIL.Image.LANCZOS)
    img = PIL_image_to_numpy_arr(img)
    return img
Code Example #15
File: core.py Project: l1uw3n/xfuse
def resize(
    image: np.ndarray,
    target_shape: Sequence[int],
    resample: int = Image.NEAREST,
) -> np.ndarray:
    r"""
    Resizes image to a given `target_shape`

    :param image: Image array
    :param target_shape: Target shape
    :param resample: Resampling filter
    :returns: The rescaled image
    """
    image = Image.fromarray(image)
    image = image.resize(target_shape[::-1], resample=resample)
    image = np.array(image)
    return image
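The `[::-1]` above is needed because `target_shape` is given in array order (rows, cols) while `PIL.Image.resize` expects (width, height). A minimal usage sketch, assuming the `resize` function above is in scope:

import numpy as np

rgb = np.zeros((120, 80, 3), dtype=np.uint8)  # H=120, W=80
small = resize(rgb, target_shape=(60, 40))    # rows=60, cols=40
assert small.shape[:2] == (60, 40)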
Code Example #16
File: utility.py Project: roromaniac/xfuse
def rescale(image: np.ndarray,
            scaling_factor: float,
            resample: int = Image.NEAREST) -> np.ndarray:
    r"""
    Rescales image

    :param image: Image array
    :param scaling_factor: Scaling factor
    :param resample: Resampling filter
    :returns: The rescaled image
    """
    image = Image.fromarray(image)
    image = image.resize(
        [round(x * scaling_factor) for x in image.size],
        resample=resample,
    )
    image = np.array(image)
    return image
Code Example #17
File: utilities.py Project: FrostbiteXSW/SCAE_Attack
def imresize(im: np.ndarray, shape):
	"""
	:param im: 3-dim image of type np.uint8
	:param shape: 2-dim array
	:return: resized image
	"""
	im = deepcopy(im)
	reshape = im.ndim == 3 and im.shape[-1] == 1

	if reshape:
		im = im.squeeze(axis=-1)

	im = Image.fromarray(im)
	im = im.resize(shape)
	im = np.array(im)
	im = im.clip(0, 255)

	if reshape:
		im = im[:, :, None]

	return im
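A usage sketch for `imresize`, assuming it is importable: a single-channel image keeps its trailing axis across the squeeze/resize/restore round trip.

import numpy as np

im = np.zeros((28, 28, 1), dtype=np.uint8)
out = imresize(im, (14, 14))
assert out.shape == (14, 14, 1)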
Code Example #18
File: utils.py Project: lagka/sockeye
def crop_resize_image(image: np.ndarray, size) -> np.ndarray:
    """
    Resize the input image.

    :param image: Original image, which is a PIL object.
    :param size: Tuple of height and width to resize the image to.
    :return: Resized image which is a PIL object
    """
    width, height = image.size
    if width > height:
        left = (width - height) / 2
        right = width - left
        top = 0
        bottom = height
    else:
        top = (height - width) / 2
        bottom = height - top
        left = 0
        right = width
    image = image.crop((left, top, right, bottom))
    image = image.resize(size, Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
    return image
Code Example #19
File: utils.py Project: yanzhangnlp/LDGCNs
def crop_resize_image(image: np.ndarray, size) -> np.ndarray:
    """
    Resize the input image.

    :param image: Original image, which is a PIL object.
    :param size: Tuple of height and width to resize the image to.
    :return: Resized image which is a PIL object
    """
    width, height = image.size
    if width > height:
        left = (width - height) / 2
        right = width - left
        top = 0
        bottom = height
    else:
        top = (height - width) / 2
        bottom = height - top
        left = 0
        right = width
    image = image.crop((left, top, right, bottom))
    image = image.resize(size, Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
    return image
Code Example #20
 def log_predictions(
     self, pred: np.ndarray, clusterings: Tuple[List[np.ndarray],
                                                List[np.ndarray],
                                                List[np.ndarray]]
 ) -> None:
     type_count = pred.shape[-1]
     new_size = self.preprocessed_screen.shape[:2]
     final_pred_size = (new_size[0] * int(type_count**.5),
                        new_size[1] * (type_count // int(type_count**.5)))
     final_pred = np.zeros((*final_pred_size, 3), dtype=np.uint8)
     original_pred = np.uint8(cm.viridis(pred)[:, :, :, :3] * 255)
     for type in range(type_count):
         pred = original_pred[:, :, type, :]
         if clusterings is not None or self.prediction_overlay_factor > 0:
             prev_size = pred.shape[:2]
             pred = Image.fromarray(pred)
             pred = pred.resize((new_size[1], new_size[0]))
             pred = Image.blend(pred,
                                Image.fromarray(self.preprocessed_screen),
                                self.prediction_overlay_factor)
             pred = np.array(pred)
             if clusterings is not None:
                 clustering = tuple(x[type] for x in clusterings)
                 cluster_colors = cm.Reds(
                     np.linspace(0, 1, len(clustering[2])))[:, :3] * 255
                 for cluster in zip(*clustering[:2]):
                     cl_ind = np.where(clustering[2] == cluster[1])[0]
                     if len(cl_ind) > 0:
                         y, x = tuple(cluster[0][:2] * new_size //
                                      prev_size)
                         pred[y:y + self.cluster_color_size, x:x + self.cluster_color_size] \
                             = cluster_colors[cl_ind[0]]
         type_x = type // (final_pred_size[1] // new_size[1])
         type_y = type % (final_pred_size[1] // new_size[1])
         final_pred[type_x * new_size[0]:(type_x + 1) * new_size[0],
                    type_y * new_size[1]:(type_y + 1) * new_size[1]] = pred
     self.log_image('Predictions', final_pred)
Code Example #21
def JSFS(X: np.ndarray, y: np.ndarray, test_X: np.ndarray, test_y: np.ndarray,
         name: str):
    # reference: Jiang, Bingbing, et al.
    # "Joint semi-supervised feature selection and classification through Bayesian approach."
    # Proceedings of the AAAI Conference on Artificial Intelligence. Vol. 33. 2019.
    print('========== JSFS ==========')
    # --- Input & Initialize---
    # np.set_printoptions(threshold=np.inf)
    n = len(X)
    d = len(X[0])
    y.resize((n, 1))
    testSize = len(test_X)
    # labeled sample ratio
    # labelRatio = 0.5
    labelRatio = CONFIG[name]['labelRatio']
    l = int(n * labelRatio)
    u = n - l
    # γ and µ are hyperparameters
    # Gamma = 0.001
    Gamma = CONFIG[name]['Gamma']
    # Mu = 0.9
    Mu = CONFIG[name]['Mu']
    # Beta = 0.005
    Beta = 5
    Omega = np.zeros((d, 1))
    Omega[:] = 0.5
    Lambda_vector = np.zeros((u, 1))
    Lambda_vector[:] = 0.5
    A = np.zeros((d, d))
    for i in range(d):
        A[i, i] = 0.001
    C = np.zeros((u, u))
    for i in range(u):
        C[i, i] = 0.001
    # --- Construct the affinity matrix S and graph Laplacian L via KNN ---
    print('Construct the affinity matrix S and graph Laplacian L via KNN')
    trainData_X = X
    trainData_Y = y.ravel()  # y and trainData_Y address the same memory
    # replace the original -1 label with 0, because in this method -1 means no label
    for i in range(n):
        trainData_Y[i] = 0 if trainData_Y[i] == -1 else trainData_Y[i]
    trainData_Y[l:] = -1
    KNN = KNeighborsClassifier(n_neighbors=5)
    KNN.fit(trainData_X[:l], trainData_Y[:l])
    S = np.zeros((n, n))
    D = np.zeros((n, n))
    L = np.zeros((n, n))
    for i in range(n):
        for j in range(i, n):
            if trainData_Y[i] == trainData_Y[j] and trainData_Y[i] != -1:
                S[i][j] = 10
            elif (trainData_Y[i] == -1
                  and trainData_Y[j] > -1) and (KNN.predict(
                      trainData_X[i:i + 1]) == trainData_Y[j]):
                S[i][j] = 1
            elif (trainData_Y[j] == -1
                  and trainData_Y[i] > -1) and (KNN.predict(
                      trainData_X[j:j + 1]) == trainData_Y[i]):
                S[i][j] = 1
            else:
                S[i][j] = 0
            S[j][i] = S[i][j]
        D[i, i] = sum(S[i, :])
        percent = 100 * (float((2 * n - i) * (i + 1)) / ((n + 1) * n))
        show_str = ('[%%-%ds]' % 50) % (int(50 * percent / 100) * "#")
        print('\r%s %d%%' % (show_str, percent), end='')
    L = D - S
    # --- Obtain the pseudo label vector y_u via label propagation ---
    print('\nObtain the pseudo label vector y_u via label propagation')
    LGC_rbf = LabelSpreading(kernel='knn',
                             gamma=20,
                             n_neighbors=7,
                             max_iter=150)
    LGC_rbf.fit(trainData_X, trainData_Y)
    trainData_Y[l:] = LGC_rbf.predict(trainData_X[l:])
    # change 0 back to the -1
    """ for i in range(n):
        trainData_Y[i] = -1 if trainData_Y[i] == 0 else trainData_Y[i] """
    # --- Data preprocessing: min-max normalize the features ---
    # min_max_scaler = preprocessing.MinMaxScaler((0, 0.0001))
    min_max_scaler = preprocessing.MinMaxScaler(
        (0, CONFIG[name]['xMaxScaler']))
    X = min_max_scaler.fit_transform(X)
    test_X = min_max_scaler.transform(test_X)
    # --- Convergence ---
    B = Gamma * np.dot(np.dot(X.T, L), X)
    Lambda = np.matlib.identity(n)
    Sigma = np.zeros((n, 1))
    E = np.zeros((n, n))
    P = np.zeros((u, u))
    k_lambda = np.zeros((u, 1))
    Eu = np.zeros((u, u))
    O = np.zeros((u, u))
    Omega_old = np.ones((d, 1))
    Lambda_vector_old = np.zeros((u, 1))
    g_omega = np.zeros((d, 1))
    H_omega = np.zeros((d, d))
    Sig_omega = np.zeros((d, d))
    g_lambda = np.zeros((u, 1))
    H_lambda = np.zeros((u, u))
    Sig_lambda = np.zeros((u, u))
    G = np.zeros((d, d))
    cnt = 0
    while np.linalg.norm(Omega - Omega_old, ord=np.inf) > 0.001:
        print('--------', cnt + 1, '--------')
        for i in range(n):
            if (i < l):
                Sigma[i, 0] = 1 / (1 + np.exp(-1 * np.dot(X[i, :], Omega)))
                E[i, i] = Sigma[i, 0] * (1 - Sigma[i, 0])
            else:
                Sigma[i, 0] = 1 / \
                    (1 + np.exp(-1 *
                                Lambda_vector[i-l, 0] * np.dot(X[i, :], Omega)))
                E[i, i] *= Mu * Lambda_vector[i-l, 0] * \
                    Lambda_vector[i-l, 0] * Sigma[i, 0] * (1 - Sigma[i, 0])
                Lambda[i, i] = Mu * Lambda_vector[i - l, 0]
                P[i - l, i - l] = np.dot(X[i, :], Omega)
                k_lambda[i-l, 0] = Beta * \
                    (1 - (1 / (1 + np.exp(-(Beta * Lambda_vector[i-l, 0])))))
                Eu[i - l, i - l] = Sigma[i, 0] * (1 - Sigma[i, 0])
                O[i - l, i - l] = Beta * Beta * (
                    1 / (1 + np.exp(-(Beta * Lambda_vector[i - l, 0])))) * (
                        1 - (1 /
                             (1 + np.exp(-(Beta * Lambda_vector[i - l, 0])))))
        if (np.linalg.norm(g_omega[:, 0], ord=2) / d) < 0.001:
            g_omega = np.dot(np.dot(X.T, Lambda), (y - Sigma)) - \
                np.dot((A + B), Omega)
            H_omega = -1 * (np.dot(np.dot(X.T, E), X) + A + B)
            Sig_omega = -1 * np.linalg.inv(H_omega)
            Omega_old = Omega.copy()
            Omega = Omega - np.dot(np.linalg.inv(H_omega), g_omega)
            print('gw:', np.mean(g_omega[:, 0]), ' gw_judge:',
                  (np.linalg.norm(g_omega[:, 0], ord=2) / d), 'w_max',
                  np.max(Omega, axis=0), 'w_min', np.min(Omega, axis=0))
        for i in range(d):
            if (Omega[i, 0] != 0) and (abs(Omega[i, 0]) < 0.001):
                Omega[i, 0] = 0
        if (np.linalg.norm(g_lambda[:, 0], ord=2) / u) < 0.001:
            g_lambda = Mu * np.dot(P, (y[l:] - Sigma[l:])) - \
                np.dot(C, Lambda_vector) + k_lambda
            H_lambda = -1 * ((Mu * np.dot(np.dot(P.T, Eu), P)) + C + O)
            Sig_lambda = -1 * np.linalg.inv(H_lambda)
            Lambda_vector_old = Lambda_vector.copy()
            Lambda_vector = Lambda_vector - \
                np.dot(np.linalg.inv(H_lambda), g_lambda)
            print('gl:', np.mean(g_lambda[:, 0]), ' gl_judge:',
                  (np.linalg.norm(g_lambda[:, 0], ord=2) / u), 'l_max',
                  np.max(Lambda_vector, axis=0), 'l_min',
                  np.min(Lambda_vector, axis=0))
        for i in range(u):
            if (Lambda_vector[i, 0] != 0) and (abs(Lambda_vector[i, 0]) <
                                               0.001):
                Lambda_vector[i, 0] = 0
        G = np.dot(
            np.dot(
                np.dot(np.linalg.inv(A), B),
                np.linalg.inv(
                    np.matlib.identity(d) + np.dot(np.linalg.inv(A), B))),
            np.linalg.inv(A))
        for i in range(d):
            A[i,
              i] = 1 / (Omega[i, 0] * Omega[i, 0] + G[i, i] + Sig_omega[i, i])
        for i in range(u):
            C[i, i] = 1 / (Lambda_vector[i, 0] * Lambda_vector[i, 0] +
                           Sig_lambda[i, i])
        print('max_lambda_new-old',
              np.linalg.norm(Lambda_vector - Lambda_vector_old, ord=np.inf))
        print('max_omega_new-old', np.linalg.norm(Omega - Omega_old,
                                                  ord=np.inf))
        cnt += 1
        if cnt == 50:
            break
    # --- Test ---
    predict_y = np.zeros(testSize)
    predict_vector_y = np.dot(test_X, Omega).flatten()
    predict_vector_y *= CONFIG[name]['yScaler']
    threshold = CONFIG[name]['threshold']
    for i in range(testSize):
        if predict_vector_y[0, i] < threshold:
            predict_y[i] = -1
        else:
            predict_y[i] = 1
    print('predict_y:', predict_vector_y[0, :10])
    tp = 0
    fp = 0
    fn = 0
    tn = 0
    for idx in range(len(test_y)):
        if test_y[idx] == 1 and predict_y[idx] == 1:
            tp += 1
        elif test_y[idx] == 1 and predict_y[idx] == -1:
            fn += 1
        elif test_y[idx] == -1 and predict_y[idx] == 1:
            fp += 1
        elif test_y[idx] == -1 and predict_y[idx] == -1:
            tn += 1
    p = tp / (fp + tp)
    pf = fp / (fp + tn)
    pd = tp / (tp + fn)
    F_measure = 2 * pd * p / (pd + p)
    """ print('precision:', 100 * p, '%')
    print('recall:', 100 * recall_score(test_y, predict_y), '%')
    print('pf:', 100 * pf, '%')
    print('F-measure:', 100 * F_measure, '%')
    print('accuracy:', 100 * accuracy_score(test_y, predict_y), '%')
    print('AUC:', 100 * roc_auc_score(test_y, predict_y), '%') """
    print('precision:', p)
    print('recall:', recall_score(test_y, predict_y))
    print('pf:', pf)
    print('F-measure:', F_measure)
    print('accuracy:', accuracy_score(test_y, predict_y))
    print('AUC:', roc_auc_score(test_y, predict_y))
Code Example #22
def render_textblock_list_eng(img: np.ndarray,
                              blk_list: List[TextBlock],
                              font_path: str,
                              scale_quality=1.0,
                              align_center=True,
                              size_tol=1.0):
    pilimg = Image.fromarray(img)
    for blk in blk_list:
        if blk.vertical:
            blk.angle -= 90
        sw_r = 0.1
        fs = int(blk.font_size / (1 + 2 * sw_r) * scale_quality)
        min_bbox = blk.min_rect(rotate_back=False)[0]
        bx, by = min_bbox[0]
        bw, bh = min_bbox[2] - min_bbox[0]
        cx, cy = bx + bw / 2, by + bh / 2
        bw = bw * scale_quality

        font = ImageFont.truetype(font_path, fs)
        words = text_to_word_list(blk.translation)
        if not len(words):
            continue

        base_length = -1
        w_list = []

        sw = int(sw_r * font.size)
        line_height = int((1 + 2 * sw_r) * font.getmetrics()[0])

        for word in words:
            wl = font.getlength(word)
            w_list.append(wl)
            if wl > base_length:
                base_length = wl
        base_length = max(base_length, bw)
        space_l = font.getlength(' ')
        pos_x, pos_y = 0, 0
        line = Line(words[0], 0, 0, w_list[0])
        line_lst = [line]
        for word, wl in zip(words[1:], w_list[1:]):
            added_len = int(space_l + wl + line.length)
            if added_len > base_length:
                pos_y += line_height
                line = Line(word, 0, pos_y, wl)
                line_lst.append(line)
            else:
                line.text = line.text + ' ' + word
                line.length = added_len
        last_line = line_lst[-1]
        canvas_h = last_line.pos_y + line_height
        canvas_w = int(base_length)

        font_color = (0, 0, 0)
        stroke_color = (255, 255, 255)
        img = Image.new('RGBA', (canvas_w, canvas_h), color=(0, 0, 0, 0))
        d = ImageDraw.Draw(img)
        d.fontmode = 'L'

        for line in line_lst:
            pos_x = int((base_length - line.length) / 2) if align_center else 0
            d.text((pos_x, line.pos_y),
                   line.text,
                   font=font,
                   fill=font_color,
                   stroke_width=sw,
                   stroke_fill=stroke_color)

        if abs(blk.angle) > 3:
            img = img.rotate(-blk.angle, expand=True)
        im_w, im_h = img.size
        scale = min(bh / im_h * size_tol, bw / im_w * size_tol)
        if scale < 1:
            img = img.resize((int(im_w * scale), int(im_h * scale)))

        im_w, im_h = img.size
        paste_x, paste_y = int(cx - im_w / 2), int(cy - im_h / 2)
        pilimg.paste(img, (paste_x, paste_y), mask=img)

    return np.array(pilimg)
Code Example #23
def resize_image(img: np.ndarray, new_size: Tuple[int, int]) -> np.ndarray:
    # TODO: write description
    img = Image.fromarray(img)
    img = img.resize(new_size)
    return np.array(img)
Code Example #24
 def resize(self, image: np.ndarray, anti_alias=False):
     image = Image.fromarray(np.uint8(image))
     resample = Image.LANCZOS if anti_alias else Image.NEAREST
     image = image.resize(self.size, resample)
     return np.asarray(image)
Code Example #25
File: image_util.py Project: stfnwong/lernomatic
def resize_to(img: np.ndarray,
              target_size: int,
              interp_method=Image.BICUBIC) -> np.ndarray:
    return img.resize((target_size, target_size), interp_method)
Code Example #26
 def __init__(self, pts: np.ndarray):
     if len(pts) < 3:
         raise ValueError('A polygon must contain at least 3 points')
     pts.resize((len(pts), 2))
     self._vertices = pts
     self._contained_point = None
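As in Example #9, `pts.resize((len(pts), 2))` only behaves as intended when `pts` already has shape (n, 2); for a flat array, `len(pts)` counts scalars, so the in-place resize zero-pads extra rows instead of pairing the coordinates. A short sketch of both cases (values are illustrative):

import numpy as np

pts = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]])  # already (3, 2): resize is a no-op
pts.resize((len(pts), 2))

flat = np.array([0.0, 0.0, 1.0, 0.0, 1.0, 1.0])       # a flat input would be mangled:
flat.resize((len(flat), 2))                            # len == 6, so three zero rows are appended
# flat -> shape (6, 2); rows 3-5 are all zeros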