Example #1
def refine_sky(bopt, image):
    # split the image into sky and ground pixel sets using the current border
    sky_mask = make_mask(bopt, image)
    ground = np.ma.array(
        image,
        mask=cv2.cvtColor(cv2.bitwise_not(sky_mask), cv2.COLOR_GRAY2BGR)
    ).compressed()
    sky = np.ma.array(
        image,
        mask=cv2.cvtColor(sky_mask, cv2.COLOR_GRAY2BGR)
    ).compressed()
    ground.shape = (ground.size // 3, 3)
    sky.shape = (sky.size // 3, 3)

    # split the sky pixels into two clusters with k-means
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    ret, label, center = cv2.kmeans(np.float32(sky), 2, None, criteria, 10,
                                    cv2.KMEANS_RANDOM_CENTERS)

    # covariance, mean and pseudo-inverse covariance per cluster and for the ground
    sigma_s1, mu_s1 = cv2.calcCovarMatrix(
        sky[label.ravel() == 0], None,
        cv2.COVAR_NORMAL | cv2.COVAR_ROWS | cv2.COVAR_SCALE)
    ic_s1 = cv2.invert(sigma_s1, cv2.DECOMP_SVD)[1]
    sigma_s2, mu_s2 = cv2.calcCovarMatrix(
        sky[label.ravel() == 1], None,
        cv2.COVAR_NORMAL | cv2.COVAR_ROWS | cv2.COVAR_SCALE)
    ic_s2 = cv2.invert(sigma_s2, cv2.DECOMP_SVD)[1]
    sigma_g, mu_g = cv2.calcCovarMatrix(
        ground, None, cv2.COVAR_NORMAL | cv2.COVAR_ROWS | cv2.COVAR_SCALE)
    icg = cv2.invert(sigma_g, cv2.DECOMP_SVD)[1]

    # keep the cluster whose mean lies farther from the ground distribution as the true sky
    if cv2.Mahalanobis(mu_s1, mu_g, ic_s1) > cv2.Mahalanobis(mu_s2, mu_g, ic_s2):
        mu_s, sigma_s, ics = mu_s1, sigma_s1, ic_s1
    else:
        mu_s, sigma_s, ics = mu_s2, sigma_s2, ic_s2

    # zero out border columns whose pixels are mostly closer to the ground model
    for x in range(image.shape[1]):
        cnt = np.sum(np.less(
            spatial.distance.cdist(image[0:bopt[x], x], mu_s, 'mahalanobis', VI=ics),
            spatial.distance.cdist(image[0:bopt[x], x], mu_g, 'mahalanobis', VI=icg)))
        if cnt < (bopt[x] / 2):
            bopt[x] = 0
    return bopt
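A minimal, self-contained sketch of the pattern refine_sky relies on (cv2.calcCovarMatrix with COVAR_NORMAL | COVAR_ROWS | COVAR_SCALE, a DECOMP_SVD pseudo-inverse, then cv2.Mahalanobis); the synthetic pixel data is illustrative only:

import cv2
import numpy as np

rng = np.random.default_rng(0)
# stand-in for the n*3 sky pixel array built above
sky = rng.normal((180, 160, 120), 10, size=(500, 3))

# COVAR_SCALE divides the scatter matrix by the sample count,
# giving the usual covariance estimate
sigma_s, mu_s = cv2.calcCovarMatrix(
    sky, None, cv2.COVAR_NORMAL | cv2.COVAR_ROWS | cv2.COVAR_SCALE)

# DECOMP_SVD returns a pseudo-inverse even when the covariance is singular
ics = cv2.invert(sigma_s, cv2.DECOMP_SVD)[1]

pixel = np.array([[175.0, 158.0, 125.0]])
print(cv2.Mahalanobis(pixel, mu_s, ics))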
Example #2
def find_matching_face(face_coefficients_to_match, coefficients_of_all_faces,
                       covariance):

    # set up loop variables

    nearest_face_index = 0
    nearest_face_distance = 100  # i.e. huge
    current_face = 0

    for pca_face_coefficient in coefficients_of_all_faces:

        # calculate the Mahalanobis distance between the coefficients we need to match and each from the set of faces
        # (skipping the first N eigenfaces that often contain just illumination variance, default N=3)

        m_dist = cv2.Mahalanobis(
            face_coefficients_to_match[:, args.eigenfaces_to_skip:args.eigenfaces],
            pca_face_coefficient.reshape(1, args.eigenfaces)[:, args.eigenfaces_to_skip:args.eigenfaces],
            np.linalg.inv(covariance))

        # alternatively use the L1 or L2 norm as per the original [Turk / Pentland 1991] paper - which used L1
        # m_dist = np.linalg.norm(face_coefficients_to_match[:, 3:args.eigenfaces] - pca_face_coefficient.reshape(1, args.eigenfaces)[:, 3:args.eigenfaces])

        if (m_dist < nearest_face_distance):
            nearest_face_index = current_face
            nearest_face_distance = m_dist

        current_face += 1

    return (nearest_face_index, nearest_face_distance)
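The covariance argument is presumably estimated from the training coefficients; a hedged sketch of producing it with NumPy (query_coefficients is a hypothetical name, and args comes from the surrounding script). The covariance must cover exactly the coefficient slice the function compares, and np.linalg.pinv would be a safer choice than np.linalg.inv if it turns out singular:

import numpy as np

# one row of PCA coefficients per training face; slice off the first N
# skipped eigenfaces so the matrix matches what the function compares
coeffs = np.asarray(coefficients_of_all_faces, dtype=np.float64)
used = coeffs[:, args.eigenfaces_to_skip:args.eigenfaces]
covariance = np.cov(used, rowvar=False)

# query_coefficients: a (1, args.eigenfaces) row for the face to identify
index, dist = find_matching_face(query_coefficients, coeffs, covariance)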
Example #3
def matchProfiles(model, profiles):
    tProfiles = np.zeros(profiles.shape[0])
    for l in range(profiles.shape[0]):
        covar = model[0][l]
        mean = model[1][l]
        icovar = cv2.invert(covar, flags=cv2.DECOMP_SVD)[1]

        # integer half-lengths: the sampled profile is 2m+1 long, the trained one 2n+1
        m = (profiles.shape[1] - 1) // 2
        n = (model[1].shape[1] - 1) // 2

        # evaluate every translation of the trained profile along the sampled one
        dist = np.zeros(2 * (m - n) + 1)
        for t in range(2 * (m - n) + 1):
            tMean = translateMean(mean, t, m)
            tIcovar = translateCovar(icovar, t, m)
            dist[t] = cv2.Mahalanobis(profiles[l], tMean, tIcovar)
        tProfiles[l] = m - n - np.argmin(dist)
    ''' PROFILE VISUALISATION
    ppl.vlines(np.arange(mean.shape[0]), np.zeros_like(mean), mean)
    ppl.show()
    ppl.vlines(np.arange(profiles[20].shape[0]), np.zeros_like(profiles[20]), profiles[20])
    ppl.show()
    print(tProfiles)
    '''

    return tProfiles
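translateMean and translateCovar are defined elsewhere in the source module; purely to make the sliding-window search concrete, here is a hypothetical stand-in for translateMean that embeds the length-(2n+1) trained mean at offset t inside a zero profile of length 2m+1 (a guess at the intent, not the original code):

import numpy as np

def translateMean(mean, t, m):
    # hypothetical stand-in: place the trained mean at offset t
    n = (mean.shape[0] - 1) // 2
    out = np.zeros(2 * m + 1)
    out[t:t + 2 * n + 1] = mean
    return out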
Example #4
def true_sky(border, src_image):

    # build the sky image mask and the ground image mask
    sky_mask = make_sky_mask(src_image, border, 1)
    ground_mask = make_sky_mask(src_image, border, 0)

    # extract the sky image and the ground image
    sky_image_ma = np.ma.array(src_image, mask = cv2.cvtColor(sky_mask, cv2.COLOR_GRAY2BGR))
    ground_image_ma = np.ma.array(src_image, mask = cv2.cvtColor(ground_mask, cv2.COLOR_GRAY2BGR))

    # reshape the sky and ground regions to n*3
    sky_image = sky_image_ma.compressed()
    ground_image = ground_image_ma.compressed()

    sky_image.shape = (sky_image.size//3, 3)
    ground_image.shape = (ground_image.size//3, 3)

    # refine the sky region boundary with k-means clustering (2 clusters)
    sky_image_float = np.float32(sky_image)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    flags = cv2.KMEANS_RANDOM_CENTERS
    compactness, labels, centers = cv2.kmeans(sky_image_float, 2, None, criteria, 10, flags)

    sky_label_0 = sky_image[labels.ravel() == 0]
    sky_label_1 = sky_image[labels.ravel() == 1]

    sky_covar_0, sky_mean_0 = cv2.calcCovarMatrix(sky_label_0, mean=None, flags=cv2.COVAR_ROWS + cv2.COVAR_NORMAL + cv2.COVAR_SCALE)
    sky_covar_1, sky_mean_1 = cv2.calcCovarMatrix(sky_label_1, mean=None, flags=cv2.COVAR_ROWS + cv2.COVAR_NORMAL + cv2.COVAR_SCALE)
    ground_covar, ground_mean = cv2.calcCovarMatrix(ground_image, mean=None, flags=cv2.COVAR_ROWS + cv2.COVAR_NORMAL + cv2.COVAR_SCALE)

    ic_s0 = cv2.invert(sky_covar_0, cv2.DECOMP_SVD)[1]
    ic_s1 = cv2.invert(sky_covar_1, cv2.DECOMP_SVD)[1]
    ic_g = cv2.invert(ground_covar, cv2.DECOMP_SVD)[1]

    # infer which cluster is the true sky region
    if cv2.Mahalanobis(sky_mean_0, ground_mean, ic_s0) > cv2.Mahalanobis(sky_mean_1, ground_mean, ic_s1):
        sky_mean = sky_mean_0
        sky_covar = sky_covar_0
        ic_s = ic_s0
    else:
        sky_mean = sky_mean_1
        sky_covar = sky_covar_1
        ic_s = ic_s1

    return sky_covar, sky_mean, ic_s, ground_covar, ground_mean, ic_g
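A hedged usage sketch: with the six statistics returned by true_sky, an individual pixel can be classified by comparing its distance to the sky and ground models (the pixel coordinates are illustrative):

sky_covar, sky_mean, ic_s, ground_covar, ground_mean, ic_g = true_sky(border, src_image)

# classify one pixel: closer to the sky model than to the ground model?
pixel = np.float64(src_image[120, 240]).reshape(1, 3)
is_sky = (cv2.Mahalanobis(pixel, sky_mean, ic_s) <
          cv2.Mahalanobis(pixel, ground_mean, ic_g))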
Example #5
import cv2
import numpy as np

X = np.array([[0, 0, 0, 100, 100, 150, -100, -150],
              [0, 50, -50, 0, 30, 100, -20, -100]],
             dtype=np.float64)

# transpose so that each row is one sample
X = X.transpose()  # X = X.T

cov, mean = cv2.calcCovarMatrix(X,
                                mean=None,
                                flags=cv2.COVAR_NORMAL + cv2.COVAR_ROWS)
print('mean=', mean)
print('cov=', cov)

# compute icov, the inverse of the covariance matrix cov
ret, icov = cv2.invert(cov)
print('icov=', icov)

v1 = np.array([[0], [0]], dtype=np.float64)
v2 = np.array([[0], [50]], dtype=np.float64)

# the Mahalanobis statistical distance between vectors v1 and v2
#   is computed using the inverse of the covariance matrix
dist = cv2.Mahalanobis(v1, v2, icov)
print('dist = ', dist)

cv2.waitKey()
cv2.destroyAllWindows()
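A quick cross-check against the definition: cv2.Mahalanobis(v1, v2, icov) computes sqrt((v1 - v2)^T * icov * (v1 - v2)), so the same value can be reproduced directly in NumPy. Note that COVAR_NORMAL without COVAR_SCALE leaves cov as the unscaled scatter matrix, so the resulting distance is scaled accordingly:

import numpy as np

diff = v1 - v2
dist_np = np.sqrt(diff.T @ icov @ diff).item()
print('dist_np =', dist_np)  # matches cv2.Mahalanobis(v1, v2, icov)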
Example #6
    for i in range(0, len(digits)):
        if labels[i] == c:
            grouped_data[c].append(digits[i])
    print('group:', c, len(grouped_data[c]))
    x = np.array(grouped_data[c])
    covar, mean = cv2.calcCovarMatrix(x, None, flags)  # OpenCV 3+ requires the mean argument
    icovar = np.empty(covar.shape, covar.dtype)
    cv2.invert(covar, icovar, cv2.DECOMP_SVD)
    icovariance.append(icovar)

#
# Q5: For each group, calculate the Mahalanobis distance of every element
#     to the centroid, then draw the centroid followed by the three farthest
#     elements (showing their Mahalanobis distance).
#

for index, centroid in enumerate(centroids):
    points = np.float32(grouped_data[index])
    icovar = np.float32(icovariance[index])

    mdistance = []
    for point in points:
        mdistance.append(cv2.Mahalanobis(centroid, point, icovar))

    farthest = sorted(mdistance, reverse=True)[:3]
    for far in farthest:
        print(mdistance.index(far), far)
        image_name = 'g' + str(index) + '_dist' + str(far) + '.png'
        draw_digit(points[mdistance.index(far)].tolist(), 200, True, True,
                   image_name)
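One caveat in the selection above: list.index returns the first match, so two elements at exactly the same distance would print the same index twice. A sketch of the same three-farthest selection via argsort avoids that:

# indices of the three largest Mahalanobis distances, ties handled correctly
order = np.argsort(mdistance)[::-1][:3]
for idx in order:
    print(idx, mdistance[idx])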
Example #7
    def process(self, inframe, outframe=None):
        """
        Runs the pipeline and sets all outputs to new values.
        """
        self.bgr_input = inframe.getCvBGR()

        # Step Resize_Image0:
        #self.__resize_image_input = inframe.getCvRGB()
        # Start measuring image processing time (NOTE: does not account for input conversion time):
        self.timer.start()

        # Step Blur0:
        self.__blur_input = self.bgr_input
        (self.blur_output) = self.__blur(self.__blur_input, self.__blur_type,
                                         self.__blur_radius)

        # Step HSL_Threshold0:
        self.__hsl_threshold_input = self.blur_output
        (self.hsl_threshold_output) = self.__hsl_threshold(
            self.__hsl_threshold_input, self.__hsl_threshold_hue,
            self.__hsl_threshold_saturation, self.__hsl_threshold_luminance)

        # Step CV_erode0:
        self.__cv_erode_0_src = self.hsl_threshold_output
        (self.cv_erode_0_output) = self.__cv_erode(
            self.__cv_erode_0_src, self.__cv_erode_0_kernel,
            self.__cv_erode_0_anchor, self.__cv_erode_0_iterations,
            self.__cv_erode_0_bordertype, self.__cv_erode_0_bordervalue)

        # Step CV_dilate0:
        self.__cv_dilate_src = self.cv_erode_0_output
        (self.cv_dilate_output) = self.__cv_dilate(
            self.__cv_dilate_src, self.__cv_dilate_kernel,
            self.__cv_dilate_anchor, self.__cv_dilate_iterations,
            self.__cv_dilate_bordertype, self.__cv_dilate_bordervalue)

        # Step CV_erode1:
        self.__cv_erode_1_src = self.cv_dilate_output
        (self.cv_erode_1_output) = self.__cv_erode(
            self.__cv_erode_1_src, self.__cv_erode_1_kernel,
            self.__cv_erode_1_anchor, self.__cv_erode_1_iterations,
            self.__cv_erode_1_bordertype, self.__cv_erode_1_bordervalue)

        # Step Find_Contours0:
        self.__find_contours_input = self.cv_erode_1_output
        (self.find_contours_output) = self.__find_contours(
            self.__find_contours_input, self.__find_contours_external_only)

        # Step Filter_Contours0:
        self.__filter_contours_contours = self.find_contours_output
        (self.filter_contours_output) = self.__filter_contours(
            self.__filter_contours_contours, self.__filter_contours_min_area,
            self.__filter_contours_min_perimeter,
            self.__filter_contours_min_width, self.__filter_contours_max_width,
            self.__filter_contours_min_height,
            self.__filter_contours_max_height, self.__filter_contours_solidity,
            self.__filter_contours_max_vertices,
            self.__filter_contours_min_vertices,
            self.__filter_contours_min_ratio, self.__filter_contours_max_ratio)

        # Step Convex_Hulls0:
        self.__convex_hulls_contours = self.filter_contours_output
        (self.convex_hulls_output) = self.__convex_hulls(
            self.__convex_hulls_contours)

        fps = self.timer.stop()
        numobjects = 0

        M = []

        #f = open("moments.txt", "a+")
        for contour in self.convex_hulls_output:
            #M.append(cv2.moments(contour))
            M.append(cv2.HuMoments(cv2.moments(contour)).flatten())
            #jevois.sendSerial(str(M[numobjects]))
            #for outputVal in M[numobjects]:
            #f.write(str(outputVal) + ', ')
            #f.write('\n')
            numobjects += 1
        #f.close()
        serialMessage = ('Frame:' + str(self.frame) + ', Process Time:' +
                         str(fps) + ', Objects:' + str(numobjects) + '=')
        if self.recordVideo is False and self.videoWriter is not None:
            #jevois.sendSerial('releasing video')
            self.videoWriter.release()
            self.videoWriter = None
            self.rawVideoWriter.release()
            self.rawVideoWriter = None
            self.hslVideoWriter.release()
            self.hslVideoWriter = None
        if outframe is not None or self.recordVideo is True:
            if self.recordVideo is True and self.videoWriter is None:
                timestamp = str(time.time())
                os.makedirs('videos/' + timestamp)
                self.videoWriter = cv2.VideoWriter(
                    'videos/' + timestamp + '/processed.avi',
                    cv2.VideoWriter_fourcc(*'XVID'), 22.0, (352, 288))
                self.rawVideoWriter = cv2.VideoWriter(
                    'videos/' + timestamp + '/raw.avi',
                    cv2.VideoWriter_fourcc(*'XVID'), 22.0, (352, 288))
                self.hslVideoWriter = cv2.VideoWriter(
                    'videos/' + timestamp + '/hsl.avi',
                    cv2.VideoWriter_fourcc(*'XVID'), 22.0, (352, 288))
                #jevois.sendSerial(str(self.videoWriter.isOpened()))
            if self.recordVideo is True:
                self.rawVideoWriter.write(self.bgr_input)
            outimg = self.bgr_input

            printedData = False
            textHeight = 22
            i = 0
            for contour in self.convex_hulls_output:
                printedData = True
                x, y, w, h = cv2.boundingRect(contour)
                cv2.circle(outimg, (x + int(w / 2), y + int(h / 2)), 3,
                           (255, 0, 0), 5)
                cv2.rectangle(outimg, (x, y), (x + w, y + h), (0, 255, 0), 3)
                mahalanobisVal = cv2.Mahalanobis(np.array(M[i]),
                                                 np.array(self.momentAvg),
                                                 np.array(self.invCovarMtrx))
                serialMessage = serialMessage + (
                    '\nObject:' + str(i) + '[x:' +
                    (str(x + int(w / 2)) + ',y:' + str(y + int(h / 2)) +
                     ',w:' + str(w) + ',h:' + str(h) + ',m:' +
                     str(mahalanobisVal) + ']'))
                cv2.putText(outimg, ('x: ' + str(x + int(w / 2)) + ', y: ' +
                                     str(y + int(h / 2)) + ', w: ' + str(w) +
                                     ', h: ' + str(h)), (3, 288 - textHeight),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                            cv2.LINE_AA)
                textHeight = textHeight + 15
                i += 1
            if self.sendFrames:
                jevois.sendSerial(serialMessage)
            cv2.drawContours(outimg, self.convex_hulls_output, -1, (0, 0, 255),
                             3)
            cv2.putText(outimg, "Glitch CubeVision", (3, 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)

            # Write frames/s info from our timer into the edge map (NOTE: does not account for output conversion time):

            #height, width, channels = self.outimg.shape # if self.outimg is grayscale, change to: height, width = self.outimg.shape

            cv2.putText(outimg, fps, (3, 288 - 6), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (255, 255, 255), 1, cv2.LINE_AA)

            # Convert our BGR output image to video output format and send to host over USB. If your output image is not
            # BGR, you can use sendCvGRAY(), sendCvRGB(), or sendCvRGBA() as appropriate:

            if outframe is not None:
                if self.displayHSLOutput:

                    outframe.sendCvBGR(
                        cv2.cvtColor(self.hsl_threshold_output,
                                     cv2.COLOR_GRAY2BGR))
                else:
                    outframe.sendCvBGR(outimg)
            if self.recordVideo is True:
                self.videoWriter.write(outimg)
                self.hslVideoWriter.write(
                    cv2.cvtColor(self.hsl_threshold_output,
                                 cv2.COLOR_GRAY2BGR))

        else:
            i = 0
            for contour in self.convex_hulls_output:
                x, y, w, h = cv2.boundingRect(contour)
                mahalanobisVal = cv2.Mahalanobis(np.array(M[i]),
                                                 np.array(self.momentAvg),
                                                 np.array(self.invCovarMtrx))
                serialMessage = serialMessage + (
                    '\nObject:' + str(i) + '[x:' + str(x + int(w / 2)) +
                    ',y:' + str(y + int(h / 2)) + ',w:' + str(w) + ',h:' +
                    str(h) + ',m:' + str(mahalanobisVal) + ']')
                i += 1
            if self.sendFrames:
                jevois.sendSerial(serialMessage)
        self.frame += 1
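self.momentAvg and self.invCovarMtrx are initialized elsewhere in this module; a hypothetical sketch of how such statistics could be built offline from the Hu moments of known-good training contours (training_contours is an illustrative name):

import cv2
import numpy as np

# one 7-element Hu-moment vector per training contour
train = np.array([cv2.HuMoments(cv2.moments(c)).flatten()
                  for c in training_contours])
covar, mean = cv2.calcCovarMatrix(
    train, None, cv2.COVAR_NORMAL | cv2.COVAR_ROWS | cv2.COVAR_SCALE)
momentAvg = mean.ravel()
invCovarMtrx = cv2.invert(covar, cv2.DECOMP_SVD)[1]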
Example #8
    def fit(self, img, verbose):
        """
        Fits the model to the given image.

        :param img: target image
        :type img: numpy.ndarray
        :param verbose: whether progress should be displayed
        :type verbose: bool
        :return: result of fitting
        :rtype: ASMFitResult
        """
        # make sure image is in greyscale
        grey_img = img.copy()
        if len(grey_img.shape) == 3:
            grey_img = cv.cvtColor(grey_img, cv.COLOR_BGR2GRAY)

        # scale down the image
        ratio = math.sqrt(40000 / (grey_img.shape[0] * grey_img.shape[1]))
        new_w = int(grey_img.shape[1] * ratio)
        new_h = int(grey_img.shape[0] * ratio)
        resized_img = cv.resize(grey_img, (new_w, new_h))

        # create temporary search model image
        cur_search = ModelImage()
        cur_search.shape_info = self.shape_info
        cur_search.image = resized_img

        # create fit result object for search
        params = np.zeros([1, self.eigenvalues.shape[0]])
        sv = cur_search.shape_vector
        sv.set_from_vector(self.project_param_to_shape(params)[0])
        st = sv.get_shape_transform_fitting_size(resized_img.shape)
        fit_result = ASMFitResult(params, st, self)
        cur_search.build_from_shape_vector(st)

        total_offset: int  # sum of offsets of current iteration
        shape_old = ShapeVector()
        self.feature_extractor.load_image(resized_img)

        if verbose:
            cur_search.show(True, "resized image")

        for level in range(self.pyramid_level - 1, -1, -1):
            if verbose:
                print(f"Pyramid level: {level}\n")
            for run in range(10):
                shape_old.set_from_points_array(
                    cur_search.points)  # store old shape
                total_offset = 0
                best_ep = np.zeros((self.n_landmarks, 2), int)  # np.int is removed in recent NumPy

                # find best fit for every point
                for i in range(self.n_landmarks):
                    if verbose:
                        print(f"Fitting point {i}\n")

                    cur_best = -1
                    best_i = 0
                    candidate_points, features = \
                        self.feature_extractor.get_candidates_with_feature(cur_search.points, i, level)

                    # select best candidate point with Mahalanobis distance
                    for j in range(len(candidate_points)):
                        x = np.zeros(len(features[j]))
                        for f, feature in enumerate(features[j]):
                            x[f] = feature

                        mean = self.mean_vec_pyr[level][i]
                        inv_cov = self.cov_mat_pyr_inv[level][i]
                        # ct = distance.mahalanobis(x, mean, inv_cov)
                        ct = cv.Mahalanobis(x, mean, inv_cov)
                        # if verbose:
                        #     print(f"Mahalanobis distance: {ct}")

                        if ct < cur_best or cur_best < 0:
                            cur_best = ct
                            best_i = j
                            best_ep[i, 0] = candidate_points[j][0]
                            best_ep[i, 1] = candidate_points[j][1]

                    total_offset += abs(best_i)
                    # if :
                    #     cur_search.show(True)

                # modify results with factor of current pyramid level
                for i in range(self.n_landmarks):
                    cur_search.points[i] = best_ep[i]
                    x = int(cur_search.points[i, 0])
                    cur_search.points[i, 0] = x << level
                    y = int(cur_search.points[i, 1])
                    cur_search.points[i, 1] = y << level
                    if level > 0:
                        cur_search.points[i, 0] += (1 << (level - 1))
                        cur_search.points[i, 1] += (1 << (level - 1))

                cur_search.shape_vector.set_from_points_array(
                    cur_search.points)

                # if verbose:
                #     cur_search.show(True)

                # project found shape to PCA model and back to get parameters
                fit_result = self.find_params_for_shape(
                    cur_search.shape_vector, shape_old, fit_result, level)

                # apply limits to found params
                for i, param in enumerate(fit_result.params[0]):
                    if param < self.params_limits[i, 0]:
                        fit_result.params[0, i] = self.params_limits[i, 0]
                    if param > self.params_limits[i, 1]:
                        fit_result.params[0, i] = self.params_limits[i, 1]

                cur_search.shape_vector.set_from_vector(
                    cv.PCABackProject(fit_result.params,
                                      self.pca_shape_pyr[level],
                                      self.eigenvectors_pyr[level])[0])
                cur_search.build_from_shape_vector(fit_result.similarity_trans)

                avg_mov = total_offset / self.n_landmarks
                if verbose:
                    print(f"Iteration: {run + 1}, Average offset: {avg_mov}\n")
                    cur_search.show(True, f"iteration {run + 1}")

                if avg_mov < 1.3:
                    break

            if verbose:
                print(
                    f"Finished fitting after {run + 1} iterations, Average offset of last iteration: {avg_mov}\n"
                )
                cur_search.show(True, f"level {level} final fit")

        st_ = SimilarityTransformation()
        st_.a = 1 / ratio
        fit_result.similarity_trans = st_.multiply(fit_result.similarity_trans)

        return fit_result
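The candidate-selection loop reduces to a small kernel: score each candidate's feature vector against the trained mean under the Mahalanobis metric and keep the minimum. A distilled sketch, assuming the features convert to float64 vectors of the trained length:

import numpy as np
import cv2 as cv

def best_candidate(features, mean, inv_cov):
    # distance of every candidate feature vector to the trained mean
    dists = [cv.Mahalanobis(np.float64(f), mean, inv_cov) for f in features]
    j = int(np.argmin(dists))
    return j, dists[j]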
Example #9
# cov, mean and X are assumed to carry over from Example #5
ret, icov = cv2.invert(cov)

dst = np.full((512, 512, 3), (255, 255, 255), dtype=np.uint8)
rows, cols, channel = dst.shape
centerX = cols // 2
centerY = rows // 2

v2 = np.zeros((2, 1), dtype=np.float64)
FLIP_Y = lambda y: rows - 1 - y

# draw Mahalanobis iso-distance bands
for y in range(rows):
    for x in range(cols):
        v2[0, 0] = x - centerX
        v2[1, 0] = FLIP_Y(y) - centerY  # flip the y-axis
        dist = cv2.Mahalanobis(mean, v2, icov)
        if dist < 0.1:
            dst[y, x] = [50, 50, 50]
        elif dist < 0.3:
            dst[y, x] = [100, 100, 100]
        elif dist < 0.8:
            dst[y, x] = [200, 200, 200]
        else:
            dst[y, x] = [250, 250, 250]

for k in range(X.shape[0]):
    x, y = X[k, :]
    cx = int(x + centerX)
    cy = int(y + centerY)
    cy = FLIP_Y(cy)
    cv2.circle(dst, (cx, cy), radius=5, color=(0, 0, 255), thickness=-1)
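The per-pixel double loop above can be vectorized; a sketch that produces the same iso-distance bands with einsum (points at equal Mahalanobis distance lie on ellipses aligned with the data's principal axes). rows, cols, centerX, centerY, mean and icov are as defined above:

import numpy as np

# offset of every pixel from the image center, with the y-axis flipped
ys, xs = np.mgrid[0:rows, 0:cols]
d = np.stack([xs - centerX, (rows - 1 - ys) - centerY], axis=-1).astype(np.float64)
diff = d - mean.ravel()
md = np.sqrt(np.einsum('...i,ij,...j->...', diff, icov, diff))

# same thresholds as the loop, applied to the whole distance map at once
dst2 = np.full((rows, cols, 3), 250, np.uint8)
dst2[md < 0.8] = 200
dst2[md < 0.3] = 100
dst2[md < 0.1] = 50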