Example #1
def thresholding(img, thresholds, invert):
    """
    Performs thresholding for color detection for each color channel (BRG)
    :param img: The image in which to search
    :param thresholds: The threshold values for a given color
    :param invert: Whether or not the channel must be inverted or not
    :return: The thresholded image
    """
    blue_threshold = thresholds[0]
    red_threshold = thresholds[1]
    green_threshold = thresholds[2]

    blue_invert = invert[0]
    red_invert = invert[1]
    green_invert = invert[2]

    imgb = cv2.threshold(cv2.extractChannel(img, 0), blue_threshold, 255,
                         cv2.THRESH_BINARY)[1]
    imgr = cv2.threshold(cv2.extractChannel(img, 1), red_threshold, 255,
                         cv2.THRESH_BINARY)[1]
    imgg = cv2.threshold(cv2.extractChannel(img, 2), green_threshold, 255,
                         cv2.THRESH_BINARY)[1]

    if blue_invert:
        imgb = cv2.bitwise_not(imgb)

    if red_invert:
        imgr = cv2.bitwise_not(imgr)

    if green_invert:
        imgg = cv2.bitwise_not(imgg)

    return cv2.bitwise_and(imgb, cv2.bitwise_and(imgr, imgg))
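A minimal usage sketch for the function above (the image path and threshold values are illustrative, not from the original project):

import cv2

img = cv2.imread('scene.png')  # any BGR image; path is a placeholder
# keep pixels with blue > 100 while suppressing strong green and red
mask = thresholding(img, thresholds=(100, 80, 80), invert=(False, True, True))
cv2.imshow('mask', mask)
cv2.waitKey(0)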
Example #2
def main():
    parser = argparse.ArgumentParser(description='OpenCV test')
    parser.add_argument('camera', help='camera to use', type=str)
    parser.add_argument('hidraw', help='hidraw control device', type=str)
    parser.add_argument('out', help='output name', type=str)

    np.set_printoptions(linewidth=200, suppress=True)

    args = parser.parse_args()

    cap1 = cv2.VideoCapture(args.camera)
    cap1.set(cv2.CAP_PROP_FRAME_WIDTH, 752)
    cap1.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    set_manual_exposure(args.hidraw, 50000)

    key = 0
    i = 0

    while key != ord('q'):
        ret, image1 = cap1.read()
        gray_l = cv2.extractChannel(image1, 1)
        gray_r = cv2.extractChannel(image1, 2)

        cv2.imshow("right", gray_r)
        cv2.imshow("left", gray_l)

        key = cv2.waitKey(1)
        if key == ord('w'):
            print(f"Write image {i}")
            cv2.imwrite(args.out + str(i) + "_left.jpg", gray_l)
            cv2.imwrite(args.out + str(i) + "_right.jpg", gray_r)
            i += 1

    cap1.release()
Example #3
def binarizeSubt(inCvBgr):
    green = cv2.extractChannel(inCvBgr, 1)
    red = cv2.extractChannel(inCvBgr, 2)
    outCvGray = cv2.subtract(green, red)
    outCvGray = cv2.erode(outCvGray,
                          None,
                          iterations=int(p.visionParams[p.binErIter]))
    outCvGray = cv2.dilate(outCvGray,
                           None,
                           iterations=int(p.visionParams[p.binDiIter]))
    return cv2.threshold(outCvGray, 0, 255, cv2.THRESH_OTSU)[1]
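A note on the final line: cv2.THRESH_BINARY equals 0, so passing cv2.THRESH_OTSU alone is the same as cv2.THRESH_BINARY + cv2.THRESH_OTSU, and the explicit threshold argument (0 here) is ignored — Otsu's method derives the threshold from the image histogram. The chosen value is available as the first element of the return tuple:

thresh_val, binary = cv2.threshold(outCvGray, 0, 255,
                                   cv2.THRESH_BINARY + cv2.THRESH_OTSU)
print(thresh_val)  # threshold selected by Otsu's method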
Example #4
    def __get_the_most_blue_position(self, frame):
        # OpenCV uses BGR not RGB !!!
        redFrame = cv2.extractChannel(frame, 2)
        greenFrame = cv2.extractChannel(frame, 1)
        blueFrame = cv2.extractChannel(frame, 0)

        redGreenComponents = cv2.addWeighted(redFrame, 0.5, greenFrame, 0.5, 0)
        uniformBlueFrame = cv2.subtract(blueFrame, redGreenComponents)

        minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(uniformBlueFrame)

        return maxLoc
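Both cv2.addWeighted and cv2.subtract saturate for uint8 inputs, so uniformBlueFrame holds max(B - (R + G)/2, 0) per pixel, and maxLoc is the position of the most blue-dominant pixel. A rough NumPy equivalent (a sketch, not the original code):

import numpy as np

b = frame[:, :, 0].astype(np.int16)
g = frame[:, :, 1].astype(np.int16)
r = frame[:, :, 2].astype(np.int16)
uniform_blue = np.clip(b - (r + g) // 2, 0, 255).astype(np.uint8)
row, col = np.unravel_index(np.argmax(uniform_blue), uniform_blue.shape)
# note: cv2.minMaxLoc returns (x, y), i.e. (col, row)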
Example #5
 def FindSquares(self):
     for channel in range(3):
         image_single_channel = cv2.extractChannel(self.image, channel)
         edges = cv2.Canny(image_single_channel,
                           5,
                           self.canny_threshold,
                           apertureSize=5)
         edges = cv2.dilate(
             edges, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))
         contours, _ = cv2.findContours(edges, cv2.RETR_TREE,
                                        cv2.CHAIN_APPROX_SIMPLE)
         for i in range(len(contours)):
             contour = contours[i]
             arc_length = 0.02 * cv2.arcLength(contour, True)
             approximative_contour = cv2.approxPolyDP(
                 contour, arc_length, True)
             max_cosine = 0
             if len(approximative_contour) == 4 and cv2.isContourConvex(
                     approximative_contour
             ) and cv2.contourArea(
                     approximative_contour) > self.min_contour_area_size:
                 for j in range(2, 5):
                     cosine = computeCosine(approximative_contour[j - 1][0],
                                            approximative_contour[j % 4][0],
                                            approximative_contour[j - 2][0])
                     max_cosine = max(max_cosine, cosine)
                 if max_cosine < 0.3:
                     self.squares.append(approximative_contour)
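computeCosine is not shown in this example; a plausible implementation, matching the angle test from OpenCV's classic squares.cpp sample (an assumption, not the example's actual helper), returns the cosine of the angle at the shared vertex:

import numpy as np

def computeCosine(p0, p1, p2):
    # cosine of the angle at vertex p2, between vectors (p0 - p2) and (p1 - p2)
    d1 = np.asarray(p0, dtype=np.float64) - np.asarray(p2, dtype=np.float64)
    d2 = np.asarray(p1, dtype=np.float64) - np.asarray(p2, dtype=np.float64)
    return abs(np.dot(d1, d2)) / (np.linalg.norm(d1) * np.linalg.norm(d2) + 1e-10)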
Example #6
    def callback(self, data):
        try:
            frame = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
            return
        # Take each frame
        height, width, _ = frame.shape
        # Convert BGR to YCrCb
        frame_ycrcb = cv2.cvtColor(frame, cv2.COLOR_BGR2YCR_CB)
        frame_cr = cv2.extractChannel(frame_ycrcb, 1)

        _, frame_cr_highlights = cv2.threshold(
            frame_cr, cv2.getTrackbarPos('Thresh_Min', 'frame'), 255,
            cv2.THRESH_BINARY)
        # frame_cr_highlights = cv2.adaptiveThreshold(frame_cr,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,2)
        # Threshold the HSV image to get only blue colors
        # mask = cv2.inRange(hsv, lower_green, upper_green)

        keypoints = self.blob_detect.detect(frame_cr_highlights)

        im_with_keypoints = cv2.drawKeypoints(frame_cr_highlights, keypoints, np.array([]), (0, 255, 0),
                                              cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        if len(keypoints) > 0:
            center = (width // 2, height // 2)
            target = (int(keypoints[0].pt[0]), int(keypoints[0].pt[1]))
            distance = math.sqrt((center[0] - target[0])**2 + (center[1] - target[1])**2)

            # print(self.pid_controller.out(distance))

            cv2.line(im_with_keypoints, center, target, (0, 255-distance, distance), 5)

        cv2.imshow('frame', im_with_keypoints)
        cv2.waitKey(1)
Example #7
def detect_hue(img, color, sat_thresh=0, val_thresh=0):
    """Detect the given color in an HSV image via hue/saturation/value thresholds."""

    imgval = cv2.extractChannel(img, 2)

    if color == "yellow":
        # keep hues in the 20-40 band: hue > 20 AND hue <= 40
        imghue1 = cv2.threshold(cv2.extractChannel(img, 0), 20, 255,
                                cv2.THRESH_BINARY)[1]
        imghue2 = cv2.threshold(cv2.extractChannel(img, 0), 40, 255,
                                cv2.THRESH_BINARY_INV)[1]
        #cv2.imshow('hue1', imghue1)
        #cv2.imshow('hue2', imghue2)
        imghue = cv2.bitwise_and(imghue1, imghue2)
        #cv2.imshow('hue', imghue)
        #cv2.waitKey(0)

        imgsat = cv2.threshold(cv2.extractChannel(img, 1), sat_thresh, 255,
                               cv2.THRESH_BINARY)[1]
        imgval = cv2.threshold(cv2.extractChannel(img, 2), val_thresh, 255,
                               cv2.THRESH_BINARY)[1]
        imgmask = cv2.bitwise_and(imgsat, imgval)
        result = cv2.bitwise_and(imghue, imgmask, dst=None)
        #cv2.imshow('result', result)
        return result
    elif color == "white":
        if val_thresh == 0:
            val_thresh = 140
        if sat_thresh == 0:
            sat_thresh = 150
        # white pixels have low saturation: HSV channel 1, inverted threshold
        imgsat = cv2.threshold(cv2.extractChannel(img, 1), sat_thresh, 255,
                               cv2.THRESH_BINARY_INV)[1]
        #cv2.imshow('Saturation', imgsat)
        imgval = cv2.threshold(cv2.extractChannel(img, 2), val_thresh, 255,
                               cv2.THRESH_BINARY)[1]
        #cv2.imshow('Value', imgval)
        return cv2.bitwise_and(imgsat, imgval)
Example #8
    def filter_bright(self, frame):
        """
        Looks for the brightest colors in the images
        """
        # frameblur = cv2.blur(frame, (10, 10))
        framehsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        framethresh = cv2.inRange(cv2.extractChannel(framehsv, 2), 175, 255)

        frameb = cv2.extractChannel(frame, 0)
        frameg = cv2.extractChannel(frame, 1)
        framer = cv2.extractChannel(frame, 2)

        newframe = frame.copy()
        newframe[:, :, 0] = cv2.bitwise_and(frameb, framethresh)
        newframe[:, :, 1] = cv2.bitwise_and(frameg, framethresh)
        newframe[:, :, 2] = cv2.bitwise_and(framer, framethresh)

        return newframe  # returns the thresholded image
Example #9
 def __cv_extractchannel(src, channel):
     """Extracts given channel from an image.
     Args:
         src: A numpy.ndarray.
         channel: Zero indexed channel number to extract.
     Returns:
          The result as a numpy.ndarray.
     """
     return cv2.extractChannel(src, int(channel + 0.5))
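For reference, cv2.extractChannel(src, c) yields the same pixel values as the NumPy slice src[:, :, c], except that extractChannel returns a contiguous copy while the slice is a view; a quick equivalence check (a sketch):

import cv2
import numpy as np

src = np.random.randint(0, 256, (4, 4, 3), dtype=np.uint8)
assert (cv2.extractChannel(src, 1) == src[:, :, 1]).all()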
Example #10
 def separate_white_yellow(self, frame):
     """
     Separates white and yellow components of image, kind of.
     That's what it's used for, but it basically white and not white.
     """
     frameb = cv2.extractChannel(frame, 0)
     white = cv2.inRange(frameb, 128, 255)
     notwhite = cv2.inRange(white, 0, 0)
     notblack = cv2.inRange(frameb, 1, 255)
     yellow = cv2.bitwise_and(notwhite, notblack)
     return white, yellow
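Here cv2.inRange(white, 0, 0) simply inverts the binary mask (255 wherever white is 0). Since the inRange output contains only 0 and 255, this one-liner is equivalent:

notwhite = cv2.bitwise_not(white)  # same result for a strictly 0/255 mask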
Example #11
    def filter_bright(self, frame):
        """
        Looks for the brightest colors in the images.
        Blacks out any part of the image that doesn't meet that threshold
        """
        # frameblur = cv2.blur(frame, (10, 10))
        framehsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)  # convert image to HSV
        framethresh = cv2.inRange(cv2.extractChannel(framehsv, 2), 175, 255)  # threshold based on value

        frameb = cv2.extractChannel(frame, 0)  # blue channel
        frameg = cv2.extractChannel(frame, 1)  # green channel
        framer = cv2.extractChannel(frame, 2)  # red channel

        # black out the portion of the image that doesn't meet the threshold
        newframe = frame.copy()  # create a new copy of the frame
        newframe[:, :, 0] = cv2.bitwise_and(frameb, framethresh)  # bitwise AND the blue channel and the thresholded image
        newframe[:, :, 1] = cv2.bitwise_and(frameg, framethresh)
        newframe[:, :, 2] = cv2.bitwise_and(framer, framethresh)

        return newframe  # returns the thresholded image
Example #12
    def _test_color(self, img):
        b = cv2.bitwise_not(cv2.extractChannel(img, 0))
        g = cv2.bitwise_not(cv2.extractChannel(img, 1))
        r = cv2.bitwise_not(cv2.extractChannel(img, 2))

        cv2.threshold(b, 64, 255, cv2.THRESH_BINARY, dst=b)
        cv2.threshold(g, 64, 255, cv2.THRESH_BINARY, dst=g)
        cv2.threshold(r, 64, 255, cv2.THRESH_BINARY, dst=r)

        sign = cv2.bitwise_or(
            cv2.bitwise_or(cv2.bitwise_xor(b, r), cv2.bitwise_xor(b, g)),
            cv2.bitwise_xor(r, g))

        sign = cv2.GaussianBlur(sign, (11, 11), 0.0)

        cv2.threshold(sign, 16, 255, cv2.THRESH_BINARY, dst=sign)

        if cv2.getVersionMajor() == 3:
            # OpenCV 3.4.0
            _, contours, _ = cv2.findContours(sign, cv2.RETR_LIST,
                                              cv2.CHAIN_APPROX_SIMPLE)
        else:
            # OpenCV 4.1.0
            contours, _ = cv2.findContours(sign, cv2.RETR_LIST,
                                           cv2.CHAIN_APPROX_SIMPLE)

        combined = cv2.cvtColor(sign, cv2.COLOR_GRAY2BGR)
        good_contours = []

        for i, k in enumerate(contours):
            x, y, w, h = cv2.boundingRect(k)

            if w > sizes.mmToPix(3, self._DPI) and h > sizes.mmToPix(
                    5, self._DPI):
                combined = cv2.drawContours(combined,
                                            contours,
                                            i, (0, 0, 255),
                                            thickness=cv2.FILLED)
                good_contours.append(k)

        return good_contours
Example #13
    def show_brg(self, channel=rdp.ALL_CHANNELS, wait=True):
        """
        Shows the BGR image to the user
        :param channel:
        :param wait:
        :return:
        """
        if channel == rdp.ALL_CHANNELS:
            cv2.imshow('All channels', self.img)
        elif channel == rdp.BLUE_CHANNEL:
            z = cv2.extractChannel(self.img, channel, dst=None)
            cv2.imshow('Blue channel', z)
        elif channel == rdp.GREEN_CHANNEL:
            z = cv2.extractChannel(self.img, channel, dst=None)
            cv2.imshow('Green channel', z)
        elif channel == rdp.RED_CHANNEL:
            z = cv2.extractChannel(self.img, channel, dst=None)
            cv2.imshow('Red channel', z)

        if wait:
            cv2.waitKey(0)
Example #14
def imageCompare(test, golden, listPositionFirstDifference, displayResult,
                 epsilon):
    #print("running imageCompare ...")
    identical = True
    numberOfDifferences = -1
    error = -1

    if (len(golden.shape) > 2):
        channels = golden.shape[2]
    else:
        channels = 1

    if test.shape[0] != golden.shape[0] or test.shape[1] != golden.shape[1]:
        # test matching channels and depths too
        identical = False
        print("Error: image sizes do no match, golden: " + golden.shape +
              " test: " + test.shape)
    else:
        print("Comparing image shape (" + str(golden.shape) + "), channels: " +
              str(channels))
        #difference = golden
        error = cv2.norm(test, golden, cv2.NORM_L1)
        error = error / (golden.shape[0] * golden.shape[1])
        #np.absdiff(test,golden,difference)
        difference = cv2.absdiff(test, golden)

        numberOfDifferences = 0
        if (channels == 1):
            numberOfDifferences += cv2.countNonZero(difference)
        else:
            for k in range(channels):
                differenceChannel = cv2.extractChannel(difference, k)
                numberOfDifferences += cv2.countNonZero(differenceChannel)
        if (numberOfDifferences != 0):
            identical = False
        #identical = False

        #if displayResult:
        #    differenceWindowName = "difference"
        # imshow difference in window

        if listPositionFirstDifference and not identical:
            #print("calling listFirstDifferenceTwoMatrices...")
            identical = not listDifferenceTwoMatrices(test, golden, epsilon,
                                                      displayResult)

    if identical:
        print("Success! Images match!")
    else:
        print("Failure! Images do no match!")

    #return identical
    return numberOfDifferences, error
Example #15
    def show_hsv(self, channel=rdp.ALL_CHANNELS, wait=True):
        """
        Shows the HSV image to the user.
        channel will indicate which channel to show.
        :param channel:
        :param wait:
        :return:
        """
        if channel == rdp.ALL_CHANNELS:
            # print('Choose a channel, bro.')
            z = cv2.extractChannel(self.imgHSV, rdp.HUE_CHANNEL, dst=None)
            cv2.imshow('Hue channel', z)
            z = cv2.extractChannel(self.imgHSV,
                                   rdp.SATURATION_CHANNEL,
                                   dst=None)
            cv2.imshow('Saturation channel', z)
            z = cv2.extractChannel(self.imgHSV, rdp.VALUE_CHANNEL, dst=None)
            cv2.imshow('Value channel', z)
        elif channel == rdp.HUE_CHANNEL:
            z = cv2.extractChannel(self.imgHSV, channel, dst=None)
            cv2.imshow('Hue channel', z)
        elif channel == rdp.SATURATION_CHANNEL:
            z = cv2.extractChannel(self.imgHSV, channel, dst=None)
            cv2.imshow('Saturation channel', z)
        elif channel == rdp.VALUE_CHANNEL:
            z = cv2.extractChannel(self.imgHSV, channel, dst=None)
            cv2.imshow('Value channel', z)

        if wait:
            cv2.waitKey(0)
Example #16
def enhance(image, level, check):
    if check == 1:
        img1 = simplest_cb(image, 5)
    else:
        img1 = image
    img1 = np.uint8(img1)
    LabIm1 = cv2.cvtColor(img1, cv2.COLOR_BGR2Lab)
    L1 = cv2.extractChannel(LabIm1, 0)
    # Apply CLAHE to the L (lightness) channel
    result = applyCLAHE(LabIm1, L1)
    img2 = result[0]
    L2 = result[1]
    w1 = calWeight(img1, L1)
    w2 = calWeight(img2, L2)
    sumW = cv2.add(w1, w2)
    w1 = cv2.divide(w1, sumW)
    w2 = cv2.divide(w2, sumW)
    return fuseTwoImages(w1, img1, w2, img2, level)  
Example #17
def projectedhistogram(img_in, string):
    if string == "Horizontal":
        sz = img_in.shape[0]
    else:
        sz = img_in.shape[1]
    nonzero_counts = []
    img_in = cv2.extractChannel(img_in, 0)
    for j in range(sz):
        data = getrow(img_in, j) if (string == "Horizontal") else getcol(
            img_in, j)
        count = cv2.countNonZero(np.array(data))
        nonzero_counts.append(count)
    maxnum = max(nonzero_counts) if nonzero_counts else 0
    if maxnum > 0:
        nonzero_counts = [c / float(maxnum) for c in nonzero_counts]
    return nonzero_counts
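The per-row/per-column loop above can be vectorized; assuming img_in is the single binary channel already extracted, a NumPy sketch of the same normalized projection histogram:

import numpy as np

def projectedhistogram_np(img_bin, horizontal=True):
    axis = 1 if horizontal else 0  # count non-zeros across each row or each column
    counts = np.count_nonzero(img_bin, axis=axis).astype(np.float64)
    m = counts.max() if counts.size else 0.0
    return counts / m if m > 0 else counts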
Example #18
def line_filter(image: ndarray, line_search_mask: ndarray, grass_mask: ndarray,
                line_width: int):
    start_time = time()

    # line_search_space = image
    line_search_space = cv2.bitwise_and(image, image, mask=line_search_mask)
    blue_component = cv2.extractChannel(line_search_space, 2)  # channel 2 is red for BGR input (blue only if the frame is RGB)

    half_linewidth_shifted_down = np.roll(blue_component,
                                          round(line_width / 2), 0)
    half_linewidth_shifted_up = np.roll(blue_component, -round(line_width / 2),
                                        0)
    diff = np.minimum(blue_component - half_linewidth_shifted_down,
                      blue_component - half_linewidth_shifted_up)

    target = cv2.bitwise_and(diff, diff, mask=cv2.bitwise_not(grass_mask))
    # turn into a mask
    target_mask = cv2.inRange(target, np.array([1]), np.array([255]))

    # print(f'line_filter: {(time() - start_time) * 1000} ms')
    return target_mask
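Note that blue_component is uint8, so the two subtractions above wrap modulo 256 rather than saturating; if signed differences are what is wanted, a variant that widens the type first looks like this (a sketch of an alternative, not necessarily the author's intent):

signed = blue_component.astype(np.int16)
diff = np.minimum(signed - np.roll(signed, round(line_width / 2), 0),
                  signed - np.roll(signed, -round(line_width / 2), 0))
diff = np.clip(diff, 0, 255).astype(np.uint8)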
Example #19
    def run(self):
        self.vs = PiVideoStream().start()
        time.sleep(1.0)
        while True:
            frame = self.vs.read()

            # reduce the frame resolution
            frame = imutils.resize(frame, width=400)

            cv2.imshow('frame', frame)
            # frameBGR = cv2.GaussianBlur(frame, (5, 5), 0)  # no need to blur here

            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            h = cv2.extractChannel(hsv, 0)  # hue channel
            mask = cv2.inRange(h, 170, 179)  # red
            # mask = cv2.inRange(h, 55, 70)  # green
            # mask = cv2.inRange(h, 90, 100)  # blue

            kernel_open = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
            kernel_close = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                     (15, 15))

            mask_m = cv2.morphologyEx(mask,
                                      cv2.MORPH_OPEN,
                                      kernel_open,
                                      iterations=3)
            mask_m = cv2.morphologyEx(mask_m,
                                      cv2.MORPH_CLOSE,
                                      kernel_close,
                                      iterations=3)

            ref, contours, hierarchy = cv2.findContours(mask_m, cv2.RETR_TREE,
                                                        cv2.CHAIN_APPROX_SIMPLE)

            galet_position = [0., 0.]
            galet_found = False

            list_center = []
            for c in contours:
                # calculate moments for each contour
                M = cv2.moments(c)

                # calculate x,y coordinate of center
                if (M["m00"] > 300):  #filtrage, diminuer le 300 si nécessaire
                    cX = int(M["m10"] / M["m00"])
                    cY = int(M["m01"] / M["m00"])
                    list_center.append([cY, cX])

            if len(list_center) > 0:
                galet_found = True
                list_center_np = np.array(list_center)

                index_max = np.argmax(list_center_np, axis=0)
                position_max = list_center_np[index_max[0]]

                center_image_X = mask_m.shape[1] / 2
                galet_error_X = position_max[1] - center_image_X

                with verrou:
                    global nouvelle_photo, resultat_photo
                    resultat_photo = galet_error_X
                    nouvelle_photo = True
                    print(nouvelle_photo)
                print("palet trouvé")

            else:
                with verrou:
                    global nouvelle_photo
                    nouvelle_photo = False
                    print(nouvelle_photo)
                global robot
                robot.arreter()
                print("aucun palet trouvé")

            time.sleep(0.01)
        cv2.destroyAllWindows()
        self.vs.stop()
Example #20
    def run(self):
        self.cap = cv2.VideoCapture(0)

        while True:
            ret, frame = self.cap.read()
            # frame = cv2.imread('image_test2.jpg')

            frameBGR = cv2.GaussianBlur(frame, (5, 5), 0)
            hsv = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2HSV)  # use the blurred frame
            h = cv2.extractChannel(hsv, 0)  # hue channel
            mask = cv2.inRange(h, 170, 179)  # red
            # mask = cv2.inRange(h, 55, 70)  # green
            # mask = cv2.inRange(h, 90, 100)  # blue

            kernel_open = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
            kernel_close = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                     (15, 15))

            mask_m = cv2.morphologyEx(mask,
                                      cv2.MORPH_OPEN,
                                      kernel_open,
                                      iterations=3)
            mask_m = cv2.morphologyEx(mask_m,
                                      cv2.MORPH_CLOSE,
                                      kernel_close,
                                      iterations=3)

            #cv2.imshow('mask_m', mask_m)

            ref, contours, hierarchy = cv2.findContours(mask_m, cv2.RETR_TREE,
                                                        cv2.CHAIN_APPROX_SIMPLE)

            galet_position = [0., 0.]
            galet_found = False

            list_center = []
            for c in contours:
                # calculate moments for each contour
                M = cv2.moments(c)

                # calculate x,y coordinate of center
                if (M["m00"] != 0 and M["m00"] != 0):
                    cX = int(M["m10"] / M["m00"])
                    cY = int(M["m01"] / M["m00"])
                    list_center.append([cY, cX])
                    cv2.circle(frame, (cX, cY), 5, (0, 0, 255), -1)
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(frame, "(" + str(cY) + "," + str(cX) + ")",
                                (cX, cY + 5), font, 1, (255, 255, 0), 2,
                                cv2.LINE_AA)

            if len(list_center) > 0:
                galet_found = True
                list_center_np = np.array(list_center)

                index_max = np.argmax(list_center_np, axis=0)
                position_max = list_center_np[index_max[0]]

                center_image_X = mask_m.shape[1] / 2
                galet_error_X = position_max[1] - center_image_X

                with verrou:
                    global nouvelle_photo, resultat_photo
                    resultat_photo = galet_error_X
                    nouvelle_photo = True
                    print(nouvelle_photo)
                print("palet trouvé")

            else:
                with verrou:
                    global nouvelle_photo
                    nouvelle_photo = False
                    print(nouvelle_photo)
                global robot
                robot.arreter()
                print("aucun palet trouvé")

            time.sleep(0.1)
Example #21
def operator():

    rospy.init_node('operator', anonymous=True)
    pub = rospy.Publisher('image_data', Image, queue_size=10)

    cap = cv2.VideoCapture('hand_video4.mp4')
    #W = video.get(cv2.CAP_PROP_FRAME_WIDTH)
    #H = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
    frame_rate = 20.0  # frame rate
    size1 = (1080, 720)  # video frame size
    fmt = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')  # file format (mp4 here)
    writer = cv2.VideoWriter('outtest2.mp4', fmt, frame_rate, size1)  # create the writer

    while (cap.isOpened()):
        ret, frame = cap.read()
        count = 0

        if not ret:
            break

    # frame=cv2.medianBlur(frame,5)
    # frame=cv2.GaussianBlur(frame,(5,5),0)

        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        hue = cv2.extractChannel(hsv, 0)
        hue = cv2.inRange(hue, 4, 20)
        # hue = cv2.GaussianBlur(hue,(5,5), 0)
        # frame=cv2.medianBlur(frame,5)
        hue = cv2.medianBlur(hue, 9)
        # output = hue[ 0 : frame.shape[0], 0 : frame.shape[1]]
        #cv2.imshow('mark', frame)
        # detect contours
        image, contours, hierarchy = cv2.findContours(hue, cv2.RETR_TREE,
                                                      cv2.CHAIN_APPROX_SIMPLE)

        # prepare to find the largest contour
        ci, max_area = -1, 0
        for i in range(len(contours)):
            cnt = contours[i]
            area = cv2.contourArea(cnt)
            # find the largest contour
            if area > max_area:
                max_area = area
                ci = i
        cnt = contours[ci]

        # compute the convex hull of the contour
        hull = cv2.convexHull(cnt)
        hull2 = hull.copy()

        # blob detection with connected components
        n, img_label, data, center = cv2.connectedComponentsWithStats(hue)

    #    tr_x = lambda x : x * 150 / 500  # X axis: image coords -> real coords
    #    tr_y = lambda y : y * 150 / 500  # Y axis: same
        img_trans_marked = frame.copy()
        for i in range(1, n):
            x, y, w, h, size = data[i]
            if size < 30000:  # ignore blobs with area under 30000 px
                continue
        #  detected_obj.append( dict( x = tr_x(x),
        #                        y = tr_y(y),
        #                       w = tr_x(w),
            #                      h = tr_y(h),
            #                      cx = tr_x(center[i][0]),
            #                     cy = tr_y(center[i][1])))
            # check
            w_size = w / 2
            w_size = int(w_size)
            h_size = h / 2
            h_size = int(h_size)
            z_size = (h_size + w_size) / 2
            z_size = int(z_size)
            img_trans_marked = cv2.rectangle(img_trans_marked, (x, y),
                                             (x + w, y + h), (0, 255, 0), 2)
            img_trans_marked = cv2.circle(
                img_trans_marked, (int(center[i][0]), int(center[i][1])), 5,
                (0, 0, 255), -1)
            img_trans_marked = cv2.circle(
                img_trans_marked, (int(center[i][0]), int(center[i][1])),
                w_size, (0, 0, 255), 5)
            #        img_trans_marked = cv2.circle(img_trans_marked, (int(center[i][0]),int(center[i][1])),h_size,(0,0,255),5)
            img_trans_marked = cv2.circle(
                img_trans_marked, (int(center[i][0]), int(center[i][1])),
                z_size, (0, 0, 255), 5)
    #        img_trans_marked = cv2.circle(img_trans_marked, (int(center[i][0]),int(center[i][1])),600,(0,0,255),5)
    #       img_trans_marked = cv2.circle(img_trans_marked, (int(center[i][0]),int(center[i][1])),700,(0,0,255),5)

    # draw the largest contour and its convex hull
        img_trans_marked = cv2.drawContours(img_trans_marked, [cnt], -1,
                                            (255, 0, 0), 3)
        #        img_trans_marked = cv2.drawContours(img_trans_marked, [hull], -1, (0,255,0), 3)

        # detect convexity defects
        cnt = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
        hull = cv2.convexHull(cnt, returnPoints=False)

        # draw the convexity defect points
        defects = cv2.convexityDefects(cnt, hull)
        for i in range(defects.shape[0]):
            s, e, f, d = defects[i, 0]
            start = tuple(cnt[s][0])
            end = tuple(cnt[e][0])
            far = tuple(cnt[f][0])
            #            dist = cv2.pointPolygonTest(hull2,far,True)
            #            print(d)
            if d > 45000:
                count += 1

            cv2.line(img_trans_marked, start, end, [0, 255, 0], 2)
            #            cv2.line(img_trans_marked, avr, far, [0, 255, 0], 2)

            # u = start-far
            # numpy.linalg.norm(u)

            cv2.circle(img_trans_marked, far, 5, [0, 255, 0], -1)

    #    print(count)
        if count == 3:
            cv2.putText(img_trans_marked, 'choki', (0, 100),
                        cv2.FONT_HERSHEY_PLAIN, 6, (255, 255, 255), 5,
                        cv2.LINE_AA)
        if count <= 2:
            cv2.putText(img_trans_marked, 'gu', (0, 100),
                        cv2.FONT_HERSHEY_PLAIN, 6, (255, 255, 255), 5,
                        cv2.LINE_AA)

        if count >= 4:
            cv2.putText(img_trans_marked, 'pa', (0, 100),
                        cv2.FONT_HERSHEY_PLAIN, 6, (255, 255, 255), 5,
                        cv2.LINE_AA)

    #    cv2.imshow('trans',img_trans_marked)#hue
    #    cv2.imshow('gray', hue)

    #    frame_rate = 24.0  # frame rate
    #    size = (640, 480)  # video frame size

    #    fmt = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')  # file format (mp4 here)
    #    writer = cv2.VideoWriter('outtest.mp4', fmt, frame_rate, size)  # create the writer
        imgwrite = img_trans_marked.copy()
        imgwrite = cv2.resize(imgwrite, size1)

        writer.write(imgwrite)  # write the image as one frame

        if cv2.waitKey(10) & 0xFF == ord('q'):
            break

        bridge = CvBridge()
        msg = bridge.cv2_to_imgmsg(img_trans_marked, encoding="bgr8")

        rate = rospy.Rate(1)
        #  while not rospy.is_shutdown():
        pub.publish(msg)

# rate.sleep()

    writer.release()

    cap.release()
    cv2.destroyAllWindows()
Example #22
    def read_images(self, cap1):
        SKIP = 50
        key = 0
        max_frames = SKIP * 20
        i = 0
        while key != ord('q'):
            if i > max_frames:
                break
            cap1.set(cv2.CAP_PROP_POS_FRAMES, i)
            i = i + SKIP
            ret, image1 = cap1.read()
            if not ret:
                break

            gray_l = cv2.extractChannel(image1, 1)
            gray_r = cv2.extractChannel(image1, 2)

            # Find the chess board corners
            ret_l, corners_l = cv2.findChessboardCorners(
                gray_l, (9, 7), None, cv2.CALIB_CB_ADAPTIVE_THRESH)
            if not ret_l:
                continue
            ret_r, corners_r = cv2.findChessboardCorners(
                gray_r, (9, 7), None, cv2.CALIB_CB_ADAPTIVE_THRESH)
            if not ret_r:
                continue
            print(f"process img {i}", file=sys.stderr)

            if ret_l and ret_r:
                # If found, add object points, image points (after refining them)
                self.objpoints.append(self.objp)

                rt = cv2.cornerSubPix(gray_l, corners_l, (11, 11), (-1, -1),
                                      self.criteria)
                self.imgpoints_l.append(corners_l)

                # Draw and display the corners
                cv2.drawChessboardCorners(gray_l, (9, 7), corners_l, ret_l)

                rt = cv2.cornerSubPix(gray_r, corners_r, (11, 11), (-1, -1),
                                      self.criteria)
                self.imgpoints_r.append(corners_r)

                # Draw and display the corners
                cv2.drawChessboardCorners(gray_r, (9, 7), corners_r, ret_r)

                cv2.imshow("Image Left", gray_l)
                cv2.imshow("Image Right", gray_r)

            key = cv2.waitKey(1)
            if key == ord('q'):
                break
            if key == ord('a'):
                return

        img_shape = gray_r.shape
        self.shape = img_shape

        print("Starting camera calibration", file=sys.stderr)
        rt, self.M1, self.d1, self.r1, self.t1 = cv2.calibrateCamera(
            self.objpoints, self.imgpoints_l, img_shape, None, None)
        rt, self.M2, self.d2, self.r2, self.t2 = cv2.calibrateCamera(
            self.objpoints, self.imgpoints_r, img_shape, None, None)

        print("Starting stereo camrea calibration", file=sys.stderr)
        self.camera_model = self.stereo_calibrate(img_shape)
Example #23
file_list = sorted(os.listdir(input_hr))
for filename in file_list:
    print('Generating patches for %s...' % filename)

    parts = filename.split('.')
    hr_path = os.path.join(input_hr, filename)
    lr_path = os.path.join(input_lr, parts[0] + 'x4.' + parts[1])

    hr = cv2.imread(hr_path)
    lr = cv2.imread(lr_path)

    hr_yuv = cv2.cvtColor(hr, cv2.COLOR_BGR2YUV)  # imread returns BGR, so convert from BGR
    lr_yuv = cv2.cvtColor(lr, cv2.COLOR_BGR2YUV)

    hr_y = cv2.extractChannel(hr_yuv, 0)
    lr_y = cv2.extractChannel(lr_yuv, 0)

    hr_shape = hr_y.shape
    lr_shape = lr_y.shape

    num_patches = 0
    for i in range(0, lr_shape[0] - 64, 32):
        for j in range(0, lr_shape[1] - 64, 32):
            hr_patch = hr_y[4 * i:4 * i + 256, 4 * j:4 * j + 256]
            lr_patch = lr_y[i:i + 64, j:j + 64]

            num_patches += 1
            hr_patch_filename = '%s_%04d.png' % (parts[0], num_patches)
            lr_patch_filename = '%s_%04d.png' % (parts[0], num_patches)
            cv2.imwrite(os.path.join(output_hr, hr_patch_filename), hr_patch)
Example #24
D_l = fs.getNode("LEFT.D").mat()
D_r = fs.getNode("RIGHT.D").mat()

rows_l = int(fs.getNode("LEFT.height").real())
cols_l = int(fs.getNode("LEFT.width").real())

rows_r = int(fs.getNode("RIGHT.height").real())
cols_r = int(fs.getNode("RIGHT.width").real())

cap = cv2.VideoCapture(sys.argv[2])

key = 0
while key != ord('q'):
    ret, image = cap.read()
    gray_l = cv2.extractChannel(image, 1)
    gray_r = cv2.extractChannel(image, 2)

    M1l,M2l = cv2.initUndistortRectifyMap(K_l,D_l,R_l,P_l[0:3,0:3],(cols_l,rows_l),cv2.CV_32F)
    M1r,M2r = cv2.initUndistortRectifyMap(K_r,D_r,R_r,P_r[0:3,0:3],(cols_r,rows_r),cv2.CV_32F)

    gray_r_rect = cv2.remap(gray_r,M1r,M2r,cv2.INTER_LINEAR)
    gray_l_rect = cv2.remap(gray_l,M1l,M2l,cv2.INTER_LINEAR)

    cv2.imshow("left", gray_l)
    cv2.imshow("right", gray_r)

    cv2.imshow("left rect", gray_l_rect)
    cv2.imshow("right rect", gray_r_rect)

    key = cv2.waitKey(0)
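The rectification maps depend only on the calibration data, not on the frame, so they can be built once before the loop instead of on every iteration; a sketch of the reordered loop using the same variables:

M1l, M2l = cv2.initUndistortRectifyMap(K_l, D_l, R_l, P_l[0:3, 0:3],
                                       (cols_l, rows_l), cv2.CV_32F)
M1r, M2r = cv2.initUndistortRectifyMap(K_r, D_r, R_r, P_r[0:3, 0:3],
                                       (cols_r, rows_r), cv2.CV_32F)

key = 0
while key != ord('q'):
    ret, image = cap.read()
    gray_l = cv2.extractChannel(image, 1)
    gray_r = cv2.extractChannel(image, 2)

    gray_l_rect = cv2.remap(gray_l, M1l, M2l, cv2.INTER_LINEAR)
    gray_r_rect = cv2.remap(gray_r, M1r, M2r, cv2.INTER_LINEAR)

    cv2.imshow("left rect", gray_l_rect)
    cv2.imshow("right rect", gray_r_rect)
    key = cv2.waitKey(0)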
Example #25
    def find_straight_road(self, file, imgFromRealSense=False):
        """
        Straight road detection.
        Finds straight roads in the image.
        First displays the lines found for the left-hand side and the average of those lines.
        Then displays the lines found for the right-hand side and the average of those lines.
        :param file: path of the image file to load
        :param imgFromRealSense: whether the image comes from a RealSense camera
        :return:
        """
        # load the image
        self.load_image(file, imgFromRealSense=imgFromRealSense)

        # look at blue channel
        imgB = cv2.extractChannel(self.img, rdp.BLUE_CHANNEL, dst=None)

        # Threshold the blue channel
        imgBt = cv2.inRange(imgB, 0, rdp.CHANNEL_THRESHOLD, dst=None)

        # Take bottom half of image
        imgBts = imgBt[int(imgBt.shape[0] / 2):(imgBt.shape[0]), 0:(
            imgBt.shape[1]
        )]  # first index contains y values, second index contains x values

        # Calculate edges
        imgBtse = cv2.Canny(imgBts,
                            rdp.EDGE_LOW_THRESHOLD_DEFAULT,
                            rdp.EDGE_HIGH_THRESHOLD_DEFAULT,
                            edges=rdp.EDGE_TYPE,
                            apertureSize=rdp.EDGE_APERTURE_DEFAULT,
                            L2gradient=rdp.EDGE_L2GRADIENT)

        # Find lines
        # theta values:
        # 0 corresponds to vertical
        # pi/4 corresponds to diagonal from lower left-hand corner to upper right-hand corner
        # pi/2 corresponds to horizontal line
        leftlines = cv2.HoughLines(imgBtse,
                                   2,
                                   np.pi / 180,
                                   rdp.LINE_HOUGH_THRESHOLD,
                                   min_theta=0.1 * np.pi / 4,
                                   max_theta=1.9 * np.pi / 4)

        leftline = np.mean(leftlines, 0)  # takes average of all lines found
        leftline = np.mean(
            leftline, 0
        )  # leftline is a list in a list, so this gets rid of the outer list

        rightlines = cv2.HoughLines(imgBtse,
                                    2,
                                    np.pi / 180,
                                    rdp.LINE_HOUGH_THRESHOLD,
                                    min_theta=2.1 * np.pi / 4,
                                    max_theta=3.9 * np.pi / 4)

        rightline = np.mean(rightlines, 0)  # takes average of all lines found
        rightline = np.mean(
            rightline, 0
        )  # rightline is a list in a list, so this gets rid of the outer list

        # Show left lane boundary
        imgdummy = cv2.copyTo(self.img, None, dst=None)
        # self.show_lines_on_image(imgdummy, np.mean(leftlines, 1), offset=imgBtse.shape[0])
        self.show_line_on_image(self.imglanes,
                                leftline[0],
                                leftline[1],
                                offset=imgBtse.shape[0])
        blank_image_1 = np.zeros(shape=self.imglanes.shape, dtype=np.uint8)
        self.show_line_on_image(blank_image_1,
                                leftline[0],
                                leftline[1],
                                offset=imgBtse.shape[0])

        # Show right lane boundary
        imgdummy = cv2.copyTo(self.img, None, dst=None)
        # self.show_lines_on_image(imgdummy, np.mean(rightlines, 1), offset=imgBtse.shape[0])
        self.show_line_on_image(self.imglanes,
                                rightline[0],
                                rightline[1],
                                offset=imgBtse.shape[0])
        blank_image_2 = np.zeros(shape=self.imglanes.shape, dtype=np.uint8)
        self.show_line_on_image(blank_image_2,
                                rightline[0],
                                rightline[1],
                                offset=imgBtse.shape[0])

        # Find vanishing point
        intercept = cv2.bitwise_and(
            blank_image_1,
            blank_image_2)  # finds the intersection of the two lines
        non_zero = np.mean(
            np.mean(
                cv2.findNonZero(cv2.cvtColor(intercept, cv2.COLOR_BGR2GRAY)),
                0), 0)  # calculate midpoint of intersection

        # Add trajectory line to image
        cv2.line(
            self.imglanes,
            (int(self.imglanes.shape[1] / 2), int(self.imglanes.shape[0])),
            (int(non_zero[0]), int(non_zero[1])), (0, 255, 0),
            thickness=2)

        # Save the image with the lane indicators
        print("Saving output image.")
        cv2.imwrite(rdp.output_file, self.imglanes)
        print("Output image saved.")

        # Show images
        cv2.imshow('Line', self.imglanes)

        # Wait for user to press a key
        cv2.waitKey(0)
        cv2.destroyAllWindows()

        # Indicate that the lanes were found
        self.lanesdetected = True
Example #26
def process_image(msg):

    try:
        bridge = CvBridge()
        frame = bridge.imgmsg_to_cv2(msg, "bgr8")
        #W = video.get(cv2.CAP_PROP_FRAME_WIDTH)
        #H = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
        frame_rate = 20.0  # frame rate
        size1 = (1080, 720)  # video frame size

        count = 0

        # frame=cv2.medianBlur(frame,5)
        # frame=cv2.GaussianBlur(frame,(5,5),0)

        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        hue = cv2.extractChannel(hsv, 0)
        hue = cv2.inRange(hue, 4, 20)
        # hue = cv2.GaussianBlur(hue,(5,5), 0)
        # frame=cv2.medianBlur(frame,5)
        hue = cv2.medianBlur(hue, 9)
        # output = hue[ 0 : frame.shape[0], 0 : frame.shape[1]]
        #cv2.imshow('mark', frame)

        kernel = np.ones((5, 5), np.uint8)
        hue = cv2.erode(hue, kernel, iterations=1)

        # detect contours
        contours, hierarchy = cv2.findContours(hue, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)

        # prepare to find the largest contour
        ci, max_area = -1, 0
        for i in range(len(contours)):
            cnt = contours[i]
            area = cv2.contourArea(cnt)
            # find the largest contour
            if area > max_area:
                max_area = area
                ci = i
        cnt = contours[ci]

        # compute the convex hull of the contour
        hull = cv2.convexHull(cnt)

        # blob detection with connected components
        n, img_label, data, center = cv2.connectedComponentsWithStats(hue)

        #    tr_x = lambda x : x * 150 / 500  # X axis: image coords -> real coords
        #    tr_y = lambda y : y * 150 / 500  # Y axis: same
        img_trans_marked = frame.copy()
        for i in range(1, n):
            x, y, w, h, size = data[i]
            if size < 30000:  # ignore blobs with area under 30000 px
                continue
            #  detected_obj.append( dict( x = tr_x(x),
            #                        y = tr_y(y),
            #                       w = tr_x(w),
            #                      h = tr_y(h),
            #                      cx = tr_x(center[i][0]),
            #                     cy = tr_y(center[i][1])))
            # check
            w_size = int(w / 2)
            h_size = int(h / 2)
            z_size = int((h_size + w_size) / 2)
            img_trans_marked = cv2.rectangle(img_trans_marked, (x, y),
                                             (x + w, y + h), (0, 255, 0), 2)
            img_trans_marked = cv2.circle(
                img_trans_marked, (int(center[i][0]), int(center[i][1])),
                5, (0, 0, 255), -1)
            img_trans_marked = cv2.circle(
                img_trans_marked, (int(center[i][0]), int(center[i][1])),
                w_size, (0, 0, 255), 5)
            #        img_trans_marked = cv2.circle(img_trans_marked, (int(center[i][0]),int(center[i][1])),h_size,(0,0,255),5)
            img_trans_marked = cv2.circle(
                img_trans_marked, (int(center[i][0]), int(center[i][1])),
                z_size, (0, 0, 255), 5)
    #        img_trans_marked = cv2.circle(img_trans_marked, (int(center[i][0]),int(center[i][1])),600,(0,0,255),5)
    #       img_trans_marked = cv2.circle(img_trans_marked, (int(center[i][0]),int(center[i][1])),700,(0,0,255),5)

    # draw the largest contour and its convex hull
        img_trans_marked = cv2.drawContours(img_trans_marked, [cnt], -1,
                                            (255, 0, 0), 3)
        #        img_trans_marked = cv2.drawContours(img_trans_marked, [hull], -1, (0,255,0), 3)

        # detect convexity defects
        cnt = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
        hull = cv2.convexHull(cnt, returnPoints=False)

        # draw the convexity defect points
        defects = cv2.convexityDefects(cnt, hull)
        for i in range(defects.shape[0]):
            s, e, f, d = defects[i, 0]
            start = tuple(cnt[s][0])
            end = tuple(cnt[e][0])
            far = tuple(cnt[f][0])
            #            dist = cv2.pointPolygonTest(hull2,far,True)
            #            print(d)
            if d > 45000:
                count += 1

            cv2.line(img_trans_marked, start, end, [0, 255, 0], 2)
            #            cv2.line(img_trans_marked, avr, far, [0, 255, 0], 2)

            # u = start-far
            # numpy.linalg.norm(u)

            cv2.circle(img_trans_marked, far, 5, [0, 255, 0], -1)

    #    print(count)
        if count == 3:
            cv2.putText(img_trans_marked, 'choki', (0, 100),
                        cv2.FONT_HERSHEY_PLAIN, 6, (255, 255, 255), 5,
                        cv2.LINE_AA)
        if count <= 2:
            cv2.putText(img_trans_marked, 'gu', (0, 100),
                        cv2.FONT_HERSHEY_PLAIN, 6, (255, 255, 255), 5,
                        cv2.LINE_AA)

        if count >= 4:
            cv2.putText(img_trans_marked, 'pa', (0, 100),
                        cv2.FONT_HERSHEY_PLAIN, 6, (255, 255, 255), 5,
                        cv2.LINE_AA)

        cv2.imshow('trans', img_trans_marked)
        #  if cv2.waitKey(10) & 0xFF == ord('q'):
        #       break

        #bridge = CvBridge()
        # msg = bridge.cv2_to_imgmsg(img_trans_marked, encoding = "bgr8")

        #rate=rospy.Rate(1)
        #  while not rospy.is_shutdown():
        #pub.publish(msg)
        # rate.sleep()
        cv2.waitKey(10)

    except Exception as err:
        print(err)
Example #27
from __future__ import division
import cv2
import numpy as np
import time
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()

    frameBGR = cv2.GaussianBlur(frame, (7, 7), 0)
    hsv = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2HSV)
    h = cv2.extractChannel(hsv, 0)  # hue channel
    # mask = cv2.inRange(h, 170, 179)  # red
    # mask = cv2.inRange(h, 55, 70)  # green
    mask = cv2.inRange(h, 90, 100)  # blue

    kernel_open = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    kernel_close = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15))

    mask_m = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel_open, iterations=3)
    mask_m = cv2.morphologyEx(mask_m,
                              cv2.MORPH_CLOSE,
                              kernel_close,
                              iterations=3)

    contours, hierarchy = cv2.findContours(mask_m, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)

    list_center = []  # avoid shadowing the built-in name "list"
    dist_Galet = 120000
    for c in contours:
Example #28
 def threshold_channel(self, img_hls, channel, min_thr, max_thr):
     chan_out = cv2.extractChannel(img_hls, channel)
     return ThresholdImage(chan_out, min_thr, max_thr)
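A usage sketch for the helper above (img_bgr is an assumed input; ThresholdImage comes from the surrounding project):

img_hls = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HLS)
s_binary = self.threshold_channel(img_hls, 2, 90, 255)  # S channel of HLS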
Example #29
    def find_points(self, image):

        start_col = 0
        margin = 0
        if self._do_reset:
            start_col = self.get_max_col(image)
            margin = self.lp.margin * 2.0
        else:
            start_col = self._best_fit.CalcX(image.shape[0])
            margin = self.lp.margin

        window_height = image.shape[0] / self.lp.nwindows

        current_x = start_col - margin / 2
        current_y = image.shape[0] - window_height
        self._search_windows = np.empty((0, 5, 2), dtype=np.int32)

        nonzero_points = np.empty((0, 2), dtype=np.int32)
        finished = False
        while current_y >= 0 and not finished:
            if current_x < 0:
                current_x = 0
                finished = True

            if current_x > image.shape[1] - margin:
                current_x = image.shape[1] - margin
                finished = True

            window_roi = np.s_[int(current_y):int(current_y + window_height),
                               int(current_x):int(current_x + margin)]

            new_search_window = np.array(
                np.int32([[[current_x, current_y],
                           [current_x + margin, current_y],
                           [current_x + margin, current_y + window_height],
                           [current_x, current_y + window_height],
                           [current_x, current_y]]]), np.int32)

            self._search_windows = np.concatenate(
                (self._search_windows, new_search_window), axis=0)

            window_image = image[window_roi]

            nonzero_this_window = cv2.findNonZero(window_image)

            if nonzero_this_window is not None:
                for i in range(nonzero_this_window.shape[0]):
                    pt = nonzero_this_window[i]
                    pt[0, 0] += current_x
                    pt[0, 1] += current_y
                    nonzero_points = np.concatenate([nonzero_points, pt],
                                                    axis=0)

                if nonzero_this_window.shape[
                        0] > self.lp.minpix and self._do_reset:
                    x_pts = cv2.extractChannel(nonzero_this_window, 0)
                    mean = x_pts.astype(np.float64)  # np.float was removed in NumPy 1.20+
                    mean = cv2.reduce(mean, 0, cv2.REDUCE_AVG)

                    current_x = mean[0, 0] - margin / 2.0

            if not self._do_reset:
                current_x = self._best_fit.CalcX(current_y) - margin / 2.0

            current_y -= window_height
        return nonzero_points
Example #30
 def gray_extract_channel():
     return cv2.extractChannel(image, 0)
Example #31
    nameSuffix = inputLines[2].replace('\n', '')
    outputLoc = inputLines[3].replace('\n', '')
    colorMode = inputLines[4].replace('\n', '')
    if (colorMode != "Intensity"):
        nameTransformLoc = inputLines[4].replace('\n', '')
    inputFile.close()
except IOError:
    print("File not accessible")

baseImage = cv.imread(baseFile, cv.IMREAD_UNCHANGED)
overlayImage = cv.imread(overlayFile, cv.IMREAD_UNCHANGED)
#convert the overlay image to grayscale, then back to BGRA format
overlayImageGrayscale = cv.cvtColor(
    cv.cvtColor(overlayImage, cv.COLOR_BGRA2GRAY), cv.COLOR_GRAY2BGRA)
#get the alpha channels and combine them
overlayAlpha = cv.extractChannel(overlayImage, 3)
baseAlpha = cv.extractChannel(baseImage, 3)
combinedAlphas = cv.bitwise_or(baseAlpha, overlayAlpha)

#copy the grayscale overlay to get the dimensions right
coloredOverlay = overlayImageGrayscale.copy()
cv.insertChannel(overlayAlpha, overlayImageGrayscale, 3)
sizes = coloredOverlay.shape
#cv.imwrite(outputLoc+"gray_over"+nameSuffix,overlayImageGrayscale);
#cv.imwrite(outputLoc+"base"+nameSuffix+".png",baseImage);
intensityMap = [[] for _ in range(sizes[0])]  # one empty list per image row

for rows in range(0, sizes[0]):
    for cols in range(0, sizes[1]):
Example #32
    delta: size of the vertical shift after rotation
    radij: diameter of the inscribed circle of the final image
    radijDiag: diameter of the inscribed circle of the initial image
    """
    M = cv2.getRotationMatrix2D((radijDiag,radijDiag), np.degrees(kot), 1)
    M[1,-1] += delta-(radijDiag-radij)
    M[0,-1] += -(radijDiag-radij)
    return cv2.warpAffine(slika, M, (2*radij+1, 2*radij+1))

for ime in imena[nizi[stNiza][0]:nizi[stNiza][1]]:
    # initial image processing
    print(ime)
    sl = cv2.imread(ime)
    clahe = cv2.createCLAHE(clipLimit=1.5, tileGridSize=(6,6))
    sl = cv2.cvtColor(sl, cv2.COLOR_BGR2HSV)
    sl = cv2.insertChannel(clahe.apply(cv2.extractChannel(sl, 2)), sl, 2)
    sl = cv2.cvtColor(sl, cv2.COLOR_HSV2BGR)
    sl = cv2.medianBlur(sl, 7)

    # settings for the point search
    radij = 40
    korak = 10
    izKan1 = [800, 2470, 1490, 3660]  # saturation only, crop window
    slKan1 = cv2.extractChannel(sl, 1)[izKan1[0]:izKan1[1],izKan1[2]:izKan1[3]]
    zacToc = [izKan1[1]-izKan1[0]-97, 100]
    radijDiag = radij*2/np.sqrt(2)
    delta = 0; deltaPrej = 0
    kot = 0; kotPrej = 0
    x = np.array(range(-radij, radij+1))  # for the moment computation
    tocke = []