Example #1
0
def getCommand():
    """Compute the next steering target (the vanishing point) from the camera.

    Reads a black/white-thresholded camera frame, extracts the left and
    right lane lines (falling back to the previously detected lines when
    none are found), draws them onto a color frame stored in the global
    ``line_image``, and returns the vanishing point of the two lines.

    Globals written: ``line_image``, ``prev_left``, ``prev_right``.
    Returns: whatever ``get_vp`` produces for the two lines.
    """
    print("getCommand running ...")
    # NOTE(review): if ``cv`` is OpenCV this call is invalid -- cv2 has no
    # ``wait`` attribute and ``cv.waitKey(3)`` was probably intended.  If
    # ``cv`` is a threading.Condition instead, its lock must be held before
    # calling wait() -- confirm which one it is.
    cv.wait(3)

    global line_image
    global prev_left
    global prev_right

    # Calculate left and right line from a thresholded (black/white) frame.
    image_bnw = cam.get_image('bnw',BNW_THRESHOLD)
    # image_bnw = cam.get_image('canny')

    line_list = find_all_lines(image_bnw)
    
    # In case no lines were found, fall back to the previous lines.
    if (not line_list):
        print('No lines detected - using previous Lines!')
        line_list = [prev_left, prev_right]

    # Get new lines - return format [[left offset, alpha][right offset, alpha]]
    lines_LR = find_lines_LR(line_list,prev_left,prev_right,DIV_ITER)
    
    # Update lines and draw them into the color frame (later stored in the
    # global line_image).
    image_color = cam.get_image('color')

    # If current lines are detected, use them (a valid line has exactly the
    # two components [offset, alpha]) ...
    # left line
    if(len(lines_LR[0]) == 2):
        prev_left  = [float(i) for i in lines_LR[0]]
        draw_lines(image_color,prev_left)
    # ... or if no lines use prev ...
#    else:
#        lines_LR[0] = prev_left;
    # right line
    if(len(lines_LR[1]) == 2):
        prev_right =  [float(i) for i in lines_LR[1]]
        draw_lines(image_color, prev_right)
    # ... or if no lines use prev ...
#    else:
#        lines_LR[1] = prev_right;
    # or write prev_left and prev_right into lines_LR

    # (translated from German) prev_left & prev_right now hold the line
    # information that the rest of the pipeline works with.

    # Write camera-image with drawn lines to the global variable.
    line_image = image_color

    # (translated from German) Assumption: these values are rho and theta.
    print('lines_LR: LEFT: ' + str(lines_LR[0]) + ' RIGHT: ' + str(lines_LR[1]))
   
    
    vanishing_point = get_vp(lines_LR)
    
    print('getCommand returns: ' + str(vanishing_point))

    return vanishing_point
Example #2
0
    def find_marker(self, image_msg):
        """Detect a 7x3 circle-grid marker in an incoming compressed image.

        Does nothing while the node is inactive, and rate-limits itself to
        one processed frame per ``self.publish_duration``.  On detection the
        grid corners are drawn, the pose is estimated via ``self.gradient``,
        and the annotated frame is displayed in a window.
        """
        if not self.active:
            return
        # Rate-limit: process at most one frame per publish_duration.
        now = rospy.Time.now()
        if now - self.last_stamp < self.publish_duration:
            return
        self.last_stamp = now

        try:
            self.currentImg = self.bridge.compressed_imgmsg_to_cv2(
                image_msg, "bgr8")
        except CvBridgeError as e:
            # BUG FIX: was Python-2 ``print e`` and fell through to use a
            # possibly stale/unset image; with no new frame there is nothing
            # to process.
            print(e)
            return

        # BUG FIX: the original referenced a bare ``currentImg`` (NameError);
        # the decoded frame lives on ``self``.
        gray = cv2.cvtColor(self.currentImg, cv2.COLOR_BGR2GRAY)

        detection, corners = cv2.findCirclesGrid(gray, (7, 3))

        annotated = self.currentImg.copy()

        if detection:
            cv2.drawChessboardCorners(annotated, (7, 3), corners, detection)

            # Flatten the 21x1x2 corner array into a plain 21x2 point array.
            grid_points = np.array(
                [[corners[i][0][0], corners[i][0][1]] for i in range(21)])

            self.originalmatrix()
            detection, rvec, tvec = self.gradient(grid_points)
            # NOTE(review): ``distance`` resolves from module scope --
            # confirm it is not meant to be ``self.distance``.
            distance(annotated, tvec)

        cv2.imshow('detection', annotated)
        # BUG FIX: cv2 has no ``wait``; waitKey(1) pumps the GUI event loop.
        cv2.waitKey(1)
    def run(self):
        """Consumer loop: pull JPEG-encoded frames from the shared queue and display them.

        Runs until ``self.stop_event`` is set.  Any unexpected error is
        logged as fatal and ends the loop; window teardown is best-effort.
        """
        log_lib.debug("Starting ...")
        start_time = time.time()

        while not self.stop_event.is_set():
            try:
                log_lib.debug(
                    "Trying to pull data from Queue. Current Queue size {}".
                    format(queue.qsize()))
                # Nothing queued yet -- back off instead of busy-waiting.
                if queue.qsize() == 0:
                    time.sleep(sleeping_time)
                    continue

                frame_data = queue.get(block=True)
                log_lib.debug(
                    "Pulled a frame of size of {} B from Queue. Current Queue size {}"
                    .format(len(frame_data), queue.qsize()))
                # BUG FIX: np.fromstring is deprecated for binary data;
                # frombuffer decodes the same bytes without a copy.
                frame = cv2.imdecode(np.frombuffer(frame_data, dtype=np.uint8),
                                     -1)
                cv2.imshow('Window', frame)
                # BUG FIX: cv2 has no ``wait``; waitKey(1) pumps the GUI
                # event loop so the frame actually renders.
                cv2.waitKey(1)

            except Exception as e:
                log_lib.fatal(str(e))
                break

        # Best effort: a headless OpenCV build may not support windows.
        try:
            cv2.destroyAllWindows()
        except Exception:
            pass

        elapsed_time = time.time() - start_time
        log_lib.info("Completed in %s" %
                     utils_lib.elapsed_time_string(elapsed_time))
Example #4
0
import cv2
import numpy as np
import time

# Continuously grab webcam frames, detect circles with the Hough transform,
# and draw them; press ESC to quit.
cap = cv2.VideoCapture(0)
while True:
    ret, image = cap.read()
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Median blur suppresses noise that produces spurious circles.
    gray = cv2.medianBlur(gray, 5)
    cv2.imshow("output", image)
    # BUG FIX: in the original call the extra positional arguments bound
    # starting at the optional ``circles`` OUTPUT parameter (not at
    # param1/param2) -- the detector parameters must be passed by keyword.
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 20,
                               param1=80, param2=60,
                               minRadius=20, maxRadius=20)
    if circles is not None:
        circles = np.int32(circles[0, :])
        print(circles)
        # NOTE(review): ``[80] in circles`` relies on NumPy's fuzzy list
        # membership and almost certainly does not test what was intended
        # -- confirm which value (radius? coordinate?) should skip a frame.
        if [80] in circles: continue
        for x, y, r in circles:
            cv2.circle(image, (x, y), r, (0, 255, 0), 1)
        cv2.imshow("output", image)
    # Exit on ESC (key code 27).
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
# BUG FIX: release the camera, and use waitKey (cv2.wait does not exist).
cap.release()
cv2.waitKey(1)
cv2.destroyAllWindows()
Example #5
0
def show_img(img, name='default'):
    """Display *img* in a window titled *name* and block until a key is pressed."""
    cv2.imshow(name, img)
    # BUG FIX: cv2 has no ``wait`` attribute (the original raised
    # AttributeError); waitKey(0) blocks until a key press.
    cv2.waitKey(0)
Example #6
0
import cv2 as cv

# Load and display a sample photo.
img = cv.imread('Photos/user.jpg')

cv.imshow('User', img)

# BUG FIX: cv2 has no ``wait`` attribute; waitKey(0) blocks until a key
# press so the window stays open.
cv.waitKey(0)
import cv2

# Read the image from the script's directory, converting it to grayscale
# (flag 0 == cv2.IMREAD_GRAYSCALE).
img = cv2.imread("cat.jpg", 0)
# Show the grayscale image in a window named "output".
cv2.imshow("output", img)
# Block until any key is pressed.
cv2.waitKey(0)
# Save a PNG copy of the grayscale image.
cv2.imwrite("qwerty.png", img)
# Binarize: pixels above 127 become 255, the rest become 0.
ret, bw = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
cv2.imshow("blackWhite", bw)
# BUG FIX: cv2.wait(0) does not exist; waitKey(0) was intended.
cv2.waitKey(0)
# Destroy all windows before exiting the program.
cv2.destroyAllWindows()
    for files in TraningFiles:
        file = os.listdir("TrainingData" + "/" + files)
        for Image in file:
            IMAGE = face_recognition.load_image_file(
                "TrainingData" + "/" +
                TraningFiles[TraningFiles.index(files)] + "/" + Image)
            try:
                image = cv2.imread(
                    "TrainingData" + "/" +
                    TraningFiles[TraningFiles.index(files)] + "/" + Image, 0)
                (top, right, bottom,
                 left) = face_recognition.face_locations(IMAGE)
                cv2.rectangle(image, (left, top), (right, bottom), (0, 0, 255),
                              2)
                cv2.imshow('Training Data', image)
                cv2.wait(500)
                TrainingData.append(face_recognition.face_encodings(IMAGE)[0])
                labels.append(TraningFiles[TraningFiles.index(files)])
                cv2.destroyAllWindows()
            except:
                print(
                    "I wasn't able to locate any faces in at least one of the images. Check the image files: "
                    + "TrainingData" + "/" +
                    TraningFiles[TraningFiles.index(files)] + "/" + Image)
    trainedfile = open('TrainingData.pkl', 'wb')
    Labelfile = open('LabelData.pkl', 'wb')
    pickle.dump(TrainingData, trainedfile)
    pickle.dump(labels, Labelfile)
else:
    trainedfile = open('TrainingData.pkl', 'rb')
    Labelfile = open('LabelData.pkl', 'rb')
Example #9
0
                             thickness=1)
                cv.polylines(img2, [np.int32(res2)],
                             isClosed=True,
                             color=(0, 0, 255),
                             thickness=1)
                cv.polylines(img3, [np.int32(res3)],
                             isClosed=True,
                             color=(0, 0, 255),
                             thickness=1)
                cv.polylines(img4, [np.int32(res4)],
                             isClosed=True,
                             color=(0, 0, 255),
                             thickness=1)

            # Show each result image in turn, waiting for a key press
            # between them.  BUG FIX: the first image used ``cv.wait(0)``,
            # which does not exist in cv2 -- waitKey(0) is the real call.
            for stage_img in (img0, img1, img2, img3, img4):
                cv.imshow("image", stage_img)
                cv.waitKey(0)
            img5 = np.zeros((640, 640, 3), np.uint8)

            img5.fill(255)