def main():
    global redline_cnt
    redline_cnt = 0

    # read and discard the first 20 frames so the camera can settle
    for i in range(0, 20):
        vc.read()

    while True:
        start_time = time.time()
        _, img = vc.read()
        img = calibrate(img)

        contour = get_contour_from_image(img)
        if contour is None:
            break
        control(contour, s, img)

    return
Example #2
def nav_to_trans():
    # pass the box
    x0 = 3
    y0 = 4
    global loc_x, loc_y
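    # loc_x, loc_y are assumed to be updated elsewhere with the robot's current position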
    for i in range(0, 20):
        vc.read()
    while 1:
        start_time = time.time()
        _, img = vc.read()
        img = calibrate(img)

        contour, _ = get_contour_from_image(img)
        # stop the car once it is within 3 units of the target (x0, y0)
        if ((loc_x - x0)**2 + (loc_y - y0)**2)**0.5 < 3:
            cmd = "/StopCar/run \n"
            s.write(cmd.encode())
            time.sleep(1)
            break
        if contour is not None: control(contour, s, img)
Example #3
def nav_to_red():
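    # follow the line until a red line is detected in 5 consecutive frames, then stop the car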
    for i in range(0, 20):
        vc.read()
    cnt = 0
    while 1:
        start_time = time.time()
        _, img = vc.read()
        img = calibrate(img)

        contour, red_line = get_contour_from_image(img)
        if red_line > 0:
            cnt += 1
        else:
            cnt = 0
        if cnt >= 5:
            print("red_line =", red_line)
            print("red line confirmed, stopping")
            cmd = "/StopCar/run \n"
            s.write(cmd.encode())
            time.sleep(1)
            break

        # no stop condition yet: keep following the line
        if contour is not None: control(contour, s, img)
Example #4
red.value(1)
blue.value(1)
green.value(1)
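# cBool tracks whether a calibration has been performed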
cBool = False

while True:  # wait for THE BUTTON to be pressed
    if b1.value() == 0:
        if not cBool:
            # no calibration was done: fall back to preset values (water blank)
            init = 2350
            current = 340
        break
    elif b2.value() == 0:  # if the 2nd button is pressed, calibrate the sensor to the current blank
        c = calibrate()  #calibrate sensor
        init = c[0]
        current = c[1]
        green.value(0)  #turn the LED yellow, if there has been a calibration
        cBool = True
        time.sleep(2)
    else:
        pass
print("Engaging...")
file = open("data.csv", "w")
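# blink the embedded LED at 4 Hz for ~10 s (assumed to signal that logging is about to start)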
embblink = PWM(emb, freq=4)
time.sleep(10)

embblink.deinit()
cycle = 0  #a cycle is ~1 second
file.write("Time,ADC Value,Std Deviation,Calibrated OD\n")
Example #5

def control(contour, s):
    contour_center = get_contour_center(contour)
    if contour_center is None: return
    print("contour center = ", contour_center)
    control_vec = np.subtract((np.shape(img)[1] / 2, np.shape(img)[0]),
                              contour_center)
    speed, turn = _control_center(control_vec)
    cmd = "/ServoTurn/run " + str(speed) + " " + "{0:.2f}".format(turn) + " \n"
    print("cmd = ", cmd)
    s.write(cmd.encode())
    print("speed, turn = ", speed, turn)


if __name__ == "__main__":
    s = serial.Serial("/dev/ttyACM0")
    vc = cv2.VideoCapture(1)
    for i in range(0, 20):
        vc.read()
    f = open("timer.txt", "w")
    while 1:
        start_time = time.time()
        _, img = vc.read()
        img = calibrate(img)

        contour = get_contour_from_image(img)
        if contour is not None: control(contour, s)
        f.write("Line 267: " + str(time.time() - start_time) + "\n")
#		time.sleep(.3)
Example #6
    def find_squares(self):
        # Captures and calibrates a frame
        ret, frame = self.cap.read()
        frame = calibrate(frame)
        img_display = np.copy(frame)
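        # filterCombination selects the pre-processing pass: 0 = Canny only,
        # 1 = brightness threshold + Canny; it becomes 2 once a fiducial is found
        # (or after both passes fail, in which case the position is extrapolated)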
        filterCombination = 0


        while (filterCombination != 2):
            # Pre-processes image before tracking
            img = np.copy(frame)
            img = cv2.GaussianBlur(img, (3,3), 0)
            if filterCombination == 1:
                # lapl = cv2.Laplacian(img, cv2.CV_64F)
                # img = img - lapl
                img = cv2.inRange(img, np.array([125, 125, 125], dtype=np.uint8), np.array([255, 255, 255], dtype=np.uint8))
            img = cv2.Canny(img, 100, 150, apertureSize=5)
            retval, img = cv2.threshold(img, 100, 255, cv2.THRESH_BINARY)
            self.canny = img

            # Locates connected components within the image
            contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

            # Helper variables
            i1 = 0
            squares = []

            # Analyzes each contour: checks if it's a square, checks if it has at least 5 "children"
            for cnt in contours:
                # More helper variables
                children = []
                children_final = []
                children_areas = 0
                average_area = 0.0

                # Skips contours that are enormous or tiny
                if cv2.contourArea(cnt) > self.frame_height * self.frame_width * 0.4 or cv2.contourArea(cnt) < 100:
                    i1 += 1
                    continue

                # Appends all of the contour's children to an array (child = contour enclosed by a parent contour)
                if len(hierarchy[0]) > 0:
                    i2 = hierarchy[0][i1][2]
                    while i2 != -1:
                        children.append(contours[i2])
                        children_areas += cv2.contourArea(contours[i2])
                        i2 = hierarchy[0][i2][0]
                i1 += 1
       
                # The children must be similarly sized in the fiducial 
                if len(children) > 0:
                    average_area = float(children_areas) / len(children)
                    for cld in children:
                        if self.is_square(cld, 0.01) and abs(cv2.contourArea(cld) - average_area) < 100:
                            children_final.append(cld)

                # Checks if the contour is a square and if it contains at least 5 children
                cnt, cnt_square = self.is_square(cnt, 0.02) 
                if cnt_square and len(children_final) >= 5:
                    squares.append(cnt)

                    # Keeps only one detected fiducial: the smaller one on pass 0, the larger one on pass 1
                    if len(squares) == 2:
                        if filterCombination == 0:
                            if cv2.contourArea(squares[0]) > cv2.contourArea(squares[1]):
                                squares.pop(0)
                            else:
                                squares.pop(1)
                        elif filterCombination == 1:
                            if cv2.contourArea(squares[0]) < cv2.contourArea(squares[1]):
                                squares.pop(0)
                            else:
                                squares.pop(1)

            # Calculates the x, y coordinates and the area of the fiducial
            if len(squares) != 0:
                M = cv2.moments(np.array(squares))
                self.x0 = self.x
                self.y0 = self.y

                self.x = (int(M['m10'] / M['m00']) * 2.0 / self.frame_width) - 1.0
                self.y = (int(M['m01'] / M['m00']) * 2.0 / self.frame_height) - 1.0
                self.z = cv2.contourArea(squares[0])

                cv2.drawContours( img_display, squares, -1, (0, 255, 0), 2 )

                filterCombination = 2

            # Estimates the fiducial's position based on previous position data
            else:
                filterCombination += 1
                if filterCombination == 2:
                    dx = self.x - self.x0
                    dy = self.y - self.y0

                    self.x += dx
                    self.y += dy
                    self.x0 += dx
                    self.y0 += dy

                    # clamp the estimated position to the normalized [-1, 1] range
                    if self.x > 1.0:
                        self.x = 1.0
                    elif self.x < -1.0:
                        self.x = -1.0
                    if self.y > 1.0:
                        self.y = 1.0
                    elif self.y < -1.0:
                        self.y = -1.0

                    circle_x = int( (self.x + 1) / 2 * self.frame_width )
                    circle_y = int( (self.y + 1) / 2 * self.frame_height )
                    cv2.circle( img_display, (circle_x, circle_y), 20, (255, 0, 0), 2 )                    

        fiducial_msg = Point()
        (fiducial_msg.x, fiducial_msg.y, fiducial_msg.z) = (self.x, self.y, self.z)
        self.pub_fiducial.publish(fiducial_msg)
        
        self.img = img_display
Example #7
    def find_squares(self):
        ret, img = self.cap.read()
        img = calibrate(img)
        img_display = img
        img = cv2.inRange(img, np.array([150, 150, 150], dtype=np.uint8), np.array([255, 255, 255], dtype=np.uint8))
        img = cv2.GaussianBlur(img, (5,5), 0)
        img = cv2.morphologyEx(img, cv2.MORPH_OPEN, np.ones((9, 9), np.uint8))  # 9x9 structuring element
    
        squares = []
        img = cv2.Canny(img, 200, 250, apertureSize=5)
        self.canny = img
        img = cv2.dilate(img, None)
        retval, img = cv2.threshold(img, 100, 255, cv2.THRESH_BINARY)
        contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    
        i1 = 0
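        # analyze each contour: keep it only if it is a square that encloses
        # at least 5 similarly sized child contours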
        for cnt in contours:
            children = []
            children_final = []
            children_areas = 0
            average_area = 0.0

            if cv2.contourArea(cnt) > self.frame_height * self.frame_width * 0.7:
                i1 += 1
                continue

            if len(hierarchy[0]) > 0:
                i2 = hierarchy[0][i1][2]
                while i2 != -1:
                    children.append(contours[i2])
                    children_areas += cv2.contourArea(contours[i2])
                    i2 = hierarchy[0][i2][0]
            i1 += 1
    
            if len(children) > 0:
                average_area = float(children_areas) / len(children)
                for cld in children:
                    if abs(cv2.contourArea(cld) - average_area) < 100:
                        children_final.append(cld)

            cnt, cnt_square = self.is_square(cnt, 0.02) 
            if cnt_square and len(children_final) >= 5:
                squares.append(cnt)

                if len(squares) == 2:
                    if cv2.contourArea(squares[0]) > cv2.contourArea(squares[1]):
                        squares.pop(0)
                    else:
                        squares.pop(1)
    
        if len(squares) != 0:
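            # fiducial found: publish its normalized centre (x, y) and contour area (z)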
            M = cv2.moments(np.array(squares))
            x = (int(M['m10'] / M['m00']) * 2.0 / self.frame_width) - 1.0
            y = (int(M['m01'] / M['m00']) * 2.0 / self.frame_height) - 1.0
            z = cv2.contourArea(squares[0])

            cv2.drawContours( img_display, squares, -1, (0, 255, 0), 2 )
   
            fiducial_msg = Point()
            (fiducial_msg.x, fiducial_msg.y, fiducial_msg.z) = (x, y, z)
            self.pub_fiducial.publish(fiducial_msg)
        else:
            fiducial_msg = Point()
            (fiducial_msg.x, fiducial_msg.y, fiducial_msg.z) = (0, 0, 0)
            self.pub_fiducial.publish(fiducial_msg)

        self.img = img_display
Example #8
# 1. get the gripper poses
fname = '%s/calibrationValueConfig.txt' % args.data_dir
f = open(fname, 'r')
lines = f.readlines()
bHg_list = []
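# bHg_list: base-to-gripper poses, one 4x4 homogeneous transform per line of the config file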
for line in lines:
    angles = [float(e.strip()) for e in line.split(',')]
    T_list = g2b(angles)
    T = np.identity(4)
    for t in T_list:
        T = np.matmul(t, T)
    bHg_list.append(T)

# 2. calibrate camera and get extrinsic matrix
wHc_dict = calibrate("%s/%s" % (args.data_dir, args.img_sub_dir),
                     show_img=args.show_img,
                     img_format=args.img_format)

# 3. filter bHg and wHc
wHc = []
bHg = []
for k in wHc_dict:
    bHg.append(bHg_list[k])
    wHc.append(wHc_dict[k])

# 4. do hand-eye calibration
bHg = np.array(bHg)
bHg = np.transpose(bHg, (1, 2, 0))
wHc = np.array(wHc)
wHc = np.transpose(wHc, (1, 2, 0))
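# bHg and wHc are now stacked as 4x4xN arrays, presumably the layout expected by the hand-eye solver called next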
Example #9
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points,
                                                       size, None, None)  # run the calibration

    return ret, mtx, dist, rvecs, tvecs, successfulImages, obj_points, img_points


if __name__ == '__main__':
    # camera used: iPhone XR
    # checkerboard pattern: 10 x 7 squares
    # checkerboard source: http://cvrs.whu.edu.cn/courses/ComputerVision2015/camera-calibration-checker-board_9x7.pdf
    nRow = 7
    nCol = 10
    sideLength = 32  # real-world side length of one checkerboard square, in mm

    # load all image paths
    image_files = glob.glob(r'picturesForCalibrate\*.jpg')
    imageNumbers = len(image_files)

    print('Start to process input images...')
    ret, mtx, dist, rvecs, tvecs, successfulImages, objPoints, imgPoints = calibrate(
        image_files)  # process the images
    print('{} images are processed successfully, {} fail.'.format(
        successfulImages, imageNumbers - successfulImages))
    print('-------------------------------------')
    plotAccracy(objPoints, imgPoints, mtx, dist, rvecs, tvecs)

    print('Start to generate 3D results...')
    produceResults(ret, mtx, dist, rvecs, tvecs, successfulImages,
                   sideLength)  # output the results
    print('Results are created successfully!')
Example #10
    net = cv2.dnn.readNetFromTensorflow(args.model_path)
    cap = cv2.VideoCapture(1)
    s.write("/ServoCtrl/run 30 \n".encode())

    for i in range(0,20):
        cap.read()

    f = open("timer.txt", "w")

    while 1:
        start_time = time.time()
        ret, frame = cap.read()
        
        frame = imresize(frame, tuple(args.target_img_size))

        img = calibrate(frame)

        frame = cv2.dnn.blobFromImage(frame)
        net.setInput(frame)
        pred = net.forward()
        ans = pred[0, :, 0, 0].argmax(axis=-1)
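        # class 1 is assumed to be the "stop" prediction: halt the servo and skip line-following this frame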
        if ans == 1:
            s.write("/ServoStop/run \n".encode())
            # time.sleep(0.5)
            continue

        contour = get_contour_from_image(img)

        if contour is not None:
            control(contour, s)
Example #11
        obj.goalPose[0,Diff+i] = waypoints[i][0,0]
        obj.goalPose[1,Diff+i] = waypoints[i][0,1]

    #Calibrate sphero frame

    sphero_theta = [0.0]*Num
    velNum = [[]]*Num
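    # note: [[]] * Num creates Num references to the same inner list; that is fine here
    # because velNum is rebound by the calibrate() call below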

    print "test"
    for i in range(Num):
        # Get your position
        init_calib_loc = bVicon[i].getPose()
        # I guess this is x and y
        print init_calib_loc[0]
        print init_calib_loc[1]
    sphero_theta, velNum = calibrate(Num, vel_mag, bVicon, pub, sphero_theta, velNum)


    # for i in range(Num - Num/2):
    #     j = i + Num/2
    #     veltmp = vel_mag - 10
    #     calibVect = numpy.array([0,0])
    #     labVel = numpy.linalg.norm(calibVect)
    #
    #     while labVel < 0.05:
    #         print j
    #         veltmp = veltmp + 10
    #         # Start time
    #         startCalib = time.time()
    #         # Initial Location
    #         init_calib_loc = bVicon[j].getPose()
Example #12
import machine
import time
from setup import *
from calibrate import *
from measureSensor import *

components = setup() #setup the sensors
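# assumed ordering, based on how the list is used below: [sensor, LED, embedded LED, button]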
components[1].value(1) #turn on LED to indicate that it is connected
components[2].value(1) #turn on embedded LED to indicate calibration

while True: #calibrate the light-to-voltage sensor
    if components[3].value() == 1:  # if the button is pressed, do the calibration
        blank = calibrate(components[0]) #define the calibration value
        components[1].value(0) #turn off LED
        components[2].value(0) #turn off embedded LED
        break
    else:
        pass

count = 0
while True: #now do some measurements...
    if components[3].value() == 1:  # check whether the exit button is pressed
        break  # stop
    else:
        if count % 180 == 0:  # check whether 180 seconds have passed
            data = measureSensor()
            dataval = data[0]  # measured light
            devi = data[1]  # standard deviation of measurement
        else:
            pass