Example #1
def draw_hour_pictures():

    files = os.listdir(path_to_data)
    calibration_set = get_calibration_set(calibration_file)
    os.system("mkdir "+path_to_pictures)
    k = 0
    for filename in files:
        k = k+1

        if os.path.exists(path_to_calibrated_data + filename):
            print filename + " already calibrated"
            if os.path.exists(path_to_pictures+filename[:-4]+".png"): 
                print "picture " + filename + " already exists"
            else:
                header,header_list = get_header(path_to_calibrated_data + filename)
                data = get_data(path_to_calibrated_data + filename,header)
                draw_pnt(data,header,path_to_pictures+filename[:-4]+".png")
                del data[:]
        else:
            header,header_list = get_header(path_to_data+filename)
            data = get_data(path_to_data+filename,header)
            calibrate(calibration_set,data,header,header_list,path_to_calibrated_data,filename)
            del data[:]
            data = get_data(path_to_calibrated_data + filename,header)
            draw_pnt(data,header,path_to_pictures+filename[:-4]+".png")
            del data[:]

        print filename+" done"
Example #2
def main(args):
    '''
    The main function
    '''
    (
        baseline_data_source,
        historical_data_source,
        projection_data_source,
        variable_name,
        baseline_start_date,
        baseline_end_date,
        projection_start_date,
        projection_end_date,
    ) = get_args(args)

    measurement = transform.to_standard_variable_name(variable_name)
    units = transform.standard_units_from_measurement(measurement)

    climatedb.connect()

    for month in range(1, climatedb.MONTHS_PER_YEAR + 1):
        #print('.', end='', flush=True)
        print('For month %d' % month)
        calibration.calibrate(baseline_data_source, historical_data_source,
                              projection_data_source, measurement, units,
                              baseline_start_date, baseline_end_date,
                              projection_start_date, projection_end_date,
                              month)

    climatedb.close()
Example #3
def draw_hour_colormap_11():

    files = os.listdir(path_to_data)
    calibration_set = get_calibration_set(calibration_file)
    os.system("mkdir "+path_to_colormap)
    k = 0

    for filename in files :
        if(filename != '140413_16_N1_000.pnt'):
            continue
        k = k+1

        if os.path.exists(path_to_calibrated_data + filename):
            print filename + " already calibrated"
            if os.path.exists(path_to_colormap+filename[:-4]+".png"): 
                print "picture " + filename + " already exists"
            else:
                header,header_list = get_header(path_to_calibrated_data + filename)
                data = get_data(path_to_calibrated_data + filename,header)
                draw_colormap_pnt(data,header,path_to_colormap+filename[:-4]+".png")
                del data[:]
        else:
            header,header_list = get_header(path_to_data+filename)
            data = get_data(path_to_data+filename,header)
            calibrate(calibration_set,data,header,header_list,path_to_calibrated_data,filename)
            del data[:]
            data = get_data(path_to_calibrated_data + filename,header)
            draw_colormap_pnt(data,header,path_to_colormap+filename[:-4]+".png")
            del data[:]

        print filename+" done"
        if k > 1: break
Example #4
def draw_day_pictures(str_day):

    files = os.listdir(path_to_data)
    calibration_set = get_calibration_set(calibration_file)
    os.system("mkdir "+path_to_all_day_pictures)
    k = 0
    added_data = [False]*24
    total_points = []
    num_of_points = 0

    for filename in files:
        k = k+1
        if filename[0:6] == str_day:
            if os.path.exists(path_to_calibrated_data + filename):
                print filename + " already calibrated"
                
                header,header_list = get_header(path_to_calibrated_data + filename)
                data = get_data(path_to_calibrated_data + filename,header)
                total_points,num_of_points,stop = draw_day_pnt(str_day,data,header,path_to_all_day_pictures+filename[0:6]+".png",added_data,total_points,num_of_points)
                del data[:]
                if stop: break
                
            else:
                header,header_list = get_header(path_to_data+filename)
                data = get_data(path_to_data+filename,header)
                calibrate(calibration_set,data,header,header_list,path_to_calibrated_data,filename)
                del data[:]
                data = get_data(path_to_calibrated_data + filename,header)
                total_points,num_of_points,stop = draw_day_pnt(str_day,data,header,path_to_all_day_pictures+filename[0:6]+".png",added_data,total_points,num_of_points)
                del data[:]
                if stop: break

            print filename+" done"
Example #5
def draw_day_colormap(date_str):
    files = os.listdir(path_to_data)
    calibration_set = get_calibration_set(calibration_file)
    os.system("mkdir "+path_to_all_day_pictures)
    k = 0
    added_data = [False]*24
    total_points = []
    num_of_points = 0
    try:
        f = open('Z.pickle','rb')
        Z = pickle.load(f)
        f.close()
        colormap_day(date_str, Z,'output.png')
    except IOError:       
        Z = []
        stop = False
        for i in xrange(24):
            Z.append([])

        for filename in files:
            
            if filename[0:6] == date_str :
                k = k+1
                if os.path.exists(path_to_calibrated_data + filename):
                    print filename + " already calibrated"
                    
                    header,header_list = get_header(path_to_calibrated_data + filename)
                    data = get_data(path_to_calibrated_data + filename,header)
                    Z = add_hour_data_for_colormap(Z,data,added_data,header,date_str)
                    # total_points,num_of_points,stop = draw_day_pnt(str_day,data,header,path_to_all_day_pictures+filename[0:6]+".png",added_data,total_points,num_of_points)
                    del data[:]
                    if stop: break                
                else:
                    header,header_list = get_header(path_to_data+filename)
                    data = get_data(path_to_data+filename,header)
                    calibrate(calibration_set,data,header,header_list,path_to_calibrated_data,filename)
                    del data[:]
                    data = get_data(path_to_calibrated_data + filename,header)
                    Z = add_hour_data_for_colormap(Z,data,added_data,header,date_str)
                    # total_points,num_of_points,stop = draw_day_pnt(str_day,data,header,path_to_all_day_pictures+filename[0:6]+".png",added_data,total_points,num_of_points)
                    del data[:]
                    if stop: break
                print k
                # if k > 1 : break
        if False in added_data:
            print added_data
        else:
            f = open('Z.pickle','wb')
            pickle.dump(Z,f)
            f.close()
            colormap_day(date_str, Z,"output.png")
Example #6
    def test_calibrate(self):
        trajectory = gen.generate([self.kx, self.ky], phi=self.phi)
        t = gen.generate_time()
        result = cal.calibrate(t, trajectory)
        self.assertTrue(
            np.allclose(result[0], self.expected_result[0], atol=1e-6)
            and np.allclose(-result[1], self.expected_result[1], atol=0.1))
Example #7
def hx_load():
    try:
        # Create an object hx which represents your real HX711 chip.
        # The only required input parameters are 'dout_pin' and 'pd_sck_pin'.
        # If you do not pass the 'gain_channel_A' argument, the default value is 128.
        # If you do not pass the 'set_channel' argument, the default value is 'A'.
        # You can set a gain for channel A even if you currently select channel B.
        GPIO.setmode(GPIO.BCM)
        hx = HX711(dout_pin=21, pd_sck_pin=20, gain_channel_A=64)
        # Check if we have a swap file. If so, that suggests the program was not
        # terminated properly (power failure). We load the latest state.
        if os.path.isfile(swap_file_name):
            with open(swap_file_name, 'rb') as swap_file:
                # now we loaded the state before the Pi restarted.
                hx = pickle.load(swap_file)
        else:
            hx = calibrate()

        return hx

    except (KeyboardInterrupt, SystemExit):
        print('\nhx_loader: Keyboard Interruption: stopping program')

    except (RuntimeError):
        print("\nhx_loader: Runtime Error during execution")

    except Exception as e:
        print("\nhx_loader: Other exception: " + str(e))
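
A minimal sketch of the complementary save step implied by the comments above (the snippet only restores state from the swap file); the function name is hypothetical, and the hx object and swap_file_name are assumed to come from the surrounding program:

import pickle


def hx_save(hx, swap_file_name):
    # Persist the current HX711 object so a later hx_load() call
    # can restore it after an unexpected shutdown.
    with open(swap_file_name, 'wb') as swap_file:
        pickle.dump(hx, swap_file)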
Example #8
def test_calibration():

    space = odl.uniform_discr(min_pt=[-1],
                              max_pt=[1],
                              shape=[128],
                              dtype='float32',
                              interp='linear')

    cell_side = space.cell_sides

    #kernel = get_kernel(space)
    kernel = get_kernel_gauss(space, 0.2)

    def product(f, g):
        return struct.scalar_product_structured(f, g, kernel)

    #points = space.points()[::2].T
    points = np.array([[
        -0.75,
        0.0,
        0.2,
        0.5,
    ]])
    vectors = np.array([[
        0.3,
        0.0,
        0,
        1,
    ]])
    original = struct.create_structured(points, vectors)
    g = group.Translation(space)

    translation = np.array([1.0])
    translated = action.apply_element_to_field(g, translation, original)

    covariance_matrix = struct.make_covariance_matrix(space.points().T, kernel)
    noise_l2 = odl.phantom.noise.white_noise(space) * 0.05
    decomp = np.linalg.cholesky(covariance_matrix +
                                1e-5 * np.identity(len(covariance_matrix)))
    noise_rkhs = np.dot(decomp, noise_l2)

    get_unstructured = struct.get_from_structured_to_unstructured(
        space, kernel)
    noisy = space.tangent_bundle.element(
        get_unstructured(translated) + noise_rkhs)

    def act(element, struct):
        return action.apply_element_to_field(g, element, struct)

    result_calibration = calib.calibrate(original, noisy, g, act, product,
                                         struct.scalar_product_unstructured)

    estimated_translated = get_unstructured(act(result_calibration.x,
                                                original))
    print('real = {}, computed = {}, log diff = {}'.format(
        translation, result_calibration.x,
        np.log10(np.abs(translation[0] - result_calibration.x[0]))))
Example #9
    def slota(self, MainWindow):#calibrate

        self.textBrowser.append("loading...")
        mtx, dist, rvecs, tvecs, rms, path2 = calibration.calibrate()
        self.textBrowser.append("内参矩阵:")  # intrinsic (camera) matrix
        self.textBrowser.append(str(mtx))
        self.textBrowser.append("畸变系数:")  # distortion coefficients
        self.textBrowser.append(str(dist))
        self.textBrowser.append("旋转向量:")  # rotation vectors
        self.textBrowser.append(str(rvecs))
        self.textBrowser.append("平移向量:")  # translation vectors
        self.textBrowser.append(str(tvecs))
        self.textBrowser.append("reprojection error:")
        self.textBrowser.append(str(rms))
        self.textBrowser.append("结果存储路径:")  # path where the results are stored
        self.textBrowser.append(path2)
        self.textBrowser.append("Calibrate Done\n")
def render_curvature_and_offset(undist_image, curve, offset):

    offset_text = 'offset is: {:.2f}m'.format(offset)
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(undist_image, offset_text, (30, 50), font, 1, (255, 255, 255),
                2)

    curve_text = 'curvature is: {:.2f}m'.format(curve)
    cv2.putText(undist_image, curve_text, (19, 90), font, 1, (255, 255, 255),
                2)

    return undist_image


if __name__ == "__main__":
    mtx, dist, mapx, mapy = calibrate(True)
    img = cv2.imread("../test_images/test1.jpg")
    color = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)

    cv2.imwrite('../writeup_img/test1.jpg', color)
    binary = getBinaryImageframe(color)
    cv2.imwrite('../writeup_img/binary_combo.jpg', binary * 255)

    w = img.shape[1]
    h = img.shape[0]
    M, invM = compute_perspective_transform(color.shape[1], color.shape[0])
    perspective_binary = apply_perspective_transform(binary, M)
    cv2.imwrite('../writeup_img/warped_straight_lines.jpg',
                perspective_binary * 255)
    left_fit, right_fit, ploty, left_fitx, right_fitx, leftx, lefty, rightx, righty = identifyLines(
        perspective_binary)
Example #11
    return binary_output


if __name__ == "__main__":
    from calibration import calibrate, undistort
    from unwraped import unwraped
    import matplotlib.image as mpimg
    img = mpimg.imread("test_images/test1.jpg")
    #img = cv2.imread("test_images/test1.jpg")
    img_size = (img.shape[0], img.shape[1])

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    #cv2.imwrite('examples/gray_1.png', gray)

    ret, mtx, dist, rvecs, tvecs = calibrate(gray, 6, 9)
    undistort_img = undistort(img, mtx, dist)
    #cv2.imwrite('examples/undistort_img_1.png', undistort_img)

    #gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

    unwraped_img = unwraped(undistort_img, img_size)
    hls = cv2.cvtColor(unwraped_img, cv2.COLOR_RGB2HLS).astype(np.float64)
    l_channel = hls[:, :, 1]
    s_channel = hls[:, :, 2]
    x_binary_output = abs_sobel_thresh(unwraped_img,
                                       orient='x',
                                       thresh_min=20,
                                       thresh_max=100)
    cv2.imwrite('examples/sobel_x_1.png', x_binary_output)
    # y-direction gradient, mirroring the x computation above
    y_binary_output = abs_sobel_thresh(unwraped_img,
                                       orient='y',
                                       thresh_min=20,
                                       thresh_max=100)
    cv2.imwrite('examples/sobel_y_1.png', y_binary_output)
Example #12
def calibrateDistorted(settings, points, image):
    pixelSize = settings['pixelSize']
    resolution = settings['resolution']
    label = settings['label']
    yOffset = settings['yOffset']
    numLowDistortionPoints = settings['minLowDistortionPoints']
    numHighDistortionPoints = settings['numHighDistortionPoints']
    passes = settings['passes']
    
    points = pixelToSensor(points, resolution, pixelSize)

    # split the data into low/high distortion points
    points = sorted(points, key=lambda p: euclideanDistance2d(p.sensor))
    printVerbose('%d points' % len(points))
    lowDistortionPoints = points[:numLowDistortionPoints]
    printVerbose('%d low distortion points, max. distance from center of sensor = %fmm' % (len(lowDistortionPoints), np.max([np.linalg.norm(p.sensor[:2]) for p in lowDistortionPoints])))
    highDistortionPoints = points[-numHighDistortionPoints:]
    printVerbose('%d high distortion points, min. distance from center of sensor = %fmm' % (len(highDistortionPoints), np.min([np.linalg.norm(p.sensor[:2]) for p in highDistortionPoints])))

    kappa = 0.0 # assume K1 = 0 (no distortion) for the initial calibration

    # record some basic statistics
    errors = []
    kappas = []
    def stats():
        e = error(points)
        errors.append(e)
        kappas.append(kappa)
        return e

    #re-calibrate, re-estimate kappa, repeat
    for i in range(passes+1):

        #use the estimated K1 to "undistort" the location of the points in the image,
        # then calibrate (using the points with low distortion.)
        params = calibrate(pixelToSensor(lowDistortionPoints, resolution, pixelSize, kappa))

        points = worldToPixel(points, params, pixelSize, resolution, yOffset, kappa)
        stats()

        #use the new calibration parameters to re-estimate kappa (using the points with high distortion.)
        kappa = estimateKappa(worldToPixel(highDistortionPoints, params, pixelSize, resolution, yOffset, kappa))

        #project the points in world coordinates back onto the sensor then record some stats
        points = worldToPixel(points, params, pixelSize, resolution, yOffset, kappa)
        stats()

        if passes == 2:
            numLowDistortionPoints = settings['maxLowDistortionPoints']
            lowDistortionPoints = points[:numLowDistortionPoints]

    translationVector = np.array([ params['tx'], params['ty'], params['tz'] ], np.float64)
    return {
        'label': label,
        'params': {
            'f': params['f'],
            'rotationMatrix': params['rotationMatrix'],
            'translationVector': translationVector,
            'RT': np.dot(translationToHomogeneous(translationVector), rotationToHomogeneous(params['rotationMatrix'])),
            'K1': kappas[-1],
            'pixelSize': pixelSize,
            'resolution': resolution,
            'error': errors[-1]
        },
        'points': points,
        'image': image,
        'errors': errors,
        'kappas': kappas
    }
Example #13
def run():  # main program logic
    # let a maximum of ~100ms (60 sample sets) of data pile up in the queue
    q = mp.Queue(60)
    # data capture is done in separate process so that it's not blocked by program logic.

    p = mp.Process(target=emgCapture.capture, args=(q, deviceStatusq, port))

    deviceConnected = False
    print("\nConnecting to device...")
    p.start()
    message = q.get()

    if message == "Connection established.":
        print(message)
        deviceConnected = True
    else:
        print("Connection failed!")
        p.join()

    op = 0

    W = np.zeros((electrodeNum, synergyNum))
    baselines = np.zeros(electrodeNum)
    maxes = np.full(electrodeNum, 256 * 256)

    calibrated = False
    armConnected = False

    while True:
        if deviceConnected:
            if headless:
                op = serverq.get(block=True)

                if op == "getSystemStatus":
                    armConnected = checkArmStatus(armConnected)
                    deviceConnected = checkDeviceStatus(deviceConnected)
                    serverq.put((deviceConnected, armConnected, calibrated))

                elif op == "startCalibration":
                    W, baselines, maxes = calibration.calibrate(q,
                                                                webPlotter,
                                                                headless=True,
                                                                server=serverq)

                    if W != "failed!":
                        print("\nCalibration complete. Synergy matrix W:")
                        print(W)
                        print("\nBaselines:")
                        print(baselines)
                        print("\nMax values:")
                        print(maxes)

                        np.save("calibrationMatrix.npy", W)
                        np.save("baselines.npy", baselines)
                        np.save("maxes.npy", maxes)
                        print("Matrix saved.")

                        calibrated = True

                        serverq.put("done")
                        time.sleep(1)

                elif op == "loadMatrix":
                    # print("loading matrix...")
                    try:
                        W = np.load("calibrationMatrix.npy")
                        baselines = np.load("baselines.npy")
                        maxes = np.load("maxes.npy")
                        calibrated = True
                        serverq.put((True, False))
                    except:
                        serverq.put((False, True))

                elif op == "startMonitor":
                    monitor.monitor(q,
                                    motionq,
                                    W,
                                    baselines,
                                    maxes,
                                    webPlotter,
                                    server=serverq,
                                    headless=True)
                elif op == "rebooting...":
                    print("Rebooting...")

        #     else:   # interactive main loop
        #         if op == 0:
        #             userGuide.menuPrompt()
        #             op = input("Option: ")
        #             try:
        #                 tmp = int(op)
        #                 op = tmp
        #             except:
        #                 pass

        #         elif op == 1 and deviceConnected: # calibrate
        #             W, baselines, maxes = calibration.calibrate(q, plotter)
        #             calibrated = True

        #             print("\nCalibration complete. Synergy matrix W:")
        #             print(W)
        #             print("\nBaselines:")
        #             print(baselines)
        #             print("\nMax values:")
        #             print(maxes)

        #             toSave = input("\nWould you like to save this matrix? (y/n): ")

        #             if toSave == "y":
        #                 np.save("calibrationMatrix.npy",W)
        #                 np.save("baselines.npy", baselines)
        #                 np.save("maxes.npy",maxes)
        #                 print("Matrix saved.")
        #             else:
        #                 print("Matrix not saved.")

        #             op = 0
        #         elif op == 2: # load
        #             try:
        #                 W = np.load("calibrationMatrix.npy")
        #                 baselines = np.load("baselines.npy")
        #                 maxes = np.load("maxes.npy")
        #                 calibrated = True
        #                 print("Calibration matrix loaded.")
        #             except:
        #                 print("Error: Calibration matrix not found!")

        #             op = 0
        #         elif op == 3 and deviceConnected:
        #             if calibrated:
        #                 monitor.monitor(q, None, W, baselines, maxes, plotter)
        #             else:
        #                 print("Error: Calibrate or load a calibration matrix first.")

        #             op = 0
        #         elif op == 4: # run test
        #             W = calibration.calibrate(q, plotter, testmode=True)
        #             print(W)
        #             op = 0
        #         elif op == 5: # quit
        #             break
        #         else:
        #             print("Invalid command.\n")
        #             op = 0
        else:
            break

    p.terminate()
    p.join()
Example #14
                (10, 40), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
    cv2.putText(result,
                'Radius of curvature (Right) = %.2f m' % (right_curve_rad),
                (10, 70), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
    cv2.putText(result,
                'Vehicle position = %.2f m off center' % (vehicle_offset),
                (10, 100), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
    #plt.imshow(result)
    #plt.show()
    return result


from collections import deque

#testing with two consecutive image frames
mtx, dist = calibrate()
left_line_list = deque()
right_line_list = deque()
'''
line_im = cv2.imread('./test_images/test2.jpg')
line_im = cv2.GaussianBlur(line_im, (5,5), 0)

undistort = cv2.undistort(line_im, mtx, dist, None, mtx)
bright_const_im = cv2.cvtColor(undistort, cv2.COLOR_BGR2HLS)

#line_im = cv2.cvtColor(hls, cv2.COLOR_HLS2BGR)
absx_im = abs_sobel(undistort, ksize=9, thresh=(60,255))
absy_im = abs_sobel(undistort, ksize=5,orient='y', thresh=(60,255))
mag_im = mag_sobel(undistort, ksize=9, thresh=(30,255))
ang_im = ang_sobel(undistort, ksize=15, thresh=(0.7,1.3))
sat_im = saturation_filter(undistort, thresh=(155,255))
Example #15
            cv2.circle(fixed, (int(p.x), int(p.y)), 4, 200, -1)

    points3d_new = list()
    if len(pointsList_cal) > 0:
        points3d_new = gd.getDepth(pointsList_cal, pointsList_new)

    cv2.imshow("img_pure", img_pure)
    cv2.imshow("img", img)
    cv2.imshow("points", detected)
    cv2.imshow("fixed", fixed)
    cv2.imshow("binary", binary)

    k = cv2.waitKey(1)
    if k == ord('s'):

        pointsList_cal = cal.calibrate(pointsList_new, "pars.txt")
        cv2.imwrite("calibrated.jpg", fixed)

    if k == ord('c'):

        print("C")
        if len(pointsList_cal) > 0:
            points3d_new = gd.getDepth(pointsList_cal, pointsList_new)
            gd.writeVertices("model.obj", points3d_new)
            cv2.imwrite("newGrid.jpg", fixed)
        else:

            print("Please calibrate first")

    if k == ord('e'):
        print("got E")
Example #16
    cut_clip = clip.subclip(start_second, end_second)
    cut_clip.write_videofile('./../hard_cut_' + filename + '.mp4')


if __name__ == '__main__':
    """
      1. Camera calibration (once for the whole video). Returns the camera matrix and distortion coefficients.
      2. For each image:
        a. Undistort using data from the first step. Returns an image.
        b. Create a gradient map for the undistorted image using different thresholds. Returns a binary image.
        c. Warp this gradient map. Returns a binary image.
        d. Find lanes using the window method (I prefer the method with np.convolve). Returns lane pixels.
        e. Fit the pixels to a polynomial of the second degree.
        f. Calculate the radius of curvature and the position of the vehicle with respect to the center of the lane.
        g. Plot the lines on the image.
      3. Use the described pipeline for the video. Note that you can reuse previously computed data for the new frame (optional).
      (A rough sketch of steps 2a-2c follows this snippet.)
    """
    mtx, dst = calibrate()
    # cut_video(38, 43)
    process_video(process_frame)
    # dir = './../test_images/'
    # out_dir = './../out_binary_images/'
    # for img_name in os.listdir(dir):
    #     print(img_name)
    #     img_name = 'test9.jpg'
    #     img = mpimg.imread(dir + img_name)
    #     processed_frame = process_frame(img)
    #     # mpimg.imsave(out_dir + img_name, processed_frame, cmap='gray')
    #     plt.imshow(processed_frame)
    #     plt.show()
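
The docstring above outlines the per-frame pipeline, but process_frame itself is not part of this excerpt. The sketch below is a hypothetical illustration of steps 2a-2c using plain OpenCV calls; the function name, the source/destination points of the warp, and the Sobel thresholds are assumptions for illustration, not values from the original project.

import cv2
import numpy as np


def undistort_threshold_warp(img, mtx, dist):
    # 2a. Undistort the frame using the camera matrix and distortion coefficients
    undist = cv2.undistort(img, mtx, dist, None, mtx)
    # 2b. Simple gradient map: scaled absolute Sobel in x, thresholded to a binary image
    gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
    sobelx = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3))
    scaled = np.uint8(255 * sobelx / np.max(sobelx))
    binary = np.zeros_like(scaled)
    binary[(scaled >= 20) & (scaled <= 100)] = 1
    # 2c. Warp the binary image to a bird's-eye view (hand-picked trapezoid)
    h, w = binary.shape
    src = np.float32([[w * 0.45, h * 0.63], [w * 0.55, h * 0.63],
                      [w * 0.90, h], [w * 0.10, h]])
    dst = np.float32([[w * 0.20, 0], [w * 0.80, 0],
                      [w * 0.80, h], [w * 0.20, h]])
    M = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(binary, M, (w, h), flags=cv2.INTER_LINEAR)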
Example #17
import numpy as np
import calibration as cal

peaks = cal.loadref('code/sourcedict.txt')
source_list, spectra = cal.loaddata('data/lab0_spectral_data.txt')
m, b = cal.calibrate(['Am241', 'Cs137'], source_list, spectra, peaks)
cal.validate('Ba133', m, b, source_list, spectra, peaks)
cal.validate('Eu152', m, b, source_list, spectra, peaks)
import calibration
import lane_finding
import cv2
import visualizer

image_size = (1280, 720)
# PREPROCESSING: CAMERA CALIBRATION
cal_mtx, dist_coeff = calibration.calibrate()
# print(cal_mtx)
# print(dist_coeff)

# PROCESSING:
lane_finder = lane_finding.LaneFinder(cal_mtx, dist_coeff, image_size)

# ON TEST IMAGE
lane_finding.debug = False
# test_image = cv2.imread('test_images/test5.jpg')
# # visualizer.plot_colorspaces(test_image,'output_images/test_image/')
# # lane_finder.adjust_parameters(test_image)
# test_result = lane_finder.process_image(test_image)

# ON PROJECT VIDEO
lane_finding.debug = False
cap = cv2.VideoCapture('project_video.mp4')
fourcc = cv2.VideoWriter_fourcc(*'MP4V')
out = cv2.VideoWriter('output_video.mp4', fourcc, 60.0, image_size)

i = 0
while (cap.isOpened()):
    ret, frame = cap.read()
Example #19
import cv2
import cv2.aruco as auo
import numpy as np
import PIL
import imutils
from imutils.video import VideoStream
import time
import sys
from calibration import calibrate
import os

print("[INFO] Use Ctrl+C to exit.")
print("[INFO] calibrating camera...")
ret, camera_matrix, dist_coeffs = calibrate()
if ret:
    print("[INFO] attained camera calibration values.")
else:
    print("[ERROR] failed to get camera calibration values...")

arucoDict = auo.Dictionary_get(auo.DICT_6X6_1000)
arucoParams = auo.DetectorParameters_create()

print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)

while True:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 1000 pixels (width=1000)
    frame = vs.read()
    frame = imutils.resize(frame, width=1000)
import numpy as np
import glob
import cv2
import calibration as calibrator
import findLines as detector

from moviepy.editor import VideoFileClip

mtx, dist, mapx, mapy = calibrator.calibrate(False)


def process_img(img, previous_l=[], previous_r=[]):
    global mapx, mapy
    w = img.shape[1]
    h = img.shape[0]
    undistorted_image = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)
    binary = detector.getBinaryImageframe(undistorted_image)

    M, invM = detector.compute_perspective_transform(
        undistorted_image.shape[1], undistorted_image.shape[0])
    perspective_binary = detector.apply_perspective_transform(binary, M)
    left_fit, right_fit, ploty, left_fitx, right_fitx, leftx, lefty, rightx, righty = detector.identifyLines(
        perspective_binary, previous_l, previous_r)

    if previous_l and previous_r and (
        ((right_fitx - left_fitx) < 300).sum() > 30 or
        ((right_fitx - left_fitx) > 500).sum() > 50):
        print("new detection not reliable")
        ploty = np.linspace(0, perspective_binary.shape[0] - 1,
                            perspective_binary.shape[0])
        l_fitx = previous_l[0] * ploty**2 + previous_l[1] * ploty + previous_l[
Example #21
    return contents


while True:
    # for i in range (1, 2):
    try:
        try:
            with open('/fenix/wrk/servos.txt') as f:
                servo_data = [float(s) for s in f.read().split(',')]
            f.closed

            calibration_dict = read_calibration_dict()

            for i in range(0, len(servo_data)):
                output_seq[i] = calibrate(i, servo_data[i])
                # print('I : {0}. Data : {1}. Calibrated : {2}'.format(i, servo_data[i], output_seq[i]))

            for i in range(0, 16):
                pulse_width = int(MIN_WIDTH + (MAX_WIDTH - MIN_WIDTH) *
                                  output_seq[i] / MAX_DIFF)
                pi.set_servo_pulsewidth(servo_list[i], pulse_width)
            #print('Initial data : {0}'.format(servo_data))
            print('Calibrated data : {0}'.format(output_seq))
            #print('-----------------------')
        except:
            print("Unexpected error:", sys.exc_info()[0])
        time.sleep(SLEEP_S)

    except KeyboardInterrupt:
        break
Example #22
                    write_data(data_file, [p_robot, p_camera],
                               index,
                               type="point_pair")

            while len(working_worker_pipe) > 0:
                last_idx = working_worker_pipe[0]
                result = worker[last_idx].outQ.get()
                working_worker_pipe.remove(last_idx)
                index, p_camera, p_robot = result
                write_data(data_file, [p_robot, p_camera],
                           index,
                           type="point_pair")

        use_auto_judge = original_use_auto_judge

        H_coarse, error_coarse = calibrate(data_file)
        print("Coarse Calibrate Finished")
        print(H_coarse)
        write_data(data_file, H_coarse, "H_coarse", type="mat")
        trans, rot = H2trans_rot(H_coarse)
        write_data(data_file, trans, "Trans_coarse", type="vec")
        write_data(data_file, rot, "Rot_coarse", type="vec")

        H_final = copy.deepcopy(H_coarse)
        error_min = error_coarse

        ## Fine calibration
        if mode == "cal_fine":
            for idx, target in enumerate(zig_zag_path):
                if killing_flag:
                    sys.exit(0)
Example #23
parser = argparse.ArgumentParser()

parser.add_argument('-imdir1', type=str, help='folder with camera1 images', required=True)
parser.add_argument('-imdir2', type=str, help='folder with camera2 images', required=True)
parser.add_argument('-cols', type=int, default=9, help='num of cols')
parser.add_argument('-rows', type=int, default=6, help='num of rows')
parser.add_argument('-outdir', type=str, default='out', help='directory to save camera matrix and distortion coeffs')

args = parser.parse_args()

try:
    os.mkdir(args.outdir)
except FileExistsError:
    pass


P1, P2, Q = calibrate(
    args.imdir1, 
    args.imdir2, 
    cols=args.cols, 
    rows=args.rows, 
    out=args.outdir
)

print()
print('-------------PROJECTION MATRICES-------------')

print('\n\tFIRST CAMERA\n', P1)
print('\n\tSECOND CAMERA\n', P2)
print()
print('Cameras are calibrated!')
Example #24
    cap2 = cv2.VideoCapture(1)

    # calibration
    mtx1 = []
    dist1 = []
    mtx2 = []
    dist2 = []

    filterValid = False

    try:
        npzfile1 = np.load('calibration1.npz')
        mtx1 = npzfile1['mtx']
        dist1 = npzfile1['dist']
    except IOError:
        mtx1, dist1 = calibration.calibrate(cap1, 'calibration1.npz')

    try:
        npzfile2 = np.load('calibration2.npz')
        mtx2 = npzfile2['mtx']
        dist2 = npzfile2['dist']
    except IOError:
        mtx2, dist2 = calibration.calibrate(cap2, 'calibration2.npz')

    # start detecting
    while True:
        ret1, frame1 = cap1.read()
        ret2, frame2 = cap2.read()

        print("Camera 1:")
        frame1 = calibration.undistortion(frame1, mtx1, dist1[0:4])
Example #25
import os
import serial
from calibration import calibrate

BASELINE_LENGTH = 100
TEST_LENGTH = 1000

volts = [None] * TEST_LENGTH
times = [None] * TEST_LENGTH
baseline = [None] * BASELINE_LENGTH
thrust = []

calContinue = False
while not calContinue:
    calPrompt = raw_input('Calibrate load cell? (Y/N): ')
    if calPrompt.lower() == 'y':
        calibrate()
        calContinue = True
    else:
        if calPrompt.lower() == 'n':
            calContinue = True
        else:
            print 'Invalid entry'


calIn = open('cal_value.txt', 'r')
mVNFactor = float(calIn.read()) #read millivolt-Newton conversion factor
calIn.close()
print 'Conversion factor is: ', mVNFactor, ' N/mV.'

ard = serial.Serial('/dev/ttyACM0', 9600) #open serial port
Example #26
#conda install -c menpo opencv3


import darknet.run_python as dn
import cv2
import os
from calibration import calibrate
from show_image import show_labeled

resize = 0.3
main_path = os.getcwd()
im_path = main_path + '/GoPro'

os.chdir(im_path)
rms, camera_mtx, dist_coeff, rotation, translation = calibrate(resize)
if (rms > 1):
    print('Bad calibration. RMS error: ', rms)

os.chdir(main_path)

im = cv2.imread('Test_images/GOPR0943.JPG')


im = cv2.resize(im, None, fx=resize, fy=resize, interpolation=cv2.INTER_CUBIC)
cv2.imshow('orig', im)
cv2.imwrite('orig.jpg', im)
im_undist = cv2.undistort(im, camera_mtx, dist_coeff)
cv2.imshow('undist', im_undist)
cv2.imwrite('im_undist.jpg', im_undist)
cv2.waitKey(0)
Example #27
            cam_num = int(hostname[-1])
            if cam_num % 2 == 1:
                camera.rotation = 180
            pic = "%s-%s.jpg" % (hostname, frame_id)
            take_picture(camera, pic)
            resp = send_to_shock(pic, frame_id, ip4, "calibration")
            subprocess.call("rm %s" % pic, shell=True)
            camera.close()
            if count < 10:
                ret, count = calibration.capture(count)
                trials += 1
                channel.basic_publish(exchange='confirmation', routing_key='confirmation', properties=pika.BasicProperties(reply_to=hostname, correlation_id=id), body="Picture taken: %r  Successes/Trials: %s/%s" % (ret, count, trials))
                continue
            print "count =", count
            if count == 10 and not calibrated:
                calibration.calibrate(hostname, ip4, time_stamp)
                message = "calibrated"
                calibrated = True
            elif calibrated:
                message = "already calibrated. trials: %i" % trials
        elif response == "4":
            print "Command received: Restart calibration"
            count = 0
            trials = 0
            calibrated = False
            message = "Calibration reset"
        else:
            message = "Command '%s' not found" % response
            print message

        channel.basic_publish(exchange='confirmation', routing_key='confirmation', properties=pika.BasicProperties(reply_to=hostname, correlation_id=id), body=message)
Example #28
import os
import cv2
from moviepy.editor import VideoFileClip
from pipeline import process_image
from calibration import calibrate

#challenge_output = 'challenge_out.mp4'
#clip = VideoFileClip('challenge_video.mp4').subclip(0,5)
#challenge_clip = clip.fl_image(process_image)
#challenge_clip.write_videofile(challenge_output, audio=False)

cal_images_dir = './camera_cal/calibration*.jpg'
ret, mtx, dist, rvecs, tvecs = calibrate(cal_images_dir)

for img_name in os.listdir("test_images/"):
    print(img_name)
    img = cv2.imread("test_images/" + img_name)
    out = process_image(img, mtx, dist)
    cv2.imwrite(img_name[:-4] + "_out.jpg", out)
Example #29
def main():
    # remote source
    camera = cv2.VideoCapture(URL)

    # webcam source
#    camera = cv2.VideoCapture(0)
#    camera.set(cv2.CAP_PROP_FRAME_WIDTH, FRAME_WIDTH)
#    camera.set(cv2.CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT)

    gaze_tracker = GazeTracker()
    screen = Screen(SCREEN_WIDTH, SCREEN_HEIGHT)

    cv2.namedWindow("frame")
    cv2.createTrackbar('threshold', 'frame', 0, 255, nothing)
    cv2.setTrackbarPos('threshold', 'frame', 1)

    screen.clean()
    screen.show()

    os.makedirs('images', exist_ok=True)

    while True:

        _, frame = camera.read() 

#        print(frame.shape)

        start = time.time()

        gaze_tracker.update(frame)

        end = time.time()

        cv2.namedWindow("frame")
        dec_frame = gaze_tracker.eye_tracker.decorate_frame()

        dec_frame = cv2.resize(dec_frame,(int(FRAME_WIDTH / 2), int(FRAME_HEIGHT / 2)))

        cv2.moveWindow("frame", 0 , 0)
        cv2.imshow('frame', dec_frame)

        try:
            gaze = gaze_tracker.get_gaze()
        except:
            gaze = None
            screen.print_message("CALIBRATION REQUIRED!")
            screen.show()
            print("CALIBRATION REQUIRED!")

        print("GAZE: {}".format(gaze))

        if gaze:
            screen.update(gaze)
            screen.refresh()
            try:
                pyautogui.moveTo(gaze[0] + ((RES_SCREEN[0] - screen.width) // 2), gaze[1] + 25)
            except:
                pass

        print("TIME: {:.3f} ms".format(end*1000 - start*1000))

        k = cv2.waitKey(1) & 0xff
        if k == 1048603 or k == 27: # esc to quit
            break
        if k == ord('c'): # c to calibrate
            screen.mode = "calibration"
            screen.draw_center()
            calibrate(camera, screen, gaze_tracker)

    camera.release()
    cv2.destroyAllWindows()
Example #30
def resize_adjust_bbox_map(bbox_map=0,
                           sprayers_qtt=0,
                           disk_hole_num=0,
                           adj_start=0,
                           adj_end=0,
                           show_map=False,
                           phis_dist=0,
                           photo_position_ref=0,
                           adjust_map=False,
                           count_photos=0,
                           convertion_base=0):
    '''Resize the bounding box map to fit the number of sprayers.
    After splitting the bounding box, the max value of each line is kept.

    Params:
    -----------
    bbox_map: bounding box map to be resized
    adj_start: difference between the TURN ON sprayed values and the values that were
                supposed to be sprayed.
    adj_end: difference between the TURN OFF sprayed values and the values that were
                supposed to be sprayed.
    sprayers_qtt: quantity of sprayers used to print the information (two lines)
    show_map: bool; if True, the map is plotted in the notebook
    phis_dist: physical distance between the camera and the sprayers, used to correct the
                initial values
    count_photos: if this is the first photo, the physical distance is adjusted once
    disk_hole_num: number of holes in the rotary reader, used to resize the lines
    convertion_base: number of steps needed to cover the distance of one photo; used to count photos
    -----------
    return: new bounding box map with RESIZED columns (this is the real basis)
    return: new bounding box map with ADJUSTED columns (this is the basis to be sprayed)
    (A hypothetical usage example follows this function.)'''
    if adjust_map:

        count_photos = int(photo_position_ref // convertion_base)

        #Splitting the bounding boxes according to the spray qtt, we will have
        # the same qtt of columns of sprayers in one line, these columns will have different values
        cols = np.array(np.split(bbox_map, sprayers_qtt, axis=1))

        # Getting the max value of each column; any value above 0 will prevail and be used
        # for spraying
        max_value_col = np.array([np.max(x, axis=1) for x in cols]).T

        #Splitting the bounding boxes according to the holes numbers in one picture
        # convertion_base means how many steps are necessary to walk through the distance of a picture
        lines = np.array(np.split(max_value_col, convertion_base, axis=0))

        # Getting the max value of each column
        max_value_line = np.array([np.max(x, axis=0) for x in lines])

        #New bounding Box (this is the basis to compare with the sprayed)
        # if count_photos == 1:
        #     adj_distance = np.zeros([phis_dist, sprayers_qtt],np.uint8)
        #     resized_bbox_map = np.insert(max_value_line.astype(np.uint8),
        #                                     -1, adj_distance, axis=0)
        # else:
        resized_bbox_map = max_value_line.astype(np.uint8)

        print('[WEEDS] Photo number', count_photos)
        calibrated_bbox_map = calibrate(resized_bbox_map,
                                        start=adj_start,
                                        end=adj_end)

        print('[WEEDS] Resizing the Maps')

        if show_map:
            plt.imshow(resized_bbox_map)
            plt.show()

        return resized_bbox_map, calibrated_bbox_map
    else:
        resized_bbox_map = np.zeros((1, sprayers_qtt), np.uint8)
        calibrated_bbox_map = np.zeros((1, sprayers_qtt), np.uint8)

        return resized_bbox_map, calibrated_bbox_map
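
A hypothetical call illustrating the shapes the docstring describes; the values are made up, and resize_adjust_bbox_map (with its calibrate() dependency) is assumed to be importable from the surrounding module:

import numpy as np

# 40 detection rows x 8 detection columns from one photo
bbox_map = np.random.randint(0, 2, size=(40, 8)).astype(np.uint8)

resized, calibrated = resize_adjust_bbox_map(bbox_map=bbox_map,
                                             sprayers_qtt=4,        # 8 columns -> 4 sprayer columns
                                             adj_start=0,
                                             adj_end=0,
                                             photo_position_ref=10,
                                             adjust_map=True,
                                             convertion_base=10)    # 40 rows -> 10 disk steps
# resized.shape == (10, 4): one row per disk step, one column per sprayer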
Example #31
                "Press Ctrl+C to complete.")


def main(data):
    data = data.split(' ')
    cl.plot_update()
    return data


while (True):
    command = input()
    #command = command + '\n'
    ser.write(bytes("getVal", 'utf-8'))
    time.sleep(0.5)
    data = ""
    if (ser.in_waiting > 0):
        data = ""
        while (ser.in_waiting > 0):
            data += ser.read().decode("utf-8")
        data = data.replace("\r\n", "")

    #print(data)
    if (command == "mod1"):
        data = data.split(' ')
        cl.calibrate(data[0], data[1])
        cl.plot_calibrate()
    if (command == "mod2"):
        main(data)

ser.close()