Exemplo n.º 1
0
def get_data():
    """Read humidity, temperature, pressure and soil moisture, take a
    photo, and persist everything via insert_data().

    When the TEST environment variable is "y", sensor readings are
    replaced with random values so the pipeline can run off-hardware.
    """
    if os.environ["TEST"] == "y":
        # Fabricate plausible readings for test runs.
        humidity = random.uniform(30, 80)  # 30% to 80%
        temp = random.uniform(10, 30)  # 10degC to 30degC
        pressure = random.uniform(1000, 2000)  # 1000mbar to 2000mbar
        soil_humidity = random.uniform(0, 1)  # 1 or 0
    else:
        # Real hardware: sensor object `s` plus a GPIO soil probe.
        humidity = s.get_humidity()
        temp = s.get_temperature()
        pressure = s.get_pressure()
        soil_humidity = GPIO.input(17)

    log("=========================================")
    log("Current Humidity:      " + str(humidity) + " %")
    log("Current Temperature:   " + str(temp) + " degC")
    log("Current Air Pressure:  " + str(pressure) + " mbar")
    log("-----------------------------------------")
    log("Current Soil Humidity: " + str(soil_humidity))

    # Name the photo after the capture time, then store the sample.
    stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%H-%M-%S-%d')
    photo_name = stamp + ".jpg"
    camera.Capture(photo_name)

    insert_data(humidity, temp, pressure, soil_humidity, photo_name)
Exemplo n.º 2
0
 def handle_service(self, req):
     """Service callback: grab a camera frame, pause briefly, upload."""
     rospy.loginfo("Upload to Twitter!")
     # Take a picture with the camera.
     rospy.loginfo("camera!")
     shot = camera.Capture()
     shot.cap()
     # Short pause before uploading.
     rospy.loginfo("kyuukei!")
     time.sleep(2.0)
     # Upload the captured image.
     rospy.loginfo("upload!")
     self.Up()
Exemplo n.º 3
0
def Smp_det():
    """Single-camera distance-estimation loop.

    The width of the first detected contour calibrates ``focalLength``
    from the known target width/distance; every subsequent frame turns
    contour width back into a distance estimate and overlays it on the
    image. ESC (in the distance window) quits.
    """
    global focalLength
    global cam_ON
    # Use one camera (selected by cam_EYE) for single detection.
    cam_ON = camera.Open(TYPE, cam_EYE)
    cam_ON = camera.Setup(cam_ON, cam_setting, TYPE)
    while True:
        time.sleep(0.1)
        frame = camera.Capture(cam_ON, TYPE)

        # Edge map: grayscale -> histogram equalize -> blur -> Canny.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        gray = cv2.GaussianBlur(gray, (5, 5), 0)
        edged = cv2.Canny(gray, 35, 125)
        cv2.imshow('canny', edged)
        key = cv2.waitKey(20)

        # OpenCV 3.x findContours returns (image, contours, hierarchy).
        (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                                        cv2.CHAIN_APPROX_SIMPLE)
        if not cnts:
            continue

        # Largest contour is assumed to be the target.
        biggest = max(cnts, key=cv2.contourArea)
        marker = cv2.minAreaRect(biggest)

        if focalLength == 0.0:
            # First detection: derive focal length from the known target.
            focalLength = (marker[1][0] * KNOWN_DISTANCE) / KNOWN_WIDTH
        else:
            distance = (KNOWN_WIDTH * focalLength) / marker[1][0]

            # Draw the detected box and the distance readout.
            box = np.int0(cv2.boxPoints(marker))
            cv2.drawContours(frame, [box], -1, (0, 255, 0), 2)
            cv2.putText(frame, "%.2fcm" % distance,
                        (20, frame.shape[0] - 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 2.0, (0, 255, 0), 3)
            cv2.imshow("distance_image", frame)
            key = cv2.waitKey(20)
            if key == 27:
                cv2.destroyAllWindows()
                cam_ON.close()
                break
Exemplo n.º 4
0
def run():
    """Time-lapse main loop.

    Updates the camera frame once per second and saves a frame every
    ``settings.frameDelay`` seconds (named by Unix timestamp) until
    Ctrl-C is pressed.
    """
    interval = 0
    logging.basicConfig(level=settings.loglevel)
    logger = logging.getLogger(__name__)
    logger.info("Welcome to LapseTime".center(45, '='))
    capture = camera.Capture(settings.cameraID)
    while True:
        try:
            capture.updateFrame()
            if interval % settings.frameDelay == 0:
                # Save with a Unix-timestamp name and restart the count.
                capture.saveFrame(int(time.time()))
                interval = 0
            interval += 1
            time.sleep(1)
        except KeyboardInterrupt:
            # Fixed typo in the log message ("recieved" -> "received").
            logger.warning("C-c was received")
            exit()
Exemplo n.º 5
0
def main():
    """Stereo camera workflow driven by the global ``status`` flag.

    * ``status == "shoot"``: interactively capture left/right image
      pairs (Enter saves the current pair, ESC quits).
    * ``status == "calibrate"``: run per-camera and stereo calibration
      over the saved pairs and cache the rectification maps to disk.
    """
    if status == "shoot":
        frameId = 1
        while True:
            # Left eye: open, configure, capture one frame, release so
            # the other camera can be opened.
            cam_EYE = 'L'
            cam_ON = camera.Open("PI", cam_EYE)
            cam_ON = camera.Setup(cam_ON, cam_setting, "PI")
            img_L = camera.Capture(cam_ON, "PI")
            cam_ON.close()

            # Right eye: same sequence.
            cam_EYE = 'R'
            cam_ON = camera.Open("PI", cam_EYE)
            cam_ON = camera.Setup(cam_ON, cam_setting, "PI")
            img_R = camera.Capture(cam_ON, "PI")
            cam_ON.close()

            gray_L = cv2.cvtColor(img_L, cv2.COLOR_BGR2GRAY)
            gray_R = cv2.cvtColor(img_R, cv2.COLOR_BGR2GRAY)

            # Preview both eyes side by side at half resolution.
            height, width = gray_L.shape[:2]
            gray_L_s = cv2.resize(gray_L,
                                  (int(0.5 * width), int(0.5 * height)),
                                  interpolation=cv2.INTER_CUBIC)
            gray_R_s = cv2.resize(gray_R,
                                  (int(0.5 * width), int(0.5 * height)),
                                  interpolation=cv2.INTER_CUBIC)
            cv2.imshow('L_cam', gray_L_s)
            cv2.imshow('R_cam', gray_R_s)
            cv2.moveWindow("L_cam", 100, 400)
            cv2.moveWindow("R_cam", 800, 400)
            key = cv2.waitKey(50)
            if key == 27:
                # ESC: stop shooting.
                break
            elif key == 13:
                # Enter: save the full-resolution grayscale pair.
                L_save = L_path + '/{:04d}.jpg'
                R_save = R_path + '/{:04d}.jpg'
                cv2.imwrite(L_save.format(frameId), gray_L)
                cv2.imwrite(R_save.format(frameId), gray_R)
                print("Image pair {0} saved.".format(frameId))
                frameId += 1

    elif status == "calibrate":
        # refer: https://albertarmea.com/post/opencv-stereo-camera/

        TERMINATION_CRITERIA = (cv2.TERM_CRITERIA_EPS
                                | cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
        OPTIMIZE_ALPHA = 0

        outputFile = '/home/pi/Desktop/PyCharm/Cam_coefficients.npz'
        leftCamCali = '/home/pi/Desktop/PyCharm/L_Cam_Matrix.npz'
        rightCamCali = '/home/pi/Desktop/PyCharm/R_Cam_Matrix.npz'

        # Locate chessboard corners in every saved image, per camera.
        (leftFilenames, leftObjectPoints, leftImagePoints,
         leftSize) = Find_chessboard_single(L_path)
        (rightFilenames, rightObjectPoints, rightImagePoints,
         rightSize) = Find_chessboard_single(R_path)

        if leftSize != rightSize:
            print("Camera resolutions do not match")
            sys.exit(1)
        imageSize = leftSize

        # Use only images seen by BOTH cameras, capped at MAX_IMAGES.
        print("Choose {0} images to do calibration".format(MAX_IMAGES))
        filenames = list(set(leftFilenames) & set(rightFilenames))
        if len(filenames) > MAX_IMAGES:
            print(
                "Too many images to calibrate, using {0} randomly selected images"
                .format(MAX_IMAGES))
            filenames = random.sample(filenames, MAX_IMAGES)
        filenames = sorted(filenames)
        print("Using these images:")
        print(filenames)

        leftObjectPoints, leftImagePoints = Get_require_points(
            filenames, leftFilenames, leftObjectPoints, leftImagePoints)
        rightObjectPoints, rightImagePoints = Get_require_points(
            filenames, rightFilenames, rightObjectPoints, rightImagePoints)

        # Both point sets describe the same physical chessboard poses.
        # objectPoints = leftObjectPoints
        objectPoints = rightObjectPoints

        try:
            # Reuse cached per-camera intrinsics when available.
            cache_L = np.load(leftCamCali)
            print(
                "Loading left calibration data from cache file at {0}".format(
                    leftCamCali))
            cache_R = np.load(rightCamCali)
            # BUGFIX: this message previously said "left" while loading
            # the RIGHT camera cache.
            print(
                "Loading right calibration data from cache file at {0}".format(
                    rightCamCali))

            leftCameraMatrix = cache_L["leftCameraMatrix"]
            leftDistortionCoefficients = cache_L["leftDistortionCoefficients"]
            rightCameraMatrix = cache_R["rightCameraMatrix"]
            rightDistortionCoefficients = cache_R[
                "rightDistortionCoefficients"]
        except IOError:
            print("Cache file at {0} not found".format(leftCamCali))
            print("Cache file at {0} not found".format(rightCamCali))

            print("Calibrating left camera...")
            _, leftCameraMatrix, leftDistortionCoefficients, _, _ = cv2.calibrateCamera(
                objectPoints, leftImagePoints, imageSize, None, None)
            print("Caching left camera matrix... ")
            np.savez_compressed(
                leftCamCali,
                leftCameraMatrix=leftCameraMatrix,
                leftDistortionCoefficients=leftDistortionCoefficients)

            print("Calibrating right camera...")
            _, rightCameraMatrix, rightDistortionCoefficients, _, _ = cv2.calibrateCamera(
                objectPoints, rightImagePoints, imageSize, None, None)
            print("Caching right camera matrix... ")
            np.savez_compressed(
                rightCamCali,
                rightCameraMatrix=rightCameraMatrix,
                rightDistortionCoefficients=rightDistortionCoefficients)

        print("Calibrating cameras together...")
        # Per-camera intrinsics are fixed (CALIB_FIX_INTRINSIC); solve
        # only for the rotation/translation between the two cameras.
        (_, _, _, _, _, rotationMatrix,
         translationVector, _, _) = cv2.stereoCalibrate(
             objectPoints, leftImagePoints, rightImagePoints, leftCameraMatrix,
             leftDistortionCoefficients, rightCameraMatrix,
             rightDistortionCoefficients, imageSize, None, None, None, None,
             cv2.CALIB_FIX_INTRINSIC, TERMINATION_CRITERIA)

        print("Rectifying cameras...")
        # NOTE: "dispartity" is a misspelling, but the npz key below is
        # consumed elsewhere, so the spelling is kept for compatibility.
        (leftRectification, rightRectification, leftProjection,
         rightProjection,
         dispartityToDepthMap, leftROI, rightROI) = cv2.stereoRectify(
             leftCameraMatrix, leftDistortionCoefficients, rightCameraMatrix,
             rightDistortionCoefficients, imageSize, rotationMatrix,
             translationVector, None, None, None, None, None,
             cv2.CALIB_ZERO_DISPARITY, OPTIMIZE_ALPHA)

        print("Saving calibration...")
        leftMapX, leftMapY = cv2.initUndistortRectifyMap(
            leftCameraMatrix, leftDistortionCoefficients, leftRectification,
            leftProjection, imageSize, cv2.CV_32FC1)
        rightMapX, rightMapY = cv2.initUndistortRectifyMap(
            rightCameraMatrix, rightDistortionCoefficients, rightRectification,
            rightProjection, imageSize, cv2.CV_32FC1)

        np.savez_compressed(outputFile,
                            imageSize=imageSize,
                            leftMapX=leftMapX,
                            leftMapY=leftMapY,
                            leftROI=leftROI,
                            rightMapX=rightMapX,
                            rightMapY=rightMapY,
                            rightROI=rightROI,
                            dispartityToDepthMap=dispartityToDepthMap)

        cv2.destroyAllWindows()
Exemplo n.º 6
0
                    # Calibration could not be loaded from the MATLAB
                    # cache file either; mark calibration unavailable so
                    # the code below terminates.
                    print("MATLAB cache file at {0} not found".format(
                        Cam_coefficient_MATLAB))
                    print("Exit with error")
                    able_to_cal = False

        # if matrix loaded, display captured image
        if able_to_cal != True:
            print("Unable to load calibration matrix, terminated")
            sys.exit(1)
        else:
            print("Start capture images...")
            # Left eye: open, configure, capture one frame, convert to
            # grayscale, release the camera.
            cam_EYE = 'L'
            cam_ON = camera.Open(TYPE, cam_EYE)
            cam_ON = camera.Setup(cam_ON, cam_setting, TYPE)
            # capture image
            img_L = camera.Capture(cam_ON, TYPE)
            # img_L = cv2.flip(img_L, -1)
            gray_L = cv2.cvtColor(img_L, cv2.COLOR_BGR2GRAY)
            cam_ON.close()

            # Right eye: same sequence.
            cam_EYE = 'R'
            cam_ON = camera.Open(TYPE, cam_EYE)
            cam_ON = camera.Setup(cam_ON, cam_setting, TYPE)
            # capture image
            # time.sleep(0.1)
            img_R = camera.Capture(cam_ON, TYPE)
            # img_R = cv2.flip(img_L, -1)
            gray_R = cv2.cvtColor(img_R, cv2.COLOR_BGR2GRAY)
            cam_ON.close()

            # obtain rectified image pair
Exemplo n.º 7
0
def videoLoop(mirror=False):

    camera.Capture(0)
    """
Exemplo n.º 8
0
def Depth_map():
    """Capture one stereo pair, rectify it, and display a depth map.

    Loads the rectification maps cached by the calibration step; if the
    cache file is missing, prints a message and returns without
    capturing. ESC closes the windows after the final display.
    """
    global cam_ON, cam_EYE
    # Stereo calibration cache written by the "calibrate" workflow.
    Cam_coefficient = '/home/pi/Desktop/PyCharm/Cam_coefficients.npz'

    try:
        # allow_pickle=False: the archive holds plain arrays only.
        calibration = np.load(Cam_coefficient, allow_pickle=False)
        flag = True
        print("Loading camera coefficients from cache file at {0}".format(
            Cam_coefficient))
        imageSize = tuple(calibration["imageSize"])
        leftMapX = calibration["leftMapX"]
        leftMapY = calibration["leftMapY"]
        leftROI = tuple(calibration["leftROI"])
        rightMapX = calibration["rightMapX"]
        rightMapY = calibration["rightMapY"]
        rightROI = tuple(calibration["rightROI"])
        # NOTE: "dispartity" is a misspelling kept to match the key
        # written by the calibration step (Q = disparity-to-depth map).
        Q = calibration["dispartityToDepthMap"]
    except IOError:
        print("Cache file at {0} not found".format(Cam_coefficient))
        flag = False

    if flag:
        # Capture left then right sequentially (one camera open at a time).
        cam_EYE = 'L'
        cam_ON = camera.Open("PI", cam_EYE)
        cam_ON = camera.Setup(cam_ON, cam_setting, TYPE)
        # capture image
        img_L = camera.Capture(cam_ON, TYPE)
        # img_L = cv2.flip(img_L, -1)
        gray_L = cv2.cvtColor(img_L, cv2.COLOR_BGR2GRAY)
        cam_ON.close()

        cam_EYE = 'R'
        cam_ON = camera.Open("PI", cam_EYE)
        cam_ON = camera.Setup(cam_ON, cam_setting, TYPE)
        # capture image
        # time.sleep(0.1)
        img_R = camera.Capture(cam_ON, TYPE)
        # img_R = cv2.flip(img_L, -1)
        gray_R = cv2.cvtColor(img_R, cv2.COLOR_BGR2GRAY)
        cam_ON.close()

        # Warp both frames into the rectified stereo geometry.
        fixedLeft = cv2.remap(gray_L, leftMapX, leftMapY, cv2.INTER_LINEAR)
        cv2.imshow('fixedLeft', fixedLeft)
        # cv2.imwrite('/home/pi/Desktop/PyCharm/fixedLeft.jpg', fixedLeft)
        fixedRight = cv2.remap(gray_R, rightMapX, rightMapY, cv2.INTER_LINEAR)
        cv2.imshow('fixedRight', fixedRight)
        # cv2.imwrite('/home/pi/Desktop/PyCharm/fixedRight.jpg', fixedRight)

        # depth: original depth map, used for calculate 3D distance
        # depth_no: normalised depth map, used to display
        # depth_tho: normalised depth map with threhold applied, used to optimise
        depth, depth_no, depth_tho = Depth_cal(fixedLeft, fixedRight, leftROI,
                                               rightROI)
        # calculate the real world distance; /16 presumably undoes the
        # matcher's fixed-point disparity scaling — TODO confirm in Depth_cal
        threeD = cv2.reprojectImageTo3D(depth.astype(np.float32) / 16., Q)
        dis_set = cal_3d_dist(depth_tho, threeD)

        # Outline each detected object and label it with its distance.
        for item in dis_set:
            cv2.drawContours(depth_tho, [item[0]], -1, 255, 2)
            cv2.putText(depth_tho, "%.2fcm" % item[1],
                        (item[2][0], item[2][1] - 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, 255, 3)

        cv2.imshow("depth", depth_no)
        cv2.imshow("depth_threshold", depth_tho)
        key = cv2.waitKey(-1)
        if key == 27:
            cv2.destroyAllWindows()
Exemplo n.º 9
0
def run():
    """Video-tracking demo loop (Python 2).

    Streams frames from a video file through an optical-flow tracker,
    integrates the per-frame motion delta into a position estimate, and
    maps keypresses to pause/step/record/stepper-motor actions.
    """
    arduino.initBoard(disabled=True, sketchDir="arduino/Serial Box")
    # camera1 = camera.Capture(windowName="camera",
    #                          cameraType="ELP",
    #                          # width=427, height=240,
    #                          # width=214, height=120,
    #                          sizeByFPS=31,
    #                          camSource=1
    #                          )
    camera1 = camera.Capture(
        windowName="camera",
        camSource="IMG_0582.m4v",
        # camSource="Sun Jul 26 13;21;49 2015.m4v",
        width=640,
        height=360,
        # width=427, height=240
        # frameSkip=5,
        loopVideo=False,
    )

    # imu = arduino.IMU()
    stepper = arduino.Stepper(1)

    # Mutable loop state toggled by the key handler below.
    captureProperties = dict(
        paused=False,
        showOriginal=False,
        enableDraw=False,
        currentFrame=0,
        writeVideo=False,
    )

    # if captureProperties['paused'] == True:
    # Peek at the first frame without advancing the stream.
    frame1 = camera1.updateFrame(readNextFrame=False)

    # Position estimate starts at the frame centre.
    height, width = frame1.shape[0:2]
    position = [width / 2, height / 2]

    # tracker = camera.analyzers.SimilarFrameTracker(frame1)
    tracker = camera.analyzers.OpticalFlowTracker(frame1)

    while True:
        time0 = time.time()

        # Process a frame unless paused (or if the frame number moved,
        # e.g. after a manual left/right step).
        if captureProperties['paused'] is False or captureProperties[
                'currentFrame'] != camera1.currentFrameNumber():
            # time1 = time.time()
            frame1 = camera1.updateFrame()
            # print "update:", time.time() - time1

            captureProperties['currentFrame'] = camera1.currentFrameNumber()

            if captureProperties['showOriginal'] is False:
                # Accumulate the tracker's motion delta into position.
                frame1, delta = tracker.update(frame1, False)
                position[0] += delta[0]
                position[1] += delta[1]
                print "%s\t%s" % (position[0], position[1]),

                if captureProperties['enableDraw'] is True:
                    frame1 = camera.analyzers.drawPosition(frame1,
                                                           width,
                                                           height,
                                                           position,
                                                           reverse=False)

            if captureProperties['writeVideo'] == True:
                camera1.writeToVideo(frame1)

            if captureProperties['enableDraw'] is True:
                camera1.showFrame(frame1)

        # Key handling only works while drawing is enabled (the window
        # must exist to receive key events).
        # q/esc quit, space pauses, o toggles raw view, arrows step,
        # s saves a frame, h toggles drawing, v toggles recording,
        # t moves the stepper, p recentres the position estimate.
        if captureProperties['enableDraw'] is True:
            # time2 = time.time()
            key = camera1.getPressedKey()
            # print "key:", time.time() - time2

            if key == 'q' or key == "esc":
                camera1.stopCamera()
                break
            elif key == ' ':
                if captureProperties['paused']:
                    print "...Video unpaused"
                else:
                    print "Video paused..."
                captureProperties['paused'] = not captureProperties['paused']
            elif key == 'o':
                captureProperties[
                    'showOriginal'] = not captureProperties['showOriginal']
                frame1 = camera1.updateFrame(False)
            elif key == "right":
                camera1.incrementFrame()
            elif key == "left":
                camera1.decrementFrame()
            elif key == 's':
                camera1.saveFrame(frame1)
            elif key == 'h':
                captureProperties[
                    'enableDraw'] = not captureProperties['enableDraw']
            elif key == 'v':
                if captureProperties['writeVideo'] == False:
                    camera1.initVideoWriter()
                else:
                    camera1.stopVideo()
                captureProperties[
                    'writeVideo'] = not captureProperties['writeVideo']
            elif key == 't':
                # Toggle the stepper between its two endpoints.
                if stepper.position == 0:
                    stepper.moveTo(1000)
                else:
                    stepper.moveTo(0)
            elif key == 'p':
                position = [width / 2, height / 2]

        # Per-iteration timing plus current frame number.
        print time.time() - time0, camera1.currentFrameNumber()
Exemplo n.º 10
0
                              speckleWindowSize=100,
                              speckleRange=32
                              )
# Camera backend selector passed to the camera helper module.
TYPE = "PI"
# Pi camera capture settings; shutter 0 presumably selects automatic
# exposure — TODO confirm against the camera helper.
cam_setting = {"width":1280,"height":720,"frame_rate":23,"shutter":0,"iso":800}
# morphology settings: 12x12 structuring element (its use is not
# visible in this chunk)
kernel = np.ones((12, 12), np.uint8)


while True:

    cam_EYE = 'L'
    cam_ON = camera.Open("PI", cam_EYE)
    cam_ON = camera.Setup(cam_ON, cam_setting, TYPE)
    # capture image
    image_left = camera.Capture(cam_ON, TYPE)
    # img_L = cv2.flip(img_L, -1)
    # gray_L = cv2.cvtColor(img_L, cv2.COLOR_BGR2GRAY)
    cam_ON.close()

    cam_EYE = 'R'
    cam_ON = camera.Open("PI", cam_EYE)
    cam_ON = camera.Setup(cam_ON, cam_setting, TYPE)
    # capture image
    # time.sleep(0.1)
    image_right = camera.Capture(cam_ON, TYPE)
    # img_R = cv2.flip(img_L, -1)
    # gray_R = cv2.cvtColor(img_R, cv2.COLOR_BGR2GRAY)
    cam_ON.close()

    # compute disparity