def Follow_Line(testMode=False, intersectionQueue=None, robot=None):

    # Avoid mutable and expensive default arguments: the queue and the
    # robot config are resolved at call time rather than at import time
    if intersectionQueue is None:
        intersectionQueue = []
    if robot is None:
        robot = loadRobot('ROBOSON.json')

    # Adjusting Robot variables
    baseSpeed = robot.speed.base
    maxSpeed = robot.speed.max
    minSpeed = robot.speed.min
    numberOfLinesRequiredForIntersectionMode = robot.number_of_lines_required_for_inersection_mode

    # Value used for the binary filter
    BIN_CUT = robot.line_finder.binary_cut

    # Loading PID values
    MultiCoefficient = robot.pid.total
    PCoefficient = robot.pid.pro
    DCoefficient = robot.pid.der

    camera = PiCamera()
    camera.color_effects = (128, 128)
    cameraResolution = robot.line_finder.resolution
    camera.resolution = (cameraResolution[0], cameraResolution[1])
    rawCapture = PiRGBArray(camera,
                            size=(cameraResolution[0], cameraResolution[1]))

    # Check for serial port issues:
    # if there is no serial connection, this function
    # simply returns the queue untouched
    try:
        ser = serial.Serial("/dev/ttyS0", 9600)
        ser.flushInput()
        serialByteArray = []
    except serial.SerialException:
        return intersectionQueue

    # In test mode, open windows for displaying the frames
    if testMode:
        cv2.namedWindow("Original_frame", cv2.WINDOW_NORMAL)
        cv2.namedWindow("binary", cv2.WINDOW_NORMAL)
        cv2.resizeWindow("binary", 100, 100)
        cv2.resizeWindow("Original_frame", 100, 100)

    # Reads width of the line from the file
    with open('LineWidth.txt') as f:
        black_line_width = int(f.readline())

    # Minimum width for a fork
    fork_min_width = robot.fork_black_line_min_width_multiplier * black_line_width

    # Kernel setup: a 1 x (3*n) horizontal kernel that averages every
    # third pixel, used to smooth each sensor row before thresholding
    n = 3
    kernel = np.array([[0, 1 / n, 0] * n])

    # Capture a single probe frame to measure the frame dimensions
    camera.capture(rawCapture, format='bgr')
    frame = rawCapture.array[:, :, 0]
    rawCapture.truncate(0)

    # Retrieving the height and the width of the frame
    height = len(frame)
    width = len(frame[0])

    # Sensor lines used for the line follower
    # You could change the spacing of these rows
    linesY = np.linspace(5, height - 5, 10, dtype=int)

    # The capture buffer must be truncated before each new frame
    rawCapture.truncate(0)

    # List of errors and times for PID plots
    if testMode:
        errorList = []
        timeList = []

    # Initializing distances for derivative calculation
    currentDeltaX = 0
    previousDeltaX = 0

    # Initializing times
    currentTime = time.time()
    frameEndTime = time.time()
    frameStartTime = time.time()

    # Intersection mode determines the behaviour of the line following
    intersectionMode = False
    # This value changes based on the next element in the intersection queue
    intersectionDirection = None
    # Number of sensor lines detecting an intersection, i.e. where the left
    # and right spikes are farther apart than a single line
    numberOfLinesDetectingIntersection = 0

    # There are two ways to exit this main loop:
    # 1) The serial connection is lost
    # 2) The queue of turns has reached the character "X"

    # Main loop starting
    # Each frame is taken as a 3-channel grayscale image
    for frame in camera.capture_continuous(rawCapture,
                                           format="bgr",
                                           use_video_port=True):

        # Algorithm start time
        algorithmStartTime = time.time()

        # Sample frame taken time
        frameStartTime = time.time()
        frameTakingTime = frameStartTime - frameEndTime
        #print("Frame taking time:", frameTakingTime)

        # Appending time to the list (only collected in test mode)
        if testMode:
            timeList.append(frameStartTime)

        # Save the previous delta X, used for the derivative
        previousDeltaX = currentDeltaX

        frameArray = frame.array
        # Creating a grayscale copy of the frame by taking only one channel
        # and only the rows used by the line following algorithm.
        # This step is included to reduce computation time
        frameCopy = frameArray.copy()[linesY][:, :, 0]
        #frameCopyTime = time.time()
        #print("transform time", frameCopyTime - algorithmStartTime)

        # Taking the kernel of the image
        kerneledImage = cv2.filter2D(frameCopy, -1, kernel)
        kernelTime = time.time()
        #kerneledImage = frameCopy
        #print("kernel time: ",kernelTime-frameCopyTime)

        # Blurring the image
        blurredImage = cv2.GaussianBlur(kerneledImage, (5, 5), 0)
        blurTime = time.time()
        #print("blur time: ",blurTime-kernelTime)

        # Binary filter for the image
        ret, binaryImage = cv2.threshold(blurredImage, BIN_CUT, 255,
                                         cv2.THRESH_BINARY)
        binTime = time.time()
        #print("bin time: ",binTime-blurTime)

        # The thresholded sensor rows form the path matrix
        pathMatrix = binaryImage

        # Median center value
        # This value combines the results from all the sensor lines
        currentDeltaX = 0

        # A list of the deltaX values indicated by each sensor line
        deltaXList = []

        # Reset this counter before the loop below; if the number of sensor
        # lines detecting an intersection drops to zero, we must quit
        # intersection mode
        numberOfLinesDetectingIntersection = 0

        if not intersectionMode:

            # A loop for the calculations for each line of the sensors
            for line_index in range(0, len(linesY)):
                lineY = linesY[line_index]
                line = pathMatrix[line_index]

                # Taking the discrete derivative of the row to find edges
                dline = np.diff(line)

                if line[0] == 0:
                    dline[0] = 1
                if line[-1] == 0:
                    dline[-1] = 1

                # Here we could either choose the two highest points or
                # simply trust that the nonzero values mark the two edges
                # of the line
                edgeIndices = [
                    i for i in range(0, len(dline)) if dline[i] != 0
                ]

                # Case for when two or more edges are detected
                if (len(edgeIndices) >= 2):
                    leftmostEdge = edgeIndices[0]
                    rightmostEdge = edgeIndices[-1]

                    lineWidthDetected = rightmostEdge - leftmostEdge
                    print("Line width detected: ", lineWidthDetected)
                    print(len(edgeIndices))
                    print("_______________________________________________")

                    # Checking to see if a fork is detected
                    if (lineWidthDetected >= fork_min_width
                            and len(edgeIndices) >= 3):

                        intersectionDirection = intersectionQueue[0]
                        numberOfLinesDetectingIntersection += 1

                        if (intersectionDirection == "L"):
                            thisLineDeltaX = rightmostEdge - width / 2
                        elif (intersectionDirection == "R"):
                            thisLineDeltaX = leftmostEdge - width / 2
                        elif (intersectionDirection == "X"):
                            return intersectionQueue

                        deltaXList.append(thisLineDeltaX)

                    else:
                        # Finding the center of the line
                        lineCenterX = int((leftmostEdge + rightmostEdge) / 2)

                        # Draw circle for showing
                        # cv2.circle(frame, (lineCenterX, lineY), 2, (255,0,0), -1)

                        # Offset of the line center from the frame center
                        thisLineDeltaX = lineCenterX - width / 2
                        deltaXList.append(thisLineDeltaX)

                # Case for when only one edge is detected
                elif (len(edgeIndices) == 1):
                    onlyEdge = edgeIndices[0]

                    # Estimate the line center depending on whether the
                    # single edge falls on the left or the right half
                    if (onlyEdge >= width / 2):
                        lineCenterX = onlyEdge + black_line_width / 2
                    else:
                        lineCenterX = onlyEdge - black_line_width / 2
                    thisLineDeltaX = lineCenterX - width / 2
                    deltaXList.append(thisLineDeltaX)

                # Case for when we are off the line entirely:
                # grow the previous delta geometrically (keeping its sign)
                # so the robot steers back toward the line
                else:
                    thisLineDeltaX = 1.4 * previousDeltaX
                    deltaXList.append(thisLineDeltaX)

            if (numberOfLinesDetectingIntersection >=
                    numberOfLinesRequiredForIntersectionMode):
                intersectionMode = True
                # Pop this intersection's turn off the queue, as the
                # comments in the block below assume
                intersectionDirection = intersectionQueue.pop(0)
                intersectionStartTime = time.time()
                print("Entering intersection mode at", time.time())

        if intersectionMode:

            ser.write([0, 0, 0, 0])
            time.sleep(1)

            # Important!
            # At this point the intersection direction is already
            # determined: it was popped off the intersectionQueue when
            # the state machine switched to intersection mode

            # Also note: if the intersection direction had been "X",
            # the function would already have returned, so normally
            # there is no need to check for it here

            # Loop iteration is now complete
            key = cv2.waitKey(1)

            # if the `q` key was pressed, break from the loop
            if key & 0xFF == ord("q"):
                break

            algorithmEndTime = time.time()
            #print("Full algorithm time: ", algorithmEndTime - algorithmStartTime)

            # Showing the frame in test mode only
            if testMode:
                cv2.imshow("Original_frame", frameArray)
                cv2.imshow("binary", binaryImage)

            # Truncating required for the frames
            rawCapture.truncate(0)

            # Leave intersection mode once the turn window has elapsed;
            # skip to the next frame so the PID block below never runs
            # on an empty deltaXList
            if (time.time() - intersectionStartTime > 0.05):
                intersectionMode = False
                continue

            if (intersectionDirection == "L"):
                ser.write([baseSpeed, 1, -1 * minSpeed, 0])
                continue
            elif (intersectionDirection == "R"):
                ser.write([-1 * minSpeed, 0, baseSpeed, 1])
                continue
            else:
                return intersectionQueue
        # Taking the median of the delta Xs
        currentDeltaX = statistics.median(deltaXList)
        #print("Delta X measured: ", currentDeltaX)

        forLoopTime = time.time()
        #print("for loop time: ", forLoopTime - binTime)

        #
        # PID calculations
        #

        # Finding the derivative time step:
        # the time elapsed since the previous frame's measurement
        derivativeDeltaTime = frameTakingTime + (forLoopTime -
                                                 algorithmStartTime)

        # finding the derivative
        dXdT = (currentDeltaX - previousDeltaX) / derivativeDeltaTime

        # Finding the error value given the constants
        error_value = int(MultiCoefficient *
                          (DCoefficient * dXdT + PCoefficient * currentDeltaX))

        duty_cycle_left = baseSpeed + error_value
        duty_cycle_right = baseSpeed - error_value

        # Reassigning the duty cycle values based on the cutoffs
        # Left Wheel
        if duty_cycle_left > maxSpeed:
            duty_cycle_left = maxSpeed
        elif duty_cycle_left < minSpeed:
            duty_cycle_left = minSpeed

        # Right wheel
        if duty_cycle_right > maxSpeed:
            duty_cycle_right = maxSpeed
        elif duty_cycle_right < minSpeed:
            duty_cycle_right = minSpeed

        calcTime = time.time()
        #print("Calculation time: ", calcTime - forLoopTime)

        # Serial communication to the BluePill
        serialByteArray.append(abs(duty_cycle_left))
        if (duty_cycle_left > 0):
            serialByteArray.append(1)
        else:
            serialByteArray.append(0)

        serialByteArray.append(abs(duty_cycle_right))
        if (duty_cycle_right > 0):
            serialByteArray.append(1)
        else:
            serialByteArray.append(0)

        #print("Byte array sent to the BluePill: ",serialByteArray)

        # This try/except block returns the unfinished
        # queue in case the serial communication is lost mid-run
        try:
            ser.write(serialByteArray)
            serialByteArray = []
        except serial.SerialException:
            return intersectionQueue

        serialTime = time.time()
        #print("serial time: ", serialTime - calcTime)

        # Loop is now complete
        key = cv2.waitKey(1)

        # if the `q` key was pressed, break from the loop
        if key == "q":
            break

        algorithmEndTime = time.time()
        #print("Full algorithm time: ", algorithmEndTime - algorithmStartTime)

        # Showing the frame in test mode only
        if testMode:
            cv2.imshow("Original_frame", frameArray)
            cv2.imshow("binary", binaryImage)

        # Truncating required for the frames
        rawCapture.truncate(0)

        # End of this frame
        frameEndTime = time.time()
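
A minimal usage sketch for the function above, assuming ROBOSON.json and LineWidth.txt are present on the robot; the turn queue is consumed one entry per intersection and ends with "X" to stop:

# Hypothetical caller for Follow_Line above.
# Turn left at the first fork, right at the second, stop at the third.
remaining = Follow_Line(testMode=True, intersectionQueue=["L", "R", "X"])
if remaining:
    print("Stopped with turns still queued:", remaining)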
Example #2
    def __init__(self):
        self.camera = PiCamera()
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_path',
        required=True,
        help='Path to converted model file that can run on VisionKit.')
    parser.add_argument(
        '--label_path',
        required=True,
        help='Path to label file that corresponds to the model.')
    parser.add_argument('--input_height',
                        type=int,
                        required=True,
                        help='Input height.')
    parser.add_argument('--input_width',
                        type=int,
                        required=True,
                        help='Input width.')
    parser.add_argument('--input_layer',
                        required=True,
                        help='Name of input layer.')
    parser.add_argument('--output_layer',
                        required=True,
                        help='Name of output layer.')
    parser.add_argument(
        '--num_frames',
        type=int,
        default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    parser.add_argument('--input_mean',
                        type=float,
                        default=128.0,
                        help='Input mean.')
    parser.add_argument('--input_std',
                        type=float,
                        default=128.0,
                        help='Input std.')
    parser.add_argument('--input_depth',
                        type=int,
                        default=3,
                        help='Input depth.')
    parser.add_argument(
        '--threshold',
        type=float,
        default=0.1,
        help='Threshold for classification score (from output tensor).')
    parser.add_argument('--top_k',
                        type=int,
                        default=3,
                        help='Keep at most top_k labels.')
    parser.add_argument(
        '--preview',
        action='store_true',
        default=False,
        help=
        'Enables camera preview in addition to printing result to terminal.')
    parser.add_argument('--show_fps',
                        action='store_true',
                        default=False,
                        help='Shows end to end FPS.')
    parser.add_argument(
        '--gpio_logic',
        default='NORMAL',
        help='Indicates if NORMAL or INVERSE logic is used in GPIO pins.')
    args = parser.parse_args()

    model = inference.ModelDescriptor(
        name='mobilenet_based_classifier',
        input_shape=(1, args.input_height, args.input_width, args.input_depth),
        input_normalizer=(args.input_mean, args.input_std),
        compute_graph=utils.load_compute_graph(args.model_path))
    labels = read_labels(args.label_path)

    with PiCamera(sensor_mode=4, resolution=(1640, 1232),
                  framerate=30) as camera:
        if args.preview:
            camera.start_preview()

        with inference.CameraInference(model) as camera_inference:
            for result in camera_inference.run(args.num_frames):
                processed_result = process(result, labels, args.output_layer,
                                           args.threshold, args.top_k)
                send_signal_to_pins(processed_result[0], args.gpio_logic)
                message = get_message(processed_result, args.threshold,
                                      args.top_k)
                if args.show_fps:
                    message += '\nWith %.1f FPS.' % camera_inference.rate
                print(message)

                if args.preview:
                    camera.annotate_foreground = Color('black')
                    camera.annotate_background = Color('white')
                    # PiCamera text annotation only supports ascii.
                    camera.annotate_text = '\n %s' % message.encode(
                        'ascii', 'backslashreplace').decode('ascii')

        if args.preview:
            camera.stop_preview()
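
main() above also calls a read_labels helper that the excerpt does not include. A minimal sketch of what it might look like, assuming one label per line in the label file:

def read_labels(label_path):
    # Assumed helper (not shown in the excerpt): one label per line
    with open(label_path) as f:
        return [line.strip() for line in f]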
    def __init__(self, photos_dir):
        if has_module:
            self.camera_object = PiCamera()
            self.camera_object.resolution = self.DEFAULT_RESOLUTION

        self.photos_dir = photos_dir
import tensorflow as tf
# Imports required by the code below (not shown in the original excerpt)
from pathlib import Path
from ephem import readtle
from picamera import PiCamera

tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

# Data basepath
dir_path = Path(__file__).parent.resolve()
data_file = dir_path/'data.csv'

# Ephem ISS location
name = "ISS (ZARYA)"
line1 = "1 25544U 98067A   20316.41516162  .00001589  00000+0  36499-4 0  9995"
line2 = "2 25544  51.6454 339.9628 0001882  94.8340 265.2864 15.49409479254842"
iss = readtle(name, line1, line2)
iss.compute()

# Camera Resolution
rpi_cam = PiCamera()
rpi_cam.resolution = (648,486) #Using AI expected resolution

# Pre-Processing Helper functions

def capture(camera, image):
    """
    Use 'camera' to capture an 'image' file with lat/long EXIF data.
    """
    iss.compute() # Get the lat/long values from ephem

    # convert the latitude and longitude to EXIF-appropriate representations
    south, exif_latitude = convert(iss.sublat)
    west, exif_longitude = convert(iss.sublong)

    # set the EXIF tags specifying the current location
from picamera import PiCamera
from time import sleep
import smtplib
import imghdr
from email.message import EmailMessage


# SMTP = Simple Mail Transfer Protocol
Email_ADDRESS = '*****@*****.**'
Email_PASSWORD = '******'

picture = PiCamera()
picture.rotation = 180  # rotation is a property; set it before capturing
picture.start_preview()
sleep(2)
picture.capture('/home/pi/Desktop/image.jpg')  # Path for the image
picture.stop_preview()

msg = EmailMessage()
msg['Subject'] = 'PICTURE'
msg['From'] = Email_ADDRESS
msg['To'] = '*****@*****.**'  # email you're sending to
msg.set_content('GOAL')

#for the image
with open('/home/pi/Desktop/image.jpg', 'rb') as f:
# I used an absolute path to locate the picture.

    file_data = f.read()
    file_type = imghdr.what(f.name)
    file_name = f.name
    def frames():
        objectEngine = DetectionEngine(
            "models/edge-tpu/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite"
        )
        labels = Camera.ReadLabelFile("models/edge-tpu/coco_labels.txt")
        faceEngine = DetectionEngine(
            "models/edge-tpu/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite"
        )
        inceptionObjectEngine = ClassificationEngine(
            "models/edge-tpu/inception_v3_299_quant_edgetpu.tflite")
        inceptionLabels = Camera.ReadLabelFile(
            "models/edge-tpu/imagenet_labels.txt")

        width = 1280
        height = 720
        camera = PiCamera()
        camera.resolution = (width, height)
        camera.framerate = 30
        camera.vflip = True
        camera.hflip = True
        camera.meter_mode = "matrix"
        camera.awb_mode = "auto"
        camera.image_denoise = True
        camera.start_preview()
        try:
            while True:
                objectsInFrame = {}
                stream = io.BytesIO()
                camera.capture(stream, format='jpeg')
                stream.seek(0)
                img = Image.open(stream)
                draw = ImageDraw.Draw(img)

                for result in inceptionObjectEngine.ClassifyWithImage(
                        img, top_k=10):
                    score = result[1]
                    if score > 0.1:
                        label = inceptionLabels[result[0]]
                        objectsInFrame[label] = str(score)

                detectedObjects = objectEngine.DetectWithImage(
                    img,
                    threshold=0.05,
                    keep_aspect_ratio=True,
                    relative_coord=False,
                    top_k=10)
                for detection in detectedObjects:
                    confidence = detection.score
                    if confidence > .4:
                        label = labels[detection.label_id]
                        objectsInFrame[label] = str(confidence)
                        box = detection.bounding_box.flatten().tolist()
                        Camera.drawBoxAndLabel(draw, box, label, 'red')

                detectedFaces = faceEngine.DetectWithImage(
                    img,
                    threshold=0.05,
                    keep_aspect_ratio=True,
                    relative_coord=False,
                    top_k=10)
                for face in detectedFaces:
                    confidence = face.score
                    if confidence > .2:
                        box = face.bounding_box.flatten().tolist()
                        label = "face"
                        objectsInFrame[label] = str(confidence)
                        Camera.drawBoxAndLabel(draw, box, label, 'green')

                objectsInFrame = json.dumps(objectsInFrame)
                output = io.BytesIO()
                img.save(output, format='JPEG')
                yield (objectsInFrame, output.getvalue())
                stream.close()
        finally:
            camera.stop_preview()
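
The generator above yields (objectsInFrame, jpeg_bytes) pairs. A short consumer sketch, assuming frames is a staticmethod on the Camera class it references:

# Illustrative consumer: print the detections and keep the latest frame
for objects_json, jpeg_bytes in Camera.frames():
    print(objects_json)                  # detections as a JSON string
    with open('latest.jpg', 'wb') as f:  # newest annotated frame
        f.write(jpeg_bytes)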
Example #8
from picamera import PiCamera

camera = PiCamera(resolution=(640, 480))

for filename in camera.record_sequence('%d.h264' % i for i in range(1, 11)):
    camera.wait_recording(5)
Example #9
# Set a custom formatter
formatter = logging.Formatter(
    '%(name)s - %(asctime)-15s - %(levelname)s: \n%(message)s')
logzero.formatter(formatter)

name = "ISS (ZARYA)"

line1 = "1 25544U 98067A   19032.30987313  .00001170  00000-0  25693-4 0  9997"

line2 = "2 25544  51.6429 316.9782 0004973 340.0425 166.5613 15.53214053154148"

iss = ephem.readtle(name, line1, line2)

####
cam = PiCamera()
cam.resolution = (2592, 1944)
iss.compute()


def update_image(checker):

    # a list with all possible rotation values
    orientation = [0, 90, 270, 180]
    if checker == 0:
        # pick one at random
        rotation = random.choice(orientation)
        # set the rotation
        sh.set_rotation(rotation)
    elif checker == 1:
        sh.set_rotation(0)
#!/usr/bin/env python3
import subprocess
from picamera import PiCamera
import time
#stream_cmd = 'ffmpeg -re -ar 44100 -ac 2 -acodec pcm_s16le -f s16le -ac 2 -i />

stream_cmd = 'ffmpeg -i - -y -hls_time 2 -hls_list_size 10 -start_number 1 mystream.m3u8'

avg = None
stream_pipe = subprocess.Popen(stream_cmd, shell=True, stdin=subprocess.PIPE)
resolution = (320, 240)
with PiCamera(resolution=resolution) as camera:

    camera.resolution = (1024, 768)
    camera.framerate = 25
    camera.vflip = True
    camera.hflip = True
    camera.start_recording('myfirstsplitrecording.h264')
    camera.start_recording(stream_pipe.stdin,
                           splitter_port=2,
                           format='h264',
                           bitrate=6000000)
    camera.wait_recording(30)
    camera.stop_recording()                  # stop the file recording
    camera.stop_recording(splitter_port=2)   # also stop the stream recording
import serial
from picamera import PiCamera
import time
import random
import datetime

#define camera settings:
camera = PiCamera(resolution=(1920, 1080))
camera.shutter_speed = 3000
camera.iso = 1600
time.sleep(1)
#consult https://picamera.readthedocs.io/en/release-1.13/index.html for more
#info on camera settings

#initialize the serial communication (check the RFID logger datasheet
#to get the baudrate and the other settings)
ser = serial.Serial(
    port='/dev/ttyUSB0',  #ls /dev/tty*
    baudrate=9600,
    parity=serial.PARITY_NONE,
    stopbits=serial.STOPBITS_ONE,
    bytesize=serial.EIGHTBITS,
    timeout=1)

#in the case of the priorityOne logger the tags are separated by "\r"
#for example
#b'011016BA90\r011016BA90\r011016BA90\r"
#to check how data is read by the raspberry pi uncomment the following lines
#while 1:
#    x = ser.readline()
#    print(x)
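
Building on the tag format documented above, a short hedged sketch of reading one chunk of tags and photographing the event (the file path is illustrative):

#illustrative continuation: split the raw bytes on b'\r' as described above
raw = ser.readline()                      # e.g. b'011016BA90\r011016BA90\r'
tags = [t.decode() for t in raw.split(b'\r') if t]
if tags:
    stamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
    camera.capture('/home/pi/rfid_' + stamp + '.jpg')
    print(tags)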
Example #12
    def detect(self):
        self.initialise_tflite_model()
        # Initialise the videostream
        resolution = (self.input_width, self.input_height)
        print('Initialising PiCamera...')
        camera = PiCamera()
        camera.resolution = resolution
        frame_rate_calc = 1
        freq = cv2.getTickFrequency()
        rawCapture = PiRGBArray(camera, size=resolution)
        time.sleep(2)
        print('Starting detection loop...')
        frame_rate_list = []
        frame_count_bool = self.config['frame_count_secs']

        out = cv2.VideoWriter(
            f'video_storage/{datetime.now().strftime("%m-%d-%Y, %H:%M:%S")}.mp4',
            cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), 8,
            (self.input_width, self.input_height))
        avg_fps = 1
        frame_count = 1
        for frame in camera.capture_continuous(rawCapture,
                                               format="bgr",
                                               use_video_port=True):
            t1 = cv2.getTickCount()
            # If user chooses to calculate video length by number of frames
            if frame_count >= self.config[
                    'absolute_frame_count'] and frame_count_bool == 'absolute':
                self.save_video(out)
                print(frame_count)
                frame_count = 1
                avg_fps = sum(frame_rate_list) / len(frame_rate_list)
                print(avg_fps)
                frame_rate_list = []
                out = cv2.VideoWriter(
                    f'video_storage/{datetime.now().strftime("%m-%d-%Y, %H:%M:%S")}.mp4',
                    cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), avg_fps,
                    (self.input_width, self.input_height))

            # If user wishes to calculate video length by approximated seconds
            if frame_count >= avg_fps * self.config[
                    'approx_secs'] and frame_count_bool == 'approx':
                self.save_video(out)
                print(frame_count)
                frame_count = 1
                avg_fps = sum(frame_rate_list) / len(frame_rate_list)
                print(avg_fps)
                frame_rate_list = []
                out = cv2.VideoWriter(
                    f'video_storage/{datetime.now().strftime("%m-%d-%Y, %H:%M:%S")}.mp4',
                    cv2.VideoWriter_fourcc('H', 'E', 'V', 'C'), avg_fps,
                    (self.input_width, self.input_height))

            # Get current frame
            frame = rawCapture.array

            (height, width) = frame.shape[:2]

            # Prepare frame for inference
            resized_frame = cv2.resize(frame,
                                       (self.input_width, self.input_height))
            image_np_expanded = np.expand_dims(resized_frame, axis=0)

            # Run inference
            self.interpreter.set_tensor(self.input_details[0]['index'],
                                        image_np_expanded)
            self.interpreter.invoke()

            # Retrieve output
            boxes = self.interpreter.get_tensor(
                self.output_details[0]['index'])[0]
            classes = self.interpreter.get_tensor(
                self.output_details[1]['index'])[0]
            scores = self.interpreter.get_tensor(
                self.output_details[2]['index'])[0]
            #Draw boxes and labels on frame   # Credit to pyimagesearch.com
            for i in range(0, len(classes)):
                if scores[i] > self.config['min_score']:

                    box = boxes[i]
                    (top, left, bottom,
                     right) = (box *
                               ([height, width, height, width])).astype('int')
                    prediction_index = classes[i].astype('int')

                    label_category = self.categories[prediction_index]['name']
                    label = "{}: {:.2f}%".format(label_category,
                                                 scores[i] * 100)
                    cv2.rectangle(frame, (left, top), (right, bottom),
                                  self.colour_list[prediction_index], 2)
                    cv2.putText(frame, label, (left, top - 5),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                self.colour_list[prediction_index], 2)

                    #Only save and send frames in list of desired categories
                    if (label_category in self.config['categories']):

                        # Include date and time of detection in file name
                        frame_time = datetime.now().strftime(
                            "%m-%d-%Y, %H:%M:%S")
                        frame_file_name = f'detection_storage/{frame_time} - {label}.png'

                        self.save_frame(frame_file_name, frame)

                        # Put category into notification queue
                        self.queue2.put(label_category)

            recorded_frame = frame.copy()
            # Adds fps to stream, but not to saved frames
            cv2.putText(frame, "FPS: {0:.2f}".format(frame_rate_calc),
                        (10, self.input_height - 10), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (255, 255, 0), 1, cv2.LINE_AA)

            cv2.putText(recorded_frame,
                        datetime.now().strftime("%m-%d-%Y, %H:%M:%S"),
                        (10, self.input_height - 10), cv2.FONT_HERSHEY_SIMPLEX,
                        0.25, (255, 255, 0), 1, cv2.LINE_AA)

            out.write(recorded_frame)
            #Show the frame - TODO: to be taken out in final implementation
            cv2.imshow('frame', recorded_frame)

            self.stream_frame(frame)

            #Reset picamera
            rawCapture.truncate(0)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                out.release()
                break

            #Calculate fps
            t2 = cv2.getTickCount()
            t2 = (t2 - t1) / freq
            frame_rate_calc = 1 / t2

            frame_rate_list.append(frame_rate_calc)
            frame_count = frame_count + 1
Example #13
    def __init__(self):
        Camera.camera = PiCamera()
        Camera.camera.preview_fullscreen = False
        Camera.camera.awb_mode = 'off'
        Camera.camera.awb_gains = (1.0, 1.0)
        Camera.camera.preview_window = (620, 320, 640, 480)
Example #14
from picamera import PiCamera, Color
from datetime import datetime
from time import sleep

try:
    camera = PiCamera()  # create an instance
    camera.annotate_background = Color("teal")
    camera.annotate_foreground = Color("lightpink")
    for exposure in camera.EXPOSURE_MODES:
        #camera.annotate_text=datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        camera.annotate_text_size = 55
        camera.exposure_mode = exposure
        camera.annotate_text = "Exp: %s" % exposure
        #camera.capture( '/root/Pictures/Example1/PhotoInNight.jpg')
        camera.capture('/root/Pictures/Example1/pic01%s.jpg' % exposure)
        #sleep(2)

finally:
    camera.close()
    print("End of program")
    camera.close()
    os.system("sudo shutdown now")
    exit(1)


def blink_led():
    led = LED(4)
    while True:
        led.on()
        sleep(1)
        led.off()
        sleep(1)


INTERVAL = 30
camera = PiCamera(framerate=20, resolution=(1280, 720))
print("Recording")

Thread(target=blink_led, daemon=True).start()
Thread(target=wait_button, daemon=True).start()

counter = 1
try:
    while True:

        file_name = f"videos/{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}-{str(counter)}"

        if counter == 1:
            camera.start_recording(f"{file_name}.h264", sei=True)
        else:
            camera.split_recording(f"{file_name}.h264")
Example #16
    def __init__(self):
        self.cam = PiCamera()
        self.cam.resolution = (150, 150)
cam_height = int((cam_height + 15) / 16) * 16
print("Used camera resolution: " + str(cam_width) + " x " + str(cam_height))
img_width = int(cam_width * scale_ratio)
img_height = int(cam_height * scale_ratio)
capture = np.zeros((img_height, img_width, 4), dtype=np.uint8)
print("Scaled image resolution: " + str(img_width) + " x " + str(img_height))
autotune_max = -10000000

# 3D position reconstruction (disparity-to-depth matrix)
focal_length = 165.0
tx = 65
q = np.array([[1, 0, 0, -img_width / 2], [0, 1, 0, -img_height / 2],
              [0, 0, 0, focal_length], [0, 0, -1 / tx, 0]])

# camera initialization
camera = PiCamera(stereo_mode='side-by-side', stereo_decimate=False)
camera.resolution = (cam_width, cam_height)
camera.framerate = 20
#camera.hflip = True

# interface initialization
cv2.namedWindow("Image")
cv2.moveWindow("Image", 50, 100)
cv2.namedWindow("left")
cv2.moveWindow("left", 450, 100)
cv2.namedWindow("right")
cv2.moveWindow("right", 850, 100)

disparity = np.zeros((img_width, img_height), np.uint8)
sbm = cv2.StereoBM_create(numDisparities=0, blockSize=21)
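
The excerpt stops after creating the block matcher; a hedged sketch of one depth-map iteration using the names defined above (the capture call and grayscale split are assumptions, not shown here):

# Assumed continuation: grab one side-by-side frame, split it in half,
# and compute a disparity map with the matcher created above
camera.capture(capture, format='bgra', resize=(img_width, img_height))
gray = cv2.cvtColor(capture, cv2.COLOR_BGRA2GRAY)
left = gray[:, :img_width // 2]
right = gray[:, img_width // 2:]
disparity = sbm.compute(left, right).astype(np.float32) / 16.0
points_3d = cv2.reprojectImageTo3D(disparity, q)  # uses the Q matrix above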
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2

camera = PiCamera(resolution=(640, 480), framerate=30)

#camera.sharpness = 100
camera.saturation = 50
#camera.ISO = 100

camera.video_stabilization = True

camera.exposure_mode = 'auto'

#camera.awb_mode = 'off'
camera.awb_mode = 'auto'
#camera.awb_mode = 'sunlight'
#camera.awb_mode = 'cloudy'
#camera.awb_mode = 'shade'
#camera.awb_mode = 'tungsten'
#camera.awb_mode = 'fluorescent'
#camera.awb_mode = 'incandescent'
#camera.awb_mode = 'flash'
#camera.awb_mode = 'horizon'

#camera.image_effect = 'saturation'
#camera.image_effect = 'deinterlace1'
#camera.image_effect = 'deinterlace2'

#-------------------------------------
Example #19
def tracker():
    camera = PiCamera()
    camera.rotation = 180
    camera.resolution = (640, 480)
    camera.framerate = 32
    rawCapture = PiRGBArray(camera, size=(640, 480))

    time.sleep(0.1)
    frame = np.empty((480 * 640 * 3), dtype=np.uint8)
    camera.capture(frame, format="bgr")
    frame = frame.reshape((480, 640, 3))

    #setup initial location of window
    c, r, w, h = 270, 150, 100, 100
    track_window = (c, r, w, h)

    #Define a region of Interest
    roi = frame[r:r + h, c:c + w]

    #Change the color of the region of interest from bgr to hsv
    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)

    #Define color boundaries
    #Test on tracking red objects
    lowerColor = np.array((0, 55, 100))
    upperColor = np.array((8, 69, 100))

    #Performs Actual Color Detection using the specified ranges
    mask = cv2.inRange(hsv_roi, lowerColor, upperColor)

    #Remove Inaccuracies from the mask
    #mask = cv2.erode(mask, None, iterations=2)
    #mask = cv2.dilate(mask, None, iterations=2)

    #format: cv2.calcHist(images,channels,mask,histSize, ranges)
    roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

    #Stop the algorithm when accuracy of 1 is reached or 80 iterations have run
    term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1)

    for frame in camera.capture_continuous(rawCapture,
                                           format="bgr",
                                           use_video_port=True):
        frame = frame.array

        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)

        #apply meanshift to get the new location
        ret, track_window = cv2.meanShift(dst, track_window, term_crit)

        #print ret
        x, y, w, h = track_window

        #Draw a rectangle on the image
        cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)

        #Write 'Tracked' over the rectangle
        cv2.putText(frame, 'Tracked', (x + 5, y - 12),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2,
                    cv2.LINE_AA)

        #Show image
        cv2.imshow("Tracking", frame)

        key = cv2.waitKey(60) & 0xFF

        rawCapture.truncate(0)

        #if key == ord("q"):
        #	break
        #else:
        #	cv2.imwrite(chr(key) + ".jpg", frame)

    #camera.release()
    cv2.destroyAllWindows()
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
 

camera = PiCamera() #initialize RPI camera module
camera.resolution = (640, 480) #set image resolution
#camera.shutter_speed = 100 #uncomment if doing actual analysis of led's
camera.framerate = 60 #doesn't make much of a difference here
rawCapture = PiRGBArray(camera, size=(640, 480)) #captures footage from RPI camera as an array
count = 0
t = time.time() # start timer

time.sleep(0.1) #waits for camera to turn on

for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True): #capture is continous as long as camera is running
	image = frame.array #converts captured image to an array
	#cv2.imshow("response", image) #not showing images saves ~1 sec of total capturing time
	cv2.imwrite("pictures/frame" + str(count) + ".jpg", image) # not saving images saves ~3 secs of total capturing time
	#key = cv2.waitKey(1) & 0xFF #only use if video stream is continous
	rawCapture.truncate(0) #clears the stream so the next frame doesn't overlap
	count += 1
	#done = time.time() - t	
	if (count == 100): #breaks loop after 100 images created
		break
	#if key == ord("q"): #use this only if the video is continuous
		#break
camera.close()
done = time.time() - t #prints time taken for code to run
print('time taken: ' + str(done))
def camera(request):
    app.logger.info('Executing camera intent')
    camera = PiCamera()
    camera.start_preview()
    return 'camera'
Example #22
def setup_camera():
    '''
    Instantiate and return a PiCamera object
    '''
    camera = PiCamera()
    return camera
Example #23
    def __init__(self):
        self.cam = PiCamera()
        self.cap = PiRGBArray(self.cam)
Example #24
#Importing libraries

from picamera.array import PiRGBArray
from picamera import PiCamera
import RPi.GPIO as GPIO
import time
import cv2
import numpy as np

GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)

#Setting up PiCam

camera = PiCamera()                           #picam object
rowsp = 160                                   #rows for the input frame; frame size is reduced for a higher frame rate
colsp = 128                                   #columns for the frames
camera.resolution = (640, 480)
camera.framerate = 30
rawCapture = PiRGBArray(camera, size=(160, 128))  #low resolution for line following to increase frame rate
still = PiRGBArray(camera, size=(640, 480))       #high resolution for shape detection
print("initiating")
time.sleep(0.1)
rawCapture.truncate(0)
still.truncate(0)

font = cv2.FONT_HERSHEY_SIMPLEX                #font style to print directions on the frame

#Setting up RGB LED
 
Example #25
def piCam(w=640, h=480, x=0, y=0, fill=False):
    camera = PiCamera()
    camera.resolution = (w, h)
    camera.start_preview(fullscreen=fill, window=(x, y, w, h))
Example #26
# Pi Camera Application

from picamera import PiCamera
from time import sleep


def camLive(camera, duration):
    camera.rotation = 180
    camera.start_preview()
    sleep(duration)
    camera.stop_preview()


myCamera = PiCamera()
camLive(myCamera, 5)
Example #27
def main():
    """Face detection camera inference example."""

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        dest='num_frames',
        default=-1,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    blink_led(color=BLUE, period=0.5, n_blinks=10)

    # Initialize variables for automatic shutdown
    shutdown_flag = False
    min_frames_w_arduino_power = 10
    max_frames_w_no_arduino_power = 10
    counter_frames_w_power = 0
    counter_frames_w_no_power = 0

    focal_length = 1320  # focal length in pixels for 1640 x 1232 resolution - found by calibration
    camera_resolution = (1640, 1232)
    x_center = int(camera_resolution[0] / 2)
    real_face_width_inch = 11  # width/height of bounding box of human face in inches
    min_angle = atan2(
        -x_center, focal_length
    )  # min angle where face can be detected (leftmost area) in radians
    max_angle = atan2(
        x_center, focal_length
    )  # max angle where face can be detected (rightmost area) in radians
    min_distance = 20  # min distance to detected face in inches
    max_distance = 200  # max distance to detected face in inches
    face_detected_on_prev_frame = False  # Flag indicating whether a face was detected on the previous frame
    a = (0.9 - 0.2) / (
        max_distance - min_distance
    )  # Coefficient a (slope) for coding distance value from range [min_distance, max_distance] to [0.2, 0.9]
    b = 0.9 - a * max_distance  # Coefficient b (intercept) for coding distance value from range [min_distance, max_distance] to [0.2, 0.9]

    with PiCamera() as camera:
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution inference run on.
        camera.sensor_mode = 4

        # Scaled and cropped resolution. If different from sensor mode implied
        # resolution, inference results must be adjusted accordingly. This is
        # true in particular when camera.start_recording is used to record an
        # encoded h264 video stream as the Pi encoder can't encode all native
        # sensor resolutions, or a standard one like 1080p may be desired.
        camera.resolution = camera_resolution

        # Start the camera stream.
        camera.framerate = 30

        #camera.start_preview()

        # Calculate the face's angle and distance from its bounding box.
        def face_data(face):
            x, y, width, height = face.bounding_box
            x_mean = int(x + width / 2)
            angle = atan2(x_mean - x_center, focal_length)
            distance = 0
            if width > 0:
                distance = focal_length * real_face_width_inch / width
            return angle, distance

        with CameraInference(face_detection.model()) as inference:
            for i, result in enumerate(inference.run()):
                if i == args.num_frames:
                    break
                faces = face_detection.get_faces(result)
                face = select_face(faces)
                if face:
                    if face_detected_on_prev_frame:
                        angle, distance = face_data(face)
                        if angle < min_angle:
                            angle = min_angle
                        if angle > max_angle:
                            angle = max_angle
                        angle_to_send = (angle - min_angle) / (max_angle -
                                                               min_angle)
                        pin_A.value = 0.8 * angle_to_send + 0.1
                        if distance < min_distance:
                            distance = min_distance
                        if distance > max_distance:
                            distance = max_distance
                        pin_B.value = a * distance + b
                        leds.update(Leds.rgb_on(GREEN))
                    face_detected_on_prev_frame = True
                else:
                    if not face_detected_on_prev_frame:
                        pin_A.value = 0.5
                        pin_B.value = 0.1
                        leds.update(Leds.rgb_off())
                    face_detected_on_prev_frame = False
                clock = i % 80 + 11
                pin_C.value = clock / 100
                print('Iteration #', str(i), ' A=', str(pin_A.value), ' B=',
                      str(pin_B.value), ' C=', str(pin_C.value), ' D=',
                      str(pin_D.value))
                print('face_detected_on_prev_frame = ',
                      face_detected_on_prev_frame)

                if pin_D.is_pressed:
                    counter_frames_w_no_power += 1
                else:
                    counter_frames_w_power += 1
                print('released, shutdown_flag = ', shutdown_flag,
                      ' frames with power=', str(counter_frames_w_power))
                print('pressed,  shutdown_flag = ', shutdown_flag,
                      ' frames with no power=', str(counter_frames_w_no_power))
                if counter_frames_w_power >= min_frames_w_arduino_power and not shutdown_flag:
                    shutdown_flag = True
                    counter_frames_w_no_power = 0
                if counter_frames_w_no_power >= max_frames_w_no_arduino_power and shutdown_flag:
                    shut_aiy_kit_down()
                sleep(0.1)
def handle(msg):
    global enabled
    global pirtake
    global intruder

    command = msg['text']
    from_id = msg['from']['id']  # telegram user id
    print('Got command: %s' % command)

    if from_id == 809596619:  # match against the authorized telegram id

        keyboard = ReplyKeyboardMarkup(keyboard=[['start', 'status', 'help'], ['foto', 'buka','rekam'], ['startsensor', 'stopsensor']])
        bot.sendMessage(chat_id, 'Siap tuan', reply_markup=keyboard)

        if command.lower() == "start": #banner selamat datang
            bot.sendMessage(chat_id, 'Selamat datang di kamera keamanan Ketuk Pintu. /help untuk melihat beberapa perintah')
        
        elif command.lower() == "help": #bantuan
            bot.sendMessage(chat_id, '''Berikut perintah yang bisa digunakan pada bot ini : \n- start : Info bot ini \n- status : Cek status sensor\n- help : bantuan perintah pada bot \n- foto : mengambil gambar \n- buka : membuka pintu(aktifkan relay) \n- rekam : merekam video selama 5 detik \n- startsensor : menyalakan sensor \n- stopsensor : mematikan sensor''')
            bot.sendMessage(chat_id, 'kamera akan mengambil gambar ketika ada gerakan pada sensor melebihi 3 detik')
        
        elif command.lower() == "foto": #mengambil foto saat ini
            camera = cv2.VideoCapture(0)
            return_value, image = camera.read()
            foto = path + "/foto/foto_" + (time.strftime("%d%b%y_%H%M%S"))
            cv2.imwrite(foto + '.jpg', image)
            print('capturing')
            del(camera)
            inf = open(foto + '.jpg', 'rb')
            caption = 'Foto pada '+time.strftime("%H:%M:%S %d-%m-%Y")
            bot.sendPhoto(chat_id,inf,caption)
        
        elif command.lower() == "rekam": #merekam situasi selama 10 detik     
            waktu = 5
            with PiCamera() as camera:
                camera.resolution = (640, 480)
                filename = path + "/video/video_" + (time.strftime("%d%b%y_%H%M%S"))
                camera.start_recording(filename + '.h264')
                camera.wait_recording(waktu)
                camera.stop_recording()
                command = "MP4Box -add " + filename + '.h264' + " " + filename + '.mp4'
                print(command)
                call([command], shell=True)
                bot.sendVideo(chat_id, video = open(filename + '.mp4', 'rb'))
        
        elif command.lower() == "buka": #membuka pintu
            bot.sendMessage(chat_id,text="pintu dibuka")
            GPIO.output(relay, 1)
            time.sleep(4)
            GPIO.output(relay, 0)

        elif command.lower() == "startsensor": #menyalakan sensor
            if pirtake == True:
                bot.sendMessage(chat_id,text="Sensor dalam keadaan menyala")
            else:
                pirtake = True
                bot.sendMessage(chat_id,text="Sensor menyala")

        elif command.lower() == "stopsensor": #mematikan sensor
            if pirtake == True:
                pirtake = False
                bot.sendMessage(chat_id,text="Sensor dimatikan")
            else:
                pirtake = False
                bot.sendMessage(chat_id,text="Sensor dalam keadaan mati")
        elif command.lower() == "status": #melihat status sensor
            if pirtake == 1:
                bot.sendMessage(chat_id, 'Sensor aktif')
            
            else:
                bot.sendMessage(chat_id, 'Sensor mati')
            
        else:
            bot.sendMessage(chat_id,text="perintah invalid") # response when no command matches
            

    else:
        bot.sendMessage(from_id,text="Mohon maaf,tidak bisa berinteraksi untuk saat ini") # response when another user tries to use the bot
        print("intruder : "+str(from_id))
        bot.sendMessage(chat_id,text="Seseorang mencoba untuk berinteraksi dengan bot ini. \nid : "+str(from_id)+"\nperintah : "+ command) # report the attempt to the authorized user
pwm1 = PWM(1)
pwm1.period = 20000000
pwm1.duty_cycle = 1500000
pwm1.enable = True

#Setup PID
pid = PID.PID(Kp,Kd,Ki) #Kp is set to 2000, I & D are disabled
pid.SetPoint = 320
pid.setSampleTime(0.01) #sample time is 10ms

# set default value
line = 200

# setup camera settings
res = (640, 480)
camera = PiCamera()
camera.sensor_mode = 7
camera.resolution = res
camera.framerate = 120
#initialize camera feed
rawCapture = PiYUVArray(camera, size=res)
stream = camera.capture_continuous(rawCapture, format="yuv", use_video_port=True)


## main loop stuff
pwm0.duty_cycle = 1200000
first_run = True
threshold = 100
p_old = []
p_current = []
i = 0
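
The excerpt ends before the main loop body. A hedged sketch of one control step, assuming the widely used ivPID-style interface (pid.update(feedback) and pid.output) and reusing the names defined above:

# Illustrative loop body; pwm1, stream, rawCapture, threshold and line
# all come from the setup code above
import numpy as np

for frame in stream:
    I = frame.array[:, :, 0]          # Y (luma) plane of the YUV frame
    row = I[400]                      # one scan row near the bottom
    dark = np.where(row < threshold)[0]
    if dark.size:
        line = int(dark.mean())       # estimated line center in pixels
    pid.update(line)                  # SetPoint is 320, the image center
    pwm1.duty_cycle = int(1500000 + pid.output)  # steer around center pulse
    rawCapture.truncate(0)            # reset the buffer for the next frame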
Example #30
def videoFilter(qvideo, qauto):
    print("starting Video....")
    i = 0  #For iteration in text Output
    flag = False  #Ensures that one door is not counted twice
    flag2 = False  #Ensures that first door is not counted

    camera = PiCamera()
    camera.resolution = (640, 480)
    camera.framerate = 2

    # converted for use with openCV functions
    rawCapture = PiRGBArray(camera, size=(640, 480))

    # allow the camera to warmup
    time.sleep(0.1)
    # capture frames from the camera
    try:
        for frame in camera.capture_continuous(rawCapture,
                                               format="bgr",
                                               use_video_port=True):
            #start_time = time.time()
            # grab the raw NumPy array representing the image, then initialize the timestamp
            # and occupied/unoccupied text
            image = frame.array  #numpy array for matlab

            #Hue [0-179], Saturation [0,255], Light [0,255]
            hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
            lower_green = np.array([50, 50, 50])
            upper_green = np.array([70, 255, 255])
            #Image Filter
            mask = cv2.inRange(hsv, lower_green, upper_green)
            result = cv2.bitwise_and(image, image, mask=mask)
            #Image Enhancement
            blur = cv2.medianBlur(mask, 15)

            #count white pixel
            pixel = cv2.countNonZero(mask)

            #DoorCount
            if flag2 == False and pixel < door_lower:
                flag2 = True

            if flag == False and flag2 == True and pixel >= door_upper:
                print('Node Detected')
                qvideo.put(1)
                flag = True

            elif flag == True and pixel < door_lower:  #distance b/w two rooms very close so need door_lower
                flag = False

            if qauto.empty() == False:
                print('Exiting Video....')
                break

            i += 1
            outputText(pixel, nodeCount, i)

            # clear the stream in preparation for the next frame
            rawCapture.truncate(0)
            time.sleep(0.1)
    except Exception:
        print('no frame')
    print('VIDEO FINISHED')
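
videoFilter above is driven through its two queue arguments. A minimal hedged sketch of wiring it up from a parent process (door_lower, door_upper and outputText must already be defined, as in the original program):

# Illustrative wiring: run videoFilter in its own process
from multiprocessing import Process, Queue

qvideo = Queue()   # videoFilter puts a 1 here for every door it detects
qauto = Queue()    # putting anything here tells videoFilter to exit

p = Process(target=videoFilter, args=(qvideo, qauto))
p.start()
# ... consume door events elsewhere with qvideo.get(), then shut down:
qauto.put('stop')
p.join()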