Example #1
def cv2_demo(net, transform):
    def predict(frame):
        height, width = frame.shape[:2]
        x = torch.from_numpy(transform(frame)[0]).permute(2, 0, 1)
        x = Variable(x.unsqueeze(0))
        y = net(x)  # forward pass
        detections = y.data
        # scale each detection back up to the image
        scale = torch.Tensor([width, height, width, height])
        for i in range(detections.size(1)):
            j = 0
            while detections[0, i, j, 0] >= 0.6:
                pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
                cv2.rectangle(frame,
                              (int(pt[0]), int(pt[1])),
                              (int(pt[2]), int(pt[3])),
                              COLORS[i % 3], 2)
                cv2.putText(frame, labelmap[i - 1], (int(pt[0]), int(pt[1])),
                            FONT, 2, (255, 255, 255), 2, cv2.LINE_AA)
                j += 1
        return frame

    # start video stream thread, allow buffer to fill
    print("[INFO] starting threaded video stream...")
    stream = WebcamVideoStream(src=0).start()  # default camera
    time.sleep(1.0)
    # start fps timer
    # loop over frames from the video file stream
    while True:
        # grab next frame
        frame = stream.read()
        key = cv2.waitKey(1) & 0xFF

        # update FPS counter
        fps.update()
        frame = predict(frame)

        # keybindings for display
        if key == ord('p'):  # pause
            while True:
                key2 = cv2.waitKey(1) & 0xFF
                cv2.imshow('frame', frame)
                if key2 == ord('p'):  # resume
                    break
        cv2.imshow('frame', frame)
        if key == 27:  # exit
            break
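This fragment relies on several globals that are defined elsewhere in the original project (the FPS counter, drawing constants and the class-name list); a minimal sketch of plausible stand-ins, where every value is an assumption rather than the original definition:

# Hypothetical globals assumed by cv2_demo(); values are illustrative only.
import time
import cv2
import torch
from torch.autograd import Variable
from imutils.video import FPS, WebcamVideoStream

FONT = cv2.FONT_HERSHEY_SIMPLEX                    # font used by cv2.putText
COLORS = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]   # one color per class, cycled with i % 3
labelmap = ['aeroplane', 'bicycle', 'bird']        # placeholder names; the real list comes from the SSD config
fps = FPS().start()                                # FPS counter updated inside the loop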
Example #2
prevx = 0
prevy = 0

# loop over frames from the video file stream
while True:

    fps.update()
    #print(fps._numFrames)
    #if (fps._numFrames > reset_rate):
    #    fps.stop()
    #    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    #    fps._numFrames = 0
    #    stream = WebcamVideoStream(src=args["input"]).start()
    #    fps.start()

    frame = stream.read()
    print(frame.shape)

    # crop image area for TV screen
    print(img_x, img_y, img_w, img_h)
    crop_img = frame[img_y:img_y + img_h, img_x:img_x + img_w]

    #find mean pixel value
    mean, stddev = cv2.meanStdDev(crop_img)
    b = int(mean[0])
    g = int(mean[1])
    r = int(mean[2])
    print("BGR: ", b, g, r)

    #convert to hue xy colorspace
    x, y = converter.rgb_to_xy(r, g, b)
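`stream`, `fps`, the crop box and `converter` are defined outside this fragment; a minimal sketch under the assumption that `rgb_to_xy()` is the rgbxy/Philips-Hue converter (the crop-box values are illustrative only):

# Assumed setup for this fragment; names follow the snippet above, values are hypothetical.
from imutils.video import WebcamVideoStream, FPS
from rgbxy import Converter  # assumption: rgb_to_xy() comes from the rgbxy package

stream = WebcamVideoStream(src=0).start()
fps = FPS().start()
img_x, img_y, img_w, img_h = 100, 100, 640, 360  # hypothetical TV-screen crop box
converter = Converter()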
Example #3
# fvs = WebcamVideoStream(src='rtsp://*****:*****@10.150.30.202:554/live').start()	# back of the office
fvs = WebcamVideoStream(src='rtsp://*****:*****@10.150.30.203:554/live').start()	# inside the office
time.sleep(1.0)

# Start fps counter
fps = FPS().start()

# Start folder reading in thread
thread = threading.Thread(target=readFolder, args=())
thread.daemon = True
thread.start() 

cnt = 0
# Main loop
while True:
  img = fvs.read()

  thresh = 0.8
  scales = [1080, 1920] # [1024, 1980]   
    
  print(img.shape)
  # print(scales[1])
  im_shape = img.shape
  target_size = scales[0]
  max_size = scales[1]
  im_size_min = np.min(im_shape[0:2])
  im_size_max = np.max(im_shape[0:2])
  #im_scale = 1.0
  #if im_size_min>target_size or im_size_max>max_size:
  im_scale = float(target_size) / float(im_size_min)
  # prevent bigger axis from being more than max_size:
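  # assumed completion of the truncated comment above (a sketch, not the original code):
  # the conventional clamp keeps the longer image axis from exceeding max_size
  if np.round(im_scale * im_size_max) > max_size:
    im_scale = float(max_size) / float(im_size_max)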
		#move to surveillance position
		success &= highlevel_movements.move_to_waypoint_linear(base, base_cyclic,[-0.06,-0.096,0.8,-90.823,171.8,113.73],speed=0.1)
		"""

        # time.sleep(5)
        # e.set() #kill the camera process
        # p.join()
        cap = WebcamVideoStream(src='rtsp://192.168.1.10/color').start()
        t0 = time.time()
        token = False
        target_reached_flag = False
        beer_mode = False
        while True:
            #ret, frame = cap.read()
            frame = cap.read()
            # if not ret:
            # 	break
            (h, w) = frame.shape[:2]
            vision_middle = (int(w / 2), int(h / 2))
            cv2.circle(frame, vision_middle, target_circle_size * 2,
                       (255, 0, 255), 2)
            #Detect the object and get the target middle
            cv2_im = frame
            cv2_im_rgb = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB)
            pil_im = Image.fromarray(cv2_im_rgb)
            #common.set_input(interpreter, pil_im)
            scale = detect.set_input(
                interpreter, pil_im.size,
                lambda size: pil_im.resize(size, Image.ANTIALIAS))
            interpreter.invoke()
Example #5
def cv2_demo(net, transform, debug=None):
    def predict(loop_frame, debug=None):
        height, width = loop_frame.shape[:2]
        x = torch.from_numpy(transform(loop_frame)[0]).permute(2, 0, 1)
        x = Variable(x.unsqueeze(0))

        global first_slice_flag
        global last_slice_flag
        global speed_start_time
        global speed

        # print("first_slice_flag:",first_slice_flag)
        print("motor speed:", speed)
        # print("last_slice_flag:",last_slice_flag)
        if args.cuda:
            x = x.cuda()

        y = net(x)  # forward pass
        detections = y.data
        # scale each detection back up to the image
        scale = torch.Tensor([width, height, width, height])

        # print("use cuda:",args.cuda)

        """ add control part for automatic slices collection"""
        """
        control statagy:
        1. distinguish the non collected slices between the left baffle and right baffle, 
           start the spin motor when the last slices reach the bottom of right baffle;
        2. distinguish the collected slices which on the right side of the right baffle,
           when all slices are not in the right baffle area, and when the non collected slices 
           don't reach the bottom of the right baffle, stop the motor;
        3. distinguish the slices on the left side of the left baffle, stop the motor;
        """
        ultrathin_slices = []
        left_baffle = []
        right_baffle = []
        for i in range(detections.size(1)):
            j = 0
            while detections[0, i, j, 0] >= 0.6:
                pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
                if i == 1:
                    ultrathin_slices.append(pt)
                elif i == 2:
                    left_baffle.append(pt)
                elif i == 3:
                    right_baffle.append(pt)
                cv2.rectangle(
                    loop_frame,
                    (int(pt[0]), int(pt[1])),
                    (int(pt[2]), int(pt[3])),
                    COLORS[i % 3],
                    2,
                )
                cv2.putText(
                    loop_frame,
                    labelmap[i - 1],
                    (int(pt[0]), int(pt[1]) - 10),
                    FONT,
                    0.6,
                    (255, 255, 255),
                    2,
                    cv2.LINE_AA,
                )
                j += 1
        """ control part"""

        if ultrathin_slices and left_baffle and right_baffle:
            non_collected_slices = []  # uncollected slices (center coordinates)
            collected_slices = []  # collected slices (center coordinates)
            for slice in ultrathin_slices:
                x_middle, y_middle = (
                    (slice[0] + slice[2]) / 2,
                    (slice[1] + slice[3]) / 2,
                )
                if x_middle > left_baffle[0][2] and x_middle < right_baffle[0][0]:
                    non_collected_slices.append([x_middle, y_middle])
                else:
                    collected_slices.append([x_middle, y_middle])
            non_collected_slices = np.array(non_collected_slices)
            collected_slices = np.array(collected_slices)
            if len(non_collected_slices) > 0:
                if not first_slice_flag:
                    speed_start_time = time.time()
                    first_slice_flag = True
                ind_leftside_slices = np.array([])
                rightbaffle_inside_slices_final = np.array([])
                if len(collected_slices) > 0:
                    ind_leftside_slices = collected_slices[
                        collected_slices[:, 0] < left_baffle[0][0]
                    ]
                    # identify slices inside the right baffle area; if any exist, collection
                    # is in progress and the motor must not be stopped
                    rightbaffle_inside_slices_temp = collected_slices[
                        collected_slices[:, 0] >= right_baffle[0][0]
                    ]
                    rightbaffle_inside_slices_final = rightbaffle_inside_slices_temp[
                        rightbaffle_inside_slices_temp[:, 0] <= right_baffle[0][2]
                    ]

                # sort the uncollected slices to find the lowest one
                sorted_noncollected_ind = np.argsort(-non_collected_slices[:, 1])
                last_noncollected_slices = non_collected_slices[
                    sorted_noncollected_ind[0]
                ]
                # calculate the spin speed of motor,(speed-->(2*pi/60s))
                if (
                    first_slice_flag
                    and not last_slice_flag
                    and (
                        len(rightbaffle_inside_slices_final) > 0
                        or last_noncollected_slices[1] >= right_baffle[0][3]
                    )
                ):
                    speed_stop_time = time.time()
                    time_through_rightbaffle = speed_stop_time - speed_start_time
                    motor_speed = int(
                        d_rightbaffle
                        * reduction_ratio
                        * 60
                        / r_wafer
                        / (2 * 3.14159)
                        / time_through_rightbaffle
                        * speed_gain
                    )
                    if motor_speed <= 90 and motor_speed >= 50:
                        speed = "8v" + str(motor_speed) + "\n"
                    else:
                        speed = "8v70\n"
                    last_slice_flag = True

                """
                1. stop the motor when more than two slices are on the left side
                2. start the motor when the last uncollected reach the bottom of right baffle
                3. stop the motor when the last uncollected slices has not reach the bottom of right baffle
                   and there has not slices inside the rightbaffle area.
                """
                if (
                    len(ind_leftside_slices) > 1
                ):  # more than one slice on the left side indicates that collection is done
                    if not debug:
                        ser.write("8v0\n".encode())  # stop motor
                    else:
                        pass
                else:
                    if (
                        last_noncollected_slices[1] >= right_baffle[0][3]
                        or len(rightbaffle_inside_slices_final) > 0
                    ):
                        if not debug:
                            ser.write(speed.encode())  # start motor
                        else:
                            pass
                    elif (
                        last_noncollected_slices[1] <= right_baffle[0][3]
                        and len(rightbaffle_inside_slices_final) == 0
                    ):
                        # if motor is on then stop motor else continue
                        if not debug:
                            ser.write("8v0\n".encode())  # stop motor
                        else:
                            pass
        return loop_frame

    # start video stream thread, allow buffer to fill
    print("[INFO] starting threaded video stream...")
    stream = WebcamVideoStream(src=0).start()  # default camera
    frame_width = int(1024)
    frame_height = int(768)

    time.sleep(1.0)
    # start fps timer
    # loop over frames from the video file stream
    while True:

        start_time = time.time()

        # grab next frame
        frame = stream.read()

        key = cv2.waitKey(1) & 0xFF

        # update FPS counter
        fps.update()
        # ori_out.write(frame)

        det_frame = predict(frame)

        # det_out.write(det_frame)
        end_time = time.time()
        current_fps = 1 / (end_time - start_time)
        print("[INFO] current. FPS: {:.2f}".format(current_fps))

        # keybindings for display
        if key == ord("p"):  # pause
            while True:
                key2 = cv2.waitKey(1) & 0xFF
                cv2.imshow("frame", det_frame)
                if key2 == ord("p"):  # resume
                    break
        cv2.imshow("frame", det_frame)
        if key == 27:  # exit
            stream.stop()
            break
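The control loop above writes motor commands to a serial port and reads geometry constants and flags that are not shown in this fragment; a hypothetical setup sketch (the port name and every constant are assumptions):

# Hypothetical globals assumed by the slice-collection demo; all values are illustrative.
import time
import serial
from imutils.video import FPS

ser = serial.Serial("/dev/ttyUSB0", 115200, timeout=1)  # assumed motor-controller port
d_rightbaffle = 2.0       # distance travelled past the right baffle (mm), assumed
r_wafer = 10.0            # wafer radius (mm), assumed
reduction_ratio = 30      # motor gearbox reduction, assumed
speed_gain = 1.0          # empirical correction factor, assumed
speed = "8v70\n"          # default motor command used before a speed is computed
first_slice_flag = False
last_slice_flag = False
speed_start_time = time.time()
fps = FPS().start()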
Example #6
    args = sys.argv

    #We will be using WebcamVideoStream to increase the speed of the program
    vs = WebcamVideoStream(src=0).start()
    hc = HandControl()

    movetime = 0
    continueProg = True

    while not vs.stopped:

        # Getting the start time of this loop
        hc.cur_time = time.time()

        # Reading the next frame of the video stream
        image = vs.read()
        if not vs.grabbed:
            print("Ignoring empty camera frame.")
            continue
        image = cv2.flip(image, 1)

        hc.generateHands(image)

        if hc.results.multi_hand_landmarks and hc.cur_time - movetime > 0.05:
            hc.generateMatrix()
            hc.basecalc()
            hc.movement()
            hc.clickAction()
            continueProg = hc.endCheck()

            if 'video' in args and continueProg:
Example #7
import cv2
import numpy as np
import time
from imutils.video import WebcamVideoStream

cap = WebcamVideoStream(0).start()
time.sleep(0.5)
cv2_im = cap.read()
(h, w) = cv2_im.shape[:2]
print(h, "/", w)
# out = cv2.VideoWriter('./detection_%s.avi'%(datetime.today().strftime('%Y-%m-%d-%H:%M:%S')),cv2.VideoWriter_fourcc('M','J','P','G'), 10, (w,h))
# out = cv2.VideoWriter('./detection_.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (w,h))
divider = 3
while True:
    cv2_im = cap.read()
    (h, w) = cv2_im.shape[:2]
    x_step = int(w / divider)
    y_step = int(h / divider)
    # outer
    for x in range(divider):
        for y in range(divider):
            pt1 = x * x_step, y * y_step
            pt2 = (x + 1) * x_step, (y + 1) * y_step
            cv2.rectangle(cv2_im, pt1, pt2, (0, 0, 255), 4)
    # inner
    for x in range(divider - 1):
        for y in range(divider - 1):
            pt1 = int(x * x_step * divider / 2), int(y * y_step * divider / 2)
            pt2 = int((x + 1) * x_step * divider / 2), int(
                (y + 1) * y_step * divider / 2)
            cv2.rectangle(cv2_im, pt1, pt2, (0, 255, 0), 3)
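    # Assumed tail of the loop (not shown in the original fragment): display the grid
    # overlay and exit on 'q'. This is a sketch, not the author's code.
    cv2.imshow('grid', cv2_im)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.stop()
cv2.destroyAllWindows()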
Example #8
import cv2
from imutils.video import WebcamVideoStream
import imutils

vs = WebcamVideoStream(src=0).start()
avg = None

while True:

    frame = vs.read()
    frame = imutils.resize(frame, width=400)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    if avg is None:
        avg = gray.copy().astype("float")
        continue

    cv2.accumulateWeighted(gray, avg, 0.1)
    frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    # three-value return is the OpenCV 3.x API; OpenCV 4.x returns (contours, hierarchy)
    _, contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    for c in contours:
        if cv2.contourArea(c) < 4000:
            print("Tiny")
            continue

        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
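    # Assumed tail of the loop (not shown in the original fragment): display the
    # annotated frame and exit on 'q'. This is a sketch, not the author's code.
    cv2.imshow("Motion", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

vs.stop()
cv2.destroyAllWindows()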
Example #9
# and start the FPS counter
print("[INFO] sampling THREADED frames from webcam...")
vs1 = WebcamVideoStream(src=0).start()
vs2 = WebcamVideoStream(src=1).start()
time.sleep(2.0)

fps = FPS().start()

# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out1 = cv2.VideoWriter('frame1.avi', fourcc, 30, (640, 480))
out2 = cv2.VideoWriter('frame2.avi', fourcc, 30, (640, 480))

# loop over frames from the video file stream
while True:
    frame1 = vs1.read()
    frame2 = vs2.read()

    # write the frame
    out1.write(frame1)
    out2.write(frame2)
    # show the frame and update the FPS counter
    cv2.imshow("Frame1", frame1)
    cv2.imshow("Frame2", frame2)
    cv2.waitKey(1)
    fps.update()
    key = cv2.waitKey(30) & 0xFF

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
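# Assumed cleanup after the loop (a sketch, not shown in the original fragment):
# report FPS statistics and release the writers and camera streams.
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
out1.release()
out2.release()
vs1.stop()
vs2.stop()
cv2.destroyAllWindows()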
Example #10
def main():
    import signal
    import sys
    
    rospy.init_node("obstacle_node")
    forward_pub = rospy.Publisher("/forward", String, queue_size=10)
    #data_pub = rospy.Publisher('enabled_test', String, queue_size=10)
    #velocity_pub = rospy.Publisher('cmd_vel_test', Twist, queue_size=10)
    image_pub = rospy.Publisher("img/reg", Image)
    #image_pub_dis = rospy.Publisher("img/td", Image)
    bridge = CvBridge()
    sess = tf.Session(config=config)
    
    def end(sig, frame):
        print "\n\nClosing TF sessions"
        sess.close()
        print "done."
        sys.exit(0)
    
    signal.signal(signal.SIGINT, end)

    new_trunc = tf.constant(5, dtype=tf.float32, shape=[7, 7, 3, 64])
    tf.import_graph_def(tf.get_default_graph().as_graph_def(), input_map={"resnet_v2_50/conv1/weights/Initializer/truncated_normal/TruncatedNormal:0": new_trunc})
    saver = tf.train.Saver()
    # Create a saver.
    sess.run(tf.local_variables_initializer())
    sess.run(tf.global_variables_initializer())
    test_folder = os.path.join(log_folder, model_name, "test")
    train_folder = os.path.join(log_folder, model_name, "train")
    # Restore variables from disk.
    saver.restore(sess, os.path.join(train_folder, "model.ckpt"))
    print("Model", model_name, "restored.")
    cam = WebcamVideoStream(src=1).start()
    print "finna run"
    import time
    last_time = (int(round(time.time() * 1000)))
    while True:
        last_time = (int(round(time.time() * 1000)))
        frame = cam.read()
        #frame = perspective.undistort_internal(frame)
        #cv2.imshow('yo1', frame)
        frame = frame[240:, :]
        frame = cv2.resize(frame, (480, 180))
        frame = cv2.GaussianBlur(frame, (3, 3), 0)
        out = sess.run(predictions_tf, feed_dict={holder: [frame]})
        out = np.squeeze(out)
        bg = cv2.inRange(out, 0, 0)
        stairs = cv2.inRange(out, 3, 255)
        unsafe = cv2.bitwise_or(stairs, bg)
        #cv2.imshow("rawm", unsafe)
        unsafe = cv2.erode(unsafe, (5, 5), iterations=6)
        unsafe = cv2.dilate(unsafe, (5, 5), iterations=6)
        unsafe = cv2.bitwise_and(UNSAFE_MASK, unsafe)
        #cv2.imshow("asdkj", unsafe)
        numPx = cv2.countNonZero(unsafe)
        print numPx
        if numPx > 450:
            forward_pub.publish("0")
        else:
            forward_pub.publish("1")

        regular = cv2.addWeighted(frame, 0.7, cv2.cvtColor(unsafe, cv2.COLOR_GRAY2RGB), 0.3, 0.0)

        #distort_unsafe = perspective.undistort_to_top_down(unsafe)
        #distort_frame = perspective.undistort_to_top_down(frame)
        #distorted = cv2.addWeighted(distort_frame, 0.7, cv2.cvtColor(distort_unsafe, cv2.COLOR_GRAY2RGB), 0.3, 0.0)
        #cv2.imshow('yo', distort_unsafe)
        try:
            image_pub.publish(bridge.cv2_to_imgmsg(regular, encoding="bgr8"))
            #image_pub_dis.publish(bridge.cv2_to_imgmsg(distorted, encoding="bgr8"))
        except CvBridgeError as e:
            print e

        #unsafe1 = unsafe[90:180, 0:160]
        #unsafe2 = unsafe[90:180, 160:320]
        #unsafe3 = unsafe[90:180, 320:480]
        #unsafe = unsafe[90:180, 100:380]
        #num1 = cv2.countNonZero(unsafe1)
        #num2 = cv2.countNonZero(unsafe2)
        #num3 = cv2.countNonZero(unsafe3)
        #num = cv2.countNonZero(unsafe)

        #if num < (percent*90*280/100.0):
        #    if num1 <= num2 and num1 <= num3: velocity_pub.publish(get_twist_left())
        #    elif num2 <= num3 and num2 <= num1: velocity_pub.publish(get_twist_forward())
        #    else: velocity_pub.publish(get_twist_right())
        #    data_pub.publish('go')
        #else:
        #    data_pub.publish('stop')
        #    velocity_pub.publish(get_twist_stop())

        print (last_time - (int(round(time.time() * 1000))))
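main() refers to several module-level names (config, holder, predictions_tf, UNSAFE_MASK, log_folder, model_name) that are defined elsewhere in the original project; a hypothetical TF 1.x-style sketch of stand-ins, purely illustrative:

# Hypothetical module-level stand-ins; shapes and paths are assumptions, not the original code.
import os
import numpy as np
import tensorflow as tf

log_folder = "./logs"
model_name = "resnet_v2_50_seg"
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
holder = tf.placeholder(tf.float32, shape=[None, 180, 480, 3])  # network input placeholder
predictions_tf = tf.zeros([1, 180, 480])                        # stand-in for the real segmentation output
UNSAFE_MASK = np.full((180, 480), 255, dtype=np.uint8)          # region-of-interest mask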
Example #11
face_encodings = []
for img in paths_to_images:
    encoding = get_face_encodings(img)
    if len(encoding) == 0:
        print("No face found in the image")
        exit()
    elif len(encoding) > 1:
        print("More then 1 face found in the image")
        exit()
    face_encodings.append(encoding[0])  # reuse the encoding computed above

# WebCam Check
print("Warming up camera...")
vs = WebcamVideoStream(src=0).start()
while True:
    img = vs.read()
    cv2.imshow("Frame", img)
    key = cv2.waitKey(1)
    if key == ord('q'):
        break
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    encoding = get_face_encodings_camera(img)
    if len(encoding) == 0:
        print("No face found in the image")
        # Allow the person to make movements
        time.sleep(20)
        continue  # skip matching when no face was detected
    match = find_match(face_encodings, names, encoding[0])
    print(match)

# File Check
image_filenames = filter(lambda x: x.endswith('.jpg'), os.listdir('test/'))
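The helpers get_face_encodings, get_face_encodings_camera and find_match are not shown in this fragment; a sketch of what they might look like using the face_recognition package (the tolerance value and fallback name are assumptions):

# Hypothetical helper definitions matching the names used above.
import face_recognition

def get_face_encodings(path_to_image):
    image = face_recognition.load_image_file(path_to_image)
    return face_recognition.face_encodings(image)

def get_face_encodings_camera(rgb_frame):
    return face_recognition.face_encodings(rgb_frame)

def find_match(known_encodings, known_names, candidate, tolerance=0.6):
    matches = face_recognition.compare_faces(known_encodings, candidate, tolerance)
    for name, matched in zip(known_names, matches):
        if matched:
            return name
    return "Not Found"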
def webreg(userexcel):
    args = {
        "east": "frozen_east_text_detection.pb",
        "min_confidence": 0.5,
        "width": 320,
        "height": 320,
        "padding": 0.0,
        "webcam": "",  # path of webcam
        "excel": userexcel,
    }

    (W, H) = (None, None)  # actual dimensions of the image, e.g. 720x460
    (newW, newH) = (args["width"], args["height"])  # required dimensions
    (rW, rH) = (None, None)  # ratio between the two

    layerNames = [
        "feature_fusion/Conv_7/Sigmoid",  # Scores - probability
        "feature_fusion/concat_3"
    ]  # geometry - dimensions of the bounding box

    print("loading EAST text detector...")

    net = cv2.dnn.readNet(args["east"])

    # if no webcam path, grabbing the reference to the web cam

    print("[INFO] starting webcam stream...")
    vs = WebcamVideoStream(src=0).start()  # 0 for default webcam
    # time.sleep(1.0)

    fnumber = -10
    vfname = []
    T = []
    predictedTexts = []
    fps = FPS().start()
    while True:
        if args["webcam"]:
            fnumber += 10
            vs.set(cv2.CAP_PROP_POS_FRAMES, fnumber)

        frame = vs.read()
        frame = frame[1] if args.get("webcam", False) else frame

        # check to see if we have reached the end of the stream
        if frame is None:
            break

        # resize the frame maintain aspect ratio
        frame = imutils.resize(frame, width=1000)
        orig = frame.copy()

        if W is None or H is None:
            (H, W) = frame.shape[:2]  # actual size
            rW = W / float(newW)
            rH = H / float(newH)

        # resize the frame
        frame = cv2.resize(frame, (newW, newH))

        # construct a blob
        blob = cv2.dnn.blobFromImage(frame,
                                     1.0, (newW, newH),
                                     (123.68, 116.78, 103.94),
                                     swapRB=True,
                                     crop=False)

        net.setInput(blob)
        (scores, geometry) = net.forward(layerNames)

        # decode the predictions obtaining probabilites and position of box
        (rects, confidences) = decode_predictions_video(scores, geometry, args)

        boxes = non_max_suppression(np.array(rects), probs=confidences)

        pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
        for (startX, startY, endX, endY) in boxes:
            # scaling the bounding box coordinates based on the respective ratios
            startX = int(startX * rW)
            startY = int(startY * rH)
            endX = int(endX * rW)
            endY = int(endY * rH)

            # applying padding in percentage
            dX = int((endX - startX) * args["padding"])
            dY = int((endY - startY) * args["padding"])

            # apply padding to each side of the bounding box, respectively
            startX = max(0, startX - dX)
            startY = max(0, startY - dY)
            endX = min(W, endX + (dX * 2))
            endY = min(H, endY + (dY * 2))

            # extract the actual padded image out of original
            roi = orig[startY:endY, startX:endX]

            # config stating language, LSTM model and stating all is one line of text
            config = ("-l eng --oem 1 --psm 7")

            # obtaining text out of image
            text = filterText_video(
                pytesseract.image_to_string(roi, config=config))
            # text = pytesseract.image_to_string(roi, config=config)
            if text and text not in predictedTexts:
                # add the bounding box coordinates and text

                print("Predicted Text")
                print("========")

                # timestamps for a video file follow the capture position, whereas for a
                # live webcam they are taken from the current wall-clock time
                if args["webcam"]:
                    print(text, " at time ~ ", end="")
                    printTime(vs.get(cv2.CAP_PROP_POS_MSEC)
                              )  # converting millisecs into hour min secs
                else:
                    Tt = datetime.datetime.now().strftime(
                        "%H:%M:%S on %d/%m/%Y")
                T.append(Tt)
                predictedTexts.append(text)
                # draw the bounding box on the frame
                cv2.rectangle(orig, (startX, startY), (endX, endY),
                              (0, 255, 0), 2)

                os.chdir(r"C:\Users\hp\Desktop\ml\static")
                rnd = time.time()
                fname = str(rnd) + ".jpg"
                cv2.imwrite(fname, orig)
                os.chdir(r"C:\Users\hp\Desktop\ml")
                vfname.append(fname)

        fps.update()

        # show the output frame
        cv2.imshow("Text Detection", orig)

        # if the `q` key was pressed, break from the loop
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # if we are using the default webcam stream, stop the threaded stream
    if not args["webcam"]:
        vs.stop()
    # otherwise, release the video file pointer
    else:
        vs.release()

    # close all windows
    cv2.destroyAllWindows()

    data = pd.read_csv("C:\\Users\\hp\\Desktop\\ml\\static\\" + args["excel"])
    df = pd.DataFrame(data, columns=['Bib_no'])

    bib_list = df.values.tolist()
    print('bib_list : ', bib_list)

    print('predictedTexts : ', predictedTexts)
    n = len(bib_list)

    pred = []
    for el in predictedTexts:
        sub = el.split(', ')
        pred.append(sub)

    print('pred:', pred)

    if (len(pred) != n):
        pred.append(None)
    print('pred:', pred)

    p = len(pred)

    b = []
    for i in range(n):
        for j in range(p):
            if (bib_list[i] == pred[j]):
                df.loc[i, 'Status'] = T[j]
                #df.loc[i, 'Time and Date'] = T[j]
                break
            else:
                df.loc[i, 'Status'] = 'Not Predicted'

    #df.Status.fillna("Not predicted", inplace=True)

    print(df)

    download_source = (
        r'C:\Users\hp\Desktop\ml\static\Wexcel\output_video.xlsx')
    df.to_excel(download_source)

    return predictedTexts, vfname, T
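A hypothetical call site for webreg(), grounded only in its return statement; the spreadsheet name is illustrative:

# Sketch of how webreg() might be invoked.
if __name__ == "__main__":
    texts, frame_files, timestamps = webreg("runners.xlsx")
    for text, fname, ts in zip(texts, frame_files, timestamps):
        print(text, "saved to", fname, "at", ts)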
#thresholding trackbar
#cv2.createTrackbar('Threshold','Depth Map',0,255,nothing)
#cv2.setTrackbarPos('Threshold','Depth Map',128)


exec_time_sum = 0
i = 0


#contour detection


while (True):

    start_time = time.time()
    frameL = capL.read()
    frameR = capR.read()


    #remap cameras to remove distortion
    rectL = cv2.remap(frameL, undistMapL, rectMapL, cv2.INTER_LINEAR)
    rectR = cv2.remap(frameR, undistMapR, rectMapR, cv2.INTER_LINEAR)


    #~~~~ DEPTH MAP PART ~~~~~~~
    # convert rectified from RGB to 8 bit grayscale
    grayRectL = cv2.cvtColor(rectL, cv2.COLOR_BGR2GRAY)
    grayRectR = cv2.cvtColor(rectR, cv2.COLOR_BGR2GRAY)
    grayFrameL = cv2.cvtColor(frameL, cv2.COLOR_BGR2GRAY)
    grayFrameR = cv2.cvtColor(frameR, cv2.COLOR_BGR2GRAY)
class Stereo_Vision:
    """Class for obtaining and analyzing depth maps"""

    def __init__(self, cam_L_src, cam_R_src, display_frames=True, threshold=110):

        # initialize camera feed threads
        self.capL = WebcamVideoStream(src=cam_L_src).start()
        time.sleep(0.5)
        self.capR = WebcamVideoStream(src=cam_R_src).start()

        self.stop = False

        self.display_frames = display_frames

        # initialize windows
        if self.display_frames == True:
            cv2.namedWindow('Depth Map')
            cv2.namedWindow('Threshold')
            cv2.namedWindow('Shapes')

        # import calibration matrices
        self.undistMapL = np.load('calibration/undistortion_map_left.npy')
        self.undistMapR = np.load('calibration/undistortion_map_right.npy')
        self.rectMapL = np.load('calibration/rectification_map_left.npy')
        self.rectMapR = np.load('calibration/rectification_map_right.npy')

        # initialize blank frames
        self.rectL = np.zeros((640, 480, 3), np.uint8)
        self.rectR = np.zeros((640, 480, 3), np.uint8)

        self.quadrant_near_object = np.zeros((3, 3), dtype=bool)
        self.threshold = threshold

        self.exec_time_sum = 0
        self.frame_counter = 0
        self.fps = 0

        self.no_object_left_column = False
        self.no_object_mid_column = False
        self.no_object_right_column = False
        self.no_object_bottom_row = False
        self.no_object_mid_row = False
        self.no_object_top_row = False

    def start(self):
        self.vision_thread = Thread(target=self.start_stereo)
        self.vision_thread.start()

    def stop_vision(self):
        self.stop = True

    def get_quadrants(self):
        return self.quadrant_near_object

    def start_stereo(self):
        while (self.stop != True):

            self.start_time = time.time()
            self.frameL = self.capL.read()
            self.frameR = self.capR.read()

            # remap cameras to remove distortion
            self.rectL = cv2.remap(self.frameL, self.undistMapL, self.rectMapL, cv2.INTER_LINEAR)
            self.rectR = cv2.remap(self.frameR, self.undistMapR, self.rectMapR, cv2.INTER_LINEAR)

            # convert rectified from RGB to 8 bit grayscale
            self.grayRectL = cv2.cvtColor(self.rectL, cv2.COLOR_BGR2GRAY)
            self.grayRectR = cv2.cvtColor(self.rectR, cv2.COLOR_BGR2GRAY)
            self.grayFrameL = cv2.cvtColor(self.frameL, cv2.COLOR_BGR2GRAY)
            self.grayFrameR = cv2.cvtColor(self.frameR, cv2.COLOR_BGR2GRAY)

            # create depth map

            self.object_left_column = True
            self.object_mid_column = True
            self.object_right_column = True
            self.object_top_row = True
            self.object_mid_row = True
            self.object_bottom_row = True

            self.stereo = cv2.StereoBM_create(numDisparities=0, blockSize=13)

            self.disparity = self.stereo.compute(self.grayRectL, self.grayRectR).astype(np.float32)
            self.disparity = cv2.normalize(self.disparity, self.disparity, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX,
                                           dtype=cv2.CV_8UC1)

            # blur depth map to filter high frequency noise
            self.disparity_blur = cv2.medianBlur(self.disparity, 13)

            #apply 2 pixel border, helps keep contours continuous at extreme edges of image
            self.disparity_blur = cv2.copyMakeBorder(self.disparity_blur, top=2, bottom=2, right=2, left=2,
                                                     borderType=cv2.BORDER_CONSTANT, value=0)

            # threshold depth map
            self.disparity_thresh_imm = cv2.threshold(self.disparity_blur, self.threshold, 255, cv2.THRESH_BINARY)[1]

            self.disparity_edge = cv2.Canny(self.disparity_thresh_imm, 100, 200)

            # contour finding
            self.contours = cv2.findContours(self.disparity_edge, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[1]

            self.disparity_contours = np.zeros((480, 640, 3), np.uint8)

            for self.contour_index in self.contours:
                #draw bounding rectangle around each contour
                self.rect = cv2.minAreaRect(self.contour_index)
                self.box = cv2.boxPoints(self.rect)
                self.box = np.int0(self.box)

                #calculate area of each box
                self.box_area = abs(self.box[2][0] - self.box[0][0]) * abs(self.box[2][1] - self.box[0][1])
                self.col = -1
                self.row = -1

                if self.box_area > 1000: #pixels^2
                    cv2.drawContours(self.disparity_contours, [self.box], 0, (0, 255, 0), 2)  # draw green box

                    # determine which quadrants the rectangle occupies (check all four corners)
                    for corner_index in range(0, 4):
                        self.col = int(self.box[corner_index][0]) // 213  # x coordinate -> column
                        self.row = int(self.box[corner_index][1]) // 160  # y coordinate -> row
                        if self.row > 2:
                            self.row = 2

                        if self.col > 2:
                            self.col = 2

                        if self.col != -1 and self.row != -1:
                            self.quadrant_near_object[self.row][self.col] = True


            #Boolean logic to set flags for main program
            if (self.quadrant_near_object[0][0] == False and self.quadrant_near_object[1][0] == False and
                        self.quadrant_near_object[2][0] == False):
                self.no_object_left_column = True
            else:
                self.no_object_left_column = False

            if (self.quadrant_near_object[0][1] == False and self.quadrant_near_object[1][1] == False and
                        self.quadrant_near_object[2][1] == False):
                self.no_object_mid_column = True
            else:
                self.no_object_mid_column = False

            if (self.quadrant_near_object[0][2] == False and self.quadrant_near_object[1][2] == False and
                        self.quadrant_near_object[2][2] == False):
                self.no_object_right_column = True
            else:
                self.no_object_right_column = False

            if (self.quadrant_near_object[0][0] == False and self.quadrant_near_object[0][1] == False and
                        self.quadrant_near_object[0][2] == False):
                self.no_object_top_row = True
            else:
                self.no_object_top_row = False
            if (self.quadrant_near_object[1][0] == False and self.quadrant_near_object[1][1] == False and
                        self.quadrant_near_object[1][2] == False):
                self.no_object_mid_row = True
            else:
                self.no_object_mid_row = False

            if (self.quadrant_near_object[2][0] == False and self.quadrant_near_object[2][1] == False and
                        self.quadrant_near_object[2][2] == False):
                self.no_object_bottom_row = True
            else:
                self.no_object_bottom_row = False

            #FPS calculations
            self.end_time = time.time()
            self.exec_time = self.end_time - self.start_time
            self.exec_time_sum += self.exec_time
            self.frame_counter += 1
            self.fps = 1.0 / self.exec_time

            #draw edges in pink
            cv2.drawContours(self.disparity_contours, self.contours, -1, (180, 105, 255), -1)
            cv2.line(self.disparity_contours, (0,160), (640,160), (255,0,0))
            cv2.line(self.disparity_contours, (0,320), (640,320), (255,0,0))
            cv2.line(self.disparity_contours, (213,0), (213,480), (255,0,0))
            cv2.line(self.disparity_contours, (426,0), (426,480), (255,0,0))


            if self.display_frames == True:
                cv2.imshow('Threshold', self.disparity_edge)
                cv2.imshow('Depth Map', self.disparity)
                cv2.imshow('Shapes', self.disparity_contours)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                self.capL.stop()
                self.capR.stop()
                break

        self.capL.stop()
        self.capR.stop()
        cv2.destroyAllWindows()

    def save_frames(self, filename_index):
        cv2.imwrite("Images/DepthMap" + str(filename_index) + ".jpg", self.disparity)
        cv2.imwrite("Images/DepthMapBlur" + str(filename_index) + ".jpg", self.disparity_blur)
        cv2.imwrite("Images/Contours_" + str(filename_index) + ".jpg", self.disparity_contours)
        cv2.imwrite("Images/LeftCam" + str(filename_index) + ".jpg", self.frameL)
        cv2.imwrite("Images/RightCam" + str(filename_index) + ".jpg", self.frameR)
Example #15
def network(q):
    frame_count_orig = 0
    frame_interval = 1  # Number of frames after which to run face detection
    fps_display_interval = 1  # seconds
    frame_rate = 1
    frame_count = 0
    playing = True
    # PATH = '/home/jazari/Aman_Workspace/facenet/face_yolo_video_test/'
    PATH = './sameer.mp4'
    # video_capture = cv2.VideoCapture(0)
    video_capture = WebcamVideoStream(src=0).start()
    fps = FPS().start()
    # video_capture.set(15, -6.0)
    # video_capture.set(3,1280)
    # video_capture.set(4, 1024)
    # video_capture.set(15, -4.0)
    face_recognition = face.Recognition()
    start_time = time.time()
    attendance_counter = 0

    ###### CHANGES END. STARTING TO INTEGRATE VIDEO CODE ######

    while True:
        # Capture frame-by-frame

        now = datetime.datetime.now()
        current_day = now.day
        # att = pd.read_csv('attendance.csv')

        current_month_days = calendar.monthrange(now.year, now.month)[1]
        current_month = now.strftime("%B")
        # print('inside loop')
        try:
            att = pd.read_csv(SAVE_PATH + current_month + '_entry.csv')
            # print('taking from try')
        except:
            att = create_csv(current_month_days)
            # print('taking from catch')

        # print(att.iloc[current_day-1,  0])
        if int(att.iloc[current_day - 1, 0]) == 0:
            att.iloc[current_day - 1, 0] = now.day
            # print('Prining csv')
            att.to_csv(SAVE_PATH + current_month + '_entry.csv', index=False)
        # print(att)

        # ret, frame = video_capture.read()
        frame = video_capture.read()

        if now.strftime("%B") != current_month:
            current_month = now.strftime("%B")

            current_month_days = calendar.monthrange(now.year, now.month)[1]

            att = create_csv(current_month_days)

        if (frame_count % frame_interval) == 0:
            faces = face_recognition.identify(frame)

            # Check our current fps
            end_time = time.time()
            if (end_time - start_time) > fps_display_interval:
                frame_rate = int(frame_count / (end_time - start_time))
                start_time = time.time()
                frame_count = 0

        ### Attendance Code ###

        if datetime.datetime.now().day != current_day:
            attendance_counter += 1

        add_overlays(frame, faces, frame_rate, attendance_counter, att,
                     current_day, current_month)

        frame_count += 1
        emp = frame
        print('adding in queue')
        emp = cv2.cvtColor(emp, cv2.COLOR_BGR2RGB)
        # Adding a frame in Queue
        q.put(emp)
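network(q) acts as a producer that pushes RGB frames into a queue; a hypothetical consumer sketch (the queue size and window name are assumptions):

# Sketch of a consumer process for the frames queued by network().
import multiprocessing as mp
import cv2

if __name__ == "__main__":
    q = mp.Queue(maxsize=10)
    producer = mp.Process(target=network, args=(q,))
    producer.start()
    while True:
        rgb = q.get()                               # frames are queued in RGB order
        bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)  # convert back for OpenCV display
        cv2.imshow("attendance", bgr)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    producer.terminate()
    cv2.destroyAllWindows()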
Example #16
    vs = WebcamVideoStream(src=0).start()
elif args["multithread"] == 0:
    vs = VideoStream(src=0).start()
else:
    raise ValueError("multithread input value must be 0 or 1")

time.sleep(2.0)

count = 0
fps = FPS().start()
prev_frame = None
time_data = [''] * 1000

while True:
    # read the frame from the camera and send it to the server
    frame = vs.read()
    if prev_frame is None:
        prev_frame = np.zeros(frame.shape)
    comp = prev_frame == frame
    if comp.all():
        # print("matched")
        continue
    else:
        prev_frame = frame

    if args["bl_li"] == 1:

        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        fm = variance_of_laplacian(frame_gray)
        blur_text = "Not Blurry"
Example #17
# to the webcam
if not args.get("video", False):
    #	camera = cv2.VideoCapture(3)
    camera = WebcamVideoStream(src=3).start()

# otherwise, grab a reference to the video file
else:
    #	camera = cv2.VideoCapture(args["video"])
    camera = WebcamVideoStream(src=args["video"]).start()

fps = FPS().start()
# keep looping
while True:
    # grab the current frame
    #	(grabbed, frame) = camera.read()
    frame = camera.read()

    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if args.get("video") and not grabbed:
        break

    # resize the frame, and convert it to the HSV
    # color space
    #frame = imutils.resize(frame, width=600)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # construct a mask for the color "yellow", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(hsv, yellowLower, yellowUpper)
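    # The comment above describes erosions/dilations that are not shown in this
    # fragment; a sketch of that cleanup step (iteration counts are assumptions):
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)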
Example #18
class EyeCanSee(object):
    def __init__(self, center=int(cvsettings.CAMERA_WIDTH / 2), debug=False, is_usb_webcam=True, period_s=0.025):
        # Our video stream
        # If its not a usb webcam then get pi camera
        if not is_usb_webcam:
            self.vs = PiVideoStream(resolution=(cvsettings.CAMERA_WIDTH, cvsettings.CAMERA_HEIGHT))
            # Camera cvsettings
            self.vs.camera.shutter_speed = cvsettings.SHUTTER
            self.vs.camera.exposure_mode = cvsettings.EXPOSURE_MODE
            self.vs.camera.exposure_compensation = cvsettings.EXPOSURE_COMPENSATION
            self.vs.camera.awb_gains = cvsettings.AWB_GAINS
            self.vs.camera.awb_mode = cvsettings.AWB_MODE
            self.vs.camera.saturation = cvsettings.SATURATION
            self.vs.camera.rotation = cvsettings.ROTATION
            self.vs.camera.video_stabilization = cvsettings.VIDEO_STABALIZATION
            self.vs.camera.iso = cvsettings.ISO
            self.vs.camera.brightness = cvsettings.BRIGHTNESS
            self.vs.camera.contrast = cvsettings.CONTRAST

        # Else get the usb camera
        else:
            self.vs = WebcamVideoStream(src=0)
            self.vs.stream.set(cv2.CAP_PROP_FRAME_WIDTH, cvsettings.CAMERA_WIDTH)
            self.vs.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, cvsettings.CAMERA_HEIGHT)

        # Has camera started
        self.camera_started = False
        self.start_camera()  # Starts our camera

        # To calculate our error in positioning
        self.center = center

        # To determine if we actually detected lane or not
        self.detected_lane = False

        # debug mode on? (to display processed images)
        self.debug = debug

        # Time interval between in update (in ms)
        # FPS = 1/period_s
        self.period_s = period_s

        # Starting time
        self.start_time = time.time()

    # Mouse event handler for get_hsv
    def on_mouse(self, event, x, y, flag, param):
        if event == cv2.EVENT_LBUTTONDBLCLK:
            # Circle to indicate hsv location, and update frame
            cv2.circle(self.img_debug, (x, y), 3, (0, 0, 255))
            cv2.imshow('hsv_extractor', self.img_debug)

            # Print values
            values = self.hsv_frame[y, x]
            print('H:', values[0], '\tS:', values[1], '\tV:', values[2])

    def get_hsv(self):
        cv2.namedWindow('hsv_extractor')
        while True:
            self.grab_frame()

            # Bottom ROI
            cv2.rectangle(self.img_debug, (0, cvsettings.HEIGHT_PADDING_BOTTOM-2), (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_BOTTOM + cvsettings.IMG_ROI_HEIGHT + 2), (0, 250, 0), 2)

            # Top ROI
            cv2.rectangle(self.img_debug, (0, cvsettings.HEIGHT_PADDING_TOP-2), (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_TOP + cvsettings.IMG_ROI_HEIGHT + 2), (0, 250, 0), 2)

            # Object
            cv2.rectangle(self.img_debug, (0, cvsettings.OBJECT_HEIGHT_PADDING), (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_TOP - cvsettings.OBJECT_HEIGHT_PADDING), (238, 130, 238), 2)

            self.hsv_frame = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)

            # Mouse handler
            cv2.setMouseCallback('hsv_extractor', self.on_mouse, 0)
            cv2.imshow('hsv_extractor', self.img_debug)

            key = cv2.waitKey(0) & 0xFF
            if key == ord('q'):
                break
        self.stop_camera()
        cv2.destroyAllWindows()

    # Starts camera (needs to be called before run)
    def start_camera(self):
        self.camera_started = True
        self.vs.start()
        time.sleep(2.0)  # Wait for camera to warm up

    def stop_camera(self):
        self.camera_started = False
        self.vs.stop()

    # Grabs frame from camera
    def grab_frame(self):
        # Starts camera if it hasn't been started
        if not self.camera_started:
            self.start_camera()
        self.img = self.vs.read()
        self.img_debug = self.img.copy()

    # Normalizes our image
    def normalize_img(self):
        # Crop img and convert to hsv
        self.img_roi_bottom = np.copy(self.img[cvsettings.HEIGHT_PADDING_BOTTOM:int(cvsettings.HEIGHT_PADDING_BOTTOM + cvsettings.IMG_ROI_HEIGHT), :])
        self.img_roi_top = np.copy(self.img[cvsettings.HEIGHT_PADDING_TOP:int(cvsettings.HEIGHT_PADDING_TOP + cvsettings.IMG_ROI_HEIGHT), :])

        self.img_roi_bottom_hsv = cv2.cvtColor(self.img_roi_bottom, cv2.COLOR_BGR2HSV).copy()
        self.img_roi_top_hsv = cv2.cvtColor(self.img_roi_top, cv2.COLOR_BGR2HSV).copy()

        # Get our ROI's shape
        # Doesn't matter because both of them are the same shape
        self.roi_height, self.roi_width, self.roi_channels = self.img_roi_bottom.shape

    # Smooth image and convert to binary image (threshold)
    # Filter out colors that are not within the RANGE value
    def filter_smooth_thres(self, RANGE, color):
        for (lower, upper) in RANGE:
            lower = np.array(lower, dtype='uint8')
            upper = np.array(upper, dtype='uint8')

            mask_bottom = cv2.inRange(self.img_roi_bottom_hsv, lower, upper)
            mask_top = cv2.inRange(self.img_roi_top_hsv, lower, upper)

        blurred_bottom = cv2.medianBlur(mask_bottom, 5)
        blurred_top = cv2.medianBlur(mask_top, 5)

        # Morphological transformation
        kernel = np.ones((2, 2), np.uint8)
        smoothen_bottom = blurred_bottom #cv2.morphologyEx(blurred, cv2.MORPH_OPEN, kernel, iterations=5)
        smoothen_top = blurred_top  # cv2.morphologyEx(blurred, cv2.MORPH_OPEN, kernel, iterations=5)

        """
        if self.debug:
            cv2.imshow('mask bottom ' + color, mask_bottom)
            cv2.imshow('blurred bottom' + color, blurred_bottom)

            cv2.imshow('mask top ' + color, mask_top)
            cv2.imshow('blurred top' + color, blurred_top)
        """

        return smoothen_bottom, smoothen_top

    # Gets metadata from our contours
    def get_contour_metadata(self):

        # Metadata (x,y,w,h)for our ROI
        contour_metadata = {}
        for cur_img in [self.thres_yellow_bottom, self.thres_yellow_top, self.thres_blue_bottom, self.thres_blue_top]:
            key = ''

            # Blue is left lane, Yellow is right lane
            if cur_img is self.thres_yellow_bottom:
                key = 'right_bottom'
            elif cur_img is self.thres_yellow_top:
                key = 'right_top'
            elif cur_img is self.thres_blue_bottom:
                key = 'left_bottom'
            elif cur_img is self.thres_blue_top:
                key = 'left_top'

            _, contours, hierarchy = cv2.findContours(cur_img.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

            cur_height, cur_width = cur_img.shape

            # Get index of largest contour
            try:
                areas = [cv2.contourArea(c) for c in contours]
                max_index = np.argmax(areas)
                cnt = contours[max_index]

                # Metadata of contour
                x, y, w, h = cv2.boundingRect(cnt)

                # Normalize it to the original picture
                x += int(cvsettings.WIDTH_PADDING + w / 2)

                if 'top' in key:
                    y += int(cvsettings.HEIGHT_PADDING_TOP + h / 2)
                else:
                    y += int(cvsettings.HEIGHT_PADDING_BOTTOM + h / 2)

                contour_metadata[key] = (x, y)

                self.detected_lane = True

            # If it throws an error then it doesn't have a ROI
            # Means we're too far off to the left or right
            except:

                # Blue is left lane, Yellow is right lane
                x = int(cvsettings.WIDTH_PADDING) - cvsettings.CAMERA_WIDTH

                if 'bottom' in key:
                    y = int(cur_height / 2) + int(cvsettings.HEIGHT_PADDING_BOTTOM + cur_height / 2)
                else:
                    y = int(cur_height / 2) + int(cvsettings.HEIGHT_PADDING_TOP + cur_height / 2)

                if 'right' in key:
                    x = int(cur_width)*2

                contour_metadata[key] = (x, y)

                self.detected_lane = False

        return contour_metadata

    # Gets the centered coord of the detected lines
    def get_centered_coord(self):
        bottom_centered_coord = None
        top_centered_coord = None

        left_xy_bottom = self.contour_metadata['left_bottom']
        right_xy_bottom = self.contour_metadata['right_bottom']

        left_xy_top = self.contour_metadata['left_top']
        right_xy_top = self.contour_metadata['right_top']

        bottom_xy = (left_xy_bottom[0] + right_xy_bottom[0], left_xy_bottom[1] + right_xy_bottom[1])
        bottom_centered_coord = (int(bottom_xy[0] / 2), int(bottom_xy[1] / 2))

        top_xy = (left_xy_top[0] + right_xy_top[0], left_xy_top[1] + right_xy_top[1])
        top_centered_coord = (int(top_xy[0] / 2), int(top_xy[1] / 2))

        # Left can't be greater than right and vice-versa
        if left_xy_top > right_xy_top:
            top_centered_coord = (0, top_centered_coord[1])
        elif right_xy_top < left_xy_top:
            top_centered_coord = (cvsettings.CAMERA_WIDTH, top_centered_coord[1])

        if left_xy_bottom > right_xy_bottom:
            bottom_centered_coord = (0, bottom_centered_coord[1])
        elif right_xy_bottom < left_xy_bottom:
            bottom_centered_coord = (cvsettings.CAMERA_WIDTH, top_centered_coord[1])

        return bottom_centered_coord, top_centered_coord

    # Gets the error of the centered coordinates (x)
    # Also normalizes their values
    def get_errors(self):
        top_error = (float(self.center_coord_top[0]) - float(self.center))/float(cvsettings.CAMERA_WIDTH + cvsettings.WIDTH_PADDING)
        bottom_error = (float(self.center_coord_bottom[0]) - float(self.center))/float(cvsettings.CAMERA_WIDTH + cvsettings.WIDTH_PADDING)
        relative_error = (float(self.center_coord_top[0]) - float(self.center_coord_bottom[0]))/float(cvsettings.CAMERA_WIDTH + cvsettings.WIDTH_PADDING)

        return (top_error + relative_error + bottom_error)/3.0

    # Object avoidance
    def where_object_be(self):
        # We only want to detect objects in our path: center of top region and bottom region
        # So to save processing speed, we'll only threshold from center of top region to center of bottom region
        left_x = cvsettings.WIDTH_PADDING
        right_x = cvsettings.CAMERA_WIDTH

        # Image region with objects
        img_roi_object = self.img[cvsettings.OBJECT_HEIGHT_PADDING : int(cvsettings.HEIGHT_PADDING_TOP - cvsettings.OBJECT_HEIGHT_PADDING), left_x:right_x]
        img_roi_object_hsv = cv2.cvtColor(img_roi_object, cv2.COLOR_BGR2HSV).copy()

        # Filtering color and blurring
        for (lower, upper) in cvsettings.OBJECT_HSV_RANGE:
            lower = np.array(lower, dtype='uint8')
            upper = np.array(upper, dtype='uint8')

            mask_object = cv2.inRange(img_roi_object_hsv, lower, upper)

        blurred_object = cv2.medianBlur(mask_object, 25)

        # Finding position of object (if its there)
        _, contours, hierarchy = cv2.findContours(blurred_object.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        left_x = self.contour_metadata['left_top'][0]
        right_x = self.contour_metadata['right_top'][0]

        for c in contours:
            areas = cv2.contourArea(c)

            # Object needs to be larger than a certain area
            if areas > cvsettings.OBJECT_AREA:
                x, y, w, h = cv2.boundingRect(c)

                y += int(cvsettings.OBJECT_HEIGHT_PADDING + h / 2)

                # Confusing part - this finds the object and makes it think that
                # it is also a line to avoid bumping into it
                # It +w and -w to find which line its closest to and then set
                # the opposite as to be the new left/right lane
                # e.g. line is closest to left lane (x-w), so left lane new x is
                # (x+w)

                distance_to_left = abs(x - left_x)
                distance_to_right = abs(x+w - right_x)

                # Make object's left most area the middle of right lane
                if distance_to_left > distance_to_right:
                    self.contour_metadata['right_top'] = (x, self.contour_metadata['right_top'][1])

                # Make object's right most area the middle of left lane
                elif distance_to_right > distance_to_left:
                    self.contour_metadata['left_top'] = (x+w, self.contour_metadata['left_top'][1])

                if self.debug:
                    cv2.circle(self.img_debug, (x + w // 2, y + h // 2), 5, (240, 32, 160), 2)

        if self.debug:
            cv2.imshow('Blurred object', blurred_object)


    # Where are we relative to our lane
    def where_lane_be(self):
        # Running once every period_ms
        while float(time.time() - self.start_time) < self.period_s:
            pass

        # Update time instance
        self.start_time = time.time()

        # Camera grab frame and normalize it
        self.grab_frame()
        self.normalize_img()

        # Filter out them colors
        self.thres_blue_bottom, self.thres_blue_top = self.filter_smooth_thres(cvsettings.BLUE_HSV_RANGE, 'blue')
        self.thres_yellow_bottom, self.thres_yellow_top = self.filter_smooth_thres(cvsettings.YELLOW_HSV_RANGE, 'yellow')

        # Get contour meta data
        self.contour_metadata = self.get_contour_metadata()

        # Finds objects and (and corrects lane position)
        # this overwrite contour_metadata
        #self.where_object_be()

        # Find the center of the lanes (bottom and top) [we wanna be in between]
        self.center_coord_bottom, self.center_coord_top = self.get_centered_coord()

        # Gets relative error between top center and bottom center
        self.relative_error = self.get_errors()

        if self.debug:
            # Drawing locations
            blue_top_xy = self.contour_metadata['left_top']
            blue_bottom_xy = self.contour_metadata['left_bottom']
            yellow_top_xy = self.contour_metadata['right_top']
            yellow_bottom_xy = self.contour_metadata['right_bottom']

            # Circles to indicate lanes
            cv2.circle(self.img_debug, blue_top_xy, 5, (255, 0, 0), 2)
            cv2.circle(self.img_debug, blue_bottom_xy, 5, (255, 0, 0), 2)
            cv2.circle(self.img_debug, yellow_top_xy, 5, (0, 255, 255), 2)
            cv2.circle(self.img_debug, yellow_bottom_xy, 5, (0, 255, 255), 2)
            cv2.circle(self.img_debug, self.center_coord_bottom, 5, (0, 255, 0), 3)
            cv2.circle(self.img_debug, self.center_coord_top, 5, (0, 255, 0), 3)

            # ROI for object detection
            cv2.rectangle(self.img_debug, (0, cvsettings.OBJECT_HEIGHT_PADDING), (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_TOP - cvsettings.OBJECT_HEIGHT_PADDING), (238, 130, 238), 2)

            # Displaying image
            cv2.imshow('img', self.img_debug)
            #cv2.imshow('img_roi top', self.img_roi_top)
            #cv2.imshow('img_roi bottom', self.img_roi_bottom)
            #cv2.imshow('img_hsv', self.img_roi_hsv)
            cv2.imshow('thres_blue_bottom', self.thres_blue_bottom)
            cv2.imshow('thres_blue_top', self.thres_blue_top)
            cv2.imshow('thres_yellow_bottom', self.thres_yellow_bottom)
            cv2.imshow('thres_yellow_top', self.thres_yellow_top)
            key = cv2.waitKey(0) & 0xFF  # waitKey(0) pauses on every frame; use waitKey(1) to run continuously

    # Use this to calculate fps
    def calculate_fps(self, frames_no=100):
        fps = FPS().start()

        # Don't wanna display window
        if self.debug:
            self.debug = not self.debug

        for i in range(0, frames_no):
            self.where_lane_be()
            fps.update()

        fps.stop()

        # Restore the debug display flag
        if not self.debug:
            self.debug = not self.debug

        print('Time taken: {:.2f}'.format(fps.elapsed()))
        print('~ FPS : {:.2f}'.format(fps.fps()))

    # Use this to save images to a location
    def save_images(self, dirname='dump'):
        import os
        img_no = 1

        # Makes the directory
        if not os.path.exists('./' + dirname):
            os.mkdir(dirname)

        while True:
            self.grab_frame()

            if self.debug:
                cv2.imshow('frame', self.img)

            k = cv2.waitKey(1) & 0xFF

            if k == ord('s'):
                cv2.imwrite(os.path.join(dirname, 'dump_' + str(img_no) + '.jpg'), self.img)
                img_no += 1

            elif k == ord('q'):
                break

        cv2.destroyAllWindows()

    # Destructor
    def __del__(self):
        self.vs.stop()
        cv2.destroyAllWindows()
Ejemplo n.º 19
0
        elif abs(r_eye_x - x) < frame_x * .05 and abs(
                r_eye_y - y) < frame_y * .05 and right_eye_found == True:
            r_eye_blink_state = 'open'
        counter = counter + 1

    return None


##############################MAIN SCRIPT#############################################

lcdstate = ''  ##found, not
while True:

    tempState = ''

    frameForDetection = cap.read()
    frameForDetection = cv2.resize(frameForDetection, (320, 240))

    if isRaspberryPi == True:  ##Convert to grayscale if RPI
        frameForDetection = cv2.cvtColor(frameForDetection, cv2.COLOR_BGR2GRAY)
    else:  ##Create image to draw on if not RPI
        frameForDrawing = frameForDetection.copy()

    if counter == 0:
        frame_y = frameForDetection.shape[0]  # image height
        frame_x = frameForDetection.shape[1]  # image width

    #####FRAME DRAWING AND VARIABLE FETCHING#######

    detect_left_and_right_eyes(frameForDetection)
Ejemplo n.º 20
0
class App:
    def __init__(self, master, res):
        #Pass the parameter master to variable self.master
        self.master = master

        #Sets the icon and titlebar text (Quite unnecessary but looks better)
        self.master.iconbitmap('content/warden.ico')

        #key binding definition for f key to fullscreen function
        self.master.bind('<Key>', self.event_handler)

        #Fullscreen status checks the fullscreen status when key or mouse enters/exits fullscreen
        self.fullscreen_status = False

        #set the titlebar text
        self.master.title("Warden Control Panel")

        #Configure the master frame's grid
        self.master.grid_rowconfigure(0, weight=1)
        self.master.grid_rowconfigure(1, weight=1)
        self.master.grid_columnconfigure(0, weight=1)
        self.master.grid_columnconfigure(1, weight=1)

        #Set the opening resolution, and minsize if required
        #self.master.minsize(res[1],res[0])
        self.master.geometry(str(res[1]) + "x" + str(res[0]))

        #Set 4 frames and put them into the grid (grid contains 4 equal boxes)

        #Frame 1 will hold the VIDEO STREAM
        self.frame1 = Label(self.master, background="grey")
        self.frame1.grid(column=0, row=0, sticky='nsew')
        self.frame1.grid_rowconfigure(0, weight=1)
        self.frame1.grid_columnconfigure(0, weight=1)

        self.video_frame = Label(self.frame1,
                                 width=self.frame1.winfo_width(),
                                 height=self.frame1.winfo_height(),
                                 background="black")
        self.video_frame.grid(sticky='nsew')

        #bind double mouse click to the video frame
        self.video_frame.bind('<Double-Button>', self.event_handler)
        self.vid_dis_img_path = 'content/vid_disconnect.jpg'

        #Frame 2 buttons
        self.frame2 = Frame(self.master, background="black")
        self.frame2.grid(column=1, row=0, sticky='nsew')
        self.frame2.grid_rowconfigure(0, weight=1)
        self.frame2.grid_columnconfigure(0, weight=1)
        # self.button_frame = Label(self.frame2,width=self.frame2.winfo_width(),height=self.frame2.winfo_height(),background="black")
        # self.button_frame.grid(sticky = 'nsew')

        # self.ward_rawimg = Image.open('warden_text.png')
        # self.ward_rawimg = self.ward_rawimg.resize((self.frame2.winfo_width(),self.frame2.winfo_height()), Image.ANTIALIAS)
        # self.ward_img = ImageTk.PhotoImage(self.ward_rawimg)
        # self.button_frame.config(image=self.ward_img)

        #frame 3 contains the CONSOLE
        self.frame3 = Frame(self.master, background="black")
        self.frame3.grid(column=0, row=1, sticky='nsew')
        self.frame3.grid_rowconfigure(0, weight=1)
        self.frame3.grid_columnconfigure(0, weight=1)
        # self.frame3.grid_columnconfigure(1,weight = 1)

        self.textArea = Text(self.frame3,
                             wrap=WORD,
                             foreground="black",
                             background="white",
                             width=1,
                             height=1,
                             state=DISABLED)
        self.textArea.grid(sticky='nsew')

        #Scrollbar (not working)
        #self.scroll=Scrollbar(self.textArea, orient = VERTICAL)
        # self.scroll=Scrollbar(self.textArea)
        # self.scroll.grid(column=1, row=0, sticky='nsew')
        # self.textArea.config(yscrollcommand=self.scroll.set)
        # self.scroll.config(command=self.textArea.yview)

        self.console_fhandle = False
        self.console_file_exists = False
        self.console_modified_time = 0
        #self.console_count = 0
        self.console_file_path = '/home/pi/Devel/secbot/files/main.log'
        #self.console_file_path = r'C:\Coding\Secbot\files\main.log'

        #Frame 4 MAP
        self.frame4 = Frame(self.master, background="black")
        self.frame4.grid(column=1, row=1, sticky='nsew')
        self.frame4.grid_rowconfigure(0, weight=1)
        self.frame4.grid_columnconfigure(0, weight=1)

        self.image_frame = Label(self.frame4,
                                 width=self.frame4.winfo_width(),
                                 height=self.frame4.winfo_height(),
                                 background="black")
        self.image_frame.grid(sticky='nsew')

        self.img_path = '/home/pi/Devel/secbot/files/depth.png'
        #self.img_path = r'C:\Coding\Secbot\files\depth.png'
        self.img_modified_time = 0
        self.img_file_exists = False
        self.img_get_path = 'content/depth/depth.png'

        #SSH settings
        self.ssh = paramiko.SSHClient()
        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        #Set variables to check connections
        self.video_conn_ready = False
        self.pi_conn_ready = False

        self.video_ready = False
        self.ssh_ready = False

        #Start threads:-

        #Console thread
        self.console_thread = threading.Thread(target=self.write_to_console,
                                               args=())
        self.console_thread.daemon = True
        self.console_thread.start()

        #Video feed thread
        self.video_feed_thread = Thread(target=self.video_feed, args=())
        self.video_feed_thread.daemon = True
        self.video_feed_thread.start()

        #Video feed initialiser thread
        self.video_feed_init_thread = Thread(
            target=self.video_feed_initialiser, args=())
        self.video_feed_init_thread.daemon = True
        self.video_feed_init_thread.start()

        #Connections ready thread
        self.conn_thread = threading.Thread(target=self.conn_ready, args=())
        self.conn_thread.daemon = True
        self.conn_thread.start()

        #Update the map thread
        self.img_thread = threading.Thread(target=self.image_update, args=())
        self.img_thread.daemon = True
        self.img_thread.start()

        self.console_list = []
        self.modifiedtime = 0

        self.authProblem = False

    def conn_ready(self):
        while True:
            try:
                video = str(
                    subprocess.check_output("ping -n 1 192.168.1.10",
                                            shell=True))
            except:
                video = ""
            try:
                pi = str(
                    subprocess.check_output("ping -n 1 192.168.1.2",
                                            shell=True))
                #pi = str(subprocess.check_output("ping -n 1 192.168.1.176", shell=True))
            except:
                pi = ''
            if 'Reply from 192.168.1.2' in pi:
                #if 'Reply from 192.168.1.176' in pi:
                self.pi_conn_ready = True
            else:
                self.pi_conn_ready = False
                self.ssh_ready = False
            if 'Reply from 192.168.1.10' in video:
                #print("ping has been received: ",video)
                self.video_conn_ready = True
            else:
                #print("ping failed, setting video connection and video ready to false")
                self.video_conn_ready = False
                self.video_ready = False
            time.sleep(3)

    def video_feed_initialiser(self):
        if not self.video_ready:
            #print("video feed initilaiser has recognised that self.video_ready is false")
            if self.video_conn_ready:
                #print("attempting to reconnect the video connection")
                self.vs = WebcamVideoStream(
                    src=
                    "rtsp://192.168.1.10:554/user=admin&password=&channel=1&stream=0.sdp?real_stream"
                ).start()
                if str(self.vs.read()) != 'None':
                    #print("setting video_ready to true (self.vs is not none)")
                    self.video_ready = True
        self.master.after(1000, self.video_feed_initialiser)

    def image_update(self):
        if self.img_file_exists == False:
            # print("ssh ready? = ",self.ssh_ready)
            # print("File exsists marked as false, at the top")
            self.img_modified_time = 0
            try:
                ssh = self.ssh.exec_command('ls', timeout=5)
            except Exception as msg:
                #print (msg)
                pass
            if self.ssh_ready == False and self.pi_conn_ready == True:
                if self.authProblem != True:
                    #print("ssh connect called")
                    try:
                        self.ssh.connect('192.168.1.2',
                                         username='******',
                                         password='******')
                        connected = True
                    except (paramiko.ssh_exception.AuthenticationException,
                            socket.error) as msg:
                        #except (paramiko.SSHException, socket.error) as msg:
                        print(msg)
                        if str(msg) == 'Authentication failed.':
                            self.authProblem = True
                        self.ssh.close()
                        #pass
                        connected = False

                    if connected == True:
                        try:
                            print('sftp')
                            self.ftp_client = self.ssh.open_sftp()
                            print('sftp open')
                            self.ssh_ready = True

                        except (paramiko.ssh_exception, socket.error) as msg:
                            # print (msg)
                            pass
                    else:
                        self.ssh_ready = False

            if self.ssh_ready == True and self.img_file_exists == False:
                #print("ssh ready and file doesnt exist")
                #print("should be attempting to open the file again")
                try:
                    self.ftp_client.get(self.img_path, self.img_get_path)
                    self.img_file_exists = True
                    print("1")
                except Exception as msg:
                    # print(msg)
                    pass

                if self.img_file_exists == True:
                    try:
                        self.img_modified_time = self.ftp_client.stat(
                            self.img_path).st_mtime
                    except Exception as msg:
                        self.img_modified_time = False
                        self.img_file_exists = False
        if self.img_file_exists:
            self.raw_img = Image.open(self.img_get_path)
            try:
                modified_time = self.ftp_client.stat(self.img_path).st_mtime
                size_img = self.ftp_client.stat(self.img_path).st_size
                print(size_img)
            except:
                modified_time = False
                size_img = 0
            if modified_time != self.img_modified_time and size_img != 0:
                try:
                    self.ftp_client.get(self.img_path, self.img_get_path)
                    self.raw_img = Image.open(self.img_get_path)
                    self.img_modified_time = modified_time
                except Exception as msg:
                    self.img_file_exists = False
                    pass
            if self.raw_img.width != self.image_frame.winfo_width():
                self.raw_img = Image.open(self.img_get_path)
                self.raw_img = self.raw_img.resize(
                    (self.image_frame.winfo_width(),
                     self.image_frame.winfo_height()), Image.ANTIALIAS)
                self.img = ImageTk.PhotoImage(self.raw_img)
                self.image_frame.config(image=self.img)
        self.master.after(1000, self.image_update)

    def video_feed(self):
        if self.video_ready:
            try:
                frame = self.vs.read()  #read the next frame
                ##resize the image
                if self.video_frame.winfo_width() > 1:
                    if self.video_frame.winfo_height() < int(
                        (self.video_frame.winfo_width() * 0.5625)):
                        frame = imutils.resize(
                            frame,
                            width=int(self.video_frame.winfo_height() *
                                      1.77777)  #image width
                            ,
                            height=self.video_frame.winfo_height(
                            )  #image height
                        )
                    else:
                        frame = imutils.resize(
                            frame,
                            width=self.video_frame.winfo_width()  #image width
                            ,
                            height=int((self.video_frame.winfo_width() *
                                        0.5625)))  #image height
                ##process the raw frame
                cvimage = cv.cvtColor(
                    frame, cv.COLOR_BGR2RGBA)  #colours picture correctly
                current_image = Image.fromarray(
                    cvimage
                )  #Something to do with PIL, processes the matrix array
                imagetk = ImageTk.PhotoImage(
                    image=current_image)  # convert image for tkinter
                self.video_frame.imgtk = imagetk  # stops garbage collection
                self.video_frame.config(
                    image=imagetk)  # show the image in image_box
            except:
                pass

        else:
            #print("should be setting video window to the video not there image")
            self.vid_dis_raw_img = Image.open(self.vid_dis_img_path)
            if self.vid_dis_raw_img.width != self.image_frame.winfo_width():
                self.vid_dis_raw_img = self.vid_dis_raw_img.resize(
                    (self.video_frame.winfo_width(),
                     self.video_frame.winfo_height()), Image.ANTIALIAS)
            self.vid_dis_img = ImageTk.PhotoImage(self.vid_dis_raw_img)
            self.video_frame.config(image=self.vid_dis_img)
        self.master.after(
            50, self.video_feed
        )  # cause the function to be called after X milliseconds

    def event_handler(self, event=None):
        if event.char == 'f' or event.keysym == 'Escape' or event.num == 1:
            self.fullscreen(event)
        if event.char == 'q':
            pass

    def fullscreen(self, event=None):
        if event.char == 'f' or event.num == 1 or event.keysym == 'Escape':
            if self.fullscreen_status == False and (event.char == 'f'
                                                    or event.num == 1):
                self.frame1.grid(columnspan=2, rowspan=2)
                self.frame1.lift()
                self.fullscreen_status = True
            else:
                self.frame1.grid(columnspan=1, rowspan=1)
                self.fullscreen_status = False

    def write_to_console(self):
        if self.console_file_exists == False:
            # print("ssh ready? = ",self.ssh_ready)
            # print("File exsists marked as false, at the top")
            self.textArea.config(state=NORMAL)
            self.textArea.delete('1.0', END)
            self.textArea.insert(INSERT, "Waiting for console data")
            self.textArea.config(state=DISABLED)
            self.modifiedtime = 0
            self.console_count = 0
            self.console_list = []
            try:
                ssh = self.ssh.exec_command('ls', timeout=5)
            except Exception as msg:
                #print (msg)
                pass
            if self.ssh_ready == False and self.pi_conn_ready == True:
                if self.authProblem != True:
                    #print("ssh connect called")
                    try:
                        self.ssh.connect('192.168.1.2',
                                         username='******',
                                         password='******')
                        connected = True
                    except (paramiko.ssh_exception.AuthenticationException,
                            socket.error) as msg:
                        #except (paramiko.SSHException, socket.error) as msg:
                        print(msg)
                        if str(msg) == 'Authentication failed.':
                            self.authProblem = True
                        self.ssh.close()
                        #pass
                        connected = False

                    if connected == True:
                        try:
                            self.ftp_client = self.ssh.open_sftp()
                            self.ssh_ready = True

                        except (paramiko.ssh_exception, socket.error) as msg:
                            # print (msg)
                            pass
                    else:
                        self.ssh_ready = False

            if self.ssh_ready == True and self.console_file_exists == False:
                #print("ssh ready and file doesnt exist")
                #print("should be attempting to open the file again")
                try:
                    self.console_fhandle = self.ftp_client.open(
                        self.console_file_path)
                    self.console_file_exists = True
                    #print ("1")
                except Exception as msg:
                    # print(msg)
                    pass

                if self.console_file_exists == True:
                    self.textArea.config(state=NORMAL)
                    self.textArea.delete('1.0', END)
                    self.textArea.config(state=DISABLED)
                    try:
                        modifiedtime = self.ftp_client.stat(
                            self.console_file_path).st_mtime
                    except Exception as msg:
                        modifiedtime = False
                        self.console_file_exists = False

        ##################################################################################
        if self.console_file_exists:
            try:
                modifiedtime = self.ftp_client.stat(
                    self.console_file_path).st_mtime
            except:
                modifiedtime = False
            if modifiedtime:
                if modifiedtime != self.console_modified_time or len(
                        self.console_list) < 1:
                    file_temp = self.console_fhandle.readlines()
                    for item in file_temp:
                        self.console_list.append(item)
                    list_length = len(self.console_list)
                    while self.console_count < list_length - 1:
                        self.textArea.config(state=NORMAL)
                        self.textArea.insert(
                            END, self.console_list[self.console_count])
                        self.textArea.config(state=DISABLED)
                        self.textArea.see("end")
                        self.console_count += 1
                    self.console_modified_time = modifiedtime
            else:
                #print("console file doesnt exist line 330")
                self.console_file_exists = False
        self.master.after(1000, self.write_to_console)
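
The SSH connect / open_sftp sequence above is duplicated in image_update and write_to_console; a minimal sketch of factoring it into one helper method is shown below (the method name is illustrative, and the redacted '******' credentials are kept as placeholders):

    def ensure_sftp(self, host='192.168.1.2', username='******', password='******'):
        # (Re)establish the SFTP channel when the Pi answers ping but SSH is not up yet (sketch).
        if self.ssh_ready or not self.pi_conn_ready or self.authProblem:
            return self.ssh_ready
        try:
            self.ssh.connect(host, username=username, password=password)
            self.ftp_client = self.ssh.open_sftp()
            self.ssh_ready = True
        except (paramiko.ssh_exception.AuthenticationException, socket.error) as msg:
            if str(msg) == 'Authentication failed.':
                self.authProblem = True
            self.ssh.close()
            self.ssh_ready = False
        return self.ssh_ready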
Ejemplo n.º 21
0
obj_corners[0,0,0] = 0
obj_corners[0,0,1] = 0
obj_corners[1,0,0] = h_template
obj_corners[1,0,1] = 0
obj_corners[2,0,0] = h_template
obj_corners[2,0,1] = w_template
obj_corners[3,0,0] = 0
obj_corners[3,0,1] = w_template

# start frame acquisition
print("Camera init -> DONE")
cam = WebcamVideoStream(src=0).start()

while True:
    # Capture frame
    frame = cam.read()
    scene_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    scene_img = cv2.resize(scene_img,(w_scene, h_scene))
    # get feature in the scene
    kp_scene, des_scene = orb_detector.detectAndCompute(scene_img, None)
    # match feature with the template using KNN matching (norm L2)
    knn_matches = matcher.knnMatch(des_obj, des_scene, 2)
    # filter matches (lowe ratio)
    good_matches = []
    for m,n in knn_matches:
        if m.distance < ratio_thresh * n.distance:
            good_matches.append(m)

    # draw matches on a copy of the scene image
    img_matches = scene_img.copy()
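
The example is cut off before the matched keypoints are used; a plausible continuation estimates a homography from good_matches and projects obj_corners into the scene. This is a sketch, assuming kp_obj holds the template keypoints and obj_corners is a float32 (4, 1, 2) array:

    if len(good_matches) >= 4:
        # matched coordinates: queryIdx indexes the template, trainIdx the scene
        src_pts = np.float32([kp_obj[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp_scene[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
        H, inlier_mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        if H is not None:
            # project the template corners into the scene and outline the detection
            scene_corners = cv2.perspectiveTransform(obj_corners, H)
            cv2.polylines(img_matches, [np.int32(scene_corners)], True, 255, 3)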
Ejemplo n.º 22
0
    def run(self, teleop):
        add_text = "Starting"
        sess = Session()
        vs = WebcamVideoStream(src=-1).start()
        teleop._running = True

        grow_led = False
        while True:
            # Get inputs
            keycode = teleop._interface.read_key()
            if keycode:
                if teleop._key_pressed(keycode):
                    teleop._publish(add_text)
            else:
                teleop._publish(add_text)

            if teleop.LED:
                GPIO.output(self.LED, GPIO.HIGH)
            else:
                GPIO.output(self.LED, GPIO.LOW)

            if teleop.growLED != grow_led:
                if teleop.growLED:
                    val = 200
                else:
                    val = 0
                self.ser.write(f"<LED,{val}\n>".encode('utf-8'))
                line = self.ser.readline().decode('utf-8').rstrip().split(',')
                if line[0][0] == 'T':
                    temp = float(line[0][1:]) / 100.0
                    hum = float(line[1][1:]) / 100.0
                    teleop.update_values(temp, hum)
                grow_led = not grow_led

            frame = vs.read()
            frame = imutils.resize(frame, width=teleop.img_res)
            frame = cv.flip(frame, 0)
            gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)

            if self.mode == 'Ar':
                frame, err, ids = self.detect_aruco(gray, frame, sess)

            elif self.mode == 'blob':
                frame, err = self.detect_blob(frame)

            if teleop.go_home == True:
                if teleop.home_cmd and not self.limit_switch:
                    tr = threading.Thread(target=self.motor.move_cont)
                    tr.start()
                    add_text = "Going home"
                if self.limit_switch:
                    self.motor.stop_motor = False
                    teleop.go_home = False
                    add_text = "Got home"

                teleop.home_cmd = False

            if teleop.move_single == True:
                # move motor x steps
                steps = teleop.mov_steps
                add_text = f"Moving {steps} steps"
                if steps > 0 and self.limit_switch:
                    pass
                else:
                    self.tr = threading.Thread(target=self.motor.move,
                                               args=(teleop.mov_steps,
                                                     teleop.motor_speed))
                    self.tr.start()
                #self.motor.move(teleop.mov_steps,teleop.motor_speed)
                teleop.move_single = False

            if teleop.servo_pos[1] and self.ser is not None:
                # move servo motor
                self.ser.write(
                    f"<SERVO,{teleop.servo_pos[0]}\n>".encode('utf-8'))
                line = self.ser.readline().decode('utf-8').rstrip().split(',')
                if line[0][0] == 'T':
                    temp = float(line[0][1:]) / 100.0
                    hum = float(line[1][1:]) / 100.0
                    teleop.update_values(temp, hum)
                teleop.servo_pos[1] = False

            teleop.limit_switch = self.limit_switch

            if sess.found:
                # initialize timer
                sess.started_section = time.perf_counter()
                sess.mode = 'stay'
                sess.found = False
                add_text = 'Stopping'

            if sess.mode == 'stay':
                obj = err
                #check time
                if time.perf_counter(
                ) - sess.started_section >= sess.time_in_section:
                    sess.mode = 'move'
                    add_text = 'Moving'
            elif sess.mode == 'move':
                obj = -30

            #if abs(obj)>10:
            #   self.motor.move(obj)

            # Display the resulting frame
            h, w, _ = frame.shape
            h = int(h)
            cv.line(frame, (0, int(h / 2)), (w, int(h / 2)), (0, 0, 0), 1)
            cv.line(frame, (int(w / 2), h), (int(w / 2), 0), (0, 0, 0), 1)
            cv.imshow('frame', frame)
            if cv.waitKey(1) == ord('q'):
                break
            if teleop._running == False:
                break
        # When everything done, release the capture
        vs.stop()
        GPIO.output(self.LED, GPIO.LOW)
        cv.destroyAllWindows()
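
The LED and SERVO branches above repeat the same write / readline / parse sequence; a small helper that factors it out could look like the sketch below. The "<CMD,value\n>" framing and the "T...,H..." reply (values scaled by 100) are taken from the calls above; the helper name is illustrative:

def send_serial_command(ser, command, value):
    # Frame the command exactly like the LED/SERVO writes and parse the
    # temperature/humidity reply (both values arrive multiplied by 100).
    ser.write(f"<{command},{value}\n>".encode('utf-8'))
    parts = ser.readline().decode('utf-8').rstrip().split(',')
    if parts and parts[0].startswith('T'):
        temp = float(parts[0][1:]) / 100.0
        hum = float(parts[1][1:]) / 100.0
        return temp, hum
    return None

With such a helper, each branch reduces to a single call followed by teleop.update_values(*reading) whenever a reading comes back.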
Ejemplo n.º 23
0
class Pipeline:
    # created a *threaded* video stream, allow the camera sensor to warmup,
    # and start the FPS counter
    print("[INFO] sampling THREADED frames from webcam...")

    #Constructor creates a list
    def __init__(self, fixed_size):
        self.vs1 = WebcamVideoStream(src=0)  # For analyzing
        self.vs1.start()
        self.vs2 = WebcamVideoStream(src=0)  # For analyzing
        self.vs2.start()
        self.cb = CircularBuffer(fixed_size)
        self.stopped = False
        self.current_stream = (None, None)
        self.disp = None
        self.haptic = None

    #Start thread
    def start(self):
        t = Thread(target=self.capture, args=())
        t.daemon = True
        t.start()
        return self

    def stop(self):
        self.stopped = True

    def capture(self):
        # keep looping infinitely until the thread indicator
        # variable is set, then stop the thread
        while not self.stopped:
            left_captured_frame = self.vs1.read()
            right_captured_frame = self.vs2.read()
            self.cb.enqueue((left_captured_frame, right_captured_frame))

    def push(self):
        # grab one stereo pair and enqueue it
        pushed_stereo = (self.vs1.read(), self.vs2.read())
        self.cb.enqueue(pushed_stereo)

    def pull(self):
        pulled_stereo = self.cb.dequeue()
        return pulled_stereo

    def get_disp(self):
        plt.clf()
        self.disp = i2d(self.stereo, 16)
        plt.imshow(self.disp, 'gray')
        buf = io.BytesIO()
        plt.savefig(buf, format='png')
        buf.seek(0)
        return buf.read()

    def get_depth(self):
        plt.clf()
        # Then send the disparity to haptic
        self.haptic = np.array(depth_to_haptic(self.disp))
        plt.scatter(self.haptic[:, 0], self.haptic[:, 1])
        buf = io.BytesIO()
        plt.savefig(buf, format='png')
        buf.seek(0)
        return buf.read()

    def get_frame(self):
        self.stereo = self.pull()
        # depth = self.process(stereo_feed)
        left_feed = self.stereo[0]
        right_feed = self.stereo[1]

        # We are using Motion JPEG, but OpenCV defaults to capture raw images,
        # so we must encode it into JPEG in order to correctly display the
        # video stream.
        l_ret, l_jpeg = cv2.imencode('.jpg', left_feed)
        r_ret, r_jpeg = cv2.imencode('.jpg', right_feed)
        return l_jpeg.tobytes(), r_jpeg.tobytes()

    def gen(self, dir):
        while True:
            self.current_stream = self.get_frame()
            if dir == 'left':
                yield (b'--stream_left\r\n'
                       b'Content-Type: image/jpeg\r\n\r\n' +
                       self.current_stream[0] + b'\r\n\r\n')
            if dir == 'right':
                yield (b'--stream_right\r\n'
                       b'Content-Type: image/jpeg\r\n\r\n' +
                       self.current_stream[1] + b'\r\n\r\n')
            if dir == 'disp':
                self.disp = self.get_disp()
                yield (b'--stream_right\r\n'
                       b'Content-Type: image/jpeg\r\n\r\n' + self.disp +
                       b'\r\n\r\n')
            if dir == 'depth':
                yield (b'--stream_depth\r\n'
                       b'Content-Type: image/jpeg\r\n\r\n' + self.haptic +
                       b'\r\n\r\n')
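
CircularBuffer and i2d are referenced above but not shown; below are minimal stand-ins inferred from how they are called. These are assumptions about their interfaces, not the original implementations:

import threading
from collections import deque

import cv2


class CircularBuffer:
    # Fixed-size FIFO: enqueue silently drops the oldest item when full (sketch).
    def __init__(self, fixed_size):
        self._items = deque(maxlen=fixed_size)
        self._lock = threading.Lock()

    def enqueue(self, item):
        with self._lock:
            self._items.append(item)

    def dequeue(self):
        with self._lock:
            return self._items.popleft() if self._items else (None, None)


def i2d(stereo_pair, num_disparities=16):
    # Block-matching disparity from a (left, right) BGR frame pair (sketch).
    left_gray = cv2.cvtColor(stereo_pair[0], cv2.COLOR_BGR2GRAY)
    right_gray = cv2.cvtColor(stereo_pair[1], cv2.COLOR_BGR2GRAY)
    matcher = cv2.StereoBM_create(numDisparities=num_disparities, blockSize=15)
    return matcher.compute(left_gray, right_gray)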
Ejemplo n.º 24
0
    gd = np.array(([0.2163, 0.5674, 0.2163]), dtype="float32")  # norm = 1
    # compute component
    dx = cv2.sepFilter2D(add, cv2.CV_32F, gd, dg)
    dy = cv2.sepFilter2D(add, cv2.CV_32F, dg, gd)
    # compute mag ang direction
    mag, angle = cv2.cartToPolar(dy, dx)
    # compute mag mask
    velocity = cv2.bitwise_and(mag, mag, mask=diffMask)
    direction = cv2.bitwise_and(angle, angle, mask=diffMask) * 180 / np.pi / 2

    return diff, velocity, direction


# init camera
cam = WebcamVideoStream(src=0).start()
refFrame = cam.read()
refFrame = cv2.resize(refFrame, (320, 240))

# init integral
tau = 0.01
lastIntegral = refFrame

# compute position feature
xLoc = np.ones((refFrame.shape[0], refFrame.shape[1]))
yLoc = np.ones((refFrame.shape[0], refFrame.shape[1]))
xLoc[:, :] = xLoc[:, :] * np.arange(refFrame.shape[1])
yLoc[:, :] = yLoc[:, :] * np.arange(refFrame.shape[0]).reshape((-1, 1))

while True:
    # get current frame
    curFrame = cam.read()
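
The snippet stops right after grabbing the next frame; given tau and lastIntegral initialised above, a plausible next step is a leaky (exponential) integration of the incoming frames. This is a sketch, not the original continuation:

    curFrame = cv2.resize(curFrame, (320, 240))
    # leaky integrator: blend the new frame into the running integral
    lastIntegral = cv2.addWeighted(curFrame.astype("float32"), tau,
                                   lastIntegral.astype("float32"), 1.0 - tau, 0.0)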
Ejemplo n.º 25
0
    with open('/home/pi/Desktop/Apli2/Results.csv', mode='a') as results_read:
        results_write = csv.writer(results_read,
                                   delimiter=',',
                                   quotechar='"',
                                   quoting=csv.QUOTE_MINIMAL)
        write_log = results_write.writerow(
            [Fecha(), Hora(), getTemp(),
             Mascarilla(), Acceso])
        return write_log


cap = WebcamVideoStream(src=0).start()
fourcc = cv2.VideoWriter_fourcc(*'XVID')
status = True
while status:
    img_raw = cap.read()
    img_raw = img_raw[0:640, 190:460]
    img_raw = cv2.cvtColor(img_raw, cv2.COLOR_BGR2RGB)
    if (status):
        inference(img_raw,
                  conf_thresh=0.5,
                  iou_thresh=0.5,
                  target_shape=(260, 260),
                  draw_result=True,
                  show_result=False)
    cv2.putText(img_raw, "%.2f C" % getTemp(), (60, 450),
                cv2.FONT_HERSHEY_SIMPLEX, 1.3, (255, 255, 255))
    if (getTemp() > 35 and getTemp() < 37 and MASK == 1):
        GPIO.output(20, True)
        GPIO.output(21, False)
        check1 = check1 + 1
Ejemplo n.º 26
0
class VideoCamera(object):
    def __init__(self):
        # Using OpenCV to capture from device 0. If you have trouble capturing
        # from a webcam, comment the line below out and use a video file
        # instead.

        self.stream = WebcamVideoStream(src=0).start()
        with open("trained_knn_model.clf", 'rb') as f:
            self.knn_clf = pickle.load(f)

    def __del__(self):
        self.stream.stop()

    def predict(self, frame, knn_clf, distance_threshold=0.4):
        # Find face locations
        X_face_locations = face_recognition.face_locations(frame)
        # print("X_face_locations",X_face_locations[0])
        # X_face_locations[0][0]: X_face_locations[0][1], X_face_locations[0][2]: X_face_locations[0][3]
        # try:
        #     print("here")
        #     cv2.imshow("fdgd",frame[57:304,242:118])
        #     cv2.waitKey(1)
        # except:
        #     pass
        # If no faces are found in the image, return an empty result.
        if len(X_face_locations) == 0:
            return []

        # Find encodings for faces in the test image
        faces_encodings = face_recognition.face_encodings(
            frame, known_face_locations=X_face_locations)

        # Use the KNN model to find the best matches for the test face
        closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)
        are_matches = [
            closest_distances[0][i][0] <= distance_threshold
            for i in range(len(X_face_locations))
        ]
        for i in range(len(X_face_locations)):
            print("closest_distances")
            print(closest_distances[0][i][0])

        # Predict classes and remove classifications that aren't within the threshold
        return [(pred, loc) if rec else ("unknown", loc)
                for pred, loc, rec in zip(knn_clf.predict(faces_encodings),
                                          X_face_locations, are_matches)]

    def get_frame(self):
        image = self.stream.read()
        li = []
        global person_name
        # We are using Motion JPEG, but OpenCV defaults to capture raw images,
        # so we must encode it into JPEG in order to correctly display the
        # video stream.
        f = open("trainStatus.txt")
        for i in f:
            isTrained = int(i)
        if isTrained:  # change updated model file
            # load again
            with open("trained_knn_model.clf", 'rb') as f:
                self.knn_clf = pickle.load(f)
            file = open("trainStatus.txt", "w")
            file.write("0")
            file.close()
        predictions = self.predict(image, self.knn_clf)
        name = ''
        for name, (top, right, bottom, left) in predictions:
            startX = int(left)
            startY = int(top)
            endX = int(right)
            endY = int(bottom)

            cv2.rectangle(image, (startX, startY), (endX, endY), (0, 255, 0),
                          2)
            cv2.putText(image, name, (endX - 70, endY - 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)

        ret, jpeg = cv2.imencode('.jpg', image)
        data = []
        data.append(jpeg.tobytes())
        data.append(name)
        return data
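
The class loads trained_knn_model.clf, but the training step is not shown; one plausible way to produce such a file with face_recognition and scikit-learn is sketched below (this is illustrative, not the original training code):

import pickle

import face_recognition
from sklearn.neighbors import KNeighborsClassifier


def train_knn(image_paths, labels, model_path="trained_knn_model.clf", n_neighbors=1):
    # Encode one face per training image and fit a KNN classifier on the encodings (sketch).
    encodings, kept_labels = [], []
    for path, label in zip(image_paths, labels):
        image = face_recognition.load_image_file(path)
        boxes = face_recognition.face_locations(image)
        if len(boxes) == 1:  # skip images that do not contain exactly one face
            encodings.append(face_recognition.face_encodings(image, known_face_locations=boxes)[0])
            kept_labels.append(label)
    knn_clf = KNeighborsClassifier(n_neighbors=n_neighbors)
    knn_clf.fit(encodings, kept_labels)
    with open(model_path, "wb") as f:
        pickle.dump(knn_clf, f)
    return knn_clf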
Ejemplo n.º 27
0
 print("[info] starting to read a webcam ...")
 capWebCam = WebcamVideoStream(0).start()
 time.sleep(1.0)
 
 # start the frame per second  (FPS) counter
 fps = FPS2().start() 
 
 
 
 # loop over the frames obtained from the webcam
 while True:
     # grab each frame from the threaded  stream,
     # resize
     # it, and convert it to grayscale (while still retaining 3
     # channels)
     frame1 = capWebCam.read()
     frame = cv2.flip(frame1,1)
     #frame = imutils.resize(frame, width=450)
     #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
     #frame = np.dstack([frame, frame, frame])
     
     # display the size of the queue on the frame
     #cv2.putText(frame, "Queue Size: {}".format(fvs.Q.qsize()),
     #            (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
     
     
     fps.update()
     cv2.putText(frame, "FPS: {:.2f}".format(fps.fps()),
                 (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
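     # The loop is truncated here in the original; a plausible ending (a sketch, not
     # the original code), following the show / quit / cleanup pattern of the other
     # examples on this page. It assumes FPS2 mirrors the usual FPS stop() interface.
     cv2.imshow("frame", frame)
     if cv2.waitKey(1) & 0xFF == ord('q'):
         break

 # stop the timer and release the threaded stream
 fps.stop()
 capWebCam.stop()
 cv2.destroyAllWindows()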
     
     
Ejemplo n.º 28
0
    # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of the blob
    im_with_keypoints = cv2.drawKeypoints(img, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    # Show keypoints
    cv2.imshow("Keypoints", im_with_keypoints)
    cv2.waitKey(1)

LIVE_STREAM = "http://192.168.77.81:8080/video"

camera_stream = WebcamVideoStream(src=LIVE_STREAM).start()


framecount = 0
while camera_stream.stream.isOpened():
    framecount += 1
    frame = cv2.cvtColor(camera_stream.read(), cv2.COLOR_BGR2GRAY)
    img = Image.fromarray(frame)
    img_out = pillowfight.swt(img, output_type=pillowfight.SWT_OUTPUT_ORIGINAL_BOXES)
    print(tesserocr.image_to_text(img))

    cv2.imshow("", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

camera_stream.stop()
cv2.destroyAllWindows()


#image = cv2.imread("img1.jpg")
#image = cv2.resize(image,(320,240))
#captch_ex(image)
Ejemplo n.º 29
0
class App:
    def __init__(self, master, res):
        #Grab the camera ip from config
        self.cam_ip = config['cam_ip']

        #Pass the parameter master to variable self.master
        self.master = master

        #Sets the icon and titlebar text (Quite unnecessary but looks better)
        self.master.iconbitmap('{0}/warden.ico'.format(
            config['content_folder']))

        #key binding definition for f key to fullscreen function
        self.master.bind('<Key>', self.event_handler)

        #Fullscreen status checks the fullscreen status when key or mouse enters/exits fullscreen
        self.fullscreen_status = False

        #set the titlebar text
        self.master.title("Warden Control Panel")

        #Configure the master frame's grid
        self.master.grid_rowconfigure(0, weight=1)
        self.master.grid_rowconfigure(1, weight=1)
        self.master.grid_columnconfigure(0, weight=40)
        self.master.grid_columnconfigure(1, weight=28)

        self.master.grid_propagate(False)

        #Set the opening resolution, and minsize if required
        #self.master.minsize(res[1],res[0])
        self.master.geometry(str(res[1]) + "x" + str(res[0]))

        #####Set 4 frames and put them into the grid (grid contains 4 equal boxes)#####
        ###############################################################################

        ###Frame 1 will hold the VIDEO STREAM###
        self.frame1 = Frame(self.master)
        self.frame1.grid(column=0, row=0, sticky='nsew')
        self.frame1.grid_rowconfigure(0, weight=1)
        self.frame1.grid_columnconfigure(0, weight=1)

        #create video frame within frame1, i.e. a tkinter Label
        self.video_frame = Label(self.frame1,
                                 width=self.frame1.winfo_width(),
                                 height=self.frame1.winfo_height(),
                                 background="white",
                                 relief=RIDGE)
        #Label 'sticks' to top, bottom, left and right
        self.video_frame.grid(sticky='nsew')

        #bind double mouse click to the video frame
        self.video_frame.bind('<Double-Button>', self.event_handler)

        #Picture to display when no video present
        self.vid_dis_img_path = '{0}/vid_disconnect.jpg'.format(
            config['content_folder'])
        #initialise as default image to show
        self.vid_show_image = Image.open(self.vid_dis_img_path)

        self.vid_ratio = [16, 9]

        ###Frame 2 SWITCH / CONTROL PANEL SECTION###
        self.frame2 = Frame(self.master, background='black')
        self.frame2.grid(column=1, row=0, sticky='nsew')
        self.frame2.grid_rowconfigure(0, weight=1)
        self.frame2.grid_columnconfigure(0, weight=1)

        self.switchpanel_ratio = [16, 9]

        #auto switch picture
        self.auto_img_raw = Image.open('{0}/auto_button.png'.format(
            config['content_folder']))
        # self.auto_img_raw = self.auto_img_raw.resize((170,60),Image.ANTIALIAS)
        self.auto_photo = ImageTk.PhotoImage(self.auto_img_raw)

        #manual switch picture
        self.manual_img_raw = Image.open('{0}/manual_button.png'.format(
            config['content_folder']))
        # self.manual_img_raw = self.manual_img_raw.resize((170,60),Image.ANTIALIAS)
        self.manual_photo = ImageTk.PhotoImage(self.manual_img_raw)

        #Panel auto/manual switch initialise
        # self.switch_canvas = Canvas(self.switch_panel,bg='white',height=self.frame2.winfo_height()/5,width=self.frame2.winfo_width()/3)
        # self.switch_canvas.grid()

        #self.switch = tk.Button(self.switch_canvas,width=1,height=1,command=self.switch_handler,relief=FLAT,activebackground='black')
        self.switch = tk.Button(self.frame2,
                                width=1,
                                height=1,
                                command=self.switch_handler,
                                relief=FLAT,
                                activebackground='black')
        self.switch.configure(command=self.switch_handler)
        self.switch.grid()
        self.switch.configure(image=self.manual_photo, bg='black')

        # switch_window = self.switch_panel.create_window(1,1,window=self.switch)

        #initialise the control_status
        data_send["control_status"] = "manual"
        data_receive["control_status"] = "manual"

        #data file remote and local paths, (datafile used only by the switch but could be expanded at a later date)
        self.data_local = config['content_folder'] + "/" + config['data_local']
        self.data_remote = config['data_remote']

        ###frame 3 contains the CONSOLE###
        self.frame3 = Frame(self.master, background="black")
        self.frame3.grid(column=0, row=1, sticky='nsew')
        self.frame3.grid_rowconfigure(0, weight=1)
        self.frame3.grid_columnconfigure(0, weight=1)

        #Create text area with scrollbar within frame 3
        self.textArea = tkst.ScrolledText(self.frame3,
                                          wrap=WORD,
                                          foreground="black",
                                          background="white",
                                          width=-1,
                                          height=-1,
                                          state=DISABLED)
        self.textArea.grid(sticky='nsew')

        #initilaise console variables
        self.console_file_exists = False
        self.console_list = []
        self.console_modified_time = 0

        #Set the console local and remote paths
        self.console_remote = config['console_remote']
        self.console_local = config['content_folder'] + "/" + config[
            'console_local']

        ###Frame 4 DEPTHMAP###
        self.frame4 = Frame(self.master, background="black")
        self.frame4.grid(column=1, row=1, sticky='nsew')
        self.frame4.grid_rowconfigure(0, weight=1)
        self.frame4.grid_columnconfigure(0, weight=1)

        self.depthmap_frame = Label(self.frame4,
                                    width=self.frame4.winfo_width(),
                                    height=self.frame4.winfo_height(),
                                    background="white")
        self.depthmap_frame.grid(sticky='nsew')

        self.depthmap_modified_time = 0
        self.depthmap_file_exists = False
        self.depthmap_img_remote = config['depthmap_file_path_remote']
        self.depthmap_img_local = './' + config['content_folder'] + config[
            'depthmap_file_path_local']

        self.depthmap_dis_img_path = '{0}/vid_disconnect.jpg'.format(
            config['content_folder'])
        self.depthmap_show_image = Image.open(self.depthmap_dis_img_path)

        self.depthmap_ratio = [16, 9]

        #delete local data/log files
        try:
            os.remove(self.depthmap_img_local)
            os.remove(self.console_local)
        except:
            pass
        ##create instance of other classes
        self.connect = Connect(config)

        #Start functions/threads:-
        self.write_to_console()
        # self.draw()
        # self.video_feed()
        # self.video_feed_initialiser()
        # self.depmap_update()

        #Update the depthmap thread
        self.depmap_thread = threading.Thread(target=self.depmap_update,
                                              args=())
        self.depmap_thread.daemon = True
        self.depmap_thread.start()

        #Video feed thread
        self.video_feed_thread = Thread(target=self.video_feed, args=())
        self.video_feed_thread.daemon = True
        self.video_feed_thread.start()

        #Video feed initialiser thread
        self.video_feed_init_thread = Thread(
            target=self.video_feed_initialiser, args=())
        self.video_feed_init_thread.daemon = True
        self.video_feed_init_thread.start()

        #draw thread
        self.draw_thread = Thread(target=self.draw, args=())
        self.draw_thread.daemon = True
        self.draw_thread.start()

    ###Class functions###################################################

    def resize(self, image, frame, ratio):
        if frame.winfo_width() > 1:
            if frame.winfo_height() < int(
                (frame.winfo_width() * (ratio[1] / ratio[0]))):
                image = image.resize(
                    (int(frame.winfo_height() *
                         (ratio[0] / ratio[1])), frame.winfo_height()),
                    Image.ANTIALIAS)
            else:
                image = image.resize(
                    (frame.winfo_width(),
                     int(frame.winfo_width() * (ratio[1] / ratio[0]))),
                    Image.ANTIALIAS)
            return image
        else:
            return image

    def draw(self):
        self.draw_list = [{
            'exists': self.depthmap_file_exists,
            'frame': self.depthmap_frame,
            'image': self.depthmap_show_image,
            'disconnect_image': self.depthmap_dis_img_path,
            'ratio': self.depthmap_ratio
        }, {
            'exists': self.depthmap_file_exists,
            'frame': self.video_frame,
            'image': self.vid_show_image,
            'disconnect_image': self.vid_dis_img_path,
            'ratio': self.vid_ratio
        }]

        #loops through and draw each item in the list
        for item in self.draw_list:
            if (item['image'].width != item['frame'].winfo_width()) or (
                    item['image'].height != item['frame'].winfo_height()):
                if not item['exists']:
                    item['image'] = Image.open(item['disconnect_image'])

                self.temp_draw_image = self.resize(item['image'],
                                                   item['frame'],
                                                   item['ratio'])
                item['image'] = ImageTk.PhotoImage(self.temp_draw_image)
            item['frame'].config(image=item['image'])

            #Drawing and resizing the button
            self.switch.configure(height=int(self.frame2.winfo_height() / 5),
                                  width=int(self.frame2.winfo_width() / 3))
            if data_receive[
                    "control_status"] == "manual" and self.frame2.winfo_height(
                    ) > 1:
                self.manual_img_raw = Image.open(
                    '{0}/manual_button.png'.format(config['content_folder']))
                self.manual_img_raw = self.manual_img_raw.resize(
                    (int(self.frame2.winfo_width() / 3),
                     int(self.frame2.winfo_height() / 5)), Image.ANTIALIAS)
                self.manual_photo = ImageTk.PhotoImage(self.manual_img_raw)
                self.switch.configure(image=self.manual_photo)
            elif self.frame2.winfo_height() > 1:
                self.auto_img_raw = Image.open('{0}/auto_button.png'.format(
                    config['content_folder']))
                self.auto_img_raw = self.auto_img_raw.resize(
                    (int(self.frame2.winfo_width() / 3),
                     int(self.frame2.winfo_height() / 5)), Image.ANTIALIAS)
                self.auto_photo = ImageTk.PhotoImage(self.auto_img_raw)
                self.switch.configure(image=self.auto_photo)

        self.switch.grid()

        self.master.after(100, self.draw)

    def depmap_update(self):
        file_data = self.connect.get_file(self.depthmap_img_remote,
                                          self.depthmap_img_local,
                                          self.depthmap_modified_time)
        if file_data != None:
            if file_data['file_exists']:
                self.depthmap_frame.configure(bg='black')
                if file_data[
                        'modified_time'] != self.depthmap_modified_time and file_data[
                            'file_size'] != 0:
                    self.depthmap_modified_time = file_data['modified_time']
                self.depthmap_file_exists = True
            try:
                self.depthmap_show_image = Image.open(self.depthmap_img_local)
            except:
                self.depthmap_file_exists = False
                self.depthmap_frame.configure(bg='white')
        self.master.after(1000, self.depmap_update)

    def write_to_console(self):
        file_data = self.connect.get_file(self.console_remote,
                                          self.console_local,
                                          self.console_modified_time)
        if file_data and file_data['file_exists']:
            file = open(self.console_local).readlines()
            self.console_modified_time = file_data['modified_time']
            file_length = len(file)
            #Restrict to only printing tailing 100 lines
            if file_length > 100:
                file = file[file_length - 101:len(file)]
            #Print lines to the text area
            try:
                self.textArea.config(state=NORMAL)
                self.textArea.delete(1.0, END)
                self.textArea.config(state=DISABLED)
                for item in file:
                    self.textArea.config(state=NORMAL)
                    self.textArea.insert(END, item)
                    self.textArea.config(state=DISABLED)
                self.textArea.see("end")

            except Exception as msg:
                print("Exception in console printing: " + str(msg))
        else:
            self.textArea.config(state=NORMAL)
            self.textArea.delete('1.0', END)
            self.textArea.insert(INSERT, "Waiting for console data")
            self.textArea.config(state=DISABLED)
            self.console_modified_time = 0
        self.master.after(1000, self.write_to_console)

    def video_feed_initialiser(self):
        if not self.connect.video_ready:
            #print("video feed initilaiser has recognised that self.video_ready is false")
            if self.connect.video_conn_ready:
                #print("attempting to reconnect the video connection")
                self.vs = WebcamVideoStream(
                    src="rtsp://" + self.cam_ip +
                    ":554/user=admin&password=&channel=1&stream=0.sdp?real_stream"
                ).start()
                if str(self.vs.read()) != 'None':
                    #print("setting video_ready to true (self.vs is not none)")
                    self.connect.video_ready = True
        self.master.after(1000, self.video_feed_initialiser)

    def video_feed(self):
        if self.connect.video_ready:
            self.video_frame.configure(bg='black')
            try:
                frame = self.vs.read()  #read the next frame
                cvimage = cv.cvtColor(
                    frame, cv.COLOR_BGR2RGBA)  #colours picture correctly
                self.vid_show_image = Image.fromarray(
                    cvimage)  #PIL, processes the matrix array
            except:
                self.video_frame.configure(bg='white')
        self.master.after(
            100, self.video_feed
        )  # cause the function to be called after X milliseconds

    def switch_handler(self):
        if self.connect.ssh_ready:
            if data_send["control_status"] == "manual":
                #self.switch.configure(image=self.auto_photo)
                data_send["control_status"] = "automatic"
            else:
                #self.switch.configure(image=self.manual_photo)
                data_send["control_status"] = "manual"
            #open/create the data file locally and get handle
            try:
                file = open(self.data_local, 'w')
            except IOError:
                file = open(self.data_local, 'w+')

            #Empty the file
            file.seek(0)
            file.truncate()
            #print the contents of the dictionary to lines such as control_status=manual
            for k, v in data_send.items():
                file.write("{0}={1}".format(k, v))
                file.write("\n")
            file.close()

            #send the data
            try:
                #put the file to the remote location
                self.connect.put_file(self.data_local, self.data_remote)

                #read the raw remote data from file
                remote_data = self.connect.get_file_lines(self.data_remote)

                #put the remote data from remote file into data_receive dictionary
                for item in remote_data:
                    parts = item.split('=')
                    data_receive[parts[0].strip()] = parts[1].strip()

            except Exception as msg:
                print(msg)

            #final check makes sure that what is in the remote location is reflected locally
            if data_receive['control_status'] != data_send['control_status']:
                if data_receive['control_status'] == 'manual':
                    data_send['control_status'] = data_receive[
                        'control_status']
                    #self.switch.configure(image=self.manual_photo)
                if data_receive['control_status'] == 'automatic':
                    data_send['control_status'] = data_receive[
                        'control_status']
                    #self.switch.configure(image=self.auto_photo)

    def fullscreen(self, event=None):
        if event.char == 'f' or event.num == 1 or event.keysym == 'Escape':
            if self.fullscreen_status == False and (event.char == 'f'
                                                    or event.num == 1):
                self.frame1.grid(columnspan=2, rowspan=2)
                self.frame1.lift()
                self.fullscreen_status = True
            else:
                self.frame1.grid(columnspan=1, rowspan=1)
                self.fullscreen_status = False

    def event_handler(self, event=None):
        if event.char == 'f' or event.keysym == 'Escape' or event.num == 1:
            self.fullscreen(event)
        if event.char == 'q':
            pass
Ejemplo n.º 30
0
class ObjectDetection:
    def __init__(self, id): 
        # self.cap = cv2.VideoCapture(id)
        self.cap = WebcamVideoStream(src = id).start()
        self.cfgfile = "cfg/yolov4-tiny.cfg"
        self.weightsfile = "yolov4-tiny.weights"
        self.confidence = float(0.6)
        self.nms_thesh = float(0.8)
        self.num_classes = 1
        self.classes = load_classes('data/butts.names')
        self.colors = pkl.load(open("pallete", "rb"))
        self.model = Darknet(self.cfgfile)
        self.CUDA = torch.cuda.is_available()
        self.model.load_weights(self.weightsfile)
        self.width = 1280 #640#1280
        self.height = 720 #360#720
        print("Loading network.....")
        if self.CUDA:
            self.model.cuda()
        print("Network successfully loaded")

        self.model.eval()

    def main(self):
        q = queue.Queue()
        while True:
            def frame_render(queue_from_cam):
                frame = self.cap.read() # If you capture the stream using OpenCV (cv2.VideoCapture()), use the following line instead:
                # ret, frame = self.cap.read()
                frame = cv2.resize(frame,(self.width, self.height))
                queue_from_cam.put(frame)
            cam = threading.Thread(target=frame_render, args=(q,))
            cam.start()
            cam.join()
            frame = q.get()
            q.task_done()
            fps = FPS().start() 
            
            try:
                img, orig_im, dim = prep_image(frame, 160)
                
                im_dim = torch.FloatTensor(dim).repeat(1,2)
                if self.CUDA:                            #### If you have a gpu properly installed then it will run on the gpu
                    im_dim = im_dim.cuda()
                    img = img.cuda()
                # with torch.no_grad():               #### Set the model in the evaluation mode
                
                output = self.model(img)
                from tool.utils import post_processing,plot_boxes_cv2
                bounding_boxes = post_processing(img,self.confidence, self.nms_thesh, output)
                frame = plot_boxes_cv2(frame, bounding_boxes[0], savename= None, class_names=self.classes, color = None, colors=self.colors)

            except:
                pass
            
            fps.update()
            fps.stop()
            print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
            print("[INFO] approx. FPS: {:.1f}".format(fps.fps()))
            
            cv2.imshow("Object Detection Window", frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            continue
            torch.cuda.empty_cache()
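A minimal way to run the detector above could look like the following; the entry point and camera index 0 are assumptions, not part of the original example:

# Hypothetical entry point for the ObjectDetection class above;
# camera index 0 is assumed to be the default webcam.
if __name__ == "__main__":
    detector = ObjectDetection(0)
    detector.main()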
Ejemplo n.º 31
0
Archivo: yolo.py Proyecto: joe660/YOLOF
                        except:
                            if FLAGS.verbose:
                                print("[INFO] not able to perform")
                frame_prev = frame
            if writer is None:
                # Initialize the video writer
                fourcc = cv.VideoWriter_fourcc(*"MJPG")
                writer = cv.VideoWriter(FLAGS.video_output_path, fourcc, 30,
                                (matrix.shape[1], matrix.shape[0]), True)
            writer.write(matrix)
        print("[INFO] Cleaning up...")
        writer.release()
else:
    # vid = cv.VideoCapture(0)
    stream = WebcamVideoStream(src=0).start()  # default camera
    frame_prev = stream.read()
    frame_prev = cv.resize(frame_prev, None, fx=0.5, fy=0.5, interpolation=cv.INTER_LINEAR)
    previous_of = None
    while True:
        # _, frame = vid.read()
        frame = stream.read()
        frame = cv.resize(frame, None, fx=0.5, fy=0.5, interpolation=cv.INTER_LINEAR)
        height, width = frame.shape[:2]
        matrix = np.zeros_like(frame, dtype=np.uint8)
        if count == 0:
            start = time.time()
            frame, boxes, confidences, classids, idxs = infer_image(net, layer_names,
                                                                    height, width, frame, colors, labels, FLAGS)
            end = time.time()
            count += 1
            if FLAGS.verbose:
Ejemplo n.º 32
0
autoFile.close()
fs = False

#Initialize camera

blank_image = np.zeros((30, 100, 3), np.uint8)
agi = 1
farCenterBuff = 1
lastTryAngle = 0
moveTry = 0
allTry = 0
LaneArea = 100
while True:

    angle = 0
    frame = video_capture.read()
    orig = imutils.resize(frame, width=400)
    frame = orig
    imgHSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    imageGreen = cv2.inRange(imgHSV, np.array([40, 100, 150], np.uint8),
                             np.array([80, 255, 255], np.uint8))

    ### START LANE DETECTION
    # Note: three-value unpacking matches OpenCV 3.x; OpenCV 4.x returns only
    # (contours, hierarchy).
    _, contours, hierarchy = cv2.findContours(imageGreen, cv2.RETR_TREE,
                                              cv2.CHAIN_APPROX_SIMPLE)
    areas = [cv2.contourArea(c) for c in contours]

    if np.any(areas):
        allTry = 1
        max_index = np.argmax(areas)
        cnt = contours[max_index]
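The example stops after selecting the largest green contour. One common continuation, shown here only as a hedged sketch (compute_steering_angle is a hypothetical helper, not the original code), is to derive a steering angle from the horizontal offset of the contour centroid:

# Hedged sketch: steering angle from the centroid of the largest lane contour.
import math

def compute_steering_angle(cnt, frame_width):
    M = cv2.moments(cnt)
    if M["m00"] == 0:
        return 0.0
    cx = int(M["m10"] / M["m00"])            # contour centroid x
    offset = cx - frame_width / 2.0          # horizontal offset from image centre
    return math.degrees(math.atan2(offset, frame_width))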
Ejemplo n.º 33
0
    vs = cv2.VideoCapture(0)

if SHOW_FPS:
    fps_caption = "FPS: 0"
    fps_counter = 0
    start_time = time.time()

SCREEN_NAME = 'Mask RCNN LIVE'
cv2.namedWindow(SCREEN_NAME, cv2.WINDOW_NORMAL)
cv2.setWindowProperty(SCREEN_NAME, cv2.WND_PROP_FULLSCREEN,
                      cv2.WINDOW_FULLSCREEN)

while True:
    # Capture frame-by-frame
    if OPTIMIZE_CAM:
        frame = vs.read()
    else:
        grabbed, frame = vs.read()
        if not grabbed:
            break

    if SHOW_FPS_WO_COUNTER:
        start_time = time.time()  # start time of the loop

    if PROCESS_IMG:
        results = model.detect([frame])
        r = results[0]

        # Run detection
        masked_image = visualize.display_instances_10fps(frame,
                                                         r['rois'],
Ejemplo n.º 34
0
def main():
	points = []
	ticks = str(int(time.time() * 1000))
	# construct the argument parse and parse the arguments
	ap = argparse.ArgumentParser()
	ap.add_argument("-v", "--video",
		help="path to the (optional) video file")
	ap.add_argument("-b", "--buffer", type=int, default=64,
		help="max buffer size")
	args = vars(ap.parse_args())

	# define the lower and upper boundaries of the ball colour
	# (blue, returned by getBallColor) in the HSV color space,
	# then initialize the list of tracked points
	ballLower, ballUpper = getBallColor()

	pts = deque(maxlen=args["buffer"])

	# create a *threaded* video stream, allow the camera sensor to warm up,
	# and start the FPS counter
	debugPrint("[INFO] sampling THREADED frames from webcam...")
	vs = WebcamVideoStream(src=1).start()
	frame = vs.read()
	if frame is None:
		debugPrint("Changed to default camera!")
		# Take the default camera (webcam is not connected)
		vs = WebcamVideoStream(src=0).start()

	if __ENABLE_VIDEO_OUT__:
		outStream = cv2.VideoWriter('Output/output' + ticks +'.avi', -1, 20.0, (1000,705))

	eventHook = EventHook()

	# keep looping
	while True:
		# grab the current frame
		frame = vs.read()

		if frame is None:
			continue
		
		# resize the frame, blur it, and convert it to the HSV
		# color space
		frame = imutils.resize(frame, width=1000)
		frame = frame[40:745,0:1000]

		# blurred = cv2.GaussianBlur(frame, (11, 11), 0)
		hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

		# construct a mask for the ball colour, then perform
		# a series of dilations and erosions to remove any small
		# blobs left in the mask
		mask = cv2.inRange(hsv, ballLower, ballUpper)
		mask = cv2.erode(mask, None, iterations=2)
		mask = cv2.dilate(mask, None, iterations=2)

		# find contours in the mask and initialize the current
		# (x, y) center of the ball
		cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
			cv2.CHAIN_APPROX_SIMPLE)[-2]
		center = None

		# only proceed if at least one contour was found
		if len(cnts) > 0:
			# find the largest contour in the mask, then use
			# it to compute the minimum enclosing circle and
			# centroid
			c = max(cnts, key=cv2.contourArea)
			((x, y), radius) = cv2.minEnclosingCircle(c)
			M = cv2.moments(c)
			center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

			# only proceed if the radius meets a minimum size
			if radius > 5: #Changed from 10
				# draw the circle and centroid on the frame,
				# then update the list of tracked points
				cv2.circle(frame, (int(x), int(y)), int(radius),
					(0, 255, 255), 2)
				cv2.circle(frame, center, 5, (0, 0, 255), -1)

		# update the points queue
		pts.appendleft(center)

		debugPrint(center)
		if center is None:
			points.append((-1,-1))
		else:
			points.append(center)

		# loop over the set of tracked points
		for i in xrange(1, len(pts)):
			# if either of the tracked points are None, ignore
			# them
			if pts[i - 1] is None or pts[i] is None:
				continue

			# otherwise, compute the thickness of the line and
			# draw the connecting lines
			thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
			cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)

		# show the frame to our screen
		cv2.imshow("Frame", frame)
		
		if __ENABLE_VIDEO_OUT__:
			outStream.write(frame)

		# add replay frame
		addReplayFrame(frame)
		
		key = cv2.waitKey(1) & 0xFF

		# if the 'q' key is pressed, stop the loop
		if key == ord("q"):
			break

		if center is None:
			if eventHook.fire([(-1,-1)]):
				saveGoalVideo()
		else:
			normalizePoint = (int(2000*float(center[0]/1000.0)),int(1000*float(center[1]/705.0)))
			if eventHook.fire([normalizePoint]):
				saveGoalVideo()

			debugPrint(normalizePoint)

	# cleanup the camera and close any open windows
	cv2.destroyAllWindows()
	vs.stop()
	savePtsVector(points,ticks)
	if __ENABLE_VIDEO_OUT__:
		outStream.release()
	raise SystemExit
Ejemplo n.º 35
0
vs = WebcamVideoStream(src=0).start() #start threading

cv2.namedWindow("video", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("video", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

print "Inicializando Camera..." #Initializing Camera
sleep(0.5)
print "Ajustando exposicao..." #Adjusting Exposition - Otherwise camera
#could start darker
sleep(0.5)
GPIO.output(26,GPIO.LOW)
print "Running..."

try:
    vid_height, vid_width = vs.read().shape[:2]
except:
    print "Camera nao encontrada. Reconecte a camera e tente novamente."
    sys.exit();

icon_width = int(vid_width*screen_height*0.1/vid_height)
icon_height = int(screen_height*0.1)
vid_width = int(vid_width*screen_height*0.9/vid_height)
vid_height = int(screen_height*0.9)
black = np.zeros((screen_height,screen_width,3), np.uint8)
overlay = np.zeros((vid_height,vid_width,3), np.uint8)

#layout drawing function
def layout():
    for i in range(0,9):
        cv2.rectangle(black,(i*icon_width,int(screen_height*0.9)),(icon_width + i*icon_width,screen_height),(255,255,255),3)
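The example ends with the icon-row layout helper. A hedged sketch of how a camera frame might be composed onto the black canvas sized above (the placement is an assumption; the original compositing code is not shown):

# Hedged sketch: paste the resized video frame into the top 90% of the canvas.
# 'frame' is a BGR image from vs.read(); the placement is assumed, not taken from the original.
def compose(frame, canvas, vid_width, vid_height):
    resized = cv2.resize(frame, (vid_width, vid_height))
    out = canvas.copy()
    out[0:vid_height, 0:vid_width] = resized  # video area above the icon row
    return out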
print ("UDP socket bound to %s" %(port))

#initialize the video stream and allow the camera sensor to warmup
print("Warming up camera")

if args["setting"]:
    os.system('qv4l2')

vs1 = WebcamVideoStream(src=1).start()
vs2 = WebcamVideoStream(src=2).start()

time.sleep(2.0)

#initialize the FourCC, video writer, dimensions of the frame, and zeros array
fps = args["fps"]
frame1 = vs1.read()
frame1 = imutils.resize(frame1, args["width"])
(h, w) = frame1.shape[:2]
print(h, w)
numframe = int(fps * args['length'] * 60)
print(numframe)
ts = None
toverflow = 0
record = False

#Pre-allocate one grayscale frame buffer per camera in memory
print("Allocating Memory")
c1 = np.zeros((numframe, h, w), dtype=np.uint8)
c2 = np.zeros((numframe, h, w), dtype=np.uint8)

fnm_save = newFile()
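The buffers above only reserve space for the recording. A hedged sketch of how the captured grayscale frames might later be flushed to disk with OpenCV's VideoWriter (the file naming and MJPG codec are assumptions, not the original code):

# Hedged sketch: write a pre-allocated grayscale frame buffer to an AVI file.
# fnm_save, fps, w and h come from the snippet above; the codec is an assumption.
def flush_buffer(buf, fnm_save, fps, w, h):
    fourcc = cv2.VideoWriter_fourcc(*"MJPG")
    out = cv2.VideoWriter(fnm_save + ".avi", fourcc, fps, (w, h), isColor=False)
    for frame in buf:
        out.write(frame)
    out.release()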
Ejemplo n.º 37
0
def main(display_history=True):
    #Window for past frames
    frames_diff_history = [(get_blank_frame_diff(), get_blank_frame_diff())
                           for i in range(frames_in_history)]
    last_eyes = None

    #Load model classifier
    classifier = Classifier()
    #Start thread to make predictions
    classifier.startPredictions()

    #Initialize webcam
    vs = WebcamVideoStream(src=0).start()

    #For FPS computation
    t0 = -1

    #Face/eyes detector
    detector = Detector()

    print "Starting eye recognition..."
    while True:

        #Compute FPS
        dt = time.time() - t0
        fps = 1 / dt
        t0 = time.time()

        #Limit FPS with wait
        waitMs = 5
        key = cv2.waitKey(waitMs) & 0xFF

        #Get image from webcam, convert to grayscale and resize
        full_frame = vs.read()
        full_frame = cv2.cvtColor(full_frame, cv2.COLOR_BGR2GRAY)
        frame = imutils.resize(full_frame, width=300)

        #Find face
        face_bb = detector.get_face(frame)
        if face_bb is None:
            #Invalidate eyes bounding box as all will change
            last_eyes = None
            detector.reset_eyes_bb()
            continue

        #Get low resolution face coordinates
        x, y, w, h = face_bb
        face = frame[y:y + h, x:x + w]

        #Apply to high resolution frame
        xScale = full_frame.shape[1] / frame.shape[1]
        yScale = full_frame.shape[0] / frame.shape[0]
        x, y, w, h = x * xScale, y * yScale, w * xScale, h * yScale
        fullFace = full_frame[y:y + h, x:x + w]

        #Find eyes on high resolution face
        eyes = detector.get_eyes(fullFace)
        if eyes is None:
            #Reset last eyes
            last_eyes = None
            continue

        eye_0, eye_1 = eyes

        #Process (normalize, resize)
        eye_0 = process(eye_0)
        eye_1 = process(eye_1)

        #Reshape for dataset
        eye_0 = np.reshape(eye_0, [dataset_img_size, dataset_img_size, 1])
        eye_1 = np.reshape(eye_1, [dataset_img_size, dataset_img_size, 1])

        #We have a recent picture of the eyes
        if last_eyes is not None:
            #Load previous eyes
            eye_0_previous, eye_1_previous = last_eyes

            #Compute diffs
            diff0 = get_difference_frame(eye_0, eye_0_previous)
            diff1 = get_difference_frame(eye_1, eye_1_previous)

            #Display/debug
            displayDiff = False
            if displayDiff:
                display_current_diff(eye_0,
                                     eye_1,
                                     eye_0_previous,
                                     eye_1_previous,
                                     stop_frame=False)

            #Crop beginning then add new to end
            frames_diff_history = frames_diff_history[1:]
            frames_diff_history.append([diff0, diff1])

        #Keep current as last frame
        last_eyes = [eye_0, eye_1]

        #Note: this is not time consuming
        if display_history:
            display_history_diffs(frames_diff_history, fps)

        #Extract each eyes
        X0, X1 = zip(*frames_diff_history)

        #Reshape as a tensor (NbExamples,SerieLength,Width,Height,Channels)
        X0 = np.reshape(X0, [
            -1,
            len(frames_diff_history), dataset_img_size, dataset_img_size, 1
        ])
        X1 = np.reshape(X1, [
            -1,
            len(frames_diff_history), dataset_img_size, dataset_img_size, 1
        ])

        #Save history to Classifier
        classifier.X0 = X0
        classifier.X1 = X1
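The loop ends by handing the latest diff tensors to the classifier; the Classifier class itself is not shown in this example. A hedged sketch of what its background prediction thread might look like (the model attribute and its predict call are assumptions about the unseen implementation):

# Hedged sketch of a polling prediction thread; 'model' and its predict()
# signature are assumptions, not the original Classifier implementation.
import threading
import time

class ClassifierSketch(object):
    def __init__(self, model=None):
        self.model = model
        self.X0 = None
        self.X1 = None
        self.last_prediction = None

    def startPredictions(self):
        t = threading.Thread(target=self._predict_loop)
        t.daemon = True
        t.start()

    def _predict_loop(self):
        while True:
            if self.model is not None and self.X0 is not None and self.X1 is not None:
                # Predict on the latest diff-history tensors written by the main loop
                self.last_prediction = self.model.predict([self.X0, self.X1])
            time.sleep(0.05)  # avoid busy-waiting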