Example #1
    def __init__(self, *args, **kwargs):
        """ Initialize the Redis store and framerate monitor. """

        super(SocketHandler, self).__init__(*args, **kwargs)
        self._store = redis.Redis()
        self._pfs = coils.RateTicker((1, 5, 10))
        self._prev_image_id = None
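The handler body is not shown in this excerpt. A minimal sketch of how `_prev_image_id` might be used, assuming `SocketHandler` is a Tornado-style WebSocket handler and the capture side writes 'image' and 'image_id' keys as in the later examples:

    def on_message(self, message):
        """Sketch only (assumed usage): push the latest frame when its ID changes."""
        image_id = self._store.get('image_id')
        if image_id != self._prev_image_id:
            self._prev_image_id = image_id
            # Send the JPEG bytes stored by the capture loop.
            self.write_message(self._store.get('image'), binary=True)
            print('{:.2f}, {:.2f}, {:.2f} fps'.format(*self._pfs.tick()))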
Example #2
    def __init__(self, *args, **kwargs):
        super(SocketHandler, self).__init__(*args, **kwargs)

        # Client to the socket server.
        self._map_client = coils.MapSockClient('0.0.0.0', 9002, encode=False)

        # Monitor the framerate at 1s, 5s, 10s intervals.
        self._fps = coils.RateTicker((1, 5, 10))
Example #3
def start_recording(url='0'):
    # Retrieve command line arguments.
    width = None if len(sys.argv) <= 1 else int(sys.argv[1])
    height = None if len(sys.argv) <= 2 else int(sys.argv[2])


    # Create video capture object, retrying until successful.
    if url.isdigit():
        cap = open_cap(int(url))
    else:
        cap = open_cap(url)

    print("cap is opened!")

    mfps = cap.get(cv2.CAP_PROP_FPS)

    # Create client to the Redis store.
    store = StrictRedis(host=redishost, port=6379, db=0)

    # Set video dimensions, if given.
    if width: cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    if height: cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

    # Monitor the framerate at 1s, 5s, 10s intervals.
    fps = coils.RateTicker((1, 5, 10))

    # Repeatedly capture current image,
    # encode, serialize and push to Redis database.
    # Then create unique ID, and push to database as well.
    print('Start Recording %s...' % url)
    while url in urls:
        ret, image = cap.read()
        while image is None:
            cap.release()
            if url.isdigit():
                cap = open_cap(int(url))
            else:
                cap = open_cap(url)
            ret, image = cap.read()
            print('Frame was None; reopened capture.')
        time.sleep(0.05)

        _, image = cv2.imencode('.jpg', image)
        value = image.tobytes()
        store.set('image_' + url, value)
        image_id = os.urandom(4)
        store.set('image_id_' + url, image_id)
        # Print the framerate.
        # text = '{:.2f}, {:.2f}, {:.2f} fps'.format(*fps.tick())
        # print(text)
    print('Stop Recording %s...' % url)
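Note that `open_cap` is not defined in this excerpt. A plausible sketch, assuming the same retry-with-backoff idea used in Examples #7 and #12:

import time

import cv2

def open_cap(source, max_sleep=2.0):
    """Keep retrying until the capture source opens, backing off up to max_sleep."""
    cur_sleep = 0.1
    while True:
        cap = cv2.VideoCapture(source)
        if cap.isOpened():
            return cap
        print('not opened, sleeping {}s'.format(cur_sleep))
        time.sleep(cur_sleep)
        cur_sleep = min(cur_sleep * 2, max_sleep)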
Example #4
    def writer(self, video_path, idx):

        cap = cv2.VideoCapture(video_path)

        # Monitor the framerate at 1s, 5s, 10s intervals.
        fps = coils.RateTicker((1, 5, 10))
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            h, w, c = frame.shape
            res_img = frame.reshape(1, w * h * c)
            enc_img = base64.b64encode(res_img).decode("utf-8")
            self.__store.set(cam_name + idx, enc_img)
            self.__store.set(shape_name + idx, pickle.dumps((h, w, c)))
            cv2.imshow('VIDEO TO REDIS', frame)
            cv2.waitKey(1)
            # Print the framerate.
            text = '{:.2f}, {:.2f}, {:.2f} fps'.format(*fps.tick())
            print(text)
Example #5
    def __init__(self, *args, **kwargs):
        super(SocketHandler, self).__init__(*args, **kwargs)

        # Client to the socket server.
        # self._map_client = coils.MapSockClient(host, port, encode=False)
        # Monitor the framerate at 1s, 5s, 10s intervals.
        self._fps = coils.RateTicker((1, 5, 10))

        self.now = time()
        self.speed = 0

        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(LANDMARK_FILE)

        self.rects = []
        self.shape = 0
        self.tvec = np.array([160, 120, -300])
        self.rvec = np.array([0, 0, 0])

        self.cam_w = 320
        self.cam_h = 240

        self.aT = np.array([2, 2, 1])
        self.bT = np.array([-320, -250, 0])

        self.aR = np.array([-1.0, 0.75, -1.])
        self.bR = np.array([0, 0, 0])

        self.poseEstimator = PnpHeadPoseEstimator(self.cam_w, self.cam_h)
        self.stream_processor = StreamProcessor(
            np.zeros((self.cam_w, self.cam_h)))

        self.computing = False
        self.rvecInc = 0
        self.tvecInc = 0
        self.found = None
        self.image = ""

        self.max_time = 0
Example #6
    def writer(self, video_path, idx):

        cap = cv2.VideoCapture(video_path)

        # Monitor the framerate at 1s, 5s, 10s intervals.
        fps = coils.RateTicker((1, 5, 10))
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            h, w, c = frame.shape
            # Flatten the frame into a single row before encoding.
            res_img = frame.reshape(1, w * h * c)
            # Base64-encode the flattened pixel buffer.
            enc_img = base64.b64encode(res_img).decode("utf-8")
            # Store the encoded image under the corresponding camera/video key.
            self.__store.set(cam_name + idx, enc_img)
            # Store the frame shape under the matching key, for decoding later.
            self.__store.set(shape_name + idx, pickle.dumps((h, w, c)))

            cv2.imshow('VIDEO TO REDIS', frame)
            cv2.waitKey(1)
            # Print the framerate.
            text = '{:.2f}, {:.2f}, {:.2f} fps'.format(*fps.tick())
            print(text)
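For reference, a matching reader would reverse the writer's steps: fetch both keys, Base64-decode the pixel buffer, and reshape it using the pickled shape. A minimal sketch (the `cam_name`/`shape_name` defaults below are assumptions; those names are defined elsewhere in the source):

import base64
import pickle

import numpy as np
import redis

def read_frame(store, idx, cam_name='camera', shape_name='shape'):
    """Fetch one frame stored by writer() and restore its original shape."""
    enc_img = store.get(cam_name + idx)
    h, w, c = pickle.loads(store.get(shape_name + idx))
    flat = np.frombuffer(base64.b64decode(enc_img), dtype=np.uint8)
    return flat.reshape(h, w, c)

# Usage (assumed key names):
# frame = read_frame(redis.Redis(), '0')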
Example #7
    if cur_sleep < max_sleep:
        cur_sleep *= 2
        cur_sleep = min(cur_sleep, max_sleep)
        continue
    cur_sleep = 0.1

# Create a client to the Redis store.
store = redis.Redis(host='<insert IP address>', port=6379)  # Insert the Redis host IP address.

# Set video dimensions, if given.
if width: cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
if height: cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)


# Monitor the framerate at 1s, 5s, 10s intervals.
fps = coils.RateTicker((1, 5, 10))

# Repeatedly capture current image, encode it, convert it to bytes and push
# it to Redis database. Then create unique ID, and push it to database as well.
try:
    while True:
        retval, frame = cap.read()
        if frame is None:
            time.sleep(0.5)
            continue
        retval, frame = cv2.imencode('.jpg', frame)  # frame is now a JPEG byte buffer.
        value = frame.tobytes()
        store.set('image', value)
        
        image_id = os.urandom(4)
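The consumer side of this pattern is not part of the excerpt; it would poll `image_id` and decode the JPEG bytes only when the ID changes. A minimal sketch, assuming the 'image'/'image_id' keys written above:

import time

import cv2
import numpy as np
import redis

store = redis.Redis(host='<insert IP address>', port=6379)
prev_id = None
while True:
    image_id = store.get('image_id')
    if image_id is None or image_id == prev_id:
        time.sleep(0.01)
        continue
    prev_id = image_id
    # Decode the JPEG buffer written by the capture loop.
    data = np.frombuffer(store.get('image'), dtype=np.uint8)
    frame = cv2.imdecode(data, cv2.IMREAD_COLOR)
    cv2.imshow('reader', frame)
    cv2.waitKey(1)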
Example #8
                image,
                scaleFactor=1.3,
                minNeighbors=3,
                minSize=tuple(x // 20 for x in size),
                maxSize=tuple(x // 2 for x in size),
            )
            if len(rects):
                for a, b, c, d in rects:
                    result.append((a, b, c, d, self._color))
        except Exception:
            print('Error in detector!')
        return result


# Monitor framerates for the given seconds past.
framerate = coils.RateTicker((2, ))


class Postprocessor(mpipe.OrderedWorker):
    def doTask(self, task):
        """Augment the input image with results of processing."""
        tstamp, rects = task
        # Make a flat list from a list of lists.
        rects = [item for sublist in rects for item in sublist]

        # Draw rectangles (each rect is x, y, width, height, color).
        for x, y, w, h, color in rects:
            cv2.rectangle(
                images[tstamp],
                (x, y),
                (x + w, y + h),
                color=color,
Example #9
import cv2
import coils
import datetime
import numpy as np

output_tensors = [
    'ssd_300_vgg/block4_box/Reshape_1:0', 'ssd_300_vgg/block7_box/Reshape_1:0',
    'ssd_300_vgg/block8_box/Reshape_1:0', 'ssd_300_vgg/block9_box/Reshape_1:0',
    'ssd_300_vgg/block10_box/Reshape_1:0',
    'ssd_300_vgg/block11_box/Reshape_1:0', 'ssd_300_vgg/block4_box/Reshape:0',
    'ssd_300_vgg/block7_box/Reshape:0', 'ssd_300_vgg/block8_box/Reshape:0',
    'ssd_300_vgg/block9_box/Reshape:0', 'ssd_300_vgg/block10_box/Reshape:0',
    'ssd_300_vgg/block11_box/Reshape:0'
]
input_tensor = "Placeholder:0"
fps = coils.RateTicker((60, ))

FPGAinCloudAddress = None
FPGAinCloudssl_enabled = None
FPGAinCloudPort = None
FPGAinCloudaks_servicename = None
CameraURL = None
predictClient = None
scoring = False
PauseBetweenScoreSeconds = 0


async def getInitialTWIN(module_client):
    global FPGAinCloudAddress
    global FPGAinCloudssl_enabled
    global FPGAinCloudPort
Example #10
    def __init__(self):
        self.image_acc = None  # Maintain accumulation of thresholded differences.
        self.tstamp_prev = None  # Keep track of previous iteration's timestamp.
        self.framerate = coils.RateTicker((1, 5, 10))  # Monitor framerates.
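`coils.RateTicker` records an event each time `tick()` is called and returns one smoothed rate per configured window, which is the pattern every example above relies on. A minimal standalone sketch:

import time

import coils

ticker = coils.RateTicker((1, 5, 10))  # Windows of 1s, 5s, 10s.
for _ in range(100):
    time.sleep(0.01)  # Simulate per-frame work.
    # tick() returns the rate over each window, here three values.
    print('{:.2f}, {:.2f}, {:.2f} fps'.format(*ticker.tick()))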
Example #11
        entry = common[tstamp]
        entry['image_diff'] = image_diff
        common[tstamp] = entry
        
        # Propagate result to the next stage.
        self.putResult(tstamp)

        # Accumulate the weighted running average in place.
        cv2.accumulateWeighted(
            image,
            self.image_acc,
            alpha,
            )

# Monitor framerates for the given seconds past.
framerate2 = coils.RateTicker((1, 5, 10))

# Create the output window.
cv2.namedWindow('diff average 4', cv2.WINDOW_NORMAL)

def step2(tstamp):
    """Display the image, stamped with framerate."""
    fps_text = '{:.2f}, {:.2f}, {:.2f} fps'.format(*framerate2.tick())
    util.writeOSD(common[tstamp]['image_diff'], (fps_text,))
    cv2.imshow('diff average 4', common[tstamp]['image_diff'])
    cv2.waitKey(1)  # Allow HighGUI to process event.
    return tstamp

# Assemble the pipeline.
stage1 = mpipe.Stage(Step1)
stage2 = mpipe.FilterStage(
Example #12
def main(video_uri):
    global max_sleep, cur_sleep
    while True:
        cap = cv2.VideoCapture(
            'rtsp://*****:*****@192.168.1.153:554/Streaming/Channels/101?transportmode=unicast&profile=Profile_1'
        )
        if cap.isOpened():
            break
        print('not opened, sleeping {}s'.format(cur_sleep))
        time.sleep(cur_sleep)
        if cur_sleep < max_sleep:
            cur_sleep *= 2
            cur_sleep = min(cur_sleep, max_sleep)
            continue
        cur_sleep = 0.1
    cap.set(cv2.CAP_PROP_FRAME_COUNT, 3)
    # Create client to the Redis store.
    store = redis.Redis()
    width = None if len(sys.argv) <= 2 else int(sys.argv[2])
    height = None if len(sys.argv) <= 3 else int(sys.argv[3])
    # Set video dimensions, if given.
    if width: cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    if height: cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

    # Monitor the framerate at 1s, 5s, 10s intervals.
    fps = coils.RateTicker((1, 5, 10))

    # Repeatedly capture the current image, encode it, and push it to the
    # Redis database. Then create a unique ID and push that as well.
    while True:
        ticks = time.time()
        ret, image = cap.read()
        while time.time() - ticks < 3.0:
            ret, image = cap.read()
            if image is None:
                print('image is none')
                time.sleep(0.1)
                continue
        ticks1 = time.time()
        small_frame = cv2.resize(image, (0, 0), fx=0.5, fy=0.5)
        dets = detector(small_frame, 1)
        ticks2 = time.time()
        print("detection {} faces takes {} s".format(len(dets),
                                                     ticks2 - ticks1))
        for i, d in enumerate(dets):
            img = image[d.top() * 2:d.bottom() * 2, d.left() * 2:d.right() * 2]
            cv2.imwrite(
                "{}{}.jpg".format(
                    '/home/fengping/Pictures/faces/',
                    time.strftime("%Y%m%d%H%M%S", time.localtime())), img)
            img = cv2.resize(img, dim)
            img = img.astype(np.float32)
            img[:, :, 0] = (img[:, :, 0] - ilsvrc_mean[0])
            img[:, :, 1] = (img[:, :, 1] - ilsvrc_mean[1])
            img[:, :, 2] = (img[:, :, 2] - ilsvrc_mean[2])
            ticks3 = time.time()
            # ***************************************************************
            # Send the image to the NCS
            # ***************************************************************
            graph.LoadTensor(img.astype(np.float16), 'user object')

            # ***************************************************************
            # Get the result from the NCS
            # ***************************************************************
            output, userobj = graph.GetResult()
            print("movidius calssfication cost {}".format(time.time() -
                                                          ticks3))
            # ***************************************************************
            # Print the results of the inference from the NCS
            # ***************************************************************
            order = output.argsort()[::-1][:6]
            print('\n------- predictions --------')
            for rank in range(0, 6):
                print('prediction ' + str(rank) + ' (probability ' +
                      str(output[order[rank]]) + ') is ' + labels[order[rank]] +
                      '  label index is: ' + str(order[rank]))
            cv2.rectangle(image, (int(d.left()) * 2, int(d.top()) * 2),
                          (int(d.right()) * 2, int(d.bottom()) * 2),
                          (255, 0, 0), 2)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(image, labels[order[0]],
                        (int(d.left()) * 2 + 6, int(d.top()) * 2 - 6), font,
                        0.5, (255, 0, 0), 1)
        ret, image = cv2.imencode('.jpg', image)
        value = image.tobytes()
        store.set('image', value)
        image_id = os.urandom(4)
        store.set('image_id', image_id)

        # Print the framerate.
        text = '{:.2f}, {:.2f}, {:.2f} fps'.format(*fps.tick())
        print(text)
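`dim` and `ilsvrc_mean` come from elsewhere in this source. Typical values for an ILSVRC-trained network on the NCS might look like the following, though the exact numbers depend on the deployed graph (assumed values, not taken from the example):

import numpy as np

dim = (224, 224)  # Network input size (assumed; depends on the graph).
ilsvrc_mean = np.array([104.0, 117.0, 123.0])  # Mean BGR values (assumed).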
Example #13
def main(video_uri):
    global max_sleep, cur_sleep, avg, lastUploaded
    while True:
        cap = cv2.VideoCapture(video_uri)
        if cap.isOpened():
            break
        print('not opened, sleeping {}s'.format(cur_sleep))
        time.sleep(cur_sleep)
        if cur_sleep < max_sleep:
            cur_sleep *= 2
            cur_sleep = min(cur_sleep, max_sleep)
            continue
        cur_sleep = 0.1
    # Create client to the Redis store.
    store = redis.Redis()

    # Monitor the framerate at 1s, 5s, 10s intervals.
    fps = coils.RateTicker((1, 5, 10))

    # Repeatedly capture the current image, encode it, and push it to the
    # Redis database. Then create a unique ID and push that as well.
    motionCounter = 0  # Frames with consistent motion so far.
    while True:
        text = "Unoccupied"
        ticks = time.time()
        timestamp = datetime.datetime.now()
        ret, image = cap.read()
        if image is None:
            print('image is none')
            time.sleep(0.1)
            continue
        #cap.release()
        ticks1 = time.time()
        small_frame = cv2.resize(image, (0, 0), fx=0.5, fy=0.5)
        gray = cv2.cvtColor(small_frame, cv2.COLOR_BGR2GRAY)
        gray2 = cv2.GaussianBlur(gray, (21, 21), 0)
        # if the average frame is None, initialize it
        if avg is None:
            print("[INFO] starting background model...")
            avg = gray2.copy().astype("float")
            continue
        # accumulate the weighted average between the current frame and
        # previous frames, then compute the difference between the current
        # frame and running average
        cv2.accumulateWeighted(gray2, avg, 0.5)
        frameDelta = cv2.absdiff(gray2, cv2.convertScaleAbs(avg))

        # threshold the delta image, dilate the thresholded image to fill
        # in holes, then find contours on thresholded image
        thresh = cv2.threshold(frameDelta, 5, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        _, cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                      cv2.CHAIN_APPROX_SIMPLE)
        # loop over the contours
        for c in cnts:
            # if the contour is too small, ignore it
            if cv2.contourArea(c) < 5000:
                continue

            # compute the bounding box for the contour, draw it on the frame,
            # and update the text
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(image, (x * 2, y * 2), ((x + w) * 2, (y + h) * 2),
                          (255, 0, 0), 2)
            text = "Occupied"
        if text == "Occupied":
            # check to see if enough time has passed between uploads
            if (timestamp - lastUploaded).seconds >= 2.0:
                # increment the motion counter
                motionCounter += 1
                # check to see if the number of frames with consistent motion is
                # high enough
                if motionCounter >= 5:
                    lastUploaded = timestamp
                    motionCounter = 0
                    #face_locations = opencv_detection(gray)
                    face_locations = dlib_detection(small_frame)
                    if len(face_locations) == 0:
                        print("no face detected at all")
                    # Draw a rectangle around the faces
                    for (left, top, right, bottom) in face_locations:
                        font = cv2.FONT_HERSHEY_DUPLEX
                        cv2.putText(image, 'face', (left * 2 + 6, top * 2 - 6),
                                    font, 0.5, (0, 255, 0), 1)
                        cv2.rectangle(image, (left * 2, top * 2),
                                      (right * 2, bottom * 2), (0, 255, 0), 2)
                        ticks2 = time.time()
                        print("detection {} faces takes {} s".format(
                            len(face_locations), ticks2 - ticks1))
                        img = image[top * 2:bottom * 2, left * 2:right * 2]
                        # cv2.imwrite("{}{}.jpg".format('/home/fengping/Pictures/faces/', time.strftime("%Y%m%d%H%M%S", time.localtime())), img)
                        img = cv2.resize(img, dim)
                        img = img.astype(np.float32)
                        img[:, :, 0] = (img[:, :, 0] - ilsvrc_mean[0])
                        img[:, :, 1] = (img[:, :, 1] - ilsvrc_mean[1])
                        img[:, :, 2] = (img[:, :, 2] - ilsvrc_mean[2])
                        ticks3 = time.time()
                        # ***************************************************************
                        # Send the image to the NCS
                        # ***************************************************************
                        graph.LoadTensor(img.astype(np.float16), 'user object')

                        # ***************************************************************
                        # Get the result from the NCS
                        # ***************************************************************
                        output, userobj = graph.GetResult()
                        print("movidius calssfication cost {}".format(
                            time.time() - ticks3))
                        # ***************************************************************
                        # Print the results of the inference from the NCS
                        # ***************************************************************
                        order = output.argsort()[::-1][:6]
                        print('\n------- predictions --------')
                        for i in range(0, 6):
                            print('prediction ' + str(i) + ' (probability ' +
                                  str(output[order[i]]) + ') is ' +
                                  labels[order[i]] + '  label index is: ' +
                                  str(order[i]))
                        cv2.rectangle(image, (left * 2, top * 2),
                                      (right * 2, bottom * 2), (255, 0, 0), 2)
                        font = cv2.FONT_HERSHEY_DUPLEX
                        cv2.putText(image, labels[order[0]],
                                    (left * 2 + 6, top * 2 - 6), font, 0.5,
                                    (255, 0, 0), 1)
                        path = timestamp.strftime("%b-%d_%H_%M_%S" + ".jpg")
                        cv2.imwrite(path, image)
        # otherwise, the room is not occupied
        else:
            motionCounter = 0
        ret, image = cv2.imencode('.jpg', image)
        value = image.tobytes()
        store.set('image', value)
        image_id = os.urandom(4)
        store.set('image_id', image_id)

        # Print the framerate.
        #text = '{:.2f}, {:.2f}, {:.2f} fps'.format(*fps.tick())
        #print(text)
    cap.release()