Example #1
# Assumed imports for this snippet: time, uuid4 from uuid, cv2, and numpy as np.
import time
from uuid import uuid4

import cv2
import numpy as np

def Log(Message=None, LogLevel=0):
    if Message is None:
        raise ValueError("Message has no value.")
    Time = time.strftime("%Y-%m-%d %H:%M:%S")
    if isinstance(Message, str):
        LogLevel = str(LogLevel).upper()
        if LogLevel in ("0", "INFO"):
            Message = "INFO: " + Message
        elif LogLevel in (
                "1", "DEBUG"
        ):  # TODO: Only log messages marked as "DEBUG" if DebugMode == True in Settings.Settings!
            Message = "DEBUG: " + Message
        elif LogLevel in ("2", "WARNING"):
            Message = "WARNING: " + Message
        elif LogLevel in ("3", "ERROR"):
            Message = "ERROR: " + Message
        elif LogLevel in ("4", "CRITICAL"):
            Message = "CRITICAL: " + Message
        Message = Time + " " + Message + "\r\n"
        print(Message)
    elif isinstance(Message, np.ndarray):  # If the message is an image.
        # Note: the ":" characters in the timestamp are not valid in Windows filenames.
        ImageName = Time + " " + str(uuid4()) + ".png"
        cv2.imwrite(ImageName, Message)
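A minimal usage sketch for the function above (assuming the imports shown):

Log("Server started")                    # prints "<timestamp> INFO: Server started"
Log("Cache miss", LogLevel="DEBUG")      # prints "<timestamp> DEBUG: Cache miss"
Log(np.zeros((8, 8, 3), np.uint8))       # writes "<timestamp> <uuid>.png" to disk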
Example #2
# Requires the same imports as Example #1 (time, uuid4, cv2, numpy).
def Log(Message=None, LogLevel=1):
    if Message is None:
        raise ValueError("Message has no value.")
    Time = time.strftime("%Y-%m-%d %H:%M:%S")
    if isinstance(Message, str):
        LogLevel = str(LogLevel).lower()
        if LogLevel in ("0", "debug"):
            Message = "DEBUG:    " + Message
        elif LogLevel in ("1", "info"):
            Message = "INFO:     " + Message
        elif LogLevel in ("2", "warning"):
            Message = "WARNING:  " + Message
        elif LogLevel in ("3", "error"):
            Message = "ERROR:    " + Message
        elif LogLevel in ("4", "critical"):
            Message = "CRITICAL: " + Message
        else:
            raise ValueError("Improper value passed for log level.")
        Message = "{0} > {1}".format(Time, Message)
        print(Message)
    elif isinstance(Message, np.ndarray):  # If the message is an image.
        ImageName = "{0} {1}.png".format(Time, uuid4())
        cv2.imwrite(ImageName, Message)
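Every example in this collection persists images through OpenCV. Note that the binding is cv2.imwrite (lowercase "w"); cv2.imWrite raises AttributeError. A quick self-contained check:

import cv2
import numpy as np

ok = cv2.imwrite("test.png", np.zeros((4, 4, 3), np.uint8))
print(ok)  # imwrite returns True on success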
Example #3
def saveImageAction(self):
    saveImage = self.grayImage
    # PyQt5's getSaveFileName returns a (path, filter) tuple; the path is "" on cancel.
    filename, _ = QFileDialog.getSaveFileName()
    if filename:
        cv2.imwrite(filename, saveImage)
        print("Save")
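A self-contained sketch of the same save flow (PyQt5 assumed; the zero image is a hypothetical stand-in for self.grayImage):

import sys
import cv2
import numpy as np
from PyQt5.QtWidgets import QApplication, QFileDialog

app = QApplication(sys.argv)
gray = np.zeros((64, 64), np.uint8)  # stand-in for self.grayImage
path, _ = QFileDialog.getSaveFileName(None, "Save image", "", "PNG (*.png)")
if path:  # an empty path means the dialog was cancelled
    cv2.imwrite(path, gray)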
Example #4
# Assumed imports for this snippet: cv2, os.path, and the project's
# ServerSockets module (provides picture_to_data()).
import cv2
from os import path
import ServerSockets

def postAuthParse(packet, UserID, img=None):
    # Packet format is "<function>:<argument><terminator>"; drop the last character.
    splitPacket = packet.split(":")
    function = splitPacket[0]
    argument = splitPacket[1][:-1]
    profileInfo = open("profileInfo.txt", "r+")
    profInfo = profileInfo.read().split("\n")
    if function == "set" and argument in ("Single", "Taken", "Not Looking"):
        # Update the matching user's status field (record: UserID,First,Last,Status).
        for x in range(len(profInfo)):
            splitInfo = profInfo[x].split(",")
            if splitInfo[0] == UserID:
                splitInfo[3] = argument
                profInfo[x] = ",".join(splitInfo)
        # Rewrite the whole file from the updated records.
        profileInfo.seek(0)
        profileInfo.truncate(0)
        profileInfo.write("\n".join(profInfo))
        profileInfo.close()
        return argument, False
    elif function == "get":
        for x in range(len(profInfo)):
            splitProfInfo = profInfo[x].split(",")
            if splitProfInfo[0] == UserID:
                if argument == "First Name":
                    return splitProfInfo[1], False
                elif argument == "Last Name":
                    return splitProfInfo[2], False
                elif argument == "Status":
                    return splitProfInfo[3], False
                else:
                    return "Invalid Argument", False
    elif function == "login":
        return "Allow", False
    elif function == "getFirstName":
        for x in range(len(profInfo)):
            splitInfo = profInfo[x].split(",")
            if splitInfo[0] == argument:
                return splitInfo[1], False
    elif function == "getStatus":
        for x in range(len(profInfo)):
            splitInfo = profInfo[x].split(",")
            if splitInfo[0] == argument:
                return splitInfo[3], False
    elif function[0] == "i":
        # Photo mode: function is "i<index>"; serve the requested image.
        index = function[1:]
        targetId = UserID
        directory = "Imgs/" + targetId + "/" + index + "vP.jpg"
        if path.exists(directory):
            img = cv2.imread(directory)
            print("basic")
            return ServerSockets.picture_to_data(img), True

        # "vH" (hidden) images are only served to their owner.
        directory = "Imgs/" + targetId + "/" + index + "vH.jpg"
        if path.exists(directory) and targetId == UserID:
            print("Private")
            img = cv2.imread(directory)
            return ServerSockets.picture_to_data(img), True

        print("No photo")
        return "None", False
    elif function == "P" or function == "H":
        # Find the first unused image index for this user.
        index = ""
        for x in range(1, 100):
            location = "Imgs/" + UserID + "/" + str(x) + "vP.jpg"
            if path.exists(location):
                continue
            location = "Imgs/" + UserID + "/" + str(x) + "vH.jpg"
            if path.exists(location):
                continue
            index = x
            break
        location = "Imgs/" + UserID + "/" + str(index) + "v" + function + ".jpg"
        cv2.imwrite(location, img)
        print(str(index))
        return str(index)

    elif function == "ANALYZE":
        location = "Imgs/Analyze/analyzePic.jpg"
        cv2.imwrite(location, img)

    else:
        print("Invalid function")
        return "Invalid", False
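For context, the parser above assumes profileInfo.txt stores one comma-separated record per line in the order UserID,FirstName,LastName,Status. A hypothetical file:

1001,Alice,Smith,Single
1002,Bob,Jones,Taken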
Example #5
# Assumed imports for this snippet: os, sys, time, json, cv2, logging as log,
# plus the project's Network class, the MQTT constants (HOSTNAME, MQTT_PORT,
# IPADDRESS), and draw_frame_on_inference().
def infer_on_stream(args, client):
    """
    Initialize the inference network, stream video to network,
    and output stats and video.

    :param args: Command line arguments parsed by `build_argparser()`
    :param client: MQTT client
    :return: None
    """
    global i_w, i_h, prob_threshold
    current_request_num = 0
    total_count = 0
    latest_count = 0
    previous_count = 0
    duration_sum = 0
    duration_in_frame = 0.0
    frame_count = 0
    infer_frame_count = 0
    single_image_mode = False
    # Initialise the class
    infer_network = Network()
    # Set Probability threshold for detections
    prob_threshold = args.prob_threshold
    client.connect(HOSTNAME,
                   port=MQTT_PORT,
                   keepalive=60,
                   bind_address=IPADDRESS)
    ### Load the model through `infer_network` ###
    n, c, h, w = infer_network.load_model(args.model, args.device, 1, 1,
                                          current_request_num,
                                          args.cpu_extension)

    ### Handle the input stream ###
    if args.input.endswith('.jpg') or args.input.endswith('.bmp'):
        single_image_mode = True
        input_stream = args.input
    else:
        input_stream = args.input

    capture_frames = cv2.VideoCapture(input_stream)
    length_of_video = int(capture_frames.get(cv2.CAP_PROP_FRAME_COUNT))
    frame_rate = int(capture_frames.get(cv2.CAP_PROP_FPS))
    ### Read from the video capture ###
    infer_time_start = time.time()
    if input_stream:
        capture_frames.open(args.input)
    if not capture_frames.isOpened():
        log.error("Unable to Open the Video File.")

    i_w = capture_frames.get(3)
    i_h = capture_frames.get(4)
    # 0x00000021 is a FourCC value selecting an MP4-compatible codec on some platforms.
    out = cv2.VideoWriter("people_counter.mp4", 0x00000021,
                          frame_rate, (int(i_w), int(i_h)), True)
    while capture_frames.isOpened():

        isEnd, frame = capture_frames.read()
        frame_count += 1
        current_count = 0
        if not isEnd:
            break
        cv2.waitKey(10)
        ### Pre-process the image as needed ###
        inf_image = cv2.resize(frame, (w, h))
        inf_image = inf_image.transpose((2, 0, 1))
        inf_image = inf_image.reshape((n, c, h, w))

        # Starting the Asynchronous Inference:
        inf_start = time.time()
        infer_network.exec_net(current_request_num, inf_image)

        ### Waiting for the result ###
        # wait() is expected to block until the request completes (returning 0),
        # so out_frame and duration_message are defined for the code below.
        if infer_network.wait(current_request_num) == 0:
            duration = (time.time() - inf_start)
            results = infer_network.get_output(current_request_num)
            out_frame, current_count = draw_frame_on_inference(frame, results)
            duration_message = "Inference Time Per Frame: {:.3f}ms".format(
                duration * 1000)

        if current_count > 0:
            infer_frame_count += 1
            duration_sum += float(infer_frame_count) / frame_rate

        if current_count > 0 and infer_frame_count > args.frames_ignore and previous_count > 0:
            # If the count rises and stays up past the ignore window,
            # keep the running maximum as the previous count.
            previous_count = max(previous_count, current_count)

        if previous_count == 0 and infer_frame_count > args.frames_ignore:
            total_count += current_count
            previous_count = max(previous_count, current_count)
            client.publish("person", json.dumps({"count": current_count}))
            client.publish("person", json.dumps({"total": total_count}))

        if args.enable_alert_limit is not None and current_count >= args.enable_alert_limit:
            client.publish(
                "alert",
                json.dumps({
                    "alert_msg": "Stampede",
                    "count": current_count
                }))
            intruder_msg = "STAMPEDE ALERT, CURRENT COUNT {} IS SAME OR EXCEEDED SAFE LIMIT {}".format(
                current_count, args.enable_alert_limit)
            cv2.putText(out_frame, intruder_msg, (15, 45),
                        cv2.FONT_HERSHEY_DUPLEX, 0.5, (10, 10, 210), 1)

        if previous_count != 0 and current_count == 0:
            duration_in_frame = infer_frame_count / frame_rate
            for i in range(previous_count):
                client.publish("person/duration",
                               json.dumps({"duration": duration_in_frame}))

        if current_count == 0:
            infer_frame_count = 0
            previous_count = current_count
            duration_sum = 0.0
            client.publish("person", json.dumps({"count": current_count}))

        cv2.putText(out_frame, duration_message, (15, 15),
                    cv2.FONT_HERSHEY_DUPLEX, 0.5, (210, 10, 10), 1)
        people_count_msg = "People counted: current frame: {}; total: {}".format(
            current_count, total_count)
        cv2.putText(out_frame, people_count_msg, (15, 30),
                    cv2.FONT_HERSHEY_DUPLEX, 0.5, (210, 10, 10), 1)

        out.write(out_frame)

        client.publish("person", json.dumps({"count": current_count}))

        ### Send the frame to the FFMPEG server ###
        sys.stdout.buffer.write(out_frame)
        sys.stdout.flush()

        ### Write an output image if `single_image_mode` ###
        if single_image_mode:
            cv2.imwrite('infer_out.jpg', frame)

    capture_frames.release()
    client.disconnect()
Example #6
# Assumed imports for this snippet: os, sys, time, json, cv2, logging as log,
# plus the project's Network class and draw_rectangular_box().
def infer_on_stream(args, client):
    """
    Initialize the inference network, stream video to network,
    and output stats and video.

    :param args: Command line arguments parsed by `build_argparser()`
    :param client: MQTT client
    :return: None
    """
    #Initial, global variables for counting
    current_request_id = 0
    start_time = 0
    last_count = 0
    total_count = 0
    
    
    # Initialise the class
    infer_network = Network()
    # Set Probability threshold for detections
    prob_threshold = args.prob_threshold

    ###  Load the model through `infer_network` ###
    infer_network.load_model(args.model, args.device, current_request_id, args.cpu_extension)
    model_input_shape = infer_network.get_input_shape()

    ### Handle the input stream ###
    single_image_mode = False
    
    if args.input == 'CAM':
        input_stream = 0

    elif args.input.endswith('.jpg') or args.input.endswith('.png') or args.input.endswith('.bmp'):
        single_image_mode = True
        input_stream = args.input

    else:
        input_stream = args.input
        assert os.path.isfile(args.input), "The input file does not exist"
        
    cap = cv2.VideoCapture(input_stream)
    
    if input_stream:
        cap.open(input_stream)
        
    if not cap.isOpened():
        log.error('Error! The video file/source is not opening')
    
    #inital width and height taken from the input
    initial_width = int(cap.get(3))
    initial_height = int(cap.get(4))
     ###  Loop until stream is over ###   
    while cap.isOpened():
         ###  Read from the video capture ###
        flag, frame = cap.read()
        
        if not flag:
            break
            
        pressed_key = cv2.waitKey(60)
        ### Pre-process the image as needed ###
        width = model_input_shape[3]
        height = model_input_shape[2]
        processed_input_image = cv2.resize(frame,(width, height))
        processed_input_image = processed_input_image.transpose((2, 0, 1))
        processed_input_image = processed_input_image.reshape(model_input_shape[0], model_input_shape[1], height, width)
        ###  Start asynchronous inference for specified request ###
        start_of_inference = time.time()
        infer_network.exec_net(current_request_id, processed_input_image)
        
        ###  Wait for the result ###
        if infer_network.wait(current_request_id) == 0:
            detection_time = (time.time() - start_of_inference) * 1000
            ###  Get the results of the inference request ###
            result = infer_network.get_output(current_request_id)
            ### Extract any desired stats from the results ###
            frame, present_count = draw_rectangular_box(frame, result, initial_width, initial_height, prob_threshold)
            ##Find out the inference time and write the result on the video as text.
            inf_time_msg = "Inference time: {:.5f}ms".format(detection_time)
            cv2.putText(frame, inf_time_msg, (20,10), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
            #Person's count is calculated here
            if present_count > last_count:
                start_time = time.time()
                total_count += present_count - last_count
                client.publish('person', json.dumps({"total": total_count}))
            #Duration is calculated here
            if present_count < last_count:
                person_duration = int(time.time() - start_time)
                # Prevent double counting; a higher threshold keeps the app from getting oversensitive.
                if person_duration > 5:
                    total_count -= 1
                client.publish('person/duration', json.dumps({"duration": person_duration}))
            
                # if present_count >= 4:
                #     print('Alert! Number of people exceeds the limit! Please take necessary action.')
                
                
            client.publish('person', json.dumps({"count": present_count}))
            last_count = present_count
            # End if escape key is pressed
            if pressed_key == 27:
                break
         ###  Send the frame to the FFMPEG server ###    
        sys.stdout.buffer.write(frame)
        sys.stdout.flush()
        ### Write an output image if `single_image_mode` ###
        if single_image_mode:
            cv2.imwrite('output_image.jpg', frame)

    # Release resources once the stream ends.
    cap.release()
    cv2.destroyAllWindows()
    client.disconnect()
    infer_network.clean()
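Both streaming examples share the same preprocessing step before inference: resize the frame to the model's spatial input, reorder HWC to CHW, and add a batch dimension. A standalone sketch (the 300x300 input shape is an assumption):

import cv2
import numpy as np

def preprocess(frame, n=1, c=3, h=300, w=300):
    image = cv2.resize(frame, (w, h))   # match the network's input size
    image = image.transpose((2, 0, 1))  # HWC -> CHW
    return image.reshape((n, c, h, w))  # add the batch dimension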