Пример #1
0
def DetectViolence(PATH_FILE_NAME_OF_SOURCE_VIDEO,
                   PATH_FILE_NAME_TO_SAVE_RESULT):
    """Run the violence detector over a video file and display the result.

    Each frame is classified by ViolenceDetector and shown with a colored
    border (fight vs. no-fight); the bordered frames are optionally written
    to an output video.  Press 'q' to stop early.

    Args:
        PATH_FILE_NAME_OF_SOURCE_VIDEO: path of the input video.
        PATH_FILE_NAME_TO_SAVE_RESULT: output path prefix, or None to skip
            saving the result video.
    """
    violenceDetector = ViolenceDetector()
    videoReader = cv2.VideoCapture(PATH_FILE_NAME_OF_SOURCE_VIDEO)
    # Idiom fix: compare against None with 'is not', not '!='.
    shouldSaveResult = PATH_FILE_NAME_TO_SAVE_RESULT is not None

    if shouldSaveResult:
        videoSavor = VideoSavor(PATH_FILE_NAME_TO_SAVE_RESULT + "_Result",
                                videoReader)

    listOfForwardTime = []
    isCurrentFrameValid, currentImage = videoReader.read()
    while isCurrentFrameValid:
        netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(currentImage)

        # Time only the network forward pass.
        startDetectTime = time.time()
        isFighting = violenceDetector.Detect(netInput)
        endDetectTime = time.time()
        listOfForwardTime.append(endDetectTime - startDetectTime)

        # Shrink the frame so that, with the border added, it matches the
        # configured display size.
        targetSize = deploySettings.DISPLAY_IMAGE_SIZE - 2 * deploySettings.BORDER_SIZE
        currentImage = cv2.resize(currentImage, (targetSize, targetSize))
        if isFighting:
            print("isFighting")
            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT,
                value=deploySettings.FIGHT_BORDER_COLOR)

        else:
            # Bug fix: message typo "is't Fighting" -> "isn't Fighting".
            print("isn't Fighting")
            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT,
                value=deploySettings.NO_FIGHT_BORDER_COLOR)

        cv2.imshow("Violence Detection", resultImage)
        if shouldSaveResult:
            videoSavor.AppendFrame(resultImage)

        userResponse = cv2.waitKey(1)
        if userResponse == ord('q'):
            break
        isCurrentFrameValid, currentImage = videoReader.read()

    # Resource fix: release the capture and close the window also when the
    # video ends naturally (the original released only on 'q').
    videoReader.release()
    cv2.destroyAllWindows()

    # PrintUnsmoothedResults(violenceDetector.unsmoothedResults)
    # Robustness: np.mean([]) warns and yields nan for an empty video.
    if listOfForwardTime:
        averagedForwardTime = np.mean(listOfForwardTime)
        print("Averaged Forward Time: ", averagedForwardTime)
Пример #2
0
def DetectViolence():
    """Continuously classify frames from an RTSP camera stream.

    Each frame is shown with a fight / no-fight border.  After 5 consecutive
    frames classified as fighting, the current frame is saved to disk and an
    alert e-mail with the image attached is sent.  Press 'q' to stop.
    """
    violenceDetector = ViolenceDetector()
    videoReader = cv2.VideoCapture(
        "rtsp://*****:*****@118.70.125.33:8554/CH001.sdp")
    count = 0  # consecutive fighting frames
    # Bug fix: the timing list was re-created on every iteration, so the
    # final average covered only the last frame; create it once.
    listOfForwardTime = []
    while True:
        isCurrentFrameValid, currentImage = videoReader.read()
        # Robustness: stop cleanly instead of crashing in the conversion
        # below when the stream drops a frame.
        if not isCurrentFrameValid:
            break
        netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(currentImage)
        startDetectTime = time.time()
        isFighting = violenceDetector.Detect(netInput)
        print(isFighting)
        endDetectTime = time.time()
        listOfForwardTime.append(endDetectTime - startDetectTime)
        targetSize = deploySettings.DISPLAY_IMAGE_SIZE - 2 * deploySettings.BORDER_SIZE
        currentImage = cv2.resize(currentImage, (targetSize, targetSize))
        if isFighting:
            count += 1
            if count == 5:
                print("saving...")
                cv2.imwrite('image.jpg', currentImage)  # save evidence frame
                # Send the alert mail with the saved image attached
                # (credentials are redacted in this source).
                sendEmail.sendMail(
                    '*****@*****.**', '12345Aa@',
                    '*****@*****.**', 'Fighting', 'Have Fight',
                    'C:/Users/anlan/OneDrive/Desktop/Project_Violence/ViolenceDetection-master_main/image.jpg'
                )
            # copyMakeBorder adds the colored border to the frame.
            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT,
                value=deploySettings.FIGHT_BORDER_COLOR)
        else:
            # Bug fix: the original reset only when count > 5, so a fighting
            # streak that ended at exactly 5 left the counter stuck and no
            # later alert could ever fire; reset on any non-fighting frame
            # so 5 *consecutive* fighting frames are required.
            count = 0
            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT,
                value=deploySettings.NO_FIGHT_BORDER_COLOR)
        cv2.imshow("Violence Detection", resultImage)
        print("count", count)
        userResponse = cv2.waitKey(1)
        if userResponse == ord('q'):
            break
        # Bug fix: the original issued a second videoReader.read() here in
        # addition to the read at the top of the loop, silently discarding
        # every other frame.

    # Resource fix: always release the capture and close the window.
    videoReader.release()
    cv2.destroyAllWindows()
    # Robustness: guard against an empty list (nan + RuntimeWarning).
    if listOfForwardTime:
        averagedForwardTime = np.mean(listOfForwardTime)
Пример #3
0
def DetectViolence(PATH_FILE_NAME_OF_SOURCE_VIDEO, PATH_FILE_NAME_TO_SAVE_RESULT):
	"""Classify every frame of a video file and display it with a border.

	Each frame is run through ViolenceDetector; frames classified as
	fighting are counted, and if more than 5 such frames were seen over the
	whole video, an alert e-mail with the saved evidence frame is sent
	after playback finishes.  Press 'q' to stop early.

	Args:
		PATH_FILE_NAME_OF_SOURCE_VIDEO: path of the input video.
		PATH_FILE_NAME_TO_SAVE_RESULT: output path prefix, or None to skip
			saving the bordered result video.
	"""
	violenceDetector = ViolenceDetector()
	videoReader = cv2.VideoCapture(PATH_FILE_NAME_OF_SOURCE_VIDEO)
	print(videoReader)
	shouldSaveResult = (PATH_FILE_NAME_TO_SAVE_RESULT != None)

	if shouldSaveResult:
		videoSavor = VideoSavor(PATH_FILE_NAME_TO_SAVE_RESULT + "_Result", videoReader)

	listOfForwardTime = []
	isCurrentFrameValid, currentImage = videoReader.read()
	count = 0  # total frames classified as fighting (never reset)
	while isCurrentFrameValid:
		print(isCurrentFrameValid)
		netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(currentImage)
		# Time only the network forward pass.
		startDetectTime = time.time()
		isFighting = violenceDetector.Detect(netInput)
		print(isFighting)
		endDetectTime = time.time()
		listOfForwardTime.append(endDetectTime - startDetectTime)
		# Shrink the frame so it matches the display size once bordered.
		targetSize = deploySettings.DISPLAY_IMAGE_SIZE - 2*deploySettings.BORDER_SIZE
		currentImage = cv2.resize(currentImage, (targetSize, targetSize))
		if isFighting:
			count += 1
			# Save the 5th fighting frame as evidence for the alert e-mail.
			if count == 5:
				print("saving...")
				cv2.imwrite('image.jpg',currentImage)
			resultImage = cv2.copyMakeBorder(currentImage,deploySettings.BORDER_SIZE,deploySettings.BORDER_SIZE,deploySettings.BORDER_SIZE,deploySettings.BORDER_SIZE,cv2.BORDER_CONSTANT,value=deploySettings.FIGHT_BORDER_COLOR)
		else:
			resultImage = cv2.copyMakeBorder(currentImage,deploySettings.BORDER_SIZE,deploySettings.BORDER_SIZE,deploySettings.BORDER_SIZE,deploySettings.BORDER_SIZE,cv2.BORDER_CONSTANT,value=deploySettings.NO_FIGHT_BORDER_COLOR)
		cv2.imshow("Violence Detection", resultImage)
		print("count",count)
		if shouldSaveResult:
			print("shouldSaveResult",shouldSaveResult)
			videoSavor.AppendFrame(resultImage)
		userResponse = cv2.waitKey(1)
		if userResponse == ord('q'):
			videoReader.release()
			cv2.destroyAllWindows()
			break

		else:
			isCurrentFrameValid, currentImage = videoReader.read()
	# cv2.imshow('image',image)
	# PrintUnsmoothedResults(violenceDetector.unsmoothedResults)
	averagedForwardTime = np.mean(listOfForwardTime)  # NOTE(review): computed but never printed/used
	# More than 5 fighting frames over the whole run triggers the alert
	# e-mail (credentials are redacted in this source).
	if count > 5:
		print("truee")
		sendEmail.sendMail('*****@*****.**','12345Aa@','*****@*****.**','Fighting','Have Fight','C:/Users/anlan/OneDrive/Desktop/Project_Violence/ViolenceDetection-master_main/image.jpg')
def DetectViolence(PATH_FILE_NAME_TO_SAVE_RESULT):
    """Run the violence detector on the default webcam (device 0).

    Each captured frame is classified by ViolenceDetector and shown with a
    fight / no-fight colored border; the result is optionally saved to a
    video file.  Press 'q' to stop.

    Args:
        PATH_FILE_NAME_TO_SAVE_RESULT: output path prefix, or None to skip
            saving the result video.
    """
    # font for text used on video frames
    font = cv2.FONT_HERSHEY_SIMPLEX

    violenceDetector = ViolenceDetector()
    capture = cv2.VideoCapture(0)
    # Idiom fix: compare against None with 'is not', not '!='.
    shouldSaveResult = PATH_FILE_NAME_TO_SAVE_RESULT is not None

    if shouldSaveResult:
        videoSavor = VideoSavor(PATH_FILE_NAME_TO_SAVE_RESULT + "_Result",
                                capture)

    listOfForwardTime = []

    # Report some properties of the capture (frame width, height, fps).
    frame_width = capture.get(cv2.CAP_PROP_FRAME_WIDTH)
    frame_height = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
    fps = capture.get(cv2.CAP_PROP_FPS)
    print("CV_CAP_PROP_FRAME_WIDTH: '{}'".format(frame_width))
    print("CV_CAP_PROP_FRAME_HEIGHT : '{}'".format(frame_height))
    print("CAP_PROP_FPS : '{}'".format(fps))

    # Check if camera opened successfully
    if not capture.isOpened():
        print("Error opening the camera")

    # Read until the camera is closed or the user quits.
    while capture.isOpened():
        ret, frame = capture.read()
        if not ret:
            # No frame delivered; try again.
            continue

        netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(frame)

        # Time only the network forward pass.
        startDetectTime = time.time()
        isFighting = violenceDetector.Detect(netInput)
        endDetectTime = time.time()
        listOfForwardTime.append(endDetectTime - startDetectTime)

        targetSize = deploySettings.DISPLAY_IMAGE_SIZE - 2 * deploySettings.BORDER_SIZE
        # Bug fix: the original resized into currentImage but then bordered
        # the un-resized `frame`, making the resize dead code; border the
        # resized image, consistent with the file's other variants.
        currentImage = cv2.resize(frame, (targetSize, targetSize))
        if isFighting:
            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT,
                value=deploySettings.FIGHT_BORDER_COLOR)

        else:
            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT,
                value=deploySettings.NO_FIGHT_BORDER_COLOR)
        # frameText = "Violence Detected!" if isFighting else "No Violence Detected."
        # textColor = deploySettings.FIGHT_BORDER_COLOR if isFighting else deploySettings.NO_FIGHT_BORDER_COLOR
        # cv2.putText(frame, frameText, (50, 50), font, 4, (0, 255, 0), 2, cv2.LINE_AA)
        cv2.imshow("Violence Detection", resultImage)
        if shouldSaveResult:
            videoSavor.AppendFrame(resultImage)

        userResponse = cv2.waitKey(1)
        if userResponse == ord('q'):
            break
        # Bug fix: the original did an extra capture.read() here on top of
        # the read at the loop head, discarding every other frame.  The
        # original `flag` machinery was also dead code: `break` already
        # exits the loop directly.

    # Resource fix: always release the camera and close the window.
    capture.release()
    cv2.destroyAllWindows()

    # Moved out of the loop: the original printed these statistics on every
    # single frame.
    print("Details about current frame:")
    PrintUnsmoothedResults(violenceDetector.unsmoothedResults)
    if listOfForwardTime:
        averagedForwardTime = np.mean(listOfForwardTime)
        # print("Averaged Forward Time: ", averagedForwardTime)
Пример #5
0
# NOTE(review): truncated script fragment — `cap`, `violenceDetector`,
# ImageUtils, deploySettings and the rest of the `if isFighting:` body are
# cut off / defined elsewhere in the original source.
count=0                # frames processed so far
baslangicsn=list()     # start times (s) of detected violence intervals ("baslangic" = start)
bitissn=list()         # end times (s) of detected violence intervals ("bitis" = end)


while(True):
    # Capture frame-by-frame
    ret, currentImage = cap.read()
    # do what you want with frame
    #  and then save to file
    cv2.imwrite('/home/murat/Desktop/image.png', currentImage)
    count +=1
    netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(currentImage)
    # Time the forward pass (the timestamps are unused in this fragment).
    startDetectTime = time.time()
    isFighting = violenceDetector.Detect(netInput)
    endDetectTime = time.time()


    # Shrink the frame so it matches the display size once bordered.
    targetSize = deploySettings.DISPLAY_IMAGE_SIZE - 2*deploySettings.BORDER_SIZE
    currentImage = cv2.resize(currentImage, (targetSize, targetSize))

    if isFighting:  # violence detected
        p=0
        font = cv2.FONT_HERSHEY_SIMPLEX
        bottomLeftCornerOfText = (10,50)
        fontScale = 1
        fontColor = (255,255,255)
        lineType = 2
        # Open a new interval only when the previous one was closed (equal
        # list lengths); count/25 converts frames to seconds, which assumes
        # a 25 fps video — TODO confirm.
        if len(baslangicsn)==len(bitissn):
            baslangicsn.append(count/25)
Пример #6
0
def DetectViolence(PATH_FILE_NAME_OF_SOURCE_VIDEO,
                   PATH_FILE_NAME_TO_SAVE_RESULT):
    """Classify video frames, caption them, and report violence intervals.

    Each frame is classified by ViolenceDetector and shown with a Turkish
    caption ("Siddet tespit edildi" = violence detected).  Start/end times
    of detected intervals are collected in seconds (assuming 25 fps) and
    printed after playback.  Press 'q' to stop early.

    Args:
        PATH_FILE_NAME_OF_SOURCE_VIDEO: path of the input video.
        PATH_FILE_NAME_TO_SAVE_RESULT: output path prefix, or None to skip
            saving the result video.
    """
    violenceDetector = ViolenceDetector()
    videoReader = cv2.VideoCapture(PATH_FILE_NAME_OF_SOURCE_VIDEO)
    # Idiom fix: compare against None with 'is not', not '!='.
    shouldSaveResult = PATH_FILE_NAME_TO_SAVE_RESULT is not None

    if shouldSaveResult:
        videoSavor = VideoSavor(PATH_FILE_NAME_TO_SAVE_RESULT + "_Result",
                                videoReader)

    listOfForwardTime = []
    isCurrentFrameValid, currentImage = videoReader.read()

    count = 0         # total frames processed
    baslangicsn = []  # interval start times in seconds ("baslangic" = start)
    bitissn = []      # interval end times in seconds ("bitis" = end)
    while isCurrentFrameValid:
        count += 1
        netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(currentImage)
        startDetectTime = time.time()
        isFighting = violenceDetector.Detect(netInput)
        endDetectTime = time.time()
        listOfForwardTime.append(endDetectTime - startDetectTime)

        targetSize = deploySettings.DISPLAY_IMAGE_SIZE - 2 * deploySettings.BORDER_SIZE
        currentImage = cv2.resize(currentImage, (targetSize, targetSize))

        if isFighting:  # violence detected
            font = cv2.FONT_HERSHEY_SIMPLEX
            bottomLeftCornerOfText = (10, 50)
            fontScale = 1
            fontColor = (255, 255, 255)
            lineType = 2
            # Open a new interval only when the previous one was closed
            # (count/25 converts frames to seconds, assuming 25 fps).
            if len(baslangicsn) == len(bitissn):
                baslangicsn.append(count / 25)

            cv2.putText(currentImage, "Siddet tespit edildi",
                        bottomLeftCornerOfText, font, fontScale, fontColor,
                        lineType)
            bottomLeftCornerOfText = (10, 450)

        else:
            # Close the currently open interval, if any.
            if len(baslangicsn) != len(bitissn):
                bitissn.append(count / 25)

            font = cv2.FONT_HERSHEY_SIMPLEX
            bottomLeftCornerOfText = (10, 450)
            fontScale = 1
            fontColor = (255, 255, 255)
            lineType = 2
            cv2.putText(currentImage, "Siddet tespit edilmedi",
                        bottomLeftCornerOfText, font, fontScale, fontColor,
                        lineType)

        cv2.imshow("Violence Detection", currentImage)

        if shouldSaveResult:
            videoSavor.AppendFrame(currentImage)

        userResponse = cv2.waitKey(1)
        if userResponse == ord('q'):
            break
        isCurrentFrameValid, currentImage = videoReader.read()

    # Resource fix: release/close also when the video ends naturally.
    videoReader.release()
    cv2.destroyAllWindows()

    PrintUnsmoothedResults(violenceDetector.unsmoothedResults)
    averagedForwardTime = np.mean(listOfForwardTime)
    # Bug fix: close the last interval only if one is still open; the
    # original appended unconditionally, misaligning the start/end pairs.
    if len(bitissn) < len(baslangicsn):
        bitissn.append(count / 25)
    print(len(baslangicsn), "-------", len(bitissn))

    # Bug fix: the original popped from both lists while indexing over the
    # shrinking length, which skipped every other interval and relied on
    # IndexError for termination; iterate the pairs directly instead.
    for baslangic, bitis in zip(baslangicsn, bitissn):
        print("tespit edilen sureler", baslangic, "------", bitis)

    print("Averaged Forward Time: ", averagedForwardTime)
Пример #7
0
def DetectViolence(PATH_FILE_NAME_OF_SOURCE_VIDEO,
                   PATH_FILE_NAME_TO_SAVE_RESULT):
    """Classify every frame of a video file and display it with an overlay.

    Each frame is run through ViolenceDetector and shown with a colored
    border plus a "Violence detected" / "No violence" caption.  Throughput
    is tracked via the module-level `fps` object.  Press 'q' to stop.

    NOTE(review): `fps` is not defined in this function — presumably an
    imutils.video.FPS-style instance created (and started) elsewhere;
    confirm before relying on the printed FPS numbers.

    Args:
        PATH_FILE_NAME_OF_SOURCE_VIDEO: path of the input video.
        PATH_FILE_NAME_TO_SAVE_RESULT: output path prefix, or None to skip
            saving the result video.
    """
    violenceDetector = ViolenceDetector()
    videoReader = cv2.VideoCapture(PATH_FILE_NAME_OF_SOURCE_VIDEO)
    #videoReader.set(cv2.CAP_PROP_FPS, 80)
    #video_fps = videoReader.get(cv2.CAP_PROP_FPS)
    videoReader.get(cv2.CAP_PROP_FPS)  # NOTE(review): return value discarded
    shouldSaveResult = (PATH_FILE_NAME_TO_SAVE_RESULT != None)

    if shouldSaveResult:
        videoSavor = VideoSavor(PATH_FILE_NAME_TO_SAVE_RESULT + "_Result",
                                videoReader)

    listOfForwardTime = []
    isCurrentFrameValid, currentImage = videoReader.read()
    while isCurrentFrameValid:
        netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(currentImage)

        # Time only the network forward pass.
        startDetectTime = time.time()
        isFighting = violenceDetector.Detect(netInput)
        endDetectTime = time.time()
        listOfForwardTime.append(endDetectTime - startDetectTime)

        # Shrink the frame so it matches the display size once bordered.
        targetSize = deploySettings.DISPLAY_IMAGE_SIZE - 2 * deploySettings.BORDER_SIZE
        currentImage = cv2.resize(currentImage, (targetSize, targetSize))
        if isFighting:
            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT,
                value=deploySettings.FIGHT_BORDER_COLOR)
            cv2.putText(resultImage, "Violence detected", (20, 40),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 225), 2)

        else:
            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT,
                value=deploySettings.NO_FIGHT_BORDER_COLOR)
            cv2.putText(resultImage, "No violence", (20, 40),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)

        # display a piece of text to the frame (so we can benchmark
        # fairly against the fast method)


#		cv2.putText(resultImage, "Slow Method", (10, 30),
#		cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

        cv2.imshow("Violence Detection", resultImage)
        if shouldSaveResult:
            videoSavor.AppendFrame(resultImage)

        userResponse = cv2.waitKey(1)
        #userResponse = cv2.waitkey(int(2000/video_fps))
        fps.update()  # count this frame towards the FPS statistics
        if userResponse == ord('q'):
            fps.stop()
            print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))  #
            print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))  #
            videoReader.release()
            cv2.destroyAllWindows()
            break

        else:
            isCurrentFrameValid, currentImage = videoReader.read()

    PrintUnsmoothedResults(violenceDetector.unsmoothedResults)
    averagedForwardTime = np.mean(listOfForwardTime)
    print("Averaged Forward Time: ", averagedForwardTime)
Пример #8
0
def DetectViolence(PATH_FILE_NAME_OF_SOURCE_VIDEO, saveresult):
    """Classify every frame of a video file and display it with a caption.

    Each frame is classified by ViolenceDetector and shown with a colored
    border plus a 'Violence :(' / 'NonViolence :)' text overlay.  Press 'q'
    to stop early.

    Args:
        PATH_FILE_NAME_OF_SOURCE_VIDEO: path of the input video.
        saveresult: truthy to also save the annotated video next to the
            source file (suffix "_Result").
    """
    violenceDetector = ViolenceDetector()
    videoReader = cv2.VideoCapture(PATH_FILE_NAME_OF_SOURCE_VIDEO)
    # Idiom fix: test truthiness instead of comparing '== True'.
    if saveresult:
        videoSavor = VideoSavor(PATH_FILE_NAME_OF_SOURCE_VIDEO + "_Result",
                                videoReader)

    listOfForwardTime = []
    isCurrentFrameValid, currentImage = videoReader.read()
    while isCurrentFrameValid:
        netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(currentImage)

        # Time only the network forward pass.
        startDetectTime = time.time()
        isFighting = violenceDetector.Detect(netInput)
        endDetectTime = time.time()
        listOfForwardTime.append(endDetectTime - startDetectTime)

        # Shrink the frame so it matches the display size once bordered.
        targetSize = deploySettings.DISPLAY_IMAGE_SIZE - 2 * deploySettings.BORDER_SIZE
        currentImage = cv2.resize(currentImage, (targetSize, targetSize))
        if isFighting:
            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT,
                value=deploySettings.FIGHT_BORDER_COLOR)
            # Using cv2.putText() method
            resultImage = cv2.putText(resultImage, 'Violence :(',
                                      deploySettings.org, deploySettings.font,
                                      deploySettings.fontScale,
                                      deploySettings.color2,
                                      deploySettings.thickness, cv2.LINE_AA)
        else:
            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT,
                value=deploySettings.NO_FIGHT_BORDER_COLOR)
            resultImage = cv2.putText(resultImage, 'NonViolence :)',
                                      deploySettings.org, deploySettings.font,
                                      deploySettings.fontScale,
                                      deploySettings.color1,
                                      deploySettings.thickness, cv2.LINE_AA)

        cv2.imshow("Violence Detection", resultImage)
        if saveresult:
            videoSavor.AppendFrame(resultImage)

        userResponse = cv2.waitKey(1)
        if userResponse == ord('q'):
            break
        isCurrentFrameValid, currentImage = videoReader.read()

    # Resource fix: release the capture and close the window also when the
    # video ends naturally (the original released only on 'q').
    videoReader.release()
    cv2.destroyAllWindows()

    PrintUnsmoothedResults(violenceDetector.unsmoothedResults)
    # Robustness: np.mean([]) warns and yields nan for an empty video.
    if listOfForwardTime:
        averagedForwardTime = np.mean(listOfForwardTime)
        print("Averaged Forward Time: ", averagedForwardTime)
def DetectViolence(PATH_FILE_NAME_OF_SOURCE_VIDEO,
                   PATH_FILE_NAME_TO_SAVE_RESULT):
    """Classify video frames and caption fighting frames with an action label.

    Besides the fight / no-fight border from ViolenceDetector, a Keras
    inception model predicts an action class for the frame; on fighting
    frames the top class name is drawn on the image.  The action classifier
    is re-run after every 50 fighting frames.  Press 'q' to stop early.

    Args:
        PATH_FILE_NAME_OF_SOURCE_VIDEO: path of the input video.
        PATH_FILE_NAME_TO_SAVE_RESULT: output path prefix, or None to skip
            saving the result video.
    """
    violenceDetector = ViolenceDetector()
    videoReader = cv2.VideoCapture(PATH_FILE_NAME_OF_SOURCE_VIDEO)
    # Idiom fix: compare against None with 'is not', not '!='.
    shouldSaveResult = PATH_FILE_NAME_TO_SAVE_RESULT is not None

    if shouldSaveResult:
        videoSavor = VideoSavor(PATH_FILE_NAME_TO_SAVE_RESULT + "_Result",
                                videoReader)

    listOfForwardTime = []
    isCurrentFrameValid, currentImage = videoReader.read()
    data = DataSet()
    model = load_model(
        '/home/furkan/five-video-classification-methods-master/inception.023-3.04.hdf5'
    )
    # Initial action-class prediction on the first frame.
    image_arr = np.expand_dims(currentImage, axis=0)
    predictions = model.predict(image_arr)

    label_predictions = {}
    for i, label in enumerate(data.classes):
        label_predictions[label] = predictions[0][i]

    sorted_lps = sorted(label_predictions.items(),
                        key=operator.itemgetter(1),
                        reverse=True)
    # Keep only the five most likely classes.
    listeString = list()
    listeValue = list()
    for i, class_prediction in enumerate(sorted_lps):
        if i > 4:
            break
        listeString.append(class_prediction[0])
        listeValue.append(class_prediction[1])
    # Hoisted out of the loop: the best class only needs computing once
    # (the original also incremented `i` by hand, a no-op under `for`).
    maxValue = max(listeValue)
    maxValueIndex = listeValue.index(maxValue)

    X = 0  # fighting frames seen since the last re-prediction
    while isCurrentFrameValid:
        netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(currentImage)
        startDetectTime = time.time()
        isFighting = violenceDetector.Detect(netInput)
        endDetectTime = time.time()
        listOfForwardTime.append(endDetectTime - startDetectTime)

        targetSize = deploySettings.DISPLAY_IMAGE_SIZE - 2 * deploySettings.BORDER_SIZE
        currentImage = cv2.resize(currentImage, (targetSize, targetSize))

        if isFighting:  # violence detected

            if X == 50:
                # Re-run the action classifier on the current frame.
                listeString.clear()
                listeValue.clear()
                image_arr = np.expand_dims(currentImage, axis=0)
                predictions = model.predict(image_arr)

                label_predictions = {}
                for i, label in enumerate(data.classes):
                    label_predictions[label] = predictions[0][i]

                sorted_lps = sorted(label_predictions.items(),
                                    key=operator.itemgetter(1),
                                    reverse=True)

                for i, class_prediction in enumerate(sorted_lps):
                    # Just get the top five.
                    if i > 4:
                        break
                    listeString.append(class_prediction[0])
                    listeValue.append(class_prediction[1])
                maxValue = max(listeValue)
                maxValueIndex = listeValue.index(maxValue)
                print(listeString[maxValueIndex], "--", maxValue)
                print(listeString[maxValueIndex])
                # Bug fix: the original assigned to a fresh lowercase `x`,
                # so the counter X stayed at 50 and the expensive
                # model.predict() ran on every subsequent fighting frame.
                X = 0

            else:
                X += 1

            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT,
                value=deploySettings.FIGHT_BORDER_COLOR)
            font = cv2.FONT_HERSHEY_SIMPLEX
            bottomLeftCornerOfText = (10, 300)
            fontScale = 1
            fontColor = (255, 255, 255)
            lineType = 2

            # Caption the frame with the most likely action class.
            cv2.putText(resultImage, listeString[maxValueIndex],
                        bottomLeftCornerOfText, font, fontScale, fontColor,
                        lineType)
            print(listeString[maxValueIndex], "--", maxValue)

        else:
            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT,
                value=deploySettings.NO_FIGHT_BORDER_COLOR)

        cv2.imshow("Violence Detection", resultImage)
        if shouldSaveResult:
            videoSavor.AppendFrame(resultImage)

        userResponse = cv2.waitKey(1)
        if userResponse == ord('q'):
            break
        isCurrentFrameValid, currentImage = videoReader.read()

    # Resource fix: release/close also when the video ends naturally.
    videoReader.release()
    cv2.destroyAllWindows()

    PrintUnsmoothedResults(violenceDetector.unsmoothedResults)
    averagedForwardTime = np.mean(listOfForwardTime)
    print("Averaged Forward Time: ", averagedForwardTime)