Code Example #1
def DetectViolence(PATH_FILE_NAME_OF_SOURCE_VIDEO,
                   PATH_FILE_NAME_TO_SAVE_RESULT):
    violenceDetector = ViolenceDetector()
    videoReader = cv2.VideoCapture(PATH_FILE_NAME_OF_SOURCE_VIDEO)
    shouldSaveResult = (PATH_FILE_NAME_TO_SAVE_RESULT is not None)

    if shouldSaveResult:
        videoSavor = VideoSavor(PATH_FILE_NAME_TO_SAVE_RESULT + "_Result",
                                videoReader)

    listOfForwardTime = []
    isCurrentFrameValid, currentImage = videoReader.read()
    while isCurrentFrameValid:
        netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(currentImage)

        startDetectTime = time.time()
        isFighting = violenceDetector.Detect(netInput)
        endDetectTime = time.time()
        listOfForwardTime.append(endDetectTime - startDetectTime)

        targetSize = deploySettings.DISPLAY_IMAGE_SIZE - 2 * deploySettings.BORDER_SIZE
        currentImage = cv2.resize(currentImage, (targetSize, targetSize))
        if isFighting:
            print("isFighting")
            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT,
                value=deploySettings.FIGHT_BORDER_COLOR)

        else:
            print("is't Fighting")
            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT,
                value=deploySettings.NO_FIGHT_BORDER_COLOR)

        cv2.imshow("Violence Detection", resultImage)
        if shouldSaveResult:
            videoSavor.AppendFrame(resultImage)

        userResponse = cv2.waitKey(1)
        if userResponse == ord('q'):
            videoReader.release()
            cv2.destroyAllWindows()
            break

        else:
            isCurrentFrameValid, currentImage = videoReader.read()

    # PrintUnsmoothedResults(violenceDetector.unsmoothedResults)
    averagedForwardTime = np.mean(listOfForwardTime)
    print("Averaged Forward Time: ", averagedForwardTime)
Code Example #2
def DetectViolence():
    violenceDetector = ViolenceDetector()
    videoReader = cv2.VideoCapture(
        "rtsp://*****:*****@118.70.125.33:8554/CH001.sdp")
    count = 0
    listOfForwardTime = []  # accumulate forward times across all frames
    while True:
        isCurrentFrameValid, currentImage = videoReader.read()
        if not isCurrentFrameValid:
            break
        # print(isCurrentFrameValid)
        netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(currentImage)
        startDetectTime = time.time()
        isFighting = violenceDetector.Detect(netInput)
        print(isFighting)
        endDetectTime = time.time()
        listOfForwardTime.append(endDetectTime - startDetectTime)
        targetSize = deploySettings.DISPLAY_IMAGE_SIZE - 2 * deploySettings.BORDER_SIZE
        currentImage = cv2.resize(currentImage, (targetSize, targetSize))
        if isFighting:
            count += 1
            if count == 5:
                print("saving...")
                cv2.imwrite('image.jpg', currentImage)  # save the image
                # send mail
                sendEmail.sendMail(
                    '*****@*****.**', '12345Aa@',
                    '*****@*****.**', 'Fighting', 'Have Fight',
                    'C:/Users/anlan/OneDrive/Desktop/Project_Violence/ViolenceDetection-master_main/image.jpg'
                )
            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT,
                value=deploySettings.FIGHT_BORDER_COLOR)
            # copyMakeBorder adds a colored border around the video frame
        else:
            if count > 5:
                count = 0
            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT,
                value=deploySettings.NO_FIGHT_BORDER_COLOR)
        cv2.imshow("Violence Detection", resultImage)
        print("count", count)
        userResponse = cv2.waitKey(1)
        if userResponse == ord('q'):
            videoReader.release()
            cv2.destroyAllWindows()
            break

        # the next frame is read at the top of the loop
    averagedForwardTime = np.mean(listOfForwardTime)
    print("Averaged Forward Time: ", averagedForwardTime)
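The sendEmail.sendMail helper used above is not shown in this listing. A minimal sketch of what it might look like, assuming the (sender, password, recipient, subject, body, attachmentPath) argument order used above and a Gmail-style SMTP server:

import smtplib
from email.message import EmailMessage

def sendMail(sender, password, recipient, subject, body, attachmentPath):
    # build a message with the alert image attached
    msg = EmailMessage()
    msg['From'], msg['To'], msg['Subject'] = sender, recipient, subject
    msg.set_content(body)
    with open(attachmentPath, 'rb') as attachment:
        msg.add_attachment(attachment.read(), maintype='image',
                           subtype='jpeg', filename='alert.jpg')
    # the SMTP host/port are assumptions; adjust for the actual mail provider
    with smtplib.SMTP_SSL('smtp.gmail.com', 465) as server:
        server.login(sender, password)
        server.send_message(msg)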
Code Example #3
def Detector(frames, methods=['GET', 'POST']):
    dataJson = json.loads(str(frames).replace('\'', '\"'))
    for item in dataJson['data']:
        netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(readb64(item["img"]))
        isFighting = violenceDetector.Detect(netInput)
        # violence detected: 'Baslangic' marks a start time, 'Bitis' an end time
        if isFighting:
            response = {'isDone': 'false', 'message': 'Baslangic:' + str(item['time'])}
            socketio.emit('Detector', response, callback=MessageReceived)
        else:
            response = {'isDone': 'false', 'message': 'Bitis:' + str(item['time'])}
            socketio.emit('Detector', response, callback=MessageReceived)
    response = {'isDone': 'true', 'message': 'tespit bitti'}  # "detection finished"
    socketio.emit('Detector', response, callback=MessageReceived)
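The readb64 helper is external to this example. A plausible sketch, assuming item["img"] holds a base64-encoded image (possibly with a data-URL prefix):

import base64
import cv2
import numpy as np

def readb64(encodedImage):
    # strip a possible "data:image/...;base64," prefix
    if ',' in encodedImage:
        encodedImage = encodedImage.split(',', 1)[1]
    imageBuffer = np.frombuffer(base64.b64decode(encodedImage), dtype=np.uint8)
    return cv2.imdecode(imageBuffer, cv2.IMREAD_COLOR)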
Code Example #4
def DetectViolence(PATH_FILE_NAME_OF_SOURCE_VIDEO, PATH_FILE_NAME_TO_SAVE_RESULT):
	violenceDetector = ViolenceDetector()
	videoReader = cv2.VideoCapture(PATH_FILE_NAME_OF_SOURCE_VIDEO)
	print(videoReader)
	shouldSaveResult = (PATH_FILE_NAME_TO_SAVE_RESULT is not None)

	if shouldSaveResult:
		videoSavor = VideoSavor(PATH_FILE_NAME_TO_SAVE_RESULT + "_Result", videoReader)

	listOfForwardTime = []
	isCurrentFrameValid, currentImage = videoReader.read()
	count = 0
	while isCurrentFrameValid:
		print(isCurrentFrameValid)
		netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(currentImage)
		startDetectTime = time.time()
		isFighting = violenceDetector.Detect(netInput)
		print(isFighting)
		endDetectTime = time.time()
		listOfForwardTime.append(endDetectTime - startDetectTime)
		targetSize = deploySettings.DISPLAY_IMAGE_SIZE - 2*deploySettings.BORDER_SIZE
		currentImage = cv2.resize(currentImage, (targetSize, targetSize))
		if isFighting:			
			count += 1
			if count == 5:
				print("saving...")
				cv2.imwrite('image.jpg',currentImage)
			resultImage = cv2.copyMakeBorder(
				currentImage,
				deploySettings.BORDER_SIZE, deploySettings.BORDER_SIZE,
				deploySettings.BORDER_SIZE, deploySettings.BORDER_SIZE,
				cv2.BORDER_CONSTANT, value=deploySettings.FIGHT_BORDER_COLOR)
		else:
			resultImage = cv2.copyMakeBorder(
				currentImage,
				deploySettings.BORDER_SIZE, deploySettings.BORDER_SIZE,
				deploySettings.BORDER_SIZE, deploySettings.BORDER_SIZE,
				cv2.BORDER_CONSTANT, value=deploySettings.NO_FIGHT_BORDER_COLOR)
		cv2.imshow("Violence Detection", resultImage)
		print("count",count)
		if shouldSaveResult:
			print("shouldSaveResult",shouldSaveResult)
			videoSavor.AppendFrame(resultImage)
		userResponse = cv2.waitKey(1)
		if userResponse == ord('q'):
			videoReader.release()
			cv2.destroyAllWindows()
			break

		else:
			isCurrentFrameValid, currentImage = videoReader.read()
	# cv2.imshow('image',image)
	# PrintUnsmoothedResults(violenceDetector.unsmoothedResults)
	averagedForwardTime = np.mean(listOfForwardTime)
	if count > 5:
		print("truee")
		sendEmail.sendMail('*****@*****.**', '12345Aa@', '*****@*****.**',
		                   'Fighting', 'Have Fight',
		                   'C:/Users/anlan/OneDrive/Desktop/Project_Violence/ViolenceDetection-master_main/image.jpg')
Code Example #5
def SaveCurrentBatchData(self):
    batchOfImages = self._batchData.batchOfImages
    for eachBatch in range(batchOfImages.shape[0]):
        for eachFrame in range(batchOfImages.shape[1]):
            # Note: for the Group Dimension, we ONLY need one image; therefore, pick the last group-index image.
            cvFormatImage = ImageUtils.ConvertImageFrom_NetInput_to_CV(
                batchOfImages[eachBatch, eachFrame, -1])
            pathToSaveImage = os.path.join(
                trainSettings.PATH_TO_SAVE_MODEL,
                "save_epoch_" + str(self._dataManager.epoch))
            if not os.path.exists(pathToSaveImage):
                os.makedirs(pathToSaveImage)
            fileName = str(self._dataManager.step) + "_" + str(
                eachBatch) + "_" + str(eachFrame) + ".jpg"
            cv2.imwrite(os.path.join(pathToSaveImage, fileName),
                        cvFormatImage)
Code Example #6
def Detector(frames, methods=['GET', 'POST']):
    dataFrames = list()
    count = 0  # frame counter
    dataJson = json.loads(str(frames).replace('\'', '\"'))
    for item in dataJson['data']:
        # decode the base64-encoded frame into an OpenCV image
        currentImage = readb64(item)
        dataFrames.append(currentImage)
        count += 1
        netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(currentImage)
        startDetectTime = time.time()
        isFighting = violenceDetector.Detect(netInput)
        endDetectTime = time.time()

        targetSize = deploySettings.DISPLAY_IMAGE_SIZE - 2 * deploySettings.BORDER_SIZE
        currentImage = cv2.resize(currentImage, (targetSize, targetSize))

        if isFighting:  # violence detected
            if len(baslangicsn) == len(bitissn):
                baslangicsn.append(count / 25)
            response = {
                'isDone': 'false',
                'message': 'Baslangic:' + str(count / 25)
            }
            socketio.emit('Detector', response, callback=messageReceived)

        else:
            if len(baslangicsn) != len(bitissn):
                bitissn.append(count / 25)
            response = {
                'isDone': 'false',
                'message': 'Bitis:' + str(count / 25)
            }
            socketio.emit('Detector', response, callback=messageReceived)
    print("Eleman sayisi:" + str(len(dataFrames)))
    bitissn.append(count / 25)
    response = {'isDone': 'true', 'message': 'tespit bitti'}
    socketio.emit('Detector', response, callback=messageReceived)
Code Example #7
File: detector.py  Project: murat199/ViolenceDetector
def SocketDetectorWebcam(frames, methods=['GET', 'POST']):
    dataJson = json.loads(str(frames).replace('\'','\"'))
    isStarted=0
    for item in dataJson['data']:
        netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(readb64(item["img"]))
        isFighting = violenceDetector.Detect(netInput)
        # violence detected
        if isFighting:
            #response={'isFight':'true'}
            #socketio.emit('SocketDetectorState', response, callback=MessageReceived)
            isStarted = 1
            response = {'isStarted': str(isStarted), 'isDone': 'false', 'message': str(item['time'])}
            socketio.emit('SocketDetectorComplete', response, callback=MessageReceived)
        else:
            #response={'isFight':'false'}
            #socketio.emit('SocketDetectorState', response, callback=MessageReceived)
            response = {'isStarted': str(isStarted), 'isDone': 'true', 'message': str(item['time'])}
            socketio.emit('SocketDetectorComplete', response, callback=MessageReceived)
            isStarted = 0
    response = {'isComplete': 'true', 'message': 'tespit bitti'}  # "detection finished"
    socketio.emit('SocketDetectorComplete', response, callback=MessageReceived)
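This handler depends on a Flask-SocketIO setup that the listing omits. A minimal sketch of the surrounding wiring, with hypothetical event and handler names:

from flask import Flask
from flask_socketio import SocketIO

app = Flask(__name__)
socketio = SocketIO(app)

def MessageReceived():
    # acknowledgment callback invoked when the client receives an event
    print('message was received')

@socketio.on('webcamFrames')  # hypothetical event name
def handleWebcamFrames(frames):
    SocketDetectorWebcam(frames)

if __name__ == '__main__':
    socketio.run(app)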
Code Example #8
def upload():
    target = os.path.join(APP_ROOT, 'videos/')
    print(target)
    if not os.path.isdir(target):
        os.mkdir(target)
    print(request.files.getlist("file"))
    for upload in request.files.getlist("file"):
        print(upload)
        print("{} is the file name".format(upload.filename))
        filename = upload.filename
        # This is to verify files are supported
        ext = os.path.splitext(filename)[1]
        if (ext == ".mp4") or (ext == ".mov"):
            print("File supported moving on...")
        else:
            return render_template("index_upload.html",
                                   message="Files uploaded are not supported...")
        destination = "/".join([target, filename])
        print("Accept incoming file:", filename)
        print("Save it to:", destination)
        upload.save(destination)

        vidcap = cv2.VideoCapture(destination)
        success, image = vidcap.read()
        count = 0
        message = ""
        while success:
            netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(image)
            isFighting = violenceDetector.Detect(netInput)
            # violence detected
            if isFighting:
                message += "Siddet Basladi."  # "violence started"
            else:
                message += "Siddet Bitti."  # "violence ended"
            count += 1
            success, image = vidcap.read()
    # return send_from_directory("images", filename, as_attachment=True)
    return render_template("index_upload.html", message=message)
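The upload() view assumes a surrounding Flask application that defines APP_ROOT, violenceDetector, and a route; a minimal sketch of that wiring (the route path is an assumption):

import os
from flask import Flask, request, render_template

app = Flask(__name__)
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
violenceDetector = ViolenceDetector()

@app.route('/upload', methods=['POST'])  # hypothetical route
def upload():
    ...  # body as in the example above

if __name__ == '__main__':
    app.run(debug=True)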
Code Example #9
    def LoadVideoImages(self, dataAugmentFunction_=None):
        '''
            This function will block the current thread until the images are loaded.
        '''
        try:
            rgbImages = skvideo.io.vread(self.name)
            numberOfLoadedImages = rgbImages.shape[0]
            if self.totalFrames != numberOfLoadedImages:
                print("Warning! self.totalFrames (=" + str(self.totalFrames) +
                      ") != loadedImages(=" + str(numberOfLoadedImages) + ")!")
                print(
                    "\t This may be due to an inconsistency between OpenCV & Sk-Video..."
                )
                self.totalFrames = numberOfLoadedImages
                self._calculateLabels()

            if dataAugmentFunction_ is not None:
                rgbImages = dataAugmentFunction_(rgbImages)

            self._images = np.zeros([
                numberOfLoadedImages, dataSettings.IMAGE_SIZE,
                dataSettings.IMAGE_SIZE, dataSettings.IMAGE_CHANNELS
            ])
            for i in range(numberOfLoadedImages):
                self._images[
                    i] = ImageUtils.ConvertImageFrom_RGB255_to_NetInput(
                        rgbImages[i])

            self.hasImages = True

        except Exception as error:
            print("---------------------------------------------")
            print("Video: " + self.name)
            print(error)
            print("ignore the video because of the above error...")
            print("---------------------------------------------")
            self.hasImages = False
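The dataAugmentFunction_ hook receives the full (numFrames, height, width, channels) RGB array loaded by skvideo and must return an array of the same shape. A hypothetical horizontal-flip augmenter, applied to an assumed video object:

def flipFramesHorizontally(rgbImages):
    # rgbImages: (numFrames, height, width, channels) array; reverse the width axis
    return rgbImages[:, :, ::-1, :]

video.LoadVideoImages(dataAugmentFunction_=flipFramesHorizontally)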
Code Example #10
def DetectViolence(PATH_FILE_NAME_OF_SOURCE_VIDEO,
                   PATH_FILE_NAME_TO_SAVE_RESULT):

    violenceDetector = ViolenceDetector()
    videoReader = cv2.VideoCapture(PATH_FILE_NAME_OF_SOURCE_VIDEO)
    shouldSaveResult = (PATH_FILE_NAME_TO_SAVE_RESULT is not None)

    if shouldSaveResult:
        videoSavor = VideoSavor(PATH_FILE_NAME_TO_SAVE_RESULT + "_Result",
                                videoReader)

    listOfForwardTime = []
    isCurrentFrameValid, currentImage = videoReader.read()
    data = DataSet()
    model = load_model(
        '/home/furkan/five-video-classification-methods-master/inception.023-3.04.hdf5'
    )
    # Predict.
    image_arr = np.expand_dims(currentImage, axis=0)
    predictions = model.predict(image_arr)

    label_predictions = {}
    for i, label in enumerate(data.classes):
        label_predictions[label] = predictions[0][i]

    sorted_lps = sorted(label_predictions.items(),
                        key=operator.itemgetter(1),
                        reverse=True)
    listeString = list()
    listeValue = list()
    for i, class_prediction in enumerate(sorted_lps):
        # Just get the top five.
        if i > 4:
            break
        #print("%s: %.2f" % (class_prediction[0], class_prediction[1]))
        listeString.append(class_prediction[0])
        listeValue.append(class_prediction[1])
        maxValue = max(listeValue)
        maxValueIndex = listeValue.index(maxValue)
        #print(maxValueIndex,"--",maxValue)
        #print(listeString[maxValueIndex])

        i += 1

    X = 0
    while isCurrentFrameValid:
        netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(currentImage)
        startDetectTime = time.time()
        isFighting = violenceDetector.Detect(netInput)
        endDetectTime = time.time()
        listOfForwardTime.append(endDetectTime - startDetectTime)

        targetSize = deploySettings.DISPLAY_IMAGE_SIZE - 2 * deploySettings.BORDER_SIZE
        currentImage = cv2.resize(currentImage, (targetSize, targetSize))

        if isFighting:  # violence detected

            if X == 50:
                listeString.clear()
                listeValue.clear()
                image_arr = np.expand_dims(currentImage, axis=0)
                predictions = model.predict(image_arr)

                label_predictions = {}
                for i, label in enumerate(data.classes):
                    label_predictions[label] = predictions[0][i]

                sorted_lps = sorted(label_predictions.items(),
                                    key=operator.itemgetter(1),
                                    reverse=True)

                for i, class_prediction in enumerate(sorted_lps):
                    # Just get the top five.
                    if i > 4:
                        break
                    #print("%s: %.2f" % (class_prediction[0], class_prediction[1]))
                    listeString.append(class_prediction[0])
                    listeValue.append(class_prediction[1])
                    maxValue = 0
                    maxValue = max(listeValue)
                    maxValueIndex = listeValue.index(maxValue)
                    print(listeString[maxValueIndex], "--", maxValue)
                    print(listeString[maxValueIndex])

                    i += 1
                X = 0  # reset the frame counter so re-classification runs again after 50 frames

            else:
                X += 1

            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT,
                value=deploySettings.FIGHT_BORDER_COLOR)
            font = cv2.FONT_HERSHEY_SIMPLEX
            bottomLeftCornerOfText = (10, 300)
            fontScale = 1
            fontColor = (255, 255, 255)
            lineType = 2

            cv2.putText(resultImage, listeString[maxValueIndex],
                        bottomLeftCornerOfText, font, fontScale, fontColor,
                        lineType)
            print(listeString[maxValueIndex], "--", maxValue)

        else:
            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT,
                value=deploySettings.NO_FIGHT_BORDER_COLOR)

        cv2.imshow("Violence Detection", resultImage)
        if shouldSaveResult:
            videoSavor.AppendFrame(resultImage)

        userResponse = cv2.waitKey(1)
        if userResponse == ord('q'):
            videoReader.release()
            cv2.destroyAllWindows()
            break

        else:
            isCurrentFrameValid, currentImage = videoReader.read()

    PrintUnsmoothedResults(violenceDetector.unsmoothedResults)
    averagedForwardTime = np.mean(listOfForwardTime)
    print("Averaged Forward Time: ", averagedForwardTime)
Code Example #11
File: Run_demo.py  Project: tiniltom/project
def DetectViolence(PATH_FILE_NAME_OF_SOURCE_VIDEO, saveresult):
    violenceDetector = ViolenceDetector()
    videoReader = cv2.VideoCapture(PATH_FILE_NAME_OF_SOURCE_VIDEO)
    if saveresult:
        videoSavor = VideoSavor(PATH_FILE_NAME_OF_SOURCE_VIDEO + "_Result",
                                videoReader)

    listOfForwardTime = []
    isCurrentFrameValid, currentImage = videoReader.read()
    while isCurrentFrameValid:
        netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(currentImage)

        startDetectTime = time.time()
        isFighting = violenceDetector.Detect(netInput)
        endDetectTime = time.time()
        listOfForwardTime.append(endDetectTime - startDetectTime)

        targetSize = deploySettings.DISPLAY_IMAGE_SIZE - 2 * deploySettings.BORDER_SIZE
        currentImage = cv2.resize(currentImage, (targetSize, targetSize))
        if isFighting:
            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT,
                value=deploySettings.FIGHT_BORDER_COLOR)
            # Using cv2.putText() method
            resultImage = cv2.putText(resultImage, 'Violence :(',
                                      deploySettings.org, deploySettings.font,
                                      deploySettings.fontScale,
                                      deploySettings.color2,
                                      deploySettings.thickness, cv2.LINE_AA)
        else:
            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT,
                value=deploySettings.NO_FIGHT_BORDER_COLOR)
            resultImage = cv2.putText(resultImage, 'NonViolence :)',
                                      deploySettings.org, deploySettings.font,
                                      deploySettings.fontScale,
                                      deploySettings.color1,
                                      deploySettings.thickness, cv2.LINE_AA)

        cv2.imshow("Violence Detection", resultImage)
        if saveresult:
            videoSavor.AppendFrame(resultImage)

        userResponse = cv2.waitKey(1)
        if userResponse == ord('q'):
            videoReader.release()
            cv2.destroyAllWindows()
            break

        else:
            isCurrentFrameValid, currentImage = videoReader.read()

    PrintUnsmoothedResults(violenceDetector.unsmoothedResults)
    averagedForwardTime = np.mean(listOfForwardTime)
    print("Averaged Forward Time: ", averagedForwardTime)
Code Example #12
def DetectViolence(PATH_FILE_NAME_OF_SOURCE_VIDEO):
    videoReader = cv2.VideoCapture(PATH_FILE_NAME_OF_SOURCE_VIDEO)
    listOfForwardTime = []
    # isCurrentFrameValid: True while frames remain to be read
    # currentImage: the current frame
    isCurrentFrameValid, currentImage = videoReader.read()
    cnt = 0
    tol_cnt = 0
    fighting = False

    while isCurrentFrameValid:
        netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(currentImage)

        startDetectTime = time.time()

        # detecting
        isFighting = violenceDetector.Detect(netInput)
        endDetectTime = time.time()
        listOfForwardTime.append(endDetectTime - startDetectTime)

        targetSize = deploySettings.DISPLAY_IMAGE_SIZE - 2 * deploySettings.BORDER_SIZE
        resizedImage = cv2.resize(currentImage, (targetSize, targetSize))
        if isFighting:
            image_list[cnt] = currentImage
            cnt += 1
            resultImage = cv2.copyMakeBorder(
                resizedImage,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT,
                value=deploySettings.FIGHT_BORDER_COLOR)

        else:
            tol_cnt += 1
            if tol_cnt > deploySettings.TOLERANCE:
                tol_cnt = 0
                cnt = 0
            resultImage = cv2.copyMakeBorder(
                resizedImage,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT,
                value=deploySettings.NO_FIGHT_BORDER_COLOR)

        cv2.imshow("Violence Detection", resultImage)

        userResponse = cv2.waitKey(1)
        if userResponse == ord('q'):
            videoReader.release()
            cv2.destroyAllWindows()
            break

        else:
            isCurrentFrameValid, currentImage = videoReader.read()

        if cnt >= deploySettings.THRESHOLD:
            fighting = True
            break

    # boolean return value
    PrintUnsmoothedResults(violenceDetector.unsmoothedResults)
    averagedForwardTime = np.mean(listOfForwardTime)
    print("Averaged Forward Time: ", averagedForwardTime)
    # if violence was detected,
    # fighting = True: raise the alarm!
    return fighting
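Because this variant returns a boolean once cnt reaches deploySettings.THRESHOLD, a caller can treat it as an alarm trigger. A short sketch with a hypothetical clip path and alarm hook:

if DetectViolence('cctv_clip.avi'):
    # hypothetical alarm hook; replace with the project's real notifier
    print('ALERT: sustained violence detected!')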
Code Example #13
def DetectViolence(PATH_FILE_NAME_OF_SOURCE_VIDEO,
                   PATH_FILE_NAME_TO_SAVE_RESULT):
    violenceDetector = ViolenceDetector()
    videoReader = cv2.VideoCapture(PATH_FILE_NAME_OF_SOURCE_VIDEO)
    #videoReader.set(cv2.CAP_PROP_FPS, 80)
    #video_fps = videoReader.get(cv2.CAP_PROP_FPS)
    videoReader.get(cv2.CAP_PROP_FPS)
    shouldSaveResult = (PATH_FILE_NAME_TO_SAVE_RESULT is not None)

    if shouldSaveResult:
        videoSavor = VideoSavor(PATH_FILE_NAME_TO_SAVE_RESULT + "_Result",
                                videoReader)

    listOfForwardTime = []
    isCurrentFrameValid, currentImage = videoReader.read()
    while isCurrentFrameValid:
        netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(currentImage)

        startDetectTime = time.time()
        isFighting = violenceDetector.Detect(netInput)
        endDetectTime = time.time()
        listOfForwardTime.append(endDetectTime - startDetectTime)

        targetSize = deploySettings.DISPLAY_IMAGE_SIZE - 2 * deploySettings.BORDER_SIZE
        currentImage = cv2.resize(currentImage, (targetSize, targetSize))
        if isFighting:
            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT,
                value=deploySettings.FIGHT_BORDER_COLOR)
            cv2.putText(resultImage, "Violence detected", (20, 40),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 225), 2)

        else:
            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT,
                value=deploySettings.NO_FIGHT_BORDER_COLOR)
            cv2.putText(resultImage, "No violence", (20, 40),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)

        # display a piece of text on the frame (so we can benchmark
        # fairly against the fast method)
        # cv2.putText(resultImage, "Slow Method", (10, 30),
        #             cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

        cv2.imshow("Violence Detection", resultImage)
        if shouldSaveResult:
            videoSavor.AppendFrame(resultImage)

        userResponse = cv2.waitKey(1)
        #userResponse = cv2.waitkey(int(2000/video_fps))
        fps.update()  #
        if userResponse == ord('q'):
            fps.stop()
            print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))  #
            print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))  #
            videoReader.release()
            cv2.destroyAllWindows()
            break

        else:
            isCurrentFrameValid, currentImage = videoReader.read()

    PrintUnsmoothedResults(violenceDetector.unsmoothedResults)
    averagedForwardTime = np.mean(listOfForwardTime)
    print("Averaged Forward Time: ", averagedForwardTime)
Code Example #14
def Check_TrainDataManager():
    pauseLoadData = False

    print("Start reading videos...")
    dataManager = TrainDataManager(PATH_TO_DATA)
    print("Read videos finished.")

    while True:
        '''
            The following info should be extracted before calling 'AssignBatchData()'.
        '''
        listOfBatchInfo = []
        listOfBatchInfo.append('dataManager.epoch=' + str(dataManager.epoch))
        listOfBatchInfo.append('dataManager.step=' + str(dataManager.step))

        batchData = BatchData()
        startGetBatchTime = time.time()
        dataManager.AssignBatchData(batchData)
        finishGetBatchTime = time.time()
        print("GetBatchTime = ", finishGetBatchTime - startGetBatchTime)

        info = dataManager.GetQueueInfo()
        print("\t" + info + "\n")

        batchData.batchOfImages = batchData.batchOfImages.reshape([
            batchData.batchSize * batchData.unrolledSize *
            batchData.groupedSize, dataSettings.IMAGE_SIZE,
            dataSettings.IMAGE_SIZE, dataSettings.IMAGE_CHANNELS
        ])
        batchData.batchOfLabels = batchData.batchOfLabels.reshape([
            batchData.batchSize * batchData.unrolledSize,
            dataSettings.NUMBER_OF_CATEGORIES
        ])

        i = 0
        while i < batchData.batchOfImages.shape[0]:
            currentImage = batchData.batchOfImages[i]
            currentImage = ImageUtils.ConvertImageFrom_NetInput_to_CV(
                currentImage)
            currentImage = cv2.resize(currentImage, (500, 500))
            currentLabel = batchData.batchOfLabels[int(
                i / dataSettings.GROUPED_SIZE)]

            listOfInfoToDisplay = []
            listOfInfoToDisplay += listOfBatchInfo
            listOfInfoToDisplay.append('i = ' + str(i))
            listOfInfoToDisplay.append('batchImages.shape = ' +
                                       str(batchData.batchOfImages.shape))
            listOfInfoToDisplay.append('label = ' + str(currentLabel))
            resultImage = DrawInfo(currentImage, listOfInfoToDisplay)

            cv2.imshow("Result", resultImage)

            userResponse = cv2.waitKey(0)
            if userResponse == ord('n'):
                i += 1

            elif userResponse == ord('l'):
                i = batchData.batchOfImages.shape[0] - 1

            elif userResponse == ord('p'):
                pauseLoadData = not pauseLoadData
                if pauseLoadData:
                    dataManager.Pause()
                else:
                    dataManager.Continue()

            elif userResponse == ord('q'):
                dataManager.Stop()
                raise StopIteration()
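In this inspection loop, 'n' advances one frame, 'l' jumps to the last frame of the batch, 'p' toggles pausing the background data loader, and 'q' stops the manager and exits via StopIteration.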
Code Example #15
def DetectViolence(PATH_FILE_NAME_OF_SOURCE_VIDEO,
                   PATH_FILE_NAME_TO_SAVE_RESULT):

    violenceDetector = ViolenceDetector()
    videoReader = cv2.VideoCapture(PATH_FILE_NAME_OF_SOURCE_VIDEO)
    shouldSaveResult = (PATH_FILE_NAME_TO_SAVE_RESULT is not None)

    if shouldSaveResult:
        videoSavor = VideoSavor(PATH_FILE_NAME_TO_SAVE_RESULT + "_Result",
                                videoReader)

    listOfForwardTime = []
    isCurrentFrameValid, currentImage = videoReader.read()

    count = 0
    baslangicsn = list()
    bitissn = list()
    while isCurrentFrameValid:
        count += 1
        netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(currentImage)
        startDetectTime = time.time()
        isFighting = violenceDetector.Detect(netInput)
        endDetectTime = time.time()
        listOfForwardTime.append(endDetectTime - startDetectTime)

        targetSize = deploySettings.DISPLAY_IMAGE_SIZE - 2 * deploySettings.BORDER_SIZE
        currentImage = cv2.resize(currentImage, (targetSize, targetSize))

        if isFighting:  # violence detected
            p = 0
            font = cv2.FONT_HERSHEY_SIMPLEX
            bottomLeftCornerOfText = (10, 50)
            fontScale = 1
            fontColor = (255, 255, 255)
            lineType = 2
            if len(baslangicsn) == len(bitissn):
                baslangicsn.append(count / 25)

            cv2.putText(currentImage, "Siddet tespit edildi",
                        bottomLeftCornerOfText, font, fontScale, fontColor,
                        lineType)
            bottomLeftCornerOfText = (10, 450)

        else:

            if len(baslangicsn) != len(bitissn):
                bitissn.append(count / 25)

            font = cv2.FONT_HERSHEY_SIMPLEX
            bottomLeftCornerOfText = (10, 450)
            fontScale = 1
            fontColor = (255, 255, 255)
            lineType = 2
            cv2.putText(currentImage, "Siddet tespit edilmedi",
                        bottomLeftCornerOfText, font, fontScale, fontColor,
                        lineType)

        cv2.imshow("Violence Detection", currentImage)

        if shouldSaveResult:
            videoSavor.AppendFrame(currentImage)

        userResponse = cv2.waitKey(1)
        if userResponse == ord('q'):
            videoReader.release()
            cv2.destroyAllWindows()
            break

        else:
            isCurrentFrameValid, currentImage = videoReader.read()

    PrintUnsmoothedResults(violenceDetector.unsmoothedResults)
    averagedForwardTime = np.mean(listOfForwardTime)
    bitissn.append(count / 25)
    print(len(baslangicsn), "-------", len(bitissn))

    for baslangic, bitis in zip(baslangicsn, bitissn):
        # "tespit edilen sureler" = detected time spans
        print("tespit edilen sureler", baslangic, "------", bitis)
    print("----son----")  # "the end"

    print("Averaged Forward Time: ", averagedForwardTime)
Code Example #16
def DetectViolence(PATH_FILE_NAME_TO_SAVE_RESULT):
    # font for text used on video frames
    font = cv2.FONT_HERSHEY_SIMPLEX

    violenceDetector = ViolenceDetector()
    capture = cv2.VideoCapture(0)
    shouldSaveResult = (PATH_FILE_NAME_TO_SAVE_RESULT is not None)

    if shouldSaveResult:
        videoSavor = VideoSavor(PATH_FILE_NAME_TO_SAVE_RESULT + "_Result",
                                capture)

    listOfForwardTime = []

    # Get some properties of VideoCapture (frame width, frame height and frames per second (fps)):
    frame_width = capture.get(cv2.CAP_PROP_FRAME_WIDTH)
    frame_height = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
    fps = capture.get(cv2.CAP_PROP_FPS)

    # Print these values:
    print("CV_CAP_PROP_FRAME_WIDTH: '{}'".format(frame_width))
    print("CV_CAP_PROP_FRAME_HEIGHT : '{}'".format(frame_height))
    print("CAP_PROP_FPS : '{}'".format(fps))

    # Check if camera opened successfully
    if not capture.isOpened():
        print("Error opening the camera")

    flag = False
    # Read until video is completed
    while capture.isOpened():
        # Capture frame-by-frame from the camera
        ret, frame = capture.read()

        if ret:
            # Display the captured frame:
            # cv2.imshow('Input frame from the camera', frame)
            netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(frame)

            startDetectTime = time.time()
            isFighting = violenceDetector.Detect(netInput)
            endDetectTime = time.time()
            listOfForwardTime.append(endDetectTime - startDetectTime)

            targetSize = deploySettings.DISPLAY_IMAGE_SIZE - 2 * deploySettings.BORDER_SIZE
            currentImage = cv2.resize(frame, (targetSize, targetSize))
            if isFighting:
                resultImage = cv2.copyMakeBorder(
                    frame,
                    deploySettings.BORDER_SIZE,
                    deploySettings.BORDER_SIZE,
                    deploySettings.BORDER_SIZE,
                    deploySettings.BORDER_SIZE,
                    cv2.BORDER_CONSTANT,
                    value=deploySettings.FIGHT_BORDER_COLOR)

            else:
                resultImage = cv2.copyMakeBorder(
                    frame,
                    deploySettings.BORDER_SIZE,
                    deploySettings.BORDER_SIZE,
                    deploySettings.BORDER_SIZE,
                    deploySettings.BORDER_SIZE,
                    cv2.BORDER_CONSTANT,
                    value=deploySettings.NO_FIGHT_BORDER_COLOR)
            # frameText = "Violence Detected!" if isFighting else "No Violence Detected."
            # textColor = deploySettings.FIGHT_BORDER_COLOR if isFighting else deploySettings.NO_FIGHT_BORDER_COLOR
            # cv2.putText(frame, frameText, (50, 50), font, 4, (0, 255, 0), 2, cv2.LINE_AA)
            cv2.imshow("Violence Detection", resultImage)
            if shouldSaveResult:
                videoSavor.AppendFrame(resultImage)

            userResponse = cv2.waitKey(1)
            if userResponse == ord('q'):
                capture.release()
                cv2.destroyAllWindows()
                flag = True
                break
            # the next frame is read at the top of the loop
        print("Details about current frame:")
        PrintUnsmoothedResults(violenceDetector.unsmoothedResults)
        averagedForwardTime = np.mean(listOfForwardTime)
        # print("Averaged Forward Time: ", averagedForwardTime)

        if flag:
            break
Code Example #17
cap = cv2.VideoCapture(0)
violenceDetector = ViolenceDetector()

count = 0
baslangicsn = list()
bitissn = list()

while True:
    # Capture frame-by-frame
    ret, currentImage = cap.read()
    # do what you want with frame
    #  and then save to file
    cv2.imwrite('/home/murat/Desktop/image.png', currentImage)
    count +=1
    netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(currentImage)
    startDetectTime = time.time()
    isFighting = violenceDetector.Detect(netInput)
    endDetectTime = time.time()
    

    targetSize = deploySettings.DISPLAY_IMAGE_SIZE - 2*deploySettings.BORDER_SIZE
    currentImage = cv2.resize(currentImage, (targetSize, targetSize))

    if isFighting:  # violence detected
        p = 0
        font = cv2.FONT_HERSHEY_SIMPLEX
        bottomLeftCornerOfText = (10, 50)
        fontScale = 1
        fontColor = (255, 255, 255)
        lineType = 2