# Web front-end: serve the webcam page and push detection results over Socket.IO.
import base64
import glob
import os.path
import random
from io import BytesIO  # Python 3 replacement for the removed StringIO module

from flask import Flask, render_template
from flask_socketio import SocketIO
from keras.models import load_model
from PIL import Image

from data import DataSet
from processor import process_image
from ViolenceDetector import ViolenceDetector  # assumed import path for the project's detector

app = Flask(__name__)
app.config['SECRET_KEY'] = 'vnkdjnfjknfl1232#'
socketio = SocketIO(app)

violenceDetector = ViolenceDetector()
count = 0
baslangicsn = list()  # start times (seconds) of detected violent intervals
bitissn = list()      # end times (seconds) of detected violent intervals


@app.route('/')
def sessions():
    return render_template('index_webcam.html')


def messageReceived(methods=['GET', 'POST']):
    print('message was received!!!')
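# A minimal wiring sketch showing how the pieces above could be connected.
# The 'webcam_frame' event name and the base64 JPEG payload are assumptions,
# not part of the original project; ImageUtils comes from the capture script below.
import numpy as np
import src.data.ImageUtils as ImageUtils


@socketio.on('webcam_frame')
def handle_webcam_frame(json, methods=['GET', 'POST']):
    # Decode the base64-encoded frame sent by the browser into an array.
    # (PIL yields RGB; the detector may expect OpenCV's BGR channel order.)
    frame = np.array(Image.open(BytesIO(base64.b64decode(json['image']))))
    netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(frame)
    isFighting = violenceDetector.Detect(netInput)
    # Push the verdict to every connected client.
    socketio.emit('detection_result', {'violence': bool(isFighting)},
                  callback=messageReceived)


if __name__ == '__main__':
    socketio.run(app, debug=True)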
# Webcam capture loop: grab frames with OpenCV, dump each frame to disk,
# and feed it to the detector.
import glob
import operator
import os.path
import random
import time

import cv2
import numpy as np
from keras.models import load_model

import settings.DeploySettings as deploySettings
import settings.DataSettings as dataSettings
import src.data.ImageUtils as ImageUtils
from data import DataSet
from processor import process_image
from ViolenceDetector import ViolenceDetector  # assumed import path for the project's detector

cap = cv2.VideoCapture(0)
violenceDetector = ViolenceDetector()
count = 0
baslangicsn = list()  # start times (seconds) of detected violent intervals
bitissn = list()      # end times (seconds) of detected violent intervals

while True:
    # Capture frame-by-frame
    ret, currentImage = cap.read()
    if not ret:
        break

    # Save the raw frame to file before running detection
    cv2.imwrite('/home/murat/Desktop/image.png', currentImage)
    count += 1

    netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(currentImage)
    startDetectTime = time.time()
    # Run the detector on the prepared input (same call as the file-based variants below)
    isFighting = violenceDetector.Detect(netInput)
    print('Violence detected!' if isFighting else 'No violence.')

    # Press 'q' to stop capturing
    if cv2.waitKey(1) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
def DetectViolence(PATH_FILE_NAME_TO_SAVE_RESULT):
    # Font for text drawn on video frames
    font = cv2.FONT_HERSHEY_SIMPLEX
    violenceDetector = ViolenceDetector()
    capture = cv2.VideoCapture(0)
    shouldSaveResult = (PATH_FILE_NAME_TO_SAVE_RESULT is not None)
    if shouldSaveResult:
        videoSavor = VideoSavor(PATH_FILE_NAME_TO_SAVE_RESULT + "_Result", capture)
    listOfForwardTime = []

    # Report some properties of the VideoCapture (frame width, frame height, fps)
    frame_width = capture.get(cv2.CAP_PROP_FRAME_WIDTH)
    frame_height = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
    fps = capture.get(cv2.CAP_PROP_FPS)
    print("CAP_PROP_FRAME_WIDTH: '{}'".format(frame_width))
    print("CAP_PROP_FRAME_HEIGHT: '{}'".format(frame_height))
    print("CAP_PROP_FPS: '{}'".format(fps))

    # Check whether the camera opened successfully
    if not capture.isOpened():
        print("Error opening the camera")

    # Read frames until the stream ends or the user presses 'q'
    while capture.isOpened():
        ret, frame = capture.read()
        if not ret:
            break

        # Display the raw captured frame:
        # cv2.imshow('Input frame from the camera', frame)
        netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(frame)
        startDetectTime = time.time()
        isFighting = violenceDetector.Detect(netInput)
        endDetectTime = time.time()
        listOfForwardTime.append(endDetectTime - startDetectTime)

        targetSize = deploySettings.DISPLAY_IMAGE_SIZE - 2 * deploySettings.BORDER_SIZE
        currentImage = cv2.resize(frame, (targetSize, targetSize))
        # Draw a colored border on the resized frame to signal the verdict
        borderColor = (deploySettings.FIGHT_BORDER_COLOR if isFighting
                       else deploySettings.NO_FIGHT_BORDER_COLOR)
        resultImage = cv2.copyMakeBorder(
            currentImage,
            deploySettings.BORDER_SIZE, deploySettings.BORDER_SIZE,
            deploySettings.BORDER_SIZE, deploySettings.BORDER_SIZE,
            cv2.BORDER_CONSTANT, value=borderColor)

        # frameText = "Violence Detected!" if isFighting else "No Violence Detected."
        # textColor = deploySettings.FIGHT_BORDER_COLOR if isFighting else deploySettings.NO_FIGHT_BORDER_COLOR
        # cv2.putText(resultImage, frameText, (50, 50), font, 4, textColor, 2, cv2.LINE_AA)
        cv2.imshow("Violence Detection", resultImage)
        if shouldSaveResult:
            videoSavor.AppendFrame(resultImage)

        if cv2.waitKey(1) == ord('q'):
            break

    capture.release()
    cv2.destroyAllWindows()

    print("Details about current frame:")
    PrintUnsmoothedResults(violenceDetector.unsmoothedResults)
    averagedForwardTime = np.mean(listOfForwardTime)
    print("Averaged Forward Time: ", averagedForwardTime)
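# Minimal usage sketch: '/tmp/webcam' is a placeholder output prefix; pass
# None to display the annotated stream without saving a '_Result' video.
if __name__ == '__main__':
    DetectViolence('/tmp/webcam')
    # DetectViolence(None)  # display only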
def DetectViolence(PATH_FILE_NAME_OF_SOURCE_VIDEO, PATH_FILE_NAME_TO_SAVE_RESULT):
    violenceDetector = ViolenceDetector()
    videoReader = cv2.VideoCapture(PATH_FILE_NAME_OF_SOURCE_VIDEO)
    shouldSaveResult = (PATH_FILE_NAME_TO_SAVE_RESULT is not None)
    if shouldSaveResult:
        videoSavor = VideoSavor(PATH_FILE_NAME_TO_SAVE_RESULT + "_Result", videoReader)
    listOfForwardTime = []

    isCurrentFrameValid, currentImage = videoReader.read()
    count = 0
    baslangicsn = list()  # start times (seconds) of detected violent intervals
    bitissn = list()      # end times (seconds) of detected violent intervals

    # Text-overlay settings shared by both branches below
    font = cv2.FONT_HERSHEY_SIMPLEX
    fontScale = 1
    fontColor = (255, 255, 255)
    thickness = 2

    while isCurrentFrameValid:
        count += 1
        netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(currentImage)
        startDetectTime = time.time()
        isFighting = violenceDetector.Detect(netInput)
        endDetectTime = time.time()
        listOfForwardTime.append(endDetectTime - startDetectTime)

        targetSize = deploySettings.DISPLAY_IMAGE_SIZE - 2 * deploySettings.BORDER_SIZE
        currentImage = cv2.resize(currentImage, (targetSize, targetSize))
        if isFighting:
            # Violence detected: open a new interval if the previous one is closed.
            # count / 25 converts the frame index to seconds, assuming 25 fps.
            if len(baslangicsn) == len(bitissn):
                baslangicsn.append(count / 25)
            cv2.putText(currentImage, "Siddet tespit edildi",  # Turkish: "Violence detected"
                        (10, 50), font, fontScale, fontColor, thickness)
        else:
            # No violence: close the currently open interval, if any.
            if len(baslangicsn) != len(bitissn):
                bitissn.append(count / 25)
            cv2.putText(currentImage, "Siddet tespit edilmedi",  # Turkish: "No violence detected"
                        (10, 450), font, fontScale, fontColor, thickness)

        cv2.imshow("Violence Detection", currentImage)
        if shouldSaveResult:
            videoSavor.AppendFrame(currentImage)

        userResponse = cv2.waitKey(1)
        if userResponse == ord('q'):
            videoReader.release()
            cv2.destroyAllWindows()
            break
        else:
            isCurrentFrameValid, currentImage = videoReader.read()

    PrintUnsmoothedResults(violenceDetector.unsmoothedResults)
    averagedForwardTime = np.mean(listOfForwardTime)

    # Close an interval that is still open when the video ends.
    if len(baslangicsn) != len(bitissn):
        bitissn.append(count / 25)
    print(len(baslangicsn), "-------", len(bitissn))
    # Print each detected interval pairwise
    for start, end in zip(baslangicsn, bitissn):
        print("detected intervals:", start, "------", end)  # Turkish original: "tespit edilen sureler"
    print("Averaged Forward Time: ", averagedForwardTime)
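# Minimal helper sketch: pretty-print the detected intervals as mm:ss
# timestamps. This helper is illustrative only and assumes the same fixed
# 25 fps the function above uses when converting frame counts to seconds,
# e.g. format_intervals(baslangicsn, bitissn) just before the final print.
def format_intervals(starts, ends):
    for start, end in zip(starts, ends):
        print("violence from {:02d}:{:02d} to {:02d}:{:02d}".format(
            int(start) // 60, int(start) % 60,
            int(end) // 60, int(end) % 60))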
from imutils.video import FPS  # assumed source of the FPS throughput counter used below


def DetectViolence(PATH_FILE_NAME_OF_SOURCE_VIDEO, PATH_FILE_NAME_TO_SAVE_RESULT):
    violenceDetector = ViolenceDetector()
    videoReader = cv2.VideoCapture(PATH_FILE_NAME_OF_SOURCE_VIDEO)
    #videoReader.set(cv2.CAP_PROP_FPS, 80)
    video_fps = videoReader.get(cv2.CAP_PROP_FPS)  # source fps, used by the commented pacing line below
    shouldSaveResult = (PATH_FILE_NAME_TO_SAVE_RESULT is not None)
    if shouldSaveResult:
        videoSavor = VideoSavor(PATH_FILE_NAME_TO_SAVE_RESULT + "_Result", videoReader)
    listOfForwardTime = []

    # Throughput counter; the update()/stop()/elapsed() calls below match the
    # imutils.video FPS API.
    fps = FPS().start()

    isCurrentFrameValid, currentImage = videoReader.read()
    while isCurrentFrameValid:
        netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(currentImage)
        startDetectTime = time.time()
        isFighting = violenceDetector.Detect(netInput)
        endDetectTime = time.time()
        listOfForwardTime.append(endDetectTime - startDetectTime)

        targetSize = deploySettings.DISPLAY_IMAGE_SIZE - 2 * deploySettings.BORDER_SIZE
        currentImage = cv2.resize(currentImage, (targetSize, targetSize))
        if isFighting:
            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE, deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE, deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT, value=deploySettings.FIGHT_BORDER_COLOR)
            cv2.putText(resultImage, "Violence detected", (20, 40),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)  # red, BGR
        else:
            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE, deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE, deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT, value=deploySettings.NO_FIGHT_BORDER_COLOR)
            cv2.putText(resultImage, "No violence", (20, 40),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)  # green, BGR

        # display a piece of text to the frame (so we can benchmark
        # fairly against the fast method)
        # cv2.putText(resultImage, "Slow Method", (10, 30),
        #             cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
        cv2.imshow("Violence Detection", resultImage)
        if shouldSaveResult:
            videoSavor.AppendFrame(resultImage)

        userResponse = cv2.waitKey(1)
        #userResponse = cv2.waitKey(int(2000 / video_fps))  # pace playback to the source fps
        fps.update()
        if userResponse == ord('q'):
            fps.stop()
            print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
            print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
            videoReader.release()
            cv2.destroyAllWindows()
            break
        else:
            isCurrentFrameValid, currentImage = videoReader.read()

    PrintUnsmoothedResults(violenceDetector.unsmoothedResults)
    averagedForwardTime = np.mean(listOfForwardTime)
    print("Averaged Forward Time: ", averagedForwardTime)
def DetectViolence(PATH_FILE_NAME_OF_SOURCE_VIDEO, saveresult):
    violenceDetector = ViolenceDetector()
    videoReader = cv2.VideoCapture(PATH_FILE_NAME_OF_SOURCE_VIDEO)
    if saveresult:
        # The result video is written next to the source file
        videoSavor = VideoSavor(PATH_FILE_NAME_OF_SOURCE_VIDEO + "_Result", videoReader)
    listOfForwardTime = []

    isCurrentFrameValid, currentImage = videoReader.read()
    while isCurrentFrameValid:
        netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(currentImage)
        startDetectTime = time.time()
        isFighting = violenceDetector.Detect(netInput)
        endDetectTime = time.time()
        listOfForwardTime.append(endDetectTime - startDetectTime)

        targetSize = deploySettings.DISPLAY_IMAGE_SIZE - 2 * deploySettings.BORDER_SIZE
        currentImage = cv2.resize(currentImage, (targetSize, targetSize))
        if isFighting:
            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE, deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE, deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT, value=deploySettings.FIGHT_BORDER_COLOR)
            # Label the frame; text placement and style come from deploySettings
            resultImage = cv2.putText(resultImage, 'Violence :(',
                                      deploySettings.org, deploySettings.font,
                                      deploySettings.fontScale, deploySettings.color2,
                                      deploySettings.thickness, cv2.LINE_AA)
        else:
            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE, deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE, deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT, value=deploySettings.NO_FIGHT_BORDER_COLOR)
            resultImage = cv2.putText(resultImage, 'NonViolence :)',
                                      deploySettings.org, deploySettings.font,
                                      deploySettings.fontScale, deploySettings.color1,
                                      deploySettings.thickness, cv2.LINE_AA)

        cv2.imshow("Violence Detection", resultImage)
        if saveresult:
            videoSavor.AppendFrame(resultImage)

        userResponse = cv2.waitKey(1)
        if userResponse == ord('q'):
            videoReader.release()
            cv2.destroyAllWindows()
            break
        else:
            isCurrentFrameValid, currentImage = videoReader.read()

    PrintUnsmoothedResults(violenceDetector.unsmoothedResults)
    averagedForwardTime = np.mean(listOfForwardTime)
    print("Averaged Forward Time: ", averagedForwardTime)
def DetectViolence(PATH_FILE_NAME_OF_SOURCE_VIDEO, PATH_FILE_NAME_TO_SAVE_RESULT):
    violenceDetector = ViolenceDetector()
    videoReader = cv2.VideoCapture(PATH_FILE_NAME_OF_SOURCE_VIDEO)
    shouldSaveResult = (PATH_FILE_NAME_TO_SAVE_RESULT is not None)
    # num_frames acts as the minimum number of seconds between detector calls;
    # 0 means the detector runs on every frame.
    num_frames = 0
    startTime = time.time()
    if shouldSaveResult:
        videoSavor = VideoSavor(PATH_FILE_NAME_TO_SAVE_RESULT + "_Result", videoReader)
    listOfForwardTime = []

    isCurrentFrameValid, currentImage = videoReader.read()
    # Pre-build a "no fight" result so there is always a frame to show, even
    # before the first detector call.
    resultImage = cv2.copyMakeBorder(
        currentImage,
        deploySettings.BORDER_SIZE, deploySettings.BORDER_SIZE,
        deploySettings.BORDER_SIZE, deploySettings.BORDER_SIZE,
        cv2.BORDER_CONSTANT, value=deploySettings.NO_FIGHT_BORDER_COLOR)

    while isCurrentFrameValid:
        nowTime = time.time()
        if (nowTime - startTime) > num_frames:
            netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(currentImage)
            startDetectTime = time.time()
            isFighting = violenceDetector.Detect(netInput)
            endDetectTime = time.time()
            listOfForwardTime.append(endDetectTime - startDetectTime)

            targetSize = deploySettings.DISPLAY_IMAGE_SIZE - 2 * deploySettings.BORDER_SIZE
            currentImage = cv2.resize(currentImage, (targetSize, targetSize))
            borderColor = (deploySettings.FIGHT_BORDER_COLOR if isFighting
                           else deploySettings.NO_FIGHT_BORDER_COLOR)
            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE, deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE, deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT, value=borderColor)
            startTime = time.time()  # restart the throttle window

        cv2.imshow("Violence Detection", resultImage)
        if shouldSaveResult:
            videoSavor.AppendFrame(resultImage)

        userResponse = cv2.waitKey(1)
        if userResponse == ord('q'):
            videoReader.release()
            cv2.destroyAllWindows()
            break
        else:
            isCurrentFrameValid, currentImage = videoReader.read()

    PrintUnsmoothedResults(violenceDetector.unsmoothedResults)
    averagedForwardTime = np.mean(listOfForwardTime)
    print("Averaged Forward Time: ", averagedForwardTime)
def DetectViolence(PATH_FILE_NAME_OF_SOURCE_VIDEO, PATH_FILE_NAME_TO_SAVE_RESULT):
    violenceDetector = ViolenceDetector()
    videoReader = cv2.VideoCapture(PATH_FILE_NAME_OF_SOURCE_VIDEO)
    shouldSaveResult = (PATH_FILE_NAME_TO_SAVE_RESULT is not None)
    if shouldSaveResult:
        videoSavor = VideoSavor(PATH_FILE_NAME_TO_SAVE_RESULT + "_Result", videoReader)
    listOfForwardTime = []

    isCurrentFrameValid, currentImage = videoReader.read()

    # Secondary classifier: an Inception checkpoint that labels the kind of
    # action in a frame (class names come from DataSet).
    data = DataSet()
    model = load_model(
        '/home/furkan/five-video-classification-methods-master/inception.023-3.04.hdf5'
    )

    def predict_top5(image):
        # Run the classifier on one frame (fed to the network unpreprocessed)
        # and return the five most likely class names with their scores.
        image_arr = np.expand_dims(image, axis=0)
        predictions = model.predict(image_arr)
        label_predictions = {}
        for i, label in enumerate(data.classes):
            label_predictions[label] = predictions[0][i]
        sorted_lps = sorted(label_predictions.items(),
                            key=operator.itemgetter(1), reverse=True)
        listeString = [name for name, _ in sorted_lps[:5]]
        listeValue = [value for _, value in sorted_lps[:5]]
        return listeString, listeValue

    # Classify the first frame so a label is available from the start
    listeString, listeValue = predict_top5(currentImage)
    maxValue = max(listeValue)
    maxValueIndex = listeValue.index(maxValue)

    X = 0  # frames of violence seen since the classifier was last re-run
    while isCurrentFrameValid:
        netInput = ImageUtils.ConvertImageFrom_CV_to_NetInput(currentImage)
        startDetectTime = time.time()
        isFighting = violenceDetector.Detect(netInput)
        endDetectTime = time.time()
        listOfForwardTime.append(endDetectTime - startDetectTime)

        targetSize = deploySettings.DISPLAY_IMAGE_SIZE - 2 * deploySettings.BORDER_SIZE
        currentImage = cv2.resize(currentImage, (targetSize, targetSize))
        if isFighting:  # violence detected
            # Re-classify the action every 50 violent frames
            if X == 50:
                listeString, listeValue = predict_top5(currentImage)
                maxValue = max(listeValue)
                maxValueIndex = listeValue.index(maxValue)
                print(listeString[maxValueIndex], "--", maxValue)
                X = 0
            else:
                X += 1

            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE, deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE, deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT, value=deploySettings.FIGHT_BORDER_COLOR)
            # Overlay the most likely action label on the frame
            cv2.putText(resultImage, listeString[maxValueIndex], (10, 300),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
            print(listeString[maxValueIndex], "--", maxValue)
        else:
            resultImage = cv2.copyMakeBorder(
                currentImage,
                deploySettings.BORDER_SIZE, deploySettings.BORDER_SIZE,
                deploySettings.BORDER_SIZE, deploySettings.BORDER_SIZE,
                cv2.BORDER_CONSTANT, value=deploySettings.NO_FIGHT_BORDER_COLOR)

        cv2.imshow("Violence Detection", resultImage)
        if shouldSaveResult:
            videoSavor.AppendFrame(resultImage)

        userResponse = cv2.waitKey(1)
        if userResponse == ord('q'):
            videoReader.release()
            cv2.destroyAllWindows()
            break
        else:
            isCurrentFrameValid, currentImage = videoReader.read()

    PrintUnsmoothedResults(violenceDetector.unsmoothedResults)
    averagedForwardTime = np.mean(listOfForwardTime)
    print("Averaged Forward Time: ", averagedForwardTime)
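# Minimal preprocessing sketch: the classifier above receives raw BGR frames.
# Assuming the Inception checkpoint follows the usual InceptionV3 convention
# (299x299 RGB inputs scaled to [-1, 1]), a frame could be prepared like this
# before model.predict():
from keras.applications.inception_v3 import preprocess_input


def prepare_frame(frame_bgr):
    rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)  # OpenCV frames are BGR
    resized = cv2.resize(rgb, (299, 299))             # InceptionV3 input size
    batch = np.expand_dims(resized.astype('float32'), axis=0)
    return preprocess_input(batch)                    # scales pixels to [-1, 1]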