def adminAjaxGetGraphData():
    videoVO = VideoVO()
    videoDAO = VideoDAO()

    # Look up the graph data for the video id sent by the AJAX request.
    index_VideoId = request.args.get('index_VideoId')
    videoVO.videoId = index_VideoId

    ajaxGraphDataList = videoDAO.ajaxGetGraphData(videoVO)
    print("ajaxGraphDataList >>>>>>>>>>>>>>>>>> ", ajaxGraphDataList)

    # Collect the legal/illegal car counts for the chart; if the query
    # returns several rows, the last row wins.
    graphDict = {}
    for row in ajaxGraphDataList:
        graphDict['illegalCarCount'] = row.illegalCarCount
        graphDict['legalCarCount'] = row.legalCarCount

    print('graphDict>>>', graphDict)
    if graphDict:
        response = {'responseKey': graphDict}
        print('response>>>>>>>>', response)
    else:
        response = {'responseKey': 'Error'}

    return jsonify(response)
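# A minimal sketch of exercising the handler above through Flask's test
# client. The route path is an assumption (the route registration is not
# shown in these snippets); the query parameter and the JSON shape come
# from the handler itself.
def test_adminAjaxGetGraphData():
    with app.test_client() as client:
        rv = client.get('/adminAjaxGetGraphData?index_VideoId=1')  # hypothetical route
        data = rv.get_json()
        # either {'responseKey': {counts}} or {'responseKey': 'Error'}
        assert 'responseKey' in data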
def userDeleteVideo():
    try:
        if adminLoginSession() == 'user':
            videoVO = VideoVO()
            videoDAO = VideoDAO()
            videoId = request.args.get( 'videoId' )

            videoVO.videoId = videoId
            videoVOList = videoDAO.deleteVideo( videoVO )

            videoInputFileName = videoVOList.videoInputFileName
            videoInputFilePath = videoVOList.videoInputFilePath

            videoOutputFileName = videoVOList.videoOutputFileName
            videoOutputFilePath = videoVOList.videoOutputFilePath

            # The stored paths are package-relative ('..' prefix); map them
            # back under 'project' before removing the files from disk.
            if videoInputFileName is not None:
                path = videoInputFilePath.replace( '..', 'project' ) + videoInputFileName
                os.remove( path )

            if videoOutputFileName is not None:
                path = videoOutputFilePath.replace( '..', 'project' ) + videoOutputFileName
                os.remove( path )

            return redirect( url_for( 'userViewVideo' ) )
        else:
            return adminLogoutSession()
    except Exception as ex:
        print( ex )
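# The deletes above assume the stored directory path already ends with a
# separator. A sketch of a slightly safer helper (hypothetical, not part
# of the original module) that joins the pieces explicitly and tolerates
# missing files:
def removeStoredFile(storedPath, fileName):
    if not fileName:
        return
    # map the package-relative prefix back onto the project directory
    path = os.path.join(storedPath.replace('..', 'project'), fileName)
    if os.path.exists(path):
        os.remove(path)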
def adminAjaxLoadDateRegister():
    videoVO = VideoVO()
    videoDAO = VideoDAO()

    index_LoginId = request.args.get('index_LoginId')

    videoVO.video_LoginId = index_LoginId

    ajaxAdminIndexDateVOList = videoDAO.ajaxDateAdminIndex(videoVO)

    print("ajaxAdminIndexDateVOList >>>>>>>>>>>>>>>>>> ",
          ajaxAdminIndexDateVOList)

    ajaxDateDictList = [i.as_dict() for i in ajaxAdminIndexDateVOList]
    print("ajaxDateDictList >>>>>>>>>>>>>>>>>> ", ajaxDateDictList)

    ajaxAdminIndexDateList = []
    for i in ajaxDateDictList:
        ajaxAdminIndexDateList.append({
            "videoId": i['videoId'],
            "uploadDate": i['uploadDate'].strftime('%d/%m/%Y')
        })

    print("ajaxAdminIndexDateList >>>>>>>>>>>>>>>>>> ", ajaxAdminIndexDateList)

    return jsonify(ajaxAdminIndexDateList)
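# as_dict() above is not defined in this snippet. Assuming the VOs are
# SQLAlchemy models, a common minimal implementation (an assumption, not
# the project's code) is a method on the model class:
def as_dict(self):
    # map every mapped column of the model to a plain key/value pair
    return {c.name: getattr(self, c.name) for c in self.__table__.columns}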
def userDeleteVideo():
    try:
        if adminLoginSession() == 'user':
            videoVO = VideoVO()
            videoDAO = VideoDAO()

            videoId = request.args.get('videoId')
            videoVO.videoId = videoId
            videoVOList = videoDAO.deleteVideo(videoVO)

            inputPath = videoVOList.inputVideoFilePath.replace(
                '..', 'project') + videoVOList.inputVideoFilename
            outputPath = videoVOList.outputVideoFilePath.replace(
                '..', 'project') + videoVOList.outputVideoFilename

            try:
                os.remove(inputPath)
                os.remove(outputPath)

            except Exception as ex:
                print(ex)

            return redirect(url_for('userViewVideo'))
        else:
            return adminLogoutSession()
    except Exception as ex:
        print(ex)
def userViewVideo():
    try:
        if adminLoginSession() == 'user':
            videoVO = VideoVO()
            videoDAO = VideoDAO()

            videoVO.video_LoginId = session['session_loginId']
            videoVOList = videoDAO.viewVideo( videoVO )
            return render_template( 'user/viewVideo.html', videoVOList=videoVOList )
        else:
            return adminLogoutSession()
    except Exception as ex:
        print( ex )
def deleteVideo():

    try:
        if session['loginRole'] == 'admin':
            videoVO = VideoVO()
            videoDAO = VideoDAO()
            videoVO.videoId = request.args.get('videoId')
            videoVO.videoActiveStatus = 'deactive'
            videoDAO.deleteVideo(videoVO)
            return redirect(url_for('viewVideo'))
        else:
            return render_template('admin/login.html')
    except Exception:
        return render_template('admin/login.html')
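# Unlike the user-facing delete handlers, this admin variant soft-deletes:
# it only flips videoActiveStatus to 'deactive'. A sketch of the matching
# DAO update (assuming Flask-SQLAlchemy; the real VideoDAO is not shown):
def deleteVideo(self, videoVO):
    # load the row and flip its active flag instead of removing it
    video = VideoVO.query.get(videoVO.videoId)
    video.videoActiveStatus = videoVO.videoActiveStatus
    db.session.commit()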
def insertVideo():
    try:
        if session['loginRole'] == 'admin':
            videoVO = VideoVO()
            videoDAO = VideoDAO()
            UPLOAD_FOLDER = 'C:/project/admin/project/static/adminResources/video'

            app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER

            file = request.files['videoFile']

            filename = secure_filename(file.filename)

            filepath = os.path.join(app.config['UPLOAD_FOLDER'])

            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            videoVO.videoFilename = filename
            print(videoVO.videoFilename)
            videoVO.videoFilepath = filepath
            videoVO.videoTitle = request.form['videoTitle']
            videoVO.videoDescription = request.form['videoDescription']
            videoVO.videoDate, videoVO.videoTime = str(
                datetime.now()).split(' ')
            videoVO.videoActiveStatus = 'active'
            videoDAO.insertVideo(videoVO)
            return render_template('admin/addVideos.html')
        else:
            return render_template('admin/login.html')
    except Exception:
        return render_template('admin/login.html')
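# insertVideo above saves whatever file arrives in the request. A common
# hardening step (a sketch, not part of the original handler) is to
# whitelist video extensions before calling file.save():
ALLOWED_VIDEO_EXTENSIONS = {'mp4', 'webm', 'avi'}

def allowed_video(filename):
    # accept only filenames with a whitelisted extension
    return ('.' in filename
            and filename.rsplit('.', 1)[1].lower() in ALLOWED_VIDEO_EXTENSIONS)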
def userinsertVideo():
    try:
        if adminLoginSession() == 'user':

            videoVO = VideoVO()
            videoDAO = VideoDAO()

            videoType = request.form['videoType']
            video_LoginId = session['session_loginId']

            file = request.files['file']
            print(file)

            videoFileName = secure_filename(file.filename)
            print(videoFileName)

            videoFilePath = os.path.join(app.config['UPLOAD_FOLDER'])
            print(videoFilePath)

            file.save(os.path.join(videoFilePath, videoFileName))

            videoUploadDate = datetime.now().date()
            videoUploadTime = datetime.now().time()

            videoVO.videoType = videoType
            videoVO.videoFileName = videoFileName
            videoVO.videoFilePath = videoFilePath.replace("project", "..")

            videoVO.videoUploadDate = videoUploadDate
            videoVO.videoUploadTime = videoUploadTime
            videoVO.video_LoginId = video_LoginId

            videoDAO.userInsertVideo(videoVO)

            return redirect(url_for('userviewVideo'))

        else:
            return adminLogoutSession()

    except Exception as ex:
        print(ex)
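# Several handlers store paths with replace('project', '..') and undo it
# at delete time with replace('..', 'project'). A tiny illustration of
# that round trip (the example path is made up):
storedPath = 'project/static/video/'.replace('project', '..')  # '../static/video/'
diskPath = storedPath.replace('..', 'project')                 # 'project/static/video/'
assert diskPath == 'project/static/video/'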
def userInsertVideo():
    try:
        if adminLoginSession() == 'user':
            videoVO = VideoVO()
            videoDAO = VideoDAO()

            uploadDate = str(datetime.now().date())
            uploadTime = datetime.now().strftime('%H:%M:%S')

            random_number = random.randint(1, 1000)

            videoOutputFileName = "output_" + str(random_number) + ".webm"
            videoOutputFilePath = app.config['UPLOAD_FOLDER']

            outputVideo = videoOutputFilePath + videoOutputFileName

            capture = cv2.VideoCapture(
                r"F:\projectworkspace\project\static\adminResources\inputVideo\input_1.mp4"
            )

            fourcc = cv2.VideoWriter_fourcc(*'VP80')
            # NOTE: capture_duration is defined at module level (not shown
            # here); it is passed to VideoWriter as the output frame rate.
            out = cv2.VideoWriter(outputVideo, fourcc, capture_duration,
                                  (640, 480))

            # Create two opencv named windows
            cv2.namedWindow("base-image", cv2.WINDOW_AUTOSIZE)
            cv2.namedWindow("result-image", cv2.WINDOW_AUTOSIZE)

            # Position the windows next to each other
            cv2.moveWindow("base-image", 0, 100)
            cv2.moveWindow("result-image", 400, 100)

            # Start the window thread for the two windows we are using
            cv2.startWindowThread()

            # The color of the rectangle we draw around the face
            rectangleColor = (0, 165, 255)

            # variables holding the current frame number and the current faceid
            frameCounter = 0
            currentFaceID = 0

            # Variables holding the correlation trackers and the name per faceid
            faceTrackers = {}
            faceNames = {}

            try:
                while True:
                    # Retrieve the next frame from the input video
                    rc, fullSizeBaseImage = capture.read()

                    # Resize the image to 320x240
                    baseImage = cv2.resize(fullSizeBaseImage, (320, 240))

                    # The result image is the one we will show the user: a copy
                    # of the frame with the tracking rectangles and names
                    # overlaid on top
                    resultImage = baseImage.copy()

                    # STEPS:
                    # * Update all trackers and remove the ones that are not
                    #   relevant anymore
                    # * Every 10 frames:
                    #       + Use face detection on the current frame and look
                    #         for faces.
                    #       + For each found face, check if its centerpoint is
                    #         within an existing tracked box. If so, there is
                    #         nothing to do.
                    #       + If the centerpoint is NOT in an existing tracked
                    #         box, add a new tracker with a new face-id.
                    # Increase the framecounter
                    frameCounter += 1

                    # Update all the trackers and remove the ones for which the update
                    # indicated the quality was not good enough
                    fidsToDelete = []
                    for fid in faceTrackers.keys():
                        trackingQuality = faceTrackers[fid].update(baseImage)

                        # If the tracking quality is not good enough, we
                        # schedule this tracker for deletion
                        if trackingQuality < 7:
                            fidsToDelete.append(fid)

                    for fid in fidsToDelete:
                        print("Removing fid " + str(fid) +
                              " from list of trackers")
                        faceTrackers.pop(fid, None)

                    # Every 10 frames, we will have to determine which faces
                    # are present in the frame
                    if (frameCounter % 10) == 0:

                        # For the face detection, we need to make use of a gray
                        # colored image so we will convert the baseImage to a
                        # gray-based image
                        gray = cv2.cvtColor(baseImage, cv2.COLOR_BGR2GRAY)
                        # Now use the haar cascade detector to find all faces
                        # in the image
                        faces = faceCascade.detectMultiScale(gray, 1.3, 5)

                        # Loop over all detected faces. We need to convert the
                        # coordinates to int here because of the requirement of
                        # the dlib tracker: if we omit the cast, you will get
                        # errors since the detector returns numpy.int32 and the
                        # tracker requires a plain int
                        for (_x, _y, _w, _h) in faces:
                            x = int(_x)
                            y = int(_y)
                            w = int(_w)
                            h = int(_h)

                            # calculate the centerpoint
                            x_bar = x + 0.5 * w
                            y_bar = y + 0.5 * h

                            # Variable holding the faceid we matched with
                            matchedFid = None

                            # Now loop over all the trackers and check if the
                            # centerpoint of the face is within the box of a
                            # tracker
                            for fid in faceTrackers.keys():
                                tracked_position = faceTrackers[
                                    fid].get_position()

                                t_x = int(tracked_position.left())
                                t_y = int(tracked_position.top())
                                t_w = int(tracked_position.width())
                                t_h = int(tracked_position.height())

                                # calculate the centerpoint
                                t_x_bar = t_x + 0.5 * t_w
                                t_y_bar = t_y + 0.5 * t_h

                                # check if the centerpoint of the face is within
                                # the rectangle of a tracker region. Also, the
                                # centerpoint of the tracker region must be within
                                # the region detected as a face. If both of these
                                # conditions hold, we have a match
                                if ((t_x <= x_bar <= (t_x + t_w))
                                        and (t_y <= y_bar <= (t_y + t_h))
                                        and (x <= t_x_bar <=
                                             (x + w)) and (y <= t_y_bar <=
                                                           (y + h))):
                                    matchedFid = fid

                            # If no matched fid, then we have to create a new tracker
                            if matchedFid is None:
                                print("Creating new tracker " +
                                      str(currentFaceID))

                                # timestamp the first detection; the alert check
                                # below compares elapsed seconds against
                                # capture_duration
                                start_time = time.time()
                                minutes = 0

                                # Create and store the tracker
                                tracker = dlib.correlation_tracker()
                                tracker.start_track(
                                    baseImage,
                                    dlib.rectangle(x - 10, y - 20, x + w + 10,
                                                   y + h + 20))

                                faceTrackers[currentFaceID] = tracker

                                # Start a new thread that is used to simulate
                                # face recognition. This is not yet implemented in this
                                # version :)
                                t = threading.Thread(target=doRecognizePerson,
                                                     args=(faceNames,
                                                           currentFaceID))
                                t.start()

                                # Increase the currentFaceID counter
                                currentFaceID += 1

                    # Now loop over all the trackers we have and draw the rectangle
                    # around the detected faces. If we 'know' the name for this person
                    # (i.e. the recognition thread is finished), we print the name
                    # of the person, otherwise the message indicating we are detecting
                    # the name of the person
                    for fid in faceTrackers.keys():
                        tracked_position = faceTrackers[fid].get_position()

                        t_x = int(tracked_position.left())
                        t_y = int(tracked_position.top())
                        t_w = int(tracked_position.width())
                        t_h = int(tracked_position.height())

                        cv2.rectangle(resultImage, (t_x, t_y),
                                      (t_x + t_w, t_y + t_h), rectangleColor,
                                      2)

                        if fid in faceNames.keys():
                            cv2.putText(resultImage, faceNames[fid],
                                        (int(t_x + t_w / 2), int(t_y)),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                        (255, 255, 255), 2)
                        else:
                            cv2.putText(resultImage, "Detecting...",
                                        (int(t_x + t_w / 2), int(t_y)),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                        (255, 255, 255), 2)

                    # Check each tracked face once per frame; once a face has
                    # been tracked for capture_duration seconds, e-mail an
                    # alert to the logged-in user.
                    for fid in faceTrackers.keys():
                        print("fid>>>>>>>", fid)

                        ffid = faceTrackers[fid]
                        seconds = int(time.time() - start_time) - minutes * 60
                        print(seconds)
                        if ffid is not None and seconds == capture_duration:
                            print(seconds)
                            sender = "*****@*****.**"
                            receiver = session['session_loginUsername']

                            currentDate = datetime.now().date()
                            currentTime = datetime.now().strftime('%H:%M:%S')

                            body = 'Your child is still watching TV:\nDate:{}\nTime:{}'.format(
                                currentDate, currentTime)

                            msg = MIMEMultipart()
                            msg['From'] = sender
                            msg['To'] = receiver
                            msg['Subject'] = "DETECTION"
                            msg.attach(MIMEText(body, 'plain'))

                            # send the alert over a TLS connection
                            server = smtplib.SMTP('smtp.gmail.com', 587)
                            server.starttls()
                            server.login(sender, "qazwsxedcrfvtgb1234567890")
                            text = msg.as_string()
                            server.sendmail(sender, receiver, text)
                            server.quit()
                            break
                    # Since we want to show something larger on the screen than the
                    # original 320x240, we resize the image again
                    #
                    # Note that it would also be possible to keep the large version
                    # of the baseimage and make the result image a copy of this large
                    # base image and use the scaling factor to draw the rectangle
                    # at the right coordinates.
                    largeResult = cv2.resize(resultImage, (640, 480),
                                             interpolation=cv2.INTER_AREA)

                    out.write(largeResult)

                    # Finally, we want to show the images on the screen
                    cv2.imshow("base-image", baseImage)
                    cv2.imshow("result-image", largeResult)

                    # Check if a key was pressed and if it was Q, then break
                    # from the infinite loop
                    pressedKey = cv2.waitKey(2)
                    if pressedKey == ord('q'):
                        break

                capture.release()
                out.release()
                cv2.destroyAllWindows()

            # To ensure we can also deal with the user pressing Ctrl-C in the console
            # we have to check for the KeyboardInterrupt exception and break out of
            # the main loop
            except KeyboardInterrupt:
                pass

            videoVO.videoOutputFileName = videoOutputFileName
            videoVO.videoOutputFilePath = videoOutputFilePath.replace(
                'project', '..')
            videoVO.videoUploadDate = uploadDate
            videoVO.videoUploadTime = uploadTime
            videoVO.video_LoginId = session['session_loginId']

            videoDAO.insertVideo(videoVO)

            return redirect(url_for('userViewVideo'))
        else:
            return adminLogoutSession()
    except Exception as ex:
        print(ex)
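# faceCascade, capture_duration, and doRecognizePerson are referenced above
# but defined at module level, outside this snippet. A minimal sketch of
# plausible definitions (assumptions, in the spirit of the classic dlib
# multi-face-tracking example this code follows):
faceCascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
capture_duration = 10  # seconds of continuous tracking before alerting

def doRecognizePerson(faceNames, fid):
    # stand-in for real face recognition: wait a bit, then label the face id
    time.sleep(2)
    faceNames[fid] = "Person " + str(fid)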
def userInsertVideo():
    try:
        if adminLoginSession() == 'user':
            videoVO = VideoVO()
            videoDAO = VideoDAO()

            file = request.files['file']

            videoInputFileName = secure_filename( file.filename )
            videoInputFilePath = os.path.join( app.config['UPLOAD_INPUTVIDEO_FOLDER'] )

            file.save( os.path.join( videoInputFilePath, videoInputFileName ) )

            inputVideo = videoInputFilePath + videoInputFileName
            videoOutputFileName = videoInputFileName.replace( '.mp4', '.webm' )
            videoOutputFilePath = os.path.join( app.config['UPLOAD_OUTPUTVIDEO_FOLDER'] )

            outputVideo = videoOutputFilePath + videoOutputFileName

            confidence_default = 0.4
            skip_frames = 25

            s1 = 'project/static/userResource/modelDump/MobileNetSSD_deploy.prototxt'
            model = 'project/static/userResource/modelDump/MobileNetSSD_deploy.caffemodel'

            userName = session['session_loginUsername']
            print( "userName=", userName )

            # class labels the MobileNet SSD model can detect
            CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
                       "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
                       "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
                       "sofa", "train", "tvmonitor"]

            # load our serialized model from disk
            print( "[INFO] loading model..." )
            net = cv2.dnn.readNetFromCaffe( s1, model )
            # print(">>>>>>>>>>>>>>>>>>>>>>>loding")

            # if a video path was not supplied, grab a reference to the
            # webcam (inputVideo is always set in this handler, so the
            # video-file branch below is the one actually taken)
            if not inputVideo:
                print( "[INFO] starting video stream..." )
                vs = VideoStream( src=0 ).start()
                time.sleep( 2.0 )

            # otherwise, grab a reference to the video file
            else:
                print( "[INFO] opening video file..." )
                vs = cv2.VideoCapture( inputVideo )

            # initialize the video writer (we'll instantiate later if need be)
            writer = None

            # initialize the frame dimensions (we'll set them as soon as we read
            # the first frame from the video)
            W = None
            H = None

            # instantiate our centroid tracker, then initialize a list to store
            # each of our dlib correlation trackers, followed by a dictionary to
            # map each unique object ID to a TrackableObject
            ct = CentroidTracker( maxDisappeared=40, maxDistance=50 )
            trackers = []
            trackableObjects = {}

            # initialize the total number of frames processed thus far, along
            # with the total number of objects that have moved either up or down
            totalFrames = 0
            miny = 0
            # start the frames per second throughput estimator
            fps = FPS().start()

            # loop over frames from the video stream
            while True:
                # grab the next frame; cv2.VideoCapture.read() returns a
                # (grabbed, frame) tuple, while VideoStream.read() returns
                # the frame directly
                frame = vs.read()
                frame = frame[1] if (inputVideo != False) else frame

                # if we are viewing a video and we did not grab a frame then we
                # have reached the end of the video
                if inputVideo is not None and frame is None:
                    break

                # resize the frame to have a maximum width of 500 pixels (the
                # less data we have, the faster we can process it), then convert
                # the frame from BGR to RGB for dlib
                frame = imutils.resize( frame, width=500 )
                rgb = cv2.cvtColor( frame, cv2.COLOR_BGR2RGB )

                # if the frame dimensions are empty, set them
                if W is None or H is None:
                    (H, W) = frame.shape[:2]

                # if we are supposed to be writing a video to disk, initialize
                # the writer
                if outputVideo is not None and writer is None:
                    fourcc = cv2.VideoWriter_fourcc( *"VP80" )
                    writer = cv2.VideoWriter( outputVideo, fourcc, 30, (W, H), True )

                # initialize the current status along with our list of bounding
                # box rectangles returned by either (1) our object detector or
                # (2) the correlation trackers
                status = "Waiting"
                rects = []

                # check to see if we should run a more computationally expensive
                # object detection method to aid our tracker
                if totalFrames % skip_frames == 0:
                    # set the status and initialize our new set of object trackers
                    status = "Detecting"
                    trackers = []
                    print( "detectiong" )

                    # convert the frame to a blob and pass the blob through the
                    # network and obtain the detections
                    blob = cv2.dnn.blobFromImage( frame, 0.007843, (W, H), 127.5 )
                    net.setInput( blob )
                    detections = net.forward()

                    # loop over the detections
                    for i in np.arange( 0, detections.shape[2] ):
                        # extract the confidence (i.e., probability) associated
                        # with the prediction
                        confidence = detections[0, 0, i, 2]
                        # print("in detectioooon for loop")

                        # filter out weak detections by requiring a minimum
                        # confidence
                        if confidence > confidence_default:
                            # extract the index of the class label from the
                            # detections list
                            idx = int( detections[0, 0, i, 1] )

                            # if the class label is not a person, ignore it
                            if CLASSES[idx] != "person":
                                print( "skipping non-person detection" )
                                continue

                            # compute the (x, y)-coordinates of the bounding box
                            # for the object
                            box = detections[0, 0, i, 3:7] * np.array( [W, H, W, H] )
                            (startX, startY, endX, endY) = box.astype( "int" )

                            # construct a dlib rectangle object from the bounding
                            # box coordinates and then start the dlib correlation
                            # tracker
                            tracker = dlib.correlation_tracker()

                            compare.append( (startX, startY) )
                            for i in compare:
                                if miny < i[1]:
                                    miny = i[1]
                            # add the tracker to our list of trackers so we can
                            # utilize it during skip frames
                            rect = dlib.rectangle( int( startX ), int( miny ), int( endX ), int( endY ) )
                            tracker.start_track( rgb, rect )
                            trackers.append( tracker )


                # otherwise, we should utilize our object *trackers* rather than
                # object *detectors* to obtain a higher frame processing throughput
                else:
                    miny = 0
                    # loop over the trackers
                    for tracker in trackers:
                        # set the status of our system to be 'tracking' rather
                        # than 'waiting' or 'detecting'
                        status = "Tracking"

                        # update the tracker and grab the updated position
                        tracker.update( rgb )
                        pos = tracker.get_position()

                        # unpack the position object
                        startX = int( pos.left() )
                        startY = int( pos.top() )
                        endX = int( pos.right() )
                        endY = int( pos.bottom() )

                        # add the bounding box coordinates to the rectangles list

                        compare.append( (startX, startY) )
                        for i in compare:
                            if miny < i[1]:
                                miny = i[1]

                        rects.append( (startX, miny, endX, endY) )

                        cv2.rectangle( frame, (startX, miny), (endX, endY), (0, 255, 0), 2 )

                # draw a horizontal line in the center of the frame -- once an
                # object crosses this line we will determine whether they were
                # moving 'up' or 'down'
                cv2.line( frame, (int( W // 1.5 ), 0), (int( W // 1.5 ), H), (0, 255, 255), 2 )
                cv2.line( frame, (int( W // 2 ), 0), (int( W // 2 ), H), (255, 0, 0), 2 )
                cv2.line( frame, (0, int( H // 5 )), (W, int( H // 5 )), (255, 0, 255), 2 )
                # use the centroid tracker to associate the (1) old object
                # centroids with (2) the newly computed object centroids
                objects = ct.update( rects )
                caution = 0
                danger = 0
                safe = 0
                for i in rects:

                    # ignore boxes above the horizontal H // 5 guide line
                    if i[1] < H // 5:
                        continue

                    # zone check: danger left of W // 3, safe to the right;
                    # the caution band is assumed to span W // 3 .. W // 2
                    if W // 2 > i[0] > W // 3:
                        caution += 1
                        Cautionlist.append( caution )
                    elif i[0] > W // 3:
                        safe += 1
                        Safelist.append( safe )
                    elif i[0] < W // 3:
                        danger += 1
                        Dangerlist.append( danger )

                # loop over the tracked objects
                for (objectID, centroid) in objects.items():
                    # check to see if a trackable object exists for the current
                    # object ID
                    to = trackableObjects.get( objectID, None )

                    trackableObjects[objectID] = to

                    # draw both the ID of the object and the centroid of the
                    # object on the outputVideo frame
                    text = "ID {}".format( objectID )
                    if len( rects ) == 0:
                        continue

                    cv2.putText( frame, text, (rects[len( rects ) - 1][0] - 10, rects[len( rects ) - 1][1] - 10),
                                 cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2 )
                    cv2.circle( frame, (rects[len( rects ) - 1][0], rects[len( rects ) - 1][1]), 4, (0, 255, 0), -1 )

                # construct a tuple of information we will be displaying on the
                # frame
                info = [

                    ("Danger", danger),
                    ("Caution", caution),
                    ("Safe", safe),
                    ("Status", status),
                ]

                for (i, (k, v)) in enumerate( info ):
                    text = "{}: {}".format( k, v )
                    cv2.putText( frame, text, (10, H - ((i * 20) + 20)),
                                 cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2 )
                # check to see if we should write the frame to disk
                if writer is not None:
                    writer.write( frame )

                cv2.imshow( "Frame", frame )
                key = cv2.waitKey( 1 ) & 0xFF

                if key == ord( "q" ):
                    break

                totalFrames += 1

                fps.update()

                # alert once, when the second danger event has been logged
                if len( Dangerlist ) == 2:
                    # song = AudioSegment.from_wav("1.wav")
                    # start = time.time()  # the variable that holds the starting time
                    # elapsed = 0  # the variable that holds the number of seconds elapsed.
                    # while elapsed < 10:  # while less than 30 seconds have elapsed
                    #     play(song)
                    #
                    #     elapsed = time.time() - start

                    message = "______DANGER_______Child has touch the grill."
                    fromaddr = "*****@*****.**"
                    toaddr = userName
                    msg = MIMEMultipart()
                    msg['From'] = fromaddr
                    msg['To'] = toaddr
                    msg['Subject'] = "Child Security"
                    msg.attach( MIMEText( message, 'plain' ) )
                    server = smtplib.SMTP( 'smtp.gmail.com', 587 )
                    server.starttls()
                    server.login( fromaddr, "9925646618" )
                    text = msg.as_string()
                    server.sendmail( fromaddr, toaddr, text )
                    server.quit()

            print( "frame by camera", totalFrames )
            fps.stop()

            if writer is not None:
                writer.release()

            if not inputVideo:
                vs.stop()

            else:
                vs.release()

            cv2.destroyAllWindows()

            videoUploadDate = str( datetime.now().date() )
            videoUploadTime = datetime.now().strftime( '%H:%M:%S' )

            videoVO.videoUploadDate = videoUploadDate
            videoVO.videoUploadTime = videoUploadTime
            videoVO.videoInputFileName = videoInputFileName
            videoVO.videoInputFilePath = videoInputFilePath.replace( 'project', '..' )
            videoVO.videoOutputFileName = videoOutputFileName
            videoVO.videoOutputFilePath = videoOutputFilePath.replace( 'project', '..' )
            videoVO.video_LoginId = session['session_loginId']
            videoDAO.insertVideo( videoVO )

            return redirect( url_for( 'userViewVideo' ) )
        else:
            return adminLogoutSession()
    except Exception as ex:
        print( ex )
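# compare, Cautionlist, Safelist, Dangerlist, and CentroidTracker are used
# above but live at module level, outside this snippet. Plausible
# definitions (assumptions; the import path mirrors the pyimagesearch
# people-counter layout this code follows):
from pyimagesearch.centroidtracker import CentroidTracker  # hypothetical path

compare = []      # (startX, startY) of every box seen so far
Cautionlist = []  # one entry per caution-zone hit
Safelist = []     # one entry per safe-zone hit
Dangerlist = []   # one entry per danger-zone hit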
def userInsertVideo():
    try:
        if adminLoginSession() == 'user':

            inputvideoFilename = request.args.get('videoFilename')
            inputvideoFilepath = request.args.get('videoFilepath')
            video_data = request.args.get('video_data')
            outputvideoFilename = request.args.get('outputVideoFilename')
            outputvideoFilePath = request.args.get('outputFolder')
            totalIllegalCarCount = request.args.get('totalIllegalCarCount')
            totalLegalCarCount = request.args.get('totalLegalCarCount')
            videoVO = VideoVO()
            videoDAO = VideoDAO()

            todayDate = date.today()  # upload date saved with the record
            nowTime = datetime.now()  # upload time saved with the record
            # store both paths package-relative ('project' -> '..')
            inputvideoFilepath = inputvideoFilepath.replace("project", "..")
            outputvideoFilePath = outputvideoFilePath.replace("project", "..")

            # video_data arrives as 'cameraId,crossroadId,areaId'
            videoData = video_data.split(',')
            videoVO.video_CameraId = videoData[0]
            videoVO.video_CrossroadId = videoData[1]
            videoVO.video_AreaId = videoData[2]
            videoVO.video_LoginId = session['session_LoginId']
            videoVO.inputVideoFilename = inputvideoFilename
            videoVO.inputVideoFilePath = str(inputvideoFilepath)
            videoVO.uploadTime = nowTime.strftime("%H:%M:%S")
            videoVO.uploadDate = todayDate
            videoVO.outputVideoFilename = outputvideoFilename
            videoVO.outputVideoFilePath = outputvideoFilePath
            videoVO.illegalCarCount = totalIllegalCarCount
            videoVO.legalCarCount = totalLegalCarCount
            videoDAO.insertVideo(videoVO)

            return redirect(url_for('userViewVideo'))
        else:
            return adminLogoutSession()
    except Exception as ex:
        print(ex)
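# A sketch of the query string this last endpoint expects. The parameter
# names come from the request.args.get() calls above; the route path and
# the values are invented for illustration:
#
#   /userInsertVideo?videoFilename=input_1.mp4
#       &videoFilepath=project/static/inputVideo/
#       &video_data=3,7,2
#       &outputVideoFilename=output_1.webm
#       &outputFolder=project/static/outputVideo/
#       &totalIllegalCarCount=4&totalLegalCarCount=11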