def webreg(userexcel):
    args = {
        "east": "frozen_east_text_detection.pb",
        "min_confidence": 0.5,
        "width": 320,
        "height": 320,
        "padding": 0.0,
        "webcam": "",  # path of webcam
        "excel": userexcel,
    }
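    # frozen_east_text_detection.pb is the pre-trained EAST detector; width and
    # height must be multiples of 32 because of the network's architecture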

    (W, H) = (None, None)  # actual frame dimensions, e.g. 720x460
    (newW, newH) = (args["width"], args["height"])  # dimensions required by EAST
    (rW, rH) = (None, None)  # ratios between the two

    layerNames = [
        "feature_fusion/Conv_7/Sigmoid",  # scores - text/no-text probabilities
        "feature_fusion/concat_3",  # geometry - bounding box coordinates
    ]
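    # the first layer yields a score map with the probability that each region
    # contains text; the second yields the geometry volume used to reconstruct
    # the bounding box around each candidate region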

    print("loading EAST text detector...")

    net = cv2.dnn.readNet(args["east"])

    # if no video path was supplied, grab a reference to the live webcam;
    # otherwise open the video file
    if not args["webcam"]:
        print("[INFO] starting webcam stream...")
        vs = WebcamVideoStream(src=0).start()  # 0 for the default webcam
    else:
        print("[INFO] opening video file...")
        vs = cv2.VideoCapture(args["webcam"])
    # time.sleep(1.0)

    fnumber = -10        # frame index; advanced 10 frames at a time when reading a video file
    vfname = []          # filenames of the annotated frames saved to the static folder
    T = []               # timestamp recorded for each accepted prediction
    predictedTexts = []  # unique text strings recognized so far
    fps = FPS().start()
    while True:
        if args["webcam"]:
            fnumber += 10
            vs.set(cv2.CAP_PROP_POS_FRAMES, fnumber)

        frame = vs.read()
        # cv2.VideoCapture.read() returns (ret, frame); the threaded stream returns the frame directly
        frame = frame[1] if args.get("webcam", False) else frame

        # check to see if we have reached the end of the stream
        if frame is None:
            break

        # resize the frame, maintaining the aspect ratio
        frame = imutils.resize(frame, width=1000)
        orig = frame.copy()

        if W is None or H is None:
            (H, W) = frame.shape[:2]  # actual size
            rW = W / float(newW)
            rH = H / float(newH)

        # resize the frame
        frame = cv2.resize(frame, (newW, newH))

        # construct a blob
        blob = cv2.dnn.blobFromImage(frame,
                                     1.0, (newW, newH),
                                     (123.68, 116.78, 103.94),
                                     swapRB=True,
                                     crop=False)
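        # note: the per-channel means (123.68, 116.78, 103.94) are the ImageNet
        # channel means conventionally used with the pre-trained EAST model;
        # swapRB=True converts OpenCV's BGR ordering to RGB before the subtraction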

        net.setInput(blob)
        (scores, geometry) = net.forward(layerNames)

        # decode the predictions, obtaining confidence scores and box positions
        (rects, confidences) = decode_predictions_video(scores, geometry, args)

        boxes = non_max_suppression(np.array(rects), probs=confidences)

        pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
        for (startX, startY, endX, endY) in boxes:
            # scaling the bounding box coordinates based on the respective ratios
            startX = int(startX * rW)
            startY = int(startY * rH)
            endX = int(endX * rW)
            endY = int(endY * rH)

            # applying padding in percentage
            dX = int((endX - startX) * args["padding"])
            dY = int((endY - startY) * args["padding"])

            # apply padding to each side of the bounding box, respectively
            startX = max(0, startX - dX)
            startY = max(0, startY - dY)
            endX = min(W, endX + (dX * 2))
            endY = min(H, endY + (dY * 2))

            # extract the padded ROI from the original (full-size) frame
            roi = orig[startY:endY, startX:endX]

            # Tesseract config: English, the LSTM OCR engine (--oem 1), and
            # treat the ROI as a single line of text (--psm 7)
            config = ("-l eng --oem 1 --psm 7")

            # obtaining text out of image
            text = filterText_video(
                pytesseract.image_to_string(roi, config=config))
            # text = pytesseract.image_to_string(roi, config=config)
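            # filterText_video (defined elsewhere in this project) is assumed to
            # strip non-alphanumeric noise from the raw OCR string; see the
            # reference sketch after this function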
            if text and text not in predictedTexts:
                # record the text and its timestamp, and annotate the frame

                print("Predicted Text")
                print("========")

                # timestamps for the live webcam are taken in real time, whereas
                # for a video file they come from the playback position
                if args["webcam"]:
                    msec = vs.get(cv2.CAP_PROP_POS_MSEC)
                    print(text, " at time ~ ", end="")
                    printTime(msec)  # converts milliseconds into hours/mins/secs
                    # keep a comparable timestamp for the results sheet
                    # (the string format here is an assumption)
                    Tt = str(datetime.timedelta(milliseconds=msec)).split(".")[0]
                else:
                    Tt = datetime.datetime.now().strftime(
                        "%H:%M:%S on %d/%m/%Y")
                T.append(Tt)
                predictedTexts.append(text)
                # draw the bounding box on the frame
                cv2.rectangle(orig, (startX, startY), (endX, endY),
                              (0, 255, 0), 2)

                os.chdir(r"C:\Users\hp\Desktop\ml\static")
                rnd = time.time()
                fname = str(rnd) + ".jpg"
                cv2.imwrite(fname, orig)
                os.chdir(r"C:\Users\hp\Desktop\ml")
                vfname.append(fname)

        fps.update()

        # show the output frame
        cv2.imshow("Text Detection", orig)

        # if the `q` key was pressed, break from the loop
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # if we are using the live webcam, stop the threaded stream
    if not args["webcam"]:
        vs.stop()
    # otherwise, release the video file pointer
    else:
        vs.release()

    # close all windows
    cv2.destroyAllWindows()

    # read the uploaded sheet of bib numbers from the static folder
    data = pd.read_csv("C:\\Users\\hp\\Desktop\\ml\\static\\" + args["excel"])
    df = pd.DataFrame(data, columns=['Bib_no'])

    bib_list = df.values.tolist()
    print('bib_list : ', bib_list)

    print('predictedTexts : ', predictedTexts)
    n = len(bib_list)

    # split each predicted string into a list so it can be compared against the
    # rows of bib_list (df.values.tolist() yields a one-element list per row)
    pred = []
    for el in predictedTexts:
        sub = el.split(', ')
        pred.append(sub)

    print('pred:', pred)

    # pad pred when the number of predictions differs from the number of bibs
    if len(pred) != n:
        pred.append(None)
    print('pred:', pred)

    p = len(pred)

    # mark each bib number with the timestamp of its first matching prediction;
    # note the comparison assumes the Bib_no values and the OCR output end up
    # with the same type (both strings)
    for i in range(n):
        for j in range(p):
            if bib_list[i] == pred[j]:
                df.loc[i, 'Status'] = T[j]
                #df.loc[i, 'Time and Date'] = T[j]
                break
            else:
                df.loc[i, 'Status'] = 'Not Predicted'

    #df.Status.fillna("Not predicted", inplace=True)

    print(df)

    download_source = (
        r'C:\Users\hp\Desktop\ml\static\Wexcel\output_video.xlsx')
    df.to_excel(download_source)

    return predictedTexts, vfname, T
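

# For reference, a minimal sketch of two helpers used above that are defined
# elsewhere in this project. The names below are hypothetical ("_sketch") and
# are never called; they only illustrate the standard EAST decoding loop and a
# simple OCR clean-up that decode_predictions_video and filterText_video are
# assumed to perform.
def _decode_predictions_sketch(scores, geometry, args):
    # the score map is 1/4 the spatial size of the resized input frame
    (numRows, numCols) = scores.shape[2:4]
    rects = []
    confidences = []
    for y in range(numRows):
        scoresData = scores[0, 0, y]
        xData0 = geometry[0, 0, y]
        xData1 = geometry[0, 1, y]
        xData2 = geometry[0, 2, y]
        xData3 = geometry[0, 3, y]
        anglesData = geometry[0, 4, y]
        for x in range(numCols):
            # skip cells with a low text probability
            if scoresData[x] < args["min_confidence"]:
                continue
            # each score-map cell corresponds to a 4x4 patch of the resized frame
            (offsetX, offsetY) = (x * 4.0, y * 4.0)
            cos = np.cos(anglesData[x])
            sin = np.sin(anglesData[x])
            # the geometry channels hold distances to the box edges plus an angle
            h = xData0[x] + xData2[x]
            w = xData1[x] + xData3[x]
            endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))
            endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))
            startX = int(endX - w)
            startY = int(endY - h)
            rects.append((startX, startY, endX, endY))
            confidences.append(float(scoresData[x]))
    return (rects, confidences)


def _filter_text_sketch(text):
    # keep only ASCII alphanumerics so bib numbers compare cleanly
    return "".join(ch for ch in text if ch.isalnum())
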
# region of the screen to grab when the source is a screen capture
mon = {'top': 0, 'left': 0, 'width': 1280, 'height': 720}
sct = None
if "screen" in url:
    sct = mss.mss()

webcam = False
#webcam = True
cap = None

if sct is None:
    if webcam:
        #cap = WebcamVideoStream(src=""+str(videoUrl)+"").start()
        cap = WebcamVideoStream(videoUrl).start()
    else:
        cap = cv2.VideoCapture(videoUrl)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, procWidth)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, procHeight)

#cap = cv2.VideoCapture(videoUrl)
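
# Downstream (not shown in this section) the frame grab presumably branches on
# the source: np.array(sct.grab(mon)) for the screen capture (mss returns BGRA,
# so a cv2.cvtColor(..., cv2.COLOR_BGRA2BGR) step would be needed), cap.read()
# for the WebcamVideoStream, and cap.read()[1] for the plain cv2.VideoCapture.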

# the earlier test values (count=50, SKIP_EVERY=150, i.e. roughly one frame
# every 5 seconds) are superseded by the settings below
count = 1000000
#skip = 2000
skip = skipNr
#skip = 0  #int(7622-5)
SKIP_EVERY = 0

every = SKIP_EVERY