コード例 #1
1
def recognize():
    """Read 'barcode.jpg', show it, and outline every decoded barcode in red."""
    path = 'barcode.jpg'
    img = cv2.imread(path)
    plt.imshow(img)
    for code in pyzbar.decode(img):
        x, y, w, h = code.rect
        # Draw a 2px red bounding box around the detected symbol
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
コード例 #2
1
ファイル: image.py プロジェクト: CoderBotOrg/coderbot
 def find_qr_code(self):
     """Return the UTF-8 text of the first symbol decoded from self._data, or None.

     self._data is expected to be a BGR image array (it is fed to
     cv2.cvtColor with COLOR_BGR2GRAY).
     """
     # IDIOM: removed the unused `img_size` local and replaced the
     # loop-then-break with an early return on the first decoded symbol.
     gray = cv2.cvtColor(self._data, cv2.COLOR_BGR2GRAY)
     for symbol in decode(gray):
         return symbol.data.decode("utf-8")
     return None
コード例 #3
0
ファイル: check.py プロジェクト: dj-shin/ctf2018-writeups
def check(path):
    """Reassemble a QR code from a grid of small QR patches and decode it.

    The image at *path* is tiled into 100x100 patches; each patch is itself
    a QR code encoding a 25-bit integer, which becomes one 5x5 tile of a
    45x45 bit matrix.  The matrix is upscaled and decoded as the final QR
    code, whose raw data bytes are returned.
    """
    img = np.array(imageio.imread(path))
    patch_size = 100
    bits = np.zeros(shape=(45, 45), dtype=np.uint8)
    for row in range(img.shape[0] // patch_size):
        for col in range(img.shape[1] // patch_size):
            patch = img[row * patch_size:(row + 1) * patch_size,
                        col * patch_size:(col + 1) * patch_size]
            payload = decode(patch)[0].data
            # 25-bit big-endian integer -> zero-padded bit string -> 5x5 tile
            bit_string = bin(int(payload.decode()))[2:].zfill(25)
            tile = np.array([int(ch) for ch in bit_string]).reshape((5, 5))
            bits[row * 5:(row + 1) * 5, col * 5:(col + 1) * 5] = tile

    # Scale each module up 4x and to 0/255 so the decoder can read it
    enlarged = np.repeat(np.repeat(bits, 4, axis=0), 4, axis=1) * 255
    return decode(enlarged)[0].data
コード例 #4
0
ファイル: iNR.py プロジェクト: yuriprym/INR-tester-program
     def verification(self):
         """Continuously scan a PiCamera stream, annotating decoded barcodes.

         Each decoded barcode gets a red bounding box and its data/type drawn
         on the frame; new barcode values are recorded in ``found``.  Returns
         None the first time an already-seen barcode is encountered again.
         """
         ap = argparse.ArgumentParser()
         ap.add_argument("-i","--image", required=True, help="path to input image")
         args=vars(ap.parse_args())

         print("[INFO] starting video stream ...")
         # BUGFIX: 'VideoSteam' was a typo for VideoStream.
         vs = VideoStream(usePiCamera=True).start()
         time.sleep(60.0)

         found =set()

         while True:

             frame=vs.read()
             frame=imutils.resize(frame,width=400)

             barcodes=pyzbar.decode(frame)
             for barcode in barcodes:
                 (x,y,w,h)= barcode.rect
                 # BUGFIX: cv2.retangle -> cv2.rectangle, and draw on the
                 # current frame ('image' was never defined in this scope).
                 cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,255),2)

                 barcodeData = barcode.data.decode("utf-8")
                 barcodeType = barcode.type

                 text= "{} ({})".format(barcodeData, barcodeType)
                 # BUGFIX: moved inside the loop -- it previously ran after
                 # the loop with stale x/y/text and the undefined 'image'.
                 cv2.putText(frame, text, (x, y-10),
                             cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 2)

                 if barcodeData not in found:
                     # BUGFIX: record new barcodes.  The original returned
                     # None on any unseen barcode and called
                     # set.intersection (a no-op) otherwise.
                     found.add(barcodeData)
                 else:
                     return None
コード例 #5
0
ファイル: qrcode.py プロジェクト: Martwall/home-assistant
    def process_image(self, image):
        """Process image."""
        import io
        from pyzbar import pyzbar
        from PIL import Image

        # Decode the raw bytes as an image and scan it for barcodes
        buffer = io.BytesIO(image)
        pil_image = Image.open(buffer)

        decoded = pyzbar.decode(pil_image)
        # State is the first barcode's text, or None when nothing decodes
        self._state = decoded[0].data.decode("utf-8") if decoded else None
コード例 #6
0
ファイル: bulk_qr_split.py プロジェクト: Submitty/Submitty
def main(args):
    """Scan through PDF and split PDF and images.

    args is [filename, split_path, qr_prefix, qr_suffix].  The input PDF is
    split at every page carrying a QR code; each section gets its own PDF,
    a cover PDF and cover JPG, and an entry (decoded id + page count) in
    decoded.json.  Exits with status 1 on any failure.
    """
    filename = args[0]
    split_path = args[1]
    qr_prefix = args[2]
    qr_suffix = args[3]
    try:
        os.chdir(split_path)
        # PERF/BUGFIX: read the PDF bytes exactly once.  The original
        # re-opened and re-read the entire file for every page and never
        # closed the handle.
        with open(filename, 'rb') as pdf_file:
            pdf_bytes = pdf_file.read()
        pdfPages = PdfFileReader(filename)
        pdf_writer = PdfFileWriter()
        i = cover_index = id_index = 0
        page_count = 1
        prev_file = ''
        data = []
        output = {}
        for page_number in range(pdfPages.numPages):
            # convert pdf to series of images for scanning
            page = convert_from_bytes(
                pdf_bytes,
                first_page=page_number+1, last_page=page_number+2)[0]
            # increase contrast of image for better QR decoding
            cv_img = numpy.array(page)
            mask = cv2.inRange(cv_img, (0, 0, 0), (200, 200, 200))
            inverted = 255 - cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
            # decode img - only look for QR codes
            val = pyzbar.decode(inverted, symbols=[ZBarSymbol.QRCODE])
            if val != []:
                # found a new qr code, split here
                # convert byte literal to string
                data = val[0][0].decode("utf-8")
                if data == "none":  # blank exam with 'none' qr code
                    data = "BLANK EXAM"
                else:
                    # strip the configured prefix/suffix from the decoded id
                    pre = data[0:len(qr_prefix)]
                    suf = data[(len(data)-len(qr_suffix)):len(data)]
                    if qr_prefix != '' and pre == qr_prefix:
                        data = data[len(qr_prefix):]
                    if qr_suffix != '' and suf == qr_suffix:
                        data = data[:-len(qr_suffix)]
                cover_index = i
                cover_filename = '{}_{}_cover.pdf'.format(filename[:-4], i)
                output_filename = '{}_{}.pdf'.format(filename[:-4], cover_index)

                output[output_filename] = {}
                output[output_filename]['id'] = data
                # save pdf accumulated for the previous section
                if i != 0 and prev_file != '':
                    output[prev_file]['page_count'] = page_count
                    with open(prev_file, 'wb') as out:
                        pdf_writer.write(out)

                    page.save('{}.jpg'.format(prev_file[:-4]), "JPEG", quality=100)

                if id_index == 1:
                    # correct first pdf's page count and print file
                    output[prev_file]['page_count'] = page_count
                    with open(prev_file, 'wb') as out:
                        pdf_writer.write(out)

                    page.save('{}.jpg'.format(prev_file[:-4]), "JPEG", quality=100)

                # start a new pdf and grab the cover
                cover_writer = PdfFileWriter()
                pdf_writer = PdfFileWriter()
                cover_writer.addPage(pdfPages.getPage(i))
                pdf_writer.addPage(pdfPages.getPage(i))

                # save cover
                with open(cover_filename, 'wb') as out:
                    cover_writer.write(out)

                # save cover image
                page.save('{}.jpg'.format(cover_filename[:-4]), "JPEG", quality=100)

                id_index += 1
                page_count = 1
                prev_file = output_filename
            else:
                # add pages to current split_pdf
                page_count += 1
                pdf_writer.addPage(pdfPages.getPage(i))
            i += 1

        # save whatever is left
        output_filename = '{}_{}.pdf'.format(filename[:-4], cover_index)
        output[output_filename]['id'] = data
        output[output_filename]['page_count'] = page_count

        with open(output_filename, 'wb') as out:
            pdf_writer.write(out)

        if not os.path.exists('decoded.json'):
            # write json to file for parsing page counts and decoded ids later
            with open('decoded.json', 'w') as out:
                json.dump(output, out, sort_keys=True, indent=4)
        else:
            # merge with the index produced by a previous run
            with open('decoded.json') as file:
                prev_data = json.load(file)

            prev_data.update(output)

            with open('decoded.json', 'w') as out:
                json.dump(prev_data, out)

        # remove original, unsplit file
        os.remove(filename)
    except Exception:
        print("\nbulk_qr_split.py: Failed when splitting pdf " + str(filename))
        traceback.print_exc()
        sys.exit(1)
コード例 #7
0
    color_frame = frames.get_color_frame()
    if not depth_frame or not color_frame:
        continue

    # Convert images to numpy arrays
    depth_image = np.asanyarray(depth_frame.get_data())
    color_image = np.asanyarray(color_frame.get_data())

    # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
    depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

    # Stack both images horizontally
    #images = np.hstack((color_image, depth_colormap))

    # find the barcodes in the frame and decode each of the barcodes
    barcodes = pyzbar.decode(color_image)
    #barcodes = pyzbar.decode(color_image, None, True)
    #print(barcodes)#for debug

    # loop over the detected barcodes
    for barcode in barcodes:
        # extract the bounding box location of the barcode and draw
        # the bounding box surrounding the barcode on the image
        (x, y, w, h) = barcode.rect
        cv2.rectangle(color_image, (x, y), (x + w, y + h), (0, 0, 255), 2)

        # the barcode data is a bytes object so if we want to draw it
        # on our output image we need to convert it to a string first
        barcodeData = barcode.data.decode("utf-8")
        barcodeType = barcode.type
コード例 #8
0
from pyzbar.pyzbar import decode
from PIL import Image
# pip install pillow

# Open the QR-code image and print everything pyzbar can decode from it.
img = Image.open(
    "C:/Users/DELL/Desktop/Projects/Python/Python projects/6. QRcode/TestFolder/myQRcode.png"
)

result = decode(img)
print(result)
コード例 #9
0
def findAndReadQR_OldVer(img):
    """Locate QR codes in *img* geometrically and decode them.

    Finds the three finder patterns via the contour hierarchy, derives the
    code's orientation and corners, perspective-warps the region to a
    300x300 patch, and feeds it to zbar.  Returns a list of QRresult
    objects for every decoded code whose text appears in the module-level
    ``dict`` lookup table (NOTE(review): that global shadows the builtin).
    """
    grayScale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # blurred = cv2.GaussianBlur(grayScale , (5,5) ,1)
    # blurred = cv2.medianBlur(grayScale , 5)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))

    grayScale = clahe.apply(grayScale)
    thresh = cv2.adaptiveThreshold(grayScale, 255,
                                   cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY, 63, 4)
    #cv2.imshow("Gray", thresh)
    # NOTE(review): the 3-value findContours return is the OpenCV 3.x API.
    _, cnts, heiarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                         cv2.CHAIN_APPROX_SIMPLE)
    # cv2.drawContours(img , cnts, -1, (0, 255, 0), 3)
    qrResults = []
    if heiarchy is not None:
        # Go through contour heiarchy and find the 3 squares(Finder patterns) in the QR code
        allPatterns = traverseHeiarchy(heiarchy[0], 0, cnts)
        if allPatterns:  # allPatterns contains sets of 3 finder patterns
            for patterns in allPatterns:
                bottom = 0
                right = 0
                orientation = -1
                # Use image moments to calculate the centers of the finder patterns and then figure out
                # which two of them are part of the longest side in the triangle
                M = [
                    cv2.moments(patterns[0]),
                    cv2.moments(patterns[1]),
                    cv2.moments(patterns[2])
                ]
                centers = [
                    Point(M[0]['m10'] / M[0]['m00'],
                          M[0]['m01'] / M[0]['m00']),
                    Point(M[1]['m10'] / M[1]['m00'],
                          M[1]['m01'] / M[1]['m00']),
                    Point(M[2]['m10'] / M[2]['m00'], M[2]['m01'] / M[2]['m00'])
                ]
                len1 = distSq(centers[0], centers[1])
                len2 = distSq(centers[0], centers[2])
                len3 = distSq(centers[1], centers[2])
                patternPoints = []
                for c in patterns:
                    patternPoints.append(convertToPoints(c))
                # IDIOM: renamed from 'max', which shadowed the builtin.
                longest = len1 if len1 > len2 else len2
                longest = len3 if len3 > longest else longest
                top = 1
                # Time to figure out the indice of the pattern that's top
                rm = [0, 2]
                if longest == len1:
                    top = 2
                    rm = [0, 1]
                elif longest == len2:
                    top = 1
                    rm = [0, 2]
                elif longest == len3:
                    top = 0
                    rm = [1, 2]
                sideCenters = [centers[rm[0]], centers[rm[1]]]

                # We'll use the equation for the line ax+by+c=0 to figure out how the qr code is oriented
                # Once we've figured that out we know which finder pattern is the "bottom" and the "right"
                a, b, c = linearEquation(sideCenters[0], sideCenters[1])
                if b == 0:
                    continue
                dist = distFromPointToLine(a, b, c, centers[top])

                a = a / b
                if a > 0 and dist < 0:
                    orientation = NORTH_ORIENTATION
                    bottom = rm[0]
                    right = rm[1]
                elif a > 0 and dist > 0:
                    orientation = SOUTH_ORIENTATION
                    bottom = rm[1]
                    right = rm[0]
                elif a < 0 and dist > 0:
                    orientation = EAST_ORIENTATION
                    right = rm[0]
                    bottom = rm[1]
                elif a < 0 and dist < 0:
                    orientation = WEST_ORIENTATION
                    bottom = rm[0]
                    right = rm[1]

                # Now we want to find the 4 point in the finder pattern like if it was a square
                bottomVerticies = getVertices(patterns[bottom], -a)
                rightVerticies = getVertices(patterns[right], -a)
                topVerticies = getVertices(patterns[top], -a)
                # We'll rotate the points with respect to the orientation
                bottomVerticies = rotateCorners(bottomVerticies, orientation)
                rightVerticies = rotateCorners(rightVerticies, orientation)
                topVerticies = rotateCorners(topVerticies, orientation)

                lastCorner = lineIntersection(rightVerticies[1],
                                              rightVerticies[2],
                                              bottomVerticies[3],
                                              bottomVerticies[2])
                maxX = len(img[0])
                maxY = len(img)
                # This would mean that the entire qr code is not in the picture, so we can't read it
                if lastCorner.x < 0 or lastCorner.x >= maxX or lastCorner.y < 0 or lastCorner.y >= maxY:
                    # print "Possible QR Found"
                    continue

                qrCorners = [[topVerticies[0].x, topVerticies[0].y],
                             [rightVerticies[1].x, rightVerticies[1].y],
                             [bottomVerticies[3].x, bottomVerticies[3].y],
                             [lastCorner.x, lastCorner.y]]
                qrX = (qrCorners[0][0] + qrCorners[1][0] + qrCorners[2][0] +
                       qrCorners[3][0]) / 4
                qrY = (qrCorners[0][1] + qrCorners[1][1] + qrCorners[2][1] +
                       qrCorners[3][1]) / 4

                src = np.float32(qrCorners)
                dst = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])
                ##We have to transform the part of the image where the qr code is, to something our qr reader can read
                matrix = cv2.getPerspectiveTransform(src, dst)

                final = cv2.warpPerspective(thresh, matrix, (300, 300))
                cv2.imshow("Gray", final)
                qrResult = decode(final, [ZBarSymbol.QRCODE])
                # BUGFIX: was 'len(qrResult) is not 0' -- an identity
                # comparison against an int literal (SyntaxWarning on
                # Python 3.8+); use a value comparison instead.
                if len(qrResult) != 0:
                    # circleWidthArray = [102, 101, 90.5, 90, 120, 0, 81.5]

                    # print "Width %d" % dict[text]

                    side1 = ((qrCorners[0][0] - qrCorners[1][0])**2 +
                             (qrCorners[0][1] - qrCorners[1][1])**2)
                    side2 = (qrCorners[0][0] - qrCorners[2][0])**2 + (
                        qrCorners[0][1] - qrCorners[2][1])**2
                    side3 = (qrCorners[1][0] - qrCorners[3][0])**2 + (
                        qrCorners[1][1] - qrCorners[3][1])**2
                    side4 = (qrCorners[2][0] - qrCorners[3][0])**2 + (
                        qrCorners[2][1] - qrCorners[3][1])**2
                    side1 = math.sqrt(side1)
                    side2 = math.sqrt(side2)
                    side3 = math.sqrt(side3)
                    side4 = math.sqrt(side4)
                    # Distance estimate from average side length and the
                    # camera focal constant (defined elsewhere).
                    dist = Focal / ((side1 + side2 + side3 + side4) / 4)

                    # nv = [np.array([[qrX, qrY], [qrX+normVec[0], qrY+normVec[1]]], dtype=np.int32)]
                    # cv2.drawContours(img, nv, -1, (0, 255, 0), 3)

                    text = qrResult[0][0]
                    print(text)
                    if text in dict:
                        qrResults.append(
                            QRresult(text, (qrX, qrY), dist, dict[text]))

                    # print "Circle width: ",dict[text], "cm"
                else:
                    # qrX = (qrCorners[0][0] + qrCorners[1][0] + qrCorners[2][0] + qrCorners[3][0]) / 4
                    # qrY = (qrCorners[0][1] + qrCorners[1][1] + qrCorners[2][1] + qrCorners[3][1]) / 4
                    # side1 = ((qrCorners[0][0] - qrCorners[1][0]) ** 2 + (qrCorners[0][1] - qrCorners[1][1]) ** 2)
                    # side2 = (qrCorners[0][0] - qrCorners[2][0]) ** 2 + (qrCorners[0][1] - qrCorners[2][1]) ** 2
                    # side1 = math.sqrt(side1)
                    # side2 = math.sqrt(side2)
                    # dist = Focal / ((side1 + side2) / 2)
                    # text = "CAN'T READ"
                    # qrResults.append(QRresult(text, (qrX, qrY), dist))
                    continue
    return qrResults
コード例 #10
0
ファイル: qr.py プロジェクト: 15331335/qr

def get_file_content(filePath):
    """Return the raw bytes of the file at *filePath*."""
    with open(filePath, 'rb') as handle:
        data = handle.read()
    return data


import subprocess

# Name the capture file after the current microsecond to avoid collisions.
timestamp = str(datetime.datetime.now().microsecond)
filetype = ".png"
filename = timestamp + filetype

# Interactive macOS screen capture into `filename`.
os.system("screencapture -i " + filename)

# qr
try:
    result = decode(Image.open(filename))
except IOError:
    pass
else:
    if len(result) >= 1:
        for i in result:
            # BUGFIX: i.data is bytes, so the original str + bytes
            # concatenation raised TypeError.  SECURITY: pass argv as a
            # list (shell=False) so untrusted QR content cannot be
            # interpreted by a shell.
            subprocess.run(
                ["open", "-a", "/Applications/Safari.app",
                 i.data.decode("utf-8")])

    # ocr
    else:
        client = AipOcr(APP_ID, API_KEY, SECRET_KEY)
        image = get_file_content(filename)
        result = client.basicGeneral(image, options)
        result_str = ""
        for i in range(result["words_result_num"]):
            result_str = result_str + result["words_result"][i]["words"]
コード例 #11
0
vs = VideoStream(usePiCamera=True).start()
time.sleep(2.0)

# open the output CSV file for writing and initialize the set of
# barcodes found thus far
csv = open(args["output"], "w")
found = set()
# loop over the frames from the video stream
while True:
    # grab the frame from the threaded video stream and resize it to
    # have a maximum width of 400 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=600)

    # find the barcodes in the frame and decode each of the barcodes
    barcodes = pyzbar.decode(frame)
    # loop over the detected barcodes
    for barcode in barcodes:
        # extract the bounding box location of the barcode and draw
        # the bounding box surrounding the barcode on the image
        (x, y, w, h) = barcode.rect
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)

        # the barcode data is a bytes object so if we want to draw it
        # on our output image we need to convert it to a string first
        barcodeData = barcode.data.decode("utf-8")  # THIS IS IMPORTANT
        barcodeType = barcode.type
        print(barcodeData)
        # draw the barcode data and barcode type on the image
        text = "{} ({})".format(barcodeData, barcodeType)
        cv2.putText(frame, text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
コード例 #12
0
 def from_qr(cls, path):
     """Construct an instance from the first barcode decoded in the image at *path*."""
     decoded = pyzbar.decode(Image.open(path))
     return cls(decoded[0].data)
コード例 #13
0
import pyautogui
from pyzbar.pyzbar import decode

from PIL import Image
import pyotp
import time

# Screenshot the current display and decode any QR code found in it
screenshot = pyautogui.screenshot()
screenshot.save(r'.\qr.png')
decoded = decode(Image.open('qr.png'))

# Pull the secret key out of the decoded payload's repr
# (between the last 't=' prefix match and the final '&')
decoded_text = str(decoded)
start = 't='
end = '&'
secret = decoded_text[decoded_text.find(start) + len(start):decoded_text.rfind(end)]

#if you want to enter your secret key comment upper line and uncomment down line 
#secret = "YOUR KEY GOES HERE"
while True:
    totp = pyotp.TOTP(secret, interval=30)
    # Seconds remaining in the current 30-second TOTP window
    remaining = 30 - (int(time.time()) % 30)
    print("Current OTP:   "+ str(totp.now()) + "     Until time:  "+ str(remaining) ,end="\r")
    #print("Current OTP:   "+ str(totp.now()) + "     Until time: "+ str(x) )
    time.sleep(1)

"""
Default parameters;
6 digit otp
コード例 #14
0
def Scan(state):
    """Scan a NAO robot's camera stream for domino barcodes.

    NOTE(review): this is Python 2 code (print statements; map() used as a
    list) driving a NAO robot through naoqi ALProxy objects.

    state -- "unsure" / "deal" / "boneyard"; selects the camera and how
    many barcodes must be collected before returning (7 for "deal",
    1 otherwise).  Returns (bar, posBar): decoded digit lists and their
    [x, y, z] positions; z is a module-level global.
    """
    global tracker
    global cameraNo, bar, z, headMovement, posBar
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-o", "--output", type=str, default="barcodes.csv",
            help="path to output CSV file containing barcodes")
    args = vars(ap.parse_args())

    try:
        # Proxy to camera and motion
        print("Starting video stream...")
        camera = ALProxy("ALVideoDevice", "Mario.local", 9559)
        headMovement = ALProxy("ALMotion", "Mario.local", 9559)
        tracker = ALProxy("ALTracker", "Mario.local", 9559)
    except:
        print("Error connecting to proxies")

    # NOTE(review): 'csv' shadows the stdlib module name
    csv = open(args["output"], "w")
    found = set()
    targetName = "Face"
    faceWidth = 0.1 # default
    tracker.registerTarget(targetName, faceWidth)
    # Subscribe to camera
    if state == "unsure":
        cameraNo = 0
    elif state == "deal" or state == "boneyard":
        cameraNo = 1
    # mainDomino.cameraNo
    # mainDomino.cameraNo
    AL_kQVGA = 3 # 1280x960
    AL_kBGRColorSpace = 13

    try:
        captureDevice = camera.subscribeCamera("captureDevice", cameraNo, AL_kQVGA, AL_kBGRColorSpace, 10)
    except:
        print("Error subscribing to camera")
    # init = False
    # Create image
    width = 1280
    height = 960
    image = np.zeros((height, width, 3), np.uint8)

    bar = []
    posBar = []

    while True:
        # Position head at center
        moveHeadCenter()


        # Get the image
        try:
            result = camera.getImageRemote(captureDevice)
        except:
            print("Error getting image remotely")

        if result == None:
            print 'Error: Cannot capture.'
        elif result[6] == None:
            print 'Error: No image data string.'
        else:
            # Translate value to mat
            # (result[6] is the camera's raw interleaved BGR byte string)
            values = map(ord, list(result[6]))
            i = 0
            for y in range(0, height):
                for x in range(0, width):
                    image.itemset((y, x, 0), values[i + 0])
                    image.itemset((y, x, 1), values[i + 1])
                    image.itemset((y, x, 2), values[i + 2])
                    i += 3

            # if cameraNo == 1:
            #     tiles = image[300:800, 0:1280]
            # else:
            tiles = image

            grey = cv2.cvtColor(tiles, cv2.COLOR_BGR2GRAY) # convert image to grey
            ret,thresh1 = cv2.threshold(grey,127,255,cv2.THRESH_BINARY)
            ret3,th3 = cv2.threshold(grey,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)

            barcodes = pyzbar.decode(th3) # find and decode barcodes

            # Loop over detected barcodes
            for barcode in barcodes:

                (x, y, w, h) = barcode.rect
                # Convert data into string
                barcodeData = barcode.data.decode("utf-8")
                barcodeType = barcode.type

                # Put barcode data, x and y coords on the image
                text = "{}".format(barcodeData)
                cv2.putText(image, text, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

                if barcodeData not in found:
                    found.add(barcodeData)
                    # Put new found barcodes into an array
                    # (rebinds 'barcode' to a list of the value's digits)
                    barcode = map(int, barcodeData)
                    # Put the barcode x,y position into array
                    posXYZ = [x, y, z]
                    # Add info to barcode position array, so we know where the barcode was found
                    if len(barcode) > 2:
                        break
                    else:
                        bar.insert(0, barcode)
                        posBar.insert(0, posXYZ)
                    print bar
                    print posBar
                    #moveHeadLeft()
                   # moveHeadRight()

                    if state == "deal":
                        # If we haven't seen all tiles that have been dealt, move head from side to side.
                        # if len(bar) < 7:
                        #     moveHeadLeft()
                        #     moveHeadRight()
                        # If we have seen them all, then stop video stream.
                        if len(bar) == 7:

                            # tracker.track(targetName)
                            camera.releaseImage(captureDevice)
                            camera.unsubscribe(captureDevice)
                            # camera.stop()
                            csv.close()
                            cv2.destroyAllWindows()
                            #camera.stop()
                            return bar, posBar
                    elif state == "boneyard" or state == "unsure":
                        if len(bar) == 1:
                            # tracker.track(targetName)
                            camera.releaseImage(captureDevice)
                            camera.unsubscribe(captureDevice)
                            csv.close()
                            cv2.destroyAllWindows()
                            #camera.stop()
                            # camera.stop()
                            return bar, posBar


            # Show video stream

        cv2.imshow("Barcode Scanner", th3)
        #cv2.imshow("Barcode Scanner", image)
        #cv2.imshow("new image", new_image)

            # cv2.waitKey(1)
        key = cv2.waitKey(1) & 0xFF


            # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # close the output CSV file do a bit of cleanup
    # print("[INFO] cleaning up...")
    csv.close()
    cv2.destroyAllWindows()
    camera.stop()
コード例 #15
0
ファイル: Demo.py プロジェクト: FeHanHanBlues/Navigation
def getQrCode():
    """
    db & wy
    :return: position (LEFT, RIGHT, FRONT or None), value (int Qr id or None)
    """
    global qLeft, qRight
    time.sleep(1)
    frames = [qLeft.get(), qRight.get()]
    # cv2.imshow('test', frames[0])
    # cv2.waitKey(0)
    # for i in range(2):
    #     frames.append(cv2.cvtColor(cap[i].read()[1], cv2.COLOR_BGR2GRAY))
    #     time.sleep(1)
    #     frames.append(cv2.cvtColor(cap[i].read()[1], cv2.COLOR_BGR2GRAY))
    #     time.sleep(1)
    #     frames.append(cv2.cvtColor(cap[i].read()[1], cv2.COLOR_BGR2GRAY))

    hearts = []
    QrValues = []
    for frame in frames:
        ret, binary = cv2.threshold(frame, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)

        # NOTE(review): 3-value findContours return is the OpenCV 3.x API.
        test, contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

        # Keep only contours that approximate to quadrilaterals
        polycontours = []
        for contour in contours:
            tmp = cv2.approxPolyDP(contour, 10, True)
            if len(tmp) == 4:
                polycontours.append(tmp)

        # Pick the largest quadrilateral; skip frames without a big one
        maxArea = 0
        maxContour = None
        for contour in polycontours:
            if(len(contour) != 4):
                continue
            if cv2.contourArea(contour) > maxArea:
                maxArea = cv2.contourArea(contour)
                maxContour = contour
        if maxArea < 3000:
            continue
        # Warp the quadrilateral to a 200x200 square and try to decode it
        # both raw and after Otsu binarization
        Points = np.array(maxContour, dtype=np.float32).reshape(4, 2)
        dstPoints = np.array([[0, 200],
                     [200, 200],
                     [200, 0],
                     [0, 0]], dtype=np.float32)
        transMat = cv2.getPerspectiveTransform(Points, dstPoints)
        QRimg = cv2.warpPerspective(frame, transMat, (200, 200))
        QrValues += pyzbar.decode(QRimg)
        ret, QRimg = cv2.threshold(QRimg, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
        QrValues += pyzbar.decode(QRimg)
        heart = np.mean(maxContour.reshape(4,2), axis=0)
        hearts.append(heart[0])

    if len(hearts) == 0:
        return None, None
    # Decide left/front/right from the mean horizontal offset of the code
    diff = np.mean(hearts) - frames[0].shape[1] / 2
    if diff > 100:
        position = RIGHT
    elif diff < -100:
        position = LEFT
    else:
        position = FRONT
    if len(QrValues) == 0:
        return position, None
    else:
        return position, int(QrValues[0].data.decode('utf-8'))
    # BUGFIX: removed the unreachable 'return "", ""' that followed the
    # exhaustive if/else above, and the dead 'position = None' assignment.
コード例 #16
0
                # stock_snack_process.join()
            product_info = 'test'
            frame = self._draw_barcode_frame(frame, barcode_info[0:1], product_info)
            self.have_detection = True
        return self.have_detection, frame


    def _is_object_font_of_camera_change(self, new_frame):
        """Return True when the scene in *new_frame* differs noticeably from the stored previous frame."""
        if self.old_frame is None or not self.old_frame.any():
            # No usable previous frame: remember this one and report a change
            self.old_frame = new_frame
            return True
        previous_gray = cv2.cvtColor(self.old_frame, cv2.COLOR_BGR2GRAY)
        current_gray = cv2.cvtColor(new_frame, cv2.COLOR_BGR2GRAY)
        (score, diff) = compare_ssim(previous_gray, current_gray, full=True)
        self.old_frame = new_frame
        # print(score)
        # SSIM above 0.75 means the frames are similar enough -> no change
        return score <= 0.75


if __name__ == '__main__':
    # image = Image.open('/home/deeplearning/snack_bar/client/barcode.png')
    img = cv2.imread('/home/deeplearning/snack_bar/client/barcode.png')
    h, w = img.shape[:2]
    # cv2.cvtColor(image, cv2.COLOR_BAYER_BG2GRAY)
    # Decode from a raw (pixel-bytes, width, height) tuple of channel 0
    result = decode((img[:, :, 0].astype('uint8').tobytes(), w, h))
    print(result)
コード例 #17
0
import cv2
import numpy as np
import pyzbar.pyzbar as pyzbar
import json

image = cv2.imread("ara.png")

decodedObjects = pyzbar.decode(image)
# BUGFIX: define ou_data up front; it previously raised NameError when no
# barcode was decoded from the image.
ou_data = None
for obj in decodedObjects:
    print("Type:", obj.type)
    ou_data = obj.data
    print("Data: ", obj.data, "\n")

print('ou_data', ou_data, type(ou_data))
if ou_data is not None:
    # The QR payload is a dict-like string with single quotes; normalize
    # to double quotes so json can parse it.
    value = ou_data.decode('utf-8')
    value = json.loads(value.replace("'", '"'))
    print(value, type(value))

    print("Name: ", value['name'])
    print("DOB: ", value['DOB'])
    print("Address: ", value['address'])

cv2.imshow("Frame", image)
cv2.waitKey(0)
コード例 #18
0
ファイル: final.py プロジェクト: ando-shoki/auto_scoring
def upload_file():
    """Flask view: grade an uploaded answer-sheet image.

    Each answer cell on the sheet carries a QR code holding the question
    number; the handwritten digit to the right of the code is cropped,
    binarised, padded, resized to 28x28 and classified with the global
    ``model``.  Renders ``result.html`` with per-question results on a
    successful POST, ``index.html`` otherwise.
    """
    # If a bug shows up, keras may be the culprit (translated comment)
    #global graph
    #with graph.as_default():
    if request.method == 'POST':
        '''
        @img_file : 回答用紙のファイル
        '''
        # (above: img_file is the uploaded answer-sheet file)
        if ('img_file' not in request.files):
            flash('ファイルがありません')  # flash notifies the user whether the action succeeded
            return redirect(request.url)
        img_file = request.files['img_file']

        if img_file.filename == '':
            flash('ファイルがありません')
            return redirect(request.url)
        if img_file and allowed_file(img_file.filename):

            img_filename = secure_filename(img_file.filename)
            img_file.save(os.path.join(UPLOAD_FOLDER, img_filename))
            img_filepath = os.path.join(UPLOAD_FOLDER, img_filename)

            # qr_img = keras.preprocessing.image.load_img(img_filepath ,grayscale=True,target_size=(image_size,image_size))
            qr_imgcv = cv2.imread(img_filepath)
            # gray_img = cv2.cvtColor(qr_img, cv2.COLOR_BGR2GRAY)
            gray_imgcv = cv2.cvtColor(qr_imgcv, cv2.COLOR_BGR2GRAY)

            # Gamma-correct the grayscale image via a 256-entry lookup table.
            gamma = 1.3
            gamma_table = [
                np.power(x / 255.0, gamma) * 255.0 for x in range(256)
            ]
            gamma_table = np.round(np.array(gamma_table)).astype(np.uint8)
            gray_imgcv = cv2.LUT(gray_imgcv, gamma_table)
            blurred_img = cv2.GaussianBlur(gray_imgcv, (5, 5), 0)
            barcodes_1 = pyzbar.decode(blurred_img)
            barcodes_2 = pyzbar.decode(gray_imgcv)

            # Read as many QR codes as possible: keep whichever of the two
            # preprocessings decoded more of them (translated comment)
            if len(barcodes_1) >= len(barcodes_2):
                barcodes = barcodes_1
            else:
                barcodes = barcodes_2

            #the picture's number
            i = 1

            ans_correct = 0
            ans_false = []
            qr_list = np.empty((1, 3))
            for barcode in barcodes:
                (x, y, w, h) = barcode.rect
                # the imwrite calls below are for debugging/inspection (translated comment)
                #binary image
                barcodeData = barcode.data.decode("utf-8")
                # The answer digit sits just to the right of the QR code.
                crop_cut = gray_imgcv[y:y + h, x + w + 5:x + 2 * w + 5]

                #If the font is too small, do this
                kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
                crop_cut = cv2.erode(crop_cut, kernel)
                th, binary = cv2.threshold(crop_cut, 125, 255,
                                           cv2.THRESH_BINARY)
                #cv2.imwrite('crop_cut_binary_{}.jpg'.format(i),binary)

                #center the number and padding
                binary2black = 255 - binary
                #cv2.imwrite('binary2black_{}.jpg'.format(i), binary2black)

                _, thresh = cv2.threshold(
                    binary, 125, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
                contours, hierarchy = cv2.findContours(thresh, 3,
                                                       cv2.CHAIN_APPROX_SIMPLE)
                # NOTE(review): assumes at least one contour was found;
                # contours[0] raises IndexError otherwise -- confirm.
                cnt = contours[0]
                X, Y, W, H = cv2.boundingRect(cnt)
                rectangle = binary2black[Y:Y + H, X:X + W]

                #cv2.imwrite('Padding_{}.jpg'.format(i),Padding)
                # Choose side padding by aspect ratio so the digit ends up
                # roughly centred before the 28x28 resize.
                rec_shape = rectangle.shape[0] / rectangle.shape[1]
                if rec_shape <= 1.3:
                    Padding = cv2.copyMakeBorder(rectangle,
                                                 round(H * 0.33),
                                                 round(H * 0.26),
                                                 round(W * 0.6),
                                                 round(W * 0.6),
                                                 cv2.BORDER_CONSTANT,
                                                 value=[0, 0, 0])
                elif rec_shape <= 4.0:
                    Padding = cv2.copyMakeBorder(rectangle,
                                                 round(H * 0.33),
                                                 round(H * 0.26),
                                                 round(W * 0.8),
                                                 round(W * 0.8),
                                                 cv2.BORDER_CONSTANT,
                                                 value=[0, 0, 0])
                else:
                    Padding = cv2.copyMakeBorder(rectangle,
                                                 round(H * 0.33),
                                                 round(H * 0.26),
                                                 round(W * 5),
                                                 round(W * 5),
                                                 cv2.BORDER_CONSTANT,
                                                 value=[0, 0, 0])

                #Resize and sharpen image
                resized = cv2.resize(Padding, (28, 28),
                                     interpolation=cv2.INTER_AREA)
                #cv2.imwrite('resized_{}.jpg'.format(i), resized)
                #resized = np.array(resized.reshape(1,28,28), dtype = 'float64')

                #increase the picture's number
                i += 1

                #Predict and show the result
                pred = model.predict(resized.reshape(1, 28, 28)).argmax()
                print("No.{0} question's answer is {1}".format(
                    int(barcodeData) + 1, pred))

                # QR payload is the 0-based question index; ``answer`` is the
                # global answer key.
                qr_que = int(barcodeData)
                qr_ans = int(pred)
                true_ans = int(answer[qr_que])

                if qr_ans == true_ans:
                    print('The answer is correct')
                    ans_correct += 1
                else:
                    print('The answer is incorrect')
                    ans_false.append(qr_que + 1)

                qr_list = np.append(qr_list, [[qr_que + 1, qr_ans, true_ans]],
                                    axis=0)

        # NOTE(review): if the uploaded file failed validation above,
        # ``qr_list``/``ans_correct`` are unbound here -> NameError; this
        # assumes every POST carries a valid file -- confirm.
        # Drop the uninitialised first row from np.empty, then sort by
        # question number.
        qr_list = np.delete(qr_list, 0, 0)
        qr_list = qr_list[qr_list[:, 0].argsort(), :]
        x1 = (ans_correct / 20.0) * 100  # assumes exactly 20 questions -- TODO confirm
        ans_false.sort()
        print('Predict false: {}'.format(ans_false))
        print(
            'If the prediction is 100% correct. The correct rate of answer is {} %'
            .format(x1))
        return render_template('result.html',
                               res_list=qr_list,
                               ans_correct=ans_correct)
    #img_read = cv2.imread('test2.jpg')
    #read_pred(img_read)
    return render_template('index.html')
コード例 #19
0
# NOTE(review): this excerpt is truncated -- ``img`` and ``cimg`` are defined
# above the visible text and the ``try`` block below has no matching
# ``except`` within it.
# Detect circles with the Hough transform, then try to decode a QR code
# inside each circular region of interest.
circles = cv2.HoughCircles(img,cv2.HOUGH_GRADIENT,1,500,param1=50,param2=100,minRadius=100,maxRadius=0)

circles = np.uint16(np.around(circles))
for i in circles[0,:]:
    circle = (i[0],i[1])
    circle_radius = i[2]
    # Build a filled white circle mask matching the colour image.
    mask = np.zeros(cimg.shape, dtype=np.uint8)
    cv2.circle(mask, circle, circle_radius, (255, 255, 255), -1, 8, 0)
    # Apply mask (using bitwise & operator)
    ROI = cimg & mask
    # Crop/center result (assuming max_loc is of the form (x, y))
    ROI = ROI[circle[1] - circle_radius:circle[1] + circle_radius,
                                circle[0] - circle_radius:circle[0] + circle_radius, :]

    try:
        QR = pyzbar.decode(ROI)
        QR_data = QR[0][0]  # IndexError when no code decodes; presumably caught by the missing except -- confirm
        # draw the outer circle
        cv2.circle(cimg,(i[0],i[1]),i[2],(0,255,0),2)
        # draw the center of the circle
        cv2.circle(cimg,(i[0],i[1]),2,(0,0,255),3)
        circle = (i[0],i[1])
        circle_radius = i[2]
        circle_x = i[0]
        circle_y = i[1]
        height, width, channels = cimg.shape
        # Offset of the circle centre from the image centre, in pixels.
        delta_x = circle_x - (width/2)
        delta_y = circle_y - (height/2)
        font = cv2.FONT_HERSHEY_SIMPLEX
        text_x = 'deltaX: ' + str(delta_x) + 'px'
        text_y = 'deltaY: ' + str(delta_y) + 'px'
コード例 #20
0
# NOTE(review): truncated excerpt -- ``args`` and ``vs`` (the threaded video
# stream) are set up above the visible text, and the loop body continues
# below it.
time.sleep(2.0)

# open the output CSV file for writing and initialize the set of
# barcodes found thus far
# NOTE(review): the name ``csv`` shadows the stdlib ``csv`` module and the
# file handle is never closed in the visible text.
csv = open(args["output"], "w")
found = set()

# loop over the frames from the video stream
while True:
	# grab the frame from the threaded video stream and resize it to
	# have a maximum width of 400 pixels
	frame = vs.read()
	frame = imutils.resize(frame, width=400)

	# find the barcodes in the frame and decode each of the barcodes
	barcodes = pyzbar.decode(frame)

	# loop over the detected barcodes
	for barcode in barcodes:
		# extract the bounding box location of the barcode and draw
		# the bounding box surrounding the barcode on the image
		(x, y, w, h) = barcode.rect
		cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)

		# the barcode data is a bytes object so if we want to draw it
		# on our output image we need to convert it to a string first
		barcodeData = barcode.data.decode("utf-8")
		barcodeType = barcode.type

		# draw the barcode data and barcode type on the image
		text = "{} ({})".format(barcodeData, barcodeType)
コード例 #21
0
from pyzbar.pyzbar import decode
from PIL import Image

# Decode the first barcode/QR code found in test.png and print its payload.
img = Image.open('test.png')
results = decode(img)
first_payload = results[0].data
print(first_payload.decode())
コード例 #22
0
ファイル: main.py プロジェクト: dehuy69/visitor_welcomes

# Visitor-welcome kiosk loop: continuously grab frames from the Pi camera,
# decode any QR code, look the payload up in visitors.csv and greet the
# visitor via ``say``.  The CSV is hot-reloaded whenever its mtime changes.
df = pd.read_csv('visitors.csv')
csv_edit_time = os.path.getmtime('visitors.csv')
camera = PiCamera()
camera.resolution = (480, 320)
rawCapture = PiRGBArray(camera)
privious_data = []  # last QR payload greeted (sic: "previous"); avoids repeated greetings
camera.start_preview(fullscreen=False, window=(100, 20, 480, 320))
for frame in camera.capture_continuous(rawCapture,
                                       format="bgr",
                                       use_video_port=True):
    try:
        # Reload the visitor table if the file changed on disk.
        if os.path.getmtime('visitors.csv') > csv_edit_time:
            df = pd.read_csv('visitors.csv')
            csv_edit_time = os.path.getmtime('visitors.csv')
        frame = rawCapture.array
        decodedObjects = pyzbar.decode(frame)
        if decodedObjects != []:
            for obj in decodedObjects:
                if obj.data != privious_data:
                    print('Type : ', obj.type)
                    print('Data : ', obj.data, '\n')
                    qr_id = obj.data.decode('utf8')
                    # pandas query: @qr_id references the local variable.
                    visitor = df.query("qr_id == @qr_id")
                    say(visitor)
                    privious_data = obj.data
        # Reset the capture buffer for the next frame.
        rawCapture.truncate(0)
    except:
        # NOTE(review): bare except -- any error (including
        # KeyboardInterrupt) silently stops the preview and ends the loop.
        camera.stop_preview()
        break
コード例 #23
0
ファイル: CheQr.py プロジェクト: somnathbm/arya-cheqr
def decode(im):
    """Return every barcode/QR code pyzbar can find in *im*.

    Thin wrapper around :func:`pyzbar.decode`; the result is an empty
    list when the image contains no readable code.
    """
    return pyzbar.decode(im)
コード例 #24
0
from pyzbar import pyzbar
import cv2

# Annotate every barcode/QR code found in the source image with its
# bounding box and decoded payload, then display the result until a key
# is pressed.
image = cv2.imread("QRmatrix.png")
codes = pyzbar.decode(image)

for code in codes:
    left, top, width, height = code.rect
    cv2.rectangle(image, (left, top), (left + width, top + height),
                  (255, 0, 0), 2)
    label = "{} ({})".format(code.data.decode(), code.type)
    cv2.putText(image, label, (left, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                (0, 0, 255), 2)

cv2.imshow("Image", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
コード例 #25
0
ファイル: DSC-Task3.py プロジェクト: z404/DSC-Tasks
# Simple menu-driven utility: generate a QR code from user input, or
# decode one from a file.
print('PROGRAM TO GENERATE AND DECODE A QR CODE')
print('Enter 1 to Generate a QR code')
print('Enter 2 to Decode a QR code')

# Read the menu selection and dispatch on it.
choice = input()
if choice == '1':
    # Build a QR code object and fill it with user-supplied data.
    maker = qrcode.QRCode(
        version=1,
        error_correction=qrcode.constants.ERROR_CORRECT_L,
        box_size=10,
        border=4,
    )
    print('Enter data to encode')
    maker.add_data(input())
    maker.make(fit=True)
    # Render to a Pillow image and save it next to the script.
    picture = maker.make_image(fill_color="black", back_color="white")
    picture.save(os.getcwd() + '/qrcode.jpg')
    print('QR Code is saved in current directory as "qrcode.jpg"')
elif choice == '2':
    # Load the image named by the user and decode the first QR code in it.
    print("Enter the QR Code's file name. (Save it in current directory)")
    qr_image = Image.open(input())
    result = decode(qr_image)[0].data.decode("utf-8")
    print("Decoded result =", result)
コード例 #26
0
    async def listener(self, message: discord.Message):
        """Find QR code(s) in message attachments and reply with their contents.

        Skips DMs, guilds where the checker is disabled, messages without
        attachments, and non-image attachments.  A single code is echoed
        in one reply; multiple codes are paginated across several
        messages (capped at ~10 follow-ups).
        """
        if not message.guild:
            return
        # check if enabled
        if not await self.config.guild(message.guild).get_attr(KEY_ENABLED)():
            self.logger.debug(
                "QR Checker disabled for %s (%s); return early",
                message.guild.name,
                message.guild.id,
            )
            return
        if not message.attachments:
            self.logger.debug("No attachments, return early")
            return
        for attachment in message.attachments:
            contentType = attachment.content_type
            if not contentType:
                self.logger.debug("Unknown content type, continue")
                continue
            elif contentType and "image" not in contentType:
                self.logger.debug("Not an image, continue")
                continue

            # At this point we discern that it's an image.
            try:
                fp = io.BytesIO(await attachment.read())
                image = Image.open(fp)
                codes: List[Decoded] = decode(image)
                self.logger.debug("Found %s codes", len(codes))
            except Exception:
                # NOTE(review): this ``return`` (and the one below) aborts the
                # whole handler, skipping any remaining attachments -- confirm
                # whether ``continue`` was intended.
                self.logger.error("Couldn't check file.", exc_info=True)
                return

            if not codes:
                self.logger.debug("No QR codes found.")
                return

            self.logger.info(
                "%s#%s (%s) posted some QR code(s) in #%s (%s)",
                message.author.name,
                message.author.discriminator,
                message.author.id,
                message.channel.name,
                message.channel.id,
            )

            numQrCodes = len(codes)
            if numQrCodes == 1:
                # Single code: reply once, truncating very long payloads so
                # the message stays within Discord's limit.
                code = codes[0]
                data = code.data.decode()
                if len(data) > 1900:
                    contents = f"{data[:1900]}..."
                else:
                    contents = data
                msg = (f"Found a QR code from {message.author.mention}, "
                       f"the contents are: ```{contents}```")
                await message.reply(
                    msg,
                    mention_author=False,
                    allowed_mentions=discord.AllowedMentions.none())
            else:
                # Multiple codes: build one page per code, then send them in
                # pagified chunks (first as a reply, the rest as plain sends).
                pages: List[str] = []
                pages.append(
                    f"Found several QR codes from {message.author.mention}, their contents are:"
                )
                for code in codes:
                    data = code.data.decode()
                    if len(data) > 1990:
                        contents = f"```{data[:1990]}...```"
                    else:
                        contents = f"```{data}```"
                    pages.append(contents)

                firstMessage = True
                sentMessages = 0

                ctx = await self.bot.get_context(message)
                for textToSend in pagify("\n".join(pages),
                                         escape_mass_mentions=True):
                    if firstMessage:
                        await message.reply(
                            textToSend,
                            mention_author=False,
                            allowed_mentions=discord.AllowedMentions.none(),
                        )
                        firstMessage = False
                    elif sentMessages > 10:
                        # Safety valve against QR-bombing a channel.
                        self.logger.debug(
                            "Sent more than 10 messages, bail early")
                        break
                    else:
                        await ctx.send(
                            textToSend,
                            allowed_mentions=discord.AllowedMentions.none())
                    sentMessages += 1
コード例 #27
0
    def get_frame(self,
                  shapeof,
                  bound_lowX=100,
                  bound_highX=300,
                  bound_lowY=50,
                  bound_highY=250,
                  ptX=200,
                  ptY=150,
                  radius=100,
                  detect="no",
                  license="no",
                  start="no",
                  helmet_detect="no"):
        """Grab one frame from the global video stream and run the enabled
        detectors on it.

        Depending on the "yes"/"no" flag arguments this performs: person
        detection against a square or circular authorised zone
        (``shapeof``), licence-plate detection, QR-code decoding, helmet
        detection by forehead colour, and facial-landmark-based face
        capture.

        Returns ``(jpeg_bytes, status_text, qr_text, deti)``.
        NOTE(review): ``deti`` is only assigned inside the face-detection
        branch; when ``detect != "yes"`` the return relies on the global
        ``deti`` already existing -- confirm it is initialised elsewhere.
        """
        global vs, flag
        global t1, flag  # NOTE(review): ``flag`` is declared global twice

        stri = "No message"
        qr = "No qr code detected"

        # NOTE(review): despite the name, this cascade detects Russian
        # licence plates, not faces.
        face_cascade = cv2.CascadeClassifier(
            'haarcascades/haarcascade_russian_plate_number.xml')

        #To check if the bottom 2 points of the detected square lie in the desired circle(authorized area)
        def check_value(startX, startY, endX, endY, ptX, ptY, radius):
            val = (startX - ptX)**2 + (endY - ptY)**2
            if (val - radius**2 > 0):
                return -1
            # NOTE(review): the literal 200 here looks like a hard-coded
            # ptX -- confirm.
            val = (endX - 200)**2 + (endY - ptY)**2
            if (val - radius**2 > 0):
                return -1
            return 1

        frame = vs.read()

        frame = imutils.resize(frame, width=400)
        # grab the frame dimensions and convert it to a blob
        (h, w) = frame.shape[:2]

        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843,
                                     (300, 300), 127.5)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # NOTE(review): detection runs on the colour frame even though a
        # grayscale image was just computed -- confirm which input the
        # cascade expects.
        faces = face_cascade.detectMultiScale(frame, 1.3, 5)

        net.setInput(blob)
        detections = net.forward()
        # Colour ranges used by the helmet detector (yellow / red).
        boundaries_y = [([0, 100, 160], [70, 255, 255])]
        boundaries_r = [([0, 60, 180], [100, 120, 255])]
        font = cv2.FONT_HERSHEY_SIMPLEX

        # Return [percentage of pixels within the colour range, masked image].
        def detectcolor(boundaries, img):
            for (lower, upper) in boundaries:
                lower = np.array(lower, dtype="uint8")
                upper = np.array(upper, dtype="uint8")
                mask = cv2.inRange(img, lower, upper)
                output = cv2.bitwise_and(img, img, mask=mask)
            ncol = 0
            # Count non-black pixels remaining after masking.
            for i in range(0, output.shape[0]):
                for j in range(0, output.shape[1]):
                    if (output[i][j][0] > 0 or output[i][j][1] > 0
                            or output[i][j][2] > 0):
                        ncol = ncol + 1
            colorperc = round(ncol / (img.shape[1] * img.shape[0]) * 100, 2)
            return ([colorperc, output])

        for i in np.arange(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]

            if confidence > 0.20:
                idx = int(detections[0, 0, i, 1])
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
                #For license plate detection
                if (license == "yes"):
                    # NOTE(review): loop variables (x, y, w, h) shadow the
                    # frame dimensions (h, w) computed above.
                    for (x, y, w, h) in faces:
                        cv2.rectangle(frame, (x, y), (x + w, y + h),
                                      (255, 0, 255), 2)
                        roi_gray = gray[y:y + h, x:x + w]
                        roi_color = frame[y:y + h, x:x + w]
                #Qr code detection
                decodedObjected = pyzbar.decode(frame)
                for obj in decodedObjected:
                    qr = obj.data.decode('utf-8')
                    (a, b, c, d) = obj.rect
                    cv2.rectangle(frame, (a, b), (a + c, b + d), (255, 255, 0),
                                  3)
                #To display the rectangles only if human is detected
                # (presumably idx 15 is the "person" class -- confirm CLASSES)
                if (idx == 15 and start == "yes"):
                    #If boundary is a square
                    if (shapeof == "square"):
                        cv2.rectangle(frame, (bound_lowX, bound_lowY),
                                      (bound_highX, bound_highY), (255, 0, 0),
                                      2)
                        if (startX < bound_lowX or endY < bound_lowY
                                or endY > bound_highY or endX > bound_highX):
                            cv2.rectangle(frame, (startX, startY),
                                          (endX, endY), (0, 255, 0), 2)
                            if ((endX - startX) * (endY - startY)):
                                y = startY - 15 if startY - 15 > 15 else startY + 15
                                cv2.putText(frame, "Authorized " + label,
                                            (startX, y),
                                            cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                            COLORS[idx], 2)
                                stri = "Authorized area, person found"
                        else:
                            cv2.rectangle(frame, (startX, startY),
                                          (endX, endY), (0, 0, 255), 2)
                            y = startY - 15 if startY - 15 > 15 else startY + 15
                            cv2.putText(frame, "Unauthorized " + label,
                                        (startX, y), cv2.FONT_HERSHEY_SIMPLEX,
                                        0.5, COLORS[idx], 2)
                            stri = "UnAuthorized area,person found! PLease checK!"
                    #If boundary is a circle
                    else:
                        cv2.circle(frame, (ptX, ptY), radius, (255, 0, 0), 2)
                        value = check_value(startX, startY, endX, endY, ptX,
                                            ptY, radius)
                        if (value == -1):
                            cv2.rectangle(frame, (startX, startY),
                                          (endX, endY), (0, 255, 0), 2)
                            y = startY - 15 if startY - 15 > 15 else startY + 15
                            cv2.putText(frame, "Authorized " + label,
                                        (startX, y), cv2.FONT_HERSHEY_SIMPLEX,
                                        0.5, COLORS[idx], 2)
                            # NOTE(review): label drawn says "Authorized" but
                            # stri reports unauthorized -- one of the two
                            # looks inverted; confirm.
                            stri = "Unauthorized area,person found! PLease checK!"
                        else:
                            cv2.rectangle(frame, (startX, startY),
                                          (endX, endY), (0, 0, 255), 2)
                            y = startY - 15 if startY - 15 > 15 else startY + 15
                            cv2.putText(frame, "Authorized " + label,
                                        (startX, y), cv2.FONT_HERSHEY_SIMPLEX,
                                        0.5, COLORS[idx], 2)
                            stri = "Authorized area, person found"
                #For helment detection
                if (helmet_detect == "yes"):
                    # NOTE(review): h and w (frame dimensions) are
                    # reassigned here to the forehead-region size.
                    h = int((endX - startX) * 0.3)
                    w = (endX - startX)
                    if (endX - startX > 20):
                        Forehead = frame[startY:startY + h, startX:startX + w]
                        det_y = detectcolor(boundaries_y, Forehead)
                        det_r = detectcolor(boundaries_r, Forehead)
                        # >1% yellow or red pixels on the forehead counts
                        # as a helmet.
                        if (det_y[0] > 1 or det_r[0] > 1):
                            cls = "Helmet"
                            R = 0
                            G = 255
                        else:
                            cls = "Head"
                            R = 255
                            G = 0
                        frame = cv2.rectangle(frame, (startX, startY),
                                              (startX + w, startY + h),
                                              (0, G, R), 5)
                        cv2.putText(
                            frame,
                            cls + ":" + str(round(confidence * 100, 2)) + "%",
                            (startX, startY - 5), font, 1, (0, G, R), 4,
                            cv2.LINE_AA)
                #For face detection
                if (detect == "yes"):
                    global deti
                    rects = detector(gray, 0)

                    for rect in rects:

                        # determine the facial landmarks for the face region, then

                        # convert the facial landmark (x, y)-coordinates to a NumPy

                        # array

                        shape = predictor(gray, rect)

                        shape = face_utils.shape_to_np(shape)

                        # Eye-corner offsets (landmarks 45 and 36): when the
                        # vertical offset is tiny relative to the horizontal
                        # one, the face is presumed level/frontal -- confirm.
                        y = shape[45][1] - shape[36][1]

                        x = shape[45][0] - shape[36][0]

                        if (0.01 * x >= y):
                            #print("IN loop")

                            R = 0

                            G = 255

                            B = 0
                            #To capture image in case a face is found
                            frame1 = frame[
                                max(shape[20][1] - int(x * 1.2), 0
                                    ):min(shape[9][1] +
                                          int(x * 0.1), frame.shape[0]),
                                max(shape[1][0] - int(x * 0.2), 0
                                    ):min(shape[16][0] +
                                          int(x * 0.2), frame.shape[1])]
                            cv2.imwrite("face_64op/" + str(t1) + ".jpg",
                                        frame1)
                            deti = "no"
                            if (flag == 1):
                                os.system(
                                    "mpg123 " +
                                    file)  #Sound played when face is captured
                                flag = 0

                        else:

                            R = 255

                            G = 0

                            B = 0

                        # loop over the (x, y)-coordinates for the facial landmarks

                        for (x, y) in shape:

                            cv2.circle(
                                frame, (x, y), 1, (B, G, R), -1
                            )  #cv2.rectangle(frame,(startX,startY),(int(endX/5),int(endY/5)),(255,0,0),2)

        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes(), stri, qr, deti  #Returns the frame,text,qrcode
コード例 #28
0
def findAndReadQR(img):
    """Find the closest known QR code in *img*.

    Decodes all QR codes via pyzbar, estimates each code's distance from
    the camera from its apparent side length (pinhole model:
    ``distance = Focal / sideAvg``), works out whether the code is
    rotated left or right from the left/right side-length ratio, and
    returns a ``QRresult`` for the nearest code whose payload appears in
    the module-level ``dict`` lookup table (``None`` when no known code
    is visible).
    """
    results = decode(img, [ZBarSymbol.QRCODE])
    closestQR = None
    for result in results:
        # result[3] is the polygon: the four corner points of the code.
        p1 = result[3][0]
        p2 = result[3][1]
        p3 = result[3][2]
        p4 = result[3][3]
        QRRotatedRight = False
        QRRotatedLeft = False
        leftSide = 0  #Left most side
        rightSide = 0  #Right Most Side
        lowerSide = 0
        upperSide = 0

        # pyzbar does not guarantee corner order, so pick which pairing of
        # corners forms the vertical sides from their relative layout.
        if (p1.y <= p2.y and p2.y >= p3.y and p3.x >= p4.x and p2.x <= p4.x):
            leftSide = math.sqrt((p1.x - p2.x)**2 + (p1.y - p2.y)**2)
            rightSide = math.sqrt((p3.x - p4.x)**2 + (p3.y - p4.y)**2)
            lowerSide = math.sqrt((p2.x - p3.x)**2 + (p2.y - p3.y)**2)
            upperSide = math.sqrt((p1.x - p4.x)**2 + (p1.y - p4.y)**2)
            sideAvg = (leftSide + rightSide) / 2
        else:
            leftSide = math.sqrt((p1.x - p4.x)**2 + (p1.y - p4.y)**2)
            rightSide = math.sqrt((p2.x - p3.x)**2 + (p2.y - p3.y)**2)
            lowerSide = math.sqrt((p1.x - p2.x)**2 + (p1.y - p2.y)**2)
            upperSide = math.sqrt((p3.x - p4.x)**2 + (p3.y - p4.y)**2)
            sideAvg = (leftSide + rightSide) / 2

        # Side-length ratio tells us which way the code is tilted.
        p = leftSide / rightSide
        print(p)  # fix: was Python-2 ``print p`` (SyntaxError on Python 3)
        if p <= 0.95:
            QRRotatedLeft = True
        elif p >= 1.05:
            QRRotatedRight = True
        distance = Focal / sideAvg
        centerX = (p1.x + p2.x + p3.x + p4.x) / 4
        centerY = (p1.y + p2.y + p3.y + p4.y) / 4
        # ``dict`` here is a module-level lookup table that shadows the
        # builtin -- consider renaming it at its definition site.
        if result[0] not in dict:
            continue
        # Keep only the nearest recognised code (consolidates the two
        # duplicated QRresult constructions of the original).
        if closestQR is None or closestQR.distance > distance:
            closestQR = QRresult(result[0], (centerX, centerY), distance,
                                 dict[result[0]], QRRotatedRight,
                                 QRRotatedLeft, rightSide, lowerSide, leftSide,
                                 upperSide)
    return closestQR
コード例 #29
0
def search_barcode_item(item, config=None):
    """Locate the most barcode-like region in ``item.img`` and decode it.

    Runs a gradient/threshold/morphology pipeline to find the largest
    rectangular candidate region, crops it (with configured margins) and
    tries to decode it as EAN-13 with pyzbar.  The result is stored on
    ``item.barcode`` (payload ``None`` when decoding fails) and ``item``
    is returned.  Intermediate images are optionally shown/saved/logged
    according to the [Logging] section of ``config``.
    """
    if config is None:
        config = get_config()

    logger = None

    text_log = config["Logging"].getboolean("text_log")
    img_log = config["Logging"].getboolean("img_log")
    show_img = config["Logging"].getboolean("show_img")
    # NOTE(review): this mutates the caller-supplied config in place.
    config["Logging"]["log_path"] = config["Logging"]["log_path"].format(
        date_time=str(datetime.datetime.today()))
    log_path = config["Logging"]["log_path"]
    margin_x = config["Barcode"].getint("margin_x")
    margin_y = config["Barcode"].getint("margin_y")

    if img_log:
        if not os.path.exists(log_path + item.name + "/"):
            os.makedirs(log_path + item.name + "/")
    if text_log:
        logger = logging.getLogger("SEARCH_BARCODE")
        logger.info("Module SEARCH_BARCODE started")

    # Show/save/log one intermediate image, as configured.
    # NOTE(review): callers pass names that already end in ".jpg", so saved
    # files end up named "*.jpg.jpg".
    def logging_action(name, image):
        if show_img:
            cv.imshow(name, image)
        if img_log:
            cv.imwrite(log_path + item.name + "/" + name + ".jpg", image)
        if text_log:
            logger.info(
                repr(name) + " saved on " +
                repr(log_path + item.name + "/" + name + ".jpg"))

    gray = cv.cvtColor(item.img, cv.COLOR_BGR2GRAY)

    logging_action("1-gray.jpg", gray)

    # Horizontal gradient highlights the vertical bars of a barcode.
    grad_x = cv.Sobel(gray, ddepth=cv.CV_32F, dx=1, dy=0, ksize=-1)

    logging_action("2-dx.jpg", grad_x)

    gradient = cv.convertScaleAbs(grad_x)

    logging_action("3-gradient.jpg", gradient)

    blurred = cv.GaussianBlur(gradient, (1, 45), 0)
    (_, thresh) = cv.threshold(blurred, 225, 255, cv.THRESH_BINARY)

    logging_action("4-thresh.jpg", thresh)

    # construct a closing kernel and apply it to the thresholded image
    kernel = cv.getStructuringElement(cv.MORPH_RECT, (17, 1))
    closed = cv.morphologyEx(thresh, cv.MORPH_CLOSE, kernel)

    logging_action("5-closed.jpg", closed)

    closed = cv.erode(closed, None, iterations=4)
    closed = cv.dilate(closed, None, iterations=4)

    logging_action("6-dilate.jpg", closed)

    # NOTE(review): raises IndexError when no contours were found.
    (cnts, _) = cv.findContours(closed.copy(), cv.RETR_EXTERNAL,
                                cv.CHAIN_APPROX_SIMPLE)
    max_rect = cv.minAreaRect(cnts[0])
    max_area = int(max_rect[1][0] * max_rect[1][1])
    for cnt in cnts:
        rect = cv.minAreaRect(cnt)  # try to fit a bounding rectangle
        area = int(rect[1][0] * rect[1][1])
        if area > max_area:
            max_area = area
            max_rect = rect
    # Grow the winning rectangle by the configured margins.
    max_rect = ((max_rect[0][0], max_rect[0][1]),
                (max_rect[1][0] + 2 * margin_x,
                 max_rect[1][1] + 2 * margin_y), max_rect[2])
    box = cv.boxPoints(max_rect)  # find the four vertices of the rectangle
    box = np.int0(box)  # round the coordinates
    img_copy = item.img.copy()

    if img_log:
        cv.drawContours(img_copy, [box], 0, (255, 0, 0), 2)

    img_crop = crop_rect(item.img, max_rect)

    logging_action("7-barcode.jpg", img_crop)

    barcode = decode(img_crop, symbols=[ZBarSymbol.EAN13])

    logging_action("8-dest.jpg", img_copy)
    if show_img:
        cv.waitKey()
        cv.destroyAllWindows()
    if len(barcode) > 0:
        item.barcode = Barcode(barcode[0].data.decode("utf-8"), max_rect, box,
                               img_crop)
    else:
        item.barcode = Barcode(None, max_rect, box, img_crop)
    return item
コード例 #30
0
    # NOTE(review): truncated excerpt -- the enclosing ``while`` loop header
    # and the creation of ``cap`` (a cv2.VideoCapture) are above this text.
    # ret, frame = cap.read()
    ret, frame = cap.read()


    cv2.imshow('frame',frame)

    # Wait for the operator to confirm before grabbing the still to decode.
    x = input("Ready Press Y if Yes :")
    if x == 'Y' or x == 'y':

        return_value, image = cap.read() 
        # cv2.imshow('image',image)

        # image.save('img.png')
        # Round-trip through a PNG on disk so PIL can load it for pyzbar.
        cv2.imwrite('img.png',image)
        time.sleep(1.5)
        res = decode(Image.open('img.png'))
        idata = res[0].data  # NOTE(review): IndexError if nothing decoded
        print(str(idata))
        break

    if x == 'n' or x == 'N':
        exit()


    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
コード例 #31
0
def gen():
    """Video streaming generator function."""
    while True:
        with torch.no_grad():
            cfg = 'cfg/yolov3.cfg'
            cfghaz = 'cfg/yolov3-tiny-custom.cfg'
            data_cfg = 'directories.data'
            data_cfghaz = 'directories.data'
            weights = os.path.join(os.getcwd(),
                                   os.path.join("weights", "yolov3.pt"))
            weightshaz = os.path.join(os.getcwd(),
                                      os.path.join("weights", "bestv1.pt"))
            images = 'data/samples'
            output = 'output'  # output folder
            img_size = 416  #416
            conf_thres = 0.02
            nms_thres = 0.02
            save_txt = False
            save_images = False
            webcam = True

            device = torch_utils.select_device()

            # Initialize model

            model = Darknet(cfg, img_size)
            hazmatmodel = Darknet(cfghaz, img_size)

            model.load_state_dict(
                torch.load(weights, map_location=device)['model'])
            hazmatmodel.load_state_dict(
                torch.load(weightshaz, map_location=device)['model'])

            model.to(device).eval()
            hazmatmodel.to(device).eval()

            # Set Dataloader
            vid_path, vid_writer = None, None

            #start webcam

            save_images = False
            dataloader = LoadWebcam(img_size=img_size)

            # Get classes and colors
            classes = load_classes(
                parse_data_cfg(data_cfg)['names'],
                "example_single_class.names")
            classeshaz = load_classes(
                parse_data_cfg(data_cfghaz)['names'], "hazmat.names")
            colors = [[random.randint(0, 255) for _ in range(3)]
                      for _ in range(len(classes))]

            # Lucas kanade params
            lk_params = dict(
                winSize=(15, 15),
                maxLevel=4,
                criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10,
                          0.03))  #0.03
            count = 0
            global point, point_selected, old_points
            point_selected = False
            point = ()
            old_points = np.array([[]])

            #for i, (path, img, im0, vid_cap) in enumerate(dataloader):
            for i, (path, img, im0) in enumerate(dataloader):

                im0save = im0.copy()
                count += 1

                if count == 1:
                    start = time.time()
                    height, width = im0.shape[:2]

                if count != 1:
                    p1x_old = p1x
                    p2y_old = p2y
                f = open("output.txt", "r")
                for i in f:
                    haz = i.split()[0]
                    coco = i.split()[1]
                    zbar = i.split()[2]
                    try:
                        p1x = int(float(i.split()[3]) * width)
                        p2y = int(float(i.split()[4]) * height)
                    except:
                        p1x = int(0.5 * width)
                        p2y = int(0.5 * height)
                    p = i.split()[5]
                f.close()

                #print(width, height)
                #print(haz, coco)

                t = time.time()
                save_path = str(Path(output) / Path(path).name)

                # Get detections
                img = torch.from_numpy(img).unsqueeze(0).to(device)

                if ONNX_EXPORT:
                    torch.onnx.export(model,
                                      img,
                                      'weights/model.onnx',
                                      verbose=True)
                    return

                def select_point(event, x, y, flags, params):
                    global point, point_selected, old_points
                    #print(event, x, y, flags, params)
                    if event == cv2.EVENT_LBUTTONDOWN:
                        point = (x, y)
                        point_selected = True
                        old_points = np.array([[x, y]], dtype=np.float32)

                if (point_selected == True):
                    #print("selected")
                    gray_frame = cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)

                    cv2.circle(im0, point, 5, (0, 0, 255), 2)
                    new_points, status, error = cv2.calcOpticalFlowPyrLK(
                        old_gray, gray_frame, old_points, None, **lk_params)
                    old_gray = gray_frame.copy()
                    old_points = new_points
                    x, y = new_points.ravel()
                    cv2.circle(im0, (x, y), 5, (0, 255, 0), -1)

                if p == "false":
                    point_selected = False

                new = None
                if count != 1:
                    if p1x != p1x_old or p2y != p2y_old:
                        new = True

                if (point_selected == False):
                    if p == "true":
                        old_gray = cv2.cvtColor(
                            im0, cv2.COLOR_BGR2GRAY)  #im0 is BGR img is RGB
                        select_point(1, p1x, p2y, 1, None)
                else:

                    if new == True:
                        select_point(1, p1x, p2y, 1, None)
                        new = False
                    #print("run")
                    old_gray = gray_frame.copy()

                if haz == "true":
                    predhaz = hazmatmodel(img)
                    detectionshaz = non_max_suppression(
                        predhaz, conf_thres, nms_thres)[0]

                    if detectionshaz is not None and len(detectionshaz) > 0:
                        # Rescale boxes from 416 to true image size
                        scale_coords(img_size, detectionshaz[:, :4],
                                     im0.shape).round()

                        # Print results to screen
                        for c in detectionshaz[:, -1].unique():
                            n = (detectionshaz[:, -1] == c).sum()
#-------------------------                            print('%g %ss' % (n, classeshaz[int(c)]), end=', ')

# Draw bounding boxes and labels of detections
                        for *xyxy, conf, cls_conf, cls in detectionshaz:

                            # Add bbox to the image
                            label = '%s %.2f' % (classeshaz[int(cls)], conf)
                            plot_one_box(xyxy,
                                         im0,
                                         label=label,
                                         color=colors[int(cls)])

                if coco == "true":
                    pred = model(img)
                    detections = non_max_suppression(pred, conf_thres,
                                                     nms_thres)[0]

                    if detections is not None and len(detections) > 0:
                        # Rescale boxes from 416 to true image size
                        scale_coords(img_size, detections[:, :4],
                                     im0.shape).round()

                        # Print results to screen
                        for c in detections[:, -1].unique():
                            n = (detections[:, -1] == c).sum()


#-------------------------                            print('%g %ss' % (n, classes[int(c)]), end=', ')

# Draw bounding boxes and labels of detections
                        for *xyxy, conf, cls_conf, cls in detections:

                            # Add bbox to the image
                            label = '%s %.2f' % (classes[int(cls)], conf)
                            plot_one_box(xyxy,
                                         im0,
                                         label=label,
                                         color=colors[int(cls)])

                if zbar == "true":
                    #-------------------------                    print("zbar")
                    barcodes = decode(im0save)
                    for barcode in barcodes:
                        # extract the bounding box location of the barcode and draw the
                        # bounding box surrounding the barcode on the image
                        (x, y, w, h) = barcode.rect
                        cv2.rectangle(im0, (x, y), (x + w, y + h), (0, 0, 255),
                                      2)

                        # the barcode data is a bytes object so if we want to draw it on
                        # our output image we need to convert it to a string first
                        barcodeData = barcode.data.decode("utf-8")
                        barcodeType = barcode.type

                        # draw the barcode data and barcode type on the image
                        text = "{} ({})".format(barcodeData, barcodeType)
                        cv2.putText(im0, text, (x, y - 10),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255),
                                    2)

                        # print the barcode type and data to the terminal
                        print("[INFO] Found {} barcode: {}".format(
                            barcodeType, barcodeData))

                end = time.time()
                seconds = end - start
                start = time.time()
                if seconds == 0:
                    seconds = 1

                cv2.putText(im0, str(round(1 / seconds)), (10, 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

                #cv2.imshow(weights, im0)
                cv2.imwrite('t.jpg', im0)

                yield (b'--frame\r\n'
                       b'Content-Type: image/jpeg\r\n\r\n' +
                       open('t.jpg', 'rb').read() + b'\r\n')

                #print(time.time() - t)
                '''if (time.time() - t) > 0.01:
コード例 #32
0
    def decode_color_qr_code(self):
        """Demultiplex a 24-layer color QR code and reconstruct the encoded file.

        Python 2 code (print statements, xrange). Splits each RGB pixel of the
        image at ``self.path`` into 24 bit planes (8 per channel), renders each
        plane as a black/white QR image under ``self.folder_path``, decodes all
        24 planes with pyzbar, concatenates the payloads, and writes the
        reconstructed file using the file-type byte embedded at the front of
        the data.

        Returns -1 when none of the planes decodes (input was not a color QR
        code); otherwise falls off the end (implicitly returns None) after
        writing ``DECODED<filename>.<filetype>``.
        """
        print "Demultiplexing color QR code......",
        start_time = time.time()

        cqr = Image.open(self.path)
        cqr_data = cqr.getdata()  # flat sequence of (r, g, b) pixel tuples

        # Create a directory for the demultiplexed QR codes
        if not os.path.exists(os.path.dirname(self.folder_path)):
            try:
                os.makedirs(os.path.dirname(self.folder_path))
            except OSError as exc:
                # Tolerate a race where the directory appeared between the
                # exists() check and makedirs(); re-raise anything else.
                if exc.errno != errno.EEXIST:
                    raise

        # One bit string per output QR code: planes 0-7 come from the red
        # channel, 8-15 from green, 16-23 from blue.
        qr_data = []
        for i in xrange(24):
            qr_data.append('')

        # Extract the RGB values of the color QR code
        # NOTE(review): per-pixel string concatenation across the whole image
        # is O(n^2)-ish in CPython 2 — slow for large inputs, but correct.
        for pixel in cqr_data:
            r, g, b = pixel
            r = '{0:08b}'.format(r)
            g = '{0:08b}'.format(g)
            b = '{0:08b}'.format(b)

            ctr = 0
            # Get the values of the QR codes responsible for the red channel
            for i in xrange(0, 8):
                qr_data[i] += r[ctr]
                ctr += 1

            ctr = 0
            # Get the values of the QR codes responsible for the green channel
            for i in xrange(8, 16):
                qr_data[i] += g[ctr]
                ctr += 1

            ctr = 0
            # Get the values of the QR codes responsible for the blue channel
            for i in xrange(16, 24):
                qr_data[i] += b[ctr]
                ctr += 1

        ctr = 0
        # Create pixel data for 24 QR codes
        # Each data in the qr_data corresponds to a single QR code
        for data in qr_data:
            img_data = []
            # If pixel is '0', then it is black (0, 0, 0)
            # while a '1' corresponds to white (255, 255, 255)
            for pixel in data:
                if pixel == '0':
                    # if pixel == '1':
                    img_data.append((0, 0, 0))
                else:
                    img_data.append((255, 255, 255))

            # Create QR code image
            # Image is SCALE*185 square — assumes the source was produced at
            # the same scale/module count (TODO confirm against the encoder).
            qr_code = Image.new(
                'RGB', (ColorQRCode.SCALE * 185, ColorQRCode.SCALE * 185))
            qr_code.putdata(img_data)
            qr_code.save(
                str(self.folder_path) + "decoded" + str(ctr + 1) + ".png")
            ctr += 1

        print "Done: ",
        print("%s seconds" % (time.time() - start_time))

        print "Decoding QR codes.................",
        start_time = time.time()

        data = ""
        # NOTE(review): Windows-specific path separator conversion.
        path = self.folder_path.replace('/', '\\')
        # Decode the QR codes and concatenate the results
        for i in xrange(1, 25):
            decoded_data = decode(
                Image.open(path + 'decoded' + str(i) + '.png'))
            if decoded_data:
                data += str(decoded_data[0].data)

        # If nothing is decoded, then the image is not a QR code
        if not data:
            return -1

        filetype = ""
        # If file is in base64, decode it
        # (everything except the 'txt' type marker was base64-encoded
        # by the encoder — TODO confirm against the encoding side)
        if not data.startswith('{0:b}'.format(
                ColorQRCode.FILE_TYPES.index('txt')).zfill(8)):
            data = base64.b64decode(data)

        print "Done: ",
        print("%s seconds" % (time.time() - start_time))

        print "Producing encoded file............",
        start_time = time.time()

        # Get the initial byte of the data to determine the file type and remove it from the data
        ft = data[:8]
        data = data[8:]
        filetype = ColorQRCode.FILE_TYPES[int(ft, 2)]

        # Write the data with the extracted filetype
        with open('DECODED' + str(self.filename) + '.' + filetype,
                  'wb') as file:
            file.write(data)

        print "Done: ",
        print("%s seconds" % (time.time() - start_time))
コード例 #33
0
# https://www.pyimagesearch.com/2014/12/15/real-time-barcode-detection-video-python-opencv/
# https://www.pyimagesearch.com/2014/11/24/detecting-barcodes-images-python-opencv/

 
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
	help="path to input image")
args = vars(ap.parse_args())


# load the input image
# NOTE(review): cv2.imread returns None for a bad path; result is unchecked.
image = cv2.imread(args["image"])
 
# find the barcodes in the image and decode each of the barcodes
barcodes = pyzbar.decode(image)

# loop over the detected barcodes
for barcode in barcodes:
	# extract the bounding box location of the barcode and draw the
	# bounding box surrounding the barcode on the image
	(x, y, w, h) = barcode.rect
	cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
 
	# the barcode data is a bytes object so if we want to draw it on
	# our output image we need to convert it to a string first
	barcodeData = barcode.data.decode("utf-8")
	barcodeType = barcode.type
 
	# draw the barcode data and barcode type on the image
	text = "{} ({})".format(barcodeData, barcodeType)
	# NOTE(review): snippet appears truncated here — the cv2.putText call
	# that would draw `text` onto the image is missing.
コード例 #34
0
ファイル: qrcode_demo.py プロジェクト: rinkeigun/linux_module
#pip install qrcode
#pip install pyzbar

from pyzbar.pyzbar import decode
from PIL import Image

import qrcode  # QR code generation
import os


# Generate a QR code encoding the literal string 'hoge'.
img = qrcode.make('hoge')
img.show()  # display the generated QR code
img.save('qr_img.png')  # save the generated QR code under this name


# Path of the QR code image to read back.
image = 'weixin\\test.png'
# Decode the QR code image.
# NOTE(review): data[0][0] raises IndexError when nothing was decoded.
data = decode(Image.open(image))
# Append the decoded payload to output.txt.
# FIX: use a context manager instead of manual open()/close() so the
# file handle is released even if write() raises.
with open('output.txt', 'a') as f:
    f.write(data[0][0].decode('utf-8', 'ignore'))

コード例 #35
0
ファイル: app.py プロジェクト: cyber-bad/Hostel-Pass
def dabbu():
    """Scan a QR code from the webcam, grant the encoded user permission,
    and render dabbu.html with that user's name.

    Flask view helper: reads frames from the default camera until a QR
    code is decoded, sets ``users.permission = 'yes'`` for the e-mail
    encoded in the code, then stops the stream and renders the template
    (with ``dabbud=<name>`` on success).
    """

    # NOTE(review): argparse inside a web view is unusual — it parses the
    # *server process* command line, and `args` is never used afterwards.
    ap = argparse.ArgumentParser()
    ap.add_argument("-o",
                    "--output",
                    type=str,
                    default="barcodes.csv",
                    help="path to output CSV file containing barcodes")
    args = vars(ap.parse_args())
    # print("[INFO] starting video stream...")
    vs = VideoStream(src=0).start()
    # csv = open(args["output"], "a")
    # found = set()
    while True:

        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        cv2.line(frame, (85, 50), (100, 50), (255, 255, 255), 2)

        barcodes = pyzbar.decode(frame)
        for barcode in barcodes:

            # (x, y, w, h) = barcode.rect
            # cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)

            # QR payload is expected to be the user's e-mail address.
            barcodeData = barcode.data.decode("utf-8")
            # barcodeType = barcode.type

            # time.sleep(5)

            # print(barcodeData)
            pdata = barcodeData
            cango = "yes"

            cursor = mysql.connection.cursor()

            cursor.execute(
                '''
                        UPDATE users

                        SET permission = %s                            

                        WHERE email = %s       
                        ''', (cango, pdata))

            mysql.connection.commit()

            # NOTE(review): ({pdata}) passes a one-element *set* as the SQL
            # parameter sequence; the conventional form is (pdata,) — this
            # only works because the driver iterates the args. Confirm.
            cursor.execute('SELECT * FROM users WHERE email = %s', ({pdata}))

            bdata = cursor.fetchone()

            # NOTE(review): fetchone() returns None when no row matches,
            # which would make this subscript raise TypeError.
            name = bdata[1]

            # print(name)

            # NOTE(review): barcodeData cannot be None here (it came from a
            # successful decode), so the else branch always runs.
            if barcodeData is None:
                vs.stop()
                return render_template('dabbu.html')
            else:
                vs.stop()
                return render_template('dabbu.html', dabbud=name)

            # text = "{} ({})".format(barcodeData, barcodeType)
            # cv2.putText(frame, text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

            # return render_template('dabbu.html')

            # if barcodeData not in found:
            #     return "Barcode not found"

        # cv2.imshow("QR-Code Scanner", frame)
        # key = cv2.waitKey(1) & 0xFF
        #
        # if key == ord("q"):
        vs.stop()
        break  # only a single frame is ever processed per request

    # print("[INFO] cleaning up...")
    # csv.close()
    # cv2.destroyAllWindows()
    vs.stop()
    return render_template('dabbu.html')  # no barcode found in the frame
コード例 #36
0
ファイル: acceso.py プロジェクト: diegocba9210/Qrcode
from pyzbar.pyzbar import decode

# Serial link to the access controller (presumably an Arduino on COM5 —
# confirm against the hardware side).
ser = serial.Serial('COM5', 9600)

#img = cv2.imread('1.png')
cap = cv2.VideoCapture(0)
cap.set(3, 640)  # property 3 = frame width
cap.set(4, 480)  # property 4 = frame height

# myDataFile.txt holds one authorized QR payload per line.
with open('myDataFile.txt', 'r') as f:
    myDataList = f.read().splitlines()

while True:

    success, img = cap.read()
    for barcode in decode(img):
        myData = barcode.data.decode('utf-8')
        print(myData)

        if myData in myDataList:
            myOutput = 'Authorizado'
            myColor = (0, 255, 0)
            ser.write(str('h').encode())  # presumably the "grant" signal — confirm
        else:
            myOutput = 'No-Authorizado'
            myColor = (0, 0, 255)
            ser.write(str('n').encode())  # presumably the "deny" signal — confirm

        # Outline the detected code on the frame in the grant/deny color.
        pts = np.array([barcode.polygon], np.int32)
        pts = pts.reshape((-1, 1, 2))
        cv2.polylines(img, [pts], True, myColor, 5)
# NOTE(review): snippet is truncated — the loop has no exit condition and the
# code that would display `img` / `myOutput` is missing below.
コード例 #37
0
ファイル: app.py プロジェクト: cyber-bad/Hostel-Pass
def back():
    """Scan a QR code from the webcam and check the user back into the
    hostel: revoke permission, set status to 'inhostel', and render
    back.html with the user's name.
    """

    # NOTE(review): argparse in a web view parses the server process
    # command line; the parsed result is never even assigned here.
    ap = argparse.ArgumentParser()
    ap.add_argument("-o",
                    "--output",
                    type=str,
                    default="barcodes.csv",
                    help="path to output CSV file containing barcodes")
    vs = VideoStream(src=0).start()

    while True:

        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        cv2.line(frame, (85, 50), (100, 50), (255, 255, 255), 2)

        barcodes = pyzbar.decode(frame)
        for barcode in barcodes:
            # QR payload is expected to be the user's e-mail address.
            barcodeData = barcode.data.decode("utf-8")

            print(barcodeData)
            pdata = barcodeData

            cursor = mysql.connection.cursor()

            # NOTE(review): ({pdata}) passes a one-element *set* as the SQL
            # parameter sequence; the conventional form is (pdata,).
            cursor.execute('SELECT * FROM users WHERE email = %s', ({pdata}))

            # NOTE(review): fetchone() may return None for an unknown
            # e-mail, making bdata[1] below raise TypeError.
            bdata = cursor.fetchone()

            # back to hostel

            status = "outhostel"  # NOTE(review): assigned but never used

            upstatus = "inhostel"

            cango = "no"

            cursor.execute(
                '''
                                                UPDATE users

                                                SET permission = %s

                                                WHERE email = %s 
                                                ''', (cango, pdata))

            mysql.connection.commit()

            cursor.execute(
                '''
                                                                        UPDATE users

                                                                        SET status = %s

                                                                        WHERE email = %s 
                                                                        ''',
                (upstatus, pdata))

            mysql.connection.commit()

            # cursor.execute('''
            #                                                 UPDATE users
            #
            #                                                 SET status = %s
            #
            #                                                 WHERE email = %s
            #                                                 ''', ({upstatus}))
            #
            # mysql.connection.commit()

            name = bdata[1]

            # NOTE(review): barcodeData cannot be None here (it came from a
            # successful decode), so the else branch always runs.
            if barcodeData is None:
                vs.stop()
                return render_template('back.html')
            else:
                vs.stop()
                return render_template('back.html', gated=name)

        break  # only a single frame is ever processed per request

    vs.stop()
    return render_template('back.html')  # no barcode found in the frame
コード例 #38
0
ファイル: organizer.py プロジェクト: mchakka/SlapCast
        def org_thread(inputVideo):
            """Worker: split inputVideo into Scene/Take clips using QR-code
            markers embedded in the footage.

            Samples the video roughly twice per second, decodes any QR code
            of the form "<scene>:<take>", and whenever the decoded marker
            changes, trims the preceding span (via self.Trim) and moves it to
            '<projectName>/Scene <n>/Take <m>.mp4'.

            Uses names from the enclosing scope: projectName, START_TIME,
            timeStampsCleaned, self.Trim, and the progress widgets on self.
            Mutates the module-level `success` flag (True on completion,
            False when no QR code was found).
            """
            global success
            # Swap the process button for a progress indicator while running.
            self.processButton.grid_forget()
            self.OrgLabel.grid(row=4, column=0, padx=10, pady=10)
            self.progress.grid(row=4, sticky='E', padx=10, pady=10)
            self.progress.start()

            print("CHECK")

            timeStamps = {}  # decoded "scene:take" marker -> last frame count seen
            video = inputVideo
            cap = cv2.VideoCapture(video)

            fps = cap.get(cv2.CAP_PROP_FPS)

            success, frame = cap.read()

            success = True
            count = 0
            frame1 = 0        # frame index where the current take started
            switch = 0        # 0 until the first marker has been seen
            timeStamp1 = ''   # marker of the take currently being tracked

            while success:
                # Only attempt a decode every fps/2 frames (~2 samples/sec).
                if (count % (int(fps / 2))) == 0:
                    success, frame = cap.read()
                    count += 1
                    data = decode(frame, symbols=[ZBarSymbol.QRCODE])
                    if data == []:
                        continue
                    else:
                        dataClean = (data[0].data).decode('utf8')
                        timeStamps[dataClean] = count
                        if switch == 0:
                            # First marker ever seen: start tracking from here.
                            timeStamp1 = dataClean
                            frame1 = count
                            switch += 1
                        if dataClean != timeStamp1:
                            # Marker changed: the previous take ends here.
                            frame2 = count
                            fileName = timeStamp1.split(':')[
                                0] + '.' + timeStamp1.split(':')[1] + '.mp4'
                            # Trim takes start/end in seconds.
                            self.Trim(str(frame1 / fps), str(frame2 / fps - 1),
                                      video, fileName)
                            sceneNum = int(timeStamp1.split(':')[0])
                            takeNum = int(timeStamp1.split(':')[1])

                            # Begin tracking the new take.
                            timeStamp1 = dataClean
                            frame1 = count
                            fileNameFinal = timeStamp1.split(':')[
                                0] + '.' + timeStamp1.split(':')[1] + '.mp4'
                            sceneNumFinal = int(timeStamp1.split(':')[0])
                            takeNumFinal = int(timeStamp1.split(':')[1])

                            # Move the trimmed clip into its Scene folder.
                            if not os.path.exists('%s/Scene %d' %
                                                  (projectName, sceneNum)):
                                os.makedirs('%s/Scene %d' %
                                            (projectName, sceneNum))
                            dirName = ('%s/Scene %d' %
                                       (projectName, sceneNum)) + (
                                           '/Take %d' % (takeNum)) + '.mp4'
                            shutil.move(fileName, dirName)
                else:
                    success, frame = cap.read()
                    count += 1
            try:
                # Flush the last take (runs to the end of the video).
                # fileNameFinal is unbound when no QR code was ever decoded,
                # which raises the UnboundLocalError handled below.
                self.Trim(str(frame1 / fps), str(count / fps), video,
                          fileNameFinal)
                if not os.path.exists('%s/Scene %d' %
                                      (projectName, sceneNumFinal)):
                    os.makedirs('%s/Scene %d' % (projectName, sceneNumFinal))
                dirNameFinal = ('%s/Scene %d' %
                                (projectName, sceneNumFinal)) + (
                                    '/Take %d' % (takeNumFinal)) + '.mp4'
                shutil.move(fileNameFinal, dirNameFinal)
                success = True
            except UnboundLocalError:
                success = False
                messagebox.showinfo(
                    "Error", "There was no QR Code found in this video.")

            # Re-key the flat marker dict into nested {scene: {take: frame}}.
            for key in timeStamps:
                sceneNum = int(key.split(':')[0])
                takeNum = int(key.split(':')[1])
                try:
                    timeStampsCleaned[sceneNum][takeNum] = timeStamps[key]
                except:
                    timeStampsCleaned[sceneNum] = {}
                    timeStampsCleaned[sceneNum][takeNum] = timeStamps[key]

            print(timeStampsCleaned)
            cap.release()
            cv2.destroyAllWindows()

            # Timer for keeping track of performance
            END_TIME = datetime.now()
            print('Duration to Organize: {}'.format(END_TIME - START_TIME) +
                  '\n')
コード例 #39
0
ファイル: brute.py プロジェクト: irvinlim/square-ctf-2018
import os
from itertools import permutations

from PIL import Image
from pyzbar.pyzbar import decode

# Load the 27 shredded strips of the barcode image (Python 2: xrange).
images = [Image.open(os.path.join('shredded', '%s.png' % i)) for i in xrange(0, 27)]
widths, heights = zip(*(i.size for i in images))

total_width = sum(widths)
max_height = max(heights)

# Try orderings of the 27 strips and attempt to decode each reassembly.
for perm in permutations(range(27)):
    new_im = Image.new('RGB', (total_width, max_height))
    x_offset = 0
    # FIX: the original iterated `images` in their fixed on-disk order, so
    # `perm` was never used and every iteration rebuilt the same image.
    # Paste the strips in the permuted order instead.
    for idx in perm:
        im = images[idx]
        new_im.paste(im, (x_offset, 0))
        x_offset += im.size[0]

    data = decode(new_im)
    print(data)

    new_im.show()
    # NOTE(review): this break stops after the first permutation (the
    # identity ordering) — remove it, and the show() above, to actually
    # brute-force all orderings.
    break