Esempio n. 1
1
class ALPR(ContextEngineBase):
    """Context engine that runs OpenALPR plate recognition over a video file."""

    # Trained classifier (OpenALPR engine instance, created in __init__)
    alpr = None

    # Top n highest confidence predictions kept per frame
    n = 5

    def __init__(self, complexity, numInputs, outputClassifier, inputClassifiers, appFieldsDict):
        ContextEngineBase.__init__(self, complexity, numInputs, outputClassifier, inputClassifiers, appFieldsDict)
        self.alpr = Alpr("us", "/etc/openalpr/openalpr.conf", "/home/pi/openalpr/runtime_data")
        if not self.alpr.is_loaded():
            print("Error loading OpenALPR")
            sys.exit(1)
        self.alpr.set_top_n(self.n)
        self.alpr.set_default_region("va")

    #  Execute the trained classifier against the given test sample.
    #  inputObsVector is a path to the video file.
    def execute(self, inputObsVector):
        if len(inputObsVector) == self.numInputs:
            return self.predict(inputObsVector)
        print("Wrong dimensions, fail to execute")
        return None

    #  Grabs frames and returns top n predictions per frame.
    def predict(self, x_Test):
        cap = cv2.VideoCapture(x_Test[0])
        plates_list = np.empty([0, self.n])
        if not cap.isOpened():
            # BUG FIX: the original called cap.open() with no argument, which
            # raises TypeError; report the error and return an empty result.
            print("vid open error")
            return plates_list
        fps = 25
        timedelta = 0  # seconds spent on the last recognition call
        detectCounter = [0]
        while cap.isOpened():
            ret, frame = cap.read()
            # Skip roughly fps*timedelta frames to compensate for the time the
            # previous recognition took, so processing keeps up with the video.
            if detectCounter[0] < fps * timedelta:
                detectCounter[0] += 1
                continue
            detectCounter[0] = 0
            if not ret:
                break
            pretime = time.time()
            ret, enc = cv2.imencode("*.bmp", frame)
            results = self.alpr.recognize_array(bytes(bytearray(enc)))
            posttime = time.time()
            # One row of up to n candidate strings; dtype 'a5' silently
            # truncates each plate to 5 ASCII bytes.
            plates = np.empty([1, self.n], dtype='a5')
            for s in range(0, self.n):
                plates[0][s] = ""
            for plate in results['results']:
                i = 0
                for candidate in plate['candidates']:
                    platenum = candidate['plate'].encode('ascii', 'ignore')
                    plates[0][i] = platenum
                    i += 1
            timedelta = posttime - pretime  # in seconds
            plates_list = np.vstack((plates_list, plates))
        return plates_list
Esempio n. 2
0
def pred_alpr(image1, region="ca"):
    """Recognize the license plate in *image1* with OpenALPR.

    Parameters:
        image1: path to the image file to analyze.
        region: default plate pattern region (previously hard-coded to "ca";
            exposed as a parameter with the same default for compatibility).

    Returns:
        (plate, confidence) of the last candidate reported by OpenALPR.

    Raises:
        ValueError: if no plate was detected. (The original code raised a
            confusing NameError here, because it returned the leaked loop
            variable `candidate` which was never bound when there were no
            results.)
    """
    alpr = Alpr("us", "C:\\Users\\shobeir.mazinani\\Desktop\\BasharsALPR\\OpenALPR-Python\\openalpr_64\\openalpr.conf", "C:\\Users\\shobeir.mazinani\\Desktop\\BasharsALPR\\OpenALPR-Python\\openalpr_64\\runtime_data")
    if not alpr.is_loaded():
        print("Error loading OpenALPR")
        sys.exit(1)
    else:
        print("OpenALPR was loaded correctly")

    try:
        alpr.set_top_n(1)
        alpr.set_default_region(region)
        results = alpr.recognize_file(image1)

        last_candidate = None
        i = 0
        for plate in results['results']:
            i += 1
            print("Plate #%d" % i)
            print("   %12s %12s" % ("Plate", "Confidence"))
            for candidate in plate['candidates']:
                last_candidate = candidate

        if last_candidate is None:
            raise ValueError("no license plate detected in %s" % image1)
        return last_candidate['plate'], last_candidate['confidence']
    finally:
        # BUG FIX: release native memory; the original left the unload
        # commented out and leaked the engine on every call.
        alpr.unload()
Esempio n. 3
0
File: main.py Project: riaz/Recee
def initNumPlateRecognizer(region="eu", country="eu"):
    """Create and configure an OpenALPR engine for number-plate recognition.

    Parameters:
        region: default plate pattern region passed to set_default_region.
        country: training-data country (previously hard-coded to "eu";
            exposed with the same default so existing callers are unaffected).

    Returns:
        A loaded Alpr instance; exits the process if loading fails.
    """
    alpr = Alpr(country, "nplate_train/openalpr.conf.in", "nplate_train/runtime_data")
    if not alpr.is_loaded():
        print("Error loading OpenALPR")
        sys.exit(1)
    alpr.set_default_region(region)
    return alpr
Esempio n. 4
0
def license_read(filenames=None):
    """Return all candidate plate strings found in the given image files.

    Parameters:
        filenames: list of image paths. (The original used a mutable default
            argument and always read exactly 5 files via `range(5)`, which
            raised IndexError for shorter lists and ignored extra files.)

    Returns:
        A list of plate strings (possibly empty), or the string "Error" if
        OpenALPR fails to load.
    """
    # Avoid the mutable-default-argument pitfall.
    filenames = [] if filenames is None else filenames
    # Tell alpr which country license plate to use and where to find the
    # openalpr.conf file and the runtime_data folder.
    alpr = Alpr("us", "/etc/openalpr/openalpr.conf", "/home/baus/Documents/openalpr/runtime_data/")
    # Ensure that alpr is loaded and can be used.
    if not alpr.is_loaded():
        print("Error loading OpenALPR")
        return "Error"
    alpr.set_top_n(1)
    alpr.set_default_region('md')

    license_plates = []
    # For every image that was sent, check if a license plate exists.
    for name in filenames:
        results = alpr.recognize_file(name)
        for plate in results["results"]:
            for candidate in plate["candidates"]:
                # Append the license plate to the list; nothing is appended
                # if no license plate was found in the image.
                license_plates.append(candidate["plate"])
    return license_plates
def f(data):
    """Recognize license plates in a raw JPEG byte buffer and print results.

    Parameters:
        data: raw JPEG image bytes fed to Alpr.recognize_array.

    Side effects:
        Parses CLI options (country/config/runtime_data), prints the engine
        version, image metadata, and every plate candidate. Always unloads
        the ALPR engine before returning.
    """
    parser = ArgumentParser(description='OpenALPR Python Test Program')

    parser.add_argument("-c", "--country", dest="country", action="store", default="us",
                      help="License plate Country" )

    OpenALPR_path = "C:/Users/Franco/Documents/Github/control-vehicular/Otros/Deteccion/openalpr_32bit/"

    parser.add_argument("--config", dest="config", action="store", default=OpenALPR_path+"openalpr.conf",
                      help="Path to openalpr.conf config file" )

    parser.add_argument("--runtime_data", dest="runtime_data", action="store", default=OpenALPR_path+"runtime_data",
                      help="Path to OpenALPR runtime_data directory" )

    options = parser.parse_args()

    print(options.country, options.config, options.runtime_data)

    alpr = None
    try:
        # This Alpr binding expects bytes for its constructor arguments.
        alpr = Alpr(options.country.encode('ascii'), options.config.encode('ascii'), options.runtime_data.encode('ascii'))

        if not alpr.is_loaded():
            print("Error loading OpenALPR")
        else:
            print("Using OpenALPR " + alpr.get_version().decode('ascii'))

            alpr.set_top_n(7)
            alpr.set_default_region(b"wa")
            alpr.set_detect_region(False)
            # (Removed the unused `jpeg_bytes = data` alias from the original.)
            results = alpr.recognize_array(bytes(bytearray(data)))

            # Uncomment to see the full results structure
            # import pprint
            # pprint.pprint(results)

            print("Image size: %dx%d" % (results['img_width'], results['img_height']))
            print("Processing Time: %f" % results['processing_time_ms'])

            if results['results']:
                # Show the best overall plate first.
                print("%12s%12f" % (results['results'][0]['plate'], results['results'][0]['confidence']))
            for i, plate in enumerate(results['results'], start=1):
                print("Plate #%d" % i)
                print("   %12s %12s" % ("Plate", "Confidence"))
                for candidate in plate['candidates']:
                    # "*" marks candidates matching a regional plate template.
                    prefix = "*" if candidate['matches_template'] else "-"
                    print("  %s %12s%12f" % (prefix, candidate['plate'], candidate['confidence']))
    finally:
        # Always release the engine's native memory.
        if alpr:
            alpr.unload()
# NOTE(review): top-level script fragment; `parser`, `options.Video_Source`,
# and the imports (os, csv, sys, cv2, Alpr) are defined outside this view.
options = parser.parse_args()

# Destination folder for cropped plate images.
dst = os.getcwd() + '\cropedImages'  # destination to save the images  # NOTE(review): '\c' is a literal backslash (Windows-only path); "croped" looks like a typo for "cropped" — confirm before renaming
if not os.path.exists(dst):
    os.makedirs(dst)

# CSV log of detections: one row per plate sighting.
fieldnames = ['ID', 'Plate number', 'Image Path', 'Date', 'Time']
fileID = open('file.csv', 'w')  # file to save the data
newFileWriter = csv.writer(fileID)
newFileWriter.writerow(fieldnames)

# Build the OpenALPR engine from the CLI options.
alpr = Alpr(options.country, options.config, options.runtime_data)

#alpr = Alpr("mx", "C:/openALPR_1/openalpr_64/openalpr.conf", "C:/openALPR_1/openalpr_64/runtime_data")
#alpr.set_top_n(10)
alpr.set_default_region("md")
if not alpr.is_loaded():
    sys.exit(1)

print("Using OpenALPR " + alpr.get_version())
# Video source plus a MOG2 background subtractor used for motion detection.
cap = cv2.VideoCapture(options.Video_Source)
bg_subtractor = cv2.createBackgroundSubtractorMOG2(history=500,
                                                   detectShadows=True)

print('Training BG Subtractor...')
cv2.namedWindow('op', cv2.WINDOW_NORMAL)
cnt = 0
# NOTE(review): the fragment is truncated here — the body of `if not ok:` is missing.
while True:
    ok, frame = cap.read()
    if not ok:
Esempio n. 7
0
# NOTE(review): Python 2 fragment (bare print statements); truncated mid-loop.
from openalpr import Alpr
import sys
import cv2

# Build a US-trained engine from files in the working directory.
alpr = Alpr('us', 'openalpr.conf', 'runtime_data')
if not alpr.is_loaded():
    print("Error loading OpenALPR")
    sys.exit(1)

# Keep up to 20 candidates per plate; default pattern region: California.
alpr.set_top_n(20)
alpr.set_default_region("ca")

#img1 = cv2.imread("us-4.jpg",1)
#cv2.imshow('Image',img1)
#cv2.waitKey(0)
results = alpr.recognize_file('us-3.jpg')

i = 0
print results

if len(results['results']) == 0:
    print "No results found"
    sys.exit()

# Print every candidate for every plate found.
for plate in results['results']:
    i += 1
    print("Plate #%d" % i)
    print("   %12s %12s" % ("Plate", "Confidence"))
    for candidate in plate['candidates']:
        prefix = "-"
        if candidate['matches_template']:
Esempio n. 8
0
def process_video(video, license_plate, _id):
    """Scan *video* for *license_plate*; save and return an annotated frame.

    Parameters:
        video: path (or capture source) handed to cv2.VideoCapture.
        license_plate: plate string to confirm against the top candidate.
        _id: identifier used in the saved confirmation image filename.

    Returns:
        Path of the saved confirmation image, or None if the plate was never
        confirmed before the video ended.
    """
    alpr = Alpr('us', ALPR_CONFIG, RUNTIME_DATA)
    if not alpr.is_loaded():
        print("Error loading OpenALPR")
        sys.exit(1)

    alpr.set_top_n(5)
    alpr.set_default_region("md")

    def get_info(frame, file_type):
        # Returns ([x1, y1, x2, y2], top_plate, confidence) for the best
        # result, or False when OpenALPR found nothing.
        if file_type == 'array':
            results = alpr.recognize_ndarray(frame)
        else:
            results = alpr.recognize_file(frame)
        if results['results']:
            top_plate = results['results'][0]['candidates'][0]['plate']
            confidence = results['results'][0]['candidates'][0]['confidence']
            x1 = results["results"][0]["coordinates"][0]["x"]
            y1 = results["results"][0]["coordinates"][0]["y"]
            x2 = results["results"][0]["coordinates"][2]["x"]
            y2 = results["results"][0]["coordinates"][2]["y"]
            return [x1, y1, x2, y2], top_plate, confidence
        return False

    def draw_boxes(regions_of_interest, plate_number, confidence, img):
        # Draw the bounding box and a "<plate> <confidence>%" label on img.
        x, y, x1, y1 = regions_of_interest
        cv2.rectangle(img, (x, y), (x1, y1), (0, 255, 0), 2)
        text = "{} {:.1f}%".format(plate_number, confidence)
        cv2.putText(img, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (255, 0, 0), 2)

    video = cv2.VideoCapture(video)
    try:
        while True:
            grabbed, frame = video.read()
            if not grabbed:
                break
            start = time.time()

            try:
                regions_of_interest, top_plate, confidence = get_info(
                    frame, 'array')
                print(regions_of_interest, top_plate, confidence)
                # Confirming license plate.
                if str(top_plate) == license_plate:
                    draw_boxes(regions_of_interest, top_plate, confidence, frame)
                    cv2.imwrite(f'{IMAGE_FOLDER}/confirmation{_id}.png', frame)
                    return f'{IMAGE_FOLDER}/confirmation{_id}.png'
            except TypeError:
                # get_info returned False (no plate), so the tuple unpack
                # failed. BUG FIX: the original used a bare `except:`, which
                # also swallowed unrelated errors (even KeyboardInterrupt).
                print('License Plate not Detected')

            end = time.time()
            print(f"[INFO] Processing Time - {end-start}")
    finally:
        # BUG FIX: the original leaked these on the early `return` path.
        video.release()
        alpr.unload()
Esempio n. 9
0
# NOTE(review): script fragment — `parser` is created earlier, outside this view.
parser.add_argument('plate_image', help='License plate image file')

options = parser.parse_args()

alpr = None
# NOTE(review): truncated — the except/finally for this `try` (presumably
# unloading alpr) is missing from this view.
try:
    alpr = Alpr(options.country, options.config, options.runtime_data)

    if not alpr.is_loaded():
        print("Error loading OpenALPR")
    else:
        print("Using OpenALPR " + alpr.get_version())

        # Up to 7 candidates per plate, Washington patterns, skip region detection.
        alpr.set_top_n(7)
        alpr.set_default_region("wa")
        alpr.set_detect_region(False)
        # Feed the raw JPEG bytes straight to the engine.
        jpeg_bytes = open(options.plate_image, "rb").read()
        results = alpr.recognize_array(jpeg_bytes)

        # Uncomment to see the full results structure
        # import pprint
        # pprint.pprint(results)

        print("Image size: %dx%d" %(results['img_width'], results['img_height']))
        print("Processing Time: %f" % results['processing_time_ms'])

        i = 0
        for plate in results['results']:
            i += 1
            print("Plate #%d" % i)
Esempio n. 10
0
class recognizer(threading.Thread):
    """Worker thread (Python 2 syntax) that polls source_dir for JPEG files,
    runs OpenALPR on each, records confident matches to CSV and/or JSON
    (augmented with EXIF GPS data), and moves processed images into
    hit / no-hit / low-confidence directories."""

    # Minimum confidence to accept a candidate that matches a plate template.
    min_conf_patternmatch = 75.0
    # Stricter minimum for candidates that do not match any template.
    min_conf_nopatternmatch = 85.0

    # Optional shared lock; lets several recognizer threads coexist.
    lock = None

    def __init__(self,
                 source_dir,
                 postproc_hit_dir,
                 postproc_nohit_dir,
                 postproc_nohit_lowconf_dir=None,
                 output_json_dir=None,
                 output_csv_file=None,
                 default_region=None,
                 lock=None):
        """Configure directories/outputs and load the OpenALPR engine.

        Raises TypeError when neither output_json_dir nor output_csv_file is
        given; exits the process if OpenALPR fails to load.
        NOTE(review): `output_json_dir.rstrip("/")` below raises
        AttributeError when only output_csv_file is supplied — confirm
        callers always pass output_json_dir.
        """

        threading.Thread.__init__(self)

        ## save the lock (if specified)
        ## passing in a lock object allows
        ## multiple recognizer threads to run
        ## without bumping into each other

        self.lock = lock

        ## check and clean up config
        ## TODO: use os.path to test files and directories, then raise appropriate errors

        self.source_dir = source_dir.rstrip("/")
        self.postproc_hit_dir = postproc_hit_dir.rstrip("/")
        self.postproc_nohit_dir = postproc_nohit_dir
        if (self.postproc_nohit_dir):
            self.postproc_nohit_dir = self.postproc_nohit_dir.rstrip("/")

        self.postproc_nohit_lowconf_dir = postproc_nohit_lowconf_dir
        if (self.postproc_nohit_lowconf_dir):
            self.postproc_nohit_lowconf_dir = self.postproc_nohit_lowconf_dir.rstrip(
                "/")
        else:
            self.postproc_nohit_lowconf_dir = self.postproc_nohit_dir

        if not (output_json_dir) and not (output_csv_file):
            raise (TypeError(
                'must specify output_csv_file and/or output_json_dir'))

        self.output_json_dir = output_json_dir.rstrip("/")
        self.output_csv_file = output_csv_file

        print "recognizer:init - initializing alpr"
        self.alpr = Alpr("us", "/etc/openalpr/openalpr.conf",
                         "/usr/share/openalpr/runtime_data")
        if not self.alpr.is_loaded():
            print("recognizer:init - error loading OpenALPR")
            sys.exit(1)

        self.alpr.set_top_n(10)

        if (default_region):
            self.alpr.set_default_region(default_region)

        self.running = True
        print "recognizer:init - done initializing alpr"

    def __del__(self):
        """Release the native OpenALPR engine when the object is destroyed."""
        print "recognizer:del - unloading alpr"
        self.alpr.unload()
        print "recognizer:del - done"

    def stop(self):
        """Signal the polling loop to exit and block until the thread joins."""
        print "recognizer:stop - waiting for recognizer thread to finish"
        self.running = False
        self.join()
        print "recognizer:stop - recognizer thread finished"

    def run(self):
        """Poll source_dir until stop(): lock, recognize, record, and move
        each JPEG found there."""
        tid = self.ident

        while (self.running):
            sys.stdout.flush()

            time.sleep(0.05)
            files = sorted(os.listdir(self.source_dir))

            #            if (len(files) > 0):
            #                print "recognizer:run[{}] - found {} files in {}".format(tid, len(files), self.source_dir)

            for file in files:
                # if we're supposed to be shutting down, then break out of the file processing loop
                if (self.running == False):
                    break

                matches = []
                lowconf_hit = False

                # make sure it looks like one of ours
                if (not (re.match('^\d+(.*)\.jpg$', file))):
                    if file == "README" or file.startswith(".") or re.search(
                            '.lock$', file):
                        pass  # silently ignore lock files and hidden files
                    else:
                        print "recognizer:run - ignoring file with bad name {}".format(
                            file)
                else:

                    # to be thread safe, create a lock file while we process
                    img_file = self.source_dir + "/" + file
                    lock_file = img_file + ".lock"

                    # set up file lock while blocking other threads
                    try:
                        if (self.lock):
                            self.lock.acquire()

                        # does the file still exist? if not, skip it silently -- another thread processed already
                        if not (os.path.exists(img_file)):
                            continue

                        # is the file already locked? if so, skip it and say something -- could be another thread working on it or could be a stale lock
                        ## TODO: auto remove old locks
                        try:
                            lock_stat = os.stat(lock_file)
                            if lock_stat:  # lock file exists
                                lock_age = time.time() - lock_stat.st_mtime
                                if (lock_age > STALE_LOCK_AGE):
                                    print "recognizer:run - removing stale lock file ({:.0f}s) for {}".format(
                                        lock_age, file)
                                    os.unlink(lock_file)
                                else:
                                    continue  # file recently locked -- skip it silently
                        except OSError:
                            pass  # ignore this error -- indicates lock file doesn't exist

                        # create the lock file
                        with open(lock_file, "w") as f:
                            f.write("{}".format(self.ident))

                    finally:
                        if (self.lock):
                            self.lock.release()

                    # do plate recognition
                    start_time = time.time()
                    results = self.alpr.recognize_file(self.source_dir + "/" +
                                                       file)
                    recognize_secs = time.time() - start_time
                    print "recognizer:run - recognized {:s} in {:.4f}s found {:2d} possible plates".format(
                        self.source_dir + "/" + file, recognize_secs,
                        len(results['results']))

                    # remove lock file
                    os.remove(lock_file)

                    # review results: keep the single best candidate per plate,
                    # using the looser threshold for template matches
                    for plate in results['results']:
                        best_match_plate = None
                        best_match_template = None
                        best_match_confidence = 0.0

                        for candidate in plate['candidates']:
                            if (candidate['matches_template']):
                                if (candidate['confidence'] >
                                        self.min_conf_patternmatch
                                        and candidate['confidence'] >
                                        best_match_confidence):
                                    best_match_plate = candidate['plate']
                                    best_match_confidence = candidate[
                                        'confidence']
                                    best_match_template = True
                            else:
                                if (candidate['confidence'] >
                                        self.min_conf_nopatternmatch
                                        and candidate['confidence'] >
                                        best_match_confidence):
                                    best_match_plate = candidate['plate']
                                    best_match_confidence = candidate[
                                        'confidence']
                                    best_match_template = False

                        if (best_match_plate):
                            print "recognizer:run - best match: {} (confidence: {:.3f}, template: {})".format(
                                best_match_plate, best_match_confidence,
                                "yes" if best_match_template else "no")
                            match = {
                                'recognize_time':
                                time.strftime("%Y-%m-%d %H:%M:%S"),
                                'recognize_epoch_time':
                                "{:.0f}".format(start_time),
                                'recognize_secs':
                                "{:0.4f}".format(recognize_secs),
                                'plate':
                                best_match_plate,
                                'confidence':
                                "{:0.2f}".format(best_match_confidence),
                                'matches_template':
                                best_match_template,
                                'file':
                                file
                            }

                            matches.append(match)
                        else:
                            lowconf_hit = True
                            print "recognizer:run - insufficient confidence"

                    # record matches (if any) and move the file away
                    if (len(matches) > 0):

                        # extract GPS and other EXIF data, append to match record, then write output
                        with open(self.source_dir + "/" + file,
                                  'rb') as jpgfile:
                            tags = exifread.process_file(jpgfile,
                                                         details=False)

                            # extract the image capture date and time
                            if (tags['EXIF DateTimeOriginal']):
                                exif_datetimeoriginal = time.strptime(
                                    "{}".format(tags['EXIF DateTimeOriginal']),
                                    '%Y:%m:%d %H:%M:%S')

                            # extract the GPS coordinates (convert from DMS to DD) and altitude
                            exif_gpslongitude = 0.0
                            exif_gpslatitude = 0.0
                            exif_gpsaltitude = 0
                            tag_lat = tags['GPS GPSLatitude']
                            if (tag_lat and len(tag_lat.values) == 3
                                    and tag_lat.values[0].den > 0):
                                exif_gpslatitude = (
                                    float(tag_lat.values[0].num) /
                                    float(tag_lat.values[0].den)) + (
                                        (float(tag_lat.values[1].num) /
                                         float(tag_lat.values[1].den)) / 60.0
                                    ) + ((float(tag_lat.values[2].num) / float(
                                        tag_lat.values[2].den)) / 3600.0)
                                exif_gpslatitude *= -1 if (str(
                                    tags['GPS GPSLatitudeRef']) == "S") else 1

                            tag_lon = tags['GPS GPSLongitude']
                            if (tag_lon and len(tag_lon.values) == 3
                                    and tag_lon.values[0].den > 0):
                                exif_gpslongitude = (
                                    float(tag_lon.values[0].num) /
                                    float(tag_lon.values[0].den)) + (
                                        (float(tag_lon.values[1].num) /
                                         float(tag_lon.values[1].den)) / 60.0
                                    ) + ((float(tag_lon.values[2].num) / float(
                                        tag_lon.values[2].den)) / 3600.0)
                                exif_gpslongitude *= -1 if (str(
                                    tags['GPS GPSLongitudeRef']) == "W") else 1

                            tag_altitude = tags['GPS GPSAltitude']
                            if (tag_altitude
                                    and tag_altitude.values[0].den > 0):
                                exif_gpsaltitude = float(
                                    tag_altitude.values[0].num) / float(
                                        tag_altitude.values[0].den)

                            # store EXIF data in match records
                            # NOTE(review): exif_datetimeoriginal is only bound when
                            # the EXIF tag exists -- a missing tag raises KeyError
                            # above, and an unset variable would raise NameError here.
                            for match in (matches):
                                if (exif_datetimeoriginal):
                                    match[
                                        'capture_epoch_time'] = '{:.0f}'.format(
                                            time.mktime(exif_datetimeoriginal))
                                    match['capture_time'] = time.strftime(
                                        "%Y-%m-%d %H:%M:%S",
                                        exif_datetimeoriginal)
                                else:
                                    match['capture_epoch_time'] = 0
                                    match['capture_time'] = ''

                                match['capture_longitude'] = "{:0.7f}".format(
                                    exif_gpslongitude)
                                match['capture_latitude'] = "{:0.7f}".format(
                                    exif_gpslatitude)
                                match['capture_altitude_m'] = "{:0.2f}".format(
                                    exif_gpsaltitude)

                        # write matches to CSV
                        if (self.output_csv_file):
                            write_header = False if os.access(
                                self.output_csv_file, os.F_OK) else True

                            try:
                                # only one thread can write to the CSV at a time
                                if (self.lock):
                                    self.lock.acquire()

                                with open(self.output_csv_file,
                                          "a") as csvfile:
                                    writer = csv.DictWriter(
                                        csvfile, [
                                            "recognize_time",
                                            "recognize_epoch_time", "plate",
                                            "confidence", "matches_template",
                                            "file", "recognize_secs",
                                            'capture_time',
                                            'capture_epoch_time',
                                            'capture_latitude',
                                            'capture_longitude',
                                            'capture_altitude_m'
                                        ])
                                    if (write_header):
                                        writer.writeheader()

                                    # NOTE(review): only the LAST match is written
                                    # here (loop variable leaked from the EXIF loop
                                    # above) -- should this iterate over `matches`?
                                    writer.writerow(match)
                            finally:
                                if (self.lock):
                                    self.lock.release()

                        # write JSON (each file is unique, so no thread locking needed)
                        if (self.output_json_dir):
                            json_file = self.output_json_dir + "/" + file[:file.index(
                                ".jpg")] + ".json"
                            with open(json_file, "w") as jsonfile:
                                jsonfile.write(json.dumps(matches))

                        # move the file
                        os.rename(self.source_dir + "/" + file,
                                  self.postproc_hit_dir + "/" + file)
                    elif (lowconf_hit):  #insufficient confidence
                        if (self.postproc_nohit_lowconf_dir):
                            os.rename(
                                self.source_dir + "/" + file,
                                self.postproc_nohit_lowconf_dir + "/" + file)
                        else:
                            os.unlink(self.source_dir + "/" + file)
                    else:  #no hit
                        if (self.postproc_nohit_dir):
                            os.rename(self.source_dir + "/" + file,
                                      self.postproc_nohit_dir + "/" + file)
                        else:
                            os.unlink(self.source_dir + "/" + file)
Esempio n. 11
0
# NOTE(review): script fragment; the imports (cv2, sys, Flask) live outside this view.
# Plates recognized so far.
license_plates = []

# OpenALPR object for license plate detection based on the us configuration
alpr = Alpr(
    'us',
    'C:/Users/dhruv/Desktop/Git/openalpr-2.3.0-win-64bit/openalpr_64/openalpr.conf',
    'C:/Users/dhruv/Desktop/Git/openalpr-2.3.0-win-64bit/openalpr_64/runtime_data'
)
if not alpr.is_loaded():
    print("Error loading OpenALPR")
    sys.exit(1)

alpr.set_top_n(
    10
)  # Maximum number of unique license plate numbers detected by API per frame
alpr.set_default_region("on")  # Set default license plate region to Ontario

print("! starting video capture")
cap = cv2.VideoCapture(0)  # OpenCV Video capture
if not cap.isOpened():
    # Release the engine before bailing out.
    alpr.unload()
    sys.exit('Failed to open video file!')

curr_plate = ''  # Plate currently being read

# initialize a flask object
app = Flask(__name__)

# initialize the video stream and allow the camera sensor to
# warmup
# vs = VideoStream(usePiCamera=1).start()
Esempio n. 12
0
from openalpr import Alpr
import sys  # BUG FIX: sys.exit below raised NameError without this import

# Load the US-trained OpenALPR engine (paths are placeholders).
alpr = Alpr("us", "/path/to/openalpr.conf", "/path/to/runtime_data")
if not alpr.is_loaded():
    print("Error loading OpenALPR")
    sys.exit(1)

# Keep up to 20 candidates per plate; default pattern region: Maryland.
alpr.set_top_n(20)
alpr.set_default_region("md")

results = alpr.recognize_file("./image/DSCN0416.jpg")

# Print every candidate for every plate found.
i = 0
for plate in results['results']:
    i += 1
    print("Plate #%d" % i)
    print("   %12s %12s" % ("Plate", "Confidence"))
    for candidate in plate['candidates']:
        # "*" marks candidates matching a regional plate template.
        prefix = "-"
        if candidate['matches_template']:
            prefix = "*"

        print("  %s %12s%12f" % (prefix, candidate['plate'], candidate['confidence']))

# Call when completely done to release memory
alpr.unload()
Esempio n. 13
0
# NOTE(review): truncated fragment -- the `try` below has no except/finally in view,
# and `options` / the imports (os, Alpr) are defined elsewhere.
alpr = None
files = []
try:
    # Directory containing this script.
    __location__ = os.path.realpath(
        os.path.join(os.getcwd(), os.path.dirname(__file__)))
    alpr = Alpr("eu", options.config, options.runtime_data)

    if not alpr.is_loaded():
        print("Error loading OpenALPR")
    else:
        #print("Using OpenALPR " + alpr.get_version())
        files += [each for each in os.listdir('.') if each.endswith('.png')]
        print("<table>")
        for file in files:
            alpr.set_top_n(7)
            alpr.set_default_region("eu")
            alpr.set_detect_region(True)
            # NOTE(review): `__location__ + file` lacks a path separator, and
            # os.listdir('.') scans the CWD rather than __location__ -- likely bugs.
            jpeg_bytes = open(__location__ + file, "rb").read()
            results = alpr.recognize_array(jpeg_bytes)

            # Uncomment to see the full results structure
            # import pprint
            # pprint.pprint(results)

            #print("Image size: %dx%d" %(results['img_width'], results['img_height']))
            #print("Processing Time: %f" % results['processing_time_ms'])

            i = 0
            if (results['results']):
                print("<tr><td><img width='640' height='480' src='" + file +
                      "' /></td><td>")
Esempio n. 14
0
parser.add_argument('--interval', help='interval between polling for a new image to process', type=int, default=10)
parser.add_argument('--preprocess',
                    nargs='*',
                    help='prewarp value to apply against each, and/or the x1,y1,x2,y2 values to crop',
                    default=[],
                    action=PreProcessRulesAction)

args = parser.parse_args()

config_path = os.getenv('OPEN_ALPR_CONFIG_PATH')
open_alpr = Alpr(args.country, config_path, "/usr/share/openalpr/runtime_data")
if not open_alpr.is_loaded():
    print("Error loading OpenALPR")
    sys.exit(1)

open_alpr.set_default_region(args.region)


def poll(alpr, input_url, output_url, interval, preprocessing_rules):
    """Poll input_url once for an image and run ALPR on it.

    NOTE(review): truncated in this chunk — output_url and interval are unused
    in the visible portion; presumably the remainder posts results to
    output_url. Confirm against the full source.
    """
    if args.verbose:
        print("Polling {}".format(input_url))

    image = None
    try:
        image = download_image(input_url)
    except urllib.URLError:
        # Best-effort: log the failure and fall through with image = None.
        eprint("Failed to poll {}".format(input_url))
        pass

    if image is not None:
        alpr_results = process_image(alpr, image, preprocessing_rules)
import sys
import os
import requests
import json

# --- Webcam ALPR loop: capture frames from camera index 1 and recognize plates ---
headers = {'content-type': 'application/json'}
url = "http://requestb.in/vd5wylvd"
alpr = Alpr("us", "/etc/openalpr/openalpr.conf",
            "/usr/share/openalpr/runtime_data")

if not alpr.is_loaded():
    # NOTE(review): Python 2 print statement; other snippets in this file use
    # the Python 3 print() function.
    print "Error loading OpenALPR"
    sys.exit(1)

# Keep only the single best candidate per plate.
alpr.set_top_n(1)
alpr.set_default_region("us")

cap = cv2.VideoCapture(1)

probablePlates = {}
wasPlate = False
numEx = 5
count = 0

while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # NOTE(review): ret is cap.read()'s boolean success flag, so the filename
    # alternates between frame1.jpg/frame0.jpg rather than numbering frames;
    # a counter was probably intended. Snippet is truncated shortly below.
    temp = "frame%d.jpg" % ret
    cv2.imwrite(temp, frame)

    results = alpr.recognize_file(temp)
Esempio n. 16
0
def main(args):
    """Build and run a DeepStream inference pipeline over the given URIs.

    Topology: N source bins -> nvstreammux -> pgie1 (primary detector)
    -> nvtracker -> sgie (secondary classifier) -> nvmultistreamtiler
    -> nvvideoconvert -> nvdsosd -> nvvideoconvert -> capsfilter(I420)
    -> H264/H265 encoder -> RTP payloader -> udpsink, re-exposed as an
    RTSP stream at rtsp://localhost:8554/ds-test.

    args is an argv-style list: args[0] is the program name, args[1:] are
    input URIs (an rtsp:// URI marks the pipeline as live). Relies on
    module-level names defined elsewhere in this file: codec, bitrate,
    fps_streams, GETFPS, create_source_bin, is_aarch64,
    TILED_OUTPUT_WIDTH/HEIGHT, bus_call, osd_sink_pad_buffer_probe.
    """
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)

    # Shared OpenALPR engine (used from the OSD probe callback).
    global global_alpr_engine
    global_alpr_engine = Alpr("eu", "/etc/openalpr/openalpr.conf", "/usr/share/openalpr/runtime_data")

    if not global_alpr_engine.is_loaded():
        print("Error loading OpenALPR")
        sys.exit(1)

    global_alpr_engine.set_top_n(10)
    global_alpr_engine.set_default_region("fi")

    # One FPS counter per input stream.
    for i in range(0,len(args)-1):
        fps_streams["stream{0}".format(i)]=GETFPS(i)
    number_sources=len(args)-1

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    # streammux goes into the pipeline now, so the source bins created below
    # can request and link its sink pads.
    pipeline.add(streammux)
    for i in range(number_sources):
        print("Creating source_bin ",i," \n ")
        uri_name=args[i+1]
        if uri_name.find("rtsp://") == 0 :
            is_live = True
        source_bin=create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname="sink_%u" %i
        sinkpad= streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad=source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    print("Creating Pgie \n ")

    # Primary neural net: nvinfer runs inferencing on the decoder's output;
    # behaviour is set through its config file.
    pgie1 = Gst.ElementFactory.make("nvinfer", "primary-inference-1")
    if not pgie1:
        sys.stderr.write(" Unable to create pgie1 \n")

    # Multi-object tracker.
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")

    # Secondary network, run on objects produced by pgie1/tracker.
    sgie = Gst.ElementFactory.make("nvinfer", "secondary1-nvinference-engine")
    if not sgie:
        sys.stderr.write(" Unable to make sgie \n")

    tiler=Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")

    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert", "convertor_postosd")
    if not nvvidconv_postosd:
        sys.stderr.write(" Unable to create nvvidconv_postosd \n")

    # Create a caps filter
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))

    # Make the encoder
    if codec == "H264":
        encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
        print("Creating H264 Encoder")
    elif codec == "H265":
        encoder = Gst.ElementFactory.make("nvv4l2h265enc", "encoder")
        print("Creating H265 Encoder")
    if not encoder:
        sys.stderr.write(" Unable to create encoder")
    encoder.set_property('bitrate', bitrate)
    if is_aarch64():
        # Jetson-specific encoder tuning.
        encoder.set_property('preset-level', 1)
        encoder.set_property('insert-sps-pps', 1)
        encoder.set_property('bufapi-version', 1)

    # Make the payload-encode video into RTP packets
    if codec == "H264":
        rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
        print("Creating H264 rtppay")
    elif codec == "H265":
        rtppay = Gst.ElementFactory.make("rtph265pay", "rtppay")
        print("Creating H265 rtppay")
    if not rtppay:
        sys.stderr.write(" Unable to create rtppay")

    # Make the UDP sink (multicast; the RTSP server below re-serves it).
    updsink_port_num = 5400
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink:
        sys.stderr.write(" Unable to create udpsink")

    sink.set_property('host', '224.224.255.255')
    sink.set_property('port', updsink_port_num)
    sink.set_property('async', False)
    sink.set_property('sync', 1)

    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    # Set the config file for each inference engine.
    pgie1.set_property('config-file-path', "dstest2_pgie1_config.txt")
    sgie.set_property('config-file-path', "dstest2_sgie_config.txt")

    tiler.set_property("rows",1)
    tiler.set_property("columns",1)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    # Set properties of the tracker from its config file.
    config = configparser.ConfigParser()
    config.read('dstest2_tracker_config.txt')
    config.sections()

    for key in config['tracker']:
        if key == 'tracker-width' :
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height' :
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id' :
            tracker_gpu_id = config.getint('tracker', key)
            # GObject canonicalizes '_' and '-' in property names, so
            # 'gpu_id' resolves to the 'gpu-id' property.
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file' :
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file' :
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process' :
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process', tracker_enable_batch_process)

    # Populate the pipeline.
    # FIX: streammux was already added to the pipeline above (before linking
    # the source bins); adding the same element a second time makes
    # Gst.Bin.add fail with a warning, so the duplicate add was removed here.
    print("Adding elements to Pipeline \n")
    pipeline.add(pgie1)
    pipeline.add(tracker)
    pipeline.add(sgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(nvvidconv_postosd)
    pipeline.add(caps)
    pipeline.add(encoder)
    pipeline.add(rtppay)
    pipeline.add(sink)

    # Link the elements together:
    # streammux -> pgie1 -> tracker -> sgie -> tiler -> nvvidconv -> nvosd
    # -> nvvidconv_postosd -> caps -> encoder -> rtppay -> sink
    print("Linking elements in the Pipeline \n")
    streammux.link(pgie1)
    pgie1.link(tracker)
    tracker.link(sgie)
    sgie.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(nvvidconv_postosd)
    nvvidconv_postosd.link(caps)
    caps.link(encoder)
    encoder.link(rtppay)
    rtppay.link(sink)

    # Create an event loop and feed gstreamer bus messages to it.
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start streaming over RTSP.
    rtsp_port_num = 8554

    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)

    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch( "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )" % (updsink_port_num, codec))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/ds-test", factory)

    print("\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/ds-test ***\n\n" % rtsp_port_num)

    # Add a probe on the sink pad of the OSD element: by that point the
    # buffer carries all the inference/tracker metadata.
    sink_pad = nvosd.get_static_pad("sink")
    if not sink_pad:
        sys.stderr.write(" Unable to get sink pad \n")
    else:
        # Custom callback that reads the metadata and draws on screen.
        sink_pad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    print("Starting pipeline \n")

    # Start playback and listen to events until interrupted.
    pipeline.set_state(Gst.State.PLAYING)
    try:
      loop.run()
    except:
      pass

    # cleanup
    pipeline.set_state(Gst.State.NULL)
import os
import requests
import json
# import paho.mqtt.client as client

# --- Webcam ALPR loop (variant of the earlier snippet, camera index 0) ---
headers = {'content-type': 'application/json'}
url = "http://requestb.in/vd5wylvd"
alpr = Alpr("us", "/etc/openalpr/openalpr.conf",
            "/usr/share/openalpr/runtime_data")

if not alpr.is_loaded():
    # NOTE(review): Python 2 print statement (invalid under Python 3).
    print "Error loading OpenALPR"
    sys.exit(1)

# Keep only the single best candidate per plate.
alpr.set_top_n(1)
alpr.set_default_region("us")

cap = cv2.VideoCapture(0)

# mqttc = client.Client()
# mqttc.connect("localhost", 1883, 60)

probablePlates = {}
wasPlate = False
numEx = 5
count = 0

while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # NOTE(review): snippet truncated here; ret is a bool, so "frame%d.jpg"
    # does not number frames as presumably intended.
    temp = "frame%d.jpg" % ret
import sys
import cv2
import time


# --- File-based ALPR demo over a video of Indian ("in") plates ---
video = "../../Images/vid1.mp4"


alpr = Alpr("in", "/usr/local/share/openalpr/config/openalpr.defaults.conf", "./openalpr/runtime_data")
if not alpr.is_loaded():
    print("Error loading OpenALPR")
    sys.exit(1)

# select top 10 results
alpr.set_top_n(10)
alpr.set_default_region("in")

cap = cv2.VideoCapture(video)

# for smooth termination after video ends
frame_counter = 0
length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
print(length)

start_time = time.time()

while(frame_counter < length):

    ret,frame = cap.read()
    # NOTE(review): the counter advances by 3 per iteration while cap.read()
    # consumes one frame, so the loop exits after roughly length/3 reads.
    # ret/frame are not checked, so a failed read passes None onward.
    # Snippet is truncated below this point.
    frame_counter += 3
    results = alpr.recognize_ndarray(frame)
Esempio n. 19
0
File: test.py Progetto: riaz/Recee
from openalpr import Alpr
import sys

# --- Single-image ALPR demo: recognize one EU plate in a stored capture ---
alpr = Alpr("eu", "nplate_train/openalpr.conf.in", "nplate_train/runtime_data")
if not alpr.is_loaded():
    print("Error loading OpenALPR")
    sys.exit(1)

#alpr.set_top_n(20)
alpr.set_default_region("eu")

results = alpr.recognize_file("/home/riaz/Desktop/hack/2009_09_08_drive_0010/I1_000388.png")

# Report the highest-confidence candidate for each plate found.
for plate in results['results']:

    if len(plate['candidates']) > 0:
        # FIX: this was a Python 2 print statement, a syntax error under
        # Python 3 (the check above already uses the print() function).
        print("Found: %12s %12f" % (plate['candidates'][0]['plate'], plate['candidates'][0]['confidence']))

# Call when completely done to release memory
alpr.unload()
Esempio n. 20
0
class LprRunner(NDUCameraRunner):
    def __init__(self, config, _connector_type):
        """Load the city-code table, the OpenALPR engine ("tr") and the
        YOLOv4-tiny licence-plate detector used by process_frame.

        config: dict-like runner configuration; only "send_data" is read here.
        _connector_type: unused (kept for the runner interface).
        """
        super().__init__()
        self.counter = 0
        # Whether recognized plate text is attached to results as payload data.
        self._send_data = config.get("send_data", False)

        # Two-digit city code -> city name mapping; fall back to a copy
        # shipped next to this file when the absolute path is missing.
        city_codes_fn = "/data/city_codes.json"
        if not os.path.isfile(city_codes_fn):
            city_codes_fn = os.path.dirname(
                os.path.abspath(__file__)) + city_codes_fn.replace(
                    "/", os.path.sep)
        with open(city_codes_fn, encoding="UTF-8") as f_in:
            self._cities = json.load(f_in)

        # OpenALPR configuration; same absolute-path-then-local fallback.
        conf_fn = "/data/openalpr_64/openalpr.conf"
        #conf_fn = "//usr/local/share/openalpr/config/openalpr.defaults.conf"
        # conf_fn = "/data/openalpr_64/runtime_data/config/eu.conf"
        if not os.path.isfile(conf_fn):
            conf_fn = os.path.dirname(
                os.path.abspath(__file__)) + conf_fn.replace("/", os.path.sep)

        runtime_data = "/data/openalpr_64/runtime_data/"
        #runtime_data = "/usr/local/share/openalpr/runtime_data/"
        if not os.path.isdir(runtime_data):
            runtime_data = os.path.dirname(
                os.path.abspath(__file__)) + runtime_data.replace(
                    "/", os.path.sep)

        # self._alpr = Alpr("us", "/path/to/openalpr.conf", "/path/to/runtime_data")
        # self._alpr = Alpr("eu", conf_fn, runtime_data)
        self._alpr = Alpr("tr", conf_fn, runtime_data)

        if not self._alpr.is_loaded():
            # NOTE(review): only logs — later calls on the failed engine are
            # still attempted.
            print("Error loading OpenALPR")

        # self._alpr.set_top_n(20)
        # self._alpr.set_default_region("md")
        # self._alpr.set_top_n(1)
        self._alpr.set_default_region("tr")
        self._alpr.set_country("tr")

        # region lp detection,
        # ONNX licence-plate detector (YOLOv4-tiny, static 416x416 input).
        onnx_fn = "/data/yolov4-tiny_lp_416_static.onnx"
        self.input_size = 416

        if not os.path.isfile(onnx_fn):
            onnx_fn = os.path.dirname(
                os.path.abspath(__file__)) + onnx_fn.replace("/", os.path.sep)

        # NOTE(review): classes_filename is computed but never used — the
        # class list is hard-coded to ["lp"] below.
        classes_filename = "/data/class.names"
        if not os.path.isfile(classes_filename):
            classes_filename = os.path.dirname(
                os.path.abspath(__file__)) + classes_filename.replace(
                    "/", os.path.sep)
        self.class_names = ["lp"]
        self.sess_tuple = onnx_helper.get_sess_tuple(onnx_fn)
        # endregion

    def get_name(self):
        """Identifier under which this runner is registered."""
        runner_name = "lpr"
        return runner_name

    def get_settings(self):
        """Return the runner's settings; this runner exposes no options."""
        return {}

    def process_frame(self, frame, extra_data=None):
        """Detect licence plates in *frame* and OCR them with OpenALPR.

        Runs the YOLOv4-tiny detector to find plate regions, upscales each
        padded crop to at least 400 px wide, feeds the JPEG-encoded crop to
        OpenALPR, and returns a list of result dicts keyed by the project's
        `constants` (rect, score, class name, and — when send_data is on —
        a payload with the plate text and resolved city name).
        extra_data is accepted for interface compatibility but unused here.
        """
        def to_bbox(coordinates, rect_, rh_, rw_):
            # Convert ALPR corner coordinates (scaled by rh_/rw_) into a
            # [y1, x1, y2, x2] box, offset back into full-frame space when
            # the crop rectangle rect_ is given.
            # NOTE(review): defined but never called in this method.
            x1 = coordinates[0]["x"] * rw_
            y1 = coordinates[0]["y"] * rh_
            x2 = coordinates[2]["x"] * rw_
            y2 = coordinates[2]["y"] * rh_
            if rect_ is not None:
                x1 += rect_[1]
                y1 += rect_[0]
                x2 += rect_[1]
                y2 += rect_[0]
            return [y1, x1, y2, x2]

        def enumerate_images(frame_):
            # Yield (padded plate crop, original rect, result item) triples
            # from the YOLO detector.
            # NOTE(review): uses the outer name `frame`, not the parameter
            # `frame_` — equivalent as called, but the parameter is ignored.
            result = yolo_helper.predict_v4(self.sess_tuple, self.input_size,
                                            self.class_names, frame)
            for _class_name, _score, rect0, item_ in NDUUtility.enumerate_result_items(
                    result, return_item=True):
                rect1 = geometry_helper.add_padding_rect(rect0, 0.5)
                yield image_helper.crop(frame, rect1), rect0, item_

        res = []
        for image, rect, item in enumerate_images(frame):
            h0, w0 = image_helper.image_h_w(image)

            # NOTE(review): ~85 lines of commented-out deskew /
            # four-point-transform / noise-removal experiments were removed
            # here; recover them from version control if ever needed.

            # Upscale small crops until at least 400 px wide — presumably to
            # help OpenALPR's OCR on distant plates (TODO confirm threshold).
            h1, w1 = image_helper.image_h_w(image)
            while w1 < 400:
                image = cv2.pyrUp(image)
                h1, w1 = image_helper.image_h_w(image)
            # cv2.imshow("lpr", image)
            # cv2.waitKey(500)

            # OpenALPR consumes encoded image bytes, so JPEG-encode the crop.
            success, encoded_image = cv2.imencode('.jpg', image)
            content2 = encoded_image.tobytes()
            results = self._alpr.recognize_array(content2)

            added = False
            #print("LPR: ", item.get(constants.RESULT_KEY_RECT))
            for plate in results['results']:
                txt = plate["plate"]
                if len(txt) > 2:
                    # ALPR confidence is a percentage; normalize to [0, 1].
                    score = plate["confidence"] / 100.0
                    if score > 0.01:
                        # First two characters of a Turkish plate are the
                        # city code; resolve to a city name when known.
                        city_code = txt[0:2]
                        city_name = None
                        if city_code in self._cities:
                            city_name = self._cities[city_code]
                        if city_name is None:
                            class_name = "PL: {}".format(txt)
                        else:
                            class_name = "PL: {} {}".format(city_name, txt)

                        val = {
                            constants.RESULT_KEY_RECT: rect,
                            constants.RESULT_KEY_SCORE: score,
                            constants.RESULT_KEY_CLASS_NAME: class_name
                        }

                        if self._send_data:
                            if city_name is None:
                                val[constants.RESULT_KEY_DATA] = {"pl": txt}
                            else:
                                val[constants.RESULT_KEY_DATA] = {
                                    "pl": txt,
                                    "city": city_name
                                }

                        res.append(val)
                        added = True
            if not added:
                # Emit a zero-score placeholder so the detection rect is
                # still reported even when OCR produced nothing usable.
                val = {
                    constants.RESULT_KEY_RECT: rect,
                    constants.RESULT_KEY_SCORE: 0,
                    constants.RESULT_KEY_CLASS_NAME: "PL: "
                }
                res.append(val)
        return res