def get_alpr():
    alpr = Alpr("us", "/etc/openalpr/openalpr.conf",
                "/usr/share/openalpr/runtime_data")
    if not alpr.is_loaded():
        print("Error loading OpenALPR")
        return None
    print("Using OpenALPR" + alpr.get_version())
    return alpr
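
A minimal usage sketch for the helper above, assuming `from openalpr import Alpr` is in scope and a sample image path ("car.jpg" is a placeholder, not from the original): recognize one file and always unload to free the native handle.

alpr = get_alpr()
if alpr is not None:
    try:
        alpr.set_top_n(5)
        results = alpr.recognize_file("car.jpg")  # placeholder image path
        for plate in results['results']:
            print(plate['plate'], plate['confidence'])
    finally:
        alpr.unload()  # release the native OpenALPR handle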
Example #2
    def faceDetection(self, frame):
        # Despite its name, this routine detects license plates with a Haar
        # cascade (plateCascade) and then runs OpenALPR on the annotated frame.
        global value
        global percentage
        # NOTE: constructing an Alpr instance on every frame is expensive; it
        # would normally be created once, reused, and unload()ed when done.
        alpr = Alpr("pak", "path/config/openalpr.conf",
                    "path/openalpr/runtime_data")

        frame = cv2.resize(frame, (740, 480))

        faces = plateCascade.detectMultiScale(frame,
                                              scaleFactor=1.1,
                                              minNeighbors=5,
                                              minSize=(30, 30))
        self.allfaces = faces
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 4)
            cv2.putText(frame,
                        str(value) + "-" + str(percentage) + "%",
                        (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1,
                        (0, 0, 255), 2)

        if not alpr.is_loaded():
            print("Error loading OpenALPR")
        else:
            print("Using OpenALPR " + alpr.get_version())

            alpr.set_top_n(7)
            alpr.set_default_region("wa")
            alpr.set_detect_region(False)

            cv2.imwrite("1.png", frame)
            jpeg_bytes = open("1.png", "rb").read()

            results = alpr.recognize_array(jpeg_bytes)

            print("Image size: %dx%d" %
                  (results['img_width'], results['img_height']))
            print("Processing Time: %f" % results['processing_time_ms'])
            #print(str(results['results'][0][0]['candidates']['plate']))
            i = 0
            count = 0
            for plate in results['results']:
                i += 1
                print("Plate #%d" % i)
                print("   %12s %12s" % ("Plate", "Confidence"))
                for candidate in plate['candidates']:
                    prefix = "-"
                    if candidate['matches_template']:
                        prefix = "*"
                    if count >= 1:
                        break
                    print(
                        "  %s %12s%12f" %
                        (prefix, candidate['plate'], candidate['confidence']))
                    value = candidate['plate']
                    percentage = candidate['confidence']
                    count = count + 1

        self.bbFrame = frame
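
The disk round-trip through "1.png" can be avoided; a sketch of the same recognition step done in memory with cv2.imencode (variable names match the method above, but this pattern is an alternative, not the original author's code):

ok, buf = cv2.imencode(".jpg", frame)  # JPEG-encode the frame in memory
if ok:
    results = alpr.recognize_array(buf.tobytes())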
Example #3
def main():
    alpr = None
    vs = None
    try:
        print("Starting...")
        alpr = Alpr(country, config, runtime_data)

        if not alpr.is_loaded():
            print("Error loading OpenALPR")
        else:
            print("Using OpenALPR " + alpr.get_version())

            alpr.set_top_n(1)
            alpr.set_detect_region(False)

            # initialize the video stream and allow the camera sensor to warm up
            video_source = (0 if options["videosource"] is None else options["videosource"])
            vs = VideoStream(usePiCamera=options["picamera"] > 0, src=video_source).start()
            time.sleep(2.0)
            _frame_number = 0
            print("Running...")

            # loop over the frames from the video stream
            while True:
                frame = vs.read()
                #frame = imutils.resize(frame)
                #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

                _frame_number += 1
                if _frame_number % FRAME_SKIP == 0:
                    frame_array = (cv2.imencode(".jpg", frame)[1]).tobytes()
                    results = alpr.recognize_array(frame_array)
                    if len(results["results"]) > 0:
                        pool.apply_async(_validate, args=[frame_array, results, device, iot, storage])
                                
                if options["imshow"]:
                    # show the frame
                    cv2.imshow("Frame", frame)
                
                key = cv2.waitKey(1) & 0xFF
                # if the `q` key was pressed, break from the loop
                if key == ord("q"):
                    break

    except Exception:
        print("[main] Unexpected error:", sys.exc_info())

    finally:
        if alpr:
            alpr.unload()

        pool.close()
        cv2.destroyAllWindows()
        if vs is not None:
            vs.stop()
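
Each entry in results["results"] carries a plate string and a confidence score; a hypothetical helper (the name and the 80.0 threshold are assumptions) for filtering low-confidence hits before queuing _validate:

def best_plates(results, min_conf=80.0):
    # Keep only plates OpenALPR is reasonably confident about.
    return [(p['plate'], p['confidence'])
            for p in results['results']
            if p['confidence'] >= min_conf]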
Example #4
def plater(image_paths) -> bool:
    alpr = None
    found_count = 0     
    image_dir = "./images/"
    try:
        alpr = Alpr("us", "./baller.alpr.config", "/usr/share/openalpr/runtime_data")

        if not alpr.is_loaded():
            print("Error loading OpenALPR")
        else:
            print("Using OpenALPR " + alpr.get_version())

            alpr.set_top_n(5)
            alpr.set_default_region("tx")
            alpr.set_detect_region(True)


            for entry in image_paths:
                with open(os.path.join(image_dir, entry), "rb") as f:
                    jpeg_bytes = f.read()
                results = alpr.recognize_array(jpeg_bytes)

                # print ("kakakak \n\n\n")
                # print (results['results'])
                # for plate in results['results']:
                #     i += 1
                #     print("Plate #%d" % i)
                #     print("   %12s %12s" % ("Plate", "Confidence"))
                #     for candidate in plate['candidates']:
                #         prefix = "-"
                #         if candidate['matches_template']:
                #             prefix = "*"

                #         print("  %s %12s%12f" % (prefix, candidate['plate'], candidate['confidence']))
                
                if len(results['results']) > 0:
                    found_count += 1

        return found_count > 0

    finally:
        if alpr:
            alpr.unload()
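
A hypothetical driver for plater(), assuming the images live under ./images/ as the function expects (the directory listing and extension filter are assumptions):

import os

images = [f for f in os.listdir("./images/")
          if f.lower().endswith((".jpg", ".jpeg", ".png"))]
if plater(images):
    print("At least one plate was found.")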
Example #5
try:
    
    country = 'us'
    config = '/etc/openalpr/openalpr.conf'
    runtime_data = '/usr/share/openalpr/runtime_data'
    basepath = 'Images'
    approval = False
    
    i = 0
    
    print("begin try")
    alpr = Alpr(country, config, runtime_data)
    
    plate_image = basepath + "/CapturedImage.jpg"
    
    print("Using OpenALPR " + alpr.get_version())

    if not alpr.is_loaded():
        print("Error loading OpenALPR")
    else:
        print("ALPR Loaded.")
        
        #while True:
        while i <= 10:
            
            #input = GPIO.input(11)
            #if input == 0:
            if i < 10:
                print("No Movement.")
                #GPIO.output(3, 0) # Turn light off.
            #elif input == 1:
Example #6
def main(_):
    # loading detection label map
    # PATH_TO_LABELS = '/home/hu/Downloads/tensorflow-models/research/VOCdevkit/VOC2012/data/cad_label_map.pbtxt'
    NUM_CLASSES = 3
    label_map = label_map_util.load_labelmap(FLAGS.detect_labels_path)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)
    '''
  {1: {'id': 1, 'name': 'person'},
   2: {'id': 2, 'name': 'vanRear'},
   3: {'id': 3, 'name': 'plate'}}
  '''

    # loading classification label map
    node_lookup = NodeLookup(FLAGS.classify_labels_path)
    # human_string = node_lookup.id_to_string(node_id)

    if not FLAGS.video_file:
        raise ValueError('You must supply the video file name (--video_file)')

    ############################################################
    print("initializing...")
    ############################################################
    detect_inference = Detect_inference(FLAGS.detect_model_path)
    lr_inference = LR_inference(FLAGS.classify_model_path)
    alpr = Alpr('cn', 'OpenALPR_python/openalpr.conf',
                'OpenALPR_python/runtime_data')
    if not alpr.is_loaded():
        print('!!! Error loading OpenALPR')
    else:
        print('Using OpenALPR', alpr.get_version())
        # image = cv2.imread("../OpenALPR_python/5.png")
        # best = alpr.recognize_plate(image)
        # alpr.unload()
    ''' ##################test models########################
  img_to_detect = cv2.imread('/media/hu/186E61E86E61BF5E/video-analysis/test_images/35.jpg')
  img_to_classify = cv2.imread('/home/hu/Downloads/tensorflow-models/research/VOCdevkit/VOC2012/models/model/frozen_inference_graph/loadingRate/tesdata/93.png')
  (boxes, scores, classes, num) = detect_inference.predict(img_to_detect)
  classify_predictions = lr_inference.predict(img_to_classify)
  vis_util.visualize_boxes_and_labels_on_image_array(
          img_to_detect,
          np.squeeze(boxes),
          np.squeeze(classes).astype(np.int32),
          np.squeeze(scores),
          category_index,
          use_normalized_coordinates=True,
          line_thickness=2)
  cv2.imshow('test', img_to_detect)
  cv2.waitKey(0)

  classify_predictions = np.squeeze(classify_predictions)

  top_k = classify_predictions.argsort()[-2:][::-1]
  top_names = []
  for node_id in top_k:
    human_string = node_lookup.id_to_string(node_id)
    top_names.append(human_string)
    score = classify_predictions[node_id]
    print('id:[%d] name:[%s] (score = %.5f)' % (node_id, human_string, score))
    print(classify_predictions, top_k, top_names)
  '''

    ############################################################
    print("program launched...")
    ############################################################
    displayWidth = 1280
    extra_region = 100

    cap = cv2.VideoCapture(FLAGS.video_file)
    fps = cap.get(cv2.CAP_PROP_FPS)
    wd = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    ht = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    left_bound = 1 / 3
    right_bound = 2 / 3
    # bottom_bound = None

    ratiorgb = ht / wd
    displayHeight = int(displayWidth * ratiorgb)

    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    outfile = cv2.VideoWriter('output63_8_0.avi', fourcc, fps,
                              (displayWidth, displayHeight + extra_region))
    #cv2.namedWindow('test')

    state = -1  # -1 : unknown, 0 : noVan, 1 : van
    plate = None
    loadingRate = None
    plateNumber = 0
    vanRearCounter = 0
    noVanRearCounter = 0
    confirmVanRear = 5
    confirmNoVanRear_for_noVan = int(fps)
    confirmNoVanRear_for_van = int(2 * fps)
    vanRears = []
    plates = []
    min_score_thresh = 0.5
    x_center = 0
    van_is_moving = False
    hasVanRear = False
    bestPlate = None

    fontFace = cv2.FONT_HERSHEY_COMPLEX
    duration_displaying_loading_rate = int(10 * fps)
    counter_for_displaying = 0

    while True:
        ret, image_np = cap.read()
        if not ret:
            break
        (boxes, scores, classes, num) = detect_inference.predict(image_np)
        boxes = np.squeeze(boxes)
        classes = np.squeeze(classes).astype(np.int32)
        scores = np.squeeze(scores)
        if van_is_moving:
            hasVanRear = False
            for i in range(int(num[0])):
                if scores[i] >= min_score_thresh:
                    if classes[i] == 2:  # vanRear detected
                        x_center = (boxes[i][1] + boxes[i][3]) / 2
                        if left_bound <= x_center <= right_bound:
                            hasVanRear = True
                            # vanRearCounter += 1
                            # vanRears.append(image_np[int(boxes[i][0]*ht):int(boxes[i][2]*ht)+1, int(boxes[i][1]*wd):int(boxes[i][3]*wd)+1])
                            for j in range(int(num[0])):
                                if scores[j] >= min_score_thresh:
                                    # plate (class 3) fully contained in the vanRear box
                                    if (classes[j] == 3
                                            and boxes[j][0] >= boxes[i][0]
                                            and boxes[j][1] >= boxes[i][1]
                                            and boxes[j][2] <= boxes[i][2]
                                            and boxes[j][3] <= boxes[i][3]):
                                        plates.append(
                                            image_np[int(boxes[j][0] * ht):int(boxes[j][2] * ht) + 1,
                                                     int(boxes[j][1] * wd):int(boxes[j][3] * wd) + 1])
                                        vanRears.append(
                                            image_np[int(boxes[i][0] * ht):int(boxes[j][0] * ht) + 1,
                                                     int(boxes[i][1] * wd):int(boxes[i][3] * wd) + 1])
                                        #cv2.imshow('plate', plates[-1])
                                        #cv2.imshow('vanRear', vanRears[-1])
                                        #cv2.waitKey(1)
                                        break  # already found plate, then break the loop
                                else:
                                    break
                            break
                else:
                    break
            if not hasVanRear:
                noVanRearCounter += 1
                if noVanRearCounter == confirmNoVanRear_for_noVan:  # and plates != []:
                    sum1 = (vanRears[0].shape[1] + vanRears[1].shape[1]
                            + vanRears[2].shape[1])
                    sum2 = (vanRears[-1].shape[1] + vanRears[-2].shape[1]
                            + vanRears[-3].shape[1])
                    if sum1 > sum2:
                        van_is_moving = False  # noVan confirmed
                        state = 0  # noVan
                        noVanRearCounter = 0
                        counter_for_displaying = 0
                        print(
                            '-----------------------noVan confirmed-------------------------------'
                        )
                        # evaluate loading rate and plate number
                        plateNumber = len(plates)
                        bestPlate = alpr.recognize_plate(
                            plates[int(0.1 * plateNumber) + 1])
                        plate = bestPlate['Plate']
                        #classify_predictions = lr_inference.predict(img_to_classify)
                        classify_predictions = lr_inference.predict(
                            vanRears[int(0.05 * plateNumber) + 1])
                        classify_predictions = np.squeeze(classify_predictions)

                        top_k = classify_predictions.argsort()[-1:][::-1]
                        top_names = []
                        for node_id in top_k:
                            human_string = node_lookup.id_to_string(node_id)
                            top_names.append(human_string)
                            score = classify_predictions[node_id]
                            loadingRate = human_string
                        vanRears.clear()
                        plates.clear()
                    else:
                        pass
                elif noVanRearCounter == confirmNoVanRear_for_van:  # and plates != []:
                    #sum1 = vanRears[5].shape[1] + vanRears[6].shape[1] + vanRears[7].shape[1]
                    #sum2 = vanRears[-6].shape[1] + vanRears[-7].shape[1] + vanRears[-8].shape[1]
                    #if sum1 < sum2:
                    van_is_moving = False  # van confirmed
                    state = 1  # van
                    noVanRearCounter = 0
                    counter_for_displaying = 0
                    print(
                        '+++++++++++++++++++++++++++van confirmed++++++++++++++++++++++++++++++++++'
                    )
                    # evaluate loading rate and plate number
                    plateNumber = len(plates)
                    cv2.imwrite('plate.png', plates[int(0.9 * plateNumber)])
                    bestPlate = alpr.recognize_plate(plates[int(0.9 *
                                                                plateNumber)])
                    plate = bestPlate['Plate']
                    classify_predictions = lr_inference.predict(vanRears[int(
                        0.95 * plateNumber)])
                    classify_predictions = np.squeeze(classify_predictions)

                    top_k = classify_predictions.argsort()[-1:][::-1]
                    top_names = []
                    for node_id in top_k:
                        human_string = node_lookup.id_to_string(node_id)
                        top_names.append(human_string)
                        score = classify_predictions[node_id]
                        loadingRate = human_string
                    vanRears.clear()
                    plates.clear()
            else:
                noVanRearCounter = 0  # van is still moving
        else:
            hasVanRear = False
            for i in range(int(num[0])):
                if scores[i] >= min_score_thresh:
                    if classes[i] == 2:  # vanRear detected
                        x_center = (boxes[i][1] + boxes[i][3]) / 2
                        if left_bound <= x_center <= right_bound:
                            hasVanRear = True
                            vanRearCounter += 1
                            # vanRears.append(image_np[int(boxes[i][0]*ht):int(boxes[i][2]*ht)+1, int(boxes[i][1]*wd):int(boxes[i][3]*wd)+1])
                            for j in range(int(num[0])):
                                if scores[j] >= min_score_thresh:
                                    # plate (class 3) fully contained in the vanRear box
                                    if (classes[j] == 3
                                            and boxes[j][0] >= boxes[i][0]
                                            and boxes[j][1] >= boxes[i][1]
                                            and boxes[j][2] <= boxes[i][2]
                                            and boxes[j][3] <= boxes[i][3]):
                                        plates.append(
                                            image_np[int(boxes[j][0] * ht):int(boxes[j][2] * ht) + 1,
                                                     int(boxes[j][1] * wd):int(boxes[j][3] * wd) + 1])
                                        vanRears.append(
                                            image_np[int(boxes[i][0] * ht):int(boxes[j][0] * ht) + 1,
                                                     int(boxes[i][1] * wd):int(boxes[i][3] * wd) + 1])
                                        #cv2.imshow('plate', plates[-1])
                                        #cv2.imshow('vanRear', vanRears[-1])
                                        #cv2.waitKey(1)
                                        break  # already found plate, then break the loop
                                else:
                                    break  # confidence too low, then break the loop
                            break  # already found vanRear, then break the loop
                else:
                    break  # confidence too low, then break the loop

            if not hasVanRear:
                vanRearCounter = 0
                vanRears.clear()
                plates.clear()
                # van_is_moving = False
            elif vanRearCounter == confirmVanRear:
                if plates != []:
                    van_is_moving = True
                    vanRearCounter = 0
                    print(
                        '===================van is moving======================'
                    )
                else:
                    vanRearCounter = 0
            else:
                #van_is_moving = False # just continue to accumulate evidence for van_is_moving to be True
                pass

        vis_util.visualize_boxes_and_labels_on_image_array(
            image_np,
            boxes,
            classes,
            scores,
            category_index,
            use_normalized_coordinates=True,
            max_boxes_to_draw=None,
            min_score_thresh=.5,
            line_thickness=2)
        image_np = cv2.resize(image_np, (displayWidth, displayHeight))
        canvas = cv2.copyMakeBorder(image_np,
                                    0,
                                    extra_region,
                                    0,
                                    0,
                                    cv2.BORDER_CONSTANT,
                                    value=[0, 0, 0])
        if van_is_moving:
            cv2.putText(canvas, 'a van is moving', (int(
                0.1 * displayWidth), int(displayHeight + 0.7 * extra_region)),
                        fontFace, 2, (255, 255, 255), 2, 8)
        elif state == 0:
            # noVan confirmed
            # duration_displaying_loading_rate = int(10*fps)
            # counter_for_displaying = 0
            if counter_for_displaying < duration_displaying_loading_rate:
                # display
                cv2.putText(canvas, 'the van has departed',
                            (int(0.1 * displayWidth),
                             int(displayHeight + 0.3 * extra_region)),
                            fontFace, 1, (255, 255, 255), 2, 8)
                cv2.putText(
                    canvas,
                    u'loadingRate: %s, plateNumber: %s' % (loadingRate, plate),
                    (int(0.1 * displayWidth),
                     int(displayHeight + 0.8 * extra_region)), fontFace, 1,
                    (255, 255, 255), 2, 8)
                counter_for_displaying += 1
            else:
                # stop displaying
                # counter_for_displaying = 0
                pass
        elif state == 1:
            # van confirmed
            if counter_for_displaying < duration_displaying_loading_rate:
                # display
                cv2.putText(canvas, 'the van is in dock',
                            (int(0.1 * displayWidth),
                             int(displayHeight + 0.3 * extra_region)),
                            fontFace, 1, (255, 255, 255), 2, 8)
                cv2.putText(
                    canvas,
                    u'loadingRate: %s, plateNumber: %s' % (loadingRate, plate),
                    (int(0.1 * displayWidth),
                     int(displayHeight + 0.8 * extra_region)), fontFace, 1,
                    (255, 255, 255), 2, 8)
                counter_for_displaying += 1
            else:
                # stop displaying
                # counter_for_displaying = 0
                pass
        outfile.write(canvas)
        #cv2.imshow('test', canvas)
        #if cv2.waitKey(1) == ord('q'):
        #break

    cap.release()
    outfile.release()
    alpr.unload()
    cv2.destroyAllWindows()
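
The plate-inside-vanRear test above repeats a four-way coordinate comparison in two places; a sketch of the same containment check factored into a helper (the function name is an assumption; boxes are normalized [ymin, xmin, ymax, xmax] as in the TF Object Detection API):

def box_contains(outer, inner):
    # True if the inner box lies entirely within the outer box.
    return (inner[0] >= outer[0] and inner[1] >= outer[1]
            and inner[2] <= outer[2] and inner[3] <= outer[3])

# The inline test then reads:
#   if classes[j] == 3 and box_contains(boxes[i], boxes[j]): ...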
Example #7
    lastCrawl = None
    jsonFiles = None

    start = time.time()

    logging.basicConfig(level=logging.INFO)
    if options.verbosity:
        logging.getLogger().setLevel(logging.DEBUG)
    logging.info('Starting croustibatch')

    if not options.tesseract:
        algo = Alpr(options.country, options.config, options.runtime_data)
        algo.set_top_n(5)
        algo.set_detect_region(True)
        logging.info("Using OpenALPR " + algo.get_version())
    else:
        algo = "thresh"

    # if not algo.is_loaded():
    #     logging.error("Error loading OpenALPR")
    # else:

    ftp = FTP(options.ftphost)
    ftp.login(options.loginftp, options.passwordftp)

    #if not os.path.isdir(options.directory):
    #    os.mkdir(options.directory)
    ftp.cwd(options.pathtosource)

    cwd = ftp.pwd()
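
The snippet ends after changing into the source directory; a hedged sketch of how the crawl might continue, downloading each remote JPEG into memory and running the recognizer (the extension filter and in-memory buffer are assumptions, not the original code):

import io

for name in ftp.nlst():  # list files in the current remote directory
    if not name.lower().endswith('.jpg'):
        continue
    buf = io.BytesIO()
    ftp.retrbinary('RETR ' + name, buf.write)  # download into memory
    if not isinstance(algo, str):  # skip recognition when the "thresh" fallback is active
        results = algo.recognize_array(buf.getvalue())
        logging.debug('%s -> %d plate(s)', name, len(results['results']))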
Example #8
class License():
    def __init__(self):
        super().__init__()
        self.country = 'kr'
        self.config = 'openalpr.conf'
        self.runtime_data = 'runtime_data'
        self.alpr = Alpr(self.country, self.config, self.runtime_data)

        if not self.alpr.is_loaded():
            print("Error loading OpenALPR")
        else:
            print("Using OpenALPR " + self.alpr.get_version())
        self.number = '-1'

    def imgProcessing(self, file):
        _file = self.capture()
        if _file is None:
            _file = file
        results = self.alpr.recognize_file(_file)
        print("Image size: %dx%d" %
              (results['img_width'], results['img_height']))
        print("Processing Time: %f" % results['processing_time_ms'])

        i = 0
        for plate in results['results']:
            i += 1
            print("Plate #%d" % i)
            print("   %12s %12s" % ("Plate", "Confidence"))
            self.number = plate['candidates'][0]['plate']
            for candidate in plate['candidates']:
                prefix = "-"
                if candidate['matches_template']:
                    prefix = "*"

                print("  %s %12s%12f" %
                      (prefix, candidate['plate'], candidate['confidence']) +
                      "%")
        # If no plate string was recognized, fall back to a dummy Korean plate
        # (format like 12자1234).
        if self.number == '-1' or len(results['results']) == 0:
            self.number = '00허7777'
        return self.number

    def capture(self):
        filepath = './take.jpg'
        _cap = cv2.VideoCapture(1)
        if not _cap.isOpened():
            return None
        # Grab several frames so stale buffered frames are discarded and the
        # camera's auto-exposure settles before keeping the last one.
        for _ in range(6):
            ret, frame = _cap.read()
        dst = cv2.flip(frame, 1)
        dst2 = cv2.resize(dst,
                          dsize=(0, 0),
                          fx=1.3,
                          fy=1,
                          interpolation=cv2.INTER_LINEAR)
        cv2.imwrite(filepath, dst2)
        _cap.release()
        return filepath

    def __del__(self):
        print("license class del")
Example #9
    def __init__(self, num_streams, step, resolution, thres, gpu=False, runtime=None, config=None, quiet=False):

        # Transfer parameters to attributes
        self.quiet = quiet
        self.message('Initializing...')
        self.num_streams = num_streams
        self.step = step
        if isinstance(resolution, str):
            if resolution == 'all':
                self.resolution = ['vga', '720p', '1080p', '4k']
            else:
                self.resolution = [resolution]
        elif isinstance(resolution, list):
            self.resolution = resolution
        else:
            raise ValueError('Expected list or str for resolution, but received {}'.format(resolution))
        self.thres = thres
        self.gpu = gpu

        # Detect operating system and alpr version
        if platform.system().lower().startswith('linux'):
            self.operating = 'linux'
            self.cpu_model = get_cpu_model('linux')
        elif platform.system().lower().startswith('windows'):
            self.operating = 'windows'
            self.cpu_model = get_cpu_model('windows')
        else:
            raise OSError('Detected OS other than Linux or Windows')
        self.message('\tOperating system: {}'.format(self.operating.capitalize()))
        self.message('\tCPU model: {}'.format(self.cpu_model))
        # Throwaway instance with empty config/runtime paths, used only to
        # query the installed OpenALPR version.
        alpr = Alpr('us', '', '')
        self.message('\tOpenALPR version: {}'.format(alpr.get_version()))
        alpr.unload()

        # Prepare other attributes
        if self.operating == 'linux':
            self.downloads = '/tmp/alprbench'
        else:
            self.downloads = os.path.join(os.environ['TEMP'], 'alprbench')
        os.makedirs(self.downloads, exist_ok=True)
        self.cpu_usage = {r: [] for r in self.resolution}
        self.threads_active = False
        self.frame_counter = 0
        self.mutex = Lock()
        self.streams = []
        self.round_robin = cycle(range(self.num_streams))
        self.results = PrettyTable()
        self.results.field_names = ['Resolution', 'Total FPS', 'CPU (Avg)', 'CPU (Max)', 'Frames']

        # Define default runtime and config paths if not specified
        if runtime is not None:
            self.runtime = runtime
        else:
            self.runtime = '/usr/share/openalpr/runtime_data'
            if self.operating == 'windows':
                self.runtime = 'C:/OpenALPR/Agent' + self.runtime
        if config is not None:
            self.config = config
        else:
            self.config = '/usr/share/openalpr/config/openalpr.defaults.conf'
            if self.operating == 'windows':
                self.config = 'C:/OpenALPR/Agent' + self.config
        self.message('\tRuntime data: {}'.format(self.runtime))
        self.message('\tOpenALPR configuration: {}'.format(self.config))

        # Enable GPU acceleration
        if self.gpu:
            with open(self.config, 'r') as f:
                lines = [l.strip() for l in f.read().split('\n') if l != '']
            lines.append('hardware_acceleration = 1')
            self.config = os.path.join(self.downloads, 'openalpr.conf')
            with open(self.config, 'w') as f:
                for l in lines:
                    f.write('{}\n'.format(l))