def do_the_thing(filename, account_number='016210003383', amount=2500):
    process_image = ProcessImage(image_file=filename)
    thresholded_image = process_image.get_processed_image()
    extract_characters = ExtractCharacters(thresholded_image=thresholded_image)
    parsed_text = extract_characters.extract()
    # print(parsed_text)
    check_slip = CheckSlip(parsed_text=parsed_text)
    account_verified = check_slip.check_account_number(account_number=account_number)
    amount_verified = check_slip.check_payment(amount=amount)
    transaction_number, confidence = check_slip.check_transaction_number()
    # cv2.imshow('thresholded image', thresholded_image)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    return {
        'account_verified': account_verified,
        'amount_verified': amount_verified,
        'transaction_number': transaction_number,
        'confidence': confidence
    }
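A hedged usage sketch for the slip-verification helper above; the file name and the printed fields are illustrative, and the default account number and amount are the ones hard-coded in the signature.

if __name__ == '__main__':
    # Illustrative call only: 'slip_scan.jpg' is a placeholder path, not a file
    # from the original project.
    result = do_the_thing('slip_scan.jpg')
    print('account verified:', result['account_verified'])
    print('amount verified:', result['amount_verified'])
    print('transaction {} (confidence {})'.format(result['transaction_number'],
                                                  result['confidence']))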
def __init__(self, test_file, train, threshold):
    self.test_file = test_file
    #compute probability for test data
    self.test = ProcessImage(self.test_file)
    self.test.loadImg()
    print "test image size:"
    print self.test.getImgSize()
    self.N = self.test.width * self.test.height
    self.train = train
    self.gauss_threshold = threshold
    self.lower_bound = 1.0 / 3.0
    self.upper_bound = 2.0 / 3.0
    self.extractLetters()
def main():
    # global parameters
    original_image_name = [
        "01_225x255.jpg", "02_272x272.jpg", "03_300x300.jpg",
        "04_381x378.jpg", "05_500x500.jpg", "06_630x475.jpg"
    ]
    images_folder = './images/'

    # starts procedure
    print 'Procedure started'

    # overall stats file
    m = open(images_folder + 'overall_stats.txt', 'w')

    # identify image regions using connected components algorithm
    for a in range(len(original_image_name)):
        lista = []
        f = open(images_folder + original_image_name[a] + '_stats.txt', 'w')
        print >> f, original_image_name[a]
        print original_image_name[a]
        pi = ProcessImage(images_folder + original_image_name[a])
        for i in range(100):
            # record start time
            start = time.clock()
            # identify images
            pi.identify_labels()
            # record end time
            end = time.clock()
            # add time spent to list
            lista.append(end - start)

        # not background pixels
        print >> f, pi.get_not_background_pixels()
        # save image
        pi.save_image()

        # print stats to file
        i = 1
        for row in lista:
            print >> f, '%d|%r' % (i, row)
            i = i + 1

        # calculate mean
        mean = sum(lista) / len(lista)
        print >> m, '%d|%r' % (pi.get_not_background_pixels(), mean)

    # Ends procedure
    print 'Procedure ended'
def __init__(self):
    # Global variables for executions
    self._title_exam = ''
    self._path_dataset_out = ''

    # Dependencies
    self._process = ProcessImage()
    self._pupil = Pupil()
    self._eye = Eye()

    # Limit cache for dependencies
    self._max_execution_with_cash = 20

    # Directories
    self._projects_path = '/media/marcos/Dados/Projects'
    self._dataset_path = '{}/Datasets/exams'.format(self._projects_path)
    self._dataset_out = '{}/Results/PupilDeep/Frames'.format(self._projects_path)
    self._dataset_label = '{}/Results/PupilDeep/Labels'.format(self._projects_path)

    # Stops and executions
    self._frame_stop = 150
    self._frame_start = 100
    self._movie_stop = 0
    self._list_not_available = []
    self._list_available = [
        '25080325_08_2019_08_48_58', '25080425_08_2019_08_53_48'
    ]
    # self._list_available = ['25080325_08_2019_08_48_58', '25080425_08_2019_08_53_48', '25080425_08_2019_08_55_59', '25080425_08_2019_09_05_40', '25080425_08_2019_09_08_25']
    # self._list_available = ['new_benchmark']

    # Params color
    self._white_color = (255, 255, 0)
    self._gray_color = (170, 170, 0)

    # Params text and circle print image
    self._position_text = (30, 30)
    self._font_text = cv2.FONT_HERSHEY_DUPLEX
    self._size_point_pupil = 5

    # Params dataset labels out
    self._title_label = 'frame,center_x,center_y,radius,flash,eye_size,img_mean,img_std,img_median'
def pre_process_image(img_path):
    process_img = ProcessImage(img_path)
    image = process_img.read_image()
    plt.imshow(image)
    process_img.apply_gaussian_blur((5, 5))
    edges = process_img.apply_canny_edge_detection(40, 120)
    plt.imshow(edges)
    masked_img = process_img.apply_mask_to_img()
    plt.imshow(masked_img)
    return image, edges, masked_img
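A hedged usage sketch for the pre-processing helper above; the image path is a placeholder, and it relies on the same module-level matplotlib.pyplot import (plt) that the function itself uses.

if __name__ == '__main__':
    # Placeholder path: point this at a real road image before running.
    image, edges, masked_img = pre_process_image('test_images/road.jpg')
    plt.show()  # display the figures queued by the plt.imshow calls above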
def __init__(self, num):
    self.samples = []
    self.num = num
    self.tot_mean = [0.0, 0.0, 0.0]
    self.tot_std = [0.0, 0.0, 0.0]
    # let's go through all sample inputs and extract their distributions
    for i in range(1, num + 1):
        sample_file = "buses/sample_" + str(i) + ".jpg"
        #sample_file = "../buses/boundary.jpg"
        sample = ProcessImage(sample_file)
        print "train image size:"
        print sample.getImgSize()
        # load training data for bus head sign
        sample.loadImg()
        self.samples.append(sample)
    self.computeTotMean()
    self.computeTotStd()
class Classify:
    def __init__(self, test_file, train, threshold):
        self.test_file = test_file
        #compute probability for test data
        self.test = ProcessImage(self.test_file)
        self.test.loadImg()
        print "test image size:"
        print self.test.getImgSize()
        self.N = self.test.width * self.test.height
        self.train = train
        self.gauss_threshold = threshold
        self.lower_bound = 1.0 / 3.0
        self.upper_bound = 2.0 / 3.0
        self.extractLetters()

    def extractLetters(self):
        self.runGaussian()

    def runGaussian(self):
        passed = 0
        #threshold = 1.0
        self.letter = []
        for x in range(self.test.width):
            for y in range(self.test.height):
                z1 = abs(self.test.pixels[x, y][0] - self.train.tot_mean[0]) / self.train.tot_std[0]
                z2 = abs(self.test.pixels[x, y][1] - self.train.tot_mean[1]) / self.train.tot_std[1]
                z3 = abs(self.test.pixels[x, y][2] - self.train.tot_mean[2]) / self.train.tot_std[2]
                #print "-----> (z1=%f, z2=%f, z3=%f)" % (z1, z2, z3)
                #print "(r=%f, g=%f, b=%f)" % (test.pixels[x,y][0],
                #                              test.pixels[x,y][1],
                #                              test.pixels[x,y][2])
                if (z1 <= self.gauss_threshold) and (z2 <= self.gauss_threshold) and (z3 <= self.gauss_threshold):
                    passed += 1
                    #print "(x=%d, y=%d)" % (x, y)
                    #print "(z1=%f, z2=%f, z3=%f)" % (z1, z2, z3)
                    self.letter.append((x, y))
        self.extractXYMaps()
        self.extractVertHorizLines()
        self.detectLeftMost()
        self.detectRightMost()
        self.detectBottomHorizLine()
        self.detectTopHorizLine()
        self.printDetectedLines()

    def extractXYMaps(self):
        #store x map
        self.x_map = {}
        for point in self.letter:
            if point[0] not in self.x_map:
                self.x_map[point[0]] = [point[1]]
            else:
                self.x_map[point[0]].append(point[1])
        #store y map
        self.y_map = {}
        for point in self.letter:
            if point[1] not in self.y_map:
                self.y_map[point[1]] = [point[0]]
            else:
                self.y_map[point[1]].append(point[0])

    def extractVertHorizLines(self):
        #detect vertical lines
        self.vertical = {}
        for key, value in self.x_map.iteritems():
            vertical_len = value[-1] - value[0]
            value.sort()
            #find gap
            max_gap = 0
            for index in range(len(value) - 1):
                if abs(value[index] - value[index + 1]) > max_gap:
                    max_gap = abs(value[index] - value[index + 1])
            self.vertical[key] = (len(value), max_gap, vertical_len)
        #detect horizontal line
        self.horizontal = {}
        for key, value in self.y_map.iteritems():
            horizontal_len = value[-1] - value[0]
            value.sort()
            #find gap
            max_gap = 0
            for index in range(len(value) - 1):
                if abs(value[index] - value[index + 1]) > max_gap:
                    max_gap = abs(value[index] - value[index + 1])
            self.horizontal[key] = (len(value), max_gap, horizontal_len)

    def detectLeftMost(self):
        #find leftmost
        self.max_l_line = 0
        self.max_l_x = 0
        for x in range(int(self.test.width * self.lower_bound)):
            if x in self.vertical and self.vertical[x][0] > self.max_l_line:
                self.max_l_line = self.vertical[x][0]
                self.max_l_x = x

    def detectRightMost(self):
        #find rightmost
        self.max_r_line = 0
        self.max_r_x = 0
        for x in range(int(self.test.width * self.upper_bound), self.test.width):
            if x in self.vertical and self.vertical[x][0] > self.max_r_line:
                self.max_r_line = self.vertical[x][0]
                self.max_r_x = x

    def detectBottomHorizLine(self):
        #find bottom horizontal line
        self.max_b_line = 0
        self.max_b_y = 0
        gap = 0
        for y in range(int(self.test.height * self.upper_bound), self.test.height):
            if y in self.horizontal and self.horizontal[y][0] > self.max_b_line:
                self.max_b_line = self.horizontal[y][0]
                gap = self.horizontal[y][1]
                self.max_b_y = y

    def detectTopHorizLine(self):
        #find top horizontal line
        self.max_t_line = 0
        self.max_t_y = 0
        for y in range(int(self.test.height * self.lower_bound)):
            if y in self.horizontal and self.horizontal[y][0] > self.max_t_line:
                self.max_t_line = self.horizontal[y][0]
                self.max_t_y = y

    def printDetectedLines(self):
        print "detected patterns:"
        print "left-most-line: x=%d & len=%d" % (self.max_l_x, self.max_l_line)
        print "right-most-line: x=%d & len=%d" % (self.max_r_x, self.max_r_line)
        print "bottom-line: y=%d & len=%d" % (self.max_b_y, self.max_b_line)
        print "top-line: y=%d & len=%d" % (self.max_t_y, self.max_t_line)
def pupil_process(self, paths):
    pupil_deep = PupilDeep()
    process = ProcessImage()
    pupil = Pupil(pupil_deep)
    draw = DrawImages()
    # information = Information()

    self._path_label = paths['path_label']
    self._add_label(self._title_label)

    exam = cv2.VideoCapture(paths['path_exam'])
    fps = exam.get(cv2.CAP_PROP_FPS)
    # patient_exam, param_exam = information.get_information_exam(paths['path_information'], fps)

    number_frame = 0
    while True:
        _, frame = exam.read()

        if (frame is None) or ((self._frame_stop > 0) and (number_frame >= self._frame_stop)):
            break

        if (self._frame_start > 0) and (number_frame < self._frame_start):
            number_frame += 1
            continue

        original = np.copy(frame)
        img_orig_gray = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
        # self._save_images({'original': original}, number_frame, paths['path_out'])

        img_process, flash = process.process_image(original)
        # self._save_images({'process': img_process}, number_frame, paths['path_out'])

        img_mean, img_std, img_median = img_process.mean(), img_process.std(), np.median(img_process)

        center, radius, points, images, mean_binary = pupil.pupil_detect(img_process)

        # binary = images['binary_pre_process']
        # binary = draw.mark_center(binary, center)
        # binary = draw.draw_circles(binary, points, 2, self._white_color)
        # self._save_images({'binary': binary}, number_frame, paths['path_out'], center)

        img_process = draw.mark_center(img_process, center)
        img_process = draw.draw_circles(img_process, points, 2, self._white_color)
        img_process = draw.draw_circles(img_process, [(center[0], center[1])], radius, self._white_color)
        self._save_images({'img_process': img_process}, number_frame, paths['path_out'])

        # self._save_histogram(images['histogram'], number_frame, paths['path_out'])

        # img_presentation = cv2.hconcat([img_orig_gray, binary, img_process])
        # label = 'Frame=%d;Radius=%d;Center=(%d,%d);BinMean=(%f)' % (
        #     number_frame, radius, center[0], center[1], mean_binary)
        # self._show_image(img_presentation, label, number_frame, paths['path_out'])

        # flash_information, color_information = information.get_information_params(number_frame)

        params = ['patient_exam', 'param_exam', number_frame, center[0], center[1], radius, flash,
                  'flash_information', 'color_information', 0, img_mean, img_std, img_median]
        self._add_params_label(params)

        number_frame += 1

    cv2.destroyAllWindows()
    exam.release()
def __stylize__(self):
    sess2 = tf.InteractiveSession()
    tinkered_tensor = tf.Variable(self.content_image, trainable=True)
    content_tensor = tf.constant(self.content_image)
    model2 = tf.keras.applications.VGG16(include_top=False,
                                         weights="imagenet",
                                         input_tensor=tinkered_tensor)
    model2.trainable = False
    model3 = tf.keras.applications.VGG16(include_top=False,
                                         weights="imagenet",
                                         input_tensor=content_tensor)
    model3.trainable = False
    loss = tf.Variable(initial_value=tf.zeros(1))
    loss.initializer.run()
    c_loss = self.content_loss(model3.get_layer("block1_conv2").output,
                               model2.get_layer("block1_conv2").output)
    t_loss = self.tv_loss(tinkered_tensor)
    s_loss = 0
    for layer in self.layers_for_opt:
        current_gram = self.gram_matrix(model2, layer)
        s_loss += self.style_loss(self.grams[layer], current_gram, 1)
    loss = self.content_weight * c_loss + self.style_weight * s_loss + self.tv_weight * t_loss
    opt = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
    run_it = opt.minimize(loss, var_list=tinkered_tensor)
    tinkered_tensor.initializer.run()
    sess2.run(tf.variables_initializer(opt.variables()))
    for i in range(self.iterations):
        _, current_loss = sess2.run([run_it, loss])
        print(i, current_loss)
        if i != 0 and i % 10 == 0:
            final_image = sess2.run(tinkered_tensor)[0]
            img = ProcessImage.unprocess_image(final_image)
            img.save("temp_output.jpg")
            img.show()
            continue_optimizing = input("Continue optimizing (yes / no)? ")
            if continue_optimizing == "no":
                break
            else:
                continue
    self.final_image = sess2.run(tinkered_tensor)[0]
    sess2.close()
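A minimal standalone sketch of the Gram-matrix step that the style loss above relies on; this is an illustration written against plain TensorFlow ops, not the gram_matrix method from the original class, and the (1, H, W, C) feature-map shape is an assumption.

import tensorflow as tf

def gram_matrix_sketch(feature_map):
    # feature_map: (1, H, W, C) activations from one VGG layer (assumed shape)
    shape = tf.shape(feature_map)
    h, w, c = shape[1], shape[2], shape[3]
    # flatten the spatial dimensions so each row is one pixel's channel vector
    flat = tf.reshape(feature_map, (h * w, c))
    # (C, C) channel correlations, normalised by the number of spatial positions
    return tf.matmul(flat, flat, transpose_a=True) / tf.cast(h * w, tf.float32)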
from process_image import ProcessImage
import lane_line_finding
from moviepy.editor import VideoFileClip

if __name__ == "__main__":
    cal = lane_line_finding.Calibration.create_calibration()
    pt = lane_line_finding.PerspectiveTransform.create_perspective_transform(0, 0, 0)
    video_name = "project_video.mp4"
    # video_name = "00_crop_project_video.mp4"
    pi = ProcessImage(pt, cal, smooth_window=5)

    project_output = '170527_0.5.1-project-output_{}'.format(video_name)
    clip = VideoFileClip("../" + video_name)
    project_clip = clip.fl_image(pi.run)
    project_clip.write_videofile(project_output, audio=False)
    print("Output video: {}".format(project_output))
class Main:
    def __init__(self):
        # Global variables for executions
        self._title_exam = ''
        self._path_dataset_out = ''

        # Dependencies
        self._process = ProcessImage()
        self._pupil = Pupil()
        self._eye = Eye()

        # Limit cache for dependencies
        self._max_execution_with_cash = 20

        # Directories
        self._projects_path = '/media/marcos/Dados/Projects'
        self._dataset_path = '{}/Datasets/exams'.format(self._projects_path)
        self._dataset_out = '{}/Results/PupilDeep/Frames'.format(self._projects_path)
        self._dataset_label = '{}/Results/PupilDeep/Labels'.format(self._projects_path)

        # Stops and executions
        self._frame_stop = 150
        self._frame_start = 100
        self._movie_stop = 0
        self._list_not_available = []
        self._list_available = [
            '25080325_08_2019_08_48_58', '25080425_08_2019_08_53_48'
        ]
        # self._list_available = ['25080325_08_2019_08_48_58', '25080425_08_2019_08_53_48', '25080425_08_2019_08_55_59', '25080425_08_2019_09_05_40', '25080425_08_2019_09_08_25']
        # self._list_available = ['new_benchmark']

        # Params color
        self._white_color = (255, 255, 0)
        self._gray_color = (170, 170, 0)

        # Params text and circle print image
        self._position_text = (30, 30)
        self._font_text = cv2.FONT_HERSHEY_DUPLEX
        self._size_point_pupil = 5

        # Params dataset labels out
        self._title_label = 'frame,center_x,center_y,radius,flash,eye_size,img_mean,img_std,img_median'

    def _add_label(self, information):
        with open('{}/{}_label.csv'.format(self._dataset_label, self._title_exam), 'a', newline='') as file:
            file.write('{}\n'.format(information))
            file.close()

    def _make_path(self, path=''):
        try:
            if path == '':
                os.mkdir(self._path_dataset_out)
            else:
                os.mkdir(path)
        except FileExistsError:
            pass

    def _show_image(self, image, label, number_frame, color=None):
        system_continue = True
        paint = self._white_color if color is None else color
        cv2.putText(image, label, self._position_text, self._font_text, 0.9, paint)

        # cv2.namedWindow('Analysis', cv2.WINDOW_NORMAL)
        # cv2.imshow('Analysis', image)
        # order = cv2.waitKey(1)
        #
        # if order == 32:
        #     time.sleep(2)
        # elif order == ord('q'):
        #     system_continue = False

        self._save_images({'final': image}, number_frame)
        return system_continue

    def _save_images(self, images, number_frame, center=(0, 0)):
        for key, value in images.items():
            if 'binary' in key:
                image = self._mark_center(value, center)
            else:
                image = value
            out = '{}/{}_{}.png'.format(self._path_dataset_out, key, number_frame)
            cv2.imwrite(out, image)

    def _save_histogram(self, histogram, number_frame):
        pl.hist(histogram, bins='auto')
        pl.title('Histogram Frame: {}'.format(number_frame))
        pl.xlabel("Value")
        pl.ylabel("Frequency")
        pl.savefig("{}/histogram_{}.png".format(self._path_dataset_out, number_frame))

    def _mark_eye(self, image, right, left):
        cv2.line(image, (right[0], right[1]), (left[0], left[1]), self._white_color, 1)
        return image

    def _mark_center(self, image, center):
        color = self._white_color
        cv2.line(image, (center[0] - 10, center[1]), (center[0] + 10, center[1]), color, 1)
        cv2.line(image, (center[0], center[1] - 10), (center[0], center[1] + 10), color, 1)
        return image

    def _draw_circles(self, image, points, radius=0, color=None):
        for point in points:
            rad = radius if radius > 0 else self._size_point_pupil
            paint = self._gray_color if color is None else color
            cv2.circle(image, (point[0], point[1]), rad, paint, 2)
        return image

    def _pupil_process(self, path_exam):
        number_frame = 0
        system_continue = True
        exam = cv2.VideoCapture(path_exam)

        while system_continue:
            _, frame = exam.read()

            if (frame is None) or ((self._frame_stop > 0) and (number_frame >= self._frame_stop)):
                break

            if (self._frame_start > 0) and (number_frame < self._frame_start):
                number_frame += 1
                continue

            original = np.copy(frame)
            img_orig_gray = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
            self._save_images({'original': original}, number_frame)

            img_process, flash = self._process.process_image(original)
            self._save_images({'process': img_process}, number_frame)

            img_mean, img_std, img_median = img_process.mean(), img_process.std(), np.median(img_process)

            center, radius, points, images = self._pupil.pupil_detect(img_process)

            binary = images['binary_pre_process']
            binary = self._mark_center(binary, center)
            binary = self._draw_circles(binary, points, 2, self._white_color)
            self._save_images({'binary': binary}, number_frame, center)

            img_process = self._mark_center(img_process, center)
            img_process = self._draw_circles(img_process, points, 2, self._white_color)
            img_process = self._draw_circles(img_process, [(center[0], center[1])], radius, self._white_color)
            self._save_images({'img_process': img_process}, number_frame)

            self._save_histogram(images['histogram'], number_frame)

            img_presentation = cv2.hconcat([img_orig_gray, binary, img_process])
            label = 'Frame=%d;Radius=%d;Center=(%d,%d);Eye=(%d);Flash=(%d)' % (
                number_frame, radius, center[0], center[1], 0, flash)

            system_continue = self._show_image(img_presentation, label, number_frame)

            self._add_label("{},{},{},{},{},{},{},{},{}".format(
                number_frame, center[0], center[1], radius, flash, 0, img_mean, img_std, img_median))

            number_frame += 1

        cv2.destroyAllWindows()
        exam.release()

    def run(self):
        files = os.listdir(self._dataset_path)
        number_movie = 0
        for file in files:
            if (self._movie_stop > 0) and (number_movie >= self._movie_stop):
                break

            self._title_exam = file.replace('.mp4', '')
            self._path_dataset_out = '{}/{}'.format(self._dataset_out, self._title_exam)

            if (len(self._list_available) > 0) and (self._title_exam not in self._list_available):
                continue

            if self._title_exam in self._list_not_available:
                continue

            self._add_label(self._title_label)
            self._make_path()

            start_time = time.time()
            path_exam = '{}/{}'.format(self._dataset_path, file)
            self._pupil_process(path_exam)
            end_time = time.time()

            self._add_label('Execution time: {} minutes'.format((end_time - start_time) / 60))

            number_movie += 1
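A minimal driver sketch for the pupil-processing Main class above; it assumes the module is run where the hard-coded dataset directories from __init__ exist.

if __name__ == '__main__':
    main = Main()
    main.run()  # iterates the .mp4 exams in _dataset_path and writes frames and labels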
def __init__(self):
    self.process = ProcessImage(MIN_CONF_LEVEL, blur_level, FRAME_SIZE)
    self.action = Action()
    self.SKIP = False
    self.attempt = 1
    self.frame_with_no_face = 0
class Main:
    def __init__(self):
        self.process = ProcessImage(MIN_CONF_LEVEL, blur_level, FRAME_SIZE)
        self.action = Action()
        self.SKIP = False
        self.attempt = 1
        self.frame_with_no_face = 0

    def skipFrame(self, skip_time):
        self.SKIP = True
        skip = th.Timer(skip_time, self.unskip)
        skip.start()

    def unskip(self):
        self.SKIP = False

    def checkFaces(self, faces, frame):
        global AUTHORIZED
        for face in faces['faces']:
            confidence = "{:.2f}".format(face['top_prediction']['confidence'] * 100)
            if float(confidence) > MIN_CONF_LEVEL:
                timer = th.Timer(UNLOCK_TIME, setUnathorized)
                AUTHORIZED = True
                # Do something after authentication
                self.action.authorized(frame)
                print('[\033[1;32mSUCCESS\033[0;0m]\033[32m({} {}%) unlocking the system for {} sec\033[0m'
                      .format(face['top_prediction']['label'], confidence, UNLOCK_TIME))
                timer.start()
                break
        if not AUTHORIZED:
            self.attempt += 1
            print('[\033[1;33mFAILED\033[0;0m] {} Unknown face/s detected!'.format(len(faces['faces'])))
            if self.attempt > MAX_ATTEMPT:
                print('[\033[1;31mUNAUTHORIZED\033[0;0m] Sending message to the admin...')
                # Do something about an unauthorized person
                self.action.unauthorized(frame)

    def capture(self):
        self.attempt = 1
        frame_with_no_face = 0
        print('Start video capturing...')
        capture = cv2.VideoCapture("test_video.mp4")
        # capture = cv2.VideoCapture("http://192.168.31.10:8888")
        while True:
            success, frame = capture.read()
            if not success:
                print('End of frames')
                break
            if self.SKIP:
                continue
            # Resize the source image
            frame = self.process.reshape(frame)
            has_face, is_blur = self.process.detectFace(frame)
            if has_face and not is_blur:
                print('[\033[0;36mATTEMPT {}\033[0;0m] Trying to recognize...'.format(self.attempt))
                faces = self.action.onlineRecognition(frame)
                if len(faces['faces']) > 0:
                    self.checkFaces(faces, frame)
            frame_with_no_face = 0 if has_face else frame_with_no_face + 1
            self.skipFrame(.5)
            cv2.imshow('Camera Output', frame)
            if cv2.waitKey(1) & 0xFF == ord('q') or self.attempt > MAX_ATTEMPT or AUTHORIZED or frame_with_no_face > MAX_EMPTY:
                print('Stop capturing.')
                break
        capture.release()
        cv2.destroyAllWindows()

    def start(self):
        print('System is ready')
        while True:
            action = int(input('\n1. Start video capture\n2. Exit\n\nChoose: '))
            if action == 1:
                self.capture()
            else:
                print('Exiting the program.')
                break

    def __call__(self):
        self.capture()
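A minimal driver sketch for the face-recognition Main class above; it assumes the module-level names it references (MIN_CONF_LEVEL, blur_level, FRAME_SIZE, UNLOCK_TIME, MAX_ATTEMPT, MAX_EMPTY, AUTHORIZED, setUnathorized) are defined elsewhere in the original module.

if __name__ == '__main__':
    main = Main()
    main.start()  # interactive menu; calling main() would start capture directly via __call__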