import os
import base64
import operator
from os import listdir
from os.path import isfile, join

import cv2
import numpy as np

# NetworkFactory, FaceDetector, Classifier and DatasetAnalyzer are assumed to
# come from this repo's own modules; adjust the import paths to its layout.


def main(args):
    global face_detector
    global classifier_object

    if not args.checkpoint_path:
        raise ValueError('You must supply the checkpoint path with --checkpoint_path')
    if not os.path.exists(args.checkpoint_path):
        print('The checkpoint path is missing. Error processing the data source without the checkpoint path.')
        return False

    if not args.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    if not os.path.exists(args.dataset_dir):
        print('The dataset directory is missing. Error processing the data source without the dataset directory.')
        return False

    if args.model_root_dir:
        model_root_dir = args.model_root_dir
    else:
        model_root_dir = NetworkFactory.model_deploy_dir()

    last_network = 'ONet'
    face_detector = FaceDetector(last_network, model_root_dir)

    classifier_object = Classifier()
    if not classifier_object.load_dataset(args.dataset_dir):
        return False
    if not classifier_object.load_model(args.checkpoint_path, args.model_name, args.gpu_memory_fraction):
        return False

    image1 = cv2.imread(args.image1)
    image2 = cv2.imread(args.image2)

    i1 = features(image1)
    i2 = features(image2)
    if i1 is None or i2 is None:
        return False

    # Compare the two embeddings with each distance measure.
    print('L1 loss -', l1_loss(i1, i2))
    print('L2 loss -', l2_loss(i1, i2))
    print('Dot product -', dot_product(i1, i2))

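# --- Assumed helper implementations ------------------------------------------
# `features`, `l1_loss`, `l2_loss` and `dot_product` are called above but not
# defined in this file. A minimal sketch follows, assuming the loaded
# Classifier exposes an embedding method (hypothetical name:
# `extract_features`) that maps an image to a feature vector; adjust to the
# repo's actual API.


def features(image):
    # Hypothetical: delegate to the globally loaded classifier to obtain an
    # embedding for the image, then L2-normalize it so the distance measures
    # below stay in a small, comparable range.
    embedding = classifier_object.extract_features(image)
    return embedding / np.linalg.norm(embedding)


def l1_loss(f1, f2):
    # Mean absolute difference between two embeddings.
    return float(np.mean(np.abs(f1 - f2)))


def l2_loss(f1, f2):
    # Mean squared difference between two embeddings.
    return float(np.mean(np.square(f1 - f2)))


def dot_product(f1, f2):
    # Cosine similarity; for unit-norm embeddings this is the plain dot product.
    return float(np.dot(f1.ravel(), f2.ravel()))
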
def main(args):
    global classifier_object

    if not args.checkpoint_path:
        raise ValueError('You must supply the checkpoint path with --checkpoint_path')
    if not os.path.exists(args.checkpoint_path):
        print('The checkpoint path is missing. Error processing the data source without the checkpoint path.')
        return False

    if not args.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    if not os.path.exists(args.dataset_dir):
        print('The dataset directory is missing. Error processing the data source without the dataset directory.')
        return False

    classifier_object = Classifier()
    if not classifier_object.load_dataset(args.dataset_dir):
        return False
    if not classifier_object.load_model(args.checkpoint_path, args.model_name, args.gpu_memory_fraction):
        return False

    directory = args.directory
    onlyfiles = [f for f in listdir(directory) if isfile(join(directory, f))]
    print('Processing', len(onlyfiles), 'images.')

    images = [cv2.imread(join(directory, f)) for f in onlyfiles]

    # Track the observed range of each measure; the initial values assume
    # normalized embeddings, so every measure falls in [0, 1].
    min_l1, max_l1 = 1, 0
    min_l2, max_l2 = 1, 0
    min_dot, max_dot = 1, 0

    # Compare every pair of images (including each image with itself).
    for i in range(len(images)):
        for j in range(i, len(images)):
            i1 = features(images[i])
            i2 = features(images[j])

            result = l1_loss(i1, i2)
            min_l1 = min(min_l1, result)
            max_l1 = max(max_l1, result)

            result = l2_loss(i1, i2)
            min_l2 = min(min_l2, result)
            max_l2 = max(max_l2, result)

            result = dot_product(i1, i2)
            if result < 0.4:
                print('Non-similar files (', result, ') are -', onlyfiles[i], onlyfiles[j])
            min_dot = min(min_dot, result)
            max_dot = max(max_dot, result)

    print('L1 loss minimum value is', min_l1)
    print('L1 loss maximum value is', max_l1)
    print('L2 loss minimum value is', min_l2)
    print('L2 loss maximum value is', max_l2)
    print('arccosine loss minimum value is', min_dot)
    print('arccosine loss maximum value is', max_dot)

def main(args):
    probability_threshold = 50.0

    if not args.input_tsv_file:
        raise ValueError('You must supply input TSV file with --input_tsv_file.')
    if not args.output_tsv_file:
        raise ValueError('You must supply output TSV file with --output_tsv_file.')
    if not os.path.isfile(args.input_tsv_file):
        return False

    model_root_dir = NetworkFactory.model_deploy_dir()
    last_network = 'ONet'
    face_detector = FaceDetector(last_network, model_root_dir)

    classifier_object = Classifier()
    if not classifier_object.load_dataset(args.dataset_dir):
        return False
    if not classifier_object.load_model(args.checkpoint_path, args.model_name, args.gpu_memory_fraction):
        return False
    network_size = classifier_object.network_image_size()

    number_of_images = 0
    good_images = 0
    input_tsv_file = open(args.input_tsv_file, 'r')
    output_tsv_file = open(args.output_tsv_file, 'w')

    while True:
        input_data = input_tsv_file.readline().strip()
        if not input_data:
            break
        number_of_images += 1

        fields = input_data.split('\t')
        line_number = str(fields[0])
        image_string = fields[1]

        # Decode the base64-encoded image and convert BGR to RGB, which is
        # what the detector and classifier consume.
        decoded_image_string = base64.b64decode(image_string)
        image_data = np.frombuffer(decoded_image_string, dtype=np.uint8)
        input_image = cv2.imdecode(image_data, cv2.IMREAD_COLOR)
        height, width, channels = input_image.shape
        input_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)

        input_clone = np.copy(input_image)
        boxes_c, landmarks = face_detector.detect(input_clone)

        # Keep the detection with the highest face probability.
        face_probability = 0.0
        found = False
        crop_box = []
        for index in range(boxes_c.shape[0]):
            if boxes_c[index, 4] > face_probability:
                found = True
                face_probability = boxes_c[index, 4]
                bounding_box = boxes_c[index, :4]
                crop_box = [int(max(bounding_box[0], 0)),
                            int(max(bounding_box[1], 0)),
                            int(min(bounding_box[2], width)),
                            int(min(bounding_box[3], height))]
        if found:
            cropped_image = input_image[crop_box[1]:crop_box[3], crop_box[0]:crop_box[2], :]
        else:
            cropped_image = input_image

        resized_image = cv2.resize(cropped_image, (network_size, network_size), interpolation=cv2.INTER_LINEAR)

        class_names_probabilities = classifier_object.classify(resized_image, print_results=False)
        predicted_name = ''
        probability = 0.0
        if len(class_names_probabilities) > 0:
            names = list(map(operator.itemgetter(0), class_names_probabilities))
            probabilities = list(map(operator.itemgetter(1), class_names_probabilities))
            predicted_name = str(names[0])
            probability = probabilities[0]
            # Count the prediction as good if it clears the absolute threshold
            # or beats the mean of the next two candidates.
            if (probability > probability_threshold) or (probability > (probabilities[1] + probabilities[2]) / 2.0):
                good_images += 1

        print(number_of_images, ', predicted_name -', predicted_name, ', probability -', probability)
        print('Accuracy = ', (good_images * 100.0) / number_of_images, ' for ', number_of_images, ' images.')

        output_tsv_file.write(line_number + '\t' + str(predicted_name) + '\t' + str(probability) + '\n')

    input_tsv_file.close()
    output_tsv_file.close()

    if number_of_images > 0:
        print('Accuracy = ', (good_images * 100.0) / number_of_images, ' for ', number_of_images, ' images.')
    return True

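# --- Hypothetical entry point -------------------------------------------------
# A sketch of the argument parser this `main` expects; the flag names follow
# the attributes accessed above. Defaults are illustrative assumptions, not
# the repo's actual values.


def parse_arguments(argv):
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_tsv_file', type=str, default='',
                        help='Input TSV file with base64-encoded images.')
    parser.add_argument('--output_tsv_file', type=str, default='',
                        help='Output TSV file for predictions.')
    parser.add_argument('--dataset_dir', type=str, default='',
                        help='Directory containing the classifier dataset.')
    parser.add_argument('--checkpoint_path', type=str, default='',
                        help='Path of the classifier checkpoint.')
    parser.add_argument('--model_name', type=str, default='',
                        help='Name of the classifier model.')
    parser.add_argument('--gpu_memory_fraction', type=float, default=0.5,
                        help='Fraction of GPU memory to use.')
    return parser.parse_args(argv)

# Usage sketch (script name is illustrative):
#   python predict_tsv.py --input_tsv_file input.tsv --output_tsv_file output.tsv \
#       --dataset_dir dataset/ --checkpoint_path checkpoint/ --model_name model
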
def process(args):
    class_names = DatasetAnalyzer.read_class_names(args.class_name_file)
    if len(class_names) == 0:
        class_names = DatasetAnalyzer.get_class_names(args.source_dir)

    no_of_classes = len(class_names)
    if no_of_classes == 0:
        return False

    classifier_object = Classifier()
    if not classifier_object.load_dataset(args.dataset_dir):
        return False
    if not classifier_object.load_model(args.checkpoint_path, args.model_name, args.gpu_memory_fraction):
        return False

    source_path = os.path.expanduser(args.source_dir)
    target_path = os.path.expanduser(args.target_dir)

    good_files = 0
    bad_files = 0
    processed_classes = 0

    # Report progress every 5% of the classes, but no more often than every 10.
    show_result = int(max(no_of_classes * (5.0 / 100.0), 10))

    for class_name in class_names:
        source_class_dir = os.path.join(source_path, class_name)
        if not os.path.isdir(source_class_dir):
            continue

        image_file_names = os.listdir(source_class_dir)
        for image_file_name in image_file_names:
            source_filename = os.path.join(source_class_dir, image_file_name)
            if not os.path.isfile(source_filename):
                continue

            # cv2.imread returns None for unreadable files rather than raising.
            current_image = cv2.imread(source_filename, cv2.IMREAD_COLOR)
            if current_image is None:
                continue

            class_names_probabilities = classifier_object.classify(current_image, args.use_top)

            # The file is good if its own class appears among the top predictions.
            is_good = False
            for predicted_name, probability in class_names_probabilities:
                if predicted_name == class_name:
                    is_good = True
                    break

            if is_good:
                good_files += 1
            else:
                # Move misclassified files into a mirror directory tree.
                target_class_dir = os.path.join(target_path, class_name)
                if not os.path.exists(target_class_dir):
                    os.makedirs(target_class_dir)
                target_filename = os.path.join(target_class_dir, image_file_name)
                os.rename(source_filename, target_filename)
                bad_files += 1

        processed_classes += 1
        if processed_classes % show_result == 0:
            print(processed_classes, 'classes are processed.')

    print('Good files are - ' + str(good_files) + ' and bad files are - ' + str(bad_files))
    return True

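# --- Assumed DatasetAnalyzer helpers ------------------------------------------
# `DatasetAnalyzer.read_class_names` and `DatasetAnalyzer.get_class_names` live
# elsewhere in the repo. A minimal sketch of the assumed behavior, guessing
# that class names are listed one per line in a text file, or fall back to the
# subdirectory names of the source directory:


class DatasetAnalyzer:

    @staticmethod
    def read_class_names(class_name_file):
        # Return class names listed one per line; empty list if no usable file.
        if not class_name_file or not os.path.isfile(class_name_file):
            return []
        with open(class_name_file, 'r') as f:
            return [line.strip() for line in f if line.strip()]

    @staticmethod
    def get_class_names(source_dir):
        # Fall back to one class per subdirectory of the source directory.
        source_path = os.path.expanduser(source_dir)
        return sorted(entry for entry in os.listdir(source_path)
                      if os.path.isdir(os.path.join(source_path, entry)))
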
def main(args):
    if not args.checkpoint_path:
        raise ValueError('You must supply the checkpoint path with --checkpoint_path')
    if not os.path.exists(args.checkpoint_path):
        print('The checkpoint path is missing. Error processing the data source without the checkpoint path.')
        return False

    if not args.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    if not os.path.exists(args.dataset_dir):
        print('The dataset directory is missing. Error processing the data source without the dataset directory.')
        return False

    if args.model_root_dir:
        model_root_dir = args.model_root_dir
    else:
        model_root_dir = NetworkFactory.model_deploy_dir()

    last_network = 'ONet'
    face_detector = FaceDetector(last_network, model_root_dir)

    classifier_object = Classifier()
    if not classifier_object.load_dataset(args.dataset_dir):
        return False
    if not classifier_object.load_model(args.checkpoint_path, args.model_name, args.gpu_memory_fraction):
        return False

    webcamera = cv2.VideoCapture(args.webcamera_id)
    webcamera.set(cv2.CAP_PROP_FRAME_WIDTH, 600)
    webcamera.set(cv2.CAP_PROP_FRAME_HEIGHT, 800)

    face_probability = 0.75
    minimum_face_size = 24
    while True:
        start_time = cv2.getTickCount()
        status, current_frame = webcamera.read()
        if status:
            current_image = np.array(current_frame)
            image_clone = np.copy(current_image)

            boxes_c, landmarks = face_detector.detect(image_clone)

            # Overlay the frame rate on the displayed frame.
            end_time = cv2.getTickCount()
            time_duration = (end_time - start_time) / cv2.getTickFrequency()
            frames_per_sec = 1.0 / time_duration
            cv2.putText(image_clone, '{:.2f} FPS'.format(frames_per_sec),
                        (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)

            for index in range(boxes_c.shape[0]):
                bounding_box = boxes_c[index, :4]
                probability = boxes_c[index, 4]
                if probability > face_probability:
                    height, width, channels = image_clone.shape

                    # Clip the bounding box to the frame and crop the face.
                    crop_box = [int(max(bounding_box[0], 0)),
                                int(max(bounding_box[1], 0)),
                                int(min(bounding_box[2], width)),
                                int(min(bounding_box[3], height))]
                    cropped_image = image_clone[crop_box[1]:crop_box[3], crop_box[0]:crop_box[2], :]

                    crop_height, crop_width, crop_channels = cropped_image.shape
                    if (crop_height < minimum_face_size) or (crop_width < minimum_face_size):
                        continue

                    cv2.rectangle(image_clone, (crop_box[0], crop_box[1]),
                                  (crop_box[2], crop_box[3]), (0, 255, 0), 1)

                    # Classify the cropped face and label it if confident enough.
                    class_names_probabilities = classifier_object.classify(cropped_image, 1)
                    predicted_name = class_names_probabilities[0][0]
                    probability = class_names_probabilities[0][1]
                    if probability > args.threshold:
                        cv2.putText(image_clone,
                                    predicted_name + ' - {:.2f}'.format(probability),
                                    (crop_box[0], crop_box[1] - 2),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

            cv2.imshow('', image_clone)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            print('Error detecting the webcamera.')
            break

    webcamera.release()
    cv2.destroyAllWindows()

def main(args):
    output_dir = os.path.expanduser(args.output_dir)
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    is_processed = {}
    probability_threshold = [0.95, 0.90, 0.85, 0.80]

    if not args.input_tsv_file:
        raise ValueError('You must supply input TSV file with --input_tsv_file.')
    if not os.path.isfile(args.input_tsv_file):
        return False

    model_root_dir = NetworkFactory.model_deploy_dir()
    last_network = 'ONet'
    face_detector = FaceDetector(last_network, model_root_dir)

    classifier_object = Classifier()
    if not classifier_object.load_dataset(args.dataset_dir):
        return False
    if not classifier_object.load_model(args.checkpoint_path, args.model_name, args.gpu_memory_fraction):
        return False
    network_size = classifier_object.network_image_size()

    celebrity_count = 0
    # Make several passes over the file, relaxing the probability threshold
    # each time, so every class gets its most confident image first.
    for current_threshold in probability_threshold:
        input_tsv_file = open(args.input_tsv_file, 'r')
        while True:
            input_data = input_tsv_file.readline().strip()
            if not input_data:
                break

            fields = input_data.split('\t')
            class_name = str(fields[2])
            if class_name in is_processed:
                continue

            image_string = fields[1]
            image_search_rank = fields[3]
            decoded_image_string = base64.b64decode(image_string)
            image_data = np.frombuffer(decoded_image_string, dtype=np.uint8)
            input_image = cv2.imdecode(image_data, cv2.IMREAD_COLOR)
            height, width, channels = input_image.shape

            class_dir = fields[2]
            img_name = class_dir + '.png'

            # Convert BGR to RGB, which is what the detector and classifier consume.
            input_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)
            input_clone = np.copy(input_image)
            boxes_c, landmarks = face_detector.detect(input_clone)

            # Keep the detection with the highest face probability.
            face_probability = 0.0
            found = False
            crop_box = []
            for index in range(boxes_c.shape[0]):
                if boxes_c[index, 4] > face_probability:
                    found = True
                    face_probability = boxes_c[index, 4]
                    bounding_box = boxes_c[index, :4]
                    crop_box = [int(max(bounding_box[0], 0)),
                                int(max(bounding_box[1], 0)),
                                int(min(bounding_box[2], width)),
                                int(min(bounding_box[3], height))]
            if found:
                cropped_image = input_image[crop_box[1]:crop_box[3], crop_box[0]:crop_box[2], :]
            else:
                cropped_image = input_image

            resized_image = cv2.resize(cropped_image, (network_size, network_size), interpolation=cv2.INTER_LINEAR)

            class_names_probabilities = classifier_object.classify(resized_image, print_results=False)
            predicted_name = ''
            probability = 0.0
            if len(class_names_probabilities) > 0:
                names = list(map(operator.itemgetter(0), class_names_probabilities))
                probabilities = list(map(operator.itemgetter(1), class_names_probabilities))
                predicted_name = str(names[0])
                probability = probabilities[0]

            if class_name != predicted_name:
                continue
            if probability < current_threshold:
                continue

            is_processed[class_name] = True
            full_class_dir = os.path.join(output_dir, class_dir)
            if not os.path.exists(full_class_dir):
                os.mkdir(full_class_dir)
            celebrity_count += 1

            # Convert back to BGR before writing with OpenCV.
            full_path = os.path.join(full_class_dir, img_name)
            cv2.imwrite(full_path, cv2.cvtColor(resized_image, cv2.COLOR_RGB2BGR))

        input_tsv_file.close()

    print('Processed ', celebrity_count, 'celebrities.')
    return True