def process_videos(reprocess):
    """Convert every .mjpeg file under a user-selected folder to .mjpegx.

    reprocess: when False, files that already have a sibling .mjpegx file
    are skipped; when True they are converted (and overwritten) again.
    """
    print('Initializing main function')
    print('Warning! This routine will overwrite the selected files')
    init_dir = '/home/mauricio/Videos/Oviedo/'
    if platform == 'win32':
        init_dir = 'C:\\SharedFTP\\Videos\\'
    options = {'initialdir': init_dir}
    folder = filedialog.askdirectory(**options)
    # Fix: askdirectory returns '' (not None) when the dialog is cancelled,
    # so the original "is None" test could never detect a cancellation.
    if not folder:
        print('Folder not selected')
    else:
        print(folder)
        print('Extracting all mjpeg files')
        for root, _, files in os.walk(folder):
            for file in files:
                full_path = os.path.join(root, file)
                extension = ClassUtils.get_filename_extension(full_path)
                if extension == '.mjpeg':
                    if not reprocess:
                        # Skip files that already have a converted sibling
                        mjpegx_path = full_path.replace(".mjpeg", ".mjpegx")
                        if os.path.exists(mjpegx_path):
                            print('Ignoring already converted file {0}'.format(mjpegx_path))
                            continue
                    print('Converting ' + full_path)
                    ClassMjpegConverter.convert_video_mjpeg(full_path)
        print('Done!')
def pre_process_images(list_folders_scores, recalculate): print('Start pre_processing images') # Loading instances instance_pose = ClassOpenPose() for folder, min_score in list_folders_scores: for file in os.listdir(folder): full_path = os.path.join(folder, file) extension = ClassUtils.get_filename_extension(full_path) if extension != '.jpg': print('Ignoring file {0}'.format(full_path)) continue file_no_ext = ClassUtils.get_filename_no_extension(full_path) arr_file_name = os.path.join(folder, '{0}.json'.format(file_no_ext)) # If image recalculation if not recalculate: if os.path.isfile(arr_file_name): print('File already processed {0}'.format(full_path)) continue # Processing file print('Processing file {0}'.format(full_path)) image = cv2.imread(full_path) arr, img_draw = instance_pose.recognize_image_tuple(image) arr_pass = list() # Checking vector integrity for all elements # Verify there is at least one arm and one leg for elem in arr: if ClassUtils.check_vector_integrity_part(elem, min_score): arr_pass.append(elem) # If there is more than one person with vector integrity if len(arr_pass) != 1: for elem in arr_pass: pt1, pt2 = ClassUtils.get_rectangle_bounds(elem, ClassUtils.MIN_POSE_SCORE) cv2.rectangle(img_draw, pt1, pt2, (0, 0, 255), 3) cv2.namedWindow('main_window') cv2.imshow('main_window', img_draw) print(arr) print(arr_pass) cv2.waitKey(0) cv2.destroyAllWindows() raise Exception('Invalid len: {0} file {1}'.format(len(arr_pass), full_path)) person_arr = arr_pass[0] arr_str = json.dumps(person_arr.tolist()) with open(arr_file_name, 'w') as text_file: text_file.write(arr_str) print('Done!')
def test_color_compare():
    """Compare the color histogram of one reference image against all examples.

    Loads a single reference image interactively (via
    ClassDescriptors.load_images_comparision_ext), then walks
    EXAMPLES_FOLDER, computing the k-means color difference against each
    sample's pre-computed 'histPose'. A diff <= 15 is reported as a match.

    Depends on module-level globals: EXAMPLES_FOLDER, min_score.
    """
    print('Test color comparision')
    print('Loading image comparision')
    # Loading instances
    instance_pose = ClassOpenPose()
    # Avoid to open two prompts
    obj_img = ClassDescriptors.load_images_comparision_ext(instance_pose, min_score, load_one_img=True)
    hist_pose1 = obj_img['listPoints1']
    list_process = list()
    # Iterating in examples folder
    for root, _, files in os.walk(EXAMPLES_FOLDER):
        for file in files:
            full_path = os.path.join(root, file)
            extension = ClassUtils.get_filename_extension(full_path)
            if extension == '.jpg':
                list_process.append(full_path)
    # Sorting list
    list_process.sort()
    list_result = list()
    # NOTE(review): score_max_pt is never updated below, so the final print
    # always reports -1 — looks like leftover scaffolding; confirm intent.
    score_max_pt = -1
    for full_path in list_process:
        print('Processing file: {0}'.format(full_path))
        # Each example image has a sibling .json with its histogram data
        json_path = full_path.replace('.jpg', '.json')
        with open(json_path, 'r') as f:
            obj_json = json.loads(f.read())
        his_pose2 = obj_json['histPose']
        diff = ClassDescriptors.get_kmeans_diff(hist_pose1, his_pose2)
        print('Diff {0}'.format(diff))
        # Threshold of 15 decides a color match (True/False score)
        if diff <= 15:
            res = True
        else:
            res = False
        list_result.append({
            'filename': ClassUtils.get_filename_no_extension(full_path),
            'score': res
        })
    # list_result.sort(key=lambda x: x['score'])
    print('Printing list result')
    print(json.dumps(list_result, indent=2))
    print('min_score: {0}'.format(score_max_pt))
    print('Done!')
def cnn_image_generation_folder():
    """Generate the six CNN input image variants for every pose .json file.

    Walks ClassUtils.cnn_class_folder and, for each .json descriptor, writes
    six .bmp renderings next to it, distinguished by suffix:
        _p positions, _a angles, _s pose (needs the pose NN),
        _b angles on black, _o positions on black, _r positions on black
        with removed points.

    Raises Exception if a file path contains 'ori' (original folder must
    not be processed).
    """
    # Initializing instance nn
    list_folders = list()
    list_folders.append(ClassUtils.cnn_class_folder)
    # classes_number / hidden_layers are module-level globals
    instance_nn = ClassNN(ClassNN.model_dir_pose, classes_number, hidden_layers)
    # File walk
    for folder in list_folders:
        for root, _, files in os.walk(folder):
            for file in files:
                full_path = os.path.join(root, file)
                extension = ClassUtils.get_filename_extension(full_path)
                if extension == '.json':
                    print('Processing: {0}'.format(full_path))
                    if 'ori' in full_path:
                        raise Exception('Full path contains ori folder!')
                    with open(full_path, 'r') as f:
                        json_txt = f.read()
                    json_data = json.loads(json_txt)
                    # All image generation - one output name per variant
                    image_name_pos = ClassUtils.get_filename_no_extension(full_path) + '_p.bmp'
                    image_name_angle = ClassUtils.get_filename_no_extension(full_path) + '_a.bmp'
                    image_name_pose = ClassUtils.get_filename_no_extension(full_path) + '_s.bmp'
                    image_name_angles_black = ClassUtils.get_filename_no_extension(full_path) + '_b.bmp'
                    image_name_pos_black = ClassUtils.get_filename_no_extension(full_path) + '_o.bmp'
                    image_name_pos_black_rem = ClassUtils.get_filename_no_extension(full_path) + '_r.bmp'
                    image_res_pos = create_cnn_image_pos(json_data)
                    image_res_angle = create_cnn_image_angles(json_data)
                    image_res_pose = create_cnn_image_pose(json_data, instance_nn)
                    image_res_angles_black = create_image_cnn_angles_black(json_data)
                    image_res_pos_black = create_cnn_image_pos_black(json_data)
                    image_res_pos_black_rem = create_cnn_image_pos_black_rem(json_data)
                    # print('Writing image pos: {0}'.format(image_name_pos))
                    cv2.imwrite(image_name_pos, image_res_pos)
                    # print('Writing image angle: {0}'.format(image_name_angle))
                    cv2.imwrite(image_name_angle, image_res_angle)
                    # print('Writing image pose: {0}'.format(image_name_pose))
                    cv2.imwrite(image_name_pose, image_res_pose)
                    # print('Writing image angles black: {0}'.format(image_name_angles_black))
                    cv2.imwrite(image_name_angles_black, image_res_angles_black)
                    # print('Writing image pos black: {0}'.format(image_name_pos_black))
                    cv2.imwrite(image_name_pos_black, image_res_pos_black)
                    print('Writing image pos black rem: {0}'.format(image_name_pos_black_rem))
                    cv2.imwrite(image_name_pos_black_rem, image_res_pos_black_rem)
    print('Done!')
def get_poses_seq(folder: str, instance_nn: ClassNN, instance_pose: ClassOpenPose, only_json=False):
    """Build pose descriptors for every frame in *folder* and classify them.

    folder: directory containing either .json pose files (only_json=True)
        or .jpg frames to run through OpenPose (only_json=False).
    instance_nn: classifier whose predict_model_array is applied to the
        stacked descriptors.
    instance_pose: OpenPose wrapper, used only when only_json is False.

    Returns the list of predicted class ids, one per accepted file.
    Depends on module-level global min_score.
    """
    # List all folders
    list_files = []
    for file in os.listdir(folder):
        list_files.append(os.path.join(folder, file))
    # Sorting elements keeps frame order deterministic
    list_files.sort()
    # Get elements
    list_desc = list()
    for path in list_files:
        ext = ClassUtils.get_filename_extension(path)
        if only_json:
            if ext != '.json':
                print('Ignoring file: {0}'.format(path))
                continue
            with open(path, 'r') as file:
                person_arr_str = file.read()
                person_arr = json.loads(person_arr_str)
        else:
            if ext != '.jpg':
                print('Ignoring file: {0}'.format(path))
                continue
            print('Processing file {0}'.format(path))
            image = cv2.imread(path)
            arr = instance_pose.recognize_image(image)
            arr_pass = []
            for person_arr in arr:
                if ClassUtils.check_vector_integrity_part(person_arr, min_score):
                    arr_pass.append(person_arr)
            # Frames with zero or several valid people are skipped
            if len(arr_pass) != 1:
                print('Ignoring file {0} - Len arr_pass: {1}'.format(path, len(arr_pass)))
                continue
            person_arr = arr_pass[0]
        result_desc = ClassDescriptors.get_person_descriptors(person_arr, min_score)
        list_desc.append(result_desc['fullDesc'])
    # Fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    list_desc_np = np.asarray(list_desc, float)
    results = instance_nn.predict_model_array(list_desc_np)
    list_classes = []
    for result in results:
        list_classes.append(result['classes'])
    return list_classes
def reprocess_images(list_folder_data):
    """Recompute descriptors for every pose .json file and rewrite it in place.

    list_folder_data: iterable of (folder, min_score, ...) tuples; only the
        first two positions are read here.

    Each .json file is overwritten with the full descriptor dict, and a
    '<name>_1.jpg' debug rendering of the re-scaled pose is written next
    to it. Raises Exception on integrity failures.
    Depends on module-level global seed (shuffle reproducibility).
    """
    print('Init reprocess_images')
    for index, item in enumerate(list_folder_data):
        folder = item[0]
        min_score = item[1]
        print('Processing folder: {0}'.format(folder))
        list_files = os.listdir(folder)
        # Deterministic shuffle so runs are reproducible
        random.Random(seed).shuffle(list_files)
        for num_file, filename in enumerate(list_files):
            file = os.path.join(folder, filename)
            extension = ClassUtils.get_filename_extension(file)
            if extension == '.json':
                with open(file, 'r') as f:
                    data_str = f.read()
                data_json = json.loads(data_str)
                # Two on-disk formats: new files wrap the array in 'vectors'
                if 'vectors' in data_json:
                    print('Processing json file with new format: {0}'.format(file))
                    person_arr = data_json['vectors']
                else:
                    print('Processing json file: {0}'.format(file))
                    person_arr = data_json
                valid = ClassUtils.check_vector_integrity_pos(person_arr, min_score)
                only_pos = ClassUtils.check_vector_only_pos(person_arr, min_score)
                if not valid:
                    raise Exception('Vector integrity not valid for file: {0}'.format(file))
                if only_pos:
                    raise Exception('Invalid vector to perform detection')
                descriptors = ClassDescriptors.get_person_descriptors(person_arr, min_score,
                                                                      cam_number=0,
                                                                      image=None,
                                                                      calib_params=None,
                                                                      decode_img=False,
                                                                      instance_nn_pose=None)
                # Overwrite the source file with the recomputed descriptors
                with open(file, 'w') as f:
                    f.write(json.dumps(descriptors))
                transformed_points = descriptors['transformedPoints']
                # Save pose for debugging purposes
                re_scale_factor = 100
                new_points = ClassDescriptors.re_scale_pose_factor(transformed_points, re_scale_factor, min_score)
                img_pose = ClassDescriptors.draw_pose_image(new_points, min_score, is_transformed=True)
                new_file_name = ClassUtils.get_filename_no_extension(file) + '_1.jpg'
                cv2.imwrite(new_file_name, img_pose)
    print('Done!')
def delete_prev_json_files(list_folders_scores):
    """Remove every .json file from each folder in *list_folders_scores*.

    list_folders_scores: iterable of (folder, score) pairs; the score part
    is ignored here, only the folder path is used.
    """
    print('Deleting previous json files')
    for folder, _ in list_folders_scores:
        print('Processing folder {0}'.format(folder))
        for entry in os.listdir(folder):
            candidate = os.path.join(folder, entry)
            # Only generated .json artifacts are removed; everything else stays
            if ClassUtils.get_filename_extension(candidate) != '.json':
                continue
            print('Deleting file {0}'.format(candidate))
            os.remove(candidate)
    print('Done deleting files!')
def calculate_poses(option: Option, nn_classifier: ClassNN, svm_classifier: ClassSVM):
    """Classify every pose in all '_rawdata' json files and save the results.

    option: selects the classifier backend (Option.NN -> nn_classifier,
        anything else -> svm_classifier).
    nn_classifier / svm_classifier: trained models exposing
        predict_model_fast / predict_model respectively.

    Writes a new '<option>_posedata' json next to each rawdata file with
    'class' and 'probabilities' filled in per pose.
    Depends on module-level global list_classes.
    """
    print('Calculating poses using nn')
    # Recalculate all poses and get confidence
    for classInfo in list_classes:
        folder = classInfo['folderPath']
        for root, _, files in os.walk(folder):
            for file in files:
                full_path = os.path.join(root, file)
                ext = ClassUtils.get_filename_extension(full_path)
                if '_rawdata' in file and ext == '.json':
                    print('Processing file: {0}'.format(full_path))
                    with open(full_path, 'r') as f:
                        file_txt = f.read()
                    file_json = json.loads(file_txt)
                    list_poses = file_json['listPoses']
                    for pose in list_poses:
                        angles = pose['angles']
                        transformed_points = pose['transformedPoints']
                        # Using transformed points and angles
                        # NOTE(review): 'angles' is read but never added to
                        # list_desc, unlike cnn_reprocess_images which feeds
                        # angles + points — confirm whether that is intended.
                        list_desc = list()
                        list_desc += ClassUtils.get_flat_list(transformed_points)
                        # Convert to numpy.
                        # Fix: np.float was removed in NumPy 1.24; use float.
                        list_desc_np = np.asanyarray(list_desc, float)
                        if option == Option.NN:
                            result = nn_classifier.predict_model_fast(list_desc_np)
                        else:
                            result = svm_classifier.predict_model(list_desc_np)
                        pose['class'] = int(result['classes'])
                        pose['probabilities'] = result['probabilities'].tolist()
                    # Writing again into file
                    file_txt = json.dumps(file_json, indent=4)
                    new_full_path = ClassUtils.change_ext_training(full_path,
                                                                   '{0}_posedata'.format(option.value))
                    with open(new_full_path, 'w') as f:
                        f.write(file_txt)
    # Done
    print('Done processing elements')
def cnn_reprocess_images():
    """Re-run the pose NN over every json file in the CNN class folder.

    For each pose in each file, rebuilds the descriptor (angles + flattened
    transformed points), predicts the key pose, stores it as 'keyPose' with
    'probability' fixed to 1, and rewrites the file in place.
    Depends on module-level globals classes_number and hidden_layers.
    """
    print('Re processing images')
    list_folders = list()
    list_folders.append(ClassUtils.cnn_class_folder)
    # Loading instances
    instance_nn = ClassNN(ClassNN.model_dir_pose, classes_number, hidden_layers)
    # File walk
    count = 0
    for folder in list_folders:
        for root, _, files in os.walk(folder):
            for file in files:
                full_path = os.path.join(root, file)
                extension = ClassUtils.get_filename_extension(full_path)
                if extension == '.json':
                    print('Processing file: {0}'.format(full_path))
                    with open(full_path, 'r') as f:
                        json_txt = f.read()
                    json_data = json.loads(json_txt)
                    list_poses = json_data['listPoses']
                    for pose in list_poses:
                        angles = pose['angles']
                        transformed_points = pose['transformedPoints']
                        # Descriptor = angles followed by flattened points
                        list_desc = list()
                        list_desc += angles
                        list_desc += ClassUtils.get_flat_list(transformed_points)
                        # Fix: np.float was removed in NumPy 1.24; use float.
                        list_desc_np = np.asanyarray(list_desc, dtype=float)
                        res = instance_nn.predict_model_fast(list_desc_np)
                        pose['keyPose'] = int(res['classes'])
                        pose['probability'] = 1
                    # Writing data again
                    data_txt = json.dumps(json_data, indent=2)
                    with open(full_path, 'w') as f:
                        f.write(data_txt)
                    count += 1
    print('Done')
    print('Total files processed: {0}'.format(count))
def re_process_folder():
    """Recompute descriptors for every .jpg/.json pair in a selected folder.

    Prompts for a folder, then for each .jpg reads its sibling .json
    (accepting both 'vectors' and legacy 'vector' keys), recomputes the
    person descriptors against the image, and rewrites the json in place.

    Raises Exception when no folder is selected or a json lacks the vector.
    Depends on module-level global min_score.
    """
    print('Init folder processing')
    init_dir = '/home/mauricio/Pictures/BTF'
    options = {'initialdir': init_dir}
    folder = filedialog.askdirectory(**options)
    # Fix: askdirectory returns '' (not None) on cancel, so the original
    # "is None" guard could never trigger the exception.
    if not folder:
        raise Exception('Folder not selected!')
    files = os.listdir(folder)
    for file in files:
        full_path = os.path.join(folder, file)
        ext = ClassUtils.get_filename_extension(full_path)
        if ext == '.jpg':
            print('Processing file: {0}'.format(full_path))
            file_json = ClassUtils.get_filename_no_extension(full_path) + '.json'
            with open(file_json, 'r') as f:
                json_txt = f.read()
            json_data = json.loads(json_txt)
            # Support both current and legacy key names
            if 'vectors' in json_data:
                vectors = json_data['vectors']
            elif 'vector' in json_data:
                vectors = json_data['vector']
            else:
                raise Exception('Vector not found!')
            img_cv = cv2.imread(full_path)
            param = ClassDescriptors.get_person_descriptors(vectors, min_score,
                                                            image=img_cv,
                                                            decode_img=False)
            with open(file_json, 'w') as f:
                f.write(json.dumps(param, indent=2))
    print('Done!')
def main():
    """Poll the video base path forever, converting .mjpeg files every 5 s."""
    print('Initializing conversion function')
    folder = ClassUtils.video_base_path
    while True:
        print('Checking videos')
        for root, _, files in os.walk(folder):
            for file in files:
                full_path = os.path.join(root, file)
                extension = ClassUtils.get_filename_extension(full_path)
                if extension == '.mjpeg':
                    print(file)
                    print('Reprocessing ' + full_path + ' to mjpegx')
                    # NOTE(review): camera_number is computed but unused, and
                    # the log says "to mjpegx" while convert_video_mjpeg (not
                    # convert_video_mjpegx) is called — compare the sibling
                    # routine that passes (full_path, camera_number, nn) to
                    # convert_video_mjpegx. Confirm which converter is meant.
                    camera_number = ClassUtils.get_cam_number_from_path(full_path)
                    ClassMjpegConverter.convert_video_mjpeg(full_path)
        print('Done checking - Waiting for 5 secs')
        time.sleep(5)
def reprocess_list_partial(): print('Reprocess list partial') # Loading zone calib info with open(ClassUtils.zone_calib_path, 'r') as f: zone_txt = f.read() zone_data = json.loads(zone_txt) # Checking for data in all folders and reprocess for idx_cls, classInfo in enumerate(list_classes): folder = classInfo['folderPath'] for root, _, files in os.walk(folder): for file in files: full_path = os.path.join(root, file) ext = ClassUtils.get_filename_extension(full_path) if ext == '.json' and '_posedata' in file: print('Processing file: {0}'.format(full_path)) process_list_partial(full_path, zone_data, idx_cls)
def main():
    """Left-pad numeric .jpg file names with zeros up to total_digits chars.

    Prompts for a directory, then renames every '<name>.jpg' whose base name
    is shorter than the module-level total_digits so names sort correctly.
    """
    print('Initializing main function')
    # Withdrawing TKInter
    Tk().withdraw()
    # Ask for directory
    init_dir = '/home/mauricio/CNN/Images'
    options = {'initialdir': init_dir}
    dir_name = filedialog.askdirectory(**options)
    if not dir_name:
        print('Directory not selected')
    else:
        for root, subdirs, files in os.walk(dir_name):
            for file in files:
                full_path = os.path.join(root, file)
                print('Processing {0}'.format(full_path))
                extension = ClassUtils.get_filename_extension(file)
                if extension != '.jpg':
                    print('Ignoring file no jpg {0}'.format(full_path))
                    continue
                name = ClassUtils.get_filename_no_extension(file)
                if len(name) >= total_digits:
                    continue
                # rjust pads on the left with '0' up to total_digits chars,
                # exactly like the original manual zero loop
                padded_name = name.rjust(total_digits, '0') + extension
                new_full_path = os.path.join(root, padded_name)
                print('Rename file {0} to {1}'.format(full_path, new_full_path))
                os.rename(full_path, new_full_path)
        print('Done!')
def read_hists(folder_name, list_files):
    """Accumulate per-channel 256-bin histograms from json files.

    folder_name: directory containing the files.
    list_files: file names (relative to folder_name); non-.json entries are
        skipped.

    Returns (hist_red, hist_green, hist_blue), each a list of 256 summed
    bin counts across all files.
    """
    hist_red = [0 for _ in range(256)]
    hist_green = [0 for _ in range(256)]
    hist_blue = [0 for _ in range(256)]
    for path in list_files:
        fullname = os.path.join(folder_name, path)
        ext = ClassUtils.get_filename_extension(fullname)
        if ext == '.json':
            with open(fullname, 'r') as f:
                dict_json_str = f.read()
            dict_json = json.loads(dict_json_str)
            hists = dict_json['hists']
            # Fix: the original accumulated hists[0] into all three channels,
            # so green and blue were copies of red. Index each channel.
            # Channel order (0=red) follows the original hist_red naming —
            # TODO confirm against the writer of the 'hists' field.
            for i in range(256):
                hist_red[i] += hists[0][i]
                hist_green[i] += hists[1][i]
                hist_blue[i] += hists[2][i]
    return hist_red, hist_green, hist_blue
def main():
    """Re-convert every .mjpegx file in a selected folder using the pose NN.

    Prompts for a folder, instantiates the pose classifier once, then walks
    the tree calling ClassMjpegConverter.convert_video_mjpegx on each
    .mjpegx file with its camera number (derived from the path).
    """
    print('Initializing main function')
    print('Warning - You must convert to mjpegx first')
    # Withdrawing Tkinter window
    Tk().withdraw()
    init_dir = '/home/mauricio/Videos/Oviedo/'
    options = {'initialdir': init_dir}
    folder = filedialog.askdirectory(**options)
    # Fix: askdirectory returns '' (not None) on cancel, so the original
    # "is None" test never detected a cancelled dialog.
    if not folder:
        print('Folder not selected')
    else:
        print(folder)
        # Initializing pose instance
        instance_nn_pose = ClassNN(model_dir=ClassNN.model_dir_pose,
                                   classes=ClassNN.classes_num_pose,
                                   hidden_number=ClassNN.hidden_num_pose)
        print('Extracting all mjpegx files')
        for root, _, files in os.walk(folder):
            for file in files:
                full_path = os.path.join(root, file)
                extension = ClassUtils.get_filename_extension(full_path)
                if extension == '.mjpegx':
                    print(file)
                    print('Reprocessing ' + full_path + ' to mjpegx')
                    camera_number = ClassUtils.get_cam_number_from_path(full_path)
                    ClassMjpegConverter.convert_video_mjpegx(full_path,
                                                             camera_number,
                                                             instance_nn_pose)
def __init__(self, model_path: str):
    """Initialize the HMM wrapper, loading a persisted model if present.

    model_path: path to the pickled model; must end in '.pkl'. The label
    mapping is expected in a sibling '.json' file with the same base name.

    Raises Exception when the extension is not '.pkl'.
    """
    self.model = None  # type: hmm.MultinomialHMM
    if ClassUtils.get_filename_extension(model_path) != '.pkl':
        raise Exception('Extension of model must be .pkl')
    self.model_path = model_path
    self.list_mapping = []
    if not os.path.exists(model_path):
        # Nothing persisted yet - the caller must train before predicting
        print('Model {0} must be trained'.format(self.model_path))
        return
    print('Loading model from {0}'.format(model_path))
    self.model = joblib.load(model_path)
    map_path = model_path.replace('.pkl', '.json')
    print('Loading list_mapping from {0}'.format(map_path))
    with open(map_path, 'r') as handle:
        self.list_mapping = json.loads(handle.read())
def classify_markov():
    """Build key-pose sequences per class and train/evaluate HMM models.

    Collects the 'keyPose' sequence of every json file under each folder in
    the module-level list_folder_data, splits 80/20 into train/eval after a
    seeded shuffle, then interactively dispatches to train_markov,
    eval_markov (pre-loading one ClassHMM per class into the global
    hmm_models), or train_markov_iter.

    Raises Exception for an unrecognized menu option.
    """
    global hmm_models
    print('Init classification using markov')
    training_data = list()
    training_labels = list()
    training_files = list()
    eval_data = list()
    eval_labels = list()
    eval_files = list()
    # Loading classes
    for index, item in enumerate(list_folder_data):
        folder = item['folderPath']
        label = item['label']
        num_file = 0
        list_paths = list()
        for root, _, files in os.walk(folder):
            for file in files:
                full_path = os.path.join(root, file)
                ext = ClassUtils.get_filename_extension(full_path)
                if ext == '.json':
                    list_paths.append(full_path)
        total_samples = len(list_paths)
        # 80/20 train/eval split per class
        total_train = int(total_samples * 80 / 100)
        # Shuffle samples
        random.Random(seed).shuffle(list_paths)
        for full_path in list_paths:
            list_key_poses = list()
            with open(full_path, 'r') as f:
                json_txt = f.read()
            json_data = json.loads(json_txt)
            list_poses = json_data['listPoses']
            # The observation sequence is just the key-pose id per frame
            for pose in list_poses:
                list_key_poses.append(pose['keyPose'])
            if num_file < total_train:
                training_data.append(list_key_poses)
                training_labels.append(label)
                training_files.append(full_path)
            else:
                eval_data.append(list_key_poses)
                eval_labels.append(label)
                eval_files.append(full_path)
            num_file += 1
    print('Total training: {0}'.format(len(training_data)))
    print('Total eval: {0}'.format(len(eval_data)))
    res = input('Press 1 to train. Press 2 to eval - Press 3 to train iter: ')
    if res == '1':
        train_markov(training_data, training_labels, training_files, eval_data, eval_labels)
    elif res == '2':
        # Pre-loading models
        for i in range(len(list_folder_data)):
            model_path = os.path.join(ClassHMM.model_hmm_folder_action, 'model{0}.pkl'.format(i))
            hmm_model = ClassHMM(model_path)
            hmm_models.append(hmm_model)
        eval_markov(eval_data, eval_labels)
    elif res == '3':
        train_markov_iter(training_data, training_labels, training_files, eval_data, eval_labels)
    else:
        raise Exception('Option not implemented: {0}'.format(res))
def test_color_compare_hist(perform_eq=False):
    """Compare a reference image's color points against each example image.

    perform_eq: when True, histogram-equalize images before comparison and
        ignore the json-stored colors of the reference.

    For every .jpg in EXAMPLES_FOLDER, recomputes the pose color points
    from the image (using the sibling json's pose vector) and prints the
    k-means color difference plus the mean luma of the cropped person.
    Depends on module-level globals EXAMPLES_FOLDER and min_score.
    """
    print('Test color comparision')
    print('Loading image comparision')
    # Loading instances
    instance_pose = ClassOpenPose()
    ignore_json_color = False
    if perform_eq:
        ignore_json_color = True
    # Avoid to open two prompts
    obj_img = ClassDescriptors.load_images_comparision_ext(
        instance_pose, min_score, load_one_img=True, perform_eq=perform_eq,
        ignore_json_color=ignore_json_color)
    # Extract color comparision from image
    hist1 = obj_img['listPoints1']
    # Generating examples folder
    list_process = list()
    for root, _, files in os.walk(EXAMPLES_FOLDER):
        for file in files:
            full_path = os.path.join(root, file)
            extension = ClassUtils.get_filename_extension(full_path)
            if extension == '.jpg':
                list_process.append(full_path)
    # Sorting list
    list_process.sort()
    """
    list_result = list()
    score_max_pt = -1
    """
    for full_path in list_process:
        print('Processing file: {0}'.format(full_path))
        json_path = full_path.replace('.jpg', '.json')
        with open(json_path, 'r') as f:
            obj_json = json.loads(f.read())
        image2 = cv2.imread(full_path)
        if perform_eq:
            image2 = ClassUtils.equalize_hist(image2)
        # Support both current ('vectors') and legacy ('vector') key names
        if 'vector' in obj_json:
            pose2 = obj_json['vector']
        elif 'vectors' in obj_json:
            pose2 = obj_json['vectors']
        else:
            raise Exception('Invalid vector property for vector custom')
        hist2 = ClassDescriptors.get_points_by_pose(image2, pose2, min_score)
        diff = ClassDescriptors.get_kmeans_diff(hist1, hist2)
        # Getting mean y from image 2 - discarding purposes
        pt1, pt2 = ClassUtils.get_rectangle_bounds(pose2, min_score)
        image2_crop = image2[pt1[1]:pt2[1], pt1[0]:pt2[0]]
        image2_ycc = cv2.cvtColor(image2_crop, cv2.COLOR_BGR2YCrCb)
        mean_y = np.mean(image2_ycc[:, :, 0])
        print('Diff color: {0} - Mean y: {1}'.format(diff, mean_y))
    """
    list_result.sort(key=lambda x: x['score'])
    print('Printing list result')
    print(list_result)
    print('min_score: {0}'.format(score_max_pt))
    """
    print('Done!')
def load_descriptors(instance_nn_train: ClassNN, instance_nn_pose: ClassNN, pose_type: Desc):
    """Build fixed-size action descriptors per sample and train/eval a NN.

    instance_nn_train: the classifier to train or evaluate.
    instance_nn_pose: pose NN used only when pose_type == Desc.POSES (its
        class probabilities become the descriptor).
    pose_type: Desc.POSES (pose-NN probabilities), Desc.ALL (angles +
        flattened points) or Desc.POINTS (flattened points only).

    Samples samples_size poses evenly from each file's 'listPoses', splits
    80/20 per class after a seeded shuffle, then interactively trains or
    evaluates. Raises Exception on unknown pose_type, empty data set, or
    unknown menu option. Depends on module-level globals list_folder_data,
    seed, samples_size, train_model, eval_model.
    """
    training_data = list()
    training_labels = list()
    eval_data = list()
    eval_labels = list()
    training_files = list()
    eval_files = list()
    for index, item in enumerate(list_folder_data):
        folder = item['folderPath']
        label = item['label']
        print('Processing folder path: {0}'.format(folder))
        num_file = 0
        list_paths = list()
        for root, _, files in os.walk(folder):
            for file in files:
                full_path = os.path.join(root, file)
                extension = ClassUtils.get_filename_extension(full_path)
                if extension == '.json':
                    list_paths.append(full_path)
        total_samples = len(list_paths)
        # 80/20 train/eval split per class
        total_train = int(total_samples * 80 / 100)
        # Shuffle samples
        random.Random(seed).shuffle(list_paths)
        for full_path in list_paths:
            # Reading data
            with open(full_path, 'r') as f:
                json_txt = f.read()
            json_data = json.loads(json_txt)
            list_poses = json_data['listPoses']
            # Sampling data - pick samples_size poses spread over the clip
            descriptor = list()
            for index_size in range(samples_size):
                index_pose = int(len(list_poses) * index_size / samples_size)
                pose = list_poses[index_pose]
                transformed_points = pose['transformedPoints']
                angles = pose['angles']
                list_desc = list()
                list_desc += angles
                list_desc += ClassUtils.get_flat_list(transformed_points)
                if pose_type == Desc.POSES:
                    # Fix: np.float was removed in NumPy 1.24; use float.
                    list_desc_np = np.asanyarray(list_desc, dtype=float)
                    res = instance_nn_pose.predict_model_fast(list_desc_np)
                    # Add descriptor with probabilities
                    for elem in res['probabilities']:
                        descriptor.append(elem)
                elif pose_type == Desc.ALL:
                    for elem in list_desc:
                        descriptor.append(elem)
                elif pose_type == Desc.POINTS:
                    list_flat = ClassUtils.get_flat_list(transformed_points)
                    for elem in list_flat:
                        descriptor.append(elem)
                else:
                    raise Exception('Pose type not recognized: {0}'.format(pose_type))
            if num_file < total_train:
                training_data.append(descriptor)
                training_labels.append(label)
                training_files.append(full_path)
            else:
                eval_data.append(descriptor)
                eval_labels.append(label)
                eval_files.append(full_path)
            num_file += 1
    # Convert data to numpy array (np.float removed in NumPy 1.24 -> float)
    training_data_np = np.asanyarray(training_data, dtype=float)
    training_labels_np = np.asanyarray(training_labels, dtype=int)
    eval_data_np = np.asanyarray(eval_data, dtype=float)
    eval_labels_np = np.asanyarray(eval_labels, dtype=int)
    print('Shape images training: {0}'.format(training_data_np.shape))
    print('Shape labels training: {0}'.format(training_labels_np.shape))
    if training_data_np.shape[0] == 0:
        raise Exception('No files found!')
    res = input('Press 1 to train - 2 to eval: ')
    if res == '1':
        train_model(training_data_np, training_labels_np, eval_data_np, eval_labels_np,
                    instance_nn_train, steps=30000)
    elif res == '2':
        eval_model(eval_data_np, eval_labels_np, instance_nn_train)
    else:
        raise Exception('Option not implemented!')
def load_pose_descriptors(cls, type_desc: EnumDesc):
    """Load pose descriptors for all class folders and split train/eval.

    type_desc: selects the folder set (ANGLES / ANGLES_TRANSFORMED use
        cls.list_folder_data_angles, others cls.list_folder_data) and how
        cls._get_descriptor_list builds each descriptor.

    Returns a dict with numpy train/eval data, labels, file lists, the
    (name, label) pairs derived from folder paths, and the class count.
    Raises Exception on integrity failures of any json file.
    """
    training_data = list()
    training_labels = list()
    training_files = list()
    eval_data = list()
    eval_labels = list()
    eval_files = list()
    classes_number = 0
    if type_desc == EnumDesc.ANGLES or type_desc == EnumDesc.ANGLES_TRANSFORMED:
        data_folder = cls.list_folder_data_angles
    else:
        data_folder = cls.list_folder_data
    # Count classes: keep incrementing while some folder carries the next
    # consecutive label
    cont = True
    while cont:
        cont = False
        for folder_data in data_folder:
            if folder_data[2] == classes_number:
                classes_number += 1
                cont = True
                break
    # Iterate folder
    for index, item in enumerate(data_folder):
        folder = item[0]
        min_score = item[1]
        label = item[2]
        list_files = os.listdir(folder)
        random.Random(cls.seed).shuffle(list_files)
        # 70/30 train/eval split per folder
        total_train = int(len(list_files)) * 70 / 100
        for num_file, file in enumerate(list_files):
            full_path = os.path.join(folder, file)
            extension = ClassUtils.get_filename_extension(full_path)
            if extension != '.json':
                print('Ignoring file {0}'.format(full_path))
                continue
            with open(full_path, 'r') as text_file:
                arr_json = text_file.read()
            params = json.loads(arr_json)
            vectors = params['vectors']
            angles = params['angles']
            transformed_points = params['transformedPoints']
            valid = ClassUtils.check_vector_integrity_pos(vectors, min_score)
            only_pos = ClassUtils.check_vector_only_pos(vectors, min_score)
            if not valid:
                raise Exception('Vector integrity not valid for file: {0}'.format(full_path))
            if only_pos:
                raise Exception('Invalid vector to perform detection')
            # Fill training and eval list
            # Use angles and position information
            data_to_add = cls._get_descriptor_list(angles, transformed_points, type_desc)
            if num_file < total_train:
                training_data.append(data_to_add)
                training_labels.append(label)
                training_files.append(full_path)
            else:
                eval_data.append(data_to_add)
                eval_labels.append(label)
                eval_files.append(full_path)
    # Convert data to numpy array
    # Fix: np.float / np.str were removed in NumPy 1.24; use builtins.
    training_data_np = np.asanyarray(training_data, dtype=float)
    training_labels_np = np.asanyarray(training_labels, dtype=int)
    print(training_labels_np)
    eval_data_np = np.asanyarray(eval_data, dtype=float)
    eval_labels_np = np.asanyarray(eval_labels, dtype=int)
    print(eval_labels_np)
    training_files_np = np.asanyarray(training_files, dtype=str)
    eval_files_np = np.asanyarray(eval_files, dtype=str)
    # Getting label_names
    label_names = []
    for folder, _, label in data_folder:
        names = folder.split('/')
        label_name = names[-1]
        # Check if last character is /
        if len(label_name) == 0:
            # Fix: the original did "label_names = names[-2]", clobbering the
            # accumulator list with a string (the next .append would raise).
            label_name = names[-2]
        label_names.append((label_name, label))
    print('Total training: {0}'.format(len(training_labels)))
    print('Total eval: {0}'.format(len(eval_labels)))
    print('Shape training: {0}'.format(training_data_np.shape))
    print('Shape eval: {0}'.format(eval_data_np.shape))
    print('Classes number: {0}'.format(classes_number))
    results = {
        'trainingData': training_data_np,
        'trainingLabels': training_labels_np,
        'evalData': eval_data_np,
        'evalLabels': eval_labels_np,
        'trainingFiles': training_files_np,
        'evalFiles': eval_files_np,
        'labelNames': label_names,
        'classesNumber': classes_number
    }
    return results
def main():
    """Extract frames from .mjpeg videos and save those with valid people.

    Prompts for a directory, walks every .mjpeg file, decodes each frame,
    runs OpenPose, and for frames containing at least one person passing
    the integrity check writes the raw frame to folder_images and an
    annotated copy (red bounding boxes) to folder_images_draw, named
    '<ticks>_<idcam>.jpg'.

    Raises Exception when no directory is selected.
    """
    print('Initializing main function')
    print('Folder selection')
    folder_images = '/home/mauricio/PosesProcessed/folder_images'
    folder_images_draw = '/home/mauricio/PosesProcessed/folder_images_draw'
    Tk().withdraw()
    # Getting options
    init_dir = '/home/mauricio/Videos/Oviedo'
    options = {'initialdir': init_dir}
    dir_name = askdirectory(**options)
    if not dir_name:
        raise Exception('Directory not selected')
    # Create directory if does not exists
    if not os.path.isdir(folder_images):
        os.makedirs(folder_images)
    # Create directory if does not exists
    if not os.path.isdir(folder_images_draw):
        os.makedirs(folder_images_draw)
    # Initializing openpose instance
    instance_pose = ClassOpenPose()
    for root, subdirs, files in os.walk(dir_name):
        for file in files:
            full_path = os.path.join(root, file)
            print('Processing {0}'.format(full_path))
            extension = ClassUtils.get_filename_extension(full_path)
            if extension == '.mjpeg':
                file_info = ClassMjpegReader.process_video(full_path)
            else:
                print('Extension ignored: {0}'.format(extension))
                continue
            # Getting idcam from file (parent directory name)
            id_cam = full_path.split('/')[-2]
            print('IdCam: {0}'.format(id_cam))
            for index, info in enumerate(file_info):
                print('Processing {0} of {1} from {2}'.format(index, len(file_info), full_path))
                frame = info[0]
                ticks = info[1]
                image_np = np.frombuffer(frame, dtype=np.uint8)
                image = cv2.imdecode(image_np, cv2.IMREAD_ANYCOLOR)
                arr, image_draw = instance_pose.recognize_image_tuple(image)
                min_score = 0.05
                arr_pass = list()
                for elem in arr:
                    if ClassUtils.check_vector_integrity_pos(elem, min_score):
                        arr_pass.append(elem)
                if len(arr_pass) > 0:
                    # Draw rectangles for all candidates
                    for person_arr in arr_pass:
                        pt1, pt2 = ClassUtils.get_rectangle_bounds(person_arr, min_score)
                        cv2.rectangle(image_draw, pt1, pt2, (0, 0, 255), 5)
                    # Overwriting 1
                    full_path_images = os.path.join(folder_images, '{0}_{1}.jpg'.format(ticks, id_cam))
                    print('Writing image {0}'.format(full_path_images))
                    cv2.imwrite(full_path_images, image)
                    # Overwriting 2
                    full_path_draw = os.path.join(folder_images_draw, '{0}_{1}.jpg'.format(ticks, id_cam))
                    # Fix: this log line printed full_path_images (the raw
                    # frame path) instead of the draw path actually written.
                    print('Writing image {0}'.format(full_path_draw))
                    cv2.imwrite(full_path_draw, image_draw)
    print('Done!')