def get_images(folder_in, H, W):
    images = []
    for filename in tools_IO.get_filenames(folder_in, '*.jpg'):
        image = cv2.imread(folder_in + filename)
        image = tools_image.smart_resize(image, H, W)
        images.append(image)
    return numpy.array(images)
def generate_features(self, path_input, path_output, limit=1000000, mask='*.png,*.jpg'):
    if not os.path.exists(path_output):
        os.makedirs(path_output)

    dict_last_layers, dict_bottlenects = self.__last_full_layers4()
    outputs = [self.model.layers[len(self.model.layers) + i].output for i in dict_bottlenects.values()]

    # one feature file per sub-folder of path_input; the feature of an image is its flattened bottleneck activations
    patterns = numpy.sort(numpy.array([f.path[len(path_input):] for f in os.scandir(path_input) if f.is_dir()]))

    for each in patterns:
        print(each)
        local_filenames = tools_IO.get_filenames(path_input + each, mask)[:limit]
        feature_filename = path_output + '/' + each + '_' + self.name + '.txt'
        features, filenames = [], []

        if not os.path.isfile(feature_filename):
            bar = progressbar.ProgressBar(max_value=len(local_filenames))
            for b, local_filename in enumerate(local_filenames):
                bar.update(b)
                image = cv2.imread(path_input + each + '/' + local_filename)
                if image is None:
                    continue
                image_resized = tools_image.smart_resize(image, self.input_image_size[0], self.input_image_size[1])
                image_resized = numpy.expand_dims(image_resized / 255.0, axis=0)
                bottlenecks = Model(self.model.input, outputs).predict(image_resized)
                feature = numpy.hstack((bottlenecks[0].flatten(), bottlenecks[1].flatten()))
                features.append(feature)
                filenames.append(local_filename)

            features = numpy.array(features)
            mat = numpy.zeros((features.shape[0], features.shape[1] + 1)).astype(str)
            mat[:, 0] = filenames
            mat[:, 1:] = features
            tools_IO.save_mat(mat, feature_filename, fmt='%s', delim='\t')
    return
def merge_all_folders(list_of_folders, folder_out, target_W, target_H, filename_watermark=None):
    if filename_watermark is not None:
        image_watermark = cv2.imread(filename_watermark)
        w = int(target_W / 5)
        h = int(w * image_watermark.shape[0] / image_watermark.shape[1])
        image_watermark = cv2.resize(image_watermark, (w, h))
    else:
        image_watermark = None

    tools_IO.remove_files(folder_out, create=True)

    pad = 0
    bg_color = (255, 255, 255)
    empty = numpy.full((target_H, target_W, 3), numpy.array(bg_color, dtype=numpy.uint8), dtype=numpy.uint8)

    cnt = 0
    for folder_in in list_of_folders:
        for filename_in in tools_IO.get_filenames(folder_in, '*.jpg,*.png'):
            base_name = filename_in.split('/')[-1].split('.')[0]
            image = cv2.imread(folder_in + filename_in)
            image = tools_image.smart_resize(image, target_H - 2 * pad, target_W - 2 * pad, bg_color=bg_color)
            result = tools_image.put_image(empty, image, pad, pad)
            if filename_watermark is not None:
                result = tools_image.put_image(result, image_watermark, 0, result.shape[1] - image_watermark.shape[1])
            cv2.imwrite(folder_out + 'res_%06d_' % cnt + base_name + '.png', result)
            cnt += 1
            print(base_name)
    return
# ----------------------------------------------------------------------------------------------------------------------
def process_file_debug(self, filename_in, folder_out):
    image = cv2.imread(filename_in)
    image_resized = tools_image.smart_resize(image, self.input_image_size[0], self.input_image_size[1])
    image_resized = numpy.expand_dims(image_resized / 255.0, axis=0)

    u_boxes, u_scores, u_classes = detector_YOLO3_core.get_tensors_box_score_class_unfilter(
        self.model.output, self.anchors, self.anchor_mask, len(self.class_names),
        self.input_tensor_shape, score_threshold=0.01, iou_threshold=self.nms_threshold)

    boxes_yxyx, classes, scores = self.sess.run(
        [u_boxes, u_classes, u_scores],
        feed_dict={self.model.input: image_resized,
                   self.input_tensor_shape: [image.shape[0], image.shape[1]],
                   K.learning_phase(): 0})

    self.process_file(filename_in, folder_out + filename_in.split('/')[-1])

    total_image = tools_image.desaturate(image)
    for c in list(set(classes)):
        idx = numpy.where(classes == c)
        temp_image = tools_YOLO.draw_classes_on_image(tools_image.desaturate(image), boxes_yxyx[idx], [1] * len(idx[0]), self.colors[c], draw_score=False)
        total_image = tools_YOLO.draw_classes_on_image(total_image, boxes_yxyx[idx], scores[idx], self.colors[c], draw_score=True)
        cv2.imwrite(folder_out + 'class_%02d-%s-p%02d.png' % (c, self.class_names[c], 100 * scores[idx].max()), temp_image)

    cv2.imwrite(folder_out + 'all_boxes.png', total_image)
    return
def process_image(self, image):
    image_resized = tools_image.smart_resize(image, self.input_image_size[0], self.input_image_size[1])
    image_resized = numpy.expand_dims(image_resized / 255.0, axis=0)

    boxes_yxyx, classes, scores = self.sess.run(
        [self.boxes, self.classes, self.scores],
        feed_dict={self.model.input: image_resized,
                   self.input_tensor_shape: [image.shape[0], image.shape[1]],
                   K.learning_phase(): 0})

    return boxes_yxyx, classes, scores
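# ----------------------------------------------------------------------------------------------------------------------
# Usage sketch (not part of the original code): draws the detections returned by process_image, mirroring
# process_file_debug above. `detector` is assumed to be an already constructed instance of this detector class,
# and the file paths are hypothetical.
def example_process_image(detector, filename_in='./images/input/frame.jpg', filename_out='./images/output/detections.png'):
    image = cv2.imread(filename_in)
    boxes_yxyx, classes, scores = detector.process_image(image)
    result = tools_image.desaturate(image)
    for c in list(set(classes)):
        idx = numpy.where(classes == c)
        result = tools_YOLO.draw_classes_on_image(result, boxes_yxyx[idx], scores[idx], detector.colors[c], draw_score=True)
    cv2.imwrite(filename_out, result)
    return
# ----------------------------------------------------------------------------------------------------------------------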
def get_bottleneck_features(self, filenames_list, dict_bottlenects):
    images_resized = []
    for filename in filenames_list:
        image_resized = tools_image.smart_resize(cv2.imread(filename), self.input_image_size[0], self.input_image_size[1])
        images_resized.append(image_resized / 255.0)

    images_resized = numpy.array(images_resized).astype(float)

    L = len(self.model.layers)
    outputs = [self.model.layers[L + i].output for i in dict_bottlenects.values()]
    bottlenecks = Model(self.model.input, outputs).predict(images_resized)
    return bottlenecks
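# ----------------------------------------------------------------------------------------------------------------------
# Usage sketch (not part of the original code): the values of dict_bottlenects are offsets relative to the end of
# model.layers (layers[len(layers) + i]), so they are expected to be negative. The keys, offsets, and `detector`
# instance below are illustrative assumptions only.
def example_get_bottleneck_features(detector, filenames_list):
    dict_bottlenects = {'bottleneck_A': -3, 'bottleneck_B': -2}
    bottlenecks = detector.get_bottleneck_features(filenames_list, dict_bottlenects)
    for bottleneck in bottlenecks:
        print(bottleneck.shape)
    return bottlenecks
# ----------------------------------------------------------------------------------------------------------------------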
def draw_objects(image_bg, placeholdes, images):
    for p, im in zip(placeholdes, images):
        target_image_height = int(p[3] - p[1]) + 1
        target_image_width = int(p[2] - p[0]) + 1
        start_row = int(p[1]) - 1
        start_col = int(p[0]) - 1
        if target_image_height * target_image_width > 0:
            small_image = tools_image.smart_resize(im, target_image_height, target_image_width)
            if small_image.shape[0] > 0 and small_image.shape[1] > 0:
                image_bg = tools_image.put_image(image_bg, small_image, start_row, start_col)
    return image_bg
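# ----------------------------------------------------------------------------------------------------------------------
# Usage sketch (not part of the original code): each placeholder is an (x1, y1, x2, y2) box in pixels; draw_objects
# resizes every image to its placeholder and pastes it onto the background. File names and coordinates are
# illustrative assumptions only.
def example_draw_objects():
    image_bg = numpy.full((480, 640, 3), 255, dtype=numpy.uint8)
    placeholders = [(10, 10, 200, 150), (250, 40, 400, 300)]
    images = [cv2.imread('./images/input/obj_0.png'), cv2.imread('./images/input/obj_1.png')]
    result = draw_objects(image_bg, placeholders, images)
    cv2.imwrite('./images/output/collage.png', result)
    return
# ----------------------------------------------------------------------------------------------------------------------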
def save_bottleneck_features(self, folder_out, filenames_list, dict_bottlenects):
    outputs = [self.model.layers[len(self.model.layers) + i].output for i in dict_bottlenects.values()]

    # probe the bottleneck shapes with a dummy forward pass so the HDF5 stores can be pre-allocated
    image_resized = numpy.zeros((1, self.input_image_size[0], self.input_image_size[1], 3))
    bottlenecks = Model(self.model.input, outputs).predict(image_resized)

    store0 = tools_HDF5.HDF5_store(filename=folder_out + 'bottlenecks_0.hdf5', object_shape=bottlenecks[0][0].shape, dtype=numpy.float32)
    store1 = tools_HDF5.HDF5_store(filename=folder_out + 'bottlenecks_1.hdf5', object_shape=bottlenecks[1][0].shape, dtype=numpy.float32)
    if len(bottlenecks) > 2:
        store2 = tools_HDF5.HDF5_store(filename=folder_out + 'bottlenecks_2.hdf5', object_shape=bottlenecks[2][0].shape, dtype=numpy.float32)

    bar = progressbar.ProgressBar(max_value=len(filenames_list))
    print('\nSaving bottleneck features\n')
    for b, filename in enumerate(filenames_list):
        if not os.path.isfile(filename):
            continue
        image = cv2.imread(filename)
        if image is None:
            continue
        bar.update(b)
        image_resized = tools_image.smart_resize(image, self.input_image_size[0], self.input_image_size[1])
        image_resized = numpy.expand_dims(image_resized / 255.0, axis=0)
        bottlenecks = Model(self.model.input, outputs).predict(image_resized)
        store0.append(bottlenecks[0][0])
        store1.append(bottlenecks[1][0])
        if len(bottlenecks) > 2:
            store2.append(bottlenecks[2][0])
    return
def example_face_perspective(filename_actor, filename_obj, filename_3dmarkers=None, do_debug=False):
    D = detector_landmarks.detector_landmarks('..//_weights//shape_predictor_68_face_landmarks.dat', filename_3dmarkers)

    image_actor = cv2.imread(filename_actor)
    image_actor = tools_image.smart_resize(image_actor, 640, 640)

    R = tools_GL3D.render_GL3D(filename_obj=filename_obj, W=image_actor.shape[1], H=image_actor.shape[0],
                               is_visible=False, projection_type='P', scale=(1, 1, 0.25))

    L = D.get_landmarks(image_actor)
    L3D = D.model_68_points
    L3D[:, 2] = 0

    rvec, tvec = D.get_pose_perspective(image_actor, L, L3D, R.mat_trns)
    print('[ %1.2f, %1.2f, %1.2f], [%1.2f, %1.2f, %1.2f]' % (rvec[0], rvec[1], rvec[2], tvec[0], tvec[1], tvec[2]))

    image_3d = R.get_image_perspective(rvec, tvec, do_debug=do_debug)
    clr = (255 * numpy.array(R.bg_color)).astype(int)
    result = tools_image.blend_avg(image_actor, image_3d, clr, weight=0)
    cv2.imwrite('./images/output/face_GL.png', result)

    M = pyrr.matrix44.multiply(pyrr.matrix44.create_from_eulers(rvec), pyrr.matrix44.create_from_translation(tvec))
    R.mat_model, R.mat_view = tools_pr_geom.decompose_model_view(M)

    result = tools_render_CV.draw_points_numpy_MVP(L3D, image_actor, R.mat_projection, R.mat_view, R.mat_model, R.mat_trns)
    result = D.draw_landmarks_v2(result, L)
    cv2.imwrite('./images/output/face_CV_MVP.png', result)
    return
def get_images(foldername, filename, delim=' ', smart_resized_target=None, limit=10000):
    with open(filename) as f:
        lines = f.readlines()[1:limit]

    list_filenames = [line.split(delim)[0] for line in lines]
    unique_filenames = sorted(set(list_filenames))

    images = []
    for local_filename in unique_filenames:
        image = tools_image.rgb2bgr(cv2.imread(foldername + local_filename))
        if smart_resized_target is not None:
            image = tools_image.smart_resize(image, smart_resized_target[0], smart_resized_target[1])
        images.append(image)

    return numpy.array(images)
def draw_boxes(class_ID, folder_annotation, file_markup_true, file_markup_pred, path_out, delim=' ',
               metric='recall', confidence=0.10, iou_th=0.1, ovp_th=0.5, ovd_th=0.5):
    tools_IO.remove_files(path_out, create=True)
    tools_IO.remove_files(path_out + '0/', create=True)
    tools_IO.remove_files(path_out + '1/', create=True)
    #foldername = '/'.join(file_markup_true.split('/')[:-1]) + '/'

    with open(file_markup_true) as f:
        lines_true = f.readlines()[1:]
    with open(file_markup_pred) as f:
        lines_pred = f.readlines()[1:]

    file_true, file_pred, coord_true, coord_pred, conf_true, conf_pred, hit_true, hit_pred = calc_hits_stats_iou(
        lines_true, lines_pred, class_ID, delim, folder_annotation, iuo_th=iou_th, ovp_th=ovp_th, ovd_th=ovd_th)

    red = (0, 32, 255)
    amber = (0, 192, 255)
    green = (0, 192, 0)
    marine = (128, 128, 0)
    gray = (128, 128, 128)
    hit_colors_true = [red, green]
    hit_colors_pred = [amber, marine]

    bar = progressbar.ProgressBar(max_value=len(set(file_true)))
    for b, filename in enumerate(set(file_true)):
        bar.update(b)
        image = cv2.imread(folder_annotation + filename)
        if image is None:
            continue
        image = tools_image.desaturate(image)
        image = tools_image.smart_resize(image, 416, 416)

        is_hit = 0
        is_FP = 1

        idx = numpy.where(file_true == filename)
        for coord, hit, conf in zip(coord_true[idx], hit_true[idx], conf_true[idx]):
            if conf < confidence:
                hit = 0
            cv2.rectangle(image, (coord[0], coord[1]), (coord[2], coord[3]), hit_colors_true[hit], thickness=2)
            is_hit = max(is_hit, hit)

        idx = numpy.where(file_pred == filename)
        for coord, hit, conf in zip(coord_pred[idx], hit_pred[idx], conf_pred[idx]):
            if conf < confidence:
                hit = 0
                cv2.rectangle(image, (coord[0], coord[1]), (coord[2], coord[3]), gray, thickness=1)
            else:
                cv2.rectangle(image, (coord[0], coord[1]), (coord[2], coord[3]), hit_colors_pred[hit], thickness=1)
            is_FP = min(is_FP, hit)

        if metric == 'recall':
            subfolder = str(is_hit)
        else:
            subfolder = str(is_FP)
        cv2.imwrite(path_out + subfolder + '/' + filename.split('/')[-1], image)
    return