def main():
    score_threshold = 0.00001
    if not exists(join('results', f'D{config.phi}')):
        os.makedirs(join('results', f'D{config.phi}'))
    train_model, inference_model = build_model(phi=config.phi,
                                               num_classes=len(config.classes),
                                               score_threshold=score_threshold)
    # load the training weights, re-save them, then load them into the
    # inference graph by layer name
    train_model.load_weights(config.weight_path)
    train_model.save_weights(config.weight_path)
    inference_model.load_weights(config.weight_path, by_name=True)

    f_names = []
    with open(join(config.data_dir, 'val.txt')) as reader:
        for line in reader.readlines():
            f_names.append(line.rstrip().split(' ')[0])

    result_dict = {}
    for f_name in tqdm.tqdm(f_names):
        image_path = join(config.data_dir, config.image_dir, f_name + '.jpg')
        label_path = join(config.data_dir, config.label_dir, f_name + '.xml')
        image = cv2.imread(image_path)
        src_image = image.copy()
        image = image[:, :, ::-1]  # BGR -> RGB for the network
        h, w = image.shape[:2]
        image, scale = preprocess_image(image, image_size=config.image_size)

        boxes, scores, labels = inference_model.predict_on_batch(
            [np.expand_dims(image, axis=0)])
        boxes = np.squeeze(boxes)
        scores = np.squeeze(scores)
        labels = np.squeeze(labels)
        boxes = postprocess_boxes(boxes=boxes, scale=scale, height=h, width=w)

        indices = np.where(scores > score_threshold)[0]
        boxes = boxes[indices]
        scores = scores[indices]  # keep confidences aligned with the filtered boxes
        draw_boxes(src_image, boxes)

        pred_boxes_np = []
        for pred_box in boxes:
            x_min, y_min, x_max, y_max = pred_box
            pred_boxes_np.append([x_min, y_min, x_max, y_max])

        true_boxes = []
        for element in parse_fn(label_path).getroot().iter('object'):
            box = find_node(element, 'bndbox')
            x_min = find_node(box, 'xmin', 'bndbox.xmin', parse=float) - 1
            y_min = find_node(box, 'ymin', 'bndbox.ymin', parse=float) - 1
            x_max = find_node(box, 'xmax', 'bndbox.xmax', parse=float) - 1
            y_max = find_node(box, 'ymax', 'bndbox.ymax', parse=float) - 1
            true_boxes.append([x_min, y_min, x_max, y_max])

        result = {'detection_boxes': pred_boxes_np,
                  'groundtruth_boxes': true_boxes,
                  'confidence': scores}
        result_dict[f'{f_name}.jpg'] = result

        cv2.imwrite(join('results', f'D{config.phi}', basename(image_path)),
                    src_image[:, :, ::-1])
        # cv2.namedWindow('image', cv2.WINDOW_NORMAL)
        # cv2.imshow('image', src_image)
        # cv2.waitKey(0)

    with open(join('results', f'D{config.phi}', 'd4.pickle'), 'wb') as writer:
        pickle.dump(result_dict, writer)
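# NOTE: `postprocess_boxes` is imported from elsewhere in this project and is
# not defined in this file. The sketch below is only an assumption about what
# it does, inferred from the call site above (undo the preprocessing scale and
# clip boxes to the original image bounds); the project's real helper may
# differ.
def postprocess_boxes_sketch(boxes, scale, height, width):
    boxes = boxes.copy() / scale                        # back to original resolution
    boxes[:, 0] = np.clip(boxes[:, 0], 0, width - 1)    # x_min
    boxes[:, 1] = np.clip(boxes[:, 1], 0, height - 1)   # y_min
    boxes[:, 2] = np.clip(boxes[:, 2], 0, width - 1)    # x_max
    boxes[:, 3] = np.clip(boxes[:, 3], 0, height - 1)   # y_max
    return boxes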
def load_label(f_name):
    try:
        tree = parse_fn(join(config.base_dir, config.label_dir, f_name + '.xml'))
        return parse_annotations(tree.getroot())
    except ParseError as error:
        raise_from(ValueError(f'invalid annotations file: {f_name}: {error}'), None)
    except ValueError as error:
        raise_from(ValueError(f'invalid annotations file: {f_name}: {error}'), None)
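# NOTE: `find_node` (used when reading the ground-truth boxes in main() above)
# is also defined elsewhere. This is a guessed minimal version based purely on
# how it is called: look up a child element, fail with a readable path, and
# optionally parse its text. The real helper may behave differently.
def find_node_sketch(parent, name, debug_name=None, parse=None):
    node = parent.find(name)
    if node is None:
        raise ValueError(f'missing element: {debug_name or name}')
    if parse is not None:
        return parse(node.text)
    return node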
def main():
    threshold = 0.22
    pipeline_config = join('weights', 'D4', 'd4.config')
    configs = config_util.get_configs_from_pipeline_file(pipeline_config)
    model_config = configs['model']
    detection_model = model_builder.build(model_config=model_config, is_training=False)
    ckpt = tf.compat.v2.train.Checkpoint(model=detection_model)
    ckpt.restore(join('weights', 'D4', 'ckpt-389')).expect_partial()
    detect_fn = detection_function(detection_model)

    f_names = []
    with open(join('..', 'Dataset', 'Dubai', 'val.txt')) as reader:
        for line in reader.readlines():
            f_names.append(line.rstrip().split(' ')[0])

    result_dict = {}
    for f_name in tqdm.tqdm(f_names):
        image_path = join('..', 'Dataset', 'Dubai', 'IMAGES', f_name + '.jpg')
        label_path = join('..', 'Dataset', 'Dubai', 'LABELS', f_name + '.xml')
        image = cv2.imread(image_path)
        image = image[:, :, ::-1]  # BGR -> RGB for the network
        input_tensor = tf.convert_to_tensor(np.expand_dims(image, 0), dtype=tf.float32)
        detections, predictions_dict, shapes = detect_fn(input_tensor)

        image_np = image.copy()
        scores = detections['detection_scores'][0].numpy()
        pred_boxes = detections['detection_boxes'][0].numpy()
        im_height, im_width, _ = image.shape

        # boxes come back as normalized [y_min, x_min, y_max, x_max];
        # convert to absolute [x_min, y_min, x_max, y_max] pixels
        pred_boxes_np = []
        for pred_box in pred_boxes:
            y_min, x_min, y_max, x_max = pred_box
            pred_boxes_np.append([int(x_min * im_width), int(y_min * im_height),
                                  int(x_max * im_width), int(y_max * im_height)])

        true_boxes = []
        for element in parse_fn(label_path).getroot().iter('object'):
            true_boxes.append(parse_annotation(element))

        result = {'detection_boxes': pred_boxes_np,
                  'groundtruth_boxes': true_boxes,
                  'confidence': scores}
        result_dict[f'{f_name}.jpg'] = result

        draw_boxes(image_np, pred_boxes, scores, threshold)
        cv2.imwrite(join('results', basename(image_path)), image_np[:, :, ::-1])

    with open(join('results', 'd4.pickle'), 'wb') as writer:
        pickle.dump(result_dict, writer)
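# NOTE: `detection_function` is not shown in this file. The sketch below
# assumes it is the usual TF Object Detection API wrapper that runs
# preprocess -> predict -> postprocess inside a tf.function; the project's
# own helper may differ in details.
def detection_function_sketch(model):
    @tf.function
    def detect_fn(image):
        image, shapes = model.preprocess(image)
        prediction_dict = model.predict(image, shapes)
        detections = model.postprocess(prediction_dict, shapes)
        return detections, prediction_dict, tf.reshape(shapes, [-1])
    return detect_fn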
def load_label(f_name):
    try:
        tree = parse_fn(join(config.data_dir, config.label_dir, f_name + '.xml'))
        return parse_annotations(tree.getroot())
    except ParseError as error:
        raise_from(ValueError('invalid annotations file: {}: {}'.format(f_name, error)), None)
    except ValueError as error:
        raise_from(ValueError('invalid annotations file: {}: {}'.format(f_name, error)), None)
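# NOTE: `parse_annotation` (applied to single Pascal VOC <object> elements in
# the evaluation loops) is defined elsewhere. This guess assumes it returns
# (label, difficult, box) so that the `[2]` index in the YOLO script below
# picks out the box; the D4 script above may rely on a variant that returns
# only the box, so treat this purely as an illustration.
def parse_annotation_sketch(element):
    label = element.find('name').text
    difficult_node = element.find('difficult')
    difficult = int(difficult_node.text) if difficult_node is not None else 0
    box_node = element.find('bndbox')
    box = [float(box_node.find('xmin').text) - 1,
           float(box_node.find('ymin').text) - 1,
           float(box_node.find('xmax').text) - 1,
           float(box_node.find('ymax').text) - 1]
    return label, difficult, box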
def main():
    if not exists('results'):
        os.makedirs('results')

    f_names = []
    with open(join(config.base_dir, 'val.txt')) as reader:
        for line in reader.readlines():
            f_names.append(line.rstrip().split(' ')[0])

    result_dict = {}
    model = nn.build_model(training=False)
    model.load_weights('weights/model245.h5', True)

    for f_name in tqdm.tqdm(f_names):
        image_path = join(config.base_dir, config.image_dir, f_name + '.jpg')
        label_path = join(config.base_dir, config.label_dir, f_name + '.xml')
        image = cv2.imread(image_path)
        image_np = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image_np, scale, dw, dh = util.resize(image_np)
        image_np = image_np.astype(np.float32) / 255.0

        boxes, scores, _ = model.predict(image_np[np.newaxis, ...])
        # undo the letterbox padding (dw, dh) and resize scale so the boxes
        # are in original-image coordinates
        boxes[:, [0, 2]] = (boxes[:, [0, 2]] - dw) / scale
        boxes[:, [1, 3]] = (boxes[:, [1, 3]] - dh) / scale

        true_boxes = []
        for element in parse_fn(label_path).getroot().iter('object'):
            true_boxes.append(parse_annotation(element)[2])

        result = {'pred_boxes': boxes,
                  'true_boxes': true_boxes,
                  'confidence': scores}
        result_dict[f'{f_name}.jpg'] = result

        image = draw_bbox(image, boxes, scores)
        cv2.imwrite(f'results/{f_name}.png', image)

    with open(join('results', 'yolo.pickle'), 'wb') as writer:
        pickle.dump(result_dict, writer)
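# NOTE: `util.resize` is not shown here. The sketch below assumes the common
# YOLO-style letterbox resize: scale the image to the network input size while
# keeping the aspect ratio, pad the remainder, and return the scale and
# padding (dw, dh) that main() uses to map boxes back. The input size and
# padding colour are assumptions; the project's real implementation may differ.
def resize_sketch(image, input_size=640):
    h, w = image.shape[:2]
    scale = min(input_size / h, input_size / w)
    new_h, new_w = int(round(h * scale)), int(round(w * scale))
    dw, dh = (input_size - new_w) / 2, (input_size - new_h) / 2
    image = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    image = cv2.copyMakeBorder(image, top, bottom, left, right,
                               cv2.BORDER_CONSTANT, value=(114, 114, 114))
    return image, scale, dw, dh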