def main(args, min_prob=0.90, detect_interval=50):
    """Show detected objects with boxes, labels and prediction scores in a video stream.

    Args:
        args: parsed CLI arguments; must provide ``config`` (path to the model
            config json) and ``camera`` (source passed to ``cv2.VideoCapture``).
        min_prob: minimum detection confidence for a box to be kept.
        detect_interval: run the (slow) detector only every this many frames;
            the most recent detections are redrawn on the frames in between.
    """
    # Load yolo model with pretrained weights
    print("Create YoloV3 model")
    config_parser = ConfigParser(args.config)
    model = config_parser.create_model(skip_detect_layer=False)
    detector = config_parser.create_detector(model)

    # Open video stream
    cap = cv2.VideoCapture(args.camera)
    if not cap.isOpened():
        print("(Error) Could not open video stream")
        exit()

    # Detect objects in stream
    times = []   # rolling window of the last 20 detection durations (seconds)
    detect = 0   # countdown until the next detection; 0 so the first frame detects
    while True:
        ret, image = cap.read()
        if not ret:
            print("(Error) Lost connection to video stream")
            break

        # Detect objects and measure timing. Detection runs only every
        # `detect_interval`-th frame because it is too slow for every frame.
        if detect <= 0:
            t1 = time.time()
            boxes, labels, probs = detector.detect(image, min_prob)
            t2 = time.time()
            times.append(t2 - t1)
            times = times[-20:]
            detect = detect_interval
        detect -= 1

        # Display the (possibly stale) detections on the current frame
        visualize_boxes(image, boxes, labels, probs, config_parser.get_labels())
        image = cv2.putText(
            image,
            "Time: {:.2f}ms".format(sum(times) / len(times) * 1000),
            (0, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
        cv2.imshow('Frame', image)

        # Exit with 'q'
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
def __init__(self, trained_tracknet_network, config_path='pretrained_model/kitti.json', bbox_size=128):
    """Set up the tracker with a TrackNet identifier and a YOLO-v3 detector.

    Args:
        trained_tracknet_network: trained TrackNet model used as the identifier.
        config_path: path to the YOLO-v3 json config file.
        bbox_size: side length of the bounding-box crops.
    """
    # Keep the TrackNet model and construction parameters on the instance
    self.identifier = trained_tracknet_network
    self.config_path = config_path
    self.bbox_size = bbox_size

    # Create the YOLO-v3 model and load the weights described by the config
    parser = ConfigParser(self.config_path)
    yolo_model = parser.create_model(skip_detect_layer=False)
    self.detector = parser.create_detector(yolo_model)
def main():
    """Run YOLO-v3 detection on every image passed on the command line."""
    args = argparser.parse_args()

    # 1. create yolo model & load weights
    parser = ConfigParser(args.config)
    detector = parser.create_detector(parser.create_model(skip_detect_layer=False))
    class_names = parser.get_labels()

    # 2. detect each requested image, then persist the accumulated results
    for image_path in args.images:
        predictImage(image_path, detector, class_names)
    saveResults()
    return 0
from yolo.train import train_fn from yolo.config import ConfigParser argparser = argparse.ArgumentParser( description='train yolo-v3 network') argparser.add_argument( '-c', '--config', default="configs/svhn.json", help='config file') if __name__ == '__main__': args = argparser.parse_args() config_parser = ConfigParser(args.config) # 1. create generator train_generator, valid_generator = config_parser.create_generator() # 2. create model model = config_parser.create_model() # 3. training learning_rate, save_dname, n_epoches = config_parser.get_train_params() train_fn(model, train_generator, valid_generator, learning_rate=learning_rate, save_dname=save_dname,
default="configs/predict_coco.json", help='config file') argparser.add_argument( '-i', '--image', default="tests/samples/sample.jpeg", help='path to image file') if __name__ == '__main__': args = argparser.parse_args() image_path = args.image # 1. create yolo model & load weights config_parser = ConfigParser(args.config) model = config_parser.create_model(skip_detect_layer=False) detector = config_parser.create_detector(model) # 2. Load image image = cv2.imread(image_path) image = image[:,:,::-1] # 3. Run detection boxes, labels, probs = detector.detect(image, 0.5) print(probs) # 4. draw detected boxes visualize_boxes(image, boxes, labels, probs, config_parser.get_labels()) # 5. plot
import tensorflow as tf
import argparse

argparser = argparse.ArgumentParser(description='evaluate yolo-v3 network')
argparser.add_argument('-c', '--config', default="configs/test.json",
                       help='config file')
argparser.add_argument('-s', '--save_dname', default=None)
argparser.add_argument('-t', '--threshold', type=float, default=0.5)

if __name__ == '__main__':
    from yolo.config import ConfigParser

    args = argparser.parse_args()
    print(args)

    # Build the model described by the config and score it with the evaluator
    config_parser = ConfigParser(args.config)
    model = config_parser.create_model()
    evaluator = config_parser.create_evaluator(model)
    score = evaluator.run(threshold=args.threshold,
                          save_dname=args.save_dname)
    print(score)
# 3. Run detection boxes, labels, probs = detector.detect(image, 0.5) # print(list(zip(labels, probs))) if len(labels) == 0: print(image_path, "nothing found") for (l, p) in zip(labels, probs): print(image_path, class_labels[l], p) # # 4. draw detected boxes # visualize_boxes(image, boxes, labels, probs, config_parser.get_labels()) # # # 5. plot # plt.imshow(image) # plt.show() if __name__ == '__main__': args = argparser.parse_args() # 1. create yolo model & load weights config_parser = ConfigParser(args.config) model = config_parser.create_model(skip_detect_layer=False) detector = config_parser.create_detector(model) labels = config_parser.get_labels() for image in args.images: predictImage(image, detector, labels)
import argparse from yolo.train import train_fn from yolo.config import ConfigParser argparser = argparse.ArgumentParser(description='train yolo-v3 network') argparser.add_argument('-c', '--config', default="configs/test.json", help='config file') if __name__ == '__main__': args = argparser.parse_args() # config = './configs/svhn.json' config = args.config config_parser = ConfigParser(config) # 1. create generator split_train_valid = config_parser.split_train_val() train_generator, valid_generator = config_parser.create_generator( split_train_valid=split_train_valid) # 2. create model model = config_parser.create_model() # 3. training learning_rate, save_dir, weight_name, n_epoches, checkpoint_path = config_parser.get_train_params( ) train_fn(model, train_generator, valid_generator,
# minimum_percentage_probability=50, # log_progress=False, display_percentage_probability=True display_object_name=True save_detected_video=True per_frame_function=None per_second_function=None per_minute_function=None video_complete_function=None return_detected_frame=False detection_timeout=None # 1. create yolo model & load weights args = argparser.parse_args() config_parser = ConfigParser(args.config) model = config_parser.create_model(skip_detect_layer=False) detector = config_parser.create_detector(model) # /////////////////////////////////////////////////////////////////// output_frames_dict = {} output_frames_count_dict = {} input_video = cv2.VideoCapture(input_file_path) output_video_filepath = output_file_path + '.avi' frame_width = int(input_video.get(3)) frame_height = int(input_video.get(4)) output_video = cv2.VideoWriter(output_video_filepath, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),