def main(args):
    """Show detected objects with boxes, labels and prediction scores in a video stream."""
    # Load the YOLO-v3 model with pretrained weights
    print("Create YoloV3 model")
    config_parser = ConfigParser(args.config)
    model = config_parser.create_model(skip_detect_layer=False)
    detector = config_parser.create_detector(model)

    # Open the video stream
    cap = cv2.VideoCapture(args.camera)
    if not cap.isOpened():
        print("(Error) Could not open video stream")
        exit()

    # Detect objects in the stream
    times = []
    detect = 0
    while True:
        # Grab the next frame; detection itself only runs every nth frame,
        # because it is too slow to keep up with every frame...
        ret, image = cap.read()
        #image, _ = resize_image(image, None, config_parser.get_net_size(), keep_ratio=True)
        if not ret:
            print("(Error) Lost connection to video stream")
            break

        # Detect objects and measure timing
        if detect <= 0:
            t1 = time.time()
            min_prob = 0.90
            boxes, labels, probs = detector.detect(image, min_prob)
            t2 = time.time()
            times.append(t2 - t1)
            times = times[-20:]   # keep a rolling window of the last 20 timings
            detect = 50           # skip the next 50 frames before detecting again
        detect -= 1

        # Display the detected objects and the average detection time
        visualize_boxes(image, boxes, labels, probs, config_parser.get_labels())
        image = cv2.putText(
            image,
            "Time: {:.2f}ms".format(sum(times) / len(times) * 1000),
            (0, 30),
            cv2.FONT_HERSHEY_COMPLEX_SMALL,
            1, (0, 0, 255), 2)
        cv2.imshow('Frame', image)

        # Exit with 'q'
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
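A minimal sketch of the command-line wiring that main(args) above expects: it only reads args.config and args.camera, so an argparse setup in the style of the other scripts in this repo would drive it. The script entry point, argument defaults, and the --camera flag spelling are assumptions for illustration, not taken from the original file.

import argparse

argparser = argparse.ArgumentParser(
    description='detect objects in a video stream with yolo-v3')
argparser.add_argument(
    '-c',
    '--config',
    default="configs/svhn.json",   # assumed default; point this at your own config
    help='config file')
argparser.add_argument(
    '--camera',
    type=int,
    default=0,                     # assumed default: first attached camera
    help='camera index passed to cv2.VideoCapture')

if __name__ == '__main__':
    main(argparser.parse_args())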
def __init__(self, trained_tracknet_network, config_path='pretrained_model/kitti.json', bbox_size=128):
    # Get the TrackNet model
    self.identifier = trained_tracknet_network
    self.config_path = config_path
    self.bbox_size = bbox_size

    # Create the YOLO-v3 model and load the weights
    config_parser = ConfigParser(self.config_path)
    model = config_parser.create_model(skip_detect_layer=False)
    self.detector = config_parser.create_detector(model)
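A hedged usage sketch for the constructor above. The enclosing class is not shown in the snippet, so the class name (Tracker), the TrackNet loader, and the frame variable are placeholders only; the detector call mirrors how detector.detect() is used elsewhere in this repo.

# Hypothetical usage -- class name and TrackNet loader are assumptions:
# tracknet_model = ...  # a trained TrackNet network, loaded elsewhere
# tracker = Tracker(tracknet_model,
#                   config_path='pretrained_model/kitti.json',
#                   bbox_size=128)
# boxes, labels, probs = tracker.detector.detect(frame, 0.5)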
def main():
    args = argparser.parse_args()

    # 1. create yolo model & load weights
    config_parser = ConfigParser(args.config)
    model = config_parser.create_model(skip_detect_layer=False)
    detector = config_parser.create_detector(model)
    labels = config_parser.get_labels()

    for image in args.images:
        predictImage(image, detector, labels)
    saveResults()

    return 0
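predictImage() and saveResults() are not shown above. The sketch below gives one plausible shape for predictImage, mirroring the single-image script further down; the body, the 0.5 confidence threshold, and the module-level results list are assumptions for illustration, not the repository's actual helpers.

import cv2

results = []  # hypothetical accumulator that saveResults() would write out

def predictImage(image_path, detector, labels):
    # Load the image and convert OpenCV's BGR layout to RGB
    image = cv2.imread(image_path)
    image = image[:, :, ::-1]
    # Detect objects above an assumed 0.5 confidence threshold
    boxes, detected_labels, probs = detector.detect(image, 0.5)
    # Draw the detections onto the image and remember them for saveResults()
    visualize_boxes(image, boxes, detected_labels, probs, labels)
    results.append((image_path, boxes, detected_labels, probs))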
argparser = argparse.ArgumentParser(
    description='train yolo-v3 network')

argparser.add_argument(
    '-c',
    '--config',
    default="configs/svhn.json",
    help='config file')


if __name__ == '__main__':
    args = argparser.parse_args()
    config_parser = ConfigParser(args.config)

    # 1. create generator
    train_generator, valid_generator = config_parser.create_generator()

    # 2. create model
    model = config_parser.create_model()

    # 3. training
    learning_rate, save_dname, n_epoches = config_parser.get_train_params()
    train_fn(model,
             train_generator,
             valid_generator,
             learning_rate=learning_rate,
             save_dname=save_dname,
             num_epoches=n_epoches)
    help='config file')
argparser.add_argument(
    '-i',
    '--image',
    default="tests/samples/sample.jpeg",
    help='path to image file')


if __name__ == '__main__':
    args = argparser.parse_args()
    image_path = args.image

    # 1. create yolo model & load weights
    config_parser = ConfigParser(args.config)
    model = config_parser.create_model(skip_detect_layer=False)
    detector = config_parser.create_detector(model)

    # 2. load image (OpenCV reads BGR; reverse the channels to get RGB)
    image = cv2.imread(image_path)
    image = image[:, :, ::-1]

    # 3. run detection
    boxes, labels, probs = detector.detect(image, 0.5)
    print(probs)

    # 4. draw detected boxes
    visualize_boxes(image, boxes, labels, probs, config_parser.get_labels())

    # 5. plot
    plt.imshow(image)
    plt.show()
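When this script runs on a machine without a display, plotting with matplotlib is of little use; writing the annotated image to disk is a common alternative. The line below is a sketch, not part of the original script: the output filename is arbitrary, and the channels are flipped back to BGR because cv2.imwrite expects BGR input.

import cv2

cv2.imwrite("detected.png", image[:, :, ::-1])  # assumed output path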