def __init__(self, video_file, queue_sz=128, transforms=None):
    """Open a video file stream and read its total frame count.

    Args:
        video_file: Path to the video file (resolved via ``get_realpath``).
        queue_sz: Frame-queue size forwarded to the base stream class.
        transforms: Optional list of frame transforms; ``None`` means no
            transforms (backward compatible with the old ``[]`` default).

    Emits a warning when the container reports a non-positive frame
    count, which usually indicates a corrupted file.
    """
    # Fix: avoid the shared mutable default argument ([]) — create a
    # fresh list per call instead.
    transforms = [] if transforms is None else transforms
    super().__init__(queue_sz=queue_sz, transforms=transforms)
    self.stream = cv.VideoCapture(get_realpath(video_file))
    # CAP_PROP_FRAME_COUNT may be 0/negative for broken or streaming files.
    self.total_video_frames = int(self.stream.get(cv.CAP_PROP_FRAME_COUNT))
    if self.total_video_frames <= 0:
        warning(
            'WARNING: Could not get the total number of frames of the video. The file may be corrupted!'
        )
def __init__(self, params_file, keep_portions=True):
    """Load camera calibration parameters and precompute rectification maps.

    Args:
        params_file: Path to a gzip-compressed pickle containing the keys
            ``'image shape'``, ``'K'`` (camera matrix) and ``'D'``
            (distortion coefficients).
        keep_portions: When True, derive an optimal new camera matrix
            (alpha=0.7 balance between retained pixels and cropping);
            otherwise reuse ``K`` unchanged.

    NOTE(review): ``pickle.load`` executes arbitrary code from the file —
    only load trusted calibration files.
    """
    # Fix: close the gzip handle deterministically instead of leaking it.
    with gzip.open(get_realpath(params_file)) as f:
        params = pickle.load(f)
    self.img_shape = params['image shape']
    self.k = params['K']
    self.d = params['D']
    if keep_portions:
        # Returns (new_camera_matrix, roi); the ROI is not needed here.
        self.k_optimal, _ = cv.getOptimalNewCameraMatrix(
            self.k, self.d, self.img_shape, 0.7, self.img_shape, True
        )
    else:
        self.k_optimal = self.k
    # Forward undistortion maps (fisheye model) and their inverses.
    self.map1, self.map2 = cv.fisheye.initUndistortRectifyMap(
        self.k, self.d, None, self.k_optimal, self.img_shape, cv.CV_16SC2
    )
    self.imap1, self.imap2 = self._get_inverse_mappings()
parser.add_argument(
    '-p',
    '--percent-train-samples',
    type=float,
    default=0.7,
    help='The percentage of train samples. Default is 70%%',
)
parser.add_argument(
    '--frames-ext',
    type=str,
    default='jpg',
    help='The extension of the input frames.',
)
args = parser.parse_args()

# Resolve the VOC-style dataset layout: frames live under JPEGImages.
dataset_folder = get_realpath(args.dataset_folder)
frames_folder = os.path.join(dataset_folder, 'JPEGImages')
assert os.path.isdir(frames_folder)

# Number of training samples taken from the front of the key list.
frames_keys = _get_frames_keys(frames_folder, args.frames_ext)
n_train = int(args.percent_train_samples * len(frames_keys))

# Image-set lists go to ImageSets/Main; create the folder on demand.
sets_folder = os.path.join(dataset_folder, 'ImageSets', 'Main')
if not os.path.isdir(sets_folder):
    os.makedirs(sets_folder)

train_keys = frames_keys[:n_train]
with open(os.path.join(sets_folder, 'trainval.txt'), 'w+') as trainval_file:
    trainval_file.writelines(f'{key}\n' for key in train_keys)
'--output-name', type=str, required=False, help= 'The name of the output file containing the camera calibration parameters.', ) parser.add_argument( '-of', '--output-folder', type=str, required=False, help='Path to the folder where the output, file will be stored.', ) args = parser.parse_args() input_folder = get_realpath(args.input_folder) if not os.path.isdir(input_folder): print('The given input folder is not valid') sys.exit(-1) if args.output_folder: output_folder = get_realpath(args.output_folder) if not os.path.isdir(output_folder): print('The given output folder is not valid') sys.exit(-1) else: output_folder = get_realpath('./resources') if not os.path.isdir(output_folder): os.makedirs(output_folder) if args.output_name:
) parser.add_argument( '-o', '--output', type=str, required=True, help='The output folder of the dataset.', ) parser.add_argument( '--frames-ext', type=str, default='jpg', help='The extension of the input frames.', ) args = parser.parse_args() input_annos_folder = get_realpath(args.input_annos) input_frames_folder = get_realpath(args.input_frames) output_folder = get_realpath(args.output) if not os.path.isdir(output_folder): os.makedirs(output_folder) anno_data = get_annotations_dict( input_frames_folder, input_annos_folder, args.frames_ext ) _save_database( anno_data, output_folder, args.frames_ext )
from wagon_tracking.tracking import WagonTracker
from wagon_tracking.transforms import DistortionRectifier
from wagon_tracking.utils import get_realpath
from wagon_tracking.videostream import VideoFileStream, VideoLiveStream

# Require: net type, model path, label path, and a video source.
if len(sys.argv) < 5:
    # Fix: the adjacent string literals concatenated with no separator,
    # printing "<label path><video file ...>"; add the missing space
    # (matches the companion detection script's usage line).
    print(
        'Usage: python run_ssd_example.py <net type> <model path> <label path>'
        ' <video file | device location> [camera_param_file]'
    )
    sys.exit(0)
net_type = sys.argv[1]
model_path = sys.argv[2]
label_path = sys.argv[3]
video_path = sys.argv[4]

# An optional 5th argument provides camera parameters and enables
# distortion rectification of every frame.
if len(sys.argv) > 5:
    camera_parameters = get_realpath(sys.argv[5])
    transform = [DistortionRectifier(camera_parameters)]
else:
    transform = []

if os.path.exists(video_path):
    # The source is a path on disk: capture from file.
    cap = VideoFileStream(video_path, queue_sz=64, transforms=transform)
else:
    # Otherwise treat it as a device location: capture from camera.
    cap = VideoLiveStream(video_path, transforms=transform)

# Per-frame display time in milliseconds, derived from the stream FPS.
frame_time = int(1 / cap.get(cv2.CAP_PROP_FPS) * 1000)

cap.start()

'''-------------------------- Test code --------------------------'''
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
from wagon_tracking.detection import WagonDetector
from wagon_tracking.transforms import DistortionRectifier
from wagon_tracking.videostream import VideoFileStream, VideoLiveStream
from wagon_tracking.utils import get_realpath

# Expect: net type, model path, label path, video source, plus an
# optional camera-parameters file.
if len(sys.argv) < 5:
    print(
        'Usage: python run_ssd_example.py <net type> <model path> <label path>'
        ' <video file | device location> [camera_param_file]'
    )
    sys.exit(0)
net_type, model_path, label_path, video_path = sys.argv[1:5]

# Default calibration file; an explicit 6th argument overrides it.
camera_parameters = get_realpath('resources/camera_parameters.pkl.gz')
if len(sys.argv) > 5:
    camera_parameters = get_realpath(sys.argv[5])
transform = [DistortionRectifier(camera_parameters)]

if os.path.exists(video_path):
    # Existing filesystem path: read frames from a file.
    cap = VideoFileStream(video_path, queue_sz=64, transforms=transform)
else:
    # Otherwise assume a live device location.
    cap = VideoLiveStream(video_path, transforms=transform)

# Milliseconds per frame at the stream's reported FPS.
frame_time = int(1 / cap.get(cv2.CAP_PROP_FPS) * 1000)

cap.start()

detector = WagonDetector(net_type, label_path, model_path)