def make_instance(self, *args, **kwargs):
    """Create a MockExperiment, filling in 'trial_map' and 'result_map' defaults.

    The default trial_map links each saved system to every saved image source
    and the identifier of a trial result produced by that system; the default
    result_map links each saved trial result to the benchmark result each
    benchmark produced for it. Entries without identifiers are skipped.
    """
    trial_map = {}
    for system in self.systems:
        if system.identifier is None:
            continue
        source_to_trial = {}
        for trial_result in self.trial_results:
            if trial_result.identifier is None or trial_result.system_id != system.identifier:
                continue
            for image_source in self.image_sources:
                # Later matching trial results overwrite earlier ones, as in a
                # dict comprehension with the same loop order.
                if image_source.identifier is not None:
                    source_to_trial[image_source.identifier] = trial_result.identifier
        trial_map[system.identifier] = source_to_trial

    result_map = {}
    for trial_result in self.trial_results:
        if trial_result.identifier is None:
            continue
        benchmark_to_result = {}
        for benchmark in self.benchmarks:
            if benchmark.identifier is None:
                continue
            for benchmark_result in self.benchmark_results:
                if (benchmark_result.identifier is not None and
                        benchmark_result.benchmark == benchmark.identifier and
                        benchmark_result.trial_result == trial_result.identifier):
                    benchmark_to_result[benchmark.identifier] = benchmark_result.identifier
        result_map[trial_result.identifier] = benchmark_to_result

    du.defaults(kwargs, {'trial_map': trial_map, 'result_map': result_map})
    return MockExperiment(*args, **kwargs)
def test_hash(self):
    """Identical constructor kwargs hash equal; changing any one field changes the hash."""
    kwargs = {
        'class_names': ('class_1', ),
        'bounding_box': (152, 239, 14, 78),
        'label_color': (127, 33, 67),
        'relative_pose': tf.Transform(location=(123, -45, 23), rotation=(0.5, 0.23, 0.1)),
        'object_id': 'LabelledObject-18569'
    }
    a = imeta.LabelledObject(**kwargs)
    b = imeta.LabelledObject(**kwargs)
    self.assertEqual(hash(a), hash(b))

    # Each override perturbs exactly one field of the reference kwargs.
    # NOTE(review): 'class_41' is a bare string while the base value is a
    # tuple — presumably intended as ('class_41',); confirm against
    # LabelledObject's handling of class_names.
    single_field_overrides = [
        {'class_names': 'class_41'},
        {'bounding_box': (47, 123, 45, 121)},
        {'label_color': (247, 123, 14)},
        {'relative_pose': tf.Transform((62, -81, 43), (0.1, 0.1, 0.1))},
        {'object_id': 'Cat-12'},
    ]
    for override in single_field_overrides:
        b = imeta.LabelledObject(**du.defaults(override, kwargs))
        self.assertNotEqual(hash(a), hash(b))
def deserialize(cls, serialized, **kwargs):
    """Rebuild an instance from its serialized form.

    Missing 'config' / 'dataset_ids' entries in the serialized dict fall back
    to empty dicts; present entries are merged in via du.defaults.
    """
    config = {}
    dataset_ids = {}
    for key, target in (('config', config), ('dataset_ids', dataset_ids)):
        if key in serialized:
            du.defaults(target, serialized[key])
    return cls(config, dataset_ids, **kwargs)
def __init__(self, config, dataset_ids=None):
    """Initialize per-dataset enable flags and the known dataset-id map.

    Every name in dataset_names starts disabled; a truthy entry in config
    enables the matching flag. dataset_ids, when given, seeds the id map.
    """
    # Equivalent to defaulting every flag to False and enabling truthy entries.
    self._config = {name: name in config and bool(config[name])
                    for name in dataset_names}
    self._dataset_ids = {}
    if dataset_ids is not None:
        du.defaults(self._dataset_ids, dataset_ids)
def make_image_collection(**kwargs):
    """Build a minimal ImageCollection for tests.

    Defaults to a single generated image (only built when 'images' is not
    supplied), a sequential sequence type, and a fresh ObjectId.
    """
    if 'images' not in kwargs:
        # Built lazily so make_image() is not invoked when images are supplied.
        kwargs['images'] = {1: make_image()}
    fallback = {
        'type_': core.sequence_type.ImageSequenceType.SEQUENTIAL,
        'id_': bson.ObjectId(),
    }
    du.defaults(kwargs, fallback)
    return core.image_collection.ImageCollection(**kwargs)
def make_image(*args, **kwargs):
    """Build an ImageEntity with random 32x32 RGB data and synthetic metadata defaults."""
    fallback = {
        'data': np.random.randint(0, 255, (32, 32, 3), dtype='uint8'),
        'data_id': 0,
        'metadata': imeta.ImageMetadata(
            source_type=imeta.ImageSourceType.SYNTHETIC,
            hash_=b'\x1f`\xa8\x8aR\xed\x9f\x0b',
            camera_pose=tf.Transform()
        ),
    }
    du.defaults(kwargs, fallback)
    return core.image_entity.ImageEntity(*args, **kwargs)
def update_schema(serialized: dict, db_client: database.client.DatabaseClient):
    """Migrate a serialized VisualSlamExperiment to the current schema and prune dead references."""
    version = dh.get_schema_version(
        serialized, 'experiments:visual_slam:VisualSlamExperiment')

    # unversioned -> 1: 'simulators' was renamed to 'kitti_simulators'
    if version < 1:
        if 'simulators' in serialized:
            serialized['kitti_simulators'] = serialized['simulators']
            del serialized['simulators']
        version = 1
    # 1 -> 2: the per-dataset simulator maps are merged into one 'simulators' map.
    # NOTE(review): unlike the branch above, version is never bumped to 2 here —
    # confirm whether the stored schema version is updated elsewhere.
    if version < 2:
        simulators = {}
        for old_key in ('kitti_simulators', 'euroc_simulators', 'tum_simulators'):
            if old_key in serialized:
                simulators = du.defaults(simulators, serialized[old_key])
        serialized['simulators'] = simulators

    # Drop references to documents that no longer exist in the database.
    if 'libviso' in serialized and not dh.check_reference_is_valid(
            db_client.system_collection, serialized['libviso']):
        del serialized['libviso']
    if 'orbslam_systems' in serialized:
        for key in list(serialized['orbslam_systems'].keys()):
            if not dh.check_reference_is_valid(
                    db_client.system_collection, serialized['orbslam_systems'][key]):
                del serialized['orbslam_systems'][key]
    if 'simulators' in serialized:
        for key in list(serialized['simulators'].keys()):
            if not dh.check_reference_is_valid(
                    db_client.image_source_collection, serialized['simulators'][key]):
                del serialized['simulators'][key]
    # NOTE(review): the benchmark references below are validated against
    # system_collection — presumably they should use the benchmarks collection;
    # verify before relying on this pruning.
    for benchmark_key in ('benchmark_rpe', 'benchmark_ate',
                          'benchmark_trajectory_drift', 'benchmark_tracking'):
        if benchmark_key in serialized and not dh.check_reference_is_valid(
                db_client.system_collection, serialized[benchmark_key]):
            del serialized[benchmark_key]
def make_instance(self, *args, **kwargs):
    """Build a SLAMTrialResult with randomized trajectories and tracking states."""
    states = [
        ts.TrackingState.NOT_INITIALIZED,
        ts.TrackingState.OK,
        ts.TrackingState.LOST,
    ]

    def _random_pose():
        # A pose with uniform random location and (unnormalized) rotation.
        return tf.Transform(location=np.random.uniform(-1000, 1000, 3),
                            rotation=np.random.uniform(0, 1, 4))

    fallback = {
        'system_id': np.random.randint(10, 20),
        'trajectory': {np.random.uniform(0, 600): _random_pose()
                       for _ in range(100)},
        'ground_truth_trajectory': {np.random.uniform(0, 600): _random_pose()
                                    for _ in range(100)},
        'tracking_stats': {np.random.uniform(0, 600):
                           states[np.random.randint(0, len(states))]
                           for _ in range(100)},
        'sequence_type': core.sequence_type.ImageSequenceType.SEQUENTIAL,
        'system_settings': {'a': np.random.randint(20, 30)},
    }
    kwargs = du.defaults(kwargs, fallback)
    return vs.SLAMTrialResult(*args, **kwargs)
def make_instance(self, *args, **kwargs):
    """Build a BenchmarkTrialTask with fresh ids and random job parameters."""
    fallback = {
        'trial_result_id': bson.ObjectId(),
        'benchmark_id': bson.ObjectId(),
        'state': batch_analysis.task.JobState.RUNNING,
        'num_cpus': np.random.randint(0, 1000),
        'num_gpus': np.random.randint(0, 1000),
        'memory_requirements': '{}MB'.format(np.random.randint(0, 50000)),
        'expected_duration': '{0}:{1}:{2}'.format(
            np.random.randint(1000), np.random.randint(60), np.random.randint(60)),
        'node_id': 'node-{}'.format(np.random.randint(10000)),
        'job_id': np.random.randint(1000),
    }
    kwargs = du.defaults(kwargs, fallback)
    return task.BenchmarkTrialTask(*args, **kwargs)
def make_instance(self, *args, **kwargs):
    """Build a TrackingBenchmarkResult with random lost intervals and padded totals."""
    kwargs = du.defaults(kwargs, {
        'benchmark_id': np.random.randint(0, 10),
        'trial_result_id': np.random.randint(10, 20),
        'settings': {},
    })
    if 'lost_intervals' not in kwargs:
        intervals = []
        for i in range(50):
            intervals.append(track_bench.LostInterval(
                start_time=np.random.uniform(i, i + 0.49),
                end_time=np.random.uniform(i + 0.5, i + 1),
                distance=np.random.uniform(0, 1000),
                num_frames=np.random.randint(0, 100)))
        kwargs['lost_intervals'] = intervals
    # Totals are padded beyond the sums over the intervals.
    lost = kwargs['lost_intervals']
    if 'total_distance' not in kwargs:
        kwargs['total_distance'] = np.sum(np.array([v.distance for v in lost])) + 10000
    if 'total_time' not in kwargs:
        kwargs['total_time'] = np.sum(np.array([v.duration for v in lost])) + 10
    if 'total_frames' not in kwargs:
        kwargs['total_frames'] = np.sum(np.array([v.frames for v in lost])) + 10000
    return track_res.TrackingBenchmarkResult(*args, **kwargs)
def make_instance(self, *args, **kwargs):
    """Build a plain BenchmarkResult with fixed test ids."""
    fallback = {'benchmark_id': 1, 'trial_result_id': 2, 'success': True}
    kwargs = du.defaults(kwargs, fallback)
    return core.benchmark.BenchmarkResult(*args, **kwargs)
def make_instance(self, *args, **kwargs):
    """Build a FeatureDetectorResult; timestamps and poses are derived from the keypoints."""
    kwargs = du.defaults(kwargs, {
        'system_id': np.random.randint(10, 20),
        'keypoints': self.keypoints,
        'sequence_type': core.sequence_type.ImageSequenceType.SEQUENTIAL,
        'system_settings': {'a': np.random.randint(20, 30)},
        'keypoints_id': bson.ObjectId(),
    })
    keypoint_ids = kwargs['keypoints'].keys()
    if 'timestamps' not in kwargs:
        # Jittered, strictly increasing timestamps, one per keypoint frame.
        kwargs['timestamps'] = {
            idx + np.random.uniform(0, 1): identifier
            for idx, identifier in enumerate(keypoint_ids)
        }
    if 'camera_poses' not in kwargs:
        kwargs['camera_poses'] = {
            identifier: tf.Transform(location=np.random.uniform(-1000, 1000, 3),
                                     rotation=np.random.uniform(-1, 1, 4))
            for identifier in keypoint_ids
        }
    return feature_result.FeatureDetectorResult(*args, **kwargs)
def make_instance(self, *args, **kwargs):
    """Build a FailedBenchmark with fixed test ids and a placeholder reason."""
    fallback = {'benchmark_id': 1, 'trial_result_id': 2, 'reason': 'For test purposes'}
    kwargs = du.defaults(kwargs, fallback)
    return core.benchmark.FailedBenchmark(*args, **kwargs)
def make_instance(self, *args, **kwargs):
    """Build a GenerateDatasetTask with fresh ids and random job parameters."""
    fallback = {
        'controller_id': bson.ObjectId(),
        'simulator_id': bson.ObjectId(),
        'simulator_config': {},
        'state': batch_analysis.task.JobState.RUNNING,
        'num_cpus': np.random.randint(0, 1000),
        'num_gpus': np.random.randint(0, 1000),
        'memory_requirements': '{}MB'.format(np.random.randint(0, 50000)),
        'expected_duration': '{0}:{1}:{2}'.format(
            np.random.randint(1000), np.random.randint(60), np.random.randint(60)),
        'node_id': 'node-{}'.format(np.random.randint(10000)),
        'job_id': np.random.randint(1000),
    }
    kwargs = du.defaults(kwargs, fallback)
    return task.GenerateDatasetTask(*args, **kwargs)
def make_instance(self, *args, **kwargs):
    """Build a VisualOdometryResult with fixed frame deltas and ground-truth trajectory."""
    # Estimated per-frame relative motion, keyed by timestamp.
    frame_deltas = {
        0.3333: tf.Transform((0.1, 0.01, -0.01), (-0.01, 0.06, 1.001)),
        0.6666: tf.Transform((0.92, 0.12, 0.02), (-0.1, 0.01, 0.12)),
        1.0: tf.Transform((0.4, 0.03, -0.03), (0.03, -0.12, 0.772)),
        1.3333: tf.Transform((0.84, -0.02, 0.09), (0.013, 0.28, 0.962)),
        1.6666: tf.Transform((0.186, -0.014, -0.26), (0.7, -0.37, 0.9)),
        2.0: tf.Transform((0.37, 0.38, 0.07), (0.38, -0.27, 0.786)),
    }
    # Absolute ground-truth poses, keyed by the same timestamps.
    ground_truth = {
        0.3333: tf.Transform((0.1, 0.01, -0.01), (-0.01, 0.06, 1.001)),
        0.6666: tf.Transform((25, 162, 26), (-0.1, 0.01, -0.12)),
        1.0: tf.Transform((26, 67, 9), (0.03, -0.12, 0.572)),
        1.3333: tf.Transform((82, 3, 78), (0.13, 0.25, 0.666)),
        1.6666: tf.Transform((9, 78, 6), (0.27, -0.7, 0.2)),
        2.0: tf.Transform((22, 89, 2), (0.7, -0.26, 0.87)),
    }
    kwargs = du.defaults(kwargs, {
        'system_id': bson.objectid.ObjectId(),
        'sequence_type': core.sequence_type.ImageSequenceType.SEQUENTIAL,
        'system_settings': {'a': 1051},
        'frame_deltas': frame_deltas,
        'ground_truth_trajectory': ground_truth,
    })
    return vo_res.VisualOdometryResult(*args, **kwargs)
def make_instance(self, *args, **kwargs):
    """Build a LibVisOSystem with fixed stereo camera parameters."""
    camera_params = {
        'focal_distance': 700,
        'cu': 400,
        'cv': 300,
        'base': 0.6,
    }
    kwargs = du.defaults(kwargs, camera_params)
    return viso.LibVisOSystem(*args, **kwargs)
def make_image(**kwargs):
    """Build a core.image.Image with default ramp data and synthetic metadata.

    A dict passed as 'metadata' is treated as overrides merged over the default
    metadata kwargs (note: du.defaults fills the passed dict in place).
    """
    data = (kwargs['data'] if 'data' in kwargs
            else np.array([list(range(i, i + 100)) for i in range(100)]))
    metadata_kwargs = {
        'source_type': imeta.ImageSourceType.SYNTHETIC,
        'hash_': b'\xa5\xc9\x08\xaf$\x0b\x116',
    }
    if 'metadata' in kwargs and isinstance(kwargs['metadata'], dict):
        metadata_kwargs = du.defaults(kwargs['metadata'], metadata_kwargs)
        del kwargs['metadata']
    kwargs = du.defaults(kwargs, {
        'data': data,
        'metadata': imeta.ImageMetadata(**metadata_kwargs),
    })
    return core.image.Image(**kwargs)
def make_instance(self, *args, **kwargs):
    """Build an ImageCollection over the fixture images with a mock db client."""
    fallback = {
        'images': self.images,
        'type_': core.sequence_type.ImageSequenceType.SEQUENTIAL,
        'db_client_': self.create_mock_db_client(),
    }
    kwargs = du.defaults(kwargs, fallback)
    return ic.ImageCollection(*args, **kwargs)
def make_instance(self, *args, **kwargs):
    """Build a BenchmarkRPEResult with random per-timestamp error samples."""
    def _random_errors(low, high):
        # 100 samples keyed by random timestamps in [0, 600).
        return {np.random.uniform(0, 600): np.random.uniform(low, high)
                for _ in range(100)}

    fallback = {
        'benchmark_id': np.random.randint(0, 10),
        'trial_result_id': np.random.randint(10, 20),
        'translational_error': _random_errors(-100, 100),
        'rotational_error': _random_errors(-np.pi, np.pi),
        'rpe_settings': {},
    }
    kwargs = du.defaults(kwargs, fallback)
    return rpe_res.BenchmarkRPEResult(*args, **kwargs)
def make_instance(self, *args, **kwargs):
    """Build a TrialComparisonResult with fixed test ids."""
    fallback = {
        'benchmark_id': 1,
        'trial_result_id': 2,
        'reference_id': 3,
        'success': True,
    }
    kwargs = du.defaults(kwargs, fallback)
    return core.trial_comparison.TrialComparisonResult(*args, **kwargs)
def make_instance(self, *args, **kwargs):
    """Build a LoopingCollection wrapping a fresh image collection, repeated three times."""
    fallback = {
        'inner': make_image_collection(),
        'repeats': 3,
        'type_override': core.sequence_type.ImageSequenceType.SEQUENTIAL,
    }
    kwargs = du.defaults(kwargs, fallback)
    return looper.LoopingCollection(*args, **kwargs)
def make_instance(self, *args, **kwargs):
    """Build a BenchmarkComparisonResult with fixed test ids."""
    fallback = {
        'benchmark_comparison_id': 1,
        'benchmark_result': 2,
        'reference_benchmark_result': 3,
        'success': True,
    }
    kwargs = du.defaults(kwargs, fallback)
    return core.benchmark_comparison.BenchmarkComparisonResult(*args, **kwargs)
def make_instance(self, *args, **kwargs):
    """Build a FeatureDetectionComparisonResult with fresh ObjectIds and fixture changes."""
    fallback = {
        'benchmark_id': bson.ObjectId(),
        'trial_result_id': bson.ObjectId(),
        'reference_id': bson.ObjectId(),
        'feature_changes': self.changes,
        'changes_id': bson.ObjectId(),
    }
    kwargs = du.defaults(kwargs, fallback)
    return detection_comp.FeatureDetectionComparisonResult(*args, **kwargs)
def make_instance(self, *args, **kwargs):
    """Build a MatchBenchmarkResult with random per-timestamp match types."""
    # NOTE(review): TRUE_POSITIVE appears twice while TRUE_NEGATIVE is absent —
    # possibly intentional weighting, but verify one entry wasn't meant to be
    # TRUE_NEGATIVE.
    match_types = [
        match_res.MatchType.TRUE_POSITIVE,
        match_res.MatchType.FALSE_POSITIVE,
        match_res.MatchType.TRUE_POSITIVE,
        match_res.MatchType.FALSE_NEGATIVE,
    ]
    fallback = {
        'benchmark_id': np.random.randint(0, 10),
        'trial_result_id': np.random.randint(10, 20),
        'matches': {np.random.uniform(0, 600):
                    match_types[np.random.randint(0, len(match_types))]
                    for _ in range(100)},
        'settings': {},
    }
    kwargs = du.defaults(kwargs, fallback)
    return match_res.MatchBenchmarkResult(*args, **kwargs)
def make_instance(self, *args, **kwargs):
    """Build an AugmentedImageCollection with flip, rotate, and no-op augmenters."""
    fallback = {
        'inner': make_image_collection(),
        'augmenters': [
            simple_augments.HorizontalFlip(),
            simple_augments.Rotate270(),
            None,  # a None augmenter passes images through unmodified
        ],
    }
    kwargs = du.defaults(kwargs, fallback)
    return aug_coll.AugmentedImageCollection(*args, **kwargs)
def make_instance(self, *args, **kwargs):
    """Build a SurfDetector with a fixed detector configuration."""
    surf_config = {
        'hessian_threshold': 122.4,
        'num_octaves': 3,
        'num_octave_layers': 4,
        'extended': True,
        'upright': False,
    }
    kwargs = du.defaults(kwargs, {'config': surf_config})
    return surf_detector.SurfDetector(*args, **kwargs)
def make_instance(self, *args, **kwargs):
    """Build a FlythroughController with randomized motion limits."""
    fallback = {
        'max_speed': np.random.uniform(0, 10),
        'max_turn_angle': np.random.uniform(0, np.pi),
        'avoidance_radius': np.random.uniform(0, 10),
        'avoidance_scale': np.random.uniform(0, 10),
        'length': np.random.randint(0, 10000),
        'seconds_per_frame': np.random.uniform(0, 1),
    }
    kwargs = du.defaults(kwargs, fallback)
    return fly.FlythroughController(*args, **kwargs)
def make_instance(self, *args, **kwargs):
    """Build a SiftDetector with a fixed detector configuration."""
    sift_config = {
        'num_features': 0,
        'num_octave_layers': 4,
        'contrast_threshold': 0.04,
        'edge_threshold': 10,
        'sigma': 1.6,
    }
    kwargs = du.defaults(kwargs, {'config': sift_config})
    return sift_detector.SiftDetector(*args, **kwargs)
def test_is_depth_available_is_true_iff_all_images_have_depth_data(self):
    """is_depth_available holds only when every image carries depth data."""
    with_depth = ic.ImageCollection(
        images=self.images,
        type_=core.sequence_type.ImageSequenceType.SEQUENTIAL,
        db_client_=self.create_mock_db_client())
    self.assertTrue(with_depth.is_depth_available)

    # Adding a single image without depth data flips the property to False.
    mixed_images = du.defaults({1.7: make_image(depth_data=None)}, self.images)
    without_depth = ic.ImageCollection(
        type_=core.sequence_type.ImageSequenceType.SEQUENTIAL,
        images=mixed_images,
        db_client_=self.create_mock_db_client())
    self.assertFalse(without_depth.is_depth_available)
def make_instance(self, *args, **kwargs):
    """Build a generic TrialResult with a random system id and settings.

    Fix: np.random.randint(0, 1) has an exclusive upper bound and therefore
    always returned 0, so the default 'success' flag was never True. Using
    randint(0, 2) yields 0 or 1, making the default genuinely random as the
    bool() wrapper implies was intended.
    """
    fallback = {
        'system_id': np.random.randint(10, 20),
        'success': bool(np.random.randint(0, 2)),
        'sequence_type': core.sequence_type.ImageSequenceType.SEQUENTIAL,
        'system_settings': {'a': np.random.randint(30, 40)},
    }
    kwargs = du.defaults(kwargs, fallback)
    return core.trial_result.TrialResult(*args, **kwargs)