Code Example #1
    def __init__(self,
                 params: DatasetSeObjectsParams = DatasetSeObjectsParams(),
                 device: str = 'cuda'):
        super().__init__(device)

        self._se_io = self._get_installer(params)
        self._se_io.install_nodes(self)
Code Example #2
def get_common_params() -> DatasetSeObjectsParams:
    params = DatasetSeObjectsParams()
    params.dataset_size = SeDatasetSize.SIZE_24
    params.save_gpu_memory = True
    params.class_filter = None
    params.dataset_config = DatasetConfig.TRAIN_ONLY
    params.random_order = False
    return params
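The parameters returned by get_common_params() would typically be fed into a DatasetSeObjectsNode, as the other examples on this page do. A minimal usage sketch, reusing only names that already appear in the surrounding examples (DatasetSeObjectsNode, AllocatingCreator, and the node outputs):

# build the node from the shared params and allocate its memory blocks on the GPU
params = get_common_params()
node = DatasetSeObjectsNode(params, seed=None)
node.allocate_memory_blocks(AllocatingCreator('cuda'))

# one step produces an image and its ground-truth label
node.step()
image = node.outputs.image_output.tensor
label = node.outputs.task_to_agent_label_ground_truth.tensor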
Code Example #3
File: se_task0_topology.py  Project: jvitku/torchsim
def _get_installer(use_dataset: bool, curriculum: tuple, save_gpu_memory: bool, class_filter: List[int],
                   location_filter: float):
    if use_dataset:
        return SeIoTask0Dataset(
            DatasetSeObjectsParams(dataset_config=DatasetConfig.TRAIN_ONLY,
                                   save_gpu_memory=save_gpu_memory,
                                   class_filter=class_filter,
                                   location_filter_ratio=location_filter))
    else:
        return SeIoTask0(curriculum)
Code Example #4
def test_test_cycles():
    """Set the testing position to the last element and make step. Then test weather the cycle starts in the
    begining. """

    device = 'cuda'

    # init node
    params = DatasetSeObjectsParams()
    params.dataset_size = SeDatasetSize.SIZE_24
    params.dataset_config = DatasetConfig.TEST_ONLY
    params.random_order = False
    params.save_gpu_memory = False

    node = DatasetSeObjectsNode(params, seed=None)
    node.allocate_memory_blocks(AllocatingCreator(device))

    # make step
    node.step()

    # read outputs
    first_image = node.outputs.image_output.tensor
    first_label = node.outputs.task_to_agent_label_ground_truth.tensor

    # hack the position and make another step
    node._unit._pos = len(node._unit._test_images)
    node.step()

    # read another outputs
    output_image = node.outputs.image_output.tensor
    output_label = node.outputs.task_to_agent_label_ground_truth.tensor

    # assert they are both the same
    assert same(first_image, output_image)
    assert same(first_label, output_label)
Code Example #5
def test_se_task0_topologies_step(topology_class):
    """Test one step of all topologies which are supposed to solve SE task 0."""

    params = DatasetSeObjectsParams(save_gpu_memory=True)
    params.dataset_size = SeDatasetSize.SIZE_24

    topology = topology_class(use_dataset=True)

    topology.order_nodes()
    topology._update_memory_blocks()
    topology.step()

    landmark_id = SeIoAccessor.get_landmark_id_int(topology.se_io.outputs)
    label_id = SeIoAccessor.get_label_id(topology.se_io)

    assert type(landmark_id) is float
    assert landmark_id == FLOAT_NEG_INF

    assert type(label_id) is int
    assert label_id <= SeIoAccessor.get_num_labels(topology.se_io)

    assert SeIoAccessor.get_num_labels(
        topology.se_io) == DatasetSeObjectsUnit.NUM_LABELS
Code Example #6
    def __init__(self, use_dataset: bool = True):
        super().__init__("Task 0 - Base topology",
                         inputs=Task0BaseGroupInputs(self),
                         outputs=Task0BaseGroupOutputs(self))

        if use_dataset:
            params = DatasetSeObjectsParams(
                dataset_config=DatasetConfig.TRAIN_TEST, save_gpu_memory=True)
            self.se_node = DatasetSeObjectsNode(params)
        else:
            se_config = SpaceEngineersConnectorConfig()
            se_config.curriculum = list((0, -1))
            actions_descriptor = SpaceEngineersActionsDescriptor()
            self.se_node = SpaceEngineersConnectorNode(actions_descriptor,
                                                       se_config)
Code Example #7
def test_class_filter_and_data_sizes():
    """Collect n_steps outputs and verify their shapes and check that the class filter works."""

    device = 'cuda'

    n_steps = 20

    params = DatasetSeObjectsParams()
    params.dataset_size = SeDatasetSize.SIZE_24
    params.save_gpu_memory = True
    params.class_filter = [1, 2, 19, 5]
    params.dataset_config = DatasetConfig.TRAIN_ONLY
    params.random_order = True

    node = DatasetSeObjectsNode(params, seed=None)

    node.allocate_memory_blocks(AllocatingCreator(device))
    images, labels, labels_gt, = collect_data(node, n_steps)

    # labels and labels_gt should be equal in the training phase
    assert compare_sequences(labels, labels_gt)

    assert images[0].device.type == device
    assert labels[0].device.type == device
    assert labels_gt[0].device.type == device

    assert images[0].shape[0] == SeDatasetSize.SIZE_24.value
    assert images[0].shape[1] == SeDatasetSize.SIZE_24.value
    assert images[0].shape[2] == DatasetSeBase.N_CHANNELS

    assert labels[0].shape[0] == DatasetSeObjectsUnit.NUM_LABELS

    # go through all of the class labels and check if each is contained in the filter
    for label in labels:
        _, max_id = label.max(0)
        max_id = max_id.item()

        assert max_id in params.class_filter
Code Example #8
def test_correct_dimensions(device, start_training):
    """Install the SeDataset with the installer and test the installer accessor gives correct data types."""

    params = DatasetSeObjectsParams()
    params.dataset_size = SeDatasetSize.SIZE_24
    params.save_gpu_memory = True
    if start_training:
        params.dataset_config = DatasetConfig.TRAIN_TEST
    else:
        params.dataset_config = DatasetConfig.TEST_ONLY

    t = EmptyTopology(params)

    if start_training:
        assert t._se_io.get_testing_phase() == 0.0
    else:
        assert t._se_io.get_testing_phase() == 1.0

    t.order_nodes()
    t._update_memory_blocks()
    t.step()
    t.step()

    landmark_id = SeIoAccessor.get_landmark_id_int(t._se_io.outputs)
    label_id = SeIoAccessor.get_label_id(t._se_io)

    assert type(landmark_id) is float
    assert landmark_id == FLOAT_NEG_INF

    assert type(label_id) is int
    assert label_id <= SeIoAccessor.get_num_labels(t._se_io)

    assert SeIoAccessor.get_num_labels(
        t._se_io) == DatasetSeObjectsUnit.NUM_LABELS

    if start_training:
        assert t._se_io.get_testing_phase() == 0.0
    else:
        assert t._se_io.get_testing_phase() == 1.0

    print('done')
Code Example #9
def __init__(self,
             params: DatasetSeObjectsParams = DatasetSeObjectsParams(
                 dataset_config=DatasetConfig.TRAIN_TEST,
                 save_gpu_memory=True)):
    self._params = params
Code Example #10
    def __init__(
        self,
        baseline_seed: int = None,
        layer_sizes: List[int] = (100, 100),
        class_filter: List[int] = None,
        image_size=SeDatasetSize.SIZE_24,
        random_order: bool = False,
        noise_amp: float = 0.0,
        use_se: bool = False  # True: use a running instance of SE for getting data; False: use a dataset
    ):
        """

        Args:
            baseline_seed:
            layer_sizes:
            class_filter:
            image_size:
            random_order:
            noise_amp: if noise_amp > 0, superimpose noise with mean 0 and variance=noise_amp onto the image
        """
        super().__init__("Task 0 - SeNodeGroup",
                         outputs=Task0BaseGroupOutputs(self))

        self.use_se = use_se
        if use_se:
            self._se_io = SeIoGeneral()
            self._se_io.se_config.render_width = image_size.value
            self._se_io.se_config.render_height = image_size.value
        else:
            # dataset and params
            params = DatasetSeObjectsParams()
            params.dataset_config = DatasetConfig.TRAIN_ONLY
            params.dataset_size = image_size
            params.class_filter = class_filter
            params.random_order = random_order
            params.seed = baseline_seed
            self._se_io = SeIoTask0Dataset(params)

        self._se_io.install_nodes(self)

        if use_se:
            blank_task_control = ConstantNode(
                (self._se_io.se_config.TASK_CONTROL_SIZE, ))

            actions_node = ConstantNode((4, ), name="actions")

            blank_task_labels = ConstantNode((20, ), name="labels")

            self.add_node(blank_task_control)
            self.add_node(actions_node)
            self.add_node(blank_task_labels)

            Connector.connect(blank_task_control.outputs.output,
                              self._se_io.inputs.task_control)
            Connector.connect(actions_node.outputs.output,
                              self._se_io.inputs.agent_action)
            Connector.connect(blank_task_labels.outputs.output,
                              self._se_io.inputs.agent_to_task_label)

        # baselines for each layer
        self._baselines = []
        for layer_size in layer_sizes:

            node = RandomNumberNode(upper_bound=layer_size, seed=baseline_seed)
            self.add_node(node)
            self._baselines.append(node)

        # baseline for the labels separately
        self._label_baseline = ConstantNode(shape=self._se_io.get_num_labels(),
                                            constant=0,
                                            name='label_const')
        self._random_label_baseline = RandomNumberNode(
            upper_bound=self._se_io.get_num_labels(), seed=baseline_seed)

        self.add_node(self._label_baseline)
        self.add_node(self._random_label_baseline)

        if noise_amp > 0.0:
            # add the noise to the output image?
            _random_noise_params = RandomNoiseParams()
            _random_noise_params.distribution = 'Normal'
            _random_noise_params.amplitude = noise_amp
            self._random_noise = RandomNoiseNode(_random_noise_params)
            self.add_node(self._random_noise)
            Connector.connect(self._se_io.outputs.image_output,
                              self._random_noise.inputs.input)
            Connector.connect(self._random_noise.outputs.output,
                              self.outputs.image.input)
        else:
            Connector.connect(self._se_io.outputs.image_output,
                              self.outputs.image.input)

        Connector.connect(self._se_io.outputs.task_to_agent_label,
                          self.outputs.labels.input)
Code Example #11
def test_save_gpu():
    """Collect n-steps outputs and verify that the sequences produced with save_gpu_memory true/false are the same."""

    device = 'cuda'

    n_steps = 20

    params = DatasetSeObjectsParams()
    params.dataset_size = SeDatasetSize.SIZE_24
    params.dataset_config = DatasetConfig.TRAIN_ONLY
    params.random_order = False

    params_1 = copy(params)
    params_1.save_gpu_memory = True
    node = DatasetSeObjectsNode(params_1, seed=None)
    node.allocate_memory_blocks(AllocatingCreator(device))
    gpu_true_images, gpu_true_labels, labels_gt, = collect_data(node, n_steps)

    params_2 = copy(params)
    params_2.save_gpu_memory = False
    node = DatasetSeObjectsNode(params_2, seed=None)
    node.allocate_memory_blocks(AllocatingCreator(device))
    gpu_false_images, gpu_false_labels, labels_gt, = collect_data(
        node, n_steps)

    # note that here the epsilon is required (save_gpu introduces some rounding problems here)
    assert compare_sequences(gpu_true_images, gpu_false_images, epsilon=0.0001)
    assert compare_sequences(gpu_true_labels, gpu_false_labels, epsilon=0.0001)
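The compare_sequences helper called by the tests above is not shown on this page; a hypothetical, minimal version consistent with how it is called (two equally long lists of tensors compared elementwise, with an optional epsilon tolerance for the save_gpu_memory rounding mentioned in the comment) could look like this:

def compare_sequences(seq_a, seq_b, epsilon: float = 0.0) -> bool:
    """Return True if two sequences of torch tensors match elementwise within epsilon."""
    if len(seq_a) != len(seq_b):
        return False
    # compare each pair of tensors; the maximum absolute difference must stay within epsilon
    return all((a - b).abs().max().item() <= epsilon
               for a, b in zip(seq_a, seq_b))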