def test_save_gpu():
    """Collect n-steps outputs and verify that the sequences produced with save_gpu_memory true/false are the same."""

    device = 'cuda'

    n_steps = 20

    params = DatasetSeObjectsParams()
    params.dataset_size = SeDatasetSize.SIZE_24
    params.dataset_config = DatasetConfig.TRAIN_ONLY
    params.random_order = False

    params_1 = copy(params)
    params_1.save_gpu_memory = True
    node = DatasetSeObjectsNode(params_1, seed=None)
    node.allocate_memory_blocks(AllocatingCreator(device))
    gpu_true_images, gpu_true_labels, _ = collect_data(node, n_steps)

    params_2 = copy(params)
    params_2.save_gpu_memory = False
    node = DatasetSeObjectsNode(params_2, seed=None)
    node.allocate_memory_blocks(AllocatingCreator(device))
    gpu_false_images, gpu_false_labels, _ = collect_data(node, n_steps)

    # note that here the epsilon is required (save_gpu introduces some rounding problems here)
    assert compare_sequences(gpu_true_images, gpu_false_images, epsilon=0.0001)
    assert compare_sequences(gpu_true_labels, gpu_false_labels, epsilon=0.0001)
def test_test_cycles():
    """Set the testing position to the last element and make step. Then test weather the cycle starts in the
    begining. """

    device = 'cuda'

    # init node
    params = DatasetSeObjectsParams()
    params.dataset_size = SeDatasetSize.SIZE_24
    params.dataset_config = DatasetConfig.TEST_ONLY
    params.random_order = False
    params.save_gpu_memory = False

    node = DatasetSeObjectsNode(params, seed=None)
    node.allocate_memory_blocks(AllocatingCreator(device))

    # make step
    node.step()

    # read outputs
    first_image = node.outputs.image_output.tensor
    first_label = node.outputs.task_to_agent_label_ground_truth.tensor

    # hack the position and make another step
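    # (_pos is moved past the last test image, so the next step should wrap around to the first sample)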
    node._unit._pos = len(node._unit._test_images)
    node.step()

    # read another outputs
    output_image = node.outputs.image_output.tensor
    output_label = node.outputs.task_to_agent_label_ground_truth.tensor

    # assert they are both the same
    assert same(first_image, output_image)
    assert same(first_label, output_label)
def run_measurement(name, params, args, max_steps: int = None):
    """Runs the experiment with specified params, see the parse_test_args method for arguments"""

    experiment = Task0OnlineLearningTemplate(
        Task0ConvWideAdapter(),
        Task0ConvWideTopology,
        params,
        max_steps=read_max_steps(max_steps),
        # 8/10 of the max_steps are used for training, the rest for testing
        num_training_steps=read_max_steps(max_steps) // 10 * 8,
        num_classes=DatasetSeObjectsNode.label_size(),  # TODO make this somehow better
        num_layers=3,  # TODO parametrized
        measurement_period=4,
        # be aware that the mutual information depends on this value
        sliding_window_size=300,
        sliding_window_stride=50,
        sp_evaluation_period=200,
        save_cache=args.save,
        load_cache=args.load,
        clear_cache=args.clear,
        experiment_name=name,
        computation_only=args.computation_only,
        experiment_folder=args.alternative_results_folder,
        disable_plt_show=True,
        just_hide_labels=True  # do not switch to testing data, just hide the labels
    )

    if args.run_gui:
        run_just_model(Task0ConvWideTopology(**params[0]), gui=True)
    else:
        print(
            f'======================================= Measuring model: {name}')
        experiment.run()
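
# A minimal usage sketch for run_measurement (illustrative only; it assumes the parse_test_args()
# helper referenced in the docstring supplies the save/load/clear/computation_only/run_gui/
# alternative_results_folder attributes, and uses an empty parameter dict as a placeholder):
#
#     args = parse_test_args()
#     run_measurement('conv_wide_baseline', [{}], args, max_steps=1000)
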
def collect_data(node: DatasetSeObjectsNode, n_samples: int):
    """Collects n_samples from multiple steps of the DatasetSeObjectsNode and returns the outputs in lists"""
    labels = []
    labels_gt = []
    images = []

    for _ in range(n_samples):
        node.step()

        image = node.outputs.image_output.tensor  # TODO unresolved reference indication?
        label = node.outputs.task_to_agent_label.tensor
        label_gt = node.outputs.task_to_agent_label_ground_truth.tensor

        # TODO collect the metadata outputs ???

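        # clone the outputs so later steps (which rewrite the node's output tensors in place) do not overwrite them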
        images.append(image.clone())
        labels.append(label.clone())
        labels_gt.append(label_gt.clone())

    return images, labels, labels_gt
def test_class_filter_and_data_sizes():
    """Collect n_steps outputs and verify their shapes and check that the class filter works."""

    device = 'cuda'

    n_steps = 20

    params = DatasetSeObjectsParams()
    params.dataset_size = SeDatasetSize.SIZE_24
    params.save_gpu_memory = True
    params.class_filter = [1, 2, 19, 5]
    params.dataset_config = DatasetConfig.TRAIN_ONLY
    params.random_order = True

    node = DatasetSeObjectsNode(params, seed=None)

    node.allocate_memory_blocks(AllocatingCreator(device))
    images, labels, labels_gt = collect_data(node, n_steps)

    # labels and labels_gt should be equal in the training phase
    assert compare_sequences(labels, labels_gt)

    assert images[0].device.type == device
    assert labels[0].device.type == device
    assert labels_gt[0].device.type == device

    assert images[0].shape[0] == SeDatasetSize.SIZE_24.value
    assert images[0].shape[1] == SeDatasetSize.SIZE_24.value
    assert images[0].shape[2] == DatasetSeBase.N_CHANNELS
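    # i.e. each image is a square SIZE_24 x SIZE_24 tensor with N_CHANNELS channels last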

    assert labels[0].shape[0] == DatasetSeObjectsUnit.NUM_LABELS

    # go through all of the class labels and check if each is contained in the filter
    for label in labels:
        _, max_id = label.max(0)
        max_id = max_id.item()

        assert max_id in params.class_filter
    def __init__(self, use_dataset: bool = True):
        super().__init__("Task 0 - Base topology",
                         inputs=Task0BaseGroupInputs(self),
                         outputs=Task0BaseGroupOutputs(self))

        if use_dataset:
            params = DatasetSeObjectsParams(
                dataset_config=DatasetConfig.TRAIN_TEST, save_gpu_memory=True)
            self.se_node = DatasetSeObjectsNode(params)
        else:
            se_config = SpaceEngineersConnectorConfig()
            se_config.curriculum = [0, -1]
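            # curriculum: run task 0; the -1 entry presumably marks the end of the curriculum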
            actions_descriptor = SpaceEngineersActionsDescriptor()
            self.se_node = SpaceEngineersConnectorNode(actions_descriptor,
                                                       se_config)
def run_measurement(name,
                    params,
                    args,
                    debug: bool = False,
                    num_layers: int = 3):
    """"Runs the experiment with specified params, see the parse_test_args method for arguments"""

    exp_pars = debug_params if debug else full_params
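    # (debug selects a shortened configuration; otherwise the full-length measurement parameters are used)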

    experiment = Task0TrainTestLearningRateTemplate(
        LearningRateTaModularAdapter(),
        Task0TaSeTopology,
        params,
        overall_training_steps=exp_pars.overall_training_steps,
        num_testing_steps=exp_pars.num_testing_steps,
        num_testing_phases=exp_pars.num_testing_phases,
        num_classes=DatasetSeObjectsNode.label_size(),
        num_layers=num_layers,
        measurement_period=exp_pars.measurement_period,
        sliding_window_size=1,  # not used
        sliding_window_stride=1,  # not used
        sp_evaluation_period=exp_pars.sp_evaluation_period,
        save_cache=args.save,
        load_cache=args.load,
        clear_cache=args.clear,
        experiment_name=name,
        computation_only=args.computation_only,
        experiment_folder=args.alternative_results_folder,
        disable_plt_show=True,
        show_conv_agreements=False)

    if args.run_gui:
        run_just_model(Task0TaSeTopology(**params[0]), gui=True)
    else:
        print(
            f'======================================= Measuring model: {name}')
        experiment.run()
class SeIoTask0Dataset(SeIoBase):
    """Access to the Task0 dataset."""
    _params: DatasetSeObjectsParams
    node_se_dataset: DatasetSeObjectsNode
    outputs: DatasetSeObjectsOutputs

    def is_in_training_phase(self) -> bool:
        return not bool(self.get_testing_phase())

    def __init__(self,
                 params: DatasetSeObjectsParams = DatasetSeObjectsParams(
                     dataset_config=DatasetConfig.TRAIN_TEST,
                     save_gpu_memory=True)):
        self._params = params

    def _create_and_add_nodes(self):
        self.node_se_dataset = DatasetSeObjectsNode(self._params)

        # common IO
        self.outputs = self.node_se_dataset.outputs
        self.inputs = self.node_se_dataset.inputs

    def _add_nodes(self, target_group: NodeGroupBase):
        for node in [self.node_se_dataset]:
            target_group.add_node(node)

    def _connect_nodes(self):
        pass

    def get_num_labels(self):
        return self.node_se_dataset.label_size()

    def get_image_numel(self):
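        # number of pixels times the 3 colour channels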
        return self._params.dataset_dims[0] * self._params.dataset_dims[1] * 3

    def get_image_width(self):
        return self._params.dataset_dims[1]

    def get_image_height(self):
        return self._params.dataset_dims[0]

    def get_task_id(self) -> float:
        """Constant here, see below. If set to -1, it means that the experiment should end."""
        if self.node_se_dataset.is_train_test_ended():
            return -1.0
        return 0.0

    def get_task_instance_id(self) -> float:
        """The same as task_status, not used here. If set to -1, it means that the experiment should end."""
        if self.node_se_dataset.is_train_test_ended():
            return -1.0
        return 0.0

    def get_task_status(self) -> float:
        """At the end of the task (tells if solved), not used here. -1 indicates end of entire experiment."""
        if self.node_se_dataset.is_train_test_ended():
            return -1.0
        return 0.0

    def get_task_instance_status(self) -> float:
        """After one object passes computes your performance, not used in the dataset."""
        return 0.0

    def get_reward(self) -> float:
        return 0.0

    def get_testing_phase(self) -> float:
        value = 0.0 if self.node_se_dataset.is_training() else 1.0
        return value
def test_train_test_position_reset():
    """If we switch from train to test, we can start training from the beginning every time"""

    device = 'cuda'

    params = get_common_params()
    params.switch_train_resets_train_pos = True

    node = DatasetSeObjectsNode(params, seed=None)
    node.allocate_memory_blocks(AllocatingCreator(device))

    node.switch_training(training_on=True)
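    # _pos == -1 means 'before the first sample'; each step advances it by one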
    assert node._unit._pos == -1
    first_steps = 10

    train_images, train_labels, _ = collect_data(node, first_steps)

    # we are at the expected position
    last_train_pos = -1 + first_steps
    assert node._unit._pos == last_train_pos
    assert train_pos(node) == last_train_pos

    # switch to training (redundant) and check position is reset
    node.switch_training(training_on=True)
    assert node._unit._pos == -1
    assert train_pos(node) == -1

    second_steps = 3
    # check the training position starts from the beginning
    _, _, _ = collect_data(node, second_steps)
    last_train_pos = second_steps - 1
    assert node._unit._pos == last_train_pos
    assert train_pos(node) == last_train_pos

    # switch to testing and collect some data
    # _pos should reset (start testing), but the training_pos in the tensor should stay the same
    node.switch_training(training_on=False)
    assert node._unit._pos == -1
    assert train_pos(node) == last_train_pos
    testing_steps = 7

    test_images, test_labels, _ = collect_data(node, testing_steps)
    assert node._unit._pos == -1 + testing_steps  # pos (now tracking the testing set) changes
    assert train_pos(node) == last_train_pos  # the stored training position does not

    # switch back to training and check expected positions (training pos should be reset)
    third_train_steps = 2
    node.switch_training(training_on=True)
    assert node._unit._pos == -1
    assert train_pos(node) == -1

    train_images_2, train_labels_2, _ = collect_data(node, third_train_steps)

    last_train_pos = third_train_steps - 1
    assert train_pos(node) == last_train_pos
    assert node._unit._pos == last_train_pos

    # check that the data read at the beginning of training are equal
    assert same(train_images[0], train_images_2[0])
    assert same(train_labels[0], train_labels_2[0])
def test_train_test_position_persistence():
    """If we switch from train to test, do some testing and then back to train, we should continue where stopped"""

    device = 'cuda'

    params = get_common_params()
    params.switch_train_resets_train_pos = False
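    # with switch_train_resets_train_pos=False, switching back to training should continue from the stored position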

    node = DatasetSeObjectsNode(params, seed=None)
    node.allocate_memory_blocks(AllocatingCreator(device))

    node.switch_training(training_on=True)
    assert node._unit._pos == -1
    first_steps = 10

    _, _, _ = collect_data(node, first_steps)

    # we are at the expected position
    last_train_pos = -1 + first_steps
    assert node._unit._pos == last_train_pos
    assert train_pos(node) == last_train_pos

    # switch to training (redundant) and check position not changed
    node.switch_training(training_on=True)
    assert node._unit._pos == last_train_pos
    assert train_pos(node) == last_train_pos

    second_steps = 3
    # check the training position continues increasing
    _, _, _ = collect_data(node, second_steps)
    last_train_pos += second_steps
    assert node._unit._pos == last_train_pos
    assert train_pos(node) == last_train_pos

    # switch to testing and collect some data
    # _pos should reset (start testing), but the training_pos in the tensor should stay the same
    node.switch_training(training_on=False)
    assert node._unit._pos == -1
    assert train_pos(node) == last_train_pos
    testing_steps = 7

    test_images, test_labels, _ = collect_data(node, testing_steps)
    assert node._unit._pos == -1 + testing_steps  # pos (now tracking the testing set) changes
    assert train_pos(node) == last_train_pos  # the stored training position does not

    # switch back to training and check expected positions
    third_train_steps = 2
    node.switch_training(training_on=True)
    _, _, _ = collect_data(node, third_train_steps)
    last_train_pos += third_train_steps
    assert train_pos(node) == last_train_pos
    assert node._unit._pos == last_train_pos

    # one more testing
    node.switch_training(training_on=False)
    test_images_2, test_labels_2, _ = collect_data(node, testing_steps)
    assert same(test_images[0], test_images_2[0])
    assert same(test_labels[0], test_labels_2[0])