Example #1
 def validate(self):
     super().validate()
     shape = self.inputs.input_image.tensor.shape
     validate_predicate(lambda: len(shape) == 3,
                        f"the input should have 3 dimensions (Y, X, channel(s)), but its shape is {shape}")
     validate_predicate(lambda: shape[2] == 3 or shape[2] == 1,
                        f"the last dimension corresponds to channels; only 1 (grayscale) or 3 (RGB) are supported, got {shape[2]}")
Example #2
    def _validate_conv_params(self, params: ExpertParams):
        """
        This needs to be separate because it is overridden by the convSP
        """
        spatial = params.spatial

        validate_predicate(lambda: spatial.buffer_size >= spatial.batch_size)
Example #3
    def _validate_conv_learning_params(self, params: ExpertParams):
        """Validation of the convSP params when seen as one expert == common."""
        spatial = params.spatial

        validate_predicate(
            lambda: spatial.buffer_size >= params.flock_size,
            f"In convSP, the common buffer size {{{spatial.buffer_size}}} needs to be at least "
            f"flock_size {{{params.flock_size}}} for the case that all experts would like to store their "
            f"data into the buffer at once.")
Example #4
    def validate(self):
        """Checks that the input has correct dimensions."""
        super().validate()

        node_input = self.inputs.node_input.tensor

        validate_predicate(
            lambda: node_input.dim() == 3,
            f"The input should be 3D (y, x, channels) but has shape {node_input.shape}"
        )
Example #5
    def validate_params_for_n_layers(self):

        # iterate through the instance variables; each should hold either a single value or a list of length num_conv_layers
        v = vars(self)
        for var_str in v:
            if isinstance(v[var_str], list):
                validate_predicate(
                    lambda: len(v[var_str]) == self.num_conv_layers,
                    f'{var_str} should have either a single value or a list of length {self.num_conv_layers}'
                )
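To make the one-value-or-per-layer-list rule concrete, here is a sketch with a made-up parameter holder (ConvLayerParamsSketch is hypothetical, not a library class): every list-valued attribute must have exactly num_conv_layers entries, while scalar attributes apply to all layers.

class ConvLayerParamsSketch:
    """Made-up parameter holder used only to illustrate the rule above."""
    def __init__(self):
        self.num_conv_layers = 3
        self.rf_size = (8, 8)                      # single value: shared by all layers
        self.n_cluster_centers = [100, 150, 200]   # list: one entry per layer -> passes
        self.learning_rate = [0.1, 0.05, 0.02]     # a list of length 2 here would fail

p = ConvLayerParamsSketch()
for var_str, value in vars(p).items():
    if isinstance(value, list):
        assert len(value) == p.num_conv_layers, (
            f'{var_str} should have either a single value or a list of length {p.num_conv_layers}')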
Example #6
    def _validate_conv_learning_params(self, params: ExpertParams):
        """Validation of the convSP params when seen as one expert == common."""
        temporal = params.temporal

        validate_predicate(
            lambda: 0 < temporal.max_new_seqs <= (temporal.batch_size -
                                                  (temporal.seq_length - 1)),
            f"Max new sequences is a limit on the number of new sequences that can be added in a single "
            f"learning step. This cannot be larger than the number of sequences in the batch. "
            f"(0 < {{{temporal.max_new_seqs}}} <= ({{{temporal.batch_size}}} - ({{{temporal.seq_length}}} - 1))."
        )
Example #7
 def validate(self):
     """Checks that the saliency map has correct dimensions."""
     super().validate()
     saliency_map = self.inputs.input.tensor
     validate_predicate(
         lambda: saliency_map.dim() == 2,
         f"The input should be 2D (y, x) but has shape {saliency_map.shape}"
     )
     map_height, map_width = saliency_map.shape
     validate_predicate(lambda: map_height == map_width,
                        "The input saliency map needs to be square")
Example #8
    def _validate_conv_learning_params(self, params: ExpertParams):
        """Validation of the convSP params when seen as one expert == common."""
        temporal = params.temporal

        combined_batch_size = params.flock_size * (
            temporal.batch_size +
            (0 if temporal.n_subbatches == 1 else temporal.seq_length - 1))

        validate_predicate(
            lambda: temporal.max_new_seqs > 0 and temporal.max_new_seqs <=
            (combined_batch_size - (temporal.seq_length - 1)),
            f"Max new sequences is a limit on the number of new sequences that can be added in a single "
            f"learning step. This cannot be larger than the number of sequences in a batch from every expert. "
            f"(0 < {{{temporal.max_new_seqs}}} <= ({{{combined_batch_size}}} - ({{{temporal.seq_length}}} - 1))."
        )
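A quick arithmetic sketch with made-up values (these are not defaults from the library) shows what the bound on max_new_seqs evaluates to:

flock_size, batch_size, n_subbatches, seq_length = 2, 10, 2, 4

combined_batch_size = flock_size * (
    batch_size + (0 if n_subbatches == 1 else seq_length - 1))   # 2 * (10 + 3) = 26

# max_new_seqs must then satisfy 0 < max_new_seqs <= 26 - (4 - 1) = 23
max_new_seqs_upper_bound = combined_batch_size - (seq_length - 1)
print(max_new_seqs_upper_bound)   # 23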
Example #9
    def _create_symbol(self, symbol: str) -> torch.Tensor:
        """Create tensor containing rendered symbol using 5x5 font
        Args:
            symbol: Symbol to be rendered. String must be of size 1
        Returns:
            Tensor[dtype=uint8] of dimensions [7, 5 + self._padding_right] - by default [7,5] as padding_right is 0.
            Pixel values are: 1 - symbol, 0 - background
        """
        validate_predicate(lambda: len(symbol) == 1)

        image = Image.new("RGBA", (5 + self._padding_right, 10),
                          (255, 255, 255))
        d_usr = ImageDraw.Draw(image)
        d_usr.fontmode = "1"  # turn off antialiasing
        d_usr.text((0, 0), symbol, (0, 0, 0), font=self._usr_font)

        tensor = torch.from_numpy(np.array(image))
        # [height, width, channels]
        # cut the top 3 lines (they are needed so that the font is rendered at the correct size)
        # convert tensor to bitmap (dtype = uint8) of dimensions [height-3, width]
        return tensor[3:, :, 0] == 0
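As a standalone illustration of the same render-to-mask pipeline (using Pillow's built-in default font instead of the node's own 5x5 font, so the exact glyph size and crop differ from the method above):

import numpy as np
import torch
from PIL import Image, ImageDraw, ImageFont

# Render one character onto a white RGBA canvas without antialiasing,
# then threshold the red channel into a boolean ink mask.
image = Image.new("RGBA", (12, 16), (255, 255, 255))
draw = ImageDraw.Draw(image)
draw.fontmode = "1"  # turn off antialiasing
draw.text((0, 0), "A", (0, 0, 0), font=ImageFont.load_default())

mask = torch.from_numpy(np.array(image))[:, :, 0] == 0  # True where the glyph was drawn
print(mask.to(torch.uint8))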
Example #10
    def _verify_context_and_rewards(self, input_context: torch.Tensor,
                                    input_rewards: torch.Tensor):

        # Check this only once as the context and reward shapes shouldn't change during the run
        if not self.context_size_checked and input_context is not None:
            validate_predicate(lambda: input_context.shape == (
                self.flock_size, self.n_providers, NUMBER_OF_CONTEXT_TYPES,
                self._params.temporal.incoming_context_size))
        if not self.context_size_checked and input_rewards is not None:
            valid_shapes = [(2, ), (1, ), (self.flock_size, 2)]
            validate_predicate(lambda: input_rewards.shape in valid_shapes)

        self.context_size_checked = True

        if input_context is not None:
            self.input_context.copy_(input_context)
        else:
            self._fill_empty_reward_part(self.input_context)

        if input_rewards is not None:
            # Assemble rewards if needed
            if input_rewards.shape == (self.flock_size, 2):
                self.input_rewards.copy_(input_rewards)
            elif input_rewards.shape == (2, ):
                self.input_rewards.copy_(
                    input_rewards.unsqueeze(dim=0).expand(self.flock_size, 2))
            else:
                # This accepts positive and negative values and places the value in the correct spot
                r = torch.zeros((2, ),
                                dtype=self._float_dtype,
                                device=self._device)
                if (input_rewards >= 0).all():
                    ind = 0
                else:
                    ind = 1
                r[ind] = torch.abs(input_rewards)[0]
                self.input_rewards.copy_(
                    r.unsqueeze(dim=0).expand(self.flock_size, 2))
        else:
            self.input_rewards.fill_(0)
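As a small usage sketch with made-up values, this is how a single negative scalar reward ends up broadcast over the flock (slot 0 holds the positive magnitude, slot 1 the negative one, matching the indexing above):

import torch

flock_size = 4
input_rewards = torch.tensor([-0.5])   # one scalar reward for the whole flock

r = torch.zeros(2)
ind = 0 if (input_rewards >= 0).all() else 1   # slot 0: positive, slot 1: negative
r[ind] = torch.abs(input_rewards)[0]

expanded = r.unsqueeze(dim=0).expand(flock_size, 2)
print(expanded)   # shape [4, 2]; every row is [0.0, 0.5]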
Example #11
    def __init__(self,
                 conv_layers_params: MultipleLayersParams,
                 top_layer_params: MultipleLayersParams,
                 model_seed: Optional[int] = 321,
                 num_labels: int = 20,
                 image_size: Tuple[int, int, int] = (24, 24, 3),
                 name: str = "Nc1r1Group"):
        super().__init__(ClassificationTaskInputs(self),
                         ClassificationTaskOutputs(self), conv_layers_params,
                         model_seed, image_size, name)

        self._num_labels = num_labels

        validate_predicate(
            lambda: self._num_labels is not None,
            "num_labels cannot be None if top layer is used (top_layer_params is not None)."
        )

        # parse to expert params
        self._top_params = top_layer_params.convert_to_expert_params()[0]

        self.top_layer = SpReconstructionLayer(self.output_projection_sizes,
                                               self._num_labels,
                                               sp_params=self._top_params,
                                               name='TOP',
                                               seed=model_seed)
        self.add_node(self.top_layer)

        # Conv[-1] -> Fully
        Connector.connect(self.conv_layers[-1].outputs.data,
                          self.top_layer.inputs.data)

        # Label -> Fully
        Connector.connect(self.inputs.label.output,
                          self.top_layer.inputs.label)

        # Fully -> Reconstructed label
        Connector.connect(self.top_layer.outputs.label,
                          self.outputs.reconstructed_label.input)
Example #12
 def _check_parameters_1d(child_grid_dim: int, parent_rf_dim: int,
                          parent_rf_stride: int):
     validate_predicate(lambda: child_grid_dim > 0 and parent_rf_dim > 0 and
                        parent_rf_stride > 0)
     validate_predicate(lambda: parent_rf_stride <= parent_rf_dim)
     validate_predicate(
         lambda: (child_grid_dim - parent_rf_dim) % parent_rf_stride == 0,
         additional_message="RFs need to fill the input area exactly")
Example #13
    def _validate_inputs(self, input_batch: torch.Tensor,
                         targets: torch.Tensor,
                         learning_coefficients: torch.Tensor):

        batch_size = input_batch.shape[1]
        validate_predicate(lambda: input_batch.shape ==
                           (self.flock_size, batch_size, self.input_size))
        validate_predicate(lambda: targets.shape ==
                           (self.flock_size, batch_size, self.output_size))
        validate_predicate(lambda: learning_coefficients.shape ==
                           (self.flock_size, batch_size, 1))
Example #14
    def validate(self):
        """Checks that the input image and coordinates have correct dimensions."""
        super().validate()

        image = self.inputs.input_image.tensor
        coords = self.inputs.coordinates.tensor

        validate_predicate(
            lambda: image.dim() == 3,
            f"The input should be 3D (y, x, channels) but has shape {image.shape}"
        )

        validate_predicate(
            lambda: coords.dim() == 1,
            f"The coordinates should be 1D but have shape {coords.shape}")

        validate_predicate(
            lambda: len(coords) == 4,
            f"The coordinates should contain 4 values, but {len(coords)} were given")
Example #15
 def batch_size(self, value: int):
     validate_positive_int(value)
     validate_predicate(lambda: value < self._node_params.buffer_size)
     self._node_params.batch_size = value
Example #16
 def validate_params(params: GridWorldParams):
     validate_predicate(lambda: params.tile_size >= 1)
     validate_predicate(lambda: params.egocentric_width >= 1)
     validate_predicate(lambda: params.egocentric_height >= 1)
Example #17
 def _validate_params(self, params: DatasetAlphabetParams):
     if params.mode == DatasetAlphabetMode.SEQUENCE_PROBS:
         validate_predicate(lambda: params.sequence_probs is not None,
                            'params.sequence_probs must be set when params.mode == SEQUENCE_PROBS')
Example #18
 def validate(self):
     validate_positive_int(self._node_params.flock_size)
     validate_predicate(lambda: self.inputs.learning_coefficients.tensor.dim() == 1)
     validate_predicate(lambda: self._node_params.flock_size == self.inputs.learning_coefficients.tensor.shape[0])
Example #19
    def _validate_universal_params(self, params: ExpertParams):
        """
        Validate the params which are same in normal and convSP.
        """

        validate_predicate(lambda: params.flock_size > 0)
        validate_predicate(lambda: params.n_cluster_centers > 0)

        spatial = params.spatial
        validate_predicate(lambda: spatial.input_size > 0)
        validate_predicate(lambda: spatial.buffer_size > 0)

        validate_predicate(lambda: spatial.batch_size > 0)
        validate_predicate(lambda: 0 <= spatial.learning_rate <= 1)
        validate_predicate(lambda: spatial.cluster_boost_threshold > 0)
        validate_predicate(lambda: spatial.max_boost_time > 0)
        validate_predicate(lambda: spatial.learning_period > 0)
Example #20
    def _validate_universal_params(self, params: ExpertParams):
        validate_predicate(lambda: params.flock_size > 0)
        validate_predicate(lambda: params.n_cluster_centers > 0)

        temporal = params.temporal
        validate_predicate(lambda: temporal.buffer_size > 0)
        validate_predicate(lambda: temporal.batch_size > 0)
        validate_predicate(lambda: temporal.learning_period > 0)
        validate_predicate(lambda: temporal.seq_length >= 2)
        validate_predicate(lambda: temporal.seq_lookahead > 0)
        validate_predicate(lambda: temporal.seq_lookbehind > 0)
        validate_predicate(lambda: temporal.n_frequent_seqs > 0)
        validate_predicate(lambda: temporal.max_encountered_seqs > 0)
        validate_predicate(
            lambda: temporal.forgetting_limit >= 1,
            f"forgetting_limit {{{temporal.forgetting_limit}}} should be >= 1 to avoid numbers that are too small"
        )
        validate_predicate(lambda: temporal.context_prior >= SMALL_CONSTANT)
        validate_predicate(
            lambda: temporal.exploration_attempts_prior >= SMALL_CONSTANT)
        validate_predicate(lambda: 0 <= temporal.exploration_probability <= 1)
        validate_predicate(lambda: temporal.incoming_context_size > 0)
        validate_predicate(lambda: 0 <= temporal.own_rewards_weight)
        validate_predicate(lambda: temporal.n_providers > 0,
                           f"There should be at least one parent per flock.")

        validate_predicate(lambda: temporal.seq_length ==
                           temporal.seq_lookbehind + temporal.seq_lookahead)

        validate_predicate(lambda: temporal.buffer_size >= temporal.batch_size,
                           "Batch size cannot be larger than buffer size.")

        validate_predicate(
            lambda: temporal.max_encountered_seqs > temporal.batch_size -
            (temporal.seq_length - 1),
            f"The whole bottom part of max_encountered_seqs {{{temporal.max_encountered_seqs}}} is "
            f"rewritten by batch_size {{{temporal.batch_size}}} - "
            f"(seq_length {{{temporal.seq_length}}} - 1),"
            f" so there should be enough space to store the actual seqs.")

        validate_predicate(
            lambda: temporal.n_frequent_seqs <= temporal.max_encountered_seqs,
            f"Frequent sequences are sampled from the all_encountered_seqs, so it should hold that "
            f"n_frequent_seqs {{{temporal.n_frequent_seqs}}} <="
            f" max_encountered_seqs {{{temporal.max_encountered_seqs}}}.")

        validate_predicate(
            lambda: temporal.n_subbatches > 0,
            f"The value of n_subbatches {{{temporal.n_subbatches}}} "
            f"should be at least 1. A value of 1 means no subbatching, and any higher value processes "
            f"all_encountered_sequences in a parallel fashion.")
Example #21
 def _validate_conv_params(self, params: ExpertParams):
     """Validation of the convSP params when seen as individual experts == super_params."""
     spatial = params.spatial
     validate_predicate(lambda: spatial.buffer_size == 1,
                        "ConvSP buffer_size should be 1")