Code example #1
0
 def __init__(self, input_size, hidden_sizes, output_size, **_):
     """
     Build the recurrent regressor plus its sigmoid stopping head.

     Parameters
     ----------
     input_size : int
         Number of units each element Xi in the input sequence X has.
     hidden_sizes : int, list of int
         Number of hidden units each GRU should have.
     output_size : int
         Number of units the regression layer should have.
     """
     super().__init__(input_size, hidden_sizes)
     self.output_size = output_size

     # Both heads read from the last hidden layer.
     last_hidden_size = self.hidden_sizes[-1]
     self.layer_regression = LayerRegression(last_hidden_size, self.output_size)
     # Stopping head also sees the raw input, concatenated with the hidden state.
     self.stopping_layer = LayerDense(last_hidden_size + input_size, 1,
                                      activation="sigmoid", name="stopping")
Code example #2
0
    def __init__(self, volume_manager, input_size, hidden_sizes, output_size, activation, use_previous_direction=False, predict_offset=False,
                 use_layer_normalization=False, dropout_prob=0., seed=1234, **_):
        """
        Parameters
        ----------
        volume_manager : :class:`VolumeManger` object
            Use to evaluate the diffusion signal at specific coordinates.
        input_size : int
            Number of units each element X has.
        hidden_sizes : int, list of int
            Number of hidden units each FFNN layer should have.
        output_size : int
            Number of units the regression layer should have.
        activation : str
            Name of the activation function to use in the hidden layers
        use_previous_direction : bool
            Use the previous direction as an additional input
        predict_offset : bool
            Predict the offset from the previous direction instead (need use_previous_direction).
        use_layer_normalization : bool
            Use LayerNormalization to normalize preactivations
        dropout_prob : float
            Dropout probability for recurrent networks. See: https://arxiv.org/pdf/1512.05287.pdf
        seed : int
            Random seed used for dropout normalization

        Raises
        ------
        ValueError
            If `predict_offset` is True but `use_previous_direction` is False.
        """
        super().__init__(input_size, hidden_sizes, activation, use_layer_normalization, dropout_prob, seed)
        self.volume_manager = volume_manager
        self.output_size = output_size
        self.use_previous_direction = use_previous_direction
        self.predict_offset = predict_offset

        # Offsets are relative to the previous direction, so that input is required.
        # Raise (not assert) so the check survives `python -O`.
        if self.predict_offset and not self.use_previous_direction:
            raise ValueError("predict_offset requires use_previous_direction.")

        # Bounded tanh output when predicting a small offset; unbounded otherwise.
        layer_regression_activation = "tanh" if self.predict_offset else "identity"
        self.layer_regression = LayerDense(self.hidden_sizes[-1], self.output_size,
                                           activation=layer_regression_activation)

        if self.dropout_prob:
            # Inverted dropout: precompute a binomial keep-mask over the regression
            # layer's inputs, scaled by 1/p so expected activations are unchanged.
            p = 1 - self.dropout_prob
            self.dropout_vectors[self.layer_regression.name] = self.srng.binomial(
                size=(self.layer_regression.input_size,), n=1, p=p, dtype=floatX) / p
Code example #3
0
    def __init__(self, volume_manager, input_size, hidden_sizes, use_layer_normalization=False, use_skip_connections=False, **_):
        """
        Build a feed-forward binary classifier over diffusion-signal inputs.

        Parameters
        ----------
        volume_manager : :class:`VolumeManger` object
            Use to evaluate the diffusion signal at specific coordinates.
        input_size : int
            Number of units each element X has.
        hidden_sizes : int, list of int
            Number of hidden units each FFNN layer should have.
        use_layer_normalization : bool
            Use LayerNormalization to normalize preactivations
        use_skip_connections : bool
            Use skip connections from the input to all hidden layers in the network, and from all hidden layers to the output layer
        """
        super().__init__(input_size, hidden_sizes,
                         use_layer_normalization=use_layer_normalization,
                         use_skip_connections=use_skip_connections)
        self.volume_manager = volume_manager
        self.output_size = 1  # Positive class probability

        # With skip connections every hidden layer feeds the classifier;
        # otherwise only the last hidden layer does.
        if self.use_skip_connections:
            classifier_input_size = sum(self.hidden_sizes)
        else:
            classifier_input_size = self.hidden_sizes[-1]
        self.layer_classification = LayerDense(classifier_input_size,
                                               self.output_size,
                                               activation="sigmoid")
Code example #4
0
File: gru_regression.py  Project: szho42/learn2track
    def __init__(self,
                 volume_manager,
                 input_size,
                 hidden_sizes,
                 output_size,
                 activation='tanh',
                 use_previous_direction=False,
                 predict_offset=False,
                 use_layer_normalization=False,
                 drop_prob=0.,
                 use_zoneout=False,
                 use_skip_connections=False,
                 neighborhood_radius=None,
                 learn_to_stop=False,
                 seed=1234,
                 **_):
        """
        Parameters
        ----------
        volume_manager : :class:`VolumeManger` object
            Use to evaluate the diffusion signal at specific coordinates.
        input_size : int
            Number of units each element Xi in the input sequence X has.
        hidden_sizes : int, list of int
            Number of hidden units each GRU should have.
        output_size : int
            Number of units the regression layer should have.
        activation : str
            Activation function to apply on the "cell candidate"
        use_previous_direction : bool
            Use the previous direction as an additional input
        predict_offset : bool
            Predict the offset from the previous direction instead (need use_previous_direction).
        use_layer_normalization : bool
            Use LayerNormalization to normalize preactivations and stabilize hidden layer evolution
        drop_prob : float
            Dropout/Zoneout probability for recurrent networks. See: https://arxiv.org/pdf/1512.05287.pdf & https://arxiv.org/pdf/1606.01305.pdf
        use_zoneout : bool
            Use zoneout implementation instead of dropout
        use_skip_connections : bool
            Use skip connections from the input to all hidden layers in the network, and from all hidden layers to the output layer
        neighborhood_radius : float
            Add signal in positions around the current streamline coordinate to the input (with given length in voxel space); None = no neighborhood
        learn_to_stop : bool
            Predict whether the streamline being generated should stop or not
        seed : int
            Random seed used for dropout normalization

        Raises
        ------
        ValueError
            If `predict_offset` is True but `use_previous_direction` is False.
        """
        self.neighborhood_radius = neighborhood_radius
        self.model_input_size = input_size
        if self.neighborhood_radius:
            self.neighborhood_directions = get_neighborhood_directions(self.neighborhood_radius)
            # Model input size is increased when using neighborhood
            self.model_input_size = input_size * self.neighborhood_directions.shape[0]

        super().__init__(self.model_input_size,
                         hidden_sizes,
                         activation=activation,
                         use_layer_normalization=use_layer_normalization,
                         drop_prob=drop_prob,
                         use_zoneout=use_zoneout,
                         use_skip_connections=use_skip_connections,
                         seed=seed)
        # super().__init__ received the (possibly enlarged) model input size;
        # restore the per-element input size callers actually provided.
        self.input_size = input_size

        self.volume_manager = volume_manager
        self.output_size = output_size
        self.use_previous_direction = use_previous_direction
        self.predict_offset = predict_offset
        self.learn_to_stop = learn_to_stop

        # Offsets are relative to the previous direction, so that input is required.
        # Raise (not assert) so the check survives `python -O`.
        if self.predict_offset and not self.use_previous_direction:
            raise ValueError("predict_offset requires use_previous_direction.")

        # Do not use dropout/zoneout in last hidden layer
        # Bounded tanh output when predicting a small offset; unbounded otherwise.
        layer_regression_activation = "tanh" if self.predict_offset else "identity"
        output_layer_input_size = (sum(self.hidden_sizes) if self.use_skip_connections
                                   else self.hidden_sizes[-1])
        self.layer_regression = LayerDense(output_layer_input_size,
                                           self.output_size,
                                           activation=layer_regression_activation,
                                           name="GRU_Regression")
        if self.learn_to_stop:
            # Predict whether a streamline should stop or keep growing
            self.layer_stopping = LayerDense(output_layer_input_size,
                                             1,
                                             activation='sigmoid',
                                             name="GRU_Regression_stopping")
Code example #5
0
File: gru_regression.py  Project: nih23/learn2track
    def __init__(self,
                 volume_manager,
                 input_size,
                 hidden_sizes,
                 output_size,
                 activation='tanh',
                 use_previous_direction=False,
                 predict_offset=False,
                 use_layer_normalization=False,
                 drop_prob=0.,
                 use_zoneout=False,
                 use_skip_connections=False,
                 seed=1234,
                 **_):
        """
        Parameters
        ----------
        volume_manager : :class:`VolumeManger` object
            Use to evaluate the diffusion signal at specific coordinates.
        input_size : int
            Number of units each element Xi in the input sequence X has.
        hidden_sizes : int, list of int
            Number of hidden units each GRU should have.
        output_size : int
            Number of units the regression layer should have.
        activation : str
            Activation function to apply on the "cell candidate"
        use_previous_direction : bool
            Use the previous direction as an additional input
        predict_offset : bool
            Predict the offset from the previous direction instead (need use_previous_direction).
        use_layer_normalization : bool
            Use LayerNormalization to normalize preactivations and stabilize hidden layer evolution
        drop_prob : float
            Dropout/Zoneout probability for recurrent networks. See: https://arxiv.org/pdf/1512.05287.pdf & https://arxiv.org/pdf/1606.01305.pdf
        use_zoneout : bool
            Use zoneout implementation instead of dropout
        use_skip_connections : bool
            Use skip connections from the input to all hidden layers in the network, and from all hidden layers to the output layer
        seed : int
            Random seed used for dropout normalization

        Raises
        ------
        ValueError
            If `predict_offset` is True but `use_previous_direction` is False.
        """
        super().__init__(input_size,
                         hidden_sizes,
                         activation=activation,
                         use_layer_normalization=use_layer_normalization,
                         drop_prob=drop_prob,
                         use_zoneout=use_zoneout,
                         use_skip_connections=use_skip_connections,
                         seed=seed)
        self.volume_manager = volume_manager
        self.output_size = output_size
        self.use_previous_direction = use_previous_direction
        self.predict_offset = predict_offset

        # Offsets are relative to the previous direction, so that input is required.
        # Raise (not assert) so the check survives `python -O`.
        if self.predict_offset and not self.use_previous_direction:
            raise ValueError("predict_offset requires use_previous_direction.")

        # Do not use dropout/zoneout in last hidden layer
        # Bounded tanh output when predicting a small offset; unbounded otherwise.
        layer_regression_activation = "tanh" if self.predict_offset else "identity"
        output_layer_input_size = (sum(self.hidden_sizes) if self.use_skip_connections
                                   else self.hidden_sizes[-1])
        self.layer_regression = LayerDense(output_layer_input_size,
                                           self.output_size,
                                           activation=layer_regression_activation,
                                           name="GRU_Regression")
Code example #6
0
    def __init__(self,
                 volume_manager,
                 input_size,
                 hidden_sizes,
                 output_size,
                 activation,
                 use_previous_direction=False,
                 predict_offset=False,
                 use_layer_normalization=False,
                 dropout_prob=0.,
                 neighborhood_radius=None,
                 seed=1234,
                 **_):
        """
        Parameters
        ----------
        volume_manager : :class:`VolumeManger` object
            Use to evaluate the diffusion signal at specific coordinates.
        input_size : int
            Number of units each element X has.
        hidden_sizes : int, list of int
            Number of hidden units each FFNN layer should have.
        output_size : int
            Number of units the regression layer should have.
        activation : str
            Name of the activation function to use in the hidden layers
        use_previous_direction : bool
            Use the previous direction as an additional input
        predict_offset : bool
            Predict the offset from the previous direction instead (need use_previous_direction).
        use_layer_normalization : bool
            Use LayerNormalization to normalize preactivations
        dropout_prob : float
            Dropout probability for recurrent networks. See: https://arxiv.org/pdf/1512.05287.pdf
        neighborhood_radius : float
            Add signal in positions around the current streamline coordinate to the input (with given length in voxel space); None = no neighborhood
        seed : int
            Random seed used for dropout normalization

        Raises
        ------
        ValueError
            If `predict_offset` is True but `use_previous_direction` is False.
        """
        # Default changed from False to None to match the documented sentinel
        # ("None = no neighborhood"); both are falsy, so behavior is identical.
        self.neighborhood_radius = neighborhood_radius
        self.model_input_size = input_size
        if self.neighborhood_radius:
            self.neighborhood_directions = get_neighborhood_directions(self.neighborhood_radius)
            # Model input size is increased when using neighborhood
            self.model_input_size = input_size * self.neighborhood_directions.shape[0]

        super().__init__(self.model_input_size,
                         hidden_sizes,
                         activation=activation,
                         use_layer_normalization=use_layer_normalization,
                         dropout_prob=dropout_prob,
                         seed=seed)
        # super().__init__ received the (possibly enlarged) model input size;
        # restore the per-element input size callers actually provided.
        self.input_size = input_size

        self.volume_manager = volume_manager
        self.output_size = output_size
        self.use_previous_direction = use_previous_direction
        self.predict_offset = predict_offset

        # Offsets are relative to the previous direction, so that input is required.
        # Raise (not assert) so the check survives `python -O`.
        if self.predict_offset and not self.use_previous_direction:
            raise ValueError("predict_offset requires use_previous_direction.")

        # Bounded tanh output when predicting a small offset; unbounded otherwise.
        layer_regression_activation = "tanh" if self.predict_offset else "identity"
        self.layer_regression = LayerDense(self.hidden_sizes[-1],
                                           self.output_size,
                                           activation=layer_regression_activation)

        if self.dropout_prob:
            # Inverted dropout: precompute a binomial keep-mask over the regression
            # layer's inputs, scaled by 1/p so expected activations are unchanged.
            p = 1 - self.dropout_prob
            self.dropout_vectors[self.layer_regression.name] = self.srng.binomial(
                size=(self.layer_regression.input_size,), n=1, p=p, dtype=floatX) / p