Example #1
    def __init__(self, dim, self_attention=False, memory_gate=False):
        """
        Constructor for the ``WriteUnit``.

        :param dim: global 'd' hidden dimension
        :type dim: int

        :param self_attention: whether or not to use self-attention on the previous control states
        :type self_attention: bool

        :param memory_gate: whether or not to use memory gating.
        :type memory_gate: bool

        """

        # call base constructor
        super(WriteUnit, self).__init__()

        # linear layer for the concatenation of ri & mi-1
        self.concat_layer = linear(2 * dim, dim, bias=True)

        # self-attention & memory gating optional initializations
        self.self_attention = self_attention
        self.memory_gate = memory_gate

        if self.self_attention:
            self.attn = linear(dim, 1, bias=True)
            self.mi_sa_proj = linear(dim, dim, bias=True)
            self.mi_info_proj = linear(dim, dim, bias=True)

        if self.memory_gate:
            self.control = linear(dim, 1, bias=True)
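A minimal sketch of how the layers built above could be used in a write step, following the MAC write equations; the function name `write_step` and its argument names are assumptions, not the repository's actual `forward()`.

import torch
import torch.nn.functional as F

def write_step(unit, read_vector, prev_memory, ctrl, prev_controls=None, prev_memories=None):
    """Hypothetical write step for the WriteUnit above (names are assumptions).

    read_vector:  [batch, dim]   r_i coming from the read unit
    prev_memory:  [batch, dim]   m_{i-1}
    ctrl:         [batch, dim]   current control state c_i
    prev_controls / prev_memories: lists of earlier c_j / m_j, only needed
    when self-attention is enabled.
    """
    # candidate memory: projection of the concatenation of r_i and m_{i-1}
    next_memory = unit.concat_layer(torch.cat([read_vector, prev_memory], dim=-1))

    if unit.self_attention and prev_controls and prev_memories:
        # attention of the current control over the previous control states
        controls = torch.stack(prev_controls, dim=1)            # [batch, i, dim]
        memories = torch.stack(prev_memories, dim=1)            # [batch, i, dim]
        scores = unit.attn(ctrl.unsqueeze(1) * controls)        # [batch, i, 1]
        weights = F.softmax(scores, dim=1)
        mem_sa = (weights * memories).sum(dim=1)                # weighted sum of past memories
        next_memory = unit.mi_sa_proj(mem_sa) + unit.mi_info_proj(next_memory)

    if unit.memory_gate:
        # gate between the previous memory and the candidate memory
        gate = torch.sigmoid(unit.control(ctrl))
        next_memory = gate * prev_memory + (1 - gate) * next_memory

    return next_memory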
Example #2
    def __init__(self, dim, max_step):
        """
        Constructor for the control unit.

        :param dim: global 'd' hidden dimension
        :type dim: int

        :param max_step: maximum number of steps -> number of MAC cells in the network.
        :type max_step: int

        """

        # call base constructor
        super(ControlUnit, self).__init__()

        # define the linear layers (one per step) used to compute the
        # position-aware question encodings
        self.pos_aware_layers = nn.ModuleList()
        for _ in range(max_step):
            self.pos_aware_layers.append(linear(2 * dim, dim, bias=True))

        # define the linear layer used to create the cqi values
        self.ctrl_question = linear(2 * dim, dim, bias=True)

        # define the linear layer used to create the attention weights. Should
        # be one scalar weight per contextual word
        self.attn = linear(dim, 1, bias=True)
        self.step = 0
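A minimal sketch of a control step using the layers above, following the MAC control equations; `control_step` and the argument names are assumptions rather than the repository's actual `forward()`.

import torch
import torch.nn.functional as F

def control_step(unit, prev_ctrl, question_encoding, contextual_words):
    """Hypothetical control step for the ControlUnit above (an assumption).

    prev_ctrl:         [batch, dim]        c_{i-1}
    question_encoding: [batch, 2 * dim]    concatenated final biLSTM states
    contextual_words:  [batch, n_words, dim]
    """
    # step-specific (position-aware) projection of the question encoding
    question = unit.pos_aware_layers[unit.step](question_encoding)   # [batch, dim]

    # cq_i: combination of the previous control state and the projected question
    cqi = unit.ctrl_question(torch.cat([prev_ctrl, question], dim=-1))

    # one scalar attention weight per contextual word
    scores = unit.attn(cqi.unsqueeze(1) * contextual_words)          # [batch, n_words, 1]
    weights = F.softmax(scores, dim=1)

    # new control state: weighted average of the contextual words
    ctrl = (weights * contextual_words).sum(dim=1)                   # [batch, dim]
    unit.step += 1
    return ctrl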
Example #3
    def __init__(self, dim, embedded_dim):
        """
        Constructor for the ``InputUnit``.

        :param dim: global 'd' hidden dimension
        :type dim: int

        :param embedded_dim: dimension of the word embeddings.
        :type embedded_dim: int

        """

        # call base constructor
        super(InputUnit, self).__init__()

        self.dim = dim

        # instantiate the image processing module (2-layer CNN)
        self.conv = ImageProcessing(dim)

        # define linear layer for the projection of the knowledge base
        self.kb_proj_layer = linear(dim, dim, bias=True)

        # create bidirectional LSTM layer
        self.lstm = torch.nn.LSTM(input_size=embedded_dim, hidden_size=self.dim,
                            num_layers=1, batch_first=True, bidirectional=True)

        # linear layer for projecting the word encodings from 2*dim to dim
        # TODO: linear(2*self.dim, self.dim, bias=True) ?
        self.lstm_proj = torch.nn.Linear(2 * self.dim, self.dim)
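A hedged sketch of how the InputUnit's components might be wired together; the shapes of `ImageProcessing`'s output and the function/argument names are assumptions, not the repository's actual `forward()`.

import torch

def encode_inputs(unit, embedded_questions, images):
    """Hypothetical input encoding for the InputUnit above (an assumption).

    embedded_questions: [batch, n_words, embedded_dim]
    images:             [batch, channels, height, width]
    """
    # contextual word encodings + final hidden states from the biLSTM
    lstm_out, (h_n, _) = unit.lstm(embedded_questions)        # lstm_out: [batch, n_words, 2*dim]
    contextual_words = unit.lstm_proj(lstm_out)               # project 2*dim -> dim

    # question encoding: concatenation of the two final directional states
    question_encoding = torch.cat([h_n[0], h_n[1]], dim=-1)   # [batch, 2*dim]

    # knowledge base: CNN feature maps, flattened over space and projected
    feature_maps = unit.conv(images)                          # assumed [batch, dim, H', W']
    kb = feature_maps.flatten(start_dim=2).transpose(1, 2)    # [batch, H'*W', dim]
    kb = unit.kb_proj_layer(kb)

    return question_encoding, contextual_words, kb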
Example #4
    def __init__(self, dim):
        """
        Constructor for the :py:class:`ReadUnit` of the ``S-MAC`` model.

        :param dim: global 'd' hidden dimension.
        :type dim: int

        """

        # call base constructor
        super(ReadUnit, self).__init__()

        # linear layer to define I'(i,h,w) elements (r2 equation)
        self.concat_layer = linear(dim, dim, bias=True)

        # linear layer to compute attention weights
        self.attn = linear(dim, 1, bias=False)
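A minimal sketch of the simplified (S-MAC) read step with the two layers above; a hypothetical reconstruction from the r2/attention comments, not the repository's actual `forward()`.

import torch
import torch.nn.functional as F

def read_step(unit, ctrl, knowledge_base):
    """Hypothetical S-MAC read step for the ReadUnit above (an assumption).

    ctrl:           [batch, dim]            current control state c_i
    knowledge_base: [batch, n_regions, dim] projected image feature maps
    """
    # I'(i,h,w): projection of every knowledge-base element (r2 equation)
    projected_kb = unit.concat_layer(knowledge_base)

    # control-guided attention, one scalar weight per region
    scores = unit.attn(ctrl.unsqueeze(1) * projected_kb)      # [batch, n_regions, 1]
    weights = F.softmax(scores, dim=1)

    # read vector: weighted sum over the knowledge-base regions
    return (weights * knowledge_base).sum(dim=1)              # [batch, dim]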
Example #5
    def __init__(self, dim, nb_classes):
        """
        Constructor for the ``OutputUnit``.

        :param dim: global 'd' dimension.
        :type dim: int

        :param nb_classes: number of classes to consider (classification problem).
        :type nb_classes: int

        """

        # call base constructor
        super(OutputUnit, self).__init__()

        # define the 2-layer MLP & specify its weight initialization
        self.classifier = nn.Sequential(linear(dim * 3, dim, bias=True),
                                        nn.ELU(),
                                        linear(dim, nb_classes, bias=True))
        kaiming_uniform_(self.classifier[0].weight)
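A short usage sketch for the OutputUnit, consistent with the `dim * 3` classifier input (question encoding of size 2*dim plus a memory of size dim); the function and argument names are assumptions.

import torch

def predict(unit, question_encoding, final_memory):
    """Hypothetical prediction step for the OutputUnit above (an assumption).

    question_encoding: [batch, 2 * dim]  concatenated final biLSTM states
    final_memory:      [batch, dim]      memory state after the last MAC cell
    """
    # dim * 3 input: question (2 * dim) concatenated with the final memory (dim)
    logits = unit.classifier(torch.cat([question_encoding, final_memory], dim=-1))
    return logits                                             # [batch, nb_classes]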
Example #6
    def __init__(self, dim):
        """
        Constructor for the ``ReadUnit``.

        :param dim: global 'd' hidden dimension
        :type dim: int

        """

        # call base constructor
        super(ReadUnit, self).__init__()

        # define linear layer for the projection of the previous memory state
        self.mem_proj_layer = linear(dim, dim, bias=True)

        # linear layer to define I'(i,h,w) elements (r2 equation)
        self.concat_layer = linear(2 * dim, dim, bias=True)

        # linear layer to compute attention weights
        self.attn = linear(dim, 1, bias=True)
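A minimal sketch of the full MAC read step using the three layers above, following the MAC read equations (r1, r2, r3); `read_step` and its arguments are assumptions, not the repository's actual `forward()`.

import torch
import torch.nn.functional as F

def read_step(unit, prev_memory, ctrl, knowledge_base):
    """Hypothetical read step for the ReadUnit above (an assumption).

    prev_memory:    [batch, dim]            m_{i-1}
    ctrl:           [batch, dim]            c_i
    knowledge_base: [batch, n_regions, dim]
    """
    # r1: interaction of the projected previous memory with every KB element
    interactions = unit.mem_proj_layer(prev_memory).unsqueeze(1) * knowledge_base

    # r2: I'(i,h,w) from the concatenation of the interactions and the KB
    interactions = unit.concat_layer(torch.cat([interactions, knowledge_base], dim=-1))

    # r3: control-guided attention over the regions
    scores = unit.attn(ctrl.unsqueeze(1) * interactions)      # [batch, n_regions, 1]
    weights = F.softmax(scores, dim=1)

    # read vector: weighted sum of the knowledge-base elements
    return (weights * knowledge_base).sum(dim=1)              # [batch, dim]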
Example #7
    def __init__(self, dim):
        """
        Constructor for the :py:class:`WriteUnit` of the ``S-MAC`` model.

        :param dim: global 'd' hidden dimension.
        :type dim: int

        """

        # call base constructor
        super(WriteUnit, self).__init__()

        # linear layer to create the new memory state from the current read vector (coming from the read unit)
        self.concat_layer = linear(dim, dim, bias=True)
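Given the single dim -> dim layer, the simplified (S-MAC) write step presumably reduces to a projection of the current read vector; this hypothetical sketch makes that assumption explicit.

def write_step(unit, read_vector):
    """Hypothetical S-MAC write step: an assumption based on the dim -> dim
    layer above, not the repository's actual forward()."""
    # new memory state: plain projection of the current read vector
    return unit.concat_layer(read_vector)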