Example 1 (score: 0)
    def _make_stack(self, seq_length=4):
        """Build a small HardStack with deterministic dummy embeddings.

        Stores `embedding_dim`, `vocab_size`, and `seq_length` on `self`,
        constructs the stack with a trivial composition function, and then
        overwrites the learned embedding table with a predictable one so
        test outputs can be computed by hand.
        """
        self.embedding_dim = embedding_dim = 3
        self.vocab_size = vocab_size = 10
        self.seq_length = seq_length

        def compose_network(inp, inp_dim, outp_dim, vs, name="compose"):
            # Composition just sums the two child embeddings: stacking two
            # identity matrices and dotting adds the two halves of `inp`.
            eye = T.eye(outp_dim)
            return inp.dot(T.concatenate([eye, eye], axis=0))

        X = T.imatrix("X")
        transitions = T.imatrix("transitions")
        apply_dropout = T.scalar("apply_dropout")
        vs = VariableStore()
        self.stack = HardStack(embedding_dim,
                               embedding_dim,
                               vocab_size,
                               seq_length,
                               compose_network,
                               IdentityLayer,
                               apply_dropout,
                               vs,
                               X=X,
                               transitions=transitions,
                               make_test_fn=True)

        # Swap in a dummy embedding table: row i is the vector [i, i, ..., i].
        dummy_embeddings = np.tile(
            np.arange(vocab_size).reshape((vocab_size, 1)),
            (1, embedding_dim))
        self.stack.embeddings.set_value(dummy_embeddings)
Example 2 (score: 0)
    def _make_stack(self, seq_length=4):
        """Build a small HardStack seeded with deterministic dummy embeddings.

        Stores `embedding_dim`, `vocab_size`, and `seq_length` on `self` and
        constructs the stack with a trivial composition function, passing a
        predictable embedding table via `initial_embeddings` so test outputs
        can be computed by hand. Input batch-norm and dropout are disabled
        to keep the forward pass deterministic.
        """
        self.embedding_dim = embedding_dim = 3
        self.vocab_size = vocab_size = 10
        self.seq_length = seq_length

        def compose_network(inp, inp_dim, outp_dim, vs, name="compose"):
            # Composition just sums the two child embeddings: stacking two
            # identity matrices and dotting adds the two halves of `inp`.
            eye = T.eye(outp_dim)
            return inp.dot(T.concatenate([eye, eye], axis=0))

        X = T.imatrix("X")
        transitions = T.imatrix("transitions")
        training_mode = T.scalar("training_mode")
        ground_truth_transitions_visible = T.scalar(
            "ground_truth_transitions_visible", dtype="int32")
        vs = VariableStore()

        # Dummy embedding table: row i is the vector [i, i, ..., i].
        init_emb = np.tile(
            np.arange(vocab_size).reshape((vocab_size, 1)),
            (1, embedding_dim))

        self.stack = HardStack(embedding_dim,
                               embedding_dim,
                               vocab_size,
                               seq_length,
                               compose_network,
                               IdentityLayer,
                               training_mode,
                               ground_truth_transitions_visible,
                               vs,
                               X=X,
                               transitions=transitions,
                               make_test_fn=True,
                               initial_embeddings=init_emb,
                               use_input_batch_norm=False,
                               use_input_dropout=False)