Example #1
    def __init__(self, model, n_char, char_dim, n_filter, win_sizes):
        pc = model.add_subcollection()

        # character embedding lookup table: one char_dim vector per character
        self.clookup = pc.add_lookup_parameters((n_char, char_dim))
        # one (char_dim x size) convolution filter bank per window size
        self.Ws = [
            pc.add_parameters((char_dim, size, 1, n_filter),
                              init=dy.GlorotInitializer(gain=0.5))
            for size in win_sizes
        ]
        # per-filter-bank bias; (n_filter,) is a 1-d shape tuple, not a bare int
        self.bs = [
            pc.add_parameters((n_filter,), init=dy.ConstInitializer(0))
            for _ in win_sizes
        ]

        self.win_sizes = win_sizes
        self.pc = pc
        self.spec = (n_char, char_dim, n_filter, win_sizes)
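
Not part of the original snippet: a minimal forward-pass sketch, assuming dy is DyNet, that the function name is ours, and that each word has at least as many characters as the largest window size. It shows how the filters created above are typically applied with a valid convolution and max-over-time pooling.

import dynet as dy

def encode_chars(module, char_ids):
    # stack character embeddings into a (char_dim, seq_len, 1) "image"
    emb = dy.concatenate_cols([module.clookup[c] for c in char_ids])
    char_dim = emb.dim()[0][0]
    x = dy.reshape(emb, (char_dim, len(char_ids), 1))
    pooled = []
    for W, b, size in zip(module.Ws, module.bs, module.win_sizes):
        # valid convolution leaves a (1, seq_len - size + 1, n_filter) map
        conv = dy.conv2d_bias(x, dy.parameter(W), dy.parameter(b),
                              stride=[1, 1], is_valid=True)
        out_w, n_filter = len(char_ids) - size + 1, conv.dim()[0][2]
        # max over time: one n_filter-dimensional feature per window size
        pooled.append(dy.max_dim(dy.reshape(conv, (out_w, n_filter)), d=0))
    return dy.concatenate(pooled)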
Example #2
import numpy as np
import dynet as dy

def get_init(shape, init):
    # shape is a tuple of dims
    assert init in ["default", "const", "glorot", "ortho",
                    "gaussian"], "Unknown init method %s" % init
    if len(shape) == 1:  # set bias to 0
        return dy.ConstInitializer(0.)
    elif len(shape) == 2:
        if init == "default" or init == "glorot":
            return dy.GlorotInitializer()
        elif init == "const":  # constant 0, matching the 1-d case
            return dy.ConstInitializer(0.)
        elif init == "gaussian":
            return dy.NormalInitializer(var=0.01 * 0.01)
        elif init == "ortho":
            # stack square orthogonal blocks when shape[0] is a multiple of shape[1]
            assert shape[0] % shape[1] == 0, \
                "Bad shape %s for ortho_init" % str(shape)
            num = shape[0] // shape[1]
            arr = ortho_weight(shape[1]) if num == 1 else \
                np.concatenate([ortho_weight(shape[1]) for _ in range(num)])
            return dy.NumpyInitializer(arr)
    else:
        raise NotImplementedError("Currently only support parameter dim <= 2.")
Example #3
    def build_computation_graph(self, num_words, num_chars):
        """
        build graph and link to parameters
        self.predictors, self.char_rnn, self.wembeds, self.cembeds =
        """
        ## initialize word embeddings
        if self.embeds_file:
            print("loading embeddings")
            embeddings, emb_dim = load_embeddings_file(self.embeds_file)
            assert emb_dim == self.in_dim
            # vocabulary covers training words plus all pre-trained words
            num_words = len(set(embeddings.keys()).union(set(self.w2i.keys())))
            # init model parameters and initialize them
            self.wembeds = self.model.add_lookup_parameters(
                (num_words, self.in_dim), init=self.initializer)

            init = 0
            for word in embeddings:
                if word not in self.w2i:
                    self.w2i[word] = len(self.w2i.keys())  # add new word
                # every pre-trained word gets its embedding row initialized
                self.wembeds.init_row(self.w2i[word], embeddings[word])
                init += 1
            print("initialized: {}".format(init))
            del embeddings  # clean up
        else:
            self.wembeds = self.model.add_lookup_parameters(
                (num_words, self.in_dim), init=self.initializer)

        ## initialize character embeddings
        self.cembeds = None
        if self.c_in_dim > 0:
            self.cembeds = self.model.add_lookup_parameters(
                (num_chars, self.c_in_dim), init=self.initializer)
        if self.lex_dim > 0 and self.embed_lex:
            # +1 for the UNK property
            self.lembeds = self.model.add_lookup_parameters(
                (len(self.dictionary_values) + 1, self.lex_dim),
                init=dynet.GlorotInitializer())

        # stack as many BiRNN layers as specified by h_layers
        layers = []  # inner layers
        output_layers_dict = {}  # from task_id to actual softmax predictor
        for layer_num in range(0, self.h_layers):
            if layer_num == 0:
                if self.c_in_dim > 0:
                    # in_dim: size of each layer
                    if self.lex_dim > 0 and self.embed_lex:
                        lex_embed_size = self.lex_dim * len(
                            self.dictionary_values)
                        f_builder = self.builder(
                            1, self.in_dim + self.c_h_dim * 2 + lex_embed_size,
                            self.h_dim, self.model)
                        b_builder = self.builder(
                            1, self.in_dim + self.c_h_dim * 2 + lex_embed_size,
                            self.h_dim, self.model)
                    else:
                        f_builder = self.builder(
                            1, self.in_dim + self.c_h_dim * 2 + self.lex_dim,
                            self.h_dim, self.model)
                        b_builder = self.builder(
                            1, self.in_dim + self.c_h_dim * 2 + self.lex_dim,
                            self.h_dim, self.model)
                else:
                    f_builder = self.builder(1, self.in_dim + self.lex_dim,
                                             self.h_dim, self.model)
                    b_builder = self.builder(1, self.in_dim + self.lex_dim,
                                             self.h_dim, self.model)

                layers.append(BiRNNSequencePredictor(
                    f_builder, b_builder))  # yields forward and backward sequences
            else:
                # add inner layers (if h_layers >1)
                f_builder = self.builder(1, self.h_dim, self.h_dim, self.model)
                b_builder = self.builder(1, self.h_dim, self.h_dim, self.model)
                layers.append(BiRNNSequencePredictor(f_builder, b_builder))

        # store at which layer to predict task
        task2layer = {
            task_id: out_layer
            for task_id, out_layer in zip(self.task2tag2idx, self.pred_layer)
        }
        if len(task2layer) > 1:
            print("task2layer", task2layer)
        for task_id in task2layer:
            task_num_labels = len(self.task2tag2idx[task_id])
            if not self.crf:
                output_layers_dict[task_id] = FFSequencePredictor(
                    self.task2tag2idx[task_id],
                    Layer(self.model,
                          self.h_dim * 2,
                          task_num_labels,
                          dynet.softmax,
                          mlp=self.mlp,
                          mlp_activation=self.activation_mlp))
            else:
                print("CRF")
                output_layers_dict[task_id] = CRFSequencePredictor(
                    self.model,
                    task_num_labels,
                    self.task2tag2idx[task_id],
                    Layer(self.model,
                          self.h_dim * 2,
                          task_num_labels,
                          None,
                          mlp=self.mlp,
                          mlp_activation=self.activation_mlp),
                    viterbi_loss=self.viterbi_loss)

        self.char_rnn = BiRNNSequencePredictor(
            self.builder(1, self.c_in_dim, self.c_h_dim, self.model),
            self.builder(1, self.c_in_dim, self.c_h_dim, self.model))

        self.predictors = {}
        self.predictors["inner"] = layers
        self.predictors["output_layers_dict"] = output_layers_dict
        self.predictors["task_expected_at"] = task2layer
Example #4
"""
Various helper mappings.
"""
import _dynet as dynet
# DyNet adds init option to choose initializer: https://github.com/clab/dynet/blob/master/python/CHANGES.md
INITIALIZER_MAP = {
    'glorot': dynet.GlorotInitializer(),
    'constant': dynet.ConstInitializer(0.01),
    'uniform': dynet.UniformInitializer(0.1),
    'normal': dynet.NormalInitializer(mean=0, var=1)
}

TRAINER_MAP = {
    "sgd": dynet.SimpleSGDTrainer,
    "adam": dynet.AdamTrainer,
    "adadelta": dynet.AdadeltaTrainer,
    "adagrad": dynet.AdagradTrainer,
    "momentum": dynet.MomentumSGDTrainer
}

ACTIVATION_MAP = {
    "tanh": dynet.tanh,
    "rectify": dynet.rectify
}

BUILDERS = {
    "lstm": dynet.LSTMBuilder,  # is dynet.VanillaLSTMBuilder (cf. https://github.com/clab/dynet/issues/474)
    "lstmc": dynet.CoupledLSTMBuilder,
    "gru": dynet.GRUBuilder,
    "rnn": dynet.SimpleRNNBuilder
}
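
A hedged usage sketch of the maps above, resolving hyperparameter strings when building a model; the surrounding setup (dimensions, vocabulary size) is illustrative, not from the source.

model = dynet.ParameterCollection()
trainer = TRAINER_MAP["adam"](model)
# builder signature: (layers, input_dim, hidden_dim, model)
builder = BUILDERS["lstm"](1, 100, 100, model)
activation = ACTIVATION_MAP["tanh"]
wembeds = model.add_lookup_parameters((10000, 100),
                                      init=INITIALIZER_MAP["glorot"])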