Example #1
    def setUp(self):
        # Build the model from a minimal config: from_params dispatches
        # on the "type" key and returns the registered subclass.
        args = DotDict({"type": "Aphorism"})
        self.model = Model.from_params(args)
        self.MODEL_PATH = "/media/zzhuang/00091EA2000FB1D0/iGit/git_projects/libnlp/libNlp/data/models/Aphorism.pkl"
        assert type(self.model) == Aphorism
        assert type(self.model.main_network) == BasicLSTM
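The two asserts tie several snippets in this listing together: Model.from_params resolves "Aphorism" through a registry (sketched under Example #6), and the resolved class, whose constructor looks like Example #3, builds a BasicLSTM as its main_network. The registration itself would look roughly like this; the decorator form is an assumption, not taken from the source:

    # Hypothetical registration that would make this test pass; the
    # decorator follows the Registrable sketch under Example #6.
    @Model.register("Aphorism")
    class Aphorism(Model):
        ...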
Example #2
    @classmethod
    def from_params(cls, args: DotDict) -> 'StackedBRNN':
        # Pop every expected key off the config; anything left afterwards
        # is an unknown parameter and makes assert_empty raise.
        rnn_type = args.pop('type')
        embedding_size = args.pop('embedding_dim')
        hidden_size = args.pop('hidden_size')
        num_layers = args.pop('num_layers')
        dropout_rate = args.pop('dropout_rnn', 0)
        dropout_output = args.pop('dropout_rnn_output', False)
        args.assert_empty(cls.__name__)
        return cls(embedding_size,
                   hidden_size,
                   num_layers,
                   dropout_rate=dropout_rate,
                   dropout_output=dropout_output,
                   rnn_type=rnn_type)
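An illustrative call, assuming the DotDict behaviour sketched under Example #9; the key names are exactly the ones popped above, while the values are invented:

    args = DotDict({
        "type": "lstm",
        "embedding_dim": 300,
        "hidden_size": 128,
        "num_layers": 3,
        "dropout_rnn": 0.2,
        "dropout_rnn_output": True,
    })
    rnn = StackedBRNN.from_params(args)
    # A misspelled key such as "droput_rnn" would survive the pops, so
    # args.assert_empty('StackedBRNN') raises and the config typo is
    # caught at construction time instead of being silently ignored.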
Example #3
    def __init__(self, args: DotDict):
        """
        On initialization, 'Model' constructs one LSTM.

        :param args: config.pipeline.reader.encoding
        """
        self.args = args

        # TODO: here is just an example
        some_args = {
            "type": "BasicLSTM",
            "hidden_size": 300,
        }
        self.main_network = Network.from_params(DotDict(some_args))
Example #4
    @classmethod
    def from_params(cls, model_args: DotDict) -> 'Librarian':
        vocab_size = model_args.pop("vocab_size")
        embedding_dim = model_args.encoding.get("embedding_dim")

        # Each branch gets its own copy of the shared encoding config,
        # so the pops below cannot affect the other branch.
        doc_args = deepcopy(model_args.encoding)
        doc_args.pop("question_layers")
        doc_args.num_layers = doc_args.pop('doc_layers')

        question_args = deepcopy(model_args.encoding)
        question_args.pop('doc_layers')
        question_args.num_layers = question_args.pop('question_layers')

        # Note the asymmetry: the start args alias the original aligning
        # config, while the end args are an independent copy.
        start_aligning_args = model_args.aligning
        end_aligning_args = deepcopy(model_args.aligning)

        return cls(doc_args, question_args, start_aligning_args,
                   end_aligning_args, vocab_size, embedding_dim)
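The deepcopy calls are what keep the two encoder branches independent: both start from the same model_args.encoding mapping, and the pops would otherwise mutate it for the other branch. A minimal demonstration with invented values, using a plain dict for brevity:

    from copy import deepcopy

    encoding = {"doc_layers": 3, "question_layers": 3, "hidden_size": 128}
    doc_args = deepcopy(encoding)
    doc_args["num_layers"] = doc_args.pop("doc_layers")
    # The shared config is untouched: the question branch still sees
    # both layer counts and can do its own pops safely.
    assert "doc_layers" in encoding and "question_layers" in encoding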
Example #5
    def setUp(self):
        self.d = {'a': 1, 'b': {'c': 2, 'd': 3}}
        self.dd = DotDict(self.d)
Example #6
    @classmethod
    def from_params(cls, args: DotDict) -> 'Network':
        # the 'type' key names the registered Network subclass to build
        network_type = args.pop("type")
        return cls.by_name(network_type).from_params(args)
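This snippet and Example #10 both lean on the Registrable pattern: subclasses register under a string name, and by_name maps that name back to a class. A hedged sketch of such a registry; register, by_name and list_available are the methods used in this listing, while everything about their implementation is an assumption:

    class Registrable:
        _registry = {}

        @classmethod
        def register(cls, name):
            def add_subclass(subclass):
                cls._registry.setdefault(cls, {})[name] = subclass
                return subclass
            return add_subclass

        @classmethod
        def by_name(cls, name):
            return cls._registry[cls][name]

        @classmethod
        def list_available(cls):
            return list(cls._registry.get(cls, {}))

    class Network(Registrable):
        @classmethod
        def from_params(cls, args):
            return cls.by_name(args.pop("type")).from_params(args)

    @Network.register("BasicLSTM")
    class BasicLSTM(Network):
        @classmethod
        def from_params(cls, args):
            return cls()

    net = Network.from_params({"type": "BasicLSTM"})
    assert type(net) is BasicLSTM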
Example #7
    @classmethod
    def from_params(cls, args: DotDict) -> 'BilinearSeqAttn':
        doc_hidden_size = args.pop('doc_hidden_size')
        question_hidden_size = args.pop('question_hidden_size')
        return cls(doc_hidden_size, question_hidden_size)
Example #8
    def test_get(self):
        d = {'a': 1, 'b': {'c': 2, 'd': 3}}
        dd = DotDict(d)
        assert dd.a == 1
        assert dd.b.c == 2
        assert dd.b.d == 3
Example #9
    def test_add(self):
        d = {'a': 1, 'b': {'c': 2, 'd': 3}}
        dd = DotDict(d)
        dd.b.e = 4
        assert dd.b.e == 4
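Examples #5, #8 and #9 together pin down the DotDict contract: construction from a nested plain dict, attribute-style reads through arbitrary depth, and attribute-style writes that persist. A minimal sketch that satisfies these tests; the real libNlp implementation may differ, and the assert_empty message is invented:

    class DotDict(dict):
        """Dict with attribute access; a sketch fitted to the tests above."""

        def __getattr__(self, key):
            try:
                value = self[key]
            except KeyError:
                raise AttributeError(key)
            # Wrap nested plain dicts and store the wrapper back, so both
            # chained reads (dd.b.c) and writes (dd.b.e = 4) hit the same
            # underlying object.
            if isinstance(value, dict) and not isinstance(value, DotDict):
                value = DotDict(value)
                self[key] = value
            return value

        def __setattr__(self, key, value):
            self[key] = value

        def assert_empty(self, name):
            # from_params implementations pop every key they understand,
            # so whatever is left is an unrecognised parameter.
            if self:
                raise ValueError("unused parameters for %s: %r" % (name, dict(self)))

    dd = DotDict({'a': 1, 'b': {'c': 2, 'd': 3}})
    assert dd.a == 1 and dd.b.c == 2
    dd.b.e = 4
    assert dd.b.e == 4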
Example #10
    @classmethod
    def from_params(cls, args: DotDict) -> 'Model':
        # Pop the 'type' key and dispatch to the registered Model
        # subclass; if it is missing, point at the available names.
        model_type = args.pop("type", None)
        if model_type is None:
            raise ValueError("missing 'type'; available models: %s" % cls.list_available())
        return cls.by_name(model_type).from_params(args)
Example #11
    @classmethod
    def from_params(cls, args: DotDict) -> 'BilinearSeqAttn':
        doc_hidden_size = args.pop('doc_hidden_size')
        question_hidden_size = args.pop('question_hidden_size')
        args.assert_empty(cls.__name__)
        return cls(doc_hidden_size, question_hidden_size)