Example #1
    def test_case_1(self):
        args = {
            "epochs": 3,
            "batch_size": 8,
            "validate": True,
            "use_cuda": True,
            "pickle_path": "./save/",
            "save_best_dev": True,
            "model_name": "default_model_name.pkl",
            "loss": Loss(None),
            "optimizer": Optimizer("Adam", lr=0.001, weight_decay=0),
            "vocab_size": 20,
            "word_emb_dim": 100,
            "rnn_hidden_units": 100,
            "num_classes": 3
        }
        trainer = SeqLabelTrainer()
        # each training example pairs a word-id sequence with a label-id sequence
        train_data = [
            [[1, 2, 3, 4, 5, 6], [1, 0, 1, 0, 1, 2]],
            [[2, 3, 4, 5, 1, 6], [0, 1, 0, 1, 0, 2]],
            [[1, 4, 1, 4, 1, 6], [1, 0, 1, 0, 1, 2]],
            [[1, 2, 3, 4, 5, 6], [1, 0, 1, 0, 1, 2]],
            [[2, 3, 4, 5, 1, 6], [0, 1, 0, 1, 0, 2]],
            [[1, 4, 1, 4, 1, 6], [1, 0, 1, 0, 1, 2]],
        ]
        dev_data = train_data
        model = SeqLabeling(args)
        trainer.train(network=model, train_data=train_data, dev_data=dev_data)
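Note: this snippet constructs SeqLabelTrainer() with no arguments, so training falls back to the trainer's built-in defaults (compare the "default_args" dict in Example #6); the "args" dict here only configures the SeqLabeling model. A minimal variant that forwards the same dict to the trainer, as Example #2 does, might look like:

# hedged variant, assuming SeqLabelTrainer accepts these keys via **kwargs,
# as Example #2 does
trainer = SeqLabelTrainer(**args)
trainer.train(network=model, train_data=train_data, dev_data=dev_data)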
Example #2
    def test_case_1(self):
        args = {
            "epochs": 3,
            "batch_size": 2,
            "validate": False,
            "use_cuda": False,
            "pickle_path": "./save/",
            "save_best_dev": True,
            "model_name": "default_model_name.pkl",
            "loss": Loss("cross_entropy"),
            "optimizer": Optimizer("Adam", lr=0.001, weight_decay=0),
            "vocab_size": 10,
            "word_emb_dim": 100,
            "rnn_hidden_units": 100,
            "num_classes": 5,
            "evaluator": SeqLabelEvaluator()
        }
        trainer = SeqLabelTrainer(**args)

        train_data = [
            [['a', 'b', 'c', 'd', 'e'], ['a', '@', 'c', 'd', 'e']],
            [['a', '@', 'c', 'd', 'e'], ['a', '@', 'c', 'd', 'e']],
            [['a', 'b', '#', 'd', 'e'], ['a', '@', 'c', 'd', 'e']],
            [['a', 'b', 'c', '?', 'e'], ['a', '@', 'c', 'd', 'e']],
            [['a', 'b', 'c', 'd', '$'], ['a', '@', 'c', 'd', 'e']],
            [['!', 'b', 'c', 'd', 'e'], ['a', '@', 'c', 'd', 'e']],
        ]
        vocab = {
            'a': 0,
            'b': 1,
            'c': 2,
            'd': 3,
            'e': 4,
            '!': 5,
            '@': 6,
            '#': 7,
            '$': 8,
            '?': 9
        }
        label_vocab = {'a': 0, '@': 1, 'c': 2, 'd': 3, 'e': 4}

        data_set = DataSet()
        for example in train_data:
            text, label = example[0], example[1]
            x = TextField(text, is_target=False)
            x_len = LabelField(len(text), is_target=False)
            y = TextField(label, is_target=False)
            ins = Instance(word_seq=x, truth=y, word_seq_origin_len=x_len)
            data_set.append(ins)

        data_set.index_field("word_seq", vocab)
        data_set.index_field("truth", label_vocab)

        model = SeqLabeling(args)

        trainer.train(network=model, train_data=data_set, dev_data=data_set)
        # If this can run, everything is OK.

        os.system("rm -rf save")
        print("pickle path deleted")
Example #3
    def __init__(self,
                 train_data,
                 model,
                 n_epochs=3,
                 batch_size=32,
                 print_every=-1,
                 validate_every=-1,
                 dev_data=None,
                 use_cuda=False,
                 save_path="./save",
                 optimizer=Optimizer("Adam", lr=0.01, weight_decay=0),
                 need_check_code=True,
                 **kwargs):
        super(Trainer, self).__init__()

        self.train_data = train_data
        self.dev_data = dev_data  # If None, no validation is performed.
        self.model = model
        self.n_epochs = int(n_epochs)
        self.batch_size = int(batch_size)
        self.use_cuda = bool(use_cuda)
        self.save_path = save_path
        self.print_every = int(print_every)
        self.validate_every = int(validate_every)
        self._best_accuracy = 0

        if need_check_code:
            _check_code(dataset=train_data, model=model, dev_data=dev_data)

        model_name = model.__class__.__name__
        assert hasattr(
            self.model,
            'get_loss'), "model {} has to have a 'get_loss' function.".format(
                model_name)
        self.loss_func = self.model.get_loss
        if isinstance(optimizer, torch.optim.Optimizer):
            self.optimizer = optimizer
        else:
            self.optimizer = optimizer.construct_from_pytorch(
                self.model.parameters())

        assert hasattr(
            self.model,
            'evaluate'), "model {} has to have an 'evaluate' function.".format(
                model_name)
        self.evaluator = self.model.evaluate

        if self.dev_data is not None:
            self.tester = Tester(model=self.model,
                                 data=self.dev_data,
                                 batch_size=self.batch_size,
                                 use_cuda=self.use_cuda)

        for k, v in kwargs.items():
            setattr(self, k, v)

        self.step = 0
        self.start_time = None  # start timestamp
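The constructor above accepts the optimizer in two forms: a fastNLP Optimizer prototype, which is materialized later via construct_from_pytorch, or a ready-made torch.optim.Optimizer. A sketch of both call styles, assuming the signature shown:

# both forms satisfy the isinstance branch in __init__
trainer = Trainer(train_data, model,
                  optimizer=Optimizer("SGD", lr=0.01, momentum=0.9))
trainer = Trainer(train_data, model,
                  optimizer=torch.optim.Adam(model.parameters(), lr=1e-3))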
Example #4
def train():
    train_args, model_args = ConfigSection(), ConfigSection()
    ConfigLoader.load_config(config_dir, {"text_class": train_args})

    # load dataset
    print("Loading data...")
    ds_loader = ClassDatasetLoader(train_data_dir)
    data = ds_loader.load()
    print(data[0])

    # pre-process data
    pre = ClassPreprocess()
    data_train = pre.run(data, pickle_path=save_dir)
    print("vocabulary size:", pre.vocab_size)
    print("number of classes:", pre.num_classes)

    model_args["num_classes"] = pre.num_classes
    model_args["vocab_size"] = pre.vocab_size

    # construct model
    print("Building model...")
    model = CNNText(model_args)

    # ConfigSaver().save_config(config_dir, {"text_class_model": model_args})

    # train
    print("Training...")

    # two definition styles:
    # 1. pass the config section directly
    # trainer = ClassificationTrainer(train_args)

    # 2. pass keyword arguments explicitly
    trainer = ClassificationTrainer(epochs=train_args["epochs"],
                                    batch_size=train_args["batch_size"],
                                    validate=train_args["validate"],
                                    use_cuda=train_args["use_cuda"],
                                    pickle_path=save_dir,
                                    save_best_dev=train_args["save_best_dev"],
                                    model_name=model_name,
                                    loss=Loss("cross_entropy"),
                                    optimizer=Optimizer("SGD",
                                                        lr=0.001,
                                                        momentum=0.9))
    trainer.train(model, data_train)

    print("Training finished!")

    saver = ModelSaver(os.path.join(save_dir, model_name))
    saver.save_pytorch(model)
    print("Model saved!")
Example #5
def train():
    train_args, model_args = ConfigSection(), ConfigSection()
    ConfigLoader.load_config(config_dir, {"text_class": train_args})

    # load dataset
    print("Loading data...")
    data = TextClassifyDataSet(load_func=ClassDataSetLoader.load)
    data.load(train_data_dir)

    print("vocabulary size:", len(data.word_vocab))
    print("number of classes:", len(data.label_vocab))
    save_pickle(data.word_vocab, save_dir, "word2id.pkl")
    save_pickle(data.label_vocab, save_dir, "label2id.pkl")

    model_args["num_classes"] = len(data.label_vocab)
    model_args["vocab_size"] = len(data.word_vocab)

    # construct model
    print("Building model...")
    model = CNNText(model_args)

    # train
    print("Training...")
    trainer = ClassificationTrainer(epochs=train_args["epochs"],
                                    batch_size=train_args["batch_size"],
                                    validate=train_args["validate"],
                                    use_cuda=train_args["use_cuda"],
                                    pickle_path=save_dir,
                                    save_best_dev=train_args["save_best_dev"],
                                    model_name=model_name,
                                    loss=Loss("cross_entropy"),
                                    optimizer=Optimizer("SGD", lr=0.001, momentum=0.9))
    trainer.train(model, data)

    print("Training finished!")

    saver = ModelSaver(os.path.join(save_dir, model_name))
    saver.save_pytorch(model)
    print("Model saved!")
Example #6
    def __init__(self, **kwargs):
        """
        :param kwargs: dict of (key, value), or dict-like object. key is str.

        The base trainer requires the following keys:
            - epochs: int, the number of epochs in training
            - validate: bool, whether or not to validate on dev set
            - batch_size: int
            - pickle_path: str, the path to pickle files for pre-processing
        """
        super(BaseTrainer, self).__init__()
        """
            "default_args" provides default value for important settings. 
            The initialization arguments "kwargs" with the same key (name) will override the default value. 
            "kwargs" must have the same type as "default_args" on corresponding keys. 
            Otherwise, error will raise.
        """
        default_args = {
            "epochs": 3,
            "batch_size": 8,
            "validate": True,
            "use_cuda": True,
            "pickle_path": "./save/",
            "save_best_dev": True,
            "model_name": "default_model_name.pkl",
            "print_every_step": 1,
            "loss": Loss(None),  # used to pass type check
            "optimizer": Optimizer("Adam", lr=0.001, weight_decay=0)
        }
        """
            "required_args" is the collection of arguments that users must pass to Trainer explicitly. 
            This is used to warn users of essential settings in the training. 
            Specially, "required_args" does not have default value, so they have nothing to do with "default_args".
        """
        required_args = {
            "task"  # one of ("seq_label", "text_classify")
        }

        for req_key in required_args:
            if req_key not in kwargs:
                logger.error("Trainer lacks argument {}".format(req_key))
                raise ValueError("Trainer lacks argument {}".format(req_key))
        self._task = kwargs["task"]

        for key in default_args:
            if key in kwargs:
                if isinstance(kwargs[key], type(default_args[key])):
                    default_args[key] = kwargs[key]
                else:
                    msg = "Argument %s type mismatch: expected %s while get %s" % (
                        key, type(default_args[key]), type(kwargs[key]))
                    logger.error(msg)
                    raise ValueError(msg)
            else:
                # BaseTrainer doesn't care about extra arguments
                pass
        print(default_args)

        self.n_epochs = default_args["epochs"]
        self.batch_size = default_args["batch_size"]
        self.pickle_path = default_args["pickle_path"]
        self.validate = default_args["validate"]
        self.save_best_dev = default_args["save_best_dev"]
        self.use_cuda = default_args["use_cuda"]
        self.model_name = default_args["model_name"]
        self.print_every_step = default_args["print_every_step"]

        self._model = None
        # returns a PyTorch loss function, or None
        self._loss_func = default_args["loss"].get()
        self._optimizer = None
        self._optimizer_proto = default_args["optimizer"]
        self._summary_writer = SummaryWriter(self.pickle_path +
                                             'tensorboard_logs')
        self._graph_summaried = False
        self._best_accuracy = 0.0
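Under this kwargs convention, only the required "task" key must be supplied; any other recognized key overrides its default, subject to the type check. A hypothetical direct call (the concrete subclasses in the other examples evidently fill in "task" themselves, since they never pass it):

# hypothetical: "task" is required, "epochs"/"batch_size" override defaults;
# passing a value of the wrong type raises ValueError
trainer = BaseTrainer(task="seq_label", epochs=10, batch_size=16)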
Example #7
def test_training():
    # Config Loader
    trainer_args = ConfigSection()
    model_args = ConfigSection()
    ConfigLoader().load_config(config_dir, {
        "test_seq_label_trainer": trainer_args, "test_seq_label_model": model_args})

    data_set = TokenizeDataSetLoader().load(data_path)
    word_vocab = Vocabulary()
    label_vocab = Vocabulary()
    data_set.update_vocab(word_seq=word_vocab, label_seq=label_vocab)
    data_set.index_field("word_seq", word_vocab).index_field("label_seq", label_vocab)
    data_set.set_origin_len("word_seq")
    data_set.rename_field("label_seq", "truth").set_target(truth=False)
    data_train, data_dev = data_set.split(0.3, shuffle=True)
    model_args["vocab_size"] = len(word_vocab)
    model_args["num_classes"] = len(label_vocab)

    save_pickle(word_vocab, pickle_path, "word2id.pkl")
    save_pickle(label_vocab, pickle_path, "label2id.pkl")

    trainer = SeqLabelTrainer(
        epochs=trainer_args["epochs"],
        batch_size=trainer_args["batch_size"],
        validate=False,
        use_cuda=False,
        pickle_path=pickle_path,
        save_best_dev=trainer_args["save_best_dev"],
        model_name=model_name,
        optimizer=Optimizer("SGD", lr=0.01, momentum=0.9),
    )

    # Model
    model = SeqLabeling(model_args)

    # Start training
    trainer.train(model, data_train, data_dev)

    # Saver
    saver = ModelSaver(os.path.join(pickle_path, model_name))
    saver.save_pytorch(model)

    del model, trainer

    # Define the same model
    model = SeqLabeling(model_args)

    # Dump trained parameters into the model
    ModelLoader.load_pytorch(model, os.path.join(pickle_path, model_name))

    # Load test configuration
    tester_args = ConfigSection()
    ConfigLoader().load_config(config_dir, {"test_seq_label_tester": tester_args})

    # Tester
    tester = SeqLabelTester(batch_size=4,
                            use_cuda=False,
                            pickle_path=pickle_path,
                            model_name="seq_label_in_test.pkl",
                            evaluator=SeqLabelEvaluator()
                            )

    # Start testing with validation data
    data_dev.set_target(truth=True)
    tester.test(model, data_dev)
Example #8
# construct model (model_args collects the same settings passed explicitly below)
model_args = {
    'num_classes': n_classes,
    'vocab_size': vocab_size
}
model = ClassificationModel(num_classes=n_classes, vocab_size=vocab_size)

# construct trainer
train_args = {
    "epochs": 3,
    "batch_size": 16,
    "pickle_path": data_dir,
    "validate": False,
    "save_best_dev": False,
    "model_saved_path": None,
    "use_cuda": True,
    "loss": Loss("cross_entropy"),
    "optimizer": Optimizer("Adam", lr=0.001)
}
trainer = ClassificationTrainer(**train_args)

# start training
trainer.train(model, train_data=train_set, dev_data=dev_set)

# predict using model
data_infer = [x[0] for x in data]
infer = ClassificationInfer(data_dir)
labels_pred = infer.predict(model.cpu(), data_infer)
print(labels_pred)
Example #9
def train_and_test():
    # Config Loader
    trainer_args = ConfigSection()
    model_args = ConfigSection()
    ConfigLoader("config.cfg").load_config(config_dir, {
        "test_seq_label_trainer": trainer_args,
        "test_seq_label_model": model_args
    })

    # Data Loader
    pos_loader = POSDatasetLoader(data_path)
    train_data = pos_loader.load_lines()

    # Preprocessor
    p = SeqLabelPreprocess()
    data_train, data_dev = p.run(train_data,
                                 pickle_path=pickle_path,
                                 train_dev_split=0.5)
    model_args["vocab_size"] = p.vocab_size
    model_args["num_classes"] = p.num_classes

    # Trainer: two definition styles
    # 1
    # trainer = SeqLabelTrainer(trainer_args.data)

    # 2
    trainer = SeqLabelTrainer(
        epochs=trainer_args["epochs"],
        batch_size=trainer_args["batch_size"],
        validate=trainer_args["validate"],
        use_cuda=trainer_args["use_cuda"],
        pickle_path=pickle_path,
        save_best_dev=trainer_args["save_best_dev"],
        model_name=model_name,
        optimizer=Optimizer("SGD", lr=0.01, momentum=0.9),
    )

    # Model
    model = SeqLabeling(model_args)

    # Start training
    trainer.train(model, data_train, data_dev)
    print("Training finished!")

    # Saver
    saver = ModelSaver(os.path.join(pickle_path, model_name))
    saver.save_pytorch(model)
    print("Model saved!")

    del model, trainer, pos_loader

    # Define the same model
    model = SeqLabeling(model_args)

    # Dump trained parameters into the model
    ModelLoader.load_pytorch(model, os.path.join(pickle_path, model_name))
    print("model loaded!")

    # Load test configuration
    tester_args = ConfigSection()
    ConfigLoader("config.cfg").load_config(
        config_dir, {"test_seq_label_tester": tester_args})

    # Tester
    tester = SeqLabelTester(save_output=False,
                            save_loss=False,
                            save_best_dev=False,
                            batch_size=4,
                            use_cuda=False,
                            pickle_path=pickle_path,
                            model_name="seq_label_in_test.pkl",
                            print_every_step=1)

    # Start testing with validation data
    tester.test(model, data_dev)

    # print test results
    print(tester.show_metrics())
    print("model tested!")
Example #10
def train_and_test():
    # Config Loader
    trainer_args = ConfigSection()
    model_args = ConfigSection()
    ConfigLoader().load_config(config_dir, {
        "test_seq_label_trainer": trainer_args,
        "test_seq_label_model": model_args
    })

    data_set = SeqLabelDataSet()
    data_set.load(data_path)
    train_set, dev_set = data_set.split(0.3, shuffle=True)
    model_args["vocab_size"] = len(data_set.word_vocab)
    model_args["num_classes"] = len(data_set.label_vocab)

    save_pickle(data_set.word_vocab, pickle_path, "word2id.pkl")
    save_pickle(data_set.label_vocab, pickle_path, "label2id.pkl")

    trainer = SeqLabelTrainer(
        epochs=trainer_args["epochs"],
        batch_size=trainer_args["batch_size"],
        validate=False,
        use_cuda=trainer_args["use_cuda"],
        pickle_path=pickle_path,
        save_best_dev=trainer_args["save_best_dev"],
        model_name=model_name,
        optimizer=Optimizer("SGD", lr=0.01, momentum=0.9),
    )

    # Model
    model = SeqLabeling(model_args)

    # Start training
    trainer.train(model, train_set, dev_set)
    print("Training finished!")

    # Saver
    saver = ModelSaver(os.path.join(pickle_path, model_name))
    saver.save_pytorch(model)
    print("Model saved!")

    del model, trainer

    change_field_is_target(dev_set, "truth", True)

    # Define the same model
    model = SeqLabeling(model_args)

    # Dump trained parameters into the model
    ModelLoader.load_pytorch(model, os.path.join(pickle_path, model_name))
    print("model loaded!")

    # Load test configuration
    tester_args = ConfigSection()
    ConfigLoader().load_config(config_dir,
                               {"test_seq_label_tester": tester_args})

    # Tester
    tester = SeqLabelTester(batch_size=4,
                            use_cuda=False,
                            pickle_path=pickle_path,
                            model_name="seq_label_in_test.pkl",
                            evaluator=SeqLabelEvaluator())

    # Start testing with validation data
    tester.test(model, dev_set)
    print("model tested!")