Code Example #1
    def evaluate(self,
                 dataset: torch.utils.data.Dataset) -> Dict[str, Any]:
        """
        This model's evaluation function.

        :param dataset:
            A torch.utils.data.Dataset object representing the dataset you wish
            to evaluate this model's performance on.

        :return:
            A Dict[str, Any] object mapping metric names to the values
            measured when evaluating the model.
        """
        # Load the dataset one sample at a time, padding each batch with the
        # module-level pad_collate function.
        dl = list(torch.utils.data.DataLoader(dataset=dataset,
                                              batch_size=1,
                                              shuffle=False,
                                              collate_fn=pad_collate))
        # Split the batches into inputs and targets, dropping the batch
        # dimension of 1 from each input tensor.
        x, y = zip(*[(entry[0].squeeze(0), entry[1]) for entry in dl])

        # Delegate to the module-level evaluate() helper, which returns a
        # dict mapping metric names to values.
        return evaluate(self, x, y, loss=self.loss)
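
Example #1 assumes "from typing import Any, Dict", "import torch", and module-level pad_collate and evaluate helpers that are not shown. As a rough illustration only, a minimal pad_collate could look like the sketch below, assuming each dataset item is a (variable-length input tensor, label) pair; the project's actual helper may differ.

import torch
from torch.nn.utils.rnn import pad_sequence

def pad_collate(batch):
    # Hypothetical collate function: pad the variable-length inputs in a
    # batch to a common length and stack the labels.
    xs, ys = zip(*batch)
    xs_padded = pad_sequence(xs, batch_first=True)  # (batch, max_len, ...)
    return xs_padded, torch.as_tensor(ys)

A typical call would then be something like metrics = model.evaluate(test_dataset), with the returned dict holding one entry per metric.
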
Code Example #2
File: main.py  Project: isperfee/TextClassification
    # Encode the test sentences as word-id sequences and one-hot encode the
    # test labels.
    data_test = encode_sentences([content[0] for content in test_data],
                                 word_to_id)
    label_test = to_categorical(
        encode_cate([content[1] for content in test_data], cat_to_id))

    # Pad / truncate every sequence to a fixed length of args.max_len.
    data_train = sequence.pad_sequences(data_train, maxlen=args.max_len)
    data_test = sequence.pad_sequences(data_test, maxlen=args.max_len)

    # Build the TextRNN model and compile it for multi-class classification.
    model = TextRNN(args.max_len, args.max_features,
                    args.embedding_size).build_model()
    model.compile('adam', 'categorical_crossentropy', metrics=['accuracy'])

    logger.info('Start training...')
    callbacks = [
        # Save the model after every epoch and stop early once validation
        # accuracy has not improved for two consecutive epochs.
        ModelCheckpoint('./model.h5', verbose=1),
        EarlyStopping(monitor='val_accuracy', patience=2, mode='max')
    ]

    history = model.fit(data_train,
                        label_train,
                        batch_size=args.batch_size,
                        epochs=args.epochs,
                        callbacks=callbacks,
                        validation_data=(data_test, label_test))

    model.summary()
    # Predict on the test set and reduce the probability / one-hot vectors to
    # class indices before printing the evaluation report.
    label_pre = model.predict(data_test)
    pred_argmax = label_pre.argmax(-1)
    label_test = label_test.argmax(-1)
    print(evaluate(label_test, pred_argmax, categories))
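
Example #2 calls a project-level evaluate(y_true, y_pred, categories) helper that is not shown (and it assumes data_train and label_train were prepared earlier in main.py). Below is a minimal sketch of such a helper, assuming scikit-learn is available and that categories is the ordered list of class names; the actual implementation in isperfee/TextClassification may differ.

from sklearn.metrics import classification_report

def evaluate(y_true, y_pred, categories):
    # Hypothetical stand-in: return a per-category precision / recall / F1
    # report as a string, ready to print.
    return classification_report(y_true, y_pred, target_names=categories)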