Example #1
import json

from deepmoji.model_def import deepmoji_transfer
from deepmoji.global_variables import PRETRAINED_PATH
from deepmoji.finetuning import load_benchmark, finetune


def test_finetune_full():
    """ Finetune the DeepMoji model on the SS-Youtube dataset using the
    'full' method and check that it reaches the expected minimum accuracy.
    """
    DATASET_PATH = '../data/SS-Youtube/raw.pickle'
    nb_classes = 2
    min_acc = 0.65

    with open('../model/vocabulary.json', 'r') as f:
        vocab = json.load(f)

    data = load_benchmark(DATASET_PATH, vocab, extend_with=10000)
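    # `data` is a dict whose keys, as used below, include 'texts', 'labels',
    # 'batch_size', 'maxlen' and 'added' (the number of tokens appended to
    # the vocabulary via extend_with).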
    model = deepmoji_transfer(nb_classes,
                              data['maxlen'],
                              PRETRAINED_PATH,
                              extend_embedding=data['added'])
    model.summary()
    model, acc = finetune(model,
                          data['texts'],
                          data['labels'],
                          nb_classes,
                          data['batch_size'],
                          method='full',
                          nb_epochs=1)

    print("Finetune full SS-Youtube 1 epoch acc: {}".format(acc))
    assert acc >= min_acc
Example #2
import json

from deepmoji.model_def import deepmoji_transfer
from deepmoji.global_variables import PRETRAINED_PATH
from deepmoji.finetuning import load_benchmark, finetune


def test_finetune_last():
    """ Finetune the DeepMoji model on the SS-Youtube dataset using the
    'last' method and check that it reaches the expected minimum accuracy.
    """
    DATASET_PATH = '../data/SS-Youtube/raw.pickle'
    nb_classes = 2
    min_acc = 0.65

    with open('../model/vocabulary.json', 'r') as f:
        vocab = json.load(f)

    data = load_benchmark(DATASET_PATH, vocab)

    model = deepmoji_transfer(nb_classes, data['maxlen'], PRETRAINED_PATH)
    model.summary()
    model, acc = finetune(model, data['texts'], data['labels'], nb_classes,
                          data['batch_size'], method='last', nb_epochs=1)

    print("Finetune last SS-Youtube 1 epoch acc: {}".format(acc))
    assert acc >= min_acc
Example #3
"""Finetuning example.

Trains the DeepMoji model on the SS-Youtube dataset, using the 'last'
finetuning method and the accuracy metric.

The 'last' method does the following:
0) Load all weights except for the softmax layer. Do not add tokens to the
   vocabulary and do not extend the embedding layer.
1) Freeze all layers except for the softmax layer.
2) Train.
"""

from __future__ import print_function
import example_helper
import json
from deepmoji.model_def import deepmoji_transfer
from deepmoji.global_variables import PRETRAINED_PATH
from deepmoji.finetuning import (load_benchmark, finetune)

DATASET_PATH = '../data/SS-Youtube/raw.pickle'
nb_classes = 2

with open('../model/vocabulary.json', 'r') as f:
    vocab = json.load(f)

# Load dataset.
data = load_benchmark(DATASET_PATH, vocab)

# Set up model and finetune
model = deepmoji_transfer(nb_classes, data['maxlen'], PRETRAINED_PATH)
model.summary()
model, acc = finetune(model,
                      data['texts'],
                      data['labels'],
                      nb_classes,
                      data['batch_size'],
                      method='last')
print('Acc: {}'.format(acc))
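
# A typical follow-up (standard Keras API; the file name here is hypothetical)
# is to persist the finetuned weights for later reuse:
#
#     model.save_weights('finetuned_ss_youtube_last.hdf5')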
Example #4
"""

from __future__ import print_function
import example_helper
import json
from deepmoji.model_def import deepmoji_transfer
from deepmoji.global_variables import PRETRAINED_PATH
from deepmoji.finetuning import (
    load_benchmark,
    finetune)


DATASET_PATH = '../data/kaggle-insults/raw.pickle'
nb_classes = 2

with open('../model/vocabulary.json', 'r') as f:
    vocab = json.load(f)

# Load dataset. Extend the existing vocabulary with up to 10000 tokens from
# the training dataset.
data = load_benchmark(DATASET_PATH, vocab, extend_with=10000)

# Set up model and finetune. Note that we have to extend the embedding layer
# with the number of tokens added to the vocabulary.
model = deepmoji_transfer(nb_classes, data['maxlen'], PRETRAINED_PATH,
                          extend_embedding=data['added'])
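# Conceptually (a sketch with assumed names, not DeepMoji's exact code),
# extending the embedding appends data['added'] freshly initialised rows to
# the pretrained embedding matrix, giving the new tokens trainable weights:
#
#     import numpy as np
#     old = pretrained_embedding           # assumed array of shape (vocab, dim)
#     new_rows = np.random.uniform(-0.1, 0.1, (data['added'], old.shape[1]))
#     extended = np.vstack([old, new_rows])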
model.summary()
model, acc = finetune(model, data['texts'], data['labels'], nb_classes,
                      data['batch_size'], method='chain-thaw', verbose=2)
print('Acc: {}'.format(acc))
Example #5
        # Set up the transfer model for this dataset.
        model = deepmoji_transfer(
            nb_model_classes,
            data['maxlen'], weight_path,
            extend_embedding=data['added'])
        model.summary()

        # Training
        print('Training: {}'.format(path))
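        # class_avg_finetune evaluates with a class-averaged F1 score, while
        # finetune with metric='acc' evaluates with plain accuracy.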
        if use_f1_score:
            model, result = class_avg_finetune(model, data['texts'],
                                               data['labels'],
                                               nb_classes, data['batch_size'],
                                               FINETUNE_METHOD,
                                               verbose=VERBOSE)
        else:
            model, result = finetune(model, data['texts'], data['labels'],
                                     nb_classes, data['batch_size'],
                                     FINETUNE_METHOD, metric='acc',
                                     verbose=VERBOSE)

        # Write results
        results_path = '{}/{}_{}_{}_results.txt'.format(
            RESULTS_DIR, dset, FINETUNE_METHOD, rerun_iter)
        if use_f1_score:
            print('Overall F1 score (dset = {}): {}'.format(dset, result))
            with open(results_path, 'w') as f:
                f.write('F1: {}\n'.format(result))
        else:
            print('Test accuracy (dset = {}): {}'.format(dset, result))
            with open(results_path, 'w') as f:
                f.write('Acc: {}\n'.format(result))
Example #6
"""Finetuning example.

Trains the DeepMoji model on the SS-Youtube dataset, using the 'last'
finetuning method and the accuracy metric.

The 'last' method does the following:
0) Load all weights except for the softmax layer. Do not add tokens to the
   vocabulary and do not extend the embedding layer.
1) Freeze all layers except for the softmax layer.
2) Train.
"""

from __future__ import print_function
import example_helper
import json
from deepmoji.model_def import deepmoji_transfer
from deepmoji.global_variables import PRETRAINED_PATH
from deepmoji.finetuning import (
    load_benchmark,
    finetune)

DATASET_PATH = '../data/SS-Youtube/raw.pickle'
nb_classes = 2

with open('../model/vocabulary.json', 'r') as f:
    vocab = json.load(f)

# Load dataset.
data = load_benchmark(DATASET_PATH, vocab)

# Set up model and finetune
model = deepmoji_transfer(nb_classes, data['maxlen'], PRETRAINED_PATH)
model.summary()
model, acc = finetune(model, data['texts'], data['labels'], nb_classes,
                      data['batch_size'], method='last')
print('Acc: {}'.format(acc))