Code Example #1
def test_finetune_last():
    """ finetuning using 'last'.
    """
    dataset_path = ROOT_PATH + '/data/SS-Youtube/raw.pickle'
    nb_classes = 2
    min_acc = 0.68

    with open(VOCAB_PATH, 'r') as f:
        vocab = json.load(f)

    data = load_benchmark(dataset_path, vocab)
    print('Loading model from {}.'.format(PRETRAINED_PATH))
    model = torchmoji_transfer(nb_classes, PRETRAINED_PATH)
    print(model)
    model, acc = finetune(model,
                          data['texts'],
                          data['labels'],
                          nb_classes,
                          data['batch_size'],
                          method='last',
                          nb_epochs=1)

    print("Finetune last SS-Youtube 1 epoch acc: {}".format(acc))

    assert acc >= min_acc
Code Example #2
def test_finetune_full():
    """ finetuning using 'full'.
    """
    DATASET_PATH = ROOT_PATH + '/data/SS-Youtube/raw.pickle'
    nb_classes = 2
    # The Keras and PyTorch implementations of the Adam optimizer differ
    # slightly, which changes the results a bit.
    # We reduce the minimum accuracy needed here to pass the test.
    # See e.g. https://discuss.pytorch.org/t/suboptimal-convergence-when-compared-with-tensorflow-model/5099/11
    min_acc = 0.68

    with open(VOCAB_PATH, 'r') as f:
        vocab = json.load(f)

    data = load_benchmark(DATASET_PATH, vocab, extend_with=10000)
    print('Loading pyTorch model from {}.'.format(PRETRAINED_PATH))
    model = torchmoji_transfer(nb_classes,
                               PRETRAINED_PATH,
                               extend_embedding=data['added'])
    print(model)
    model, acc = finetune(model,
                          data['texts'],
                          data['labels'],
                          nb_classes,
                          data['batch_size'],
                          method='full',
                          nb_epochs=1)

    print("Finetune full SS-Youtube 1 epoch acc: {}".format(acc))
    assert acc >= min_acc
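
In the 'full' examples above, extend_with=10000 adds up to 10,000 dataset-specific tokens to the vocabulary, and extend_embedding=data['added'] tells torchmoji_transfer to grow the embedding layer by that many rows. The sketch below illustrates the general idea of extending a pretrained embedding matrix; it is an illustration only, not torchMoji's actual implementation, and the helper name extend_embedding_layer and the toy sizes are assumptions.

import torch
from torch import nn

def extend_embedding_layer(pretrained_weight, n_added, padding_idx=0):
    """Return an nn.Embedding with n_added extra rows appended.

    The original rows are copied from pretrained_weight; the new rows keep
    their random initialization so they can be learned during finetuning.
    """
    nb_tokens, embed_dim = pretrained_weight.shape
    extended = nn.Embedding(nb_tokens + n_added, embed_dim, padding_idx=padding_idx)
    with torch.no_grad():
        extended.weight[:nb_tokens] = pretrained_weight
    return extended

# Toy usage: a 50000-token embedding extended with 10000 new tokens.
pretrained = torch.randn(50000, 256)
embedding = extend_embedding_layer(pretrained, n_added=10000)
print(embedding)  # Embedding(60000, 256, padding_idx=0)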
Code Example #3
File: test_finetuning.py  Project: cclauss/torchMoji
def test_finetune_last():
    """ finetuning using 'last'.
    """
    dataset_path = ROOT_PATH + '/data/SS-Youtube/raw.pickle'
    nb_classes = 2
    min_acc = 0.68

    with open(VOCAB_PATH, 'r') as f:
        vocab = json.load(f)

    data = load_benchmark(dataset_path, vocab)
    print('Loading model from {}.'.format(PRETRAINED_PATH))
    model = torchmoji_transfer(nb_classes, PRETRAINED_PATH)
    print(model)
    model, acc = finetune(model, data['texts'], data['labels'], nb_classes,
                          data['batch_size'], method='last', nb_epochs=1)

    print("Finetune last SS-Youtube 1 epoch acc: {}".format(acc))

    assert acc >= min_acc
Code Example #4
File: test_finetuning.py  Project: cclauss/torchMoji
def test_finetune_full():
    """ finetuning using 'full'.
    """
    DATASET_PATH = ROOT_PATH + '/data/SS-Youtube/raw.pickle'
    nb_classes = 2
    # The Keras and PyTorch implementations of the Adam optimizer differ
    # slightly, which changes the results a bit.
    # We reduce the minimum accuracy needed here to pass the test.
    # See e.g. https://discuss.pytorch.org/t/suboptimal-convergence-when-compared-with-tensorflow-model/5099/11
    min_acc = 0.68

    with open(VOCAB_PATH, 'r') as f:
        vocab = json.load(f)

    data = load_benchmark(DATASET_PATH, vocab, extend_with=10000)
    print('Loading pyTorch model from {}.'.format(PRETRAINED_PATH))
    model = torchmoji_transfer(nb_classes, PRETRAINED_PATH, extend_embedding=data['added'])
    print(model)
    model, acc = finetune(model, data['texts'], data['labels'], nb_classes,
                          data['batch_size'], method='full', nb_epochs=1)

    print("Finetune full SS-Youtube 1 epoch acc: {}".format(acc))
    assert acc >= min_acc
Code Example #5
File: finetune_dataset.py  Project: cclauss/torchMoji
        assert len(vocab) == nb_tokens

        dset = p[0]
        path = p[1]
        nb_classes = p[2]
        use_f1_score = p[3]

        if FINETUNE_METHOD == 'last':
            extend_with = 0
        elif FINETUNE_METHOD in ['new', 'full', 'chain-thaw']:
            extend_with = 10000
        else:
            raise ValueError('Finetuning method not recognised!')

        # Load dataset.
        data = load_benchmark(path, vocab, extend_with=extend_with)

        (X_train, y_train) = (data['texts'][0], data['labels'][0])
        (X_val, y_val) = (data['texts'][1], data['labels'][1])
        (X_test, y_test) = (data['texts'][2], data['labels'][2])

        weight_path = PRETRAINED_PATH if FINETUNE_METHOD != 'new' else None
        nb_model_classes = 2 if use_f1_score else nb_classes
        model = torchmoji_transfer(
                    nb_model_classes,
                    weight_path,
                    extend_embedding=data['added'])
        print(model)

        # Training
        print('Training: {}'.format(path))
Code Example #6
import json
from torchmoji.finetuning import load_benchmark
from torchmoji.class_avg_finetuning import class_avg_finetune
from torchmoji.model_def import torchmoji_transfer
from torchmoji.global_variables import PRETRAINED_PATH, VOCAB_PATH, ROOT_PATH

DATASET_PATH = '{}/data/Sentence/sentenceSentiment_small.pickle'.format(
    ROOT_PATH)
nb_classes = 3

with open(VOCAB_PATH, 'r') as f:
    vocab = json.load(f)

# Load dataset. Extend the existing vocabulary with up to 10000 tokens from
# the training dataset.
data = load_benchmark(DATASET_PATH, vocab, extend_with=10000)

# Set up model and finetune. Note that we have to extend the embedding layer
# with the number of tokens added to the vocabulary.
#
# Also note that when using class average F1 to evaluate, the model has to be
# defined with two classes, since the model will be trained for each class
# separately.
model = torchmoji_transfer(2, PRETRAINED_PATH, extend_embedding=data['added'])
print(model)

# For finetuning however, pass in the actual number of classes.
model, f1 = class_avg_finetune(model,
                               data['texts'],
                               data['labels'],
                               nb_classes,
                               data['batch_size'],
                               method='last')  # finetuning method is illustrative
print('F1: {}'.format(f1))
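
As the comments above note, class_avg_finetune evaluates with a class average F1 score, training a two-class model for each class in turn. A rough sketch of the metric itself (an average of one-vs-rest F1 scores) is given below; the exact bookkeeping inside torchMoji may differ, and the helper class_average_f1 and the toy labels are assumptions made for illustration.

import numpy as np

def class_average_f1(y_true, y_pred, nb_classes):
    """Average the binary F1 scores obtained by treating each class
    as the positive label in a one-vs-rest fashion."""
    f1_scores = []
    for c in range(nb_classes):
        tp = np.sum((y_pred == c) & (y_true == c))
        fp = np.sum((y_pred == c) & (y_true != c))
        fn = np.sum((y_pred != c) & (y_true == c))
        precision = tp / (tp + fp) if (tp + fp) else 0.0
        recall = tp / (tp + fn) if (tp + fn) else 0.0
        f1 = (2 * precision * recall / (precision + recall)
              if (precision + recall) else 0.0)
        f1_scores.append(f1)
    return float(np.mean(f1_scores))

# Toy usage with three classes.
y_true = np.array([0, 1, 2, 2, 1, 0])
y_pred = np.array([0, 1, 2, 1, 1, 0])
print(class_average_f1(y_true, y_pred, nb_classes=3))  # ~0.82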
Code Example #7
4) Unfreeze all layers and train the entire model.
"""

from __future__ import print_function
import example_helper
import json
from torchmoji.model_def import torchmoji_transfer
from torchmoji.global_variables import PRETRAINED_PATH
from torchmoji.finetuning import (
     load_benchmark,
     finetune)


DATASET_PATH = '../data/kaggle-insults/raw.pickle'
nb_classes = 2

with open('../model/vocabulary.json', 'r') as f:
    vocab = json.load(f)

# Load dataset. Extend the existing vocabulary with up to 10000 tokens from
# the training dataset.
data = load_benchmark(DATASET_PATH, vocab, extend_with=10000)

# Set up model and finetune. Note that we have to extend the embedding layer
# with the number of tokens added to the vocabulary.
model = torchmoji_transfer(nb_classes, PRETRAINED_PATH, extend_embedding=data['added'])
print(model)
model, acc = finetune(model, data['texts'], data['labels'], nb_classes,
                      data['batch_size'], method='chain-thaw')
print('Acc: {}'.format(acc))
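
The chain-thaw docstring excerpted above ends with step 4, unfreezing all layers and training the entire model. A minimal sketch of that freeze/unfreeze schedule in plain PyTorch is shown below, assuming a toy model and a placeholder train_one_epoch function; it is not torchMoji's implementation of method='chain-thaw'.

from torch import nn

# Toy stand-in for a pretrained encoder plus a freshly added output layer.
model = nn.Sequential(
    nn.EmbeddingBag(1000, 64),  # "pretrained" layers
    nn.Linear(64, 128),
    nn.ReLU(),
    nn.Linear(128, 2),          # new output layer for the target task
)

def set_trainable(module, trainable):
    for param in module.parameters():
        param.requires_grad = trainable

def train_one_epoch(model):
    pass  # placeholder: run an optimizer over the parameters with requires_grad=True

layers = list(model.children())
new_layer = layers[-1]

# 1) Train only the new output layer.
set_trainable(model, False)
set_trainable(new_layer, True)
train_one_epoch(model)

# 2-3) Thaw and train one pretrained layer at a time.
for layer in layers[:-1]:
    set_trainable(model, False)
    set_trainable(layer, True)
    train_one_epoch(model)

# 4) Unfreeze all layers and train the entire model.
set_trainable(model, True)
train_one_epoch(model)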
Code Example #8
The 'last' method does the following:
0) Load all weights except for the softmax layer. Do not add tokens to the
   vocabulary and do not extend the embedding layer.
1) Freeze all layers except for the softmax layer.
2) Train.
"""

from __future__ import print_function
import example_helper
import json
from torchmoji.model_def import torchmoji_transfer
from torchmoji.global_variables import PRETRAINED_PATH, VOCAB_PATH, ROOT_PATH
from torchmoji.finetuning import (
     load_benchmark,
     finetune)

DATASET_PATH = '{}/data/SS-Youtube/raw.pickle'.format(ROOT_PATH)
nb_classes = 2

with open(VOCAB_PATH, 'r') as f:
    vocab = json.load(f)

# Load dataset.
data = load_benchmark(DATASET_PATH, vocab)

# Set up model and finetune
model = torchmoji_transfer(nb_classes, PRETRAINED_PATH)
print(model)
model, acc = finetune(model, data['texts'], data['labels'], nb_classes, data['batch_size'], method='last')
print('Acc: {}'.format(acc))
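
For method='last' (steps 0-2 in the docstring above), only the softmax layer is updated; finetune() handles the freezing internally. Continuing from the model defined just above, the fragment below sketches what that freezing amounts to in plain PyTorch, assuming the classification head is exposed as model.output_layer (the attribute name is an assumption made for this illustration).

from torch import optim

# Freeze everything, then re-enable gradients for the output layer only.
for param in model.parameters():
    param.requires_grad = False
for param in model.output_layer.parameters():  # attribute name assumed
    param.requires_grad = True

# Hand only the trainable parameters to the optimizer.
trainable_params = [p for p in model.parameters() if p.requires_grad]
optimizer = optim.Adam(trainable_params, lr=1e-3)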
Code Example #9
        assert len(vocab) == nb_tokens

        dset = p[0]
        path = p[1]
        nb_classes = p[2]
        use_f1_score = p[3]

        if FINETUNE_METHOD == 'last':
            extend_with = 0
        elif FINETUNE_METHOD in ['new', 'full', 'chain-thaw']:
            extend_with = 10000
        else:
            raise ValueError('Finetuning method not recognised!')

        # Load dataset.
        data = load_benchmark(path, vocab, extend_with=extend_with)

        (X_train, y_train) = (data['texts'][0], data['labels'][0])
        (X_val, y_val) = (data['texts'][1], data['labels'][1])
        (X_test, y_test) = (data['texts'][2], data['labels'][2])

        weight_path = PRETRAINED_PATH if FINETUNE_METHOD != 'new' else None
        nb_model_classes = 2 if use_f1_score else nb_classes
        model = torchmoji_transfer(nb_model_classes,
                                   weight_path,
                                   extend_embedding=data['added'])
        print(model)

        # Training
        print('Training: {}'.format(path))
Code Example #10
1) Freeze all layers except for the softmax layer.
2) Train.
"""

from __future__ import print_function
import example_helper
import json
from torchmoji.model_def import torchmoji_transfer
from torchmoji.global_variables import PRETRAINED_PATH, VOCAB_PATH, ROOT_PATH
from torchmoji.finetuning import (load_benchmark, finetune)

DATASET_PATH = '{}/data/SS-Youtube/raw.pickle'.format(ROOT_PATH)
nb_classes = 2

with open(VOCAB_PATH, 'r') as f:
    vocab = json.load(f)

# Load dataset.
data = load_benchmark(DATASET_PATH, vocab)

# Set up model and finetune
model = torchmoji_transfer(nb_classes, PRETRAINED_PATH)
print(model)
model, acc = finetune(model,
                      data['texts'],
                      data['labels'],
                      nb_classes,
                      data['batch_size'],
                      method='last')
print('Acc: {}'.format(acc))