Example #1
def run_experiment():
    score = open("score.csv", "wt")
    # k = number of FST nodes to add; k == 0 falls back to the simplified-JSON machine
    for k in range(0, 201, 10):
        if k == 0:
            alphabet, states, init_state, accept_states, transitions = \
                SIMPLIFIED_JSON_ALPHABET, SIMPLIFIED_JSON_STATES, SIMPLIFIED_JSON_INIT_STATE, \
                SIMPLIFIED_JSON_ACCEPT_STATES, SIMPLIFIED_JSON_TRANSITIONS
        else:
            alphabet, states, init_state, accept_states, transitions = load_fst_by_nodes_to_add(k)
            init_state = init_state[0]

        fst = FST(alphabet, states, init_state, accept_states, transitions)
        fst_dataset = FstDataset(BinaryFSTParams(), fst=fst)

        activator_params = BinaryActivatorParams()
        activator_params.EPOCHS = 100

        activator = binaryActivator(BinaryModule(BinaryModuleParams(alphabet_size=len(fst_dataset.chr_embed))),
                                    activator_params, fst_dataset, split_fst_dataset)
        activator.train(validate_rate=10)

        # one CSV row per metric: <k>,<metric name>,<value per epoch...>
        score.write(f"{k},train_loss," + ",".join(str(v) for v in activator.loss_train_vec) + "\n")
        score.write(f"{k},train_acc," + ",".join(str(v) for v in activator.accuracy_train_vec) + "\n")
        score.write(f"{k},train_auc," + ",".join(str(v) for v in activator.auc_train_vec) + "\n")
        score.write(f"{k},dev_loss," + ",".join(str(v) for v in activator.loss_dev_vec) + "\n")
        score.write(f"{k},dev_acc," + ",".join(str(v) for v in activator.accuracy_dev_vec) + "\n")
        score.write(f"{k},dev_auc," + ",".join(str(v) for v in activator.auc_dev_vec) + "\n")
    score.close()

def train_acceptor():
    json_dataset = SimpleJsonAcceptorDataset(size=1000)
    activator_params = BinaryActivatorParams()
    activator_params.EPOCHS = 50
    activator = binaryActivator(
        BinaryModule(
            BinaryModuleParams(alphabet_size=len(json_dataset.chr_embed))),
        activator_params, json_dataset, split_simplified_json_acceptor_dataset)
    activator.train(validate_rate=10)
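
# Both drivers above hand the activator a dataset-split helper
# (split_fst_dataset, split_simplified_json_acceptor_dataset) that is not shown
# in this snippet. A minimal sketch of what such a helper could look like,
# assuming it only has to produce train/dev DataLoaders; the name
# split_dataset, the ratio and the batch size are illustrative, not the repo's
# actual implementation.
from torch.utils.data import DataLoader, random_split

def split_dataset(dataset, batch_size=64, train_ratio=0.8):
    # split into train/dev subsets and wrap each in a DataLoader
    train_len = int(len(dataset) * train_ratio)
    train_set, dev_set = random_split(dataset, [train_len, len(dataset) - train_len])
    return (DataLoader(train_set, batch_size=batch_size, shuffle=True),
            DataLoader(dev_set, batch_size=batch_size))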
Example #3
    def _check_configuration(self, dataset: FstDataset,
                             model_params: BinaryModuleParams,
                             activator_params: BinaryActivatorParams):
        model = BinaryModule(model_params)
        activator = binaryActivator(model, activator_params, dataset)
        activator.train(show_plot=False)
        # the model is no longer needed once the activator holds the metrics
        del model

        return (activator.accuracy_train_vec, activator.auc_train_vec,
                activator.loss_train_vec, activator.accuracy_dev_vec,
                activator.auc_dev_vec, activator.loss_dev_vec)

from torch.optim import Adam, SGD

def run_trial(params):
    # collect configuration
    alphabet_size = int(params["alphabet"])
    states_size = int(params["states"])
    embed_dim = int(params["embed_dim"])
    lstm_layers = int(params["lstm_layers"])
    lstm_dropout = params["lstm_dropout"]
    lstm_out = int(params["lstm_out"])
    batch_size = int(params["batch_size"])
    opt = Adam if params["optimizer"] == "ADAM" else SGD
    lr = params["learning_rate"]
    l2_reg = params["regularization"]
    epochs = int(params["epochs"])

    # define data-set
    ds_params = BinaryFSTParams()
    ds_params.FST_ALPHABET_SIZE = alphabet_size
    ds_params.FST_STATES_SIZE = states_size
    ds_params.FST_ACCEPT_STATES_SIZE = 2
    dataset = FstDataset(ds_params)

    # define model
    model_params = BinaryModuleParams(alphabet_size=len(dataset.chr_embed),
                                      embed_dim=embed_dim,
                                      lstm_layers=lstm_layers,
                                      lstm_dropout=lstm_dropout,
                                      lstm_out_dim=lstm_out)
    model_params.OPTIMIZER = opt
    model_params.REGULARIZATION = l2_reg
    model_params.LEARNING_RATE = lr

    # define activator
    activator_params = BinaryActivatorParams()
    activator_params.EPOCHS = epochs
    activator_params.BATCH_SIZE = batch_size

    model = BinaryModule(model_params)
    activator = binaryActivator(model, activator_params, dataset)
    activator.train(show_plot=False, apply_nni=True, early_stop=True)
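
# run_trial reads its hyper-parameters from a plain dict, and train() is called
# with apply_nni=True, which suggests an NNI tuner drives it. A plausible entry
# point under that assumption: nni.get_next_parameter() is the standard NNI
# call, and the search-space keys must match the params[...] lookups above.
import nni

if __name__ == "__main__":
    trial_params = nni.get_next_parameter()  # one assignment from the tuner
    run_trial(trial_params)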
Example #5
import os
from copy import deepcopy
from time import gmtime, strftime
from torch.optim import Adam
from torch.utils.data import DataLoader
from binary_params import BinaryActivatorParams, BinaryFSTParams, BinaryModuleParams
from binary_rnn_activator import binaryActivator
from binary_rnn_model import BinaryModule
# YelpSentimentDataset, yelp_sentiment_data_split, FstDoubleAcceptDataset and the
# size constants below are defined elsewhere in the repo (module path not shown)

if __name__ == "__main__":
    sources = [
        os.path.join("yelp_review_polarity_csv", "train.csv"),
        os.path.join("yelp_review_polarity_csv", "test.csv"),
    ]
    ds = YelpSentimentDataset(sources)

    # define model
    model_params = BinaryModuleParams(alphabet_size=ds.num_words, embed_dim=10,
                                      lstm_layers=1, lstm_dropout=0, lstm_out_dim=50)
    model_params.OPTIMIZER = Adam
    model_params.REGULARIZATION = 1e-3
    model_params.LEARNING_RATE = 1e-3

    # define activator
    activator_params = BinaryActivatorParams()
    activator_params.EPOCHS = 200
    activator_params.BATCH_SIZE = 64
    activator_params.TRAIN_TEST_SPLIT = 0.93

    model = BinaryModule(model_params)
    activator = binaryActivator(model, activator_params, ds, yelp_sentiment_data_split)
    activator.train()
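
# YelpSentimentDataset is not defined in this snippet. A hypothetical stand-in,
# assuming the standard yelp_review_polarity_csv layout of (label, text) rows
# with labels 1/2 and simple whitespace tokenization; the real class may differ.
import csv
from torch.utils.data import Dataset

class WordSentimentDataset(Dataset):
    def __init__(self, sources):
        self._samples = []
        vocab = {}
        for path in sources:
            with open(path, newline="", encoding="utf-8") as f:
                for label, text in csv.reader(f):
                    ids = [vocab.setdefault(w, len(vocab)) for w in text.split()]
                    self._samples.append((ids, int(label) - 1))  # labels 1/2 -> 0/1
        self.num_words = len(vocab)

    def __len__(self):
        return len(self._samples)

    def __getitem__(self, idx):
        return self._samples[idx]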
def transfer_double_accept(main_data_set_size=MAIN_DATA_SIZE,
                           sub_data_sizes=SUB_DATA_SIZES,
                           num_states=NUM_STATES,
                           size_alphabet=SIZE_ALPHABET):
    timestamp = strftime("%m%d%H%M%S", gmtime())
    out_res = open("transfer_binary_double_accept_" + timestamp + ".txt", "wt")
    out_res.write(
        "line0: config, line1: acc_train, line2: auc_train, line3: loss_train, line4: acc_dev, "
        "line5: auc_dev, line6: loss_dev\n")

    ds_params = BinaryFSTParams()
    ds_params.DATASET_SIZE = main_data_set_size
    ds_params.FST_ACCEPT_STATES_SIZE = 2
    ds_params.FST_STATES_SIZE = num_states
    ds_params.FST_ALPHABET_SIZE = size_alphabet
    ds_params.NEGATIVE_SAMPLES = True

    ds = FstDoubleAcceptDataset(ds_params)
    # data is for accept state one
    ds.mode_one()

    # train the base model on the full main data set
    base_model = BinaryModule(
        BinaryModuleParams(alphabet_size=ds_params.FST_ALPHABET_SIZE,
                           lstm_out_dim=100))

    def _write_block(tag):
        # one record: the config tag, then the six metric vectors in the
        # order documented in the file header above
        out_res.write(
            tag + "\n" +
            str(activator.accuracy_train_vec) + "\n" +
            str(activator.auc_train_vec) + "\n" +
            str(activator.loss_train_vec) + "\n" +
            str(activator.accuracy_dev_vec) + "\n" +
            str(activator.auc_dev_vec) + "\n" +
            str(activator.loss_dev_vec) + "\n")

    activator = binaryActivator(base_model, BinaryActivatorParams(), ds)
    activator.train(show_plot=False)
    _write_block(str(ds_params.DATASET_SIZE) + ",base")

    # data is for accept state two
    ds.mode_two()
    for data_size in sub_data_sizes:
        ds.resize(data_size)

        # train without transfer
        solo_model = BinaryModule(
            BinaryModuleParams(alphabet_size=ds_params.FST_ALPHABET_SIZE,
                               lstm_out_dim=100))
        activator = binaryActivator(solo_model, BinaryActivatorParams(), ds)
        activator.train(show_plot=False)
        _write_block(str(data_size) + ",solo")

        # train with transfer
        transfer_model = deepcopy(base_model)
        activator = binaryActivator(transfer_model, BinaryActivatorParams(),
                                    ds)
        activator.train(show_plot=False)
        _write_block(str(data_size) + ",transfer")
    out_res.close()
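
# The results file written above uses a fixed seven-line record layout: the
# config tag, then the six metric vectors in the order given in the header.
# A small reader for that layout; read_transfer_results is an illustrative
# helper, not part of the repo.
import ast

def read_transfer_results(path):
    # returns {config_tag: [acc_train, auc_train, loss_train, acc_dev, auc_dev, loss_dev]}
    records = {}
    with open(path) as f:
        f.readline()  # skip the header line describing the layout
        lines = [line.rstrip("\n") for line in f]
    for i in range(0, len(lines), 7):
        records[lines[i]] = [ast.literal_eval(vec) for vec in lines[i + 1:i + 7]]
    return records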
Example #7
    def _validate(self, data_loader, job=VALIDATE_JOB):
        # the method header and the three accumulators below are assumed from
        # the calls in this fragment
        loss_count = 0
        true_labels = []
        pred = []
        self._model.eval()
        # number of batches in the current epoch
        len_data = len(data_loader)
        for batch_index, (sequence, label) in enumerate(data_loader):
            sequence, label = self._to_gpu(sequence, label)
            # print progress
            self._print_progress(batch_index, len_data, job=VALIDATE_JOB)
            output = self._model(sequence)
            # calculate total loss
            loss_count += self._loss_func(output.squeeze(dim=1), label.float())
            true_labels += label.tolist()
            pred += output.squeeze().tolist()

        # epoch-level loss, accuracy and AUC for this job
        loss = float(loss_count / len_data)
        self._update_loss(loss, job=job)
        self._update_accuracy(pred, true_labels, job=job)
        self._update_auc(pred, true_labels, job=job)
        return loss


if __name__ == '__main__':
    from binary_params import BinaryFSTParams, BinaryModuleParams, BinaryActivatorParams
    fst_dataset = FstDataset(BinaryFSTParams())
    activator = binaryActivator(
        BinaryModule(
            BinaryModuleParams(alphabet_size=len(fst_dataset.chr_embed))),
        BinaryActivatorParams(), fst_dataset, split_fst_dataset)
    activator.train()
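
# The _update_accuracy and _update_auc helpers called from the validation loop
# are not part of this fragment. A hypothetical sketch of what they might do
# inside the activator class; the TRAIN_JOB constant, the vector attributes and
# the sklearn dependency are assumptions.
from sklearn.metrics import roc_auc_score

TRAIN_JOB = "train"  # assumed counterpart of VALIDATE_JOB

class _MetricMixin:
    def _update_accuracy(self, pred, true_labels, job):
        # threshold the sigmoid scores at 0.5 and append the epoch accuracy
        acc = sum(int(p > 0.5) == int(t) for p, t in zip(pred, true_labels)) / len(pred)
        vec = self.accuracy_train_vec if job == TRAIN_JOB else self.accuracy_dev_vec
        vec.append(acc)

    def _update_auc(self, pred, true_labels, job):
        # ROC-AUC over the raw scores
        vec = self.auc_train_vec if job == TRAIN_JOB else self.auc_dev_vec
        vec.append(roc_auc_score(true_labels, pred))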