Example no. 1
def lime():

    print('Loading dataset ...')
    X_train, Y_train, X_val, Y_val, X_test, Y_test = get_dataset(
        minibatch_size=32, sampling='None', numpy='True')

    net = load_fraudnet()
    lime_list = []
    explainer = LimeTabularExplainer(X_train, training_labels=Y_train)

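    # Prediction wrapper for LIME: takes a numpy batch, runs it through the network
    # on the configured device, and returns class probabilities with shape (N, 2).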
    def func_call(x):
        print(x.shape)
        input_ = torch.from_numpy(x).to(device=device).float()
        prob_1 = net(input_).view(-1, 1).cpu().data.numpy()
        prob_0 = 1 - prob_1
        prob = np.concatenate([prob_0, prob_1], axis=1)
        return prob

    for i in range(X_test.shape[0]):

        exp = explainer.explain_instance(X_test[i, :],
                                         func_call,
                                         labels=(0, 1),
                                         num_features=50)
        lime_list.append(exp)

    lime_list = np.array(lime_list)
    lime_list = preprocess(lime_list)
    pickle.dump(lime_list, open('./saved_attributions/lime.pkl', 'wb'))
Example no. 2
def train_model(config_path: str):
    writer = SummaryWriter()
    config = read_training_pipeline_params(config_path)
    logger.info("pretrained_emb {b}", b=config.net_params.pretrained_emb)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logger.info("Device is {device}", device=device)
    SRC, TRG, dataset = get_dataset(config.dataset_path, False)
    train_data, valid_data, test_data = split_data(
        dataset, **config.split_ration.__dict__)
    SRC.build_vocab(train_data, min_freq=3)
    TRG.build_vocab(train_data, min_freq=3)
    torch.save(SRC.vocab, config.src_vocab_name)
    torch.save(TRG.vocab, config.trg_vocab_name)
    logger.info("Vocab saved")
    print(f"Unique tokens in source (ru) vocabulary: {len(SRC.vocab)}")
    print(f"Unique tokens in target (en) vocabulary: {len(TRG.vocab)}")
    train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
        (train_data, valid_data, test_data),
        batch_size=config.BATCH_SIZE,
        device=device,
        sort_key=_len_sort_key,
    )
    INPUT_DIM = len(SRC.vocab)
    OUTPUT_DIM = len(TRG.vocab)

    config_encoder = BertConfig(vocab_size=INPUT_DIM)
    config_decoder = BertConfig(vocab_size=OUTPUT_DIM)
    config = EncoderDecoderConfig.from_encoder_decoder_configs(
        config_encoder, config_decoder)
    model = EncoderDecoderModel(config=config)
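    # Flag the decoder as a decoder and enable cross-attention, then rebuild the
    # encoder-decoder model from the updated configs so it can attend to the encoder.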
    config_encoder = model.config.encoder
    config_decoder = model.config.decoder
    config_decoder.is_decoder = True
    config_decoder.add_cross_attention = True
    config = EncoderDecoderConfig.from_encoder_decoder_configs(
        config_encoder, config_decoder)
    model = EncoderDecoderModel(config=config)
    args = TrainingArguments(
        output_dir="output",
        evaluation_strategy="steps",
        eval_steps=500,
        per_device_train_batch_size=128,
        per_device_eval_batch_size=128,
        num_train_epochs=10,
        save_steps=3000,
        seed=0,
        load_best_model_at_end=True,
    )
    # args.place_model_on_device = device
    trainer = Trainer(
        model=model,
        args=args,
        train_dataset=train_iterator,
        eval_dataset=valid_iterator,
        callbacks=[EarlyStoppingCallback(early_stopping_patience=3)],
    )
    trainer.train()

    model.save_pretrained("bert2bert")
Example no. 3
def train(model_name='fraud_net', ch=1, tbd='logs', smote=False):

    no_epochs = 30
    lr = 1e-3
    batch_size = 1024
    weight_factor = 1.0  # weight given to class 1 (originally 40.67)

    print('Loading dataset ...')
    train_loader, valid_loader, test_loader = get_dataset(
        minibatch_size=batch_size, sampling='None')

    net = FraudNet(ch, continuous).to(device='cuda').train()
    print('Model:')
    print(net)

    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    best_valid_loss = float('inf')
    writer = SummaryWriter('runs/' + tbd)

    for i in range(no_epochs):

        for b, data in enumerate(train_loader):

            inputs, labels = data

            if smote:
                inputs, labels = smote_func(inputs, labels)

            y_pred = net(inputs)
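            # Per-sample BCE weights: positives (label > 0.5) get weight_factor, negatives get 1.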
            mask = (labels > 0.5).float()
            batch_weights = mask * weight_factor + (1 - mask) * (1)
            loss = nn.BCELoss(weight=(batch_weights))(y_pred, labels)
            # print ('batch_weights',batch_weights)

            if b % 100 == 0:
                print('Epochs: {}, batch: {} loss: {}'.format(i, b, loss))
                sys.stdout.flush()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        writer.add_scalar('train_loss', loss, i)
        valid_loss = evaluate(valid_loader, net)
        writer.add_scalar('valid_loss', valid_loss, i)
        test_loss = evaluate(test_loader, net)
        writer.add_scalar('test_loss', test_loss, i)

        print('Epochs: {}, Valid loss: {}, Test Loss: {}'.format(
            i, valid_loss, test_loss))

        if valid_loss < best_valid_loss:
            torch.save(
                net.state_dict(), './saved_checkpoints/' + model_name + '_' +
                '{0:.3f}'.format(valid_loss) + '_' +
                '{0:.3f}'.format(test_loss) + '.th')
            best_valid_loss = valid_loss
            print('Saving checkpoint at epoch: {}'.format(i))
Example no. 4
def lime1():

    print('Loading dataset ...')
    batch_size = 1024
    train_loader, valid_loader, test_loader = get_dataset(minibatch_size=batch_size,
                                                          sampling='None')
    test_loader = get_dataset_test(minibatch_size=batch_size)

    net = load_fraudnet()

    train_embeds = []
    for data in train_loader:
        x, y = data
        embed = net.embedding_forward(x).cpu().data.numpy()
        train_embeds.append(embed)

    train_embeds = np.concatenate(train_embeds, axis=0)

    test_embeds = []
    for data in test_loader:
        x = data
        embed = net.embedding_forward(x).cpu().data.numpy()
        test_embeds.append(embed)

    test_embeds = np.concatenate(test_embeds, axis=0)

    print('train test', train_embeds.shape, test_embeds.shape)

    lime_list = []
    explainer = LimeTabularExplainer(train_embeds)

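    # Prediction wrapper for LIME: maps embedding vectors to class probabilities
    # by running them through the classifier head (net.feedforward).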
    def func_call(x):
        print(x.shape)
        input_ = torch.from_numpy(x).to(device=device).float()
        prob_1 = net.feedforward(input_).view(-1, 1).cpu().data.numpy()
        prob_0 = 1 - prob_1
        prob = np.concatenate([prob_0, prob_1], axis=1)
        return prob

    for i in range(test_embeds.shape[0]):

        exp = explainer.explain_instance(test_embeds[i, :],
                                         func_call,
                                         labels=(0, 1),
                                         num_features=91)
        lime_list.append(exp)

    lime_list = np.array(lime_list)
    lime_list = preprocess1(lime_list)
    pickle.dump(lime_list, open('./saved_attributions/lime.pkl', 'wb'))
Example no. 5
def train():
    tf.enable_eager_execution()

    BATCH_SIZE = 16
    MAX_STEP = 10000

    train_dir = './data/train'
    image_paths, image_labels = load_data.get_path_and_label(train_dir, is_random=True)
    image_label_ds = load_data.get_dataset(image_paths, image_labels, BATCH_SIZE, is_random=True)

    demo_model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(16, [3, 3], padding='same', activation='relu'),
        tf.keras.layers.MaxPooling2D(strides=2, padding='same'),
        tf.keras.layers.Conv2D(16, [3, 3], padding='same', activation='relu'),
        tf.keras.layers.MaxPooling2D(strides=2, padding='same'),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(2, activation='softmax')
    ])

    optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)

    checkpoint_dir = './logs_2'
    checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt')
    checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=demo_model)

    start = time.time()
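    # Eager-mode training loop: compute the loss under a GradientTape and apply gradients each step.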
    for step, (images, labels) in enumerate(image_label_ds.take(MAX_STEP)):
        with tf.GradientTape() as tape:
            logits = demo_model(images, training=True)
            loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

        grads = tape.gradient(loss, demo_model.variables)
        optimizer.apply_gradients(zip(grads, demo_model.variables),
                                  global_step=tf.train.get_or_create_global_step())

        if step % 100 == 0:
            acc = tf.nn.in_top_k(logits, labels, 1)
            acc = tf.reduce_mean(tf.cast(acc, tf.float16))
            end = time.time()
            print('Step:%6d, loss:%.6f, accuracy:%.2f%%, time:%.2fs' % (step, loss, acc * 100, end - start))
            start = time.time()

        if step % 1000 == 0:
            checkpoint.save(file_prefix=checkpoint_prefix)
Example no. 6
def train(WORD, CHAR, ARC, REL, transform, encoder, epoch=60, word_dim=100):
    model = BiaffineDependencyModel(n_words=WORD.vocab.n_init,
                                    n_feats=len(CHAR.vocab),
                                    n_rels=len(REL.vocab),
                                    pad_index=WORD.pad_index,
                                    unk_index=WORD.unk_index,
                                    bos_index=WORD.bos_index,
                                    feat_pad_index=CHAR.pad_index,
                                    encoder=encoder,
                                    n_embed=word_dim)
    model.load_pretrained(WORD.embed)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)
    model.to(device)

    puncts = torch.tensor(
        [i for s, i in WORD.vocab.stoi.items() if ispunct(s)]).to(device)

    train, dev, test = get_dataset(transform)
    # train.sentences = train.sentences[:30000]
    dev.sentences = dev.sentences[:200]
    test.sentences = test.sentences[:200]
    print('train sentences:%d dev sentences:%d test sentences:%d' %
          (len(train.sentences), len(dev.sentences), len(test.sentences)))
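    # The LSTM encoder is trained with plain Adam; other encoders use the ScheduledOptim wrapper around Adam.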
    if encoder == 'lstm':
        optimizer = Adam(model.parameters(),
                         lr=2e-3,
                         betas=(0.9, 0.9),
                         eps=1e-12)
    else:
        optimizer = ScheduledOptim(
            Adam(model.parameters(), betas=(0.9, 0.98), eps=1e-09), 2.0, 800,
            4000)

    train_parser(train,
                 dev,
                 test,
                 model,
                 optimizer,
                 transform,
                 WORD,
                 puncts,
                 encoder,
                 epochs=epoch,
                 path=encoder + '_model')
Example no. 7
def train_model(config_path: str):
    writer = SummaryWriter()
    config = read_training_pipeline_params(config_path)
    logger.info("pretrained_emb {b}", b=config.net_params.pretrained_emb)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logger.info("Device is {device}", device=device)
    SRC, TRG, dataset = get_dataset(config.dataset_path,
                                    config.net_params.transformer)
    train_data, valid_data, test_data = split_data(
        dataset, **config.split_ration.__dict__)
    if config.net_params.pretrained_emb:
        src_vectors = torchtext.vocab.FastText(language='ru')
    SRC.build_vocab(train_data, min_freq=3)
    if config.net_params.pretrained_emb:
        SRC.vocab.load_vectors(src_vectors)
    TRG.build_vocab(train_data, min_freq=3)
    torch.save(SRC.vocab, config.src_vocab_name)
    torch.save(TRG.vocab, config.trg_vocab_name)
    logger.info("Vocab saved")
    print(f"Unique tokens in source (ru) vocabulary: {len(SRC.vocab)}")
    print(f"Unique tokens in target (en) vocabulary: {len(TRG.vocab)}")
    train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
        (train_data, valid_data, test_data),
        batch_size=config.BATCH_SIZE,
        device=device,
        sort_key=_len_sort_key,
    )
    INPUT_DIM = len(SRC.vocab)
    OUTPUT_DIM = len(TRG.vocab)

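    # Build the model requested by the config: GRU with attention, transformer, or plain GRU seq2seq.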
    if config.net_params.attention:
        Encoder = network_gru_attention.Encoder
        Decoder = network_gru_attention.Decoder
        Seq2Seq = network_gru_attention.Seq2Seq
        Attention = network_gru_attention.Attention
        attn = Attention(config.net_params.HID_DIM, config.net_params.HID_DIM)
        enc = Encoder(INPUT_DIM, config.net_params.ENC_EMB_DIM,
                      config.net_params.HID_DIM, config.net_params.HID_DIM,
                      config.net_params.ENC_DROPOUT)
        dec = Decoder(OUTPUT_DIM, config.net_params.DEC_EMB_DIM,
                      config.net_params.HID_DIM, config.net_params.HID_DIM,
                      config.net_params.DEC_DROPOUT, attn)

        model = Seq2Seq(enc, dec, device)
    if config.net_params.transformer:
        logger.info("Transformer lets go")
        Encoder = network_transformer.Encoder
        Decoder = network_transformer.Decoder
        Seq2Seq = network_transformer.Seq2Seq
        SRC_PAD_IDX = SRC.vocab.stoi[SRC.pad_token]
        TRG_PAD_IDX = TRG.vocab.stoi[TRG.pad_token]
        HID_DIM = 512
        ENC_LAYERS = 6
        DEC_LAYERS = 6
        ENC_HEADS = 8
        DEC_HEADS = 8
        ENC_PF_DIM = 2048
        DEC_PF_DIM = 2048
        ENC_DROPOUT = 0.1
        DEC_DROPOUT = 0.1

        enc = Encoder(INPUT_DIM, HID_DIM, ENC_LAYERS, ENC_HEADS, ENC_PF_DIM,
                      ENC_DROPOUT, device)

        dec = Decoder(OUTPUT_DIM, HID_DIM, DEC_LAYERS, DEC_HEADS, DEC_PF_DIM,
                      DEC_DROPOUT, device)
        model = Seq2Seq(enc, dec, SRC_PAD_IDX, TRG_PAD_IDX, device)
    if not config.net_params.attention and not config.net_params.transformer:
        Encoder = my_network.Encoder
        Decoder = my_network.Decoder
        Seq2Seq = my_network.Seq2Seq
        enc = Encoder(INPUT_DIM, config.net_params.ENC_EMB_DIM,
                      config.net_params.HID_DIM, config.net_params.N_LAYERS,
                      config.net_params.ENC_DROPOUT)
        dec = Decoder(OUTPUT_DIM, config.net_params.DEC_EMB_DIM,
                      config.net_params.HID_DIM, config.net_params.N_LAYERS,
                      config.net_params.DEC_DROPOUT)
        model = Seq2Seq(enc, dec, device)

    model.apply(init_weights)
    if config.net_params.pretrained_emb:
        model.encoder.tok_embedding = nn.Embedding.from_pretrained(
            torch.FloatTensor(SRC.vocab.vectors))
    model.to(device)
    PAD_IDX = TRG.vocab.stoi[TRG.pad_token]
    optimizer = optim.Adam(model.parameters(), config.lr)
    criterion = nn.CrossEntropyLoss(ignore_index=PAD_IDX)
    lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, **config.lr_scheduler.__dict__)
    train_history = []
    valid_history = []
    best_valid_loss = float('inf')
    print("Let's go")
    # for p in model.encoder.parameters():
    #     p.requires_grad = True
    # for p in model.decoder.parameters():
    #     p.requires_grad = True

    for epoch in range(config.N_EPOCHS):

        start_time = time.time()

        train_loss = train(model, train_iterator, optimizer, criterion,
                           config.CLIP, train_history, valid_history)
        valid_loss = evaluate(model, valid_iterator, criterion)
        lr_scheduler.step(valid_loss)
        end_time = time.time()

        epoch_mins, epoch_secs = epoch_time(start_time, end_time)

        if valid_loss < best_valid_loss:
            best_valid_loss = valid_loss
            torch.save(model.state_dict(), config.model_out_name)

        train_history.append(train_loss)
        valid_history.append(valid_loss)
        writer.add_scalar('train loss', train_history[-1], epoch)
        writer.add_scalar('valid loss', valid_history[-1], epoch)
        writer.add_scalar('learning rate', lr_scheduler._last_lr[0], epoch)
        print(f'Epoch: {epoch + 1:02} | Time: {epoch_mins}m {epoch_secs}s')
        print(
            f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}'
        )
        print(
            f'\t Val. Loss: {valid_loss:.3f} |  Val. PPL: {math.exp(valid_loss):7.3f}'
        )
        for idx, batch in enumerate(valid_iterator):
            if idx > 3:
                break
            src = batch.src[:, idx:idx + 1]
            trg = batch.trg[:, idx:idx + 1]
            generate_translation(src, trg, model, TRG.vocab, SRC.vocab,
                                 config.net_params.transformer)

    get_bleu(model, test_iterator, TRG, config.net_params.transformer)
Example no. 8
from sklearn import ensemble
from sklearn import feature_extraction
from sklearn import linear_model
from sklearn import pipeline
from sklearn import model_selection
from sklearn import metrics
import joblib

import load_data
import pickle

# Load the dataset from the CSV file (handled by load_data.py). Each email is split into characters and each one has a label assigned.
X, y, label_names = load_data.get_dataset()

# Split the dataset into training and testing sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2, random_state=0)

# Set up a vectorizer that converts the dataset into character n-gram TF-IDF vectors
vectorizer = feature_extraction.text.TfidfVectorizer(ngram_range=(1, 4), analyzer='char')

# Set up a pipeline that feeds data through the vectorizer into the linear model
pipe = pipeline.Pipeline([('vectorizer', vectorizer), ('clf', linear_model.LogisticRegression())])

# Fit the pipeline on the training features and labels
pipe.fit(X_train, y_train)

# Evaluate the model by predicting on the held-out test set
y_predicted = pipe.predict(X_test)

print(metrics.classification_report(y_test, y_predicted,target_names=label_names))
Example no. 9
from torch.utils.data import DataLoader, random_split
from load_data import get_dataset, MyCollate
from torchvision import transforms as T
import pickle
from torch import nn
import numpy as np

#split()
transforms = T.Compose([
    T.Resize(256),
    T.CenterCrop(224),
    T.ToTensor(),
    T.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
])
dataset, data_loader = get_dataset(root_dir="Flickr8k",
                                   caption_file="captions_train.txt",
                                   transforms=transforms)

print(len(dataset.vocabulary))


def convert_data_loader(loader):
    pad_idx = dataset.vocabulary.stoi["<PAD>"]
    return DataLoader(
        dataset=loader,
        batch_size=16,  # how many samples to load per batch
        shuffle=True,
        num_workers=2,  # how many subprocesses to use for data loading
        pin_memory=True,
        collate_fn=MyCollate(pad_idx),  # merge a list of samples into a mini-batch tensor
    )
Example no. 10
import torch, torchvision
from torchvision import datasets, transforms
from torch import nn, optim
from torch.nn import functional as F

import numpy as np
# pip install shap
import shap

from load_data import get_dataset, get_dataset_test

model = torch.load(
    './saved_checkpoints/continuous_sameWt_eqSample_new_fc_0.3_2_0.333_0.333.th',
    map_location='cpu')
train_loader, valid_loader, test_loader = get_dataset(minibatch_size=128)

# since shuffle=True, this is a random sample of test data
batch = next(iter(test_loader))
images, _ = batch

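# The first 100 images serve as SHAP's background distribution; the next 3 are the samples to explain.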
background = images[:100]
test_images = images[100:103]

e = shap.DeepExplainer(model, background)
shap_values = e.shap_values(test_images)

print(shap_values.shape)
Example no. 11
    X_tsne = preprocessing.normalize(X_tsne.embedding_, norm='l2')
    
    fig = plt.figure()
    ax = plt.subplot(111)
    for i in range(X_tsne.shape[0]):
        plt.text(X_tsne[i, 0], X_tsne[i, 1], str(labels[i]),
                 color=plt.cm.Set1(labels[i] / kmeans_clusters),
                 fontdict={'weight': 'bold', 'size': 3})
    plt.legend()
    # plt.show()
    '''
    print()


if __name__ == '__main__':
    train_epoch = sys.argv[1]
    test = bool(int(sys.argv[2]))
    data = load_data.get_dataset(test=test)
    model_dm, model_dbow = train_doc2vec(data, new_model=True)
    vecs_dm = load_data.get_doc_vec(model_dm, data)
    vecs_dbow = load_data.get_doc_vec(model_dbow, data)
    labels_dm, cluster_centers_dm = train_cluster(vecs_dm, 'kmeans_dm.pkl')
    labels_dbow, cluster_centers_dbow = train_cluster(vecs_dbow, 'kmeans_dbow.pkl')
    train_tsne(vecs_dm, labels_dm, 'tsne_dm.pkl')
    train_tsne(vecs_dbow, labels_dbow, 'tsne_dbow.pkl')
    print()
Example no. 12
def test(model_name, ch):

    batch_size = 64
    train_loader, valid_loader, test_loader = get_dataset(
        minibatch_size=batch_size)

    net = FraudNet(ch, continuous).to(device='cuda')
    paths = glob('./saved_checkpoints/' + model_name + '_*')
    print('paths', paths)
    test_loss = np.array(
        [float(path.split('.th')[0].split('_')[-1]) for path in paths])
    print(test_loss)
    idx = np.argmin(test_loss)
    print(idx)
    path = paths[idx]
    print(path)

    # net.load_state_dict(torch.load(path))

    loss = 0.0
    steps = 0.0
    weight_factor = 1.0
    net = net.eval()

    predicted_list = []
    labels_list = []
    loss_list = []

    loss = evaluate(test_loader, net)
    print('loss', loss)
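    # NOTE: exit() below short-circuits the function, so the per-sample evaluation that follows never runs.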
    exit()

    with torch.no_grad():
        for b, data in enumerate(test_loader):

            inputs, labels = data
            y_pred = net(inputs)

            mask = (labels > 0.5).float()
            batch_weights = mask * weight_factor + (1 - mask) * (1)
            loss += nn.BCELoss(weight=batch_weights)(y_pred, labels)
            steps += 1.0

            bce_loss = nn.BCELoss(reduction='none')(y_pred, labels)

            predicted_list.append(y_pred.cpu().data.numpy())
            labels_list.append(labels.cpu().data.numpy())
            loss_list.append(bce_loss.cpu().data.numpy())

    predicted_list = np.array([x for y in predicted_list for x in y])
    labels_list = np.array([x for y in labels_list for x in y])
    actual_labels = (labels_list >= 0.5).astype(np.int32)

    loss_list = np.array([x for y in loss_list for x in y])

    positive_loss = loss_list[labels_list >= 0.5].mean()
    negative_loss = loss_list[labels_list < 0.5].mean()
    overall_loss = loss_list.mean()
    weighted_loss = loss / steps

    result = calc_metrics_classification(actual_labels, predicted_list)
    result.update({
        'positive_loss': positive_loss,
        'negative_loss': negative_loss,
        'overall_loss': overall_loss,
        'weighted_loss': weighted_loss
    })
    print(result)
Example no. 13
def save_labeled_data(corpus, labels, file_name):
    index = 0
    res = {}
    for key in corpus:
        tmp_re_ = []
        for text in corpus[key]:
            tmp_dict_ = {'text': text, 'label': str(labels[index])}
            tmp_re_.append(tmp_dict_)
            index += 1
        res[key] = tmp_re_
    with open(os.path.join(path_data, file_name), 'w') as f:
        json.dump(res, f, ensure_ascii=False, indent=4, separators=(',', ': '))


if __name__ == '__main__':
    test = False

    doc2vec_dm_model = Doc2Vec.load(os.path.join(path_model, 'doc2vec_dm'))
    doc2vec_dbow_model = Doc2Vec.load(os.path.join(path_model, 'doc2vec_dbow'))
    km_dm_model = joblib.load(os.path.join(path_model, 'kmeans_dm.pkl'))
    km_dbow_model = joblib.load(os.path.join(path_model, 'kmeans_dbow.pkl'))
    corpus = load_data.get_dataset(test=test, cut=False)
    vecs_dm = load_data.get_doc_vec(doc2vec_dm_model, corpus)
    vecs_dbow = load_data.get_doc_vec(doc2vec_dbow_model, corpus)
    print('get vecs')
    labels_dm = label_infer(km_dm_model, vecs_dm)
    labels_dbow = label_infer(km_dbow_model, vecs_dbow)
    corpus = load_data.get_dataset(raw=True, test=test)
    save_labeled_data(corpus, labels_dm, 'match-data-dm.json')
    save_labeled_data(corpus, labels_dbow, 'match-data-dbow.json')
Example no. 14
def train(model_name='RL_net', tbd='logs', sparsity_lambda=0.5, ch=3):

    no_epochs = 30
    lr = 1e-3
    batch_size = 1024

    print('Loading dataset ...')
    train_loader, valid_loader, test_loader = get_dataset(
        minibatch_size=batch_size)
    fraud_net = load_fraudnet()
    net = RLNet(ch=ch).to(device='cuda').train()
    print('Model:')
    print(net)

    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    best_valid_reward = -1 * float('inf')
    writer = SummaryWriter('RL_runs/' + tbd)

    for i in range(no_epochs):

        for b, data in enumerate(train_loader):

            inputs, labels = data
            inputs, labels = smote_func(inputs, labels)

            y_logits = net(inputs)
            y_probs = torch.sigmoid(y_logits)

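            # Policy-gradient feature selection: sample a binary mask from Bernoulli(y_probs),
            # fall back to default_features for dropped inputs, and reward low BCE while
            # penalizing deviation from ~5 selected features (sparsity_lambda term).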
            m = Bernoulli(probs=y_probs)
            selected_features = m.sample()
            number_selected = selected_features.sum(1)  #fraction selected

            # print (selected_features,default_features)
            selected_inputs = inputs * selected_features + (
                1 - selected_features) * default_features

            y_pred = fraud_net(selected_inputs)

            bce_loss = nn.BCELoss(reduction='none')(y_pred, labels)

            # print('number_selected, bce_loss',number_selected.shape,bce_loss.shape)
            number_dropped = 50 - number_selected
            reward = -1 * bce_loss - sparsity_lambda * torch.abs(
                number_selected - 5).unsqueeze(1)

            log_probs = m.log_prob(selected_features)
            print(log_probs.shape, reward.shape)
            loss = -log_probs * reward
            loss = loss.mean()

            if b % 100 == 0:
                print(
                    'Epochs: {}, batch: {}, loss: {}, reward: {}, bce loss: {}, fraction selected: {}'
                    .format(i, b, loss,
                            reward.mean().item(),
                            bce_loss.mean().item(),
                            number_selected.mean().item()))
                sys.stdout.flush()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        writer.add_scalar('train_loss', loss, i)
        writer.add_scalar('reward', reward.mean(), i)
        writer.add_scalar('bce_loss', bce_loss.mean(), i)
        writer.add_scalar('number_selected', number_selected.mean(), i)

        valid_result = evaluate(valid_loader, fraud_net, net, sparsity_lambda)
        valid_result.update({'name': 'Valid_Epoch_{}'.format(i)})
        print(valid_result)

        valid_reward = valid_result['reward']

        if valid_reward > best_valid_reward:
            torch.save(net.state_dict(),
                       './saved_rl_checkpoints/' + model_name + '.th')
            best_valid_reward = valid_reward

            test_result = evaluate(test_loader, fraud_net, net,
                                   sparsity_lambda)
            test_result.update({'name': 'test_Epoch_{}'.format(i)})
            print(test_result)

            print('Saving checkpoint at epoch: {}'.format(i))

    print(test_result)