def grid_search(max_features_list, n_estimators_list):
    """Exhaustively search oversampling/scaling and forest hyper-parameters.

    For every combination of oversample flag, scale flag, ``max_features``
    and ``n_estimators``, fits a model and tracks the best F1 score seen.

    Returns:
        (best_f1, best_oversample, best_scale, best_max_features,
         best_n_estimators) — the winners are None if no score beat 0.
    """
    best_f1 = 0
    best_params = (None, None, None, None)
    for oversample in [True, False]:
        for scale in [True, False]:
            print("Oversample: {}, Scale: {}".format(oversample, scale))
            # The split (and its preprocessing) only depends on these two
            # flags, so compute it once per flag combination.
            X_train, X_test, y_train, y_test = split_data(
                oversample=oversample, scale=scale)
            for mf in max_features_list:
                for nest in n_estimators_list:
                    score = fit_model(X_train,
                                      X_test,
                                      y_train,
                                      y_test,
                                      max_features=mf,
                                      n_estimators=nest)
                    if score > best_f1:
                        print("NEW MAX!")
                        best_f1 = score
                        best_params = (oversample, scale, mf, nest)
    return (best_f1,) + best_params
Example #2
0
def train_model(config_path: str):
    """Train a BERT2BERT encoder-decoder model with the HuggingFace Trainer.

    Reads pipeline parameters from ``config_path``, builds source/target
    vocabularies from the training split, wraps the splits in bucketed
    iterators, then trains an ``EncoderDecoderModel`` and saves it to the
    ``bert2bert`` directory.
    """
    # NOTE(review): `writer` is created but never used in this function.
    writer = SummaryWriter()
    config = read_training_pipeline_params(config_path)
    logger.info("pretrained_emb {b}", b=config.net_params.pretrained_emb)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logger.info("Device is {device}", device=device)
    SRC, TRG, dataset = get_dataset(config.dataset_path, False)
    train_data, valid_data, test_data = split_data(
        dataset, **config.split_ration.__dict__)
    # Vocabularies are built on the training split only (min frequency 3)
    # and persisted for later inference.
    SRC.build_vocab(train_data, min_freq=3)
    TRG.build_vocab(train_data, min_freq=3)
    torch.save(SRC.vocab, config.src_vocab_name)
    torch.save(TRG.vocab, config.trg_vocab_name)
    logger.info("Vocab saved")
    print(f"Unique tokens in source (ru) vocabulary: {len(SRC.vocab)}")
    print(f"Unique tokens in target (en) vocabulary: {len(TRG.vocab)}")
    train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
        (train_data, valid_data, test_data),
        batch_size=config.BATCH_SIZE,
        device=device,
        sort_key=_len_sort_key,
    )
    INPUT_DIM = len(SRC.vocab)
    OUTPUT_DIM = len(TRG.vocab)

    # NOTE(review): `config` is rebound below, shadowing the pipeline
    # params read above — the pipeline config is unavailable after this.
    config_encoder = BertConfig(vocab_size=INPUT_DIM)
    config_decoder = BertConfig(vocab_size=OUTPUT_DIM)
    config = EncoderDecoderConfig.from_encoder_decoder_configs(
        config_encoder, config_decoder)
    model = EncoderDecoderModel(config=config)
    # Rebuild the config so the decoder half acts as a causal decoder with
    # cross-attention over the encoder outputs, then recreate the model.
    config_encoder = model.config.encoder
    config_decoder = model.config.decoder
    config_decoder.is_decoder = True
    config_decoder.add_cross_attention = True
    config = EncoderDecoderConfig.from_encoder_decoder_configs(
        config_encoder, config_decoder)
    model = EncoderDecoderModel(config=config)
    args = TrainingArguments(
        output_dir="output",
        evaluation_strategy="steps",
        eval_steps=500,
        per_device_train_batch_size=128,
        per_device_eval_batch_size=128,
        num_train_epochs=10,
        save_steps=3000,
        seed=0,
        load_best_model_at_end=True,
    )
    # args.place_model_on_device = device
    trainer = Trainer(
        model=model,
        args=args,
        train_dataset=train_iterator,
        eval_dataset=valid_iterator,
        callbacks=[EarlyStoppingCallback(early_stopping_patience=3)],
    )
    trainer.train()

    model.save_pretrained("bert2bert")
def train_net(net, data, label, nepochs, batch_size, eval_single_output_fn = None):
    '''
    Trains the given RCNN for `nepochs` epochs, re-splitting `data`/`label`
    into train/test batches of size `batch_size` at the start of every
    epoch.
    Prints out the training loss, training accuracy,
    test loss and test accuracy every epoch.

    NOTE(review): `eval_single_output_fn` is accepted but never used in
    this body — confirm whether callers rely on it before removing.
    '''
    print("batch_size:", batch_size)
    print("nepochs:", nepochs)
    for epoch in range(1,nepochs+1):
        # trainX is a list of batches for training. Each batch is a tensor
        # of size (batch_size, num_seq, embedding_length).
        # trainY is a list of labels.
        # testX and testY are of the same format, for testing.
        trainX, trainY, testX, testY = load_data.split_data(data, label, batch_size)
        train_loss, train_acc = train_pattern(net, trainX, trainY, epoch)
        test_loss, test_acc = eval_model(net, testX, testY)
        print("epoch  %s: train loss %2.2f train accuracy %2.2f\n test loss %2.2f test accuracy %2.2f" % (epoch, train_loss, train_acc, test_loss, test_acc))
Example #4
0
def construct_model_and_data(args):
	"""
	Load model and data on which the attack is carried out.
	Assign target classes and images for targeted attack.

	Returns a dict with the data/model handles plus clip bounds and, for
	targeted attacks, per-test-sample target labels and target images.
	"""
	data_model = args.dataset_name + args.model_name
	dataset = ImageData(args.dataset_name)
	x_test, y_test = dataset.x_val, dataset.y_val
	# NOTE(review): `reference` is computed but unused in this function.
	reference = - dataset.x_train_mean
	model = ImageModel(args.model_name, args.dataset_name, 
		train = False, load = True)

	# Split the test dataset into two parts.
	# Use the first part for setting target image for targeted attack.
	x_train, y_train, x_test, y_test = split_data(x_test, y_test, model, 
		num_classes = model.num_classes, split_rate = 0.5, 
		sample_per_class = np.min([np.max([200, args.num_samples // 10 * 3]),
		 1000]))

	outputs = {'data_model': data_model,
				'x_test': x_test,
				'y_test': y_test,
				'model': model,
				'clip_max': 1.0,
				'clip_min': 0.0
				}

	if args.attack_type == 'targeted':
		# Assign target class and image for targeted attack.
		label_train = np.argmax(y_train, axis = 1)
		label_test = np.argmax(y_test, axis = 1)
		# Candidate target images grouped by class.
		x_train_by_class = [x_train[label_train == i] for i in range(model.num_classes)]
		target_img_by_class = np.array([x_train_by_class[i][0] for i in range(model.num_classes)])
		# Seed fixed so the target assignment is reproducible: for each test
		# sample pick a random class != its label, then a random image of it.
		np.random.seed(0)
		target_labels = [np.random.choice([j for j in range(model.num_classes) if j != label]) for label in label_test]
		target_img_ids = [np.random.choice(len(x_train_by_class[target_label])) for target_label in target_labels]
		target_images = [x_train_by_class[target_labels[j]][target_img_id] for j, target_img_id in enumerate(target_img_ids)]
		outputs['target_labels'] = target_labels
		outputs['target_images'] = target_images

	return outputs
Example #5
0
def main():
    """Fit and report linear-regression and k-neighbors models on the
    happiness dataset for the year range given on the command line."""
    start, end, seed = process_arguments.parse_arguments()
    dataframe = process_arguments.read_csv(start, end)
    column_names = [
        "Country", "Year", "Region", "Happiness Score",
        "Economy (GDP per Capita)", "Health (Life Expectancy)", "Freedom",
        "Trust (Government Corruption)", "Generosity"
    ]

    labels, data = load_data.split_data(dataframe, column_names)

    # use the MinMaxScaler to scale all features between 0 and 1
    scaler = MinMaxScaler()
    features_minmax = scaler.fit_transform(pd.DataFrame(data=data))
    data_train, data_test, labels_train, labels_test = (
        load_data.split_train_test(features_minmax, labels, seed))

    # LINEAR REGRESSION MODEL
    print("LINEAR REGRESSION MODEL")
    lin_model = LinearRegressionModel(
        data_test, labels_test, data_train, labels_train, start, end)
    lin_model.train()
    lin_model.predict()
    lin_model.graph()
    print("R^2 = " + str(lin_model.r_squared()))
    print("Weights = " + str(lin_model.weights()))

    # KNEIGHBORS REGRESSOR MODEL
    print("\nKNEIGHBORS REGRESSOR MODEL")
    knn_model = KNeighborsRegressorModel(
        data_test, labels_test, data_train, labels_train, start, end)
    knn_model.train()
    knn_model.predict()
    knn_model.graph()
    print("R^2 = " + str(knn_model.r_squared()))
#!/usr/bin/env python3
"""One-vs-one classifier."""

from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import cross_val_score
from sklearn.multiclass import OneVsOneClassifier
from sklearn.preprocessing import StandardScaler
import numpy as np

import load_data

if __name__ == '__main__':
    # Evaluate a one-vs-one SGD classifier before and after feature scaling.
    train_data, train_label, test_data, test_label = load_data.split_data()
    ovo_clf = OneVsOneClassifier(SGDClassifier(random_state=42))
    raw_scores = cross_val_score(
        ovo_clf, train_data, train_label, cv=3, scoring="accuracy")
    print(raw_scores)

    # Standardize the features and repeat the same 3-fold evaluation.
    scaler = StandardScaler()
    train_data_scaled = scaler.fit_transform(train_data.astype(np.float64))
    scaled_scores = cross_val_score(
        ovo_clf, train_data_scaled, train_label, cv=3, scoring="accuracy")
    print('After scaled: {}'.format(scaled_scores))
Example #7
0
import cPickle
import csv
import timeit

import matplotlib.pyplot as plt
import numpy as np
import theano
import theano.tensor as T

import ACNN
import generate_function
import load_data

# load data
train_x, train_y, test_x = load_data.train_test('./dataset/digit/train.csv', './dataset/digit/test.csv')
train_x, train_y, valid_x, valid_y = load_data.split_data(train_x, train_y)
train_x, train_y = load_data.shared_data(train_x, train_y)
valid_x, valid_y = load_data.shared_data(valid_x, valid_y)
X = T.matrix('input', dtype=theano.config.floatX)
y = T.ivector('labels')
index = T.lscalar('index')
batch_size = 20
learning_rate = 0.01
train_batches = train_x.get_value(borrow=True).shape[0] // batch_size
valid_batches = valid_x.get_value(borrow=True).shape[0] // batch_size
rng = np.random.RandomState(1234)

# create model
layer0_input = X.reshape((batch_size, 1, 28, 28))
conv_net = ACNN.Al_cnn(
    input=layer0_input,
Example #8
0
# Experiment constants.
SEMANTIC_EMBED = 512
MAX_ITER = 100
batch_size = 128
image_size = 224


images, tags, labels = loading_data(DATA_DIR)
dimTxt = tags.shape[1]
dimLab = labels.shape[1]

# Partition sizes for the dataset splits below.
DATABASE_SIZE = 18015
TRAINING_SIZE = 10000
QUERY_SIZE = 2000
VERIFICATION_SIZE = 1000

# X: images, Y: tags, L: labels — each is a dict with
# 'train' / 'query' / 'retrieval' subsets produced by split_data.
X, Y, L = split_data(images, tags, labels, QUERY_SIZE, TRAINING_SIZE, DATABASE_SIZE)
train_L = L['train']
train_x = X['train']
train_y = Y['train']

query_L = L['query']
query_x = X['query']
query_y = Y['query']

retrieval_L = L['retrieval']
retrieval_x = X['retrieval']
retrieval_y = Y['retrieval']

# Derived dimensions used by the model definitions further down.
num_train = train_x.shape[0]
numClass = train_L.shape[1]
dimText = train_y.shape[1]
Example #9
0
from sklearn.metrics import precision_recall_curve
from sklearn.model_selection import cross_val_predict
import matplotlib.pyplot as plt

import load_data


def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
    """Plot precision and recall as functions of the decision threshold."""
    # precision_recall_curve returns one more precision/recall value than
    # thresholds, so the last element of each curve is dropped.
    curves = ((precisions, "b--", "Precision"), (recalls, "g-", "Recall"))
    for values, fmt, label in curves:
        plt.plot(thresholds, values[:-1], fmt, label=label)
    plt.xlabel("Threshold")
    plt.legend(loc="upper left")
    plt.ylim([0, 1])


if __name__ == '__main__':
    # Fix: SGDClassifier was used below but never imported in this script
    # (only precision_recall_curve / cross_val_predict are imported above),
    # so the script crashed with NameError. Import it locally.
    from sklearn.linear_model import SGDClassifier

    # Binary "is it a 5?" task on the training split.
    train_data, train_label, _, _ = load_data.split_data()
    train_label_5 = (train_label == 5)

    sgd_clf = SGDClassifier(random_state=42)
    # Out-of-fold decision scores are required to sweep the threshold.
    scores = cross_val_predict(sgd_clf,
                               train_data,
                               train_label_5,
                               cv=3,
                               method="decision_function")
    precisions, recalls, thres = precision_recall_curve(train_label_5, scores)

    plot_precision_recall_vs_threshold(precisions, recalls, thres)
    plt.show()
Example #10
0
    ## data
    data_view = ['CC', 'MLO']
    data_type = ['Dense', 'Fatty']
    type_v = 1
    data, target = load_medical_data(data_type[type_v])
    if scale_data:
        data = scale_data_normlize(data)
    feature_number = data[data_view[0]].shape[1]
    logger.info("data type {}".format(data_type[type_v]))

    kf = KFold(**kfold_params_dict)
    ## train
    experts = {}
    experts_split_indexes = split_data(data['CC'],
                                       is_multi_expert=False,
                                       kfold=kf)
    print "train experts"
    for i, view in enumerate(data_view):
        np.random.seed(check_seed)
        expert = expert_model(input_shape=(feature_number, ),
                              index=view,
                              params=current_params)
        expert.compile(optimizer=current_params['optimizer'],
                       loss='binary_crossentropy',
                       metrics=['accuracy'])  #, fmeasure])
        train_phase(expert, data[view], target, view, fit_params_dict,
                    experts_split_indexes, False, params_dir)
        experts[view] = expert

    print "train combined models"
import pickle
from load_data import split_data


def import_model():
    """Unpickle and return the model stored in ``model.pkl``."""
    with open("model.pkl", 'rb') as model_file:
        return pickle.load(model_file)


if __name__ == '__main__':
    model = import_model()
    # Fix: the original line ended with a stray colon
    # (`... for_predict=True):`), which was a SyntaxError.
    X = split_data(f_name='../data/data.json', for_predict=True)
    model.predict(X)
Example #12
0
    config = yaml.load(file, Loader=yaml.FullLoader)[config]

# Output directory is timestamped unless an explicit path was given.
date = datetime.now().strftime("%d%m%Y-%H%M")
output = f"./output/{date}/" if args.path is None else args.path
Path(output).mkdir(parents=True, exist_ok=True)

#### Logger ####
logging.basicConfig(filename=f"{output}log.txt", level=logging.INFO,
                    format='%(asctime)s %(levelname)s: %(message)s', datefmt='%d.%m.%Y %H:%M:%S')

#### Load Data ####
x, y = load_data(**config["data"])
# Reshape to (batch, c_in, seq_length) as expected by the causal conv net.
x = x.to_numpy().reshape(-1, config["causal_conv"]["c_in"], config["causal_conv"]["seq_length"])
y = y[::config["causal_conv"]["seq_length"]]  # Create lagged outputs

train, val = split_data(x, y.to_numpy())
train_loader = DataLoader(train, **config["loader"])
val_loader = DataLoader(val, **config["loader"])


#### Create Model #####
model = CausalConvNet(**config["causal_conv"]).cuda()
# Class count inferred from the labels (assumes 0-based integer labels —
# TODO confirm against the data source).
classifier = MLP(num_class=np.max(y) + 1,**config["mlp"]).cuda()

#### Tracking Variables #####
train_batch_losses = []
train_loss = []
train_metrics = []
train_pred = np.array([])
train_label = np.array([])
Example #13
0
    if 'data' not in os.listdir(data_model):
        os.mkdir('{}/data'.format(data_model))
    if 'figs' not in os.listdir(data_model):
        os.mkdir('{}/figs'.format(data_model))

    print('Loading dataset...')
    dataset = ImageData(args.dataset_name)
    model = ImageModel(args.model_name,
                       args.dataset_name,
                       train=False,
                       load=True)

    if args.dataset_name == 'cifar10':
        X_train, Y_train, X_test, Y_test = split_data(dataset.x_val,
                                                      dataset.y_val,
                                                      model,
                                                      num_classes=10,
                                                      split_rate=0.8,
                                                      sample_per_class=1000)

    print('Sanity checking...')
    data_sample = X_test
    print('data_sample.shape', data_sample.shape)
    print('X_train.shape', X_train.shape)

    pred_test = model.predict(dataset.x_val)

    def cross_entropy(predictions, targets, epsilon=1e-12):
        """Mean cross-entropy between predicted probabilities and targets.

        Predictions are clipped to [epsilon, 1 - epsilon] to avoid log(0).
        """
        clipped = np.clip(predictions, epsilon, 1. - epsilon)
        n_samples = clipped.shape[0]
        return -np.sum(targets * np.log(clipped + 1e-9)) / n_samples
Example #14
0
import numpy as np
import pandas as pd
from models.oracle import Oracle
from models.surrogate_teacher import Surrogate
from models.omniscient_teacher import Omniscient
from models.random_teacher import Random
from models.without_teacher import Without_teacher
from utils import predict, predict_by_W, rmse_W, write_np2csv, rmse_w, make_random_mask, predict_wj
from load_data import read_W, read_csv, split_data
from tqdm import tqdm
from sklearn.metrics import roc_auc_score
import logging
import datetime
# %%
df = read_csv('output/wine-quality-pm1.csv', header=0)
train_X, test_X, train_y, test_y = split_data(df, True)
eta, lambd, alpha = 1, 2, 0.01
training_epochs, loops = 10, 10
J = 10
# Total number of teaching materials to present
textbook = 500
# Number of materials used for estimation
test_textbook_list = [100]
# Number of materials presented between estimations
between_textbook_list = [1]
# Number of groups
k = 1
lambds = [1, 2, 3, 4, 5]

for lambd in lambds:
    oracle = Oracle(eta=eta, lambd=lambd)
Example #15
0
import pandas as pd
from models.oracle import Oracle
from models.surrogate_teacher import Surrogate
from models.omniscient_teacher import Omniscient
from models.random_teacher import Random
from models.without_teacher import Without_teacher
from utils import predict, predict_by_W, rmse_W, write_np2csv, rmse_w, make_random_mask, predict_wj
from load_data import read_W, read_csv, split_data
from tqdm import tqdm
from sklearn.metrics import roc_auc_score
import logging
import datetime

# %%
df = read_csv('output/weebil_vespula_pm1.csv', header=0)
train_X, test_X, train_y, test_y = split_data(df, False)
eta, lambd, alpha = 1, 2, 0.01
training_epochs, loops = 10, 10
J = 10
# Total number of teaching materials to present
textbook = 500
# Number of materials used for estimation
test_textbook_list = [100]
# Number of materials presented between estimations
between_textbook_list = [1]
# Number of groups
k = 1

lambds = [1, 2, 3, 4, 5]
for lambd in lambds:
    oracle = Oracle(eta=eta, lambd=lambd)
Example #16
0
def train_model(config_path: str):
    """Train a seq2seq (ru -> en) translation model from a pipeline config.

    Builds one of three architectures depending on ``config.net_params``:
    a GRU seq2seq with attention, a transformer, or a plain GRU seq2seq.
    Trains for ``config.N_EPOCHS`` epochs with LR scheduling on validation
    loss, checkpoints the best model, logs losses to TensorBoard, prints
    sample translations each epoch, and reports BLEU on the test split.
    """
    writer = SummaryWriter()
    config = read_training_pipeline_params(config_path)
    logger.info("pretrained_emb {b}", b=config.net_params.pretrained_emb)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logger.info("Device is {device}", device=device)
    SRC, TRG, dataset = get_dataset(config.dataset_path,
                                    config.net_params.transformer)
    train_data, valid_data, test_data = split_data(
        dataset, **config.split_ration.__dict__)
    if config.net_params.pretrained_emb:
        # Pre-trained Russian FastText vectors for the source embeddings.
        src_vectors = torchtext.vocab.FastText(language='ru')
    SRC.build_vocab(train_data, min_freq=3)
    if config.net_params.pretrained_emb:
        SRC.vocab.load_vectors(src_vectors)
    TRG.build_vocab(train_data, min_freq=3)
    torch.save(SRC.vocab, config.src_vocab_name)
    torch.save(TRG.vocab, config.trg_vocab_name)
    logger.info("Vocab saved")
    print(f"Unique tokens in source (ru) vocabulary: {len(SRC.vocab)}")
    print(f"Unique tokens in target (en) vocabulary: {len(TRG.vocab)}")
    train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
        (train_data, valid_data, test_data),
        batch_size=config.BATCH_SIZE,
        device=device,
        sort_key=_len_sort_key,
    )
    INPUT_DIM = len(SRC.vocab)
    OUTPUT_DIM = len(TRG.vocab)

    # NOTE(review): the three branches below are independent `if`s, not
    # `elif`s — if both `attention` and `transformer` flags are set, the
    # transformer model silently replaces the attention one.
    if config.net_params.attention:
        Encoder = network_gru_attention.Encoder
        Decoder = network_gru_attention.Decoder
        Seq2Seq = network_gru_attention.Seq2Seq
        Attention = network_gru_attention.Attention
        attn = Attention(config.net_params.HID_DIM, config.net_params.HID_DIM)
        enc = Encoder(INPUT_DIM, config.net_params.ENC_EMB_DIM,
                      config.net_params.HID_DIM, config.net_params.HID_DIM,
                      config.net_params.ENC_DROPOUT)
        dec = Decoder(OUTPUT_DIM, config.net_params.DEC_EMB_DIM,
                      config.net_params.HID_DIM, config.net_params.HID_DIM,
                      config.net_params.DEC_DROPOUT, attn)

        model = Seq2Seq(enc, dec, device)
    if config.net_params.transformer:
        logger.info("Transformer lets go")
        Encoder = network_transformer.Encoder
        Decoder = network_transformer.Decoder
        Seq2Seq = network_transformer.Seq2Seq
        SRC_PAD_IDX = SRC.vocab.stoi[SRC.pad_token]
        TRG_PAD_IDX = TRG.vocab.stoi[TRG.pad_token]
        # Transformer hyper-parameters are hard-coded here rather than
        # taken from the config.
        HID_DIM = 512
        ENC_LAYERS = 6
        DEC_LAYERS = 6
        ENC_HEADS = 8
        DEC_HEADS = 8
        ENC_PF_DIM = 2048
        DEC_PF_DIM = 2048
        ENC_DROPOUT = 0.1
        DEC_DROPOUT = 0.1

        enc = Encoder(INPUT_DIM, HID_DIM, ENC_LAYERS, ENC_HEADS, ENC_PF_DIM,
                      ENC_DROPOUT, device)

        dec = Decoder(OUTPUT_DIM, HID_DIM, DEC_LAYERS, DEC_HEADS, DEC_PF_DIM,
                      DEC_DROPOUT, device)
        model = Seq2Seq(enc, dec, SRC_PAD_IDX, TRG_PAD_IDX, device)
    if not config.net_params.attention and not config.net_params.transformer:
        # Fallback: plain GRU seq2seq without attention.
        Encoder = my_network.Encoder
        Decoder = my_network.Decoder
        Seq2Seq = my_network.Seq2Seq
        enc = Encoder(INPUT_DIM, config.net_params.ENC_EMB_DIM,
                      config.net_params.HID_DIM, config.net_params.N_LAYERS,
                      config.net_params.ENC_DROPOUT)
        dec = Decoder(OUTPUT_DIM, config.net_params.DEC_EMB_DIM,
                      config.net_params.HID_DIM, config.net_params.N_LAYERS,
                      config.net_params.DEC_DROPOUT)
        model = Seq2Seq(enc, dec, device)

    model.apply(init_weights)
    if config.net_params.pretrained_emb:
        # Replace randomly initialised source embeddings with the FastText
        # vectors loaded into SRC.vocab above.
        model.encoder.tok_embedding = nn.Embedding.from_pretrained(
            torch.FloatTensor(SRC.vocab.vectors))
    model.to(device)
    PAD_IDX = TRG.vocab.stoi[TRG.pad_token]
    optimizer = optim.Adam(model.parameters(), config.lr)
    # Padding positions are excluded from the loss.
    criterion = nn.CrossEntropyLoss(ignore_index=PAD_IDX)
    lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, **config.lr_scheduler.__dict__)
    train_history = []
    valid_history = []
    best_valid_loss = float('inf')
    print("Let's go")
    # for p in model.encoder.parameters():
    #     p.requires_grad = True
    # for p in model.decoder.parameters():
    #     p.requires_grad = True

    for epoch in range(config.N_EPOCHS):

        start_time = time.time()

        train_loss = train(model, train_iterator, optimizer, criterion,
                           config.CLIP, train_history, valid_history)
        valid_loss = evaluate(model, valid_iterator, criterion)
        lr_scheduler.step(valid_loss)
        end_time = time.time()

        epoch_mins, epoch_secs = epoch_time(start_time, end_time)

        # Keep only the checkpoint with the best validation loss so far.
        if valid_loss < best_valid_loss:
            best_valid_loss = valid_loss
            torch.save(model.state_dict(), config.model_out_name)

        train_history.append(train_loss)
        valid_history.append(valid_loss)
        writer.add_scalar('train loss', train_history[-1], epoch)
        writer.add_scalar('valid loss', valid_history[-1], epoch)
        writer.add_scalar('learning rate', lr_scheduler._last_lr[0], epoch)
        print(f'Epoch: {epoch + 1:02} | Time: {epoch_mins}m {epoch_secs}s')
        print(
            f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}'
        )
        print(
            f'\t Val. Loss: {valid_loss:.3f} |  Val. PPL: {math.exp(valid_loss):7.3f}'
        )
        # Print a handful of sample translations from the validation set.
        for idx, batch in enumerate(valid_iterator):
            if idx > 3:
                break
            src = batch.src[:, idx:idx + 1]
            trg = batch.trg[:, idx:idx + 1]
            generate_translation(src, trg, model, TRG.vocab, SRC.vocab,
                                 config.net_params.transformer)

    get_bleu(model, test_iterator, TRG, config.net_params.transformer)
Example #17
0
#!/usr/bin/env python3
"""Multi output classification."""

from sklearn.neighbors import KNeighborsClassifier
import matplotlib
import matplotlib.pyplot as plt
import numpy as np

import load_data

if __name__ == '__main__':
    # Denoising via multi-output classification: learn to map noisy digit
    # images back to their clean pixels.
    train_data, _, test_data, _ = load_data.split_data()

    train_mod = train_data + np.random.randint(0, 100, (len(train_data), 784))
    test_mod = test_data + np.random.randint(0, 100, (len(test_data), 784))

    knn_clf = KNeighborsClassifier()
    # Targets are the clean images themselves (one label per pixel).
    knn_clf.fit(train_mod, train_data)

    sample_idx = 100
    clean_digit = knn_clf.predict([test_mod[sample_idx]])

    # Left panel: noisy input; right panel: reconstructed digit.
    for position, image in ((121, test_mod[sample_idx]), (122, clean_digit)):
        plt.subplot(position)
        plt.imshow(image.reshape(28, 28),
                   cmap=matplotlib.cm.binary, interpolation="nearest")

    plt.show()