Example No. 1
def main(*args):
    path, csv, test_csv, test_labels, identifier, infer, model_name, result_dir, sub_fn_path, load, exp_name = args

    config = global_config[exp_name]
    config['path'] = path
    config['csv'] = csv
    config['test_csv'] = test_csv
    config['test_labels'] = test_labels
    config['model_name'] = model_name
    config['result_dir'] = result_dir
    config['identifier'] = identifier

    if infer:
        vocab, trn_ds, vld_ds, _, emb_matrix = make_dataset(config)
        trn_dl, vld_dl, _ = make_iterator(config, vocab, trn_ds, vld_ds, _)

        config['vocab_size'] = len(vocab.itos)
        config['pad_idx'] = vocab.stoi[PAD_TOKEN]

        model = make_model(config, emb_matrix)

        # Load the model saved by a previous run from disk instead of training again
        if load:
            print(
                'Loading model from disk from {}'.format(config['result_dir'] +
                                                         config['model_name'] +
                                                         '.pth'))

            model_dict = load_model(config['result_dir'] +
                                    config['model_name'] + '.pth')
            # load_state_dict updates the model in place; it does not return the model
            model.load_state_dict(model_dict)
        else:
            model = learn(model, trn_dl, vld_dl, vocab, config)

    else:
        vocab, trn_ds, _, tst_ds, emb_matrix = make_dataset(config)
        trn_dl, _, tst_dl = make_iterator(config, vocab, trn_ds, _, tst_ds)

        config['vocab_size'] = len(vocab.itos)
        config['emb_matrix'] = emb_matrix
        config['pad_idx'] = vocab.stoi[PAD_TOKEN]

        model = make_model(config, emb_matrix)

        if load:
            print(
                'Loading model from disk from {}'.format(config['result_dir'] +
                                                         config['model_name'] +
                                                         '_full.pth'))

            model_dict = load_model(config['result_dir'] +
                                    config['model_name'] + '_full.pth')
            # load_state_dict updates the model in place; it does not return the model
            model.load_state_dict(model_dict)
        else:
            model = learn(model, trn_dl, _, vocab, config)

        test_labels = read_csv(config['test_labels'])
        _ = predictions(model, tst_dl, None, test_labels, sub_fn_path)
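A minimal sketch of how this `main` could be called, assuming purely illustrative paths and names (every value below is hypothetical):

main('data/',               # path
     'train.csv',           # csv
     'test.csv',            # test_csv
     'test_labels.csv',     # test_labels
     'run-01',              # identifier
     False,                 # infer: False -> train on the full set and write test predictions
     'toxic-gru',           # model_name
     'results/',            # result_dir
     'submission.csv',      # sub_fn_path
     False,                 # load: False -> train instead of loading a checkpoint
     'baseline')            # exp_name: key into global_config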
Example No. 2
def train(args):
    total_timesteps = int(args.num_timesteps)
    seed = args.seed
    nsteps = int(args.nsteps)
    ent_coef = args.ent_coef
    vf_coef = args.vf_coef
    p_coef = args.p_coef
    lr = args.lr
    max_grad_norm = args.max_grad_norm
    gamma = args.gamma
    lam = args.lam
    nminibatches = int(args.nminibatches)
    noptepochs = int(args.noptepochs)
    cliprange = args.cliprange
    save_interval = int(args.save_interval)
    env = build_env(args)
    model = learn(env=env,
                  total_timesteps=total_timesteps,
                  seed=seed,
                  nsteps=nsteps,
                  ent_coef=ent_coef,
                  lr=lr,
                  vf_coef=vf_coef,
                  p_coef=p_coef,
                  max_grad_norm=max_grad_norm,
                  gamma=gamma,
                  lam=lam,
                  nminibatches=nminibatches,
                  noptepochs=noptepochs,
                  cliprange=cliprange,
                  save_interval=save_interval)

    return model, env
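The `args` object consumed by `train` needs to expose the hyperparameters unpacked at the top of the function. A minimal sketch of a compatible parser, assuming argparse and purely illustrative defaults (any environment-specific options read by `build_env` are omitted):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--num_timesteps', type=float, default=1e6)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--nsteps', type=int, default=128)
parser.add_argument('--ent_coef', type=float, default=0.01)
parser.add_argument('--vf_coef', type=float, default=0.5)
parser.add_argument('--p_coef', type=float, default=1.0)
parser.add_argument('--lr', type=float, default=2.5e-4)
parser.add_argument('--max_grad_norm', type=float, default=0.5)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--lam', type=float, default=0.95)
parser.add_argument('--nminibatches', type=int, default=4)
parser.add_argument('--noptepochs', type=int, default=4)
parser.add_argument('--cliprange', type=float, default=0.2)
parser.add_argument('--save_interval', type=int, default=10)
args = parser.parse_args()

model, env = train(args)  # build_env(args) may also read further, env-specific fields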
Example No. 3
def getWeightAndBias(epochs):
    global weightAndBias
    global learning

    if not learning:
        learning = True
        weightAndBias = learner.learn(xList, yList, epochs)
        learning = False
Example No. 4
def main(clfs=None):
    with open(DESTINATION + '.out', 'w') as fp:
        clfs = clfs or {}
        fp.write('File,Class\n')
        for patient in [1, 2, 3]:
            clf, X, y = sub_learn(patient=patient)
            if patient not in clfs:
                clfs[patient] = clf, learn(X, y)
            make_submission(clfs, fp, patient)
Example No. 5
def learn():
    global xList
    global yList
    global weightAndBias
    global learning

    # 1, 2.5, 3 -> [1.0, 2.5, 3.0]
    xList = xList + list(map(float, request.json['xList'].strip().split(",")))
    yList = yList + list(map(float, request.json['yList'].strip().split(",")))

    xList = xList[0:min(len(xList), len(yList))]
    yList = yList[0:min(len(xList), len(yList))]

    epochs = int(request.json['epochs'])

    if not learning:
        learning = True
        weightAndBias = learner.learn(xList, yList, epochs)
        learning = False
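A minimal client-side sketch of the JSON payload this handler expects, assuming it is exposed at a hypothetical /learn endpoint (the URL and port are illustrative):

import requests

payload = {
    "xList": "1, 2.5, 3",    # comma-separated values, parsed to [1.0, 2.5, 3.0]
    "yList": "2.0, 5.0, 6.1",
    "epochs": 100,
}
resp = requests.post("http://localhost:5000/learn", json=payload)
print(resp.status_code)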
Example No. 6
from learner import learn, predict
import argparse
import warnings
warnings.filterwarnings("ignore")

parser = argparse.ArgumentParser(description='ML for dropout prediction')
parser.add_argument("--train",
                    type=bool,
                    default=1,
                    help="set a module in training mode")
parser.add_argument("--test",
                    type=bool,
                    default=0,
                    help="set a module in testing mode")
parser.add_argument("--cv", type=int, default=3, help="KFolds")
parser.add_argument("--smote",
                    type=int,
                    default=None,
                    help="whether to use smote")
args = parser.parse_args()

if args.train:
    file_name = "../data/train_data_week_1_challenge.csv"
    learn(file_name, args.cv, args.smote)
if args.test:
    file_name = "../data/test_data_week_1_challenge.csv"
    label = "../data/test_label_week_1_challenge.csv"
    predict(file_name, label, args.smote)
Example No. 7
File: main.py Project: ollpu/qml
    ([], 'Y', 0),
]

model = Model(2, struct)
# model = Model(1, [
#     ([], 'Y', 0)
# ])

params = UnsetParams()
tparams = ModelParams(model,
                      2 * np.pi * np.random.rand(len(model.structure) + 1))
tparams.params[-1] = np.random.rand(1) - 0.5
cost_evolution = []

for repi in range(5):
    tparams, one_ce = learner.learn(tparams, X, Y, 0.01, 1000)
    cost_evolution.append(one_ce)
    if tparams.cost < params.cost:
        params = tparams.copy()
    print(tparams.cost)
    tparams.params += np.random.normal(0, 1, tparams.params.shape)
print(params.cost)

export.write_qs(params)

Yc = params.classify(X)

mx, my = np.meshgrid(np.linspace(-1.5, 2.5, 50), np.linspace(-1.5, 1.5, 50))
mf = [Xmap(tx) for tx in zip(mx.flat, my.flat)]
mz = np.reshape(params.predict(mf), mx.shape)

plt.subplot(2, 1, 1)
Example No. 8
import torch
import learner
import lfw as dataset
import preprocess
import torchvision.transforms as transforms

with torch.no_grad():
    train_set, test_set = dataset.train_test()
    learner.learn(preprocess.resize_with_padding(train_set['data'], 390, 390),
                  train_set['labels'])
    preds = learner.predict(
        preprocess.resize_with_padding(test_set['data'], 390, 390))
    print("predictions\t=", preds.tolist())
    print("ground truth \t=", test_set['labels'].tolist())
    print("accuracy\t= %.2f%%" %
          ((preds == test_set['labels']).sum().item() * 100 / preds.numel(), ))
Example No. 9
 if args.moe_warmup > 0 and sequence_index == args.moe_warmup:
     learner.warmup_end()  # changes the weights trainer
 sequence_type = sequence_iterator.get_current_index()
 sequence_type_name = sequence_iterator.get_current_iterator().get_name()
 domain_switched(sequence_index, sequence_type, sequence_type_name)
 sequence_loss = 0
 sequence_length = len(input_sequence)
 sequence_end = global_position + sequence_length
 start_time = time.time()
 for chunk_index, (input_chunk, target_chunk) in enumerate(
         data.safe_iterate_chunks(input_sequence, target_sequence,
                                  args.bptt, args.batch_size)):
     timestep_updated(chunk_index, global_position)
     batch_loss = None
     if not args.debug_reveal_domain:
         loss = learner.learn(input_chunk, target_chunk.view(-1))
     else:
         loss = learner.learn(input_chunk, target_chunk.view(-1),
                              sequence_type)
     loss = loss.item()
     if args.shadow_run:
         shadow_index = global_position // len(input_chunk)
         assert global_position == shadow_positions[shadow_index]
         loss -= shadow_losses[shadow_index]
     if batch_loss is None:
         batch_loss = loss
         sequence_loss += loss * len(input_chunk)
         global_loss += loss * len(input_chunk)
         loss_hist.append(loss)
     log_utils.write_general_ppl(gen_fout,
                                 gen_json_fout, args, global_position,