Example 1
import torch
import torch.nn as nn
import torch.optim as optim

import data  # project-local module assumed to provide data_load()


def train_net(net, epochs=5, lr=0.1, batch_size=1):

    file_path = '/media/wingspan/ssd512/Glj_train/VOCtrainval_2012/VOCdevkit/VOC2012/ImageSets/Segmentation/train.txt'
    dir_img = '/media/wingspan/ssd512/Glj_train/VOCtrainval_2012/VOCdevkit/VOC2012/JPEGImages'
    dir_mask = '/media/wingspan/ssd512/Glj_train/VOCtrainval_2012/VOCdevkit/VOC2012/SegmentationClass'
    dir_checkpoint = 'checkpoints/'

    train_pipe = data.data_load(file_path, dir_img, dir_mask)

    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=0.0005)
    loss_cal = nn.CrossEntropyLoss()

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        imgs_labels = train_pipe.data_gene()
        epoch_loss = 0

        for imgs, labels in imgs_labels:
            # CrossEntropyLoss expects the network's unnormalized scores as
            # predictions and class-index masks as LongTensor targets.
            imgs = torch.from_numpy(imgs)
            true_masks = torch.LongTensor(labels)
            pred_masks = net(imgs)
            loss = loss_cal(pred_masks, true_masks)
            epoch_loss += loss.item()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print('Epoch finished! Loss: {}'.format(epoch_loss))

        # Save a checkpoint after every epoch.
        torch.save(net.state_dict(),
                   dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
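
A minimal way to call this function, assuming a segmentation network class such as a UNet is defined elsewhere in the project; the module name, class name and constructor arguments below are assumptions, not part of the original:

from unet import UNet  # hypothetical module and class

net = UNet(n_channels=3, n_classes=21)  # 21 = 20 VOC classes + background (assumed)
train_net(net, epochs=5, lr=0.1)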
Example 2
import chainer
from chainer import cuda, iterators, training
from chainer.training import extensions

import data     # project-local data-loading module (assumed)
import mymodel  # project-local model definitions (assumed)
# MyUpdater and MyEvaluater are assumed to be defined in this module.


def main():
    # text_path and relations_path are expected to be module-level settings.
    texts_dict, word_id = data.data_load(text_path)
    relations_list = data.relation_load(relations_path)

    # Hold out the first 200 relations for development; train on the rest.
    devel = relations_list[:200]
    train = relations_list[200:]

    train_iter = iterators.SerialIterator(train, batch_size=10, shuffle=True)
    test_iter = iterators.SerialIterator(devel,
                                         batch_size=10,
                                         repeat=False,
                                         shuffle=False)

    m = mymodel.MyChain(len(word_id))
    model = mymodel.Classifier(m)

    # Move the model to the first GPU.
    gpu_device = 0
    cuda.get_device(gpu_device).use()
    model.to_gpu(gpu_device)

    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    updater = MyUpdater(train_iter, optimizer, texts_dict)
    trainer = training.Trainer(updater, (30, 'epoch'), out='result')

    # Evaluate on a copy of the model; copy() shares parameters but gives
    # the evaluator its own state.
    eval_model = model.copy()
    trainer.extend(MyEvaluater(test_iter, eval_model, updater.myconverter))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(['epoch', 'main/accuracy']))
    trainer.extend(extensions.ProgressBar())
    trainer.run()
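
The trainer reports accuracy but never persists the trained weights. If saving is wanted, Chainer's standard snapshot_object extension could be registered before trainer.run(); this line is an addition, not part of the original:

# Optional addition: save the trained chain after each epoch.
trainer.extend(extensions.snapshot_object(m, 'model_epoch_{.updater.epoch}.npz'))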
Example 3
from data import data_load
import tensorflow_datasets as tfds
import tensorflow as tf
# pick a data set
DATA_SET = 'mnist'

# define splits
DS_CONFIG = {
    # mnist data set parameters
    'mnist': {
        'batch_size': 700,
        'num_repeats': 5,
        'mdl_input_dim': [24, 24, 1]
    }
}

# load the data set
TRAIN_SET, TEST_SET, SET_INFO = data_load(db_name=DATA_SET,
                                          with_info=True,
                                          **DS_CONFIG[DATA_SET])
# Quick sanity check against tfds directly (kept for reference):
# dataset, info = tfds.load('mnist', split='train[:10%]+test[:10%]', with_info=True)
# print(info)
# print(len(list(dataset)))       # element count via list materialization
# print(sum(1 for _ in dataset))  # element count via iteration
print("done!!!")
Example 4
# The original imported only train_network; data_load and build_nn_resnet
# are used below, so their (assumed) import locations are added here.
from models import train_network, build_nn_resnet
from data import data_load

from tensorflow.python.client import device_lib
# Quick check that TensorFlow can see the local devices (incl. any GPU)
print(device_lib.list_local_devices())

ln_ip_data_path = r'\python_code\ln_train_2m_b3k_input.mat'
ln_op_data_path = r'\python_code\ln_train_2m_b3k_output.mat'

ip_data_path = r'\python_code\b3k_input_2m.mat'
op_data_path = r'\python_code\b3k_output_2m.mat'
test_data_path = r'\python_code\b3k_test_input_1m.mat'

model_save_path = r'model_weights.h5'
# Loading Data
X, y = data_load(ip_data_path, op_data_path)

# Keep only the first 45 output columns ("8th order" in the original's terms)
y = y[:, :45]

print('Data Loaded ... \n')

res_model = build_nn_resnet()

print('Network Constructed ... \n')
print('Training Network ... \n')

res_model = train_network(res_model, X, y, num_epoch=400, batch=1000, save_path=model_save_path)

print('Making Predictions and Saving file')
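
The final print announces a prediction step that isn't shown. A minimal sketch of what that step could look like with standard Keras and SciPy APIs; since the original does not show how the test inputs at test_data_path are loaded, the sketch predicts on the training inputs purely to illustrate the calls:

import scipy.io as sio

# Hypothetical prediction step implied by the print above.
preds = res_model.predict(X, batch_size=1000)
sio.savemat('predictions.mat', {'preds': preds})  # keep the project's .mat convention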
Example 5
from __future__ import print_function

from keras.callbacks import LambdaCallback  # imported but unused here; see the sketch below

from model import get_model
from data import data_load
from config import Config
from sample import print_sample

(chars, char_indices, indices_char, maxlen, X, y, text) = data_load(Config)

model = get_model(Config, maxlen, chars)

model_name = Config['model_name']

print_sample(Config, text, maxlen, chars, char_indices, model, indices_char)
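
LambdaCallback is imported above but never used, which suggests the training call lives elsewhere. A sketch of the typical wiring, printing a sample at the end of each epoch; the model.fit arguments and the Config keys are assumptions:

# Hypothetical wiring of the imported LambdaCallback: print a generated
# sample after every training epoch.
sample_callback = LambdaCallback(
    on_epoch_end=lambda epoch, logs: print_sample(
        Config, text, maxlen, chars, char_indices, model, indices_char))

model.fit(X, y,
          batch_size=Config.get('batch_size', 128),  # assumed Config key
          epochs=Config.get('epochs', 10),           # assumed Config key
          callbacks=[sample_callback])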
Example 6
import torch
import torch.nn as nn

import data_help   # project-local data utilities (assumed)
import model_help  # project-local model utilities (assumed)
# args, batch_size_fn and run_epoch are assumed to be defined elsewhere in
# this module; a reference sketch of greedy_decode follows the function.


def german_english_translation():
    print("german_english_translation task")
    SRC, TGT, train, val, test = data_help.data_load()

    # GPUs to use
    devices = [0, 1]
    pad_idx = TGT.vocab.stoi["<blank>"]
    print("Size:", len(SRC.vocab), len(TGT.vocab))
    model = model_help.make_model(len(SRC.vocab), len(TGT.vocab),
                                  N=6).to(args.device)
    print("+===============+")
    criterion = model_help.LabelSmoothing(size=len(TGT.vocab),
                                          padding_idx=pad_idx,
                                          smoothing=0.1).to(args.device)
    BATCH_SIZE = 12000
    train_iter = data_help.MyIterator(train,
                                      batch_size=BATCH_SIZE,
                                      device=devices[0],
                                      repeat=False,
                                      sort_key=lambda x: (len(x.src), len(x.trg)),
                                      batch_size_fn=batch_size_fn,
                                      train=True)
    valid_iter = data_help.MyIterator(val,
                                      batch_size=BATCH_SIZE,
                                      device=devices[0],
                                      repeat=False,
                                      sort_key=lambda x: (len(x.src), len(x.trg)),
                                      batch_size_fn=batch_size_fn,
                                      train=False)
    model_par = nn.DataParallel(model, device_ids=devices)

    model_opt = model_help.NoamOpt(
        model.src_embed[0].d_model, 1, 2000,
        torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98),
                         eps=1e-9))
    for epoch in range(10):
        # One full training pass, then report the validation loss.
        model_par.train()
        run_epoch((data_help.rebatch(pad_idx, b) for b in train_iter),
                  model_par,
                  model_help.MultiGPULossCompute(model.generator,
                                                 criterion,
                                                 devices=devices,
                                                 opt=model_opt))
        model_par.eval()
        loss = run_epoch((data_help.rebatch(pad_idx, b) for b in valid_iter),
                         model_par,
                         model_help.MultiGPULossCompute(model.generator,
                                                        criterion,
                                                        devices=devices,
                                                        opt=None))
        print(loss)
    """Once trained we can decode the model to produce a set of translations. 
    Here we simply translate the first sentence in the validation set. This dataset 
    is pretty small so the translations with greedy search are reasonably accurate."""
    for i, batch in enumerate(valid_iter):
        src = batch.src.transpose(0, 1)[:1]
        src_mask = (src != SRC.vocab.stoi["<blank>"]).unsqueeze(-2)
        out = greedy_decode(model,
                            src,
                            src_mask,
                            max_len=60,
                            start_symbol=TGT.vocab.stoi["<s>"])
        print("Translation:", end="\t")
        for i in range(1, out.size(1)):
            sym = TGT.vocab.itos[out[0, i]]
            if sym == "</s>": break
            print(sym, end=" ")
        print()
        print("Target:", end="\t")
        for i in range(1, batch.trg.size(0)):
            sym = TGT.vocab.itos[batch.trg.data[i, 0]]
            if sym == "</s>": break
            print(sym, end=" ")
        print()
        break
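
greedy_decode is not defined in this snippet. For reference, a sketch in the style of the Annotated Transformer, which this code closely follows; it assumes the model exposes encode, decode and generator, and that the subsequent_mask helper from the same codebase is available:

def greedy_decode(model, src, src_mask, max_len, start_symbol):
    # Encode the source once, then emit one target token at a time,
    # always taking the highest-probability symbol (greedy search).
    memory = model.encode(src, src_mask)
    ys = torch.ones(1, 1).fill_(start_symbol).type_as(src.data)
    for _ in range(max_len - 1):
        out = model.decode(memory, src_mask, ys,
                           subsequent_mask(ys.size(1)).type_as(src.data))
        prob = model.generator(out[:, -1])
        _, next_word = torch.max(prob, dim=1)
        ys = torch.cat([ys, torch.ones(1, 1).type_as(src.data)
                        .fill_(next_word.item())], dim=1)
    return ys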