Example #1
def main(config):
    logger = config.get_logger('train')
    # fix random seeds for reproducibility
    seed_everything(seed=config.config['seed'])
    metric_bests = []
    for i, train_dataloader, valid_dataloader, test_dataloader in makeDataLoader(config):

        model = makeModel(config)
        # logger.info(model)

        criterion = makeLoss(config)
        metrics = makeMetrics(config)

        optimizer = makeOptimizer(config, model)
        lr_scheduler = makeLrSchedule(config, optimizer, train_dataloader)

        trainer = Trainer(model, criterion, metrics, optimizer,
                          config=config,
                          i_fold=i,
                          data_loader=train_dataloader,
                          valid_data_loader=valid_dataloader,
                          test_data_loader=test_dataloader,
                          lr_scheduler=lr_scheduler)

        trainer.train()
        metric_bests.append(trainer.mnt_best)
    logger.info('metric scores:{}'.format(metric_bests))
    logger.info('metric mean score: {}'.format(sum(metric_bests) / float(len(metric_bests))))
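
In this k-fold variant, makeDataLoader(config) yields one (fold_index, train, valid, test) tuple per fold, and the loop averages the best monitored metric over all folds. Below is a minimal sketch of such a generator, assuming a KFold split over a single dataset; build_dataset, build_test_loader, and the config keys used here are illustrative assumptions, not the project's actual helpers.

import numpy as np
from sklearn.model_selection import KFold
from torch.utils.data import DataLoader, Subset

def makeDataLoader(config):
    # Hypothetical sketch: yield one (fold_index, train, valid, test) tuple per fold.
    dataset = build_dataset(config)           # assumed project helper
    test_loader = build_test_loader(config)   # assumed project helper
    batch_size = config['data_loader']['args']['batch_size']  # assumed config key

    kfold = KFold(n_splits=config['n_folds'], shuffle=True,
                  random_state=config.config['seed'])          # assumed config keys
    for i, (train_idx, valid_idx) in enumerate(kfold.split(np.arange(len(dataset)))):
        train_loader = DataLoader(Subset(dataset, train_idx.tolist()),
                                  batch_size=batch_size, shuffle=True)
        valid_loader = DataLoader(Subset(dataset, valid_idx.tolist()),
                                  batch_size=batch_size, shuffle=False)
        yield i, train_loader, valid_loader, test_loader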
Example #2
def main(config):
    from data_process import makeDataLoader
    #
    from trainer.htqe_trainer import Trainer

    logger = config.get_logger('train')
    train_dataloader, valid_dataloader, test_dataloader = makeDataLoader(
        config)

    model = makeModel(config)
    logger.info(model)

    criterion = makeLoss(config)
    metrics = makeMetrics(config)

    optimizer = makeOptimizer(config, model)
    lr_scheduler = makeLrSchedule(config, optimizer, train_dataloader.dataset)

    trainer = Trainer(model,
                      criterion,
                      metrics,
                      optimizer,
                      config=config,
                      data_loader=train_dataloader,
                      valid_data_loader=valid_dataloader,
                      test_data_loader=test_dataloader,
                      lr_scheduler=lr_scheduler)

    trainer.train()
Example #3
def main(config):
    from data_process import makeDataLoader
    # The training setup differs slightly for different datasets.
    # from trainer.weibo_trainer import Trainer # weibo
    # from trainer.cnews_trainer import Trainer # cnews
    from trainer.medical_question_trainer import Trainer

    logger = config.get_logger('train')
    train_dataloader, valid_dataloader, test_dataloader = makeDataLoader(config)

    model = makeModel(config)
    logger.info(model)

    criterion = makeLoss(config)
    metrics = makeMetrics(config)

    optimizer = makeOptimizer(config, model)
    lr_scheduler = makeLrSchedule(config, optimizer, train_dataloader.dataset)

    trainer = Trainer(model, criterion, metrics, optimizer,
                      config=config,
                      data_loader=train_dataloader,
                      valid_data_loader=valid_dataloader,
                      test_data_loader=test_dataloader,
                      lr_scheduler=lr_scheduler)

    trainer.train()
Example #4
def active_learning(config):
    from data_process import makeDataSet
    from trainer.trainer import Trainer

    logger = config.get_logger('train')
    train_set, valid_set, query_set = makeDataSet(config)

    model = makeModel(config)
    logger.info(model)

    criterion = makeLoss(config)
    metrics = makeMetrics(config)

    optimizer = makeOptimizer(config, model)
    # lr_scheduler = makeLrSchedule(config, optimizer, train_set)

    trainer = Trainer(
        model,
        criterion,
        metrics,
        optimizer,
        config=config,
        train_dataset=train_set,
        valid_dataset=valid_set,
        query_dataset=query_set,
        test_dataset=None,
        # lr_scheduler=lr_scheduler
    )

    trainer.train()
Example #5
def main(config):
    logger = config.get_logger('train')

    # setup data_loader instances
    data_loader = config.init_obj('data_loader', module_data)
    valid_data_loader = data_loader.split_validation()

    # build model architecture, then print to console
    model = makeModel(config)
    logger.info(model)

    # get function handles of loss and metrics
    criterion = makeLoss(config)
    metrics = makeMetrics(config)

    # build optimizer and learning rate scheduler; delete every line containing lr_scheduler to disable the scheduler
    optimizer = makeOptimizer(config, model)

    lr_scheduler = config.init_obj('lr_scheduler', torch.optim.lr_scheduler,
                                   optimizer)

    trainer = Trainer(model,
                      criterion,
                      metrics,
                      optimizer,
                      config=config,
                      data_loader=data_loader,
                      valid_data_loader=valid_data_loader,
                      lr_scheduler=lr_scheduler)

    trainer.train()
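
config.init_obj('lr_scheduler', torch.optim.lr_scheduler, optimizer) instantiates whatever scheduler class the config names and passes the optimizer plus the arguments stored in the config. A hedged sketch of the config entries this pattern reads, following the 'type'/'args' layout that Example #6 below also accesses (the concrete class names and values here are illustrative, not the project's actual config):

# Hypothetical config fragment (shown as a Python dict; typically stored as JSON).
config_fragment = {
    "data_loader": {
        "type": "MnistDataLoader",           # assumed example class in module_data
        "args": {"data_dir": "data/", "batch_size": 128,
                 "shuffle": True, "validation_split": 0.1, "num_workers": 2},
    },
    "lr_scheduler": {
        "type": "StepLR",                    # torch.optim.lr_scheduler.StepLR
        "args": {"step_size": 50, "gamma": 0.1},
    },
}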
Example #6
def main(config):
    logger = config.get_logger('test')

    # setup data_loader instances
    data_loader = getattr(module_data, config['data_loader']['type'])(
        config['data_loader']['args']['data_dir'],
        batch_size=512,
        shuffle=False,
        validation_split=0.0,
        training=False,
        num_workers=2)

    # build model architecture
    model = makeModel(config)
    logger.info(model)

    # get function handles of loss and metrics
    loss_fn = makeLoss(config)
    metric_fns = makeMetrics(config)

    logger.info('Loading checkpoint: {} ...'.format(config.resume))
    checkpoint = torch.load(config.resume)
    state_dict = checkpoint['state_dict']
    if config['n_gpu'] > 1:
        model = torch.nn.DataParallel(model)
    model.load_state_dict(state_dict)

    # prepare model for testing
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)
    model.eval()

    total_loss = 0.0
    total_metrics = torch.zeros(len(metric_fns))

    with torch.no_grad():
        for i, (data, target) in enumerate(tqdm(data_loader)):
            data, target = data.to(device), target.to(device)
            output = model(data)

            #
            # save sample images, or do something with output here
            #

            # computing loss, metrics on test set
            loss = loss_fn(output, target)
            batch_size = data.shape[0]
            total_loss += loss.item() * batch_size
            for j, metric in enumerate(metric_fns):
                total_metrics[j] += metric(output, target) * batch_size

    n_samples = len(data_loader.sampler)
    log = {'loss': total_loss / n_samples}
    log.update({
        met.__name__: total_metrics[i].item() / n_samples
        for i, met in enumerate(metric_fns)
    })
    logger.info(log)
Example #7
def main():
    size = (256, 256)

    weights = '4_50_0.1156_0.9848_0.6352_0.9524.h5'
    model = makeModel(size, weights='weights/' + weights)

    videoMap = [(r'D:\DiskE\Computer_Vision_Task\video_6.mp4',
                 'classificationLogs/video_6_classified_4_50.csv'),
                (r'D:\DiskE\Computer_Vision_Task\video_2.mp4',
                 'classificationLogs/video_2_classified_4_50.csv')]

    for srcVideoPath, classifiedVideoPath in videoMap:
        logClassification(model, size, srcVideoPath, classifiedVideoPath)
Example #8
def setupModel(batch, isTraining, add_summary=False):
    result = {}

    truth = batch["truth"]
    nclasses = truth.shape.as_list()[1]
    output, model = makeModel(nclasses,
                              batch["cpf"],
                              batch["npf"],
                              batch["sv"],
                              batch["globals"],
                              isTraining=isTraining)
    result["model"] = model

    prediction = tf.nn.softmax(output)
    '''
    bigtruth = tf.stack([tf.reduce_max(truth[:,0:nclasses-1],axis=1),truth[:,nclasses-1]],axis=1)
    bigoutput = tf.stack([tf.reduce_sum(output[:,0:nclasses-1],axis=1),output[:,nclasses-1]],axis=1)
    bigprediction =  keras.layers.Activation('softmax')(bigoutput)
    '''
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=truth,
                                                               logits=output)
    loss = tf.reduce_mean(cross_entropy)

    #bigcross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=bigtruth,logits=bigoutput)
    #bigloss = tf.reduce_mean(bigcross_entropy)

    #bigcross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=truth,logits=output)
    #bigloss = tf.reduce_mean(cross_entropy)
    result["prediction"] = prediction
    result["truth"] = truth

    #result["bigprediction"] = bigprediction
    #result["bigtruth"] = bigtruth

    accuracy, accuracy_op = tf.metrics.accuracy(tf.argmax(truth, 1),
                                                tf.argmax(prediction, 1))
    result["accuracy"] = accuracy_op

    #print "Extra loss in model from Keras:", model.losses
    result["loss"] = loss
    #result["bigloss"] = bigloss
    #result["extraloss"] = tf.reduce_sum(model.losses)
    result["mimloss"] = loss  # +tf.reduce_sum(model.losses)  # bigloss+loss+tf.reduce_sum(model.losses)

    return result
Example #9
def train():
    inputSize = (150, 150)
    model = makeModel(inputSize, compileForTraining=True, weights='first_try.h5')

    epochs = 50
    batch_size = 64

    train_generator, validation_generator = makeDataset('data', inputSize, batch_size)

    nb_train_samples = train_generator.samples
    nb_validation_samples = validation_generator.samples

    model.fit_generator(
        train_generator,
        steps_per_epoch=nb_train_samples // batch_size,
        epochs=epochs,
        validation_data=validation_generator,
        validation_steps=nb_validation_samples // batch_size)

    model.save_weights('first_try.h5')
Example #10
def main(config):
    logger = config.get_logger('train')
    train_dataloader, valid_dataloader = makeDataLoader(config)

    model = makeModel(config)
    logger.info(model)

    # criterion = makeLoss(config)
    # metrics = makeMetrics(config)

    optimizer = makeOptimizer(config, model)
    lr_scheduler = makeLrSchedule(config, optimizer, train_dataloader)

    trainer = Trainer(model,
                      None,
                      None,
                      optimizer,
                      config=config,
                      data_loader=train_dataloader,
                      valid_data_loader=valid_dataloader,
                      test_data_loader=None,
                      lr_scheduler=lr_scheduler)
    trainer.train()
Example #11
File: trn.py  Project: hkaggarwal/J-MoDL
saveDir = 'trained_models/'
cwd = os.getcwd()
directory = saveDir + datetime.now().strftime("%d%b_%I%M%S%P_") + \
    str(acc) + 'acc_' + str(nImg) + 'I_' + str(epochs) + 'ep_' + str(K) + 'K'

if not os.path.exists(directory):
    os.makedirs(directory)
sessFileName = directory + '/model'

#%% save testing model first.

tf.reset_default_graph()
orgT = tf.placeholder(dtype=tf.complex64, shape=(None, None, None), name='org')
csmT = tf.placeholder(tf.complex64, shape=(None, None, None, None), name='csm')

atbT, predT = mm.makeModel(orgT, csmT, initx, inity, M, N, sigma, lam, K,
                           False)
atbT = tf.identity(atbT, name='atb')
predT = tf.identity(predT, name='predTst')

sessFileNameTst = directory + '/modelTst'

saver = tf.train.Saver()
with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    savedFile = saver.save(sess,
                           sessFileNameTst,
                           latest_filename='checkpointTst')
print('testing model saved:' + savedFile)

#%% some tensorflow code for dataset input
tf.reset_default_graph()
Example #12
    for i in tqdm(range(0, len(src), bs)):
        X = batch(src[i:i + bs], vocab=vocab_src, padidx=pad_idx)
        X = torch.from_numpy(X).to(device)
        Y = greedyDecoder(X,
                          standardMask(X, pad_idx),
                          model,
                          startidx=sos_idx,
                          unk=pad_idx)
        ypred.extend(translateBack(Y, inv_vocab_tgt, EOS))
        ytrue.extend([' '.join(s) for s in tgt[i:i + bs]])

    score = blue([s.lower() for s in ypred], [s.lower() for s in ytrue])

    return score


if __name__ == '__main__':
    # --model_path=models --datafile=../.data/iwslt/de-en/IWSLT16.TED.tst2014.de-en --vocab=vocab
    #29.15,30.8,24.98
    parser = parse()
    src_vocab, tgt_vocab = loadVocab(parser.vocab)
    src, tgt = readFile(parser.datafile)

    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')

    model = makeModel(len(src_vocab), len(tgt_vocab))
    model = model.to(device)
    restore(model, path=parser.model_path, tocpu=False)
    myscore = myEvaluate(model, src, tgt, src_vocab, tgt_vocab, device=device)
    print('score:', myscore)
Example #13
from keras.preprocessing.image import load_img
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import RMSprop
import pickle
import statistics
from collections import Counter
import model  # project module that provides makeModel()

def fixed_generator(generator):
    for batch in generator:
        yield (batch, batch)

INIT_LR = 1e-3
EPOCHS = 60
BATCH_SIZE = 64
SIZE = 32


autoencoder = model.makeModel()
# print(autoencoder.summary())

opt2 = RMSprop(lr=INIT_LR, decay=1e-6)
autoencoder.compile(loss="mean_squared_error", optimizer=opt2)


with open('final_data.pickle', 'rb') as f:
    data = pickle.load(f)

with open('final_labels.pickle', 'rb') as f:
    labels = pickle.load(f)

#####
print(data.shape)
print(labels.shape)
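
fixed_generator above wraps a generator of input batches so that each batch is yielded as an (input, target) pair with the target equal to the input, which is what an autoencoder needs for reconstruction training. A hedged sketch of how it could be wired to the compiled autoencoder (the ImageDataGenerator.flow call, rescaling, and step counts are assumptions, not the project's actual training loop):

# Hedged usage sketch (assumed, not from the original project): train the
# autoencoder to reconstruct its input by pairing each batch with itself.
datagen = ImageDataGenerator(rescale=1.0 / 255)   # ImageDataGenerator is imported above
flow = datagen.flow(data, batch_size=BATCH_SIZE, shuffle=True)  # yields input batches only

autoencoder.fit_generator(
    fixed_generator(flow),                   # yields (batch, batch) pairs
    steps_per_epoch=len(data) // BATCH_SIZE,
    epochs=EPOCHS)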
Example #14
cwd = os.getcwd()
directory = saveDir + datetime.now().strftime("%d%b_%I%M%P_") + \
    str(nLayers) + 'L_' + str(K) + 'K_' + str(epochs) + 'E_' + gradientMethod

if not os.path.exists(directory):
    os.makedirs(directory)
sessFileName = directory + '/model'

#%% save test model
tf.reset_default_graph()

csmT = tf.placeholder(tf.complex64, shape=(None, 12, 256, 232), name='csm')
maskT = tf.placeholder(tf.complex64, shape=(None, 256, 232), name='mask')
atbT = tf.placeholder(tf.float32, shape=(None, 256, 232, 2), name='atb')

out = mm.makeModel(atbT, csmT, maskT, False, nLayers, K, gradientMethod)
predTst = out['dc' + str(K)]
predTst = tf.identity(predTst, name='predTst')
sessFileNameTst = directory + '/modelTst'

saver = tf.train.Saver()
with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    savedFile = saver.save(sess,
                           sessFileNameTst,
                           latest_filename='checkpointTst')
print('testing model saved:' + savedFile)
#%% read multi-channel dataset
trnOrg, trnAtb, trnCsm, trnMask = sf.getData('training')
trnOrg, trnAtb = sf.c2r(trnOrg), sf.c2r(trnAtb)
Example #15
    sv = tf.placeholder('float32',
                        shape=(None, featureDict["sv"]["max"],
                               len(featureDict["sv"]["branches"])),
                        name="sv")
    event = tf.placeholder('float32',
                           shape=(None,
                                  len(featureDict["globals"]["branches"])),
                           name="globals")

    print("cpf shape: ", cpf.shape.as_list())
    print("npf shape: ", npf.shape.as_list())
    print("sv shape: ", sv.shape.as_list())
    print("globals shape: ", event.shape.as_list())

    output, model = makeModel(len(featureDict["truth"]["branches"]),
                              cpf,
                              npf,
                              sv,
                              event,
                              isTraining=False)
    prediction = tf.nn.softmax(output, name="prediction")

    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    sess.run(init_op)
    model.loadVariables("all/model_epoch28.json", sess)

    const_graph = tf.graph_util.convert_variables_to_constants(
        sess, sess.graph.as_graph_def(), ["prediction"])
    tf.train.write_graph(const_graph, "all", "model_epoch28.pb", as_text=False)
Example #16
from data import getdata
from model import makeModel

# Now we want numpy for matrix math operations
import numpy as np

# for making the graph we will import matplotlib
from matplotlib import style
import matplotlib.pyplot as plt
import matplotlib.animation as animation

# getting the data (only 40% of the data)
data = getdata(40)

# making the model
model = makeModel(consts.lr, consts.th)

# initializing variables for the graph
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
'''
Now we will train the model with the live stock price graph.
We will make a function named animate, which will be the function
that iterates over time.

This function has a variable 'i', which is the number of iterations
done so far.

Then we will make a function that takes the number of iterations done
and returns the training data and labels.