Example #1
def main():
    trainer = Trainer()
    trainer.prepare_for_train(random_id=True, hist="hist", ifDense_GCN=True,
                              feedback=True, batch_size=128, epoch_num=40,
                              val=0.2)
    trainer.fit()
    #trainer.save_model("model.pkl")
    #trainer.load_model("model.pkl")
    trainer.score()
Example #2
def train_test(config, logger):
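    """Run k-fold training and evaluation, then log the averaged metrics."""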

    tot_acc = 0
    tot_prec = 0
    tot_rec = 0
    tot_f1 = 0
    tot_auc = 0

    total_start_time = time.time()
    # Training for each fold
    for i in range(config.kfold):
        # To match the output filenames
        k = str(i + 1)

        # Load data iterator
        dataloader = Dataloader(config, k)

        # Load model
        if config.model == 'cnn':
            arch = CNN(config, dataloader).to(config.device)
        else:
            arch = LSTM(config, dataloader).to(config.device)

        # Print network configuration
        logger.info(arch)

        # Trainer
        model = Trainer(config, logger, dataloader, arch, k)

        # Train
        if not config.eval:
            logger.info("**************Training started !!!**************\n")
            logger.info("Starting training on {0}th-fold".format(k))
            model.fit()

        # Test
        logger.info("**************Testing Started !!!**************\n")
        model.load_checkpoint()
        acc, prec, rec, f1, auc = model.predict()
        logger.info(
            "Accuracy: %6.3f Precision: %6.3f Recall: %6.3f FB1: %6.3f AUC: %6.3f"
            % (acc, prec, rec, f1, auc))
        logger.info("***********************************************\n")

        # Calculate the metrics
        tot_acc += acc
        tot_prec += prec
        tot_rec += rec
        tot_f1 += f1
        tot_auc += auc

    total_end_time = time.time()

    # Display final results
    epoch_mins, epoch_secs = utilities.epoch_time(total_start_time,
                                                  total_end_time)
    logger.info("Epoch Time: %dm %ds" % (epoch_mins, epoch_secs))
    logger.info(
        "Final_Accuracy;%6.3f;Final_Precision;%6.3f;Final_Recall;%6.3f;Final_FB1;%6.3f;Final_AUC;%6.3f "
        % (tot_acc / config.kfold, tot_prec / config.kfold, tot_rec /
           config.kfold, tot_f1 / config.kfold, tot_auc / config.kfold))
Example #3
def main(config_fn, data_verbose=False):
    """"""
    # load config file
    config_fn = os.path.abspath(config_fn)
    config = load_config(config_fn)

    # launch data servers
    with data_context(config_fn,
                      which_set=['train', 'valid'],
                      verbose=data_verbose) as data_streams:

        # initialize trainer
        trainer = Trainer(config, data_streams)

        # train
        trainer.fit()
Example #4
def process(self):
    """
    Trains one model per row of a csv file containing the hyper_params.
    After a model is trained, its results, model parameters, coefficient mask
    and hyperparameters are saved in analysis_folder. Models are numbered in
    the same order as they appear in hyper_params.csv.
    """
    rng = jax.random.PRNGKey(-1)
    rng_batch = npr.RandomState(5)
    for j, hyper_params in self.hyper_params_df.iterrows():
        hyper_params = hyper_params.to_dict()
        path_save = self.analysis_folder + '/' + str(j)
        # Skip runs whose results have already been saved.
        if os.path.isfile(path_save):
            continue
        else:
            loader = Loader(hyper_params)
            train_set = hyper_params['training_set']
            X, dX, X_eval, dX_eval, t = self.data[train_set]
            # Number of full batches per epoch; the remainder is dropped.
            loader.hyper_params['num_batches'], _ = divmod(
                X.shape[0], loader.hyper_params['batch_size'])
            loader.hyper_params['x_dim'] = X.shape[1]
            model, hyper_params = loader.create_model(rng)
            trainer = Trainer(X, dX, X_eval, dX_eval, t, model,
                              hyper_params)
            results, params, coeff_mask = trainer.fit(rng_batch)
            with open(path_save, 'wb') as fp:
                pickle.dump([results, params, coeff_mask, hyper_params],
                            fp)
Example #5
def experiment(trial: optuna.Trial, train_dl, valid_dl, *,
               device='cpu', n_classes, n_epochs, input_shape):
    net = Net(input_shape, n_classes, trial).to(device)
    trainer = Trainer(trial, n_epochs=n_epochs, device=device)

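    # Report intermediate accuracy to Optuna so unpromising trials are pruned early.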
    def epoch_end_callback(epoch, _, acc):
        trial.report(acc, epoch)
        if trial.should_prune():  # current Optuna versions take no step argument
            raise optuna.exceptions.TrialPruned()

        if acc < 0.05:  # Prune really bad runs
            raise optuna.exceptions.TrialPruned()

    losses, accuracies = trainer.fit(net, train_dl, valid_dl,
                                     epoch_end_callback=epoch_end_callback)
    return accuracies[-1]
Example #6
File: main.py Project: jnepal/nepali-ner
def main():
    """
        Main File
    """
    # Parse argument
    config, logger = parse_args()

    if config.kfold > 0 and not config.eval:
        logger.info("Splitting dataset into {0}-fold".format(config.kfold))
        splitter.main(input_file=config.data_file,
                      output_dir=config.root_path,
                      verbose=config.verbose,
                      kfold=config.kfold,
                      pos=config.use_pos,
                      log_file=config.data_log)

    tot_acc = 0
    tot_prec = 0
    tot_rec = 0
    tot_f1 = 0

    for i in range(config.kfold):
        # To match the output filenames
        k = str(i + 1)

        if not config.eval:
            logger.info("Starting training on {0}th-fold".format(k))

        # Load data iterator
        dataloader = Dataloader(config, k)

        # Debugging purpose. Don't delete
        #         sample = next(iter(train_iter))
        #         print(sample.TEXT)

        # Load model
        if config.use_char or config.use_graph:
            assert config.use_char ^ config.use_graph, "Either use Character-Level or Grapheme-Level. Not both!!!"
            lstm = CharLSTMTagger(config, dataloader).to(config.device)
        else:
            lstm = LSTMTagger(config, dataloader).to(config.device)

        # Print network configuration
        logger.info(lstm)

        model = Trainer(config, logger, dataloader, lstm, k)

        if not config.eval:
            # Train
            logger.info("Training started !!!")
            model.fit()

        # Test
        model.load_checkpoint()
        logger.info("Testing Started !!!")
        acc, prec, rec, f1 = model.predict()
        logger.info(
            "Accuracy: %6.2f%%; Precision: %6.2f%%; Recall: %6.2f%%; FB1: %6.2f "
            % (acc, prec, rec, f1))

        tot_acc += acc
        tot_prec += prec
        tot_rec += rec
        tot_f1 += f1

    logger.info(
        "Final Accuracy: %6.2f%%; Final Precision: %6.2f%%; Final Recall: %6.2f%%; Final FB1: %6.2f "
        % (tot_acc / config.kfold, tot_prec / config.kfold,
           tot_rec / config.kfold, tot_f1 / config.kfold))
Example #7
def configure_model():
    # NOTE: `config` and `loaders` are assumed to be module-level globals here.
    if config.dataset == 'mnist':
        model = SimpleLinear(in_size=28 * 28, out_size=10)
    else:
        num_langs = len(loaders[0].dataset.langs)
        vocab_size = len(loaders[0].dataset.char2index)

        model = SimpleRNN(in_size=vocab_size, hidden_size=config.hidden_size, out_size=num_langs)

    return model


if __name__ == '__main__':
    data_dir = Path(__file__).parent / 'data/'
    data_dir.mkdir(parents=True, exist_ok=True)

    loaders = configure_dataloaders(data_dir)

    model = configure_model()

    trainer = Trainer(
        model=model,
        train_loader=loaders[0],
        valid_loader=loaders[1],
    )

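    # Let Ctrl-C stop training cleanly instead of raising out of fit().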
    try:
        trainer.fit()
    except KeyboardInterrupt:
        exit(0)

Example #8
def UDD_preprocessing(image, mask):
    image_transformer = transforms.Compose([
        NewPad(),
        transforms.ToTensor(),
        transforms.Normalize([0.3967, 0.4193, 0.4018],
                             [0.1837, 0.1673, 0.1833])
    ])
    return image_transformer(image).float(), mask


train_image_path = './data/UDD5/train/splitted/src/'
train_label_path = './fast_scnn/data/UDD5/train/splitted/gt/'
test_image_path = './fast_scnn/data/UDD5/val/splitted/src/'
test_label_path = './fast_scnn/data/UDD5/val/splitted/gt/'
ds_train = UDD(train_image_path, train_label_path, transform=UDD_preprocessing)
ds_test = UDD(test_image_path, test_label_path, transform=UDD_preprocessing)

dl_train = DataLoader(ds_train, batch_size, shuffle=True)
dl_test = DataLoader(ds_test, batch_size, shuffle=False)

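# FastSCNN with an output head for the five UDD5 classes.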
model = FastSCNN(num_classes=5)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
criterion = CrossEntropyLoss()
success_metric = pixel_accuracy
trainer = Trainer(model, criterion, optimizer, success_metric, device, None)
fit_res = trainer.fit(dl_train,
                      dl_test,
                      num_epochs=num_epochs,
                      checkpoints='checkpoints/' + model.__class__.__name__ +
                      datetime.datetime.today().strftime("%m_%d"))

print(fit_res)
Example #9
trainer = Trainer(
    model=model,
    optimizer=optimizer,
    criterion=FocalDiceCoefLoss(d_weight=[1, 10, 20]),
    metrics={
        'Kd Dsc': DiceCoef(weight=[0, 1, 0]),
        'Ca Dsc': DiceCoef(weight=[0, 0, 1])
    },
    scheduler=scheduler,
    tr_transform=tr_transform,
    vd_transform=vd_transform,
)
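# Save the freshly initialized weights before any training.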
trainer.save('init.pt')

log_dir = "logs/Task00_Kidney/att-res-{}".format(
    datetime.now().strftime("%H%M%S"))
trainer.load('logs/Task00_Kidney/att-res-065552-last.pt')
trainer.fit(
    case_set,
    batch_size=1,
    epochs=500,
    # valid_split=0.2,
    num_samples=250,
    log_dir=log_dir,
    save_dir=log_dir,
    save_last=True,
    save_best=True,
    num_workers=2,
    pin_memory=True)
Example #10
#gpu_device=0
#cuda.get_device(gpu_device).use()
#model.to_gpu(gpu_device)

tr_im = loadim(a=-1, j="trainimages")
tr_ma = loadim(a=-1, j="trainmasks")
#teimage=loadim(a=-1,j="test")

#trdepth,tedepth=loadcsv()
#trdepth=sorted(trdepth,key=lambda t:t[0])
#tedepth=sorted(tedepth,key=lambda t:t[0])
#tr_dep=[int(t[1]) for t in trdepth]
#te_dep=[int(t[1]) for t in tedepth]
#tr_im_ma_dep=[[tr_im[i],tr_ma[i],tr_dep[i]] for i in range(4000)]
#tr_im_ma_dep=sorted(tr_im_ma_dep,key=lambda t:t[2])

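# Move the training images and masks onto the GPU up front.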
gpu_device = 0
X = cuda.to_gpu(tr_im, device=gpu_device)
Y = cuda.to_gpu(tr_ma, device=gpu_device)

model = ResNet()
cuda.get_device(gpu_device).use()
model.to_gpu(gpu_device)

trainer = Trainer(model)

trainer.fit(X, Y)  # train on the images and their masks

df_loss = pd.DataFrame(trainer.loss)
df_loss.to_csv('loss.csv')