def train(args):
    """Build the model/optimizer stack from ``args`` and run the training loop.

    Registers ignite handlers that log every iteration, and at the end of
    each epoch log training metrics and run one pass over the validation
    iterator.
    """
    iters, vocab = get_iterator(args)
    model = get_model(args, vocab)
    loss_fn = get_loss(args, vocab)
    optimizer = get_optimizer(args, model)
    trainer = get_trainer(args, model, loss_fn, optimizer)

    metrics = get_metrics(args, vocab)
    evaluator = get_evaluator(args, model, loss_fn, metrics)
    logger = get_logger(args)

    @trainer.on(Events.STARTED)
    def announce_start(engine):
        print("Begin Training")

    @trainer.on(Events.ITERATION_COMPLETED)
    def log_each_iteration(engine):
        log_results(logger, 'train/iter', engine.state, engine.state.iteration)

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_and_validate_epoch(engine):
        # Log the finished training epoch, then evaluate on the val split.
        log_results(logger, 'train/epoch', engine.state, engine.state.epoch)
        state = evaluate_once(evaluator, iterator=iters['val'])
        log_results(logger, 'valid/epoch', state, engine.state.epoch)

    trainer.run(iters['train'], max_epochs=args.max_epochs)
def train(args):
    """Resume (if a checkpoint exists) and run the training loop.

    Like the plain training entry point, but the model/iterators/vocab come
    from ``get_model_ckpt`` and a checkpoint is saved after every epoch's
    validation pass.
    """
    args, model, iters, vocab, ckpt_available = get_model_ckpt(args)
    if ckpt_available:
        print("loaded checkpoint {}".format(args.ckpt_name))

    loss_fn = get_loss(args, vocab)
    optimizer = get_optimizer(args, model)
    trainer = get_trainer(args, model, loss_fn, optimizer)

    metrics = get_metrics(args, vocab)
    evaluator = get_evaluator(args, model, loss_fn, metrics)
    logger = get_logger(args)

    @trainer.on(Events.STARTED)
    def announce_start(engine):
        print("Begin Training")

    @trainer.on(Events.ITERATION_COMPLETED)
    def log_each_iteration(engine):
        log_results(logger, 'train/iter', engine.state, engine.state.iteration)

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_validate_and_checkpoint(engine):
        # Per-epoch bookkeeping: log train metrics, evaluate, then persist.
        log_results(logger, 'train/epoch', engine.state, engine.state.epoch)
        state = evaluate_once(evaluator, iterator=iters['val'])
        log_results(logger, 'valid/epoch', state, engine.state.epoch)
        save_ckpt(args, engine.state.epoch, engine.state.metrics['loss'],
                  model, vocab)

    trainer.run(iters['train'], max_epochs=args.max_epochs)
def train(args):
    """Optionally pretrain, then train, checkpointing after every epoch.

    Two ignite engines are built over the same model/loss/optimizer: a
    ``pretrainer`` (run only when ``args.pretrain_epochs > 0``) and the main
    ``trainer``.  The per-epoch trainer handler logs, validates, saves a
    checkpoint, and runs the logic-level breakdown evaluation.
    """
    args, model, iters, vocab, ckpt_available = get_model_ckpt(args)
    if ckpt_available:
        print("loaded checkpoint {}".format(args.ckpt_name))

    loss_fn = get_loss(args, vocab)
    optimizer = get_optimizer(args, model)
    pretrainer = get_pretrainer(args, model, loss_fn, optimizer)
    trainer = get_trainer(args, model, loss_fn, optimizer)

    metrics = get_metrics(args, vocab)
    evaluator = get_evaluator(args, model, loss_fn, metrics)
    logger = get_logger(args)

    # Handlers for the two engines get distinct names: the original reused
    # on_training_started/log_iter_results/evaluate_epoch for both, which
    # worked (registration happens at decoration time) but shadowed the
    # earlier definitions and read as a bug.
    @pretrainer.on(Events.STARTED)
    def on_pretraining_started(engine):
        print("Begin Pretraining")

    @pretrainer.on(Events.ITERATION_COMPLETED)
    def log_pretrain_iter(engine):
        log_results(logger, 'pretrain/iter', engine.state,
                    engine.state.iteration)

    @pretrainer.on(Events.EPOCH_COMPLETED)
    def log_pretrain_epoch(engine):
        log_results(logger, 'pretrain/epoch', engine.state, engine.state.epoch)

    # NOTE(review): a disabled Events.COMPLETED handler that unfroze
    # model.module.language_model.base_model parameters was removed here as
    # dead code; restore from history if LM unfreezing is needed again.

    @trainer.on(Events.STARTED)
    def on_training_started(engine):
        print("Begin Training")

    @trainer.on(Events.ITERATION_COMPLETED)
    def log_iter_results(engine):
        log_results(logger, 'train/iter', engine.state, engine.state.iteration)

    @trainer.on(Events.EPOCH_COMPLETED)
    def evaluate_epoch(engine):
        # Log the epoch, validate, checkpoint, and report per-logic-level
        # accuracy on the validation split.
        log_results(logger, 'train/epoch', engine.state, engine.state.epoch)
        state = evaluate_once(evaluator, iterator=iters['val'])
        log_results(logger, 'valid/epoch', state, engine.state.epoch)
        log_results_cmd('valid/epoch', state, engine.state.epoch)
        save_ckpt(args, engine.state.epoch, engine.state.metrics['loss'],
                  model, vocab)
        evaluate_by_logic_level(args, model, iterator=iters['val'])

    if args.pretrain_epochs > 0:
        pretrainer.run(iters['pretrain'], max_epochs=args.pretrain_epochs)
    trainer.run(iters['train'], max_epochs=args.max_epochs)
def evaluate(args):
    """Load a checkpointed model and evaluate it once on the validation set."""
    args, model, iters, vocab, ckpt_available = get_model_ckpt(args)
    if ckpt_available:
        print("loaded checkpoint {}".format(args.ckpt_name))

    loss_fn = get_loss(args, vocab)
    metrics = get_metrics(args, vocab)
    evaluator = get_evaluator(args, model, loss_fn, metrics)

    # Single validation pass; epoch index is reported as 0.
    state = evaluate_once(evaluator, iterator=iters['val'])
    log_results_cmd('valid/epoch', state, 0)
def MyMetrics(model):
    """Compute detection precision, recall, and F1 on CRCHisto images 81-100.

    For each existing test image the model's class-1 probability map is
    resized back to 500x500, converted to boxes via non-max suppression,
    and box centers are matched against the ground-truth detections.

    Args:
        model: detection network; called on a (1, C, 256, 256) CUDA tensor
            and assumed to return log-probability maps (they are exp'd).

    Returns:
        (precision, recall, f1_score) accumulated over all processed images.
    """
    path = './CRCHisto'
    gtpath = './CRCHistoPhenotypes_2016_04_28/Detection'
    tp_num = 0
    gt_num = 0
    pred_num = 0

    for i in range(81, 101):
        filename = os.path.join(path, 'img' + str(i) + '.bmp')
        if not os.path.exists(filename):
            continue
        imgname = 'img' + str(i)

        # Preprocess: 256x256, normalize to roughly [-1, 1), NHWC -> NCHW.
        img = misc.imread(filename)
        img = misc.imresize(img, (256, 256))
        img = (img - 128.) / 128.
        img = img.reshape((1, img.shape[0], img.shape[1], img.shape[2]))
        img = np.transpose(img, (0, 3, 1, 2))
        img = torch.Tensor(img).cuda()

        # Forward pass -> class-1 probability map at original resolution.
        result = model(img)
        result = result.cpu().detach().numpy()
        result = np.transpose(result, (0, 2, 3, 1))[0]
        result = np.exp(result)  # model output assumed log-probabilities
        result = result[:, :, 1]
        result = misc.imresize(result, (500, 500))
        result = result / 255.

        boxes = non_max_suppression(result)

        matname = imgname + '_detection.mat'
        matpath = os.path.join(gtpath, imgname, matname)
        gt = sio.loadmat(matpath)['detection']

        # BUG FIX: the box loop reused `i`, shadowing the image index above;
        # use a separate variable for the box index.
        pred = []
        for b in range(boxes.shape[0]):
            x1, y1, x2, y2 = boxes[b, 0], boxes[b, 1], boxes[b, 2], boxes[b, 3]
            cx = int(x1 + (x2 - x1) / 2)
            cy = int(y1 + (y2 - y1) / 2)
            pred.append([cx, cy])

        p, r, f1, tp = get_metrics(gt, pred)
        tp_num += tp
        gt_num += gt.shape[0]
        pred_num += len(pred)

    # Final scores computed once from the accumulated counts (the original
    # recomputed them every iteration; only the last values were used).
    # `epsilon` is assumed to be a module-level smoothing constant.
    precision = tp_num / (pred_num + epsilon)
    recall = tp_num / (gt_num + epsilon)
    f1_score = 2 * (precision * recall / (precision + recall + epsilon))
    return precision, recall, f1_score
def train(args):
    """Resume from a checkpoint if available and run the training engine.

    Builds loss/optimizer/trainer plus the (currently unused here)
    metrics/evaluator/logger stack, then runs the trainer over the training
    iterator for ``args.max_epochs`` epochs.
    """
    args, model, iters, ckpt_available = get_model_ckpt(args)
    if ckpt_available:
        print("loaded checkpoint {}".format(args.ckpt_name))

    loss_fn = get_loss(args)
    optimizer = get_optimizer(args, model)
    trainer = get_trainer(args, model, loss_fn, optimizer)

    metrics = get_metrics(args)
    evaluator = get_evaluator(args, model, loss_fn, metrics)
    logger = get_logger(args)

    # BUG FIX: original read `trainer.run(iters['train']), max_epochs=...)`
    # -- a misplaced parenthesis that was a syntax error and dropped the
    # epoch bound from the call.
    trainer.run(iters['train'], max_epochs=args.max_epochs)
def pretrain(args):
    """Run the pretraining stage over the full ('none'-split) dataset.

    Loads a checkpoint when one is available, assembles the optimizer /
    scheduler / trainer stack, registers iteration- and epoch-level logging
    handlers (the epoch handler also checkpoints), and runs for
    ``args.epoch`` epochs.
    """
    tf = get_transform(args, 'none')
    ds = get_dataset(args, tf, 'none')

    args, model, ckpt_available = get_model_ckpt(args)
    if ckpt_available:
        print("loaded checkpoint {} in pretraining stage".format(
            args.ckpt_name))

    loss_fn = get_loss(args)
    sub_optimizer = get_sub_optimizer(args, model)
    optimizer = get_optimizer(args, sub_optimizer)
    scheduler = get_scheduler(args, optimizer)

    # nvidia/apex AMP setup, currently disabled:
    # model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level, num_losses=1)
    # model = idist.auto_model(model)

    trainer = get_trainer(args, model, loss_fn, optimizer, scheduler)
    metrics = get_metrics(args)
    logger = get_logger(args)

    @trainer.on(Events.STARTED)
    def announce_start(engine):
        print("Begin Pretraining")

    @trainer.on(Events.ITERATION_COMPLETED)
    def log_each_batch(engine):
        # batch-wise logging
        log_results(logger, 'pretrain/iter', engine.state,
                    engine.state.iteration)

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_and_checkpoint_epoch(engine):
        # epoch-wise logging plus checkpoint
        log_results(logger, 'pretrain/epoch', engine.state, engine.state.epoch)
        log_results_cmd(logger, 'pretrain/epoch', engine.state,
                        engine.state.epoch)
        save_ckpt(args, engine.state.epoch, engine.state.metrics['loss'],
                  model)

    trainer.run(ds, max_epochs=args.epoch)
# Main training loop: 100 epochs; after every epoch, dump predictions for
# the non-augmented train and test loaders to CSV and report accuracy.
for epoch in range(100):
    # One training pass over the augmented training data.
    train_model(model, tr_data_loader, criterion, optimizer, gpu_id=1)

    if epoch % 1 == 0:
        print('\nepoch %d:' % epoch)

        # Accuracy on the (non-augmented) training split.
        val = validate_model(model, tr_noaug_data_loader, criterion, epoch,
                             prediction_folder='../results/train_data/',
                             gpu_id=1)
        cvs_path = 'ep_' + str(epoch) + '_predictions.csv'
        arr = cvs_to_arr(cvs_file='../results/train_data/' + cvs_path)
        metrics = get_metrics(arr)
        print('train_accuracy', metrics[0])

        # Accuracy on the test split.
        val = validate_model(model, ts_noaug_data_loader, criterion, epoch,
                             prediction_folder='../results/test_data/',
                             gpu_id=1)
        cvs_path = 'ep_' + str(epoch) + '_predictions.csv'
        arr = cvs_to_arr(cvs_file='../results/test_data/' + cvs_path)
        metrics = get_metrics(arr)
        print('test_accuracy', metrics[0])
        # validate the validation model for every 1 epoch
# NOTE(review): removed unresolved merge-conflict residue here — a stray
# `=======` marker separating two duplicated fragments of the `pretrain`
# handler-registration code that already exists above in full. The fragment
# referenced `trainer`/`logger`/`ds` from function scope and was not valid
# at module level; no behavior is lost by deleting it.