Example #1
File: cli.py Project: csold/dga_detector
def train_model(data_path='data/dga_domains.csv',
                model_path='models/model.pickle'):
    df = pd.read_csv(data_path)
    X = extract_features(df)
    y = (df['class'] == 'dga').astype(int)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

    train(X_train, y_train, model_path)

    y_pred = pred(X_test, model_path)
    print(classification_report(y_test, y_pred))
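
The CLI above leans on helpers defined elsewhere in the project (extract_features, train, pred). As an illustration only, with the 'domain' column name, the feature set and the classifier all being assumptions rather than the project's actual code, a minimal compatible version could look like this:

# Hypothetical sketch of the helpers the CLI assumes; csold/dga_detector's
# real implementations may differ.
import math
import pickle
from collections import Counter

import pandas as pd
from sklearn.ensemble import RandomForestClassifier


def extract_features(df):
    """Turn raw domain strings into simple numeric features."""
    domains = df['domain'].astype(str)
    entropy = domains.apply(
        lambda d: -sum(c / len(d) * math.log2(c / len(d))
                       for c in Counter(d).values()))
    return pd.DataFrame({'length': domains.str.len(),
                         'digit_ratio': domains.str.count(r'\d') / domains.str.len(),
                         'entropy': entropy})


def train(X, y, model_path):
    clf = RandomForestClassifier(n_estimators=100)
    clf.fit(X, y)
    with open(model_path, 'wb') as f:
        pickle.dump(clf, f)


def pred(X, model_path):
    with open(model_path, 'rb') as f:
        clf = pickle.load(f)
    return clf.predict(X)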
Example #2
def train(model, dataloader, criterion, optimizer, num_epochs, model_save_dir):
    since = time.time()
    dataset_size = dataloader.dataset.len
    train_l = []
    valid_l = []
    running_loss = 0.0
    i = 0
    x = list()
    for epoch in range(num_epochs):
        print('------------------EPOCH {}/{}------------------'.format(epoch+1, num_epochs))
        model.train()
        x.append(epoch + 1)
        running_loss = 0.0  # reset the accumulator so epoch_loss reflects only this epoch
        # iterating over data
        for data in dataloader:
            # getting the inputs and labels
            x1, x2, y = data['previmg'], data['currimg'], data['currbb']
            # wrapping them in variable
            if use_gpu:
                x1, x2, y = Variable(x1.cuda()), Variable(x2.cuda()), Variable(y.cuda(), requires_grad=False)
            else:
                x1, x2, y = Variable(x1), Variable(x2), Variable(y, requires_grad=False)
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward
            output = model(x1, x2)
            loss = criterion(output, y)
            # backward + optimize
            loss.backward()
            optimizer.step()
            print('training epoch : %d, step : %d, loss : %f' % (epoch+1, i, loss.data.item()))
            i = i + 1
            running_loss += loss.data.item()

        epoch_loss = running_loss / dataset_size
        train_l.append(epoch_loss)
        print('-------------Loss: {:.4f} in epoch: {}-------------'.format(epoch_loss, epoch+1))
        val_loss = validation(model, criterion, epoch+1)
        print('Validation Loss: {:.4f}'.format(val_loss))
        valid_l.append(val_loss)

    path = model_save_dir + 'model_n_epoch_' + str(num_epochs) + '.pth'
    torch.save(model.state_dict(), path)

    # plotting the loss graphics both for validation and training.
    plot_loss_table(x, train_l, valid_l)

    time_elapsed = time.time() - since
    print('Training completed in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    return model
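
validation, plot_loss_table and use_gpu come from the surrounding module. A plausible matplotlib-based plot_loss_table, sketched here under the assumption that it simply plots both loss curves against the epoch index, might be:

# Hypothetical plot_loss_table: plots per-epoch training and validation loss.
import matplotlib.pyplot as plt


def plot_loss_table(epochs, train_losses, valid_losses):
    plt.figure()
    plt.plot(epochs, train_losses, label='train loss')
    plt.plot(epochs, valid_losses, label='validation loss')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend()
    plt.savefig('loss_curve.png')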
Example #3
def hyper_tuning(hparams, tuning_path):
    with open(tuning_path) as f:
        tuning_params = yaml.load(f, Loader=yaml.FullLoader)

    combinations = build_parameter_combinations(tuning_params)
    for combination_id, combination in enumerate(combinations):
        print("Combination: {} / {}".format(combination_id, len(combinations)))
        for key, value in combination.items():
            node = hparams
            path = key.split("|")
            for i, k in enumerate(path):
                if i == len(path) - 1:
                    node[k] = value
                else:
                    node = node[k]

        try:
            train(hparams)
        except Exception:
            traceback.print_exc()
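
build_parameter_combinations is defined elsewhere; a minimal sketch consistent with how its output is consumed above (a list of dicts keyed by the "|"-joined parameter paths, covering the full cartesian product of candidate values) could be:

# Hypothetical build_parameter_combinations: expands {"a|b": [v1, v2], ...}
# into one dict per combination of candidate values.
import itertools


def build_parameter_combinations(tuning_params):
    keys = list(tuning_params.keys())
    value_lists = [tuning_params[k] for k in keys]
    return [dict(zip(keys, values))
            for values in itertools.product(*value_lists)]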
Example #4
def train():
    tot_loss = 0.0
    tot_correct = 0
    tot_lsl = 0.0
    tot_lss_1 = 0.0
    tot_lss_2 = 0.0
    tot_lsd = 0.0
    for inputs, labels in train_loader:
        inputs = inputs.to(device)
        labels = labels.to(device)

        # CutMix regularizer
        label_original = F.one_hot(labels, 10)
        lam = np.random.beta(cutmix_beta, cutmix_beta)
        rand_index = torch.randperm(inputs.size()[0])
        x_cutmix = inputs.clone().detach()
        x_a = inputs[rand_index, :, :, :]
        labels_a = labels[rand_index]
        bbx1, bby1, bbx2, bby2 = rand_bbox(inputs.size(), lam)
        M = torch.zeros((inputs.size()[-2], inputs.size()[-1]))

        M = M.to(device)

        M[bbx1:bbx2, bby1:bby2] = 1
        x_cutmix[:, :, bbx1:bbx2, bby1:bby2] = x_a[:, :, bbx1:bbx2, bby1:bby2]
        lam = ((bbx2 - bbx1) * (bby2 - bby1) /
               (inputs.size()[-1] * inputs.size()[-2]))
        label_cutmix = lam * label_original[rand_index, :] + (
            1 - lam) * label_original

        # x_a
        model.eval()
        with torch.no_grad():
            _dummy1, _dummy2, _dummy3, Y_a = model(x_a)
        # CutMix
        model.train()
        optimizer.zero_grad()
        outputs, pool_outputs, M_hat, Y_cutmix = model(x_cutmix)

        # Resize M to H0 * W0
        M = M.unsqueeze(dim=0).unsqueeze(dim=1)
        M = M.repeat(inputs.size()[0], 1, 1, 1)
        M_resizer = torch.nn.MaxPool2d(int(M.size()[-1] / M_hat.size()[-1]))
        M = M_resizer(M)

        lsl = lam * criterion_ce(outputs, labels_a) + (1 - lam) * criterion_ce(
            outputs, labels)
        lss_1 = criterion_lss1(M_hat, M)
        lss_2 = criterion_lss2(M[0, 0, :, :] * Y_cutmix, M[0, 0, :, :] * Y_a)
        lsd = criterion_lss2(outputs, pool_outputs.detach()) + 0.5 * (
            lam * criterion_ce(pool_outputs, labels_a) +
            (1 - lam) * criterion_ce(pool_outputs, labels))

        # loss = lsl + lss_1 + lss_2 + lsd
        loss = lsl
        loss.backward()
        optimizer.step()

        _, preds = torch.max(outputs, 1)
        _, labels = torch.max(label_cutmix, 1)

        tot_loss += loss.item() * inputs.size(0)
        tot_correct += torch.sum(preds == labels.data).item()
        tot_lsl += lsl.item() * inputs.size(0)
        tot_lss_1 += lss_1.item() * inputs.size(0)
        tot_lss_2 += lss_2.item() * inputs.size(0)
        tot_lsd += lsd.item() * inputs.size(0)

    len_ = len(train_loader.dataset)
    epoch_loss = tot_loss / len_
    epoch_acc = tot_correct / len_
    epoch_lsl = tot_lsl / len_
    epoch_lss_1 = tot_lss_1 / len_
    epoch_lss_2 = tot_lss_2 / len_
    epoch_lsd = tot_lsd / len_

    return epoch_loss, epoch_acc, epoch_lsl, epoch_lss_1, epoch_lss_2, epoch_lsd
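
rand_bbox is not shown in this snippet; the commonly used CutMix reference implementation, which this code presumably mirrors, samples a box whose area fraction is roughly 1 - lam:

# rand_bbox as in the widely used CutMix reference code; the project's own
# version is assumed to behave the same way.
import numpy as np


def rand_bbox(size, lam):
    W, H = size[2], size[3]
    cut_rat = np.sqrt(1. - lam)
    cut_w = int(W * cut_rat)
    cut_h = int(H * cut_rat)

    # uniformly sample the box centre, then clip the box to the image bounds
    cx = np.random.randint(W)
    cy = np.random.randint(H)
    bbx1 = np.clip(cx - cut_w // 2, 0, W)
    bby1 = np.clip(cy - cut_h // 2, 0, H)
    bbx2 = np.clip(cx + cut_w // 2, 0, W)
    bby2 = np.clip(cy + cut_h // 2, 0, H)
    return bbx1, bby1, bbx2, bby2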
Example #5
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)

# training params
epochs = 10
counter = 0
print_every = 1000
clip = 5  # gradient clipping

# move model to GPU, if available
if (train_on_gpu):
    model.cuda()

for e in range(epochs):
    model.train()
    # batch loop
    for inputs, labels in tqdm(Train_loader, total=len(Train_loader)):
        counter += 1
        batch_size = inputs.shape[0]
        h = model.init_hidden(batch_size)
        if (train_on_gpu):
            inputs, labels = inputs.cuda(), labels.cuda()

        # Creating new variables for the hidden state, otherwise
        # we'd backprop through the entire training history
        h = tuple([each.data for each in h])

        # zero accumulated gradients
        model.zero_grad()
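
        # The batch loop above is cut off after model.zero_grad(). A typical
        # continuation for this kind of recurrent classifier, assuming
        # model(inputs, h) returns (output, hidden) and the labels are binary,
        # would be:
        output, h = model(inputs, h)
        loss = criterion(output.squeeze(), labels.float())
        loss.backward()
        # clip gradients to prevent exploding gradients in the RNN
        nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()

        if counter % print_every == 0:
            print("Epoch: {}/{}... Step: {}... Loss: {:.6f}".format(
                e + 1, epochs, counter, loss.item()))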
Example #6
def train(model, optimizer, scheduler, global_step, train_dataset, dev_dataset,
          opt, collator, best_eval_loss):

    if opt.is_main:
        try:
            tb_logger = torch.utils.tensorboard.SummaryWriter(
                Path(opt.checkpoint_dir) / opt.name)
        except Exception:
            tb_logger = None
            logger.warning('Tensorboard is not available.')
    train_sampler = DistributedSampler(
        train_dataset) if opt.is_distributed else RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset,
                                  sampler=train_sampler,
                                  batch_size=opt.per_gpu_batch_size,
                                  drop_last=True,
                                  num_workers=10,
                                  collate_fn=collator)

    loss, curr_loss = 0.0, 0.0
    epoch = 1
    model.train()
    while global_step < opt.total_steps:
        if opt.is_distributed:
            train_sampler.set_epoch(epoch)
        epoch += 1
        for i, batch in enumerate(train_dataloader):
            global_step += 1
            (idx, question_ids, question_mask, passage_ids, passage_mask,
             gold_score) = batch
            _, _, _, train_loss = model(
                question_ids=question_ids.cuda(),
                question_mask=question_mask.cuda(),
                passage_ids=passage_ids.cuda(),
                passage_mask=passage_mask.cuda(),
                gold_score=gold_score.cuda(),
            )

            train_loss.backward()

            if global_step % opt.accumulation_steps == 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), opt.clip)
                optimizer.step()
                scheduler.step()
                model.zero_grad()

            train_loss = src.util.average_main(train_loss, opt)
            curr_loss += train_loss.item()

            if global_step % opt.eval_freq == 0:
                eval_loss, inversions, avg_topk, idx_topk = evaluate(
                    model, dev_dataset, collator, opt)
                if eval_loss < best_eval_loss:
                    best_eval_loss = eval_loss
                    if opt.is_main:
                        src.util.save(model, optimizer, scheduler, global_step,
                                      best_eval_loss, opt, dir_path,
                                      'best_dev')
                model.train()
                if opt.is_main:
                    log = f"{global_step} / {opt.total_steps}"
                    log += f" -- train: {curr_loss/opt.eval_freq:.6f}"
                    log += f", eval: {eval_loss:.6f}"
                    log += f", inv: {inversions:.1f}"
                    log += f", lr: {scheduler.get_last_lr()[0]:.6f}"
                    for k in avg_topk:
                        log += f" | avg top{k}: {100*avg_topk[k]:.1f}"
                    for k in idx_topk:
                        log += f" | idx top{k}: {idx_topk[k]:.1f}"
                    logger.info(log)

                    if tb_logger is not None:
                        tb_logger.add_scalar("Evaluation", eval_loss,
                                             global_step)
                        tb_logger.add_scalar("Training",
                                             curr_loss / (opt.eval_freq),
                                             global_step)
                    curr_loss = 0

            if opt.is_main and global_step % opt.save_freq == 0:
                src.util.save(model, optimizer, scheduler, global_step,
                              best_eval_loss, opt, dir_path,
                              f"step-{global_step}")
            if global_step > opt.total_steps:
                break
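
src.util.average_main is part of the project's utilities; a sketch of what it presumably does (average a scalar loss tensor across distributed workers so the main process logs a global mean, and pass it through unchanged otherwise; an assumption, not a copy of the real helper) is:

# Sketch of an average_main-style helper under the assumptions stated above.
import torch.distributed as dist


def average_main(x, opt):
    if not opt.is_distributed:
        return x
    if opt.world_size > 1:
        # sum the scalar loss onto the main process, then divide by world size
        dist.reduce(x, dst=0, op=dist.ReduceOp.SUM)
        if opt.is_main:
            x = x / opt.world_size
    return x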
Example #7
def run():
    """Builds model, loads data, trains and evaluates"""
    model.load_data()  # twitter-uni, imdb, arxiv-10
    model.build()
    model.train()
    model.evaluate()
Example #8
            path = key.split("|")
            for i, k in enumerate(path):
                if i == len(path) - 1:
                    node[k] = value
                else:
                    node = node[k]

        try:
            train(hparams)
        except Exception:
            traceback.print_exc()


if __name__ == "__main__":
    args_parser = argparse.ArgumentParser()
    args_parser.add_argument("-c", "--config", type=str, default='config.yml',
                             help="Experiment Configurations ")
    args_parser.add_argument("-t", "--tuning", type=str, default=None,
                             help="Experiment Configurations ")
    run_args = args_parser.parse_args()
    config_path = run_args.config
    tuning_path = run_args.tuning

    with open(config_path) as f:
        hparams = yaml.load(f, Loader=yaml.FullLoader)

    if tuning_path:
        hyper_tuning(hparams, tuning_path)
    else:
        train(hparams)
Example #9
def run(args):
    print('Task 1: clear cell grade prediction')
    path = '/data/larson2/RCC_dl/new/clear_cell/'

    transform = {
        'train':
        transforms.Compose([
            transforms.Lambda(lambda x: torch.Tensor(x)),
            src.dataloader.Rescale(-160, 240,
                                   zero_center=True),  # reset dynamic range
            transforms.Lambda(
                lambda x: x.repeat(3, 1, 1, 1).permute(3, 0, 1, 2)),
            #     src.dataloader.Normalize(),
            #     src.dataloader.Crop(110),
            #     src.dataloader.RandomCenterCrop(90),
            src.dataloader.RandomHorizontalFlip(),
            #     src.dataloader.RandomRotate(25),
            src.dataloader.Resize(256)
        ]),
        'val':
        transforms.Compose([
            transforms.Lambda(lambda x: torch.Tensor(x)),
            src.dataloader.Rescale(-160, 240,
                                   zero_center=True),  # reset dynamic range
            transforms.Lambda(
                lambda x: x.repeat(3, 1, 1, 1).permute(3, 0, 1, 2)),
            #       src.dataloader.Normalize(),
            #       src.dataloader.Crop(90),
            src.dataloader.Resize(256)
        ])
    }

    my_dataset = {
        'train':
        src.dataloader.RCCDataset_h5(path,
                                     mode='train',
                                     transform=transform['train']),
        'val':
        src.dataloader.RCCDataset_h5(path,
                                     mode='val',
                                     transform=transform['val'])
    }

    my_loader = {
        x: DataLoader(my_dataset[x], batch_size=1, shuffle=True, num_workers=4)
        for x in ['train', 'val']
    }

    print('train size: ', len(my_loader['train']))
    print('val size: ', len(my_loader['val']))

    ### Some Checkers
    print('Summary: ')
    print('\ttrain size: ', len(my_loader['train']))
    print('\tval size: ', len(my_loader['val']))
    print('\tDatatype = ', next(iter(my_loader['train']))[0].dtype)
    print('\tMin = ', next(iter(my_loader['train']))[0].min())
    print('\tMax = ', next(iter(my_loader['train']))[0].max())
    print('\tInput size', next(iter(my_loader['train']))[0].shape)
    #     print('\tweight = ', args.weight)

    ### Tensorboard Log Setup
    log_root_folder = "/data/larson2/RCC_dl/logs/"
    now = datetime.now()
    now = now.strftime("%Y%m%d-%H%M%S")
    logdir = os.path.join(
        log_root_folder,
        f"{now}_model_{args.model}_{args.prefix_name}_epoch_{args.epochs}_weight_{args.weight}_lr_{args.lr}_gamma_{args.gamma}_lrsche_{args.lr_scheduler}_{now}"
    )
    #     os.makedirs(logdir)
    print(f'\tlogdir = {logdir}')

    writer = SummaryWriter(logdir)

    ### Model Selection

    device = torch.device(
        "cuda:{}".format(args.gpu) if torch.cuda.is_available() else "cpu")

    model = src.model.TDNet()
    model = model.to(device)

    writer.add_graph(model, my_dataset['train'][0][0].to(device))

    print('\tCuda:', torch.cuda.is_available(), f'\n\tdevice = {device}')

    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=0.1)

    if args.lr_scheduler == "plateau":
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                               patience=3,
                                                               factor=.3,
                                                               threshold=1e-4,
                                                               verbose=True)
    elif args.lr_scheduler == "step":
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                    step_size=3,
                                                    gamma=args.gamma)

    pos_weight = torch.FloatTensor([args.weight]).to(device)
    criterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)

    ### Ready?
    best_val_loss = float('inf')
    best_val_auc = float(0)
    best_model_wts = copy.deepcopy(model.state_dict())
    iteration_change_loss = 0
    t_start_training = time.time()

    ### Here we go
    for epoch in range(args.epochs):
        current_lr = get_lr(optimizer)
        t_start = time.time()

        epoch_loss = {'train': 0., 'val': 0.}
        epoch_corrects = {'train': 0., 'val': 0.}

        epoch_acc = 0.0
        epoch_AUC = 0.0

        for phase in ['train', 'val']:
            if phase == 'train':
                if args.lr_scheduler == "step":
                    scheduler.step()
                model.train()
            else:
                model.eval()

            running_losses = []
            running_corrects = 0.
            y_trues = []
            y_probs = []
            y_preds = []

            print('lr: ', current_lr)
            for i, (inputs, labels, header) in enumerate(my_loader[phase]):
                optimizer.zero_grad()

                inputs = inputs.to(device)
                labels = labels.to(device)

                # forward
                # track history only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs.float())  # raw logits
                    probs = torch.sigmoid(
                        outputs)  # [0, 1] probability, shape = s * 1
                    preds = torch.round(
                        probs
                    )  # 0 or 1, shape = s * 1, prediction for each slice
                    pt_pred, _ = torch.mode(
                        preds, 0
                    )  # take majority vote, shape = 1, prediction for each patient

                    count0 = (preds == 0).sum().float()
                    count1 = (preds == 1).sum().float()
                    pt_prob = count1 / (preds.shape[0])

                    # convert label to slice level
                    loss = criterion(outputs, labels.repeat(
                        inputs.shape[1], 1))  # inputs shape = 1*s*3*256*256

                    # backward + optimize only if in training phases
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # multiple loss by slice num per batch?
                running_losses.append(loss.item())  # * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

                y_trues.append(int(labels.item()))
                y_probs.append(pt_prob.item())  # use ratio to get probability
                y_preds.append(pt_pred.item())

                writer.add_scalar(f'{phase}/Loss', loss.item(),
                                  epoch * len(my_loader[phase]) + i)
                writer.add_pr_curve(f'{phase}_pr_curve', y_trues, y_probs, 0)

                if (i % args.log_every == 0) & (i > 0):
                    print(
                        'Epoch: {0}/{1} | Single batch number : {2}/{3} | avg loss:{4} | Acc: {5:.4f} | lr: {6}'
                        .format(epoch + 1, args.epochs, i,
                                len(my_loader[phase]),
                                np.round(np.mean(running_losses), 4),
                                (running_corrects / len(my_loader[phase])),
                                current_lr))

            # epoch statistics
            epoch_loss[phase] = np.round(np.mean(running_losses), 4)
            epoch_corrects[phase] = (running_corrects / len(my_loader[phase]))

            cm = confusion_matrix(y_trues, y_preds, labels=[0, 1])
            src.helper.print_cm(cm, ['0', '1'])
            sens, spec, acc = src.helper.compute_stats(y_trues, y_preds)
            print('sens: {:.4f}'.format(sens))
            print('spec: {:.4f}'.format(spec))
            print('acc:  {:.4f}'.format(acc))
            print()

        print(
            'Summary | train loss: {0} | val loss: {1} | train acc: {2:.4f} | val acc: {3:.4f}'
            .format(epoch_loss['train'], epoch_loss['val'],
                    epoch_corrects['train'], epoch_corrects['val']))
        print('-' * 30)
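
get_lr is a small helper not shown here; its likely definition is simply reading the learning rate from the optimizer's first parameter group:

# Likely definition of the get_lr helper used above (an assumption).
def get_lr(optimizer):
    for param_group in optimizer.param_groups:
        return param_group['lr']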
Example #10
def train(model, optimizer, scheduler, step, train_dataset, eval_dataset, opt, collator, best_dev_em, checkpoint_path):

    if opt.is_main:
        try:
            tb_logger = torch.utils.tensorboard.SummaryWriter(Path(opt.checkpoint_dir)/opt.name)
        except Exception:
            tb_logger = None
            logger.warning('Tensorboard is not available.')

    torch.manual_seed(opt.global_rank + opt.seed) #different seed for different sampling depending on global_rank
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(
        train_dataset,
        sampler=train_sampler,
        batch_size=opt.per_gpu_batch_size,
        drop_last=True,
        num_workers=10,
        collate_fn=collator
    )

    loss, curr_loss = 0.0, 0.0
    epoch = 1
    model.train()
    while step < opt.total_steps:
        epoch += 1
        for i, batch in enumerate(train_dataloader):
            step += 1
            (idx, labels, _, context_ids, context_mask) = batch

            train_loss = model(
                input_ids=context_ids.cuda(),
                attention_mask=context_mask.cuda(),
                labels=labels.cuda()
            )[0]

            train_loss.backward()

            if step % opt.accumulation_steps == 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), opt.clip)
                optimizer.step()
                scheduler.step()
                model.zero_grad()

            train_loss = src.util.average_main(train_loss, opt)
            curr_loss += train_loss.item()

            if step % opt.eval_freq == 0:
                dev_em = evaluate(model, eval_dataset, tokenizer, collator, opt)
                model.train()
                if opt.is_main:
                    if dev_em > best_dev_em:
                        best_dev_em = dev_em
                        src.util.save(model, optimizer, scheduler, step, best_dev_em,
                                  opt, checkpoint_path, 'best_dev')
                    log = f"{step} / {opt.total_steps} |"
                    log += f"train: {curr_loss/opt.eval_freq:.3f} |"
                    log += f"evaluation: {100*dev_em:.2f}EM |"
                    log += f"lr: {scheduler.get_last_lr()[0]:.5f}"
                    logger.info(log)
                    if tb_logger is not None:
                        tb_logger.add_scalar("Evaluation", dev_em, step)
                        tb_logger.add_scalar("Training", curr_loss / (opt.eval_freq), step)
                    curr_loss = 0

            if opt.is_main and step % opt.save_freq == 0:
                src.util.save(model, optimizer, scheduler, step, best_dev_em,
                          opt, checkpoint_path, f"step-{step}")
            if step > opt.total_steps:
                break
Example #11
def train_keypoint_rcnn(data=None,
                        epochs: int = None,
                        lr: float = 1e-5,
                        pretrained: str = None):

    model = src.model.keypoint_rcnn

    if not isinstance(pretrained, str) and pretrained is not None:
        raise ValueError(
            f'Argument "pretrained" must be a path to a valid mask file, '
            f'not {pretrained} with type {type(pretrained)}')
    if epochs is None:
        epochs = 500

    if pretrained is not None:
        print('Loading...')
        model.load_state_dict(torch.load(pretrained))

    if torch.cuda.is_available(): device = 'cuda:0'
    else: device = 'cpu'

    # tests = KeypointData('/media/DataStorage/Dropbox (Partners HealthCare)/DetectStereocillia/data/keypoint_train_data')
    model = model.train().to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    for e in range(epochs):
        epoch_loss = []
        time_1 = time.clock_gettime_ns(1)
        for image, data_dict in data:
            for key in data_dict:
                data_dict[key] = data_dict[key].to(device)
            assert image.shape[1] == 3

            optimizer.zero_grad()
            loss = model(image.to(device), [data_dict])
            losses = 0
            for key in loss:
                losses += loss[key]
            losses.backward()
            epoch_loss.append(losses.item())
            optimizer.step()
        time_2 = time.clock_gettime_ns(1)

        delta_time = np.round((np.abs(time_2 - time_1) / 1e9) / 60, decimals=2)

        #  --------- This is purely to output a nice bar for training --------- #
        if e % 5 == 0:
            if e > 0:
                print('\b \b' * len(out_str), end='')
            progress_bar = '[' + '█' * int(np.round(e / epochs, decimals=1) * 10) + \
                           ' ' * int(
                (10 - np.round(e / epochs, decimals=1) * 10)) + f'] {np.round(100 * e / epochs, decimals=1)}%'

            out_str = f'epoch: {e} ' + progress_bar + f'| time remaining: {delta_time * (epochs-e)} min | epoch loss: {torch.tensor(epoch_loss).mean().item()}'
            print(out_str, end='')

        # If its the final epoch print out final string
        elif e == epochs - 1:
            print('\b \b' * len(out_str), end='')
            progress_bar = '[' + '█' * 10 + '] 100.0%'
            out_str = f'epoch: {epochs} ' + progress_bar + f'| time remaining: {0} min | epoch loss: {torch.tensor(epoch_loss).mean().item()}'
            print(out_str)

        torch.save(model.state_dict(), 'models/keypoint_rcnn.mdl')

    model.eval()
    out = model(image.unsqueeze(0).to(device))
Example #12
model = model.to(device)
test_iter = Iterator(test_data,
                     batch_size=16,
                     device=0,
                     sort=False,
                     sort_within_batch=False,
                     repeat=False)
for idx, batch in enumerate(train_iter):

    text_a, text_b, label = batch.sentence_a, batch.sentence_b, batch.similarity

N_EPOCHS = 30
CLIP = 1

best_valid_loss = float('inf')

BATCH_SIZE = 128
criterion = nn.CosineEmbeddingLoss()

optimizer = optim.Adam(model.parameters())
for epoch in range(N_EPOCHS):

    train_loss = train(model, train_iter, optimizer, CLIP)

    if train_loss < best_valid_loss:
        best_valid_loss = train_loss
        torch.save(model.state_dict(), 'tut1-model.pt')

    print(epoch)
    print(train_loss)
Example #13
def test_train(self):
    start_date = dt(2018, 2, 1)
    end_date = dt(2018, 3, 1)
    model.train(start_date, end_date, self.config)
Example #14
File: train3d.py Project: aspgln/rcc
def train_model(model,
                train_loader,
                device,
                epoch,
                num_epochs,
                optimizer,
                writer,
                current_lr,
                log_every=100,
                weight=1):
    _ = model.train()

    model = model.to(device)
    y_trues = []
    y_logits = []
    y_probs = []
    y_preds = []
    loss_values = []

    pos_weight = torch.FloatTensor([weight]).to(device)
    criterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)

    for i, (image, label, header) in enumerate(train_loader):
        optimizer.zero_grad()

        image = image.to(device)
        label = label.to(device)

        outputs = model(image.float())
        loss = criterion(outputs, label)
        loss.backward()
        optimizer.step()

        probs = torch.sigmoid(outputs)
        preds = torch.round(probs)

        loss_values.append(loss.item())
        y_trues.append(int(label.item()))
        y_logits.append(outputs.item())
        y_probs.append(probs.item())
        y_preds.append(preds.item())

        try:
            auc = metrics.roc_auc_score(y_trues, y_probs)
        except ValueError:
            auc = 0.5  # ROC AUC is undefined until both classes appear in y_trues

        writer.add_scalar('Train/Loss', loss.item(),
                          epoch * len(train_loader) + i)
        writer.add_scalar('Train/AUC', auc, epoch * len(train_loader) + i)

        if (i % log_every == 0) & (i > 0):
            print(
                '''[Epoch: {0} / {1} |Single batch number : {2} / {3} ]| avg train loss {4} | train auc : {5} | lr : {6}'''
                .format(epoch + 1, num_epochs, i, len(train_loader),
                        np.round(np.mean(loss_values), 4), np.round(auc, 4),
                        current_lr))

    cm = confusion_matrix(y_trues, y_preds, labels=[0, 1])
    print_cm(cm, ['0', '1'])
    sens, spec, acc = compute_stats(y_trues, y_preds)
    print('sens: {:.4f}'.format(sens))
    print('spec: {:.4f}'.format(spec))
    print('acc:  {:.4f}'.format(acc))
    print()

    writer.add_scalar('Train/AUC_epoch', auc, epoch + i)

    train_loss_epoch = np.round(np.mean(loss_values), 4)
    train_auc_epoch = np.round(auc, 4)
    return train_loss_epoch, train_auc_epoch
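
compute_stats and print_cm are project helpers. A plausible compute_stats, assuming it derives sensitivity, specificity and accuracy from binary labels and predictions, is sketched below:

# Hypothetical compute_stats: sensitivity, specificity and accuracy from
# binary ground truth and predictions.
from sklearn.metrics import confusion_matrix


def compute_stats(y_trues, y_preds):
    tn, fp, fn, tp = confusion_matrix(y_trues, y_preds, labels=[0, 1]).ravel()
    sens = tp / (tp + fn) if (tp + fn) else 0.0
    spec = tn / (tn + fp) if (tn + fp) else 0.0
    acc = (tp + tn) / (tp + tn + fp + fn)
    return sens, spec, acc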