Example #1
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm

import utils  # project-local helpers (calc_multi_cls_measures, print_progress)


def evaluate_model(model, val_loader):
    metric = torch.nn.CrossEntropyLoss()
    model.eval()

    for m in model.modules():
        if isinstance(m, nn.BatchNorm2d):
            m.train()
            m.weight.requires_grad = False
            m.bias.requires_grad = False

    y_probs = np.zeros((0, 3), dtype=np.float32)  # np.float was removed in NumPy 1.24
    losses, y_trues = [], []

    for i, (image, label, case_id) in enumerate(tqdm(val_loader)):
        if torch.cuda.is_available():
            image = image.cuda()
            label = label.cuda()

        prediction = model(image.float())  # call the module, not .forward(), so hooks run
        loss = metric(prediction, label.long())

        loss_value = loss.item()
        losses.append(loss_value)
        y_prob = F.softmax(prediction, dim=1).detach().cpu().numpy()

        y_probs = np.concatenate([y_probs, y_prob])
        y_trues.append(label.item())  # .item() assumes batch_size == 1
    metric_collects = utils.calc_multi_cls_measures(y_probs, y_trues)
    val_loss_epoch = np.mean(losses)
    return val_loss_epoch, metric_collects
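
A minimal usage sketch for evaluate_model above; model and val_dataset here are placeholders, not part of the original example:

# Hypothetical driver code; val_dataset is a placeholder Dataset that
# yields (image, label, case_id) triples, as the loop above expects.
from torch.utils.data import DataLoader

val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False)
val_loss, metric_collects = evaluate_model(model, val_loader)
print(f'val loss: {val_loss:.4f}')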
Example #2
def evaluate_model(model,
                   val_loader,
                   epoch,
                   num_epochs,
                   writer,
                   current_lr,
                   log_every=20):
    n_classes = model.n_classes
    metric = torch.nn.CrossEntropyLoss()

    model.eval()
    for m in model.modules():
        if isinstance(m, nn.BatchNorm2d):
            m.train()
            m.weight.requires_grad = False
            m.bias.requires_grad = False

    y_probs = np.zeros((0, n_classes), dtype=np.float32)
    losses, y_trues = [], []

    for i, (image, label, case_id) in enumerate(val_loader):

        if torch.cuda.is_available():
            image = image.cuda()
            label = label.cuda()

        prediction = model(image.float())
        loss = metric(prediction, label.long())

        loss_value = loss.item()
        losses.append(loss_value)
        y_prob = F.softmax(prediction, dim=1)
        y_probs = np.concatenate([y_probs, y_prob.detach().cpu().numpy()])
        y_trues.append(label.item())

        metric_collects = utils.calc_multi_cls_measures(y_probs, y_trues)

        n_iter = epoch * len(val_loader) + i
        writer.add_scalar('Val/Loss', loss_value, n_iter)

        if (i % log_every == 0) and (i > 0):
            prefix = '*Val|'
            utils.print_progress(epoch + 1,
                                 num_epochs,
                                 i,
                                 len(val_loader),
                                 np.mean(losses),
                                 current_lr,
                                 metric_collects,
                                 prefix=prefix)

    val_loss_epoch = np.round(np.mean(losses), 4)
    return val_loss_epoch, metric_collects
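
All of these examples repeat the same BatchNorm handling: the layers stay in train mode so they normalize with batch statistics, while their affine parameters are frozen. A standalone sketch of that pattern, assuming nothing beyond a plain nn.Module:

import torch.nn as nn

def freeze_bn_affine(model):
    # Keep BatchNorm2d in train mode (batch statistics are still used),
    # but stop gradients from flowing into its scale/shift parameters.
    for m in model.modules():
        if isinstance(m, nn.BatchNorm2d):
            m.train()
            m.weight.requires_grad = False
            m.bias.requires_grad = False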
Example #3
def train_model(model, train_loader, epoch, num_epochs, optimizer, writer,
                current_lr, log_every=1, n_classes=3):
    metric = torch.nn.CrossEntropyLoss()

    y_probs = np.zeros((0, n_classes), dtype=np.float32)
    losses, y_trues = [], []
    model.train()

    for m in model.modules():
        if isinstance(m, nn.BatchNorm2d):
            m.train()
            m.weight.requires_grad = False
            m.bias.requires_grad = False

    for i, (image, label, case_id) in enumerate(train_loader):
        print(f'Starting with batch {i}, case id {case_id}')

        optimizer.zero_grad()
        if torch.cuda.is_available():
            image = image.cuda()
            label = label.cuda()

        image = torch.squeeze(image, dim=0)

        prediction = model(image.float())
        loss = metric(prediction, label.long())
        loss.backward()
        optimizer.step()
        print(f'Done with batch {i}')

        loss_value = loss.item()
        losses.append(loss_value)
        y_prob = F.softmax(prediction, dim=1)
        y_probs = np.concatenate([y_probs, y_prob.detach().cpu().numpy()])
        y_trues.append(label.item())

        metric_collects = utils.calc_multi_cls_measures(y_probs, y_trues)
        n_iter = epoch * len(train_loader) + i
        writer.add_scalar('Train/Loss', loss_value, n_iter)

        if (i % log_every == 0) and i > 0:
            utils.print_progress(epoch + 1, num_epochs, i, len(train_loader),
                                 np.mean(losses), current_lr, metric_collects)

    train_loss_epoch = np.round(np.mean(losses), 4)
    return train_loss_epoch, metric_collects
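
A hedged sketch of how train_model above might be driven across epochs; the Adam optimizer, the SummaryWriter, and train_loader are assumptions, not part of the original example:

import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter

optimizer = optim.Adam(model.parameters(), lr=1e-4)  # hypothetical choice
writer = SummaryWriter()
num_epochs = 10
for epoch in range(num_epochs):
    current_lr = optimizer.param_groups[0]['lr']
    train_loss, metrics = train_model(model, train_loader, epoch, num_epochs,
                                      optimizer, writer, current_lr)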
Example #4
def train_model(model,
                train_loader,
                epoch,
                num_epochs,
                optimizer,
                current_lr,
                log_every=100):
    n_classes = model.n_classes
    metric = torch.nn.CrossEntropyLoss()

    y_probs = np.zeros((0, n_classes), dtype=np.float32)
    losses, y_trues = [], []
    model.train()

    for m in model.modules():
        if isinstance(m, nn.BatchNorm2d):
            m.train()
            m.weight.requires_grad = False
            m.bias.requires_grad = False

    for i, (image, label) in enumerate(train_loader):
        optimizer.zero_grad()
        if torch.cuda.is_available():
            image = image.cuda()
            label = label.cuda()
        prediction = model(image.float())
        loss = metric(prediction, label.long())
        loss.backward()
        optimizer.step()

        loss_value = loss.item()
        losses.append(loss_value)
        y_prob = F.softmax(prediction, dim=1)
        y_probs = np.concatenate([y_probs, y_prob.detach().cpu().numpy()])
        # np.array() fails on CUDA tensors; move labels to the CPU first.
        # np.append already returns an ndarray, so no extra conversion is needed.
        y_trues = np.append(y_trues, label.detach().cpu().numpy())

    metric_collects = utils.calc_multi_cls_measures(y_probs, y_trues)
    train_loss_epoch = np.round(np.mean(losses), 4)

    return train_loss_epoch, metric_collects
Example #5
###############################################################################
# Build the model
###############################################################################

ntokens = len(corpus.dictionary)
model = model.RNNModel(args.model, ntokens, args.emsize, args.nhid,
                       args.nlayers, args.dropout, args.tied).to(device)
if args.pretrained:
    model = torch.load(args.pretrained, map_location=device)

criterion = nn.CrossEntropyLoss()

for p in model.parameters():
    print(type(p.data), p.size())
for m in model.modules():
    print(m)

for name, W in model.named_parameters():
    print("named parameter name {}".format(name))

###############################################################################
# Training code
###############################################################################


def repackage_hidden(h):
    """Wraps hidden states in new Tensors, to detach them from their history."""
    if isinstance(h, torch.Tensor):
        return h.detach()
    else:
        # For LSTMs the hidden state is a tuple of tensors; detach each one.
        return tuple(repackage_hidden(v) for v in h)
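
A minimal sketch of how repackage_hidden is typically used in a truncated-BPTT training loop; get_batch, bptt, and batch_size are assumptions here, not part of the original example:

# Hypothetical training step; train_data, get_batch and bptt are placeholders.
hidden = model.init_hidden(batch_size)
for i in range(0, train_data.size(0) - 1, bptt):
    data, targets = get_batch(train_data, i)
    hidden = repackage_hidden(hidden)  # detach so backprop stops at this step
    model.zero_grad()
    output, hidden = model(data, hidden)
    loss = criterion(output.view(-1, ntokens), targets)
    loss.backward()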