Example #1
    valid_loader = torch.utils.data.DataLoader(valid_data,
                                               batch_size=TEST_BATCH_SIZE,
                                               shuffle=False,
                                               collate_fn=collate)

    best_mse = None
    best_epoch = -1
    model_file_name = 'models/model_' + model_st + '_' + dataset + '_' + str(
        fold) + '.model'

    for epoch in range(NUM_EPOCHS):
        # Train for one epoch, then evaluate on the held-out validation split.
        train(model, device, train_loader, optimizer, epoch + 1)
        print('predicting for valid data')
        G, P = predicting(model, device, valid_loader)  # G: ground truth, P: predictions
        val = get_mse(G, P)
        print('valid result:', val, best_mse)
        # Keep only the checkpoint with the lowest validation MSE so far.
        if best_mse is None or val < best_mse:
            best_mse = val
            best_epoch = epoch + 1
            torch.save(model.state_dict(), model_file_name)
            print('mse improved at epoch ', best_epoch, '; best_test_mse',
                  best_mse, model_st, dataset, fold)
            calculate_metrics(G, P, dataset)
            calculate_metrics(G[quadA], P[quadA], dataset + ' (quadA)')
            calculate_metrics(G[quadB], P[quadB], dataset + ' (quadB)')
            calculate_metrics(G[quadC], P[quadC], dataset + ' (quadC)')
        else:
            print('No improvement since epoch ', best_epoch, '; best_test_mse',
                  best_mse, model_st, dataset, fold)
        sys.stdout.flush()
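The helpers `predicting` and `get_mse` used above are not part of this excerpt. A minimal sketch of one plausible implementation, assuming the loader yields `(inputs, targets)` batches and the model returns a 1-D tensor of predictions (both are assumptions, not the original code):

import numpy as np
import torch

def get_mse(y, f):
    # Mean squared error between ground-truth values y and predictions f.
    return np.mean((y - f) ** 2)

def predicting(model, device, loader):
    # Run the model over `loader` and return (ground_truth, predictions)
    # as flat numpy arrays; the (inputs, targets) batch layout is assumed.
    model.eval()
    labels, preds = [], []
    with torch.no_grad():
        for inputs, targets in loader:
            output = model(inputs.to(device))
            preds.append(output.detach().cpu().view(-1))
            labels.append(targets.view(-1).cpu())
    return torch.cat(labels).numpy(), torch.cat(preds).numpy()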
Example #2
        for batch in train_dataloader:

            optimizer.zero_grad()

            img = batch['img']
            target_labels = batch['labels']
            # Move every per-task label tensor to the same device as the model.
            target_labels = {
                t: target_labels[t].to(device)
                for t in target_labels
            }
            output = model(img.to(device))

            loss_train, losses_train = model.get_loss(output, target_labels)
            total_loss += loss_train.item()
            batch_accuracy_color, batch_accuracy_gender, batch_accuracy_article = calculate_metrics(
                output, target_labels)

            accuracy_color += batch_accuracy_color
            accuracy_gender += batch_accuracy_gender
            accuracy_article += batch_accuracy_article

            loss_train.backward()
            optimizer.step()

        print(
            "epoch {:4d}, loss: {:.4f}, color {:.4f}, gender:{:.4f}, article: {:.4f}"
            .format(epoch, total_loss / n_train_samples,
                    accuracy_color / n_train_samples,
                    accuracy_gender / n_train_samples,
                    accuracy_article / n_train_samples))
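`calculate_metrics` is defined elsewhere in the project. A minimal sketch, assuming each head of `output` holds class logits and the function returns per-batch counts of correct top-1 predictions (if it instead returns per-batch accuracy scores, `n_train_samples` above would count batches rather than samples):

import torch

def calculate_metrics(output, target_labels):
    # Count correct top-1 predictions per head for one batch (assumed contract).
    correct = {head: (logits.argmax(dim=1) == target_labels[head]).sum().item()
               for head, logits in output.items()}
    return correct['color'], correct['gender'], correct['article']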
Example #3
        for batch in train_dataloader:
            optimizer.zero_grad()

            img = batch['img']
            target_labels = batch['labels']
            target_labels = {
                t: target_labels[t].to(device)
                for t in target_labels
            }
            output = model(img.to(device))

            loss_train, losses_train = model.get_loss(output, target_labels)
            total_loss += loss_train.item()

            batch_accuracy_age, batch_accuracy_gender, batch_accuracy_ethnicity = \
                calculate_metrics(output, target_labels)

            accuracy_age += batch_accuracy_age
            accuracy_gender += batch_accuracy_gender
            accuracy_ethnicity += batch_accuracy_ethnicity

            loss_train.backward()
            optimizer.step()

        print(
            "epoch {:4d}, loss: {:.4f}, age: {:.4f}, gender: {:.4f}, ethnicity: {:.4f}"
            .format(epoch, total_loss / n_train_samples,
                    accuracy_age / n_train_samples,
                    accuracy_gender / n_train_samples,
                    accuracy_ethnicity / n_train_samples))
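Both training loops rely on `model.get_loss`, which returns the combined loss plus the per-head losses. A sketch of one way such a method could be written, assuming every head is a classification output trained with cross-entropy (illustrative only, not the original model):

import torch.nn as nn

class MultiHeadLossMixin:
    # Illustrative mixin: sum per-head cross-entropy losses into one scalar.
    def get_loss(self, output, target_labels):
        criterion = nn.CrossEntropyLoss()
        losses = {head: criterion(logits, target_labels[head])
                  for head, logits in output.items()}
        return sum(losses.values()), losses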
Example #4
import csv
import os

import matplotlib.pyplot as plt
import torch
from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix

# `checkpoint_load` and `calculate_metrics` are project-local helpers assumed to be in scope.


def visualize_grid(model,
                   dataloader,
                   attr,
                   device,
                   show_cn_matrices=True,
                   checkpoint=None,
                   csv_filename=None,
                   caption=''):
    """Run `model` over `dataloader`, collect per-attribute ground truth and
    predictions, dump mismatched images to a CSV, and optionally plot a
    confusion matrix per attribute."""
    if checkpoint is not None:
        checkpoint_load(model, checkpoint)
    model.eval()
    gt_all = {x: list() for x in attr.fld_names}
    predicted_all = {x: list() for x in attr.fld_names}
    accuracies = {x: 0 for x in attr.fld_names}

    list_of_images_to_check = []
    loitc_columns = ['filename', 'choice']

    with torch.no_grad():
        for batch in dataloader:
            img = batch['img'].to(device)
            gt_s = {x: batch['labels'][x] for x in attr.fld_names}
            fnames = batch['img_path']
            output = model(img)
            batches = calculate_metrics(output, gt_s)
            for x in batches:
                accuracies[x] += batches[x]
            predicted_s = {
                x: output[x].cpu().max(1)[1]
                for x in attr.fld_names
            }

            for i in range(img.shape[0]):
                predicted = {
                    x: attr.labels_id_to_name[x][predicted_s[x][i].item()]
                    for x in attr.fld_names
                }
                gt = {
                    x: attr.labels_id_to_name[x][gt_s[x][i].item()]
                    for x in attr.fld_names
                }
                for x in attr.fld_names:
                    gt_v = gt[x]
                    prdv = predicted[x]
                    gt_all[x].append(gt_v)
                    predicted_all[x].append(prdv)
                    if gt_v == prdv:
                        continue
                    # Record the mismatch so the image can be reviewed by hand.
                    list_of_images_to_check.append({
                        loitc_columns[0]: os.path.split(fnames[i])[1],
                        loitc_columns[1]: f'<{gt_v}> or <{prdv}>',
                    })

    if csv_filename is not None and list_of_images_to_check:
        with open(csv_filename, 'w', newline='') as file:
            writer = csv.DictWriter(file, fieldnames=loitc_columns)
            writer.writeheader()
            writer.writerows(list_of_images_to_check)

    # Draw confusion matrices
    if show_cn_matrices:
        for x in attr.fld_names:
            cn_matrix = confusion_matrix(
                y_true=gt_all[x],
                y_pred=predicted_all[x],
                labels=attr.labels[x],
                normalize='true')
            ConfusionMatrixDisplay(cn_matrix,
                                   display_labels=attr.labels[x]).plot(
                                       include_values=True,
                                       xticks_rotation='vertical')
            plt.title(f"{x}:{caption}")
            plt.tight_layout()
            plt.show(block=False)

    model.train()
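A hypothetical call to `visualize_grid`, assuming `val_dataloader`, `attributes`, and `device` were created alongside the training code above (the checkpoint and CSV paths are illustrative):

visualize_grid(model,
               val_dataloader,
               attributes,
               device,
               show_cn_matrices=True,
               checkpoint='checkpoints/epoch_50.pth',   # illustrative path
               csv_filename='images_to_recheck.csv',    # illustrative path
               caption='validation')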