Example #1
### Dataset configs ###
num_classes = 24

use_cuda = torch.cuda.is_available()

### trained model configs ###
modelname = "3d_model"
trained_path = 'Weights/mc-cls/model_best.pth.tar'
batch_size = 256


if __name__ == '__main__':

	net = densenet.densenet201(sample_size=64, sample_duration=30, num_classes=num_classes)
	trained_weights = torch.load(trained_path)
	net.load_state_dict(trained_weights['state_dict'])
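	# Note: if the checkpoint was saved on a GPU machine and this script runs on CPU,
	# torch.load(trained_path, map_location='cpu') may be needed to remap the tensors.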

	test_dataset = MatrixDataset(base_dir='../data/', num_classes=num_classes, mode='test')
	test_loader = DataLoader(test_dataset, batch_size=batch_size)

	output_scores = []

	if use_cuda:
		net.cuda()
	net.eval()

	for batch, (vid, vidname) in enumerate(test_loader):

		print("Processing batch {}/{}".format(batch, len(test_loader)))
Example #2
def main():

    net = densenet.densenet201(sample_size=64,
                               sample_duration=30,
                               num_classes=num_classes)
    if pretrained:
        pretrained_weights = torch.load(pretrained_path)
        net.load_state_dict(pretrained_weights['state_dict'])

    train_dataset = MatrixDataset(base_dir='../data/', num_classes=num_classes)
    # train_loader = DataLoader(train_dataset, batch_size=batch_size, num_workers=num_workers)

    # test_dataset = MatrixDataset(base_dir='../data/test/', mode='test', sample_size=sample_size, num_classes=num_classes)
    # test_loader = DataLoader(test_dataset, batch_size=batch_size, num_workers=num_workers)

    print(len(train_dataset), "Train + Val size")
    # print len(test_dataset)

    train_loader, test_loader, class_weights = get_train_valid_loader(
        train_dataset, train_size=0.7,
        shuffle=False)  # shuffle=False since the sampler takes continuous index ranges
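
    # get_train_valid_loader is a project helper not shown in this example. Judging from
    # the call site, it presumably splits train_dataset 70/30 into train and validation
    # DataLoaders (e.g. with torch.utils.data.SubsetRandomSampler over contiguous index
    # ranges, which is why shuffle stays False) and returns per-class weights computed
    # from the training split. That behaviour is an assumption, not taken from the source.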

    if sample_size:
        class_weights = np.ones((num_classes), dtype=np.float32)
        criterion = nn.CrossEntropyLoss()
    else:
        # class_weights = get_class_weights(train_dataset, train_loader)
        print(class_weights)
        criterion = nn.CrossEntropyLoss(class_weights)
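
    # get_class_weights (commented out above) is also not shown. A common inverse-frequency
    # sketch, assuming integer labels are available for the whole training split, would be:
    #     counts = np.bincount(train_labels, minlength=num_classes)
    #     class_weights = torch.tensor(counts.sum() / (num_classes * counts.clip(min=1)),
    #                                  dtype=torch.float)
    # Note that nn.CrossEntropyLoss expects its weight argument as a torch tensor rather
    # than a NumPy array.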

    optimizer = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()),
                          lr=lr,
                          momentum=momentum,
                          weight_decay=weight_decay)
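
    # Filtering on p.requires_grad keeps any frozen parameters out of the optimizer;
    # if nothing was frozen before this point, the filter is a no-op.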

    make_dir(weights_folder)
    make_dir(plot_folder)

    if use_cuda:
        net.cuda()

    ### Plot Lists
    if not resume:
        # for replacing whole plots
        train_loss = []
        train_loss_epoch = []
        precision_train = []
        recall_train = []
        avg_precision_train = []
        avg_recall_train = []
        f1_scores_train = []

        precision_test = []
        recall_test = []
        avg_precision_test = []
        avg_recall_test = []
        f1_scores_test = []
        test_loss = []
        test_loss_mean = []

    start_epoch = 0
    best_prec1 = 0
    early_stop_counter = 0

    ### Resuming checkpoints
    if resume:
        checkpoint = torch.load(checkpoint_path)
        net.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_epoch = checkpoint['epoch']  # gives next epoch number
        best_prec1 = checkpoint['best_prec1']
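
        # Note: as in Example #1, the torch.load above may need map_location='cpu'
        # if the checkpoint was written on a GPU machine and training resumes on CPU.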

    for epoch in range(start_epoch, epochs):

        ### Re-create the dataset/loader here if a freshly shuffled dataset is needed every epoch
        # train_dataset = AnatomyDataset(base_dir='../data/train/', mode='train', sample_size=sample_size, num_classes=num_classes)
        # train_loader = DataLoader(train_dataset, batch_size=batch_size, num_workers=num_workers)

        ### Plot lists
        if resume:
            # for appending to the plots
            train_loss = []
            train_loss_epoch = []
            precision_train = []
            recall_train = []
            avg_precision_train = []
            avg_recall_train = []
            f1_scores_train = []

            precision_test = []
            recall_test = []
            avg_precision_test = []
            avg_recall_test = []
            f1_scores_test = []
            test_loss = []
            test_loss_mean = []

        print "------------------------TRAINING {} - EPOCH: {}---------------------------".format(
            modelname, epoch)
        net.train()
        output_labels = []
        target_labels = []
        temp_loss_epoch = []

        train_loss, temp_loss_epoch, output_labels, target_labels = train(
            net, criterion, optimizer, epoch, train_loader, temp_loss_epoch,
            train_loss, output_labels, target_labels)

        ### Accuracies
        train_loss_epoch.append(sum(temp_loss_epoch) / len(temp_loss_epoch))
        precision_train.append(
            np.asarray(
                precision_score(target_labels,
                                output_labels,
                                labels=labels,
                                average=None)))
        recall_train.append(
            np.asarray(
                recall_score(target_labels,
                             output_labels,
                             labels=labels,
                             average=None)))
        avg_precision_train.append(
            precision_score(target_labels,
                            output_labels,
                            labels=labels,
                            average='weighted'))
        avg_recall_train.append(
            recall_score(target_labels,
                         output_labels,
                         labels=labels,
                         average='weighted'))
        f1_scores_train.append(
            f1_score(target_labels, output_labels, labels=labels,
                     average='weighted'))

        ### Printing
        print "precision_train ", precision_train[-1]
        print "recall_train ", recall_train[-1]
        print "train_loss_epoch", train_loss_epoch[-1]
        print "avg_precision_train ", avg_precision_train[-1]
        print "avg_recall_train ", avg_precision_train[-1]
        print "f1_scores_train ", f1_scores_train[-1]

        ### Plotting
        plot_visdom(epoch, train_loss, win='train_loss')
        plot_visdom(epoch, train_loss_epoch, win='train_loss_epoch')
        plot_visdom(epoch, precision_train, win='precision_train')
        plot_visdom(epoch, recall_train, win='recall_train')
        plot_visdom(epoch, avg_precision_train, win='avg_precision_train')
        plot_visdom(epoch, avg_recall_train, win='avg_recall_train')
        plot_visdom(epoch, f1_scores_train, win='f1_scores_train')

        ##### Testing ######

        print "------------------------TESTING EPOCH: {}---------------------------".format(
            epoch)
        net.eval()
        output_labels = []
        target_labels = []
        temp_loss_epoch = []

        test_loss, temp_loss_epoch, output_labels, target_labels = test(
            net, criterion, optimizer, epoch, test_loader, temp_loss_epoch,
            test_loss, output_labels, target_labels)

        ### Accuracies
        test_loss_mean.append(sum(temp_loss_epoch) / len(temp_loss_epoch))
        precision_test.append(
            np.asarray(
                precision_score(target_labels,
                                output_labels,
                                labels=labels,
                                average=None)))
        recall_test.append(
            np.asarray(
                recall_score(target_labels,
                             output_labels,
                             labels=labels,
                             average=None)))
        avg_precision_test.append(
            precision_score(target_labels,
                            output_labels,
                            labels=labels,
                            average='weighted'))
        avg_recall_test.append(
            recall_score(target_labels,
                         output_labels,
                         labels=labels,
                         average='weighted'))
        f1_scores_test.append(
            f1_score(target_labels, output_labels, labels=labels,
                     average='weighted'))

        ### Printing logs
        print "test loss mean ", test_loss_mean[-1]
        print "precision_test ", precision_test[-1]
        print "recall_test ", recall_test[-1]
        print "avg_precision_test ", avg_precision_test[-1]
        print "avg_recall_test ", avg_precision_test[-1]
        print "f1_scores_test ", f1_scores_test[-1]

        ### Plotting
        plot_visdom(epoch, test_loss, win='test_loss')
        plot_visdom(epoch, test_loss_mean, win='test_loss_mean')
        plot_visdom(epoch, precision_test, win='precision_test')
        plot_visdom(epoch, recall_test, win='recall_test')
        plot_visdom(epoch, avg_precision_test, win='avg_precision_test')
        plot_visdom(epoch, avg_recall_test, win='avg_recall_test')
        plot_visdom(epoch, f1_scores_test, win='f1_scores_test')

        ### Save the environment
        vis.save(envs=[basename + modelname])

        ### Checking if best
        # prec1 = test_loss_mean[-1]
        prec1 = f1_scores_test[-1]
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        ### Saving weights
        if is_best or epoch % 10 == 0 or epoch == epochs - 1:
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': modelname,
                    'state_dict': net.state_dict(),
                    'best_prec1': best_prec1,
                    'optimizer': optimizer.state_dict(),
                }, is_best)
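
save_checkpoint is a project helper that neither example defines. A minimal sketch of the usual pattern, assuming a hypothetical folder/filename layout that matches the 'Weights/mc-cls/model_best.pth.tar' path loaded in Example #1:

import shutil
import torch


def save_checkpoint(state, is_best, folder='Weights/mc-cls',
                    filename='checkpoint.pth.tar'):
    """Write the latest checkpoint; duplicate it as model_best.pth.tar when it improves."""
    path = '{}/{}'.format(folder, filename)
    torch.save(state, path)
    if is_best:
        shutil.copyfile(path, '{}/model_best.pth.tar'.format(folder))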