Code Example #1
import logging

import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data import DataLoader

# AutoEncoder and AD_3DRandomPatch are project-specific classes assumed to be
# defined elsewhere in the repository.


def main(options):

    if options.num_classes == 2:
        TRAINING_PATH = 'train_2classes.txt'
    else:
        TRAINING_PATH = 'train.txt'
    IMG_PATH = '/Users/waz/JHU/CV-ADNI/ImageNoSkull'

    dset_train = AD_3DRandomPatch(IMG_PATH, TRAINING_PATH)

    train_loader = DataLoader(dset_train,
                              batch_size=options.batch_size,
                              shuffle=True,
                              num_workers=4,
                              drop_last=True)

    sparsity = 0.05
    beta = 0.5

    mean_square_loss = nn.MSELoss()
    kl_div_loss = nn.KLDivLoss()

    use_gpu = len(options.gpuid) >= 1
    autoencoder = AutoEncoder()

    if use_gpu:
        autoencoder = autoencoder.cuda()
    else:
        autoencoder = autoencoder.cpu()

    optimizer = torch.optim.Adam(autoencoder.parameters(),
                                 lr=options.learning_rate,
                                 weight_decay=options.weight_decay)

    for epoch in range(options.epochs):
        train_loss = 0.
        print("At {0}-th epoch.".format(epoch))
        for i, patches in enumerate(train_loader):
            for b, batch in enumerate(patches):
                batch = Variable(batch)
                output, mean_activation = autoencoder(batch)
                loss1 = mean_square_loss(output, batch)
                # NOTE: nn.KLDivLoss expects log-probabilities as input and a
                # target of the same shape; Code Example #3 computes this
                # sparsity penalty by hand instead.
                loss2 = kl_div_loss(mean_activation,
                                    Variable(torch.Tensor([sparsity])))
                print("loss1", loss1)
                print("loss2", loss2)
                loss = loss1 + beta * loss2
                # accumulate a detached scalar so the graph is not kept alive
                train_loss += loss.data[0]
                logging.info(
                    "batch {0} training loss is : {1:.5f}, {2:.5f}".format(
                        b, loss1.data[0], loss2.data[0]))
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
        train_avg_loss = train_loss / (len(train_loader) * 1000)
        print(
            "Average training loss is {0:.5f} at the end of epoch {1}".format(
                train_avg_loss, epoch))
    torch.save(autoencoder.state_dict(), open("autoencoder_model", 'wb'))
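The sparsity term in Example #1 leans on nn.KLDivLoss, which expects log-probabilities as input and a target tensor of the same shape, so feeding it raw mean activations and a single scalar target is fragile. Example #3 below computes the Bernoulli KL penalty by hand; here is a minimal standalone sketch of the same idea, assuming rho_hat holds the mean hidden activations over a batch (the helper name is illustrative, not part of the original code):

import torch


def sparsity_penalty(rho_hat, rho=0.05, eps=1e-8):
    # KL(Bernoulli(rho) || Bernoulli(rho_hat)), summed over hidden units;
    # eps guards against log(0) when an activation saturates.
    rho_t = torch.ones(rho_hat.shape) * rho
    return (rho_t * torch.log(rho_t / (rho_hat + eps)) +
            (1 - rho_t) * torch.log((1 - rho_t) / (1 - rho_hat + eps))).sum()

The total objective would then be loss1 + beta * sparsity_penalty(mean_activation), matching the beta-weighted sum used in Example #3.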
Code Example #2
# (Imports as in Code Example #1.)
def main(options):

    if options.num_classes == 2:
        TRAINING_PATH = 'train_2classes.txt'
    else:
        TRAINING_PATH = 'train.txt'
    IMG_PATH = './Image'

    dset_train = AD_3DRandomPatch(IMG_PATH, TRAINING_PATH)

    train_loader = DataLoader(dset_train,
                              batch_size=options.batch_size,
                              shuffle=True,
                              num_workers=4,
                              drop_last=True)

    sparsity = 0.05
    beta = 0.5

    mean_square_loss = nn.MSELoss()
    kl_div_loss = nn.KLDivLoss(reduce=False)

    use_gpu = len(options.gpuid) >= 1
    autoencoder = AutoEncoder()

    if use_gpu:
        autoencoder = autoencoder.cuda()
    else:
        autoencoder = autoencoder.cpu()

    optimizer = torch.optim.Adam(autoencoder.parameters(),
                                 lr=options.learning_rate,
                                 weight_decay=options.weight_decay)

    train_loss = 0.
    for epoch in range(options.epochs):
        print("At {0}-th epoch.".format(epoch))
        for i, patches in enumerate(train_loader):
            print(i)
            print(len(patches))
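Example #2 stops short of the training step and mainly probes what the DataLoader yields. None of the examples define AD_3DRandomPatch, but the loop in Example #3 implies its contract: one item per image, indexable as a dict whose 'patch' entry stacks the random 7x7x7 patches (343 voxels once flattened) drawn from that image, about 1000 per image judging by the index arithmetic. A hypothetical minimal stand-in under those assumptions:

import torch
from torch.utils.data import Dataset


class RandomPatchStub(Dataset):
    # Hypothetical stand-in for AD_3DRandomPatch; the real class presumably
    # reads MRI volumes from IMG_PATH and samples patches from them.
    def __init__(self, n_images=10, patches_per_image=1000):
        self.n_images = n_images
        self.patches_per_image = patches_per_image

    def __len__(self):
        return self.n_images

    def __getitem__(self, idx):
        # one dict per image; 'patch' holds patches_per_image 7x7x7 patches
        return {'patch': torch.rand(self.patches_per_image, 7, 7, 7)}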
Code Example #3
# (Imports as in Code Example #1.)
def main(options):

    # NOTE: both branches currently point to the same 3-class training list.
    if options.num_classes == 3:
        TRAINING_PATH = '/media/ailab/Backup Plus/ADNI/data/train_3classes.txt'
    else:
        TRAINING_PATH = '/media/ailab/Backup Plus/ADNI/data/train_3classes.txt'
    IMG_PATH = '/media/ailab/Backup Plus/ADNI/data/image'

    dset_train = AD_3DRandomPatch(IMG_PATH, TRAINING_PATH)

    train_loader = DataLoader(dset_train,
                              batch_size=options.batch_size,
                              shuffle=True,
                              num_workers=0,
                              drop_last=True)
    sparsity = 0.05
    beta = 0.5

    mean_square_loss = nn.MSELoss()
    #kl_div_loss = nn.KLDivLoss(reduce=False)

    use_gpu = len(options.gpuid) >= 1
    autoencoder = AutoEncoder()

    if use_gpu:
        autoencoder = autoencoder.cuda()
    else:
        autoencoder = autoencoder.cpu()

    #autoencoder.load_state_dict(torch.load("./autoencoder_pretrained_model19"))

    optimizer = torch.optim.Adam(autoencoder.parameters(),
                                 lr=options.learning_rate,
                                 weight_decay=options.weight_decay)

    last_train_loss = 1e-4
    f = open("autoencoder_loss", 'a')
    for epoch in range(options.epochs):
        train_loss = 0.
        print("At {0}-th epoch.".format(epoch))
        for i, patches in enumerate(train_loader):
            patch = patches['patch']
            for b, batch in enumerate(patch):
                batch = Variable(batch)
                if use_gpu:
                    batch = batch.cuda()
                output, s_ = autoencoder(batch)
                # flatten each 7x7x7 patch to a 343-dim vector as the MSE target
                batch = batch.view(-1, 343)
                loss1 = mean_square_loss(output, batch)
                # KL(Bernoulli(sparsity) || Bernoulli(s_)) per hidden unit,
                # with a small epsilon for numerical stability
                s = Variable(torch.ones(s_.shape) * sparsity)
                if use_gpu:
                    s = s.cuda()
                loss2 = (s * torch.log(s / (s_ + 1e-8)) +
                         (1 - s) * torch.log((1 - s) / (1 - s_ + 1e-8))
                         ).sum() / options.batch_size
                loss = loss1 + beta * loss2
                # accumulate a detached scalar so the graph is not kept alive
                train_loss += loss.data[0]
                logging.info(
                    "batch {0} training loss is : {1:.5f}, {2:.5f}".format(
                        i * 1000 + b, loss1.data[0], loss2.data[0]))
                f.write("batch {0} training loss is : {1:.3f}\n".format(
                    i * 1000 + b, loss.data[0]))
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
        train_avg_loss = train_loss / (len(train_loader) * 1000)
        print(
            "Average training loss is {0:.5f} at the end of epoch {1}".format(
                train_avg_loss, epoch))
        # checkpoint when the loss plateaus or every 20 epochs
        if (abs(train_avg_loss - last_train_loss) <= options.estop) or (
                (epoch + 1) % 20 == 0):
            torch.save(autoencoder.state_dict(),
                       open("autoencoder_pretrained_model" + str(epoch), 'wb'))
        last_train_loss = train_avg_loss
    f.close()
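None of the snippets construct the options object; the attribute accesses above imply the fields below. A hypothetical argparse driver (all defaults are illustrative only):

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Sparse autoencoder pre-training")
    parser.add_argument('--num_classes', type=int, default=3)
    parser.add_argument('--batch_size', type=int, default=1)
    parser.add_argument('--gpuid', nargs='+', type=int, default=[],
                        help="GPU ids; an empty list means CPU")
    parser.add_argument('--learning_rate', type=float, default=1e-3)
    parser.add_argument('--weight_decay', type=float, default=0.0)
    parser.add_argument('--epochs', type=int, default=20)
    parser.add_argument('--estop', type=float, default=1e-5,
                        help="early-stop threshold on the change in avg loss")
    main(parser.parse_args())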