Code Example #1
def main(args):
    model = AutoEncoder()
    use_gpu = torch.cuda.is_available()
    if use_gpu:
        print('cuda is available!')
        model.cuda()

    weight_file = args.path_weight_file

    # load the trained weights into the already-constructed model
    # (map_location keeps the load working on CPU-only machines)
    model.load_state_dict(
        torch.load(weight_file, map_location=lambda storage, loc: storage))

    test_dataset = dataloader('dogs_cats', 'test')
    test_loader = torch.utils.data.DataLoader(
        test_dataset, batch_size=args.batch_size, shuffle=False)

    images, _ = next(iter(test_loader))  # iter(...).next() is Python 2 only
    images = Variable(images, volatile=True)  # no-grad inference (pre-0.4 API)
    if use_gpu:
        images = images.cuda()

    # show the first 25 inputs and their reconstructions side by side
    imshow(torchvision.utils.make_grid(images.data[:25].cpu(), nrow=5))
    outputs = model(images)
    imshow(torchvision.utils.make_grid(outputs.data[:25].cpu(), nrow=5))
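This example calls an `imshow` helper that is not shown. A minimal sketch of what such a helper could look like, assuming the tensors hold plain [0, 1] images and matplotlib is available; only the name and the `(img, title)` call pattern come from these examples, the body is an assumption:

import matplotlib.pyplot as plt
import numpy as np

def imshow(img, title=None):
    # assumed helper: CHW tensor -> HWC numpy array for matplotlib
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    if title is not None:
        plt.title(title)
    plt.show()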
Code Example #2
def main(options):

    if options.num_classes == 2:
        TRAINING_PATH = 'train_2classes.txt'
    else:
        TRAINING_PATH = 'train.txt'
    IMG_PATH = './Image'

    dset_train = AD_3DRandomPatch(IMG_PATH, TRAINING_PATH)

    train_loader = DataLoader(dset_train,
                              batch_size=options.batch_size,
                              shuffle=True,
                              num_workers=4,
                              drop_last=True)

    sparsity = 0.05
    beta = 0.5

    mean_square_loss = nn.MSELoss()
    kl_div_loss = nn.KLDivLoss(reduce=False)  # 'reduce' is deprecated; use reduction='none' on PyTorch >= 1.0

    use_gpu = len(options.gpuid) >= 1
    autoencoder = AutoEncoder()

    if use_gpu:
        autoencoder = autoencoder.cuda()
    else:
        autoencoder = autoencoder.cpu()

    optimizer = torch.optim.Adam(autoencoder.parameters(),
                                 lr=options.learning_rate,
                                 weight_decay=options.weight_decay)

    train_loss = 0.
    for epoch in range(options.epochs):
        print("At {0}-th epoch.".format(epoch))
        for i, patches in enumerate(train_loader):
            print(i)
            print(len(patches))
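The loop above is truncated before `sparsity`, `beta`, or the loss functions are used. A minimal sketch of how the sparse-autoencoder step could continue, mirroring the full loop shown in Code Example #6; the `(output, s_)` return signature of `AutoEncoder` and the idea that `batch` is one patch tensor taken from `patches` are assumptions, not part of this snippet:

            # assumptions: 'batch' is one patch tensor from 'patches', and the
            # model returns (reconstruction, mean hidden activation) as in #6
            output, s_ = autoencoder(batch)
            loss1 = mean_square_loss(output, batch)      # reconstruction term
            s = torch.full_like(s_, sparsity)            # target activation rho
            loss2 = (s * torch.log(s / (s_ + 1e-8)) +
                     (1 - s) * torch.log((1 - s) / (1 - s_ + 1e-8))).sum()
            loss = loss1 + beta * loss2                  # total sparse objective
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()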
Code Example #3
'''load pre-trained weight (fall back to random init)'''
model_dict = autoencoder.state_dict()
try:
    pre_train_path = args.load_weight_dir
    pretrained_dict = torch.load(pre_train_path)  # load pre-trained checkpoint
    # keep only the entries whose names also exist in the target model
    pretrained_dict = {
        k: v
        for k, v in pretrained_dict.items() if k in model_dict
    }
    model_dict.update(pretrained_dict)
    print('===================================')
    print('load pre_train weight successfully')
    print('===================================')
except Exception:  # missing or incompatible checkpoint: keep the random init
    print('===================================')
    print('       random init the weight      ')
    print('===================================')
autoencoder.load_state_dict(model_dict)
autoencoder.cuda()
autoencoder.train()
'''opt setting'''
optimizer = torch.optim.Adam(
    autoencoder.parameters(),
    lr=args.learning_rate)  # optimize all cnn parameters
loss_func = nn.MSELoss()
'''folder for saving  weight and loss history'''
save_path = args.save_weight_dir
'''training code'''
for epoch in range(EPOCH):
    loss_iter = 0
    for step, (x, b_label) in enumerate(train_data):  # labels are unused here

        x_in = x.cuda()  # the loader already yields tensors; just move to GPU
        decoded = autoencoder(x_in)
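The snippet ends right after the forward pass. A minimal sketch of the rest of the training step, using only the `loss_func`, `optimizer`, and `loss_iter` already defined above, and treating `x_in` as the reconstruction target (the usual autoencoder setup; not shown in the original):

        loss = loss_func(decoded, x_in)  # reconstruction error
        optimizer.zero_grad()            # clear old gradients
        loss.backward()                  # backpropagate
        optimizer.step()                 # update the weights
        loss_iter += loss.item()         # running loss for this epoch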
Code Example #4
File: train.py  Project: Adamits/autoextendVN
print(all_embeddings.size()[0])  # number of rows (words) in the embedding matrix
# split the embedding matrix into blocks of batch_size columns (dimensions)
embed_batches = [all_embeddings[:, i:i + batch_size]
                 for i in range(0, all_embeddings.size()[1], batch_size)]

#embed_batch1 = all_embeddings[:, 260:280]
#embed_batch2 = all_embeddings[:, 280:300]
#embed_batches = [embed_batch1, embed_batch2]

for d, embeds in enumerate(embed_batches):
    dim_start = d * batch_size
    dim_end = (d+1) * batch_size
    
    print("Initializing AutoEncoder for dims %i to %i"\
          % (dim_start, dim_end))
    model = AutoEncoder(embeds, word2class, class2word, USE_CUDA)
    model = model.cuda() if USE_CUDA else model
    
    # Ignore any parameters with requires_grad = False
    # aka the pretrained embeddings
    params = filter(lambda x: x.requires_grad, model.parameters())
    optimizer = torch.optim.SGD(params, lr=lr)

    num_dims = model.embedding_dims
    num_words = model.num_words
    num_classes = model.num_classes
    last_loss = float("inf")

    for i in range(epochs):
        print("Epoch %i" % i)

        optimizer.zero_grad()
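As a standalone sanity check of the column batching at the top of this example (toy sizes chosen purely for illustration):

import torch

# a 4-word x 10-dim toy matrix split into blocks of 4 columns;
# the last block is narrower when the width is not divisible
all_embeddings = torch.randn(4, 10)
batch_size = 4
embed_batches = [all_embeddings[:, i:i + batch_size]
                 for i in range(0, all_embeddings.size()[1], batch_size)]
print([b.size() for b in embed_batches])
# [torch.Size([4, 4]), torch.Size([4, 4]), torch.Size([4, 2])]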
Code Example #5
def main(args):
    ## load datasets
    train_dataset = dataloader('dogs_cats', 'train')

    ## split train and validation
    num_train = len(train_dataset)
    indices = list(range(num_train))
    split = 5000

    validation_idx = np.random.choice(indices, size=split, replace=False)
    train_idx = list(set(indices) - set(validation_idx))

    train_sampler = SubsetRandomSampler(train_idx)
    validation_sampler = SubsetRandomSampler(validation_idx)

    ## train and validation loader
    train_loader = torch.utils.data.DataLoader(
        train_dataset, 
        batch_size=args.batch_size, 
        sampler=train_sampler)
    valid_loader = torch.utils.data.DataLoader(
        train_dataset, 
        batch_size=args.batch_size, 
        sampler=validation_sampler)

    ## debug
    if args.debug:
        images, _ = next(iter(train_loader))
        grid = torchvision.utils.make_grid(images[:25], nrow=5)
        imshow(grid, 'train')

        images, _ = next(iter(valid_loader))
        grid = torchvision.utils.make_grid(images[:25], nrow=5)
        imshow(grid, 'valid')

    ## define model
    model = AutoEncoder()
    use_gpu = torch.cuda.is_available()
    if use_gpu:
        print('cuda is available!')
        model.cuda()
    
    ## loss and optimizer
    criterion = nn.MSELoss()
    optimizer = torch.optim.SGD(
        model.parameters(), lr=0.001, momentum=0.9, weight_decay=1e-5)

    ## log 
    log_dir = 'logs'
    if not os.path.isdir(log_dir):
        os.mkdir(log_dir)

    ## train and valid
    best_val = 5  # initial threshold; a checkpoint is saved once val_loss drops below this
    loss_list = []
    val_loss_list = []
    for epoch in range(args.n_epochs):
        loss = train(model, criterion, optimizer, train_loader, use_gpu)
        val_loss = valid(model, criterion, valid_loader, use_gpu)

        print('epoch {:d}, loss: {:.4f} val_loss: {:.4f}'.format(epoch, loss, val_loss))

        if val_loss < best_val:
            print('val_loss improved from {:.5f} to {:.5f}!'.format(best_val, val_loss))
            best_val = val_loss
            model_file = 'epoch{:03d}-{:.3f}.pth'.format(epoch, val_loss)
            torch.save(model.state_dict(), os.path.join(log_dir, model_file))

        loss_list.append(loss)
        val_loss_list.append(val_loss)
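The function collects `loss_list` and `val_loss_list` but ends before using them. A minimal sketch of a plotting step that could follow, assuming `matplotlib.pyplot` is imported as `plt` at the top of the file:

    ## plot the learning curves (assumed continuation of main)
    plt.plot(range(args.n_epochs), loss_list, label='loss')
    plt.plot(range(args.n_epochs), val_loss_list, label='val_loss')
    plt.xlabel('epoch')
    plt.ylabel('MSE loss')
    plt.legend()
    plt.savefig(os.path.join(log_dir, 'loss_curve.png'))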
Code Example #6
def main(options):

    if options.num_classes == 3:
        TRAINING_PATH = '/media/ailab/Backup Plus/ADNI/data/train_3classes.txt'
    else:
        # NOTE: both branches currently point at the same 3-class list
        TRAINING_PATH = '/media/ailab/Backup Plus/ADNI/data/train_3classes.txt'
    IMG_PATH = '/media/ailab/Backup Plus/ADNI/data/image'

    dset_train = AD_3DRandomPatch(IMG_PATH, TRAINING_PATH)

    train_loader = DataLoader(dset_train,
                              batch_size=options.batch_size,
                              shuffle=True,
                              num_workers=0,
                              drop_last=True)
    sparsity = 0.05
    beta = 0.5

    mean_square_loss = nn.MSELoss()
    #kl_div_loss = nn.KLDivLoss(reduce=False)

    use_gpu = len(options.gpuid) >= 1
    autoencoder = AutoEncoder()

    if use_gpu:
        autoencoder = autoencoder.cuda()
    else:
        autoencoder = autoencoder.cpu()

    #autoencoder.load_state_dict(torch.load("./autoencoder_pretrained_model19"))

    optimizer = torch.optim.Adam(autoencoder.parameters(),
                                 lr=options.learning_rate,
                                 weight_decay=options.weight_decay)

    last_train_loss = 1e-4
    f = open("autoencoder_loss", 'a')
    for epoch in range(options.epochs):
        train_loss = 0.
        print("At {0}-th epoch.".format(epoch))
        for i, patches in enumerate(train_loader):
            patch = patches['patch']
            for b, batch in enumerate(patch):
                batch = Variable(batch)
                if use_gpu:
                    batch = batch.cuda()
                #batch = out.view(-1, 343)
                output, s_ = autoencoder(batch)
                batch = batch.view(-1, 343)
                loss1 = mean_square_loss(output, batch)
                s = Variable(torch.ones(s_.shape) * sparsity)
                if use_gpu:
                    s = s.cuda()
                # Bernoulli-KL sparsity penalty KL(s || s_), summed over hidden units
                loss2 = (s * torch.log(s / (s_ + 1e-8)) + (1 - s) * torch.log(
                    (1 - s) / (1 - s_ + 1e-8))).sum() / options.batch_size
                #kl_div_loss(mean_activitaion, sparsity)
                loss = loss1 + beta * loss2
                train_loss += loss.item()  # accumulate a float, not the autograd graph
                logging.info(
                    "batch {0} training loss is : {1:.5f}, {2:.5f}".format(
                        i * 1000 + b, loss1.item(), loss2.item()))
                f.write("batch {0} training loss is : {1:.3f}\n".format(
                    i * 1000 + b, loss.item()))
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
        train_avg_loss = train_loss / (len(train_loader) * 1000)  # 1000 patches per loader item
        print(
            "Average training loss is {0:.5f} at the end of epoch {1}".format(
                train_avg_loss, epoch))
        if (abs(train_avg_loss - last_train_loss) <=
                options.estop) or ((epoch + 1) % 20 == 0):
            torch.save(autoencoder.state_dict(),
                       "autoencoder_pretrained_model" + str(epoch))
        last_train_loss = train_avg_loss
    f.close()
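The hand-written `loss2` above is the KL divergence between Bernoulli distributions with means `sparsity` (the target) and `s_` (the observed mean activation). A small standalone check of the same formula, with toy values chosen for illustration:

import torch

rho_hat = torch.tensor([0.05, 0.2, 0.5])  # observed mean activations
s = torch.full_like(rho_hat, 0.05)        # sparsity target rho = 0.05
kl = (s * torch.log(s / (rho_hat + 1e-8)) +
      (1 - s) * torch.log((1 - s) / (1 - rho_hat + 1e-8)))
print(kl)  # ~0 at 0.05, growing as the activation drifts from the target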