# Build the MNIST train/test DataLoaders for SDAE pretraining.
# NOTE(review): MNIST is constructed without a transform here — presumably the
# MNIST class in scope converts images to tensors itself; verify its definition.
train_loader = torch.utils.data.DataLoader(MNIST('./dataset/mnist',
                                                     train=True,
                                                     download=True),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=0)
    # Test split: no shuffling, so evaluation order is deterministic.
    test_loader = torch.utils.data.DataLoader(MNIST('./dataset/mnist',
                                                    train=False),
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=0)

    # Stacked denoising autoencoder: 784-dim input (flattened 28x28 MNIST),
    # 500-500-2000 encoder with a symmetric decoder, 10-dim latent code,
    # real-valued (non-binary) outputs, ReLU activations, no dropout.
    sdae = StackedDAE(input_dim=784,
                      z_dim=10,
                      binary=False,
                      encodeLayer=[500, 500, 2000],
                      decodeLayer=[2000, 500, 500],
                      activation="relu",
                      dropout=0)

    # Print the pre-train model structure
    print(sdae)
    # Layer-wise pretraining: 20% input corruption, MSE reconstruction loss;
    # learning rate, batch size and epoch count come from the CLI args.
    sdae.pretrain(train_loader,
                  test_loader,
                  lr=args.lr,
                  batch_size=args.batch_size,
                  num_epochs=args.pretrainepochs,
                  corrupt=0.2,
                  loss_type="mse")

    # Train the stacked denoising autoencoder
Example #2 (community score: 0) — boundary between scraped code snippets
     elif datasetname=='cifar':
         # CIFAR-10 preprocessing: convert to tensor, then normalize each RGB
         # channel with mean/std 0.5, mapping pixel values to [-1, 1].
         transform = transforms.Compose(
             [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
         trainset = datasets.CIFAR10(
             root='./dataset/cifar', train=True, download=False, transform=transform)  # download=True would fetch the dataset via the official channel
         train_loader = torch.utils.data.DataLoader(
             trainset, batch_size=batch_size, shuffle=True, num_workers=2)
         testset = datasets.CIFAR10(
             root='./dataset/cifar', train=False, download=False, transform=transform)
         test_loader = torch.utils.data.DataLoader(
             testset, batch_size=batch_size, shuffle=False, num_workers=2)
     else:
         # Unrecognized dataset name: bail out of the whole process.
         exit()
     # pretrain
     # NOTE(review): input_dim=784 matches flattened 28x28 MNIST, not CIFAR-10
     # (3x32x32 = 3072) — confirm StackedDAE reshapes/handles CIFAR input, or
     # that this branch is never reached with pretraining enabled.
     sdae = StackedDAE(input_dim=784, z_dim=10, binary=False,
                       encodeLayer=[500, 500, 2000], decodeLayer=[2000, 500, 500], activation="relu",
                       dropout=0,log_dir=log_dir)
     print(sdae)
     # Greedy layer-wise pretraining (20% corruption, MSE loss), followed by
     # end-to-end fine-tuning of the full autoencoder, then save to disk.
     sdae.pretrain(train_loader, lr=args.sdae_lr, batch_size=batch_size,
         num_epochs=300, corrupt=0.2, loss_type="mse")
     sdae.fit(train_loader, lr=args.sdae_lr, num_epochs=500, corrupt=0.2, loss_type="mse")
     sdae.save_model(sdae_savepath)
 if os.path.exists("model/dcn-run-mnist-%d.pt" % i)==False:
     # finetune — only runs when no saved DCN checkpoint exists for run index i.
     fit_train=None
     fit_test = None
     X=None
     y=None
     if datasetname=='mnist':
         # NOTE(review): MNIST built without a transform — presumably this custom
         # MNIST class returns tensors directly; verify against its definition.
         # (Statement continues beyond this excerpt.)
         train_loader = torch.utils.data.DataLoader(
             MNIST('./dataset/mnist', train=True, download=False),
Example #3 (community score: 0) — boundary between scraped code snippets
            # Load the custom dataset; batches are assembled by the dataset's
            # own collate_fn (needed for its non-uniform samples).
            train_data = myDataset(train_path, -1, '.data')
            # test_data=myDataset(test_path,-1, '.pkl')
            train_loader = data.DataLoader(dataset=train_data,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           collate_fn=train_data.collate_fn,
                                           num_workers=4)
            # test_loader = data.DataLoader(dataset=test_data, batch_size=batch_size, shuffle=True,
            #                                collate_fn=train_data.collate_fn,num_workers=4)

            # pretrain
            # Small SDAE for 12-dim feature vectors: one 12-unit hidden layer on
            # each side, 8-dim latent code, ReLU, no dropout.
            sdae = StackedDAE(input_dim=12,
                              z_dim=8,
                              binary=False,
                              encodeLayer=[12],
                              decodeLayer=[12],
                              activation="relu",
                              dropout=0,
                              log_dir=log_dir)
            # Move the model to GPU before training.
            sdae.cuda()
            # print(sdae)
            # Layer-wise pretraining: 20% input corruption, MSE loss, 20 epochs.
            sdae.pretrain(train_loader,
                          lr=args.sdae_pre_lr,
                          batch_size=batch_size,
                          num_epochs=20,
                          corrupt=0.2,
                          loss_type="mse")
            # Release cached GPU memory between pretraining and fine-tuning.
            torch.cuda.empty_cache()
            # End-to-end fine-tuning (call continues beyond this excerpt).
            sdae.fit(train_loader,
                     lr=args.sdae_fit_lr,
                     num_epochs=20,