Example #1
 parser.add_argument('--dcn_lr', type=float, default=0.01, metavar='N',
                     help='learning rate for training (default: 0.01)')
 args = parser.parse_args()
 log_dir = 'logs/dec-' + datasetname
 if not os.path.exists(log_dir):
     os.makedirs(log_dir)
 for i in range(1, repeat+1):
     sdae_savepath = ("model/sdae-run-"+datasetname+"-%d.pt" % i)
     if os.path.exists(sdae_savepath)==False:
         print("Experiment #%d" % i)
         write_log("Experiment #%d" % i,log_dir)
         train_loader=None
         test_loader=None
         if datasetname=='mnist':
             train_loader = torch.utils.data.DataLoader(
                 MNIST('./dataset/mnist', train=True, download=True),
                 batch_size=batch_size, shuffle=True, num_workers=0)
             # test_loader = torch.utils.data.DataLoader(
             #     MNIST('./dataset/mnist', train=False),
             #     batch_size=batch_size, shuffle=False, num_workers=0)
         elif datasetname == 'cifar':
             transform = transforms.Compose(
                 [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
             trainset = datasets.CIFAR10(
                 root='./dataset/cifar', train=True, download=False, transform=transform)  # download=True would fetch CIFAR-10 through the official channel
             train_loader = torch.utils.data.DataLoader(
                 trainset, batch_size=batch_size, shuffle=True, num_workers=2)
             testset = datasets.CIFAR10(
                 root='./dataset/cifar', train=False, download=False, transform=transform)
             test_loader = torch.utils.data.DataLoader(
                 testset, batch_size=batch_size, shuffle=False, num_workers=2)
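
If MNIST here is torchvision's dataset class, the training loader above is built without a transform, so each item is a PIL image that the default collate function cannot batch. A minimal, self-contained sketch of the usual fix, assuming the training loop expects tensors (the original project may convert images elsewhere):

import torch.utils.data
from torchvision import transforms
from torchvision.datasets import MNIST

batch_size = 256  # illustrative value, not taken from the original script

# ToTensor() converts each PIL image to a float tensor in [0, 1] so the
# default collate function can stack a batch.
train_loader = torch.utils.data.DataLoader(
    MNIST('./dataset/mnist', train=True, download=True,
          transform=transforms.ToTensor()),
    batch_size=batch_size, shuffle=True, num_workers=0)
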
Example #2
                        help='input batch size for training (default: 256)')
    parser.add_argument('--pretrainepochs',
                        type=int,
                        default=300,
                        metavar='N',
                        help='number of epochs to train (default: 300)')
    parser.add_argument('--epochs',
                        type=int,
                        default=500,
                        metavar='N',
                        help='number of epochs to train (default: 500)')
    args = parser.parse_args()

    # Load data for pre-training
    train_loader = torch.utils.data.DataLoader(MNIST('./dataset/mnist',
                                                     train=True,
                                                     download=True),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=0)
    test_loader = torch.utils.data.DataLoader(MNIST('./dataset/mnist',
                                                    train=False),
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=0)

    sdae = StackedDAE(input_dim=784,
                      z_dim=10,
                      binary=False,
                      encodeLayer=[500, 500, 2000],
                      decodeLayer=[2000, 500, 500],
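
StackedDAE is defined by the surrounding project, so only its constructor arguments are visible here. Purely to make the topology explicit, a plain-PyTorch encoder/decoder with the same layer sizes (784-500-500-2000-10 and its mirror) is sketched below; the ReLU activations are an assumption, and the real StackedDAE may differ:

import torch.nn as nn

# Encoder/decoder with the same layer sizes as the StackedDAE call above,
# shown only to illustrate the architecture.
encoder = nn.Sequential(
    nn.Linear(784, 500), nn.ReLU(),
    nn.Linear(500, 500), nn.ReLU(),
    nn.Linear(500, 2000), nn.ReLU(),
    nn.Linear(2000, 10),
)
decoder = nn.Sequential(
    nn.Linear(10, 2000), nn.ReLU(),
    nn.Linear(2000, 500), nn.ReLU(),
    nn.Linear(500, 500), nn.ReLU(),
    nn.Linear(500, 784),
)
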
Example #3
                        help='number of epochs to train (default: 200)')
    parser.add_argument('--pretrain',
                        type=str,
                        default="../model/mnist_sdae_weights.pt",
                        metavar='N',
                        help='directory for pre-trained weights')
    parser.add_argument('--data',
                        type=str,
                        default="MNIST",
                        metavar='N',
                        help='dataset (MNIST, Fashion)')
    # Caveat: argparse with type=bool treats any non-empty string (even "False") as True.
    parser.add_argument('--use_pretrain', type=bool, default=True)
    args = parser.parse_args()

    # Load data
    mnist_train = MNIST('./dataset/mnist', train=True, download=True)
    mnist_test = MNIST('./dataset/mnist', train=False)
    X = mnist_train.data        # formerly mnist_train.train_data (deprecated)
    y = mnist_train.targets     # formerly mnist_train.train_labels (deprecated)
    test_X = mnist_test.data
    test_y = mnist_test.targets

    # Set parameters
    ml_penalty, cl_penalty = 0.1, 1
    if args.data == "Fashion":
        fashionmnist_train = FashionMNIST('./dataset/fashion_mnist',
                                          train=True,
                                          download=True)
        fashionmnist_test = FashionMNIST('./dataset/fashion_mnist',
                                         train=False)
        X = fashionmnist_train.data
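
The ml_penalty / cl_penalty pair suggests a pairwise-constrained (must-link / cannot-link) clustering objective. The repository's own constraint utilities are not shown here, so the helper below is purely hypothetical: a common way to sample labelled constraint pairs for such experiments.

import numpy as np

def generate_random_pairs(y, num_pairs, seed=0):
    """Sample index pairs; same label -> must-link, different label -> cannot-link."""
    rng = np.random.default_rng(seed)
    ml_pairs, cl_pairs = [], []
    while len(ml_pairs) + len(cl_pairs) < num_pairs:
        i, j = rng.integers(0, len(y), size=2)
        if i == j:
            continue
        if y[i] == y[j]:
            ml_pairs.append((i, j))
        else:
            cl_pairs.append((i, j))
    return np.array(ml_pairs), np.array(cl_pairs)
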
Example #4
LEARNING_RATE = 0.001
TRAIN_DIR = None
# LOG_DIR = 'data/summaries/mnist_slic_spatial'
LOG_DIR = 'data/summaries/mnist_quickshift_spatial'
SAVE_STEP = 250

AUGMENT_TRAIN_EXAMPLES = False
DROPOUT = 0.5
BATCH_SIZE = 64
MAX_STEPS = 15000
DISPLAY_STEP = 10
# FORM_FEATURES = SLIC_FEATURES
FORM_FEATURES = QUICKSHIFT_FEATURES
NUM_FEATURES = len(FORM_FEATURES) + 1

data = Data(DATA_DIR)

# segmentation_algorithm = slic_fixed(
#     num_segments=100, compactness=5, max_iterations=10, sigma=0)
segmentation_algorithm = quickshift_fixed(ratio=1,
                                          kernel_size=2,
                                          max_dist=2,
                                          sigma=0)

feature_extraction_algorithm = extract_features_fixed(FORM_FEATURES)


def preprocess_spatial_fixed(segmentation_algorithm,
                             feature_extraction_algorithm, node_size,
                             node_stride, delta, neighborhood_size,
                             connectivity):
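
slic_fixed and quickshift_fixed come from the surrounding project; a plausible reading is that they simply bind the segmentation hyperparameters and return a callable. A minimal sketch under that assumption, using skimage's quickshift (grayscale images are stacked to three channels because quickshift expects a colour image; the project's real helper may handle this differently):

import numpy as np
from skimage.segmentation import quickshift

def quickshift_fixed(ratio, kernel_size, max_dist, sigma):
    # Return a segmentation function with the hyperparameters fixed.
    def segment(image):
        if image.ndim == 2:  # grayscale -> fake RGB so quickshift accepts it
            image = np.dstack([image] * 3)
        return quickshift(image, ratio=ratio, kernel_size=kernel_size,
                          max_dist=max_dist, sigma=sigma, convert2lab=False)
    return segment
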
Example #5
from lib.models import UNet
from lib.datasets import MNIST
from lib.process import Trainer, Evaluator
import matplotlib.pyplot as plt
import torch
# CONSTANTS
MODEL_PATH = './u-net-mnist.pth'
EPOCHS = 1

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
dataset = MNIST()
model = UNet(n_channels=1, n_classes=1)
model = model.to(device)
trainer = Trainer(model=model, dataset=dataset, batch_size=16, device=device)
trainer.load_model(model, MODEL_PATH)
evaluator = Evaluator(dataset=dataset, batch_size=16, device=device)


def train():
    for _ in range(EPOCHS):
        loss = trainer.train_epoch(progress_bar=False)
        print('loss', loss)
        with torch.no_grad():
            score = evaluator.DCM(model=model)
            print('DCM score:', score)
    print('end of training')
    trainer.save_model(MODEL_PATH)


def eval():
    model.eval()
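    # --- hypothetical continuation; the original snippet is truncated here ---
    # It mirrors the validation step already used in train() above.
    with torch.no_grad():
        score = evaluator.DCM(model=model)
    print('DCM score:', score)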
Example #6
EPOCHS = args.epochs
MODEL_PATH = './{}-ds{}.pth'.format(args.net, args.dataset)
BATCH = args.batch
DEEPVESSEL = False

if args.pre_transform:
    if args.dataset.startswith('G'):
        pre_transform = Crop(30, 150, 256, 256)
    else:
        pre_transform = CropVessel12(30, 150, 256, 256)
else:
    pre_transform = None

if args.dataset == 'MNIST':
    dataset = MNIST(background=args.background)
elif args.dataset == 'GMNIST':
    dataset = GMNIST(background=args.background)
elif args.dataset == 'VESSEL12':
    dataset = VESSEL12(data_dir=args.vesseldir, pre_transform=pre_transform)
elif args.dataset == 'GVESSEL12':
    dataset = GVESSEL12(data_dir=args.vesseldir, pre_transform=pre_transform)
elif args.dataset == 'SVESSEL':
    dataset = SVESSEL(data_dir=args.svesseldir)
elif args.dataset == 'GSVESSEL':
    dataset = GSVESSEL(data_dir=args.svesseldir)

else:
    dataset = MNIST()
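# Suggestion (not in the original): the bare `else` above silently falls back to
# plain MNIST for any unrecognised --dataset value; raising instead makes typos
# fail fast, e.g.:
#     raise ValueError('unknown dataset: {}'.format(args.dataset))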

if args.net == 'GFCN':