示例#1
0
    def __init__(
        self,
        data_path,
        problem=PROBLEM_CLASSIFICATION,
        preproc_trn_X=None,
        preproc_trn_Y=None,
        preproc_val_X=None,
        preproc_val_Y=None,
    ):
        """Load the dataset at *data_path* for *problem* and register the
        optional preprocessing callables for train/val inputs and targets.
        """
        self.problem = problem

        # get_dataset yields the validation pair first, then the training pair.
        splits = get_dataset(data_path, problem)
        self.val_X, self.val_Y, self.trn_X, self.trn_Y = splits

        self.set_preprocessings(preproc_trn_X, preproc_trn_Y,
                                preproc_val_X, preproc_val_Y)
示例#2
0
import torch
import torch.nn as nn 
import torch.nn.functional as F 
from torch.autograd import Variable

import  os

from options.train_options import TrainOptions
from data.get_dataset import get_dataset
from models.get_model import get_model


# Training entry point: build dataset, loader, model and optimizer from the
# parsed options, then iterate epochs over the loader. (The loop body appears
# truncated in this view — the optimization step is not visible.)
if __name__ == '__main__':
    opt = TrainOptions().parse()  # parse command-line training options
    dataset = get_dataset(opt)
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=opt.batch_size,
        shuffle=True,
        collate_fn=dataset.collate_fn  # dataset supplies its own batching logic
    )

    model = get_model(opt)
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    # NOTE(review): len(dataloader) is already the number of batches, so
    # dividing by batch_size again looks wrong — confirm the intended value.
    num_batches = int(len(dataloader) / opt.batch_size)
    for epoch in range(opt.start_epochs, opt.start_epochs+opt.epochs):
        for i, (img_path, imgs, targets) in enumerate(dataloader):
            if len(opt.gpu_ids) > 0:
                # NOTE(review): moving the model to the device every batch is
                # redundant work — could be hoisted above both loops.
                model = model.to(opt.device)
                # NOTE(review): Variable is a no-op/deprecated in modern PyTorch.
                imgs = Variable(imgs.to(opt.device))
示例#3
0
def val(epoch, np_save_name, dataroot, dataset_mode, opt_name):
    """Run inference for checkpoint *epoch* and dump per-sample CAM volumes.

    Loads ``<checkpoint_dir>/<opt_name>/<opt_name>_<epoch>.pth``, classifies
    each validation sample, and saves a min-max-normalized activation map
    (resampled to 182x218x182) as ``<np_save_name>/<sample>.npy``.

    Returns:
        dict mapping sample name -> 'yes' (prediction == 1) or 'no'.

    Raises:
        ValueError: if *dataset_mode* is neither 'abide' nor 'Ours'
            (previously this fell through and crashed later with NameError).
    """
    opt = Options().parse()
    opt.dataroot = dataroot
    opt.dataset_mode = dataset_mode
    opt.name = opt_name

    opt.load_model = os.path.join(opt.checkpoint_dir, opt.name,
                                  opt.name + '_' + str(epoch) + '.pth')
    print(opt.load_model)

    dataset = get_dataset(opt)
    valLoader = torch.utils.data.DataLoader(dataset,
                                            batch_size=1,
                                            shuffle=False)
    print(len(valLoader))

    model = create_model(opt)
    if len(opt.gpu_ids) > 1:
        model = model.module
    model.load(opt.load_model)

    # Create the output directory once, up front (was re-checked per sample).
    os.makedirs(np_save_name, exist_ok=True)

    # Target volume size for the saved activation maps.
    input_d, input_h, input_w = 182, 218, 182

    dicts = {}
    for imgs, labels, img_paths in valLoader:
        inputs = {'img': imgs}
        if opt.dataset_mode == 'abide':
            name = img_paths[0].split('/')[-2]  # abide: parent dir is the id
        elif opt.dataset_mode == 'Ours':
            # strip 7-char suffix (presumably '.nii.gz' — confirm with caller)
            name = img_paths[0].split('/')[-1][:-7]
        else:
            raise ValueError('unsupported dataset_mode: %s' % opt.dataset_mode)

        model.set_input(inputs, mode='test')
        output = model.inference().cpu()
        dicts[name] = 'yes' if output.data == 1 else 'no'

        # Only the deepest feature map is used for the activation volume.
        _, _, conv5 = model.show_middle_results()
        conv5 = conv5.cpu().detach()

        # Class weights are fetched to keep parity with the (disabled)
        # weighted-CAM variant; currently the map is an unweighted sum.
        weights = model.get_weights('module.fc.weight')
        weights = weights[1, ...] if output.data == 1 else weights[0, ...]

        # Sum feature maps over channels (vectorized; was a Python loop).
        cam5 = conv5[0].sum(dim=0, keepdim=True).unsqueeze(0)  # (1, 1, d, h, w)

        # Min-max normalize to [0, 1]; a constant map previously produced
        # NaNs from 0/0 — emit zeros instead.
        lo, hi = torch.min(cam5), torch.max(cam5)
        if hi > lo:
            res5 = (cam5 - lo) / (hi - lo)
        else:
            res5 = torch.zeros_like(cam5)

        res5 = F.interpolate(res5,
                             size=(input_d, input_h, input_w),
                             mode='trilinear')
        res5 = res5.squeeze(0).squeeze(0)

        np.save(os.path.join(np_save_name, name + '.npy'), res5.numpy())
    return dicts
示例#4
0
def val(epoch):
    """Evaluate checkpoint *epoch* of the 'preschooler' model.

    Writes per-sample predictions and a summary metrics report to
    'preschooler_log.txt' and prints the report.

    Returns:
        Accuracy over the validation loader, as a percentage.
    """
    opt = Options().parse()
    opt.dataset_mode = 'Ours'
    opt.name = 'preschooler'

    opt.load_model = os.path.join(opt.checkpoint_dir, opt.name,
                                  opt.name + '_' + str(epoch) + '.pth')
    print(opt.load_model)

    dataset = get_dataset(opt)
    valLoader = torch.utils.data.DataLoader(dataset,
                                            batch_size=1,
                                            shuffle=False)
    print(len(valLoader))
    model = create_model(opt)
    if len(opt.gpu_ids) > 1:
        model = model.module
    model.load(opt.load_model)

    # Confusion-matrix counters and ground-truth class counts.
    TP = FP = FN = TN = 0
    label_tp = label_tn = 0  # per-class ground-truth totals (diagnostics)
    start_time = time.time()

    # The log file was previously opened with a bare open() and never
    # closed; the context manager guarantees it is flushed and closed.
    with open('preschooler_log.txt', 'w') as logger:
        for imgs, labels, img_paths in valLoader:
            inputs = {'img': imgs}
            name = img_paths[0].split('/')[-1]
            model.set_input(inputs, mode='test')
            output = model.inference().detach().cpu()

            logger.write("%d\t%d\t%s\n" % (int(output), int(labels), name))
            if labels.data == 1:
                label_tp += 1
            if labels.data == 0:
                label_tn += 1

            if output.data == 1 and labels.data == 1:
                TP += 1
            elif output.data == 0 and labels.data == 0:
                TN += 1
            elif output.data == 1 and labels.data == 0:
                FP += 1
            elif output.data == 0 and labels.data == 1:
                FN += 1

        # Guard every ratio against an absent class; sen/spe previously
        # raised ZeroDivisionError when TP+FN or TN+FP was zero, while
        # P/R/F1 were already guarded.
        P = float(TP) / (TP + FP) if TP + FP else 0
        R = float(TP) / (TP + FN) if TP + FN else 0
        F1 = (2 * P * R) / (P + R) if P + R else 0
        sen = float(TP) / (TP + FN) if TP + FN else 0  # == recall
        spe = float(TN) / (TN + FP) if TN + FP else 0
        acc = (float(TP) + float(TN)) / len(valLoader) * 100.0
        end_time = time.time()

        message = '\n------------------------results----------------------\n'
        message += '{:>10}\t{:>10.4f}\n'.format('acc:', acc)
        message += '{:>10}\t{:>10.4f}\n'.format('precision:', P)
        message += '{:>10}\t{:>10.4f}\n'.format('recall:', R)
        message += '{:>10}\t{:>10.4f}\n'.format('Specificity:', spe)
        message += '{:>10}\t{:>10.4f}\n'.format('Sensitivity:', sen)
        message += '{:>10}\t{:>10.4f}\n'.format('F1-measure:', F1)
        message += '{:>10}\t{:>10.4f}\n'.format(
            'avg_time:', (end_time - start_time) / len(valLoader))
        message += '------------------------------------------------------\n'
        print(message)
        logger.write(message + '\n')
    return acc
示例#5
0
def save_mid_report(start, end, train_t, val_t, test_t):
    """For each checkpoint epoch in [start, end), run the model over the
    val/test splits and append per-sample results to *val_t* / *test_t*.

    *train_t* is accepted for interface compatibility but currently unused
    (the training-split pass is disabled).
    """
    train_opt = Options().parse()
    val_opt = Options().parse()
    test_opt = Options().parse()

    # dataroot per split
    val_opt.dataroot = 'dataset/abide/labels/test7.txt'
    train_opt.dataroot = 'dataset/abide/labels/ours_train.txt'
    test_opt.dataroot = 'dataset/abide/labels/less9.txt'

    # dataset mode per split
    train_opt.dataset_mode = 'Ours'
    val_opt.dataset_mode = 'abide'
    test_opt.dataset_mode = 'abide'

    # model name used to locate checkpoints
    train_opt.name = 'ours_split'

    # The report files were previously opened with bare open() and only
    # closed on success; the context manager closes them even if an
    # epoch iteration raises.
    with open(val_t, 'w') as val_txt, open(test_t, 'w') as test_txt:
        for epoch in range(start, end):
            train_opt.load_model = os.path.join(
                train_opt.checkpoint_dir, train_opt.name,
                train_opt.name + '_' + str(epoch) + '.pth')
            test_opt.load_model = train_opt.load_model
            val_opt.load_model = train_opt.load_model
            print(train_opt.load_model)

            val_dataset = get_dataset(val_opt)
            test_dataset = get_dataset(test_opt)

            valLoader = torch.utils.data.DataLoader(
                val_dataset,
                batch_size=4,
                shuffle=False
            )
            testLoader = torch.utils.data.DataLoader(
                test_dataset,
                batch_size=4,
                shuffle=False
            )

            model = create_model(test_opt)
            if len(train_opt.gpu_ids) > 1:
                model = model.module
            model.load(test_opt.load_model)

            get_model_res(valLoader, model, val_txt)
            get_model_res(testLoader, model, test_txt)
示例#6
0
    def __init__(self, z_size, post_model, prior_model, obs_model,
                 mix_components, free_bits, h_size, depth, ds_list,
                 sdn_max_scale, sdn_min_scale, sdn_nfeat_0, sdn_nfeat_diff,
                 sdn_num_dirs, lrate, lrate_decay, root, dataset, num_workers,
                 batch, batch_val, ema_coef, random_seed, downsample_first,
                 sampling_temperature, distributed_backend, amp, gpus, nbits,
                 figsize, evaluation_mode, accumulate_grad_batches, beta_rate,
                 beta_final, **kw):
        """Configure the model from hyperparameters.

        Records the hyperparameters, builds a human-readable signature
        string, instantiates the observation/prior/posterior sub-models
        named by *obs_model*/*prior_model*/*post_model*, loads the
        train/val datasets from *root*, and constructs the encoder and
        decoder networks. ``beta_current`` starts at 0 for KL annealing
        governed by *beta_rate*/*beta_final*.
        """
        super().__init__()
        self.save_hyperparameters()

        # create model signature: encodes all hyperparameters in one string
        self.signature_string = \
            '_amp-{}_gpus-{}_lr-{}_lrd-{}_seed-{}_z-{}_max-{}_min-{}_nf0-{}_nfdif-{}_d-{}_b-{}_fb-{}_h-{}_depth-{}_' \
            'ds-{}-dsf-{}_ema-{}_tmp-{}_nw-{}-{}-{}-{}_mx-{}_bits-{}_ab-{}_bv-{}' \
            .format(amp, gpus, lrate, lrate_decay, random_seed, z_size, sdn_max_scale, sdn_min_scale, sdn_nfeat_0, sdn_nfeat_diff,
                    sdn_num_dirs, batch, free_bits, h_size, depth, list2string(ds_list), downsample_first, ema_coef,
                    sampling_temperature, num_workers, obs_model, post_model, distributed_backend, mix_components,
                    nbits, accumulate_grad_batches, batch_val)

        # initialize variables
        self.lrate = lrate
        self.random_seed = random_seed
        self.sdn_max_scale = sdn_max_scale
        self.sdn_min_scale = sdn_min_scale
        self.sdn_nfeat_0 = sdn_nfeat_0
        self.sdn_nfeat_diff = sdn_nfeat_diff
        self.sdn_num_dirs = sdn_num_dirs
        self.z_size = z_size
        self.h_size = h_size
        self.beta_rate = beta_rate
        self.beta_final = beta_final
        self.beta_current = 0  # => KL annealing starts from beta = 0
        self.obs_model_name = obs_model
        self.post_model_name = post_model
        self.mix_components = mix_components
        self.num_workers = num_workers
        self.batch = batch
        self.dataset = dataset
        self.figsize = figsize

        # dataset specifications
        channels, image_size = get_dataset_specifications(dataset)
        # conversion factor from nats to bits-per-dimension
        self.bpd_factor = (image_size * image_size * channels * np.log(2.))

        # make sure image size is a power of 2
        assert image_size in [1024, 512, 256, 128, 64,
                              32], "Unaccepted image format."

        # construct prior and observation models
        # NOTE(review): eval() turns the model-name strings into classes;
        # acceptable for trusted CLI values, unsafe on untrusted input.
        self.obs_model = eval(obs_model)(channels=channels,
                                         image_size=image_size,
                                         nbits=nbits,
                                         mix_components=mix_components)
        self.prior_model = eval(prior_model)()
        self.post_model = eval(post_model)()

        # load data (transform comes from the observation model)
        self.trainset, self.valset = get_dataset(
            root=root,
            dataset=dataset,
            transform=self.obs_model.get_transform())

        # keeping track of SDNLayer directions and state sizes, and also of current scale
        self.cur_dir = 0

        encoder_list, decoder_list = self.create_enc_dec_networks(
            channels, sdn_nfeat_0)

        self.encoder = EncWrapper(encoder_list)
        self.decoder = nn.Sequential(*decoder_list)

        print(self.encoder)
        print(self.decoder)

        # beta vae loss by default
        self.vae_loss = self.beta_vae_loss

        # initialize weights
        self.apply(weights_init)

        # Evaluate disentanglement metrics and related vars
        self.num_channels = channels
        self.image_size = image_size
        self.evaluation_metric = ['factor_vae_metric', 'beta_vae_sklearn']
示例#7
0
    def __init__(self, z_size, post_model, prior_model, obs_model,
                 mix_components, free_bits, h_size, depth, ds_list,
                 sdn_max_scale, sdn_min_scale, sdn_nfeat_0, sdn_nfeat_diff,
                 sdn_num_dirs, lrate, lrate_decay, root, dataset, num_workers,
                 batch, batch_val, ema_coef, random_seed, downsample_first,
                 sampling_temperature, distributed_backend, amp, gpus, nbits,
                 figsize, evaluation_mode, accumulate_grad_batches, **kw):
        """Configure the ladder VAE from hyperparameters.

        Records hyperparameters, builds the run-signature string, creates
        the observation model, loads the datasets, then assembles the
        network: a first (bottom encoder) layer, *depth* LadderLayer
        blocks, a learnable top-level constant ``h``, and a last (bottom
        decoder) layer. SDN layers are used only at spatial scales within
        [sdn_min_scale, sdn_max_scale].
        """
        super().__init__()

        # save HPs to checkpoints
        self.save_hyperparameters()

        # create model signature: encodes all hyperparameters in one string
        self.signature_string = \
            '_amp-{}_gpus-{}_lr-{}_lrd-{}_seed-{}_z-{}_max-{}_min-{}_nf0-{}_nfdif-{}_d-{}_b-{}_fb-{}_h-{}_depth-{}_' \
            'ds-{}-dsf-{}_ema-{}_tmp-{}_nw-{}-{}-{}-{}_mx-{}_bits-{}_ab-{}_bv-{}' \
            .format(amp, gpus, lrate, lrate_decay, random_seed, z_size, sdn_max_scale, sdn_min_scale, sdn_nfeat_0, sdn_nfeat_diff,
                    sdn_num_dirs, batch, free_bits, h_size, depth, list2string(ds_list), downsample_first, ema_coef,
                    sampling_temperature, num_workers, obs_model, post_model, distributed_backend, mix_components,
                    nbits, accumulate_grad_batches, batch_val)

        # dataset specifications
        channels, image_size = get_dataset_specifications(dataset=dataset)
        # conversion factor from nats to bits-per-dimension
        self.bpd_factor = image_size * image_size * channels * np.log(2.)
        self.channels = channels
        self.image_size = image_size

        # initialize variables
        self.lrate = lrate
        self.lrate_decay = lrate_decay
        self.sampling_temperature = sampling_temperature
        self.amp = amp
        self.num_workers = num_workers
        self.batch = batch
        # fall back to the training batch size when batch_val is not positive
        self.batch_val = batch_val if batch_val > 0 else batch
        self.figsize = figsize
        self.ema_coef = ema_coef

        # construct observation model
        # NOTE(review): eval() turns the model-name strings into classes;
        # acceptable for trusted CLI values, unsafe on untrusted input.
        self.obs_model = eval(obs_model)(channels=channels,
                                         image_size=image_size,
                                         nbits=nbits,
                                         mix_components=mix_components)

        # load data; the training set is skipped in evaluation mode
        self.trainset, self.valset = get_dataset(
            root=root,
            dataset=dataset,
            transform=self.obs_model.get_transform(),
            load_trainset=(not evaluation_mode))

        # gradient scaler for amp (no-op when amp is disabled)
        self.scaler = GradScaler(enabled=self.amp)

        # padding for 28x28 images (pad to 32 so the size is a power of 2)
        extra_padding = 0 if image_size != 28 else 2

        # keeping track of SDNLayer directions and number of channels, and also of current scale
        self.cur_dir = 0
        cur_num_feat = sdn_nfeat_0
        top_input_dim = (image_size + 2 * extra_padding)
        if downsample_first:
            top_input_dim = top_input_dim // 2

        # parameters for bottom layers: stride-2 conv when downsampling first
        bot_kernel_size, bot_stride, bot_padding = (
            4, 2, 1) if downsample_first else (3, 1, 1)

        # construct bottom layer of encoder i.e. first layer
        self.first_layer = nn.Sequential(
            nn.Conv2d(channels, h_size, bot_kernel_size, bot_stride,
                      bot_padding), nn.ZeroPad2d(extra_padding))

        # construct ladder network
        self.ladder_layers = nn.ModuleList()
        for i in range(depth):
            # set up the flags: downsample at scales listed in ds_list
            # (but never below a 1x1 map)
            downsample = (i in ds_list and top_input_dim > 1)
            # use sdn or not: only within the configured scale range
            use_sdn = (sdn_max_scale >= top_input_dim >= sdn_min_scale)
            if use_sdn:
                sdn_dirs_a = self._get_dirs(sdn_num_dirs)
                sdn_dirs_b = self._get_dirs(sdn_num_dirs)
            else:
                sdn_dirs_a = None
                sdn_dirs_b = None
            # add layer (posterior/prior classes resolved from their names)
            self.ladder_layers.append(
                LadderLayer(post_model=eval(post_model)(z_size=z_size),
                            prior_model=eval(prior_model)(z_size=z_size),
                            z_size=z_size,
                            h_size=h_size,
                            free_bits=free_bits,
                            downsample=downsample,
                            sdn_num_features=cur_num_feat,
                            sdn_dirs_a=sdn_dirs_a,
                            sdn_dirs_b=sdn_dirs_b,
                            use_sdn=use_sdn,
                            sampling_temperature=sampling_temperature))
            # correct input dimensionality for top layer; when downsampling
            # an SDN scale, also shrink the SDN feature count (floor of 50)
            if downsample:
                top_input_dim = max(1, top_input_dim // 2)
                if use_sdn:
                    cur_num_feat = max(50, cur_num_feat - sdn_nfeat_diff)

        # learnable constant which is fed to the top-most layer top-down pass
        self.register_parameter('h', torch.nn.Parameter(torch.zeros(h_size)))
        self.h_shape = torch.Size([h_size, top_input_dim, top_input_dim])

        # construct bottom layer of decoder i.e. last layer:
        # SDN-based when SDN covers the full image scale, plain conv otherwise
        if sdn_max_scale >= image_size:
            self.last_layer = nn.Sequential(
                Crop2d(extra_padding), nn.ELU(True),
                ResSDNLayer(in_ch=h_size,
                            out_ch=self.obs_model.params_per_dim(),
                            num_features=sdn_nfeat_0,
                            dirs=self._get_dirs(4),
                            kernel_size=bot_kernel_size,
                            stride=bot_stride,
                            padding=bot_padding,
                            upsample=downsample_first))
        else:
            # transposed conv undoes the initial downsampling, if any
            cnn_module = nn.ConvTranspose2d if downsample_first else nn.Conv2d
            self.last_layer = nn.Sequential(
                Crop2d(extra_padding), nn.ELU(True),
                cnn_module(h_size, self.obs_model.params_per_dim(),
                           bot_kernel_size, bot_stride, bot_padding))

        # initialize NN weights
        self.apply(weights_init)