def parse(self):
    """Parse command-line options, configure CUDA, then print and persist them.

    Returns the populated options namespace (also stored on ``self.opt``).
    """
    if not self.initialized:
        self.initialize()
    self.opt = self.parser.parse_args()
    # Record whether this run is training or testing.
    self.opt.isTrain = self.isTrain

    # '--gpu_ids' arrives as a comma-separated string; keep non-negative ids.
    requested = [int(token) for token in self.opt.gpu_ids.split(',')]
    self.opt.gpu_ids = [gpu for gpu in requested if gpu >= 0]

    # Make the first requested GPU the default CUDA device.
    if self.opt.gpu_ids:
        torch.cuda.set_device(self.opt.gpu_ids[0])

    options = vars(self.opt)

    print('------------ Options -------------')
    for key, value in sorted(options.items()):
        print('%s: %s' % (str(key), str(value)))
    print('-------------- End ----------------')

    # Persist the same listing under <checkpoints_dir>/<name>/opt.txt.
    expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
    util.mkdirs(expr_dir)
    file_name = os.path.join(expr_dir, 'opt.txt')
    with open(file_name, 'wt') as opt_file:
        opt_file.write('------------ Options -------------\n')
        for key, value in sorted(options.items()):
            opt_file.write('%s: %s\n' % (str(key), str(value)))
        opt_file.write('-------------- End ----------------\n')
    return self.opt
Ejemplo n.º 2
0
    def parse(self):
        """Parse options, normalise gpu_ids, then print and save all options."""
        if not self.initialized:
            self.initialize()
        self.opt = self.parser.parse_args()
        self.opt.isTrain = self.isTrain  # training vs. testing mode

        # Translate the comma-separated '--gpu_ids' string into a list of ints,
        # discarding any negative entries (used to mean "CPU only").
        id_tokens = self.opt.gpu_ids.split(',')
        self.opt.gpu_ids = []
        for token in id_tokens:
            gpu_id = int(token)
            if gpu_id >= 0:
                self.opt.gpu_ids.append(gpu_id)

        # Collect every public attribute of the namespace for reporting.
        options = {attr: getattr(self.opt, attr)
                   for attr in dir(self.opt) if not attr.startswith('_')}

        print('------------ Options -------------')
        for key, value in sorted(options.items()):
            print('%s: %s' % (str(key), str(value)))
        print('-------------- End ----------------')

        # Archive the options under <checkpoints_dir>/<name>/opt.txt.
        expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write('------------ Options -------------\n')
            for key, value in sorted(options.items()):
                opt_file.write('%s: %s\n' % (str(key), str(value)))
            opt_file.write('-------------- End ----------------\n')
        return self.opt
    def print_options(self, opt):
        """Pretty-print all options, flagging values that differ from the
        parser defaults, and save the listing to
        <checkpoints_dir>/<name>/opt.txt."""
        lines = ['----------------- Options ---------------']
        for key, value in sorted(vars(opt).items()):
            default = self.parser.get_default(key)
            # Annotate only the options the user actually overrode.
            suffix = '' if value == default else '\t[default: %s]' % str(default)
            lines.append('{:>25}: {:<30}{}'.format(str(key), str(value), suffix))
        lines.append('----------------- End -------------------')
        message = '\n'.join(lines)
        print(message)

        # Persist alongside the checkpoints for reproducibility.
        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write(message)
            opt_file.write('\n')
Ejemplo n.º 4
0
    def parse(self):
        """Parse arguments, echo them to stdout, and archive them on disk."""
        if not self.initialized:
            self.initialize()
        self.opt = self.parser.parse_args()
        self.opt.isTrain = self.isTrain  # distinguishes train from test runs

        options = vars(self.opt)

        print('------------ Options -------------')
        for key, value in sorted(options.items()):
            print('%s: %s' % (str(key), str(value)))
        print('-------------- End ----------------')

        # Write the same listing to <checkpoints_dir>/<name>/opt.txt.
        expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        report = ['------------ Options -------------\n']
        report.extend('%s: %s\n' % (str(key), str(value))
                      for key, value in sorted(options.items()))
        report.append('-------------- End ----------------\n')
        with open(file_name, 'wt') as opt_file:
            opt_file.writelines(report)
        return self.opt
from models.models import create_model
import os
import util.util as util
from torch.autograd import Variable
import torch.nn as nn

# Pin the loader to deterministic single-image behaviour so every sample is
# visited exactly once, unmodified.
# NOTE(review): TrainOptions / CreateDataLoader are assumed to be imported
# elsewhere in the full file -- they are not defined in this fragment.
opt = TrainOptions().parse()
opt.nThreads = 1          # single loader worker: deterministic order
opt.batchSize = 1 
opt.serial_batches = True 
opt.no_flip = True        # disable augmentation for feature extraction
opt.instance_feat = True

name = 'features'
save_path = os.path.join(opt.checkpoints_dir, opt.name)

############ Initialize #########
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
model = create_model(opt)
# Output directory for the precomputed feature maps: <dataroot>/<phase>_feat.
util.mkdirs(os.path.join(opt.dataroot, opt.phase + '_feat'))

######## Save precomputed feature maps for 1024p training #######
for i, data in enumerate(dataset):
	print('%d / %d images' % (i+1, dataset_size)) 
	# Encode each image (conditioned on its instance map) into a feature map.
	# NOTE(review): Variable(..., volatile=True) is the pre-0.4 PyTorch idiom
	# for inference; modern PyTorch uses torch.no_grad() instead.
	feat_map = model.module.netE.forward(Variable(data['image'].cuda(), volatile=True), data['inst'].cuda())
	feat_map = nn.Upsample(scale_factor=2, mode='nearest')(feat_map)
	image_numpy = util.tensor2im(feat_map.data[0])
	# Mirror the label path into the feature directory for this sample.
	save_path = data['path'][0].replace('/train_label/', '/train_feat/')
	util.save_image(image_numpy, save_path)
Ejemplo n.º 6
0
 def option_file_path(self, opt, makedir=False):
     """Return the option-file path prefix '<checkpoints_dir>/<name>/opt',
     optionally creating the directory first when *makedir* is True."""
     expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
     if makedir:
         util.mkdirs(expr_dir)
     return os.path.join(expr_dir, 'opt')
import sys
from util import util
import numpy as np
import argparse

# Build symlinked ILSVRC2012 dataset layouts instead of copying files.
# NOTE(review): 'os' is used below but not imported in this fragment --
# presumably imported elsewhere in the full file; verify.
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--in_path', type=str, default='/data/big/dataset/ILSVRC2012')
parser.add_argument('--out_path', type=str, default='./dataset/ilsvrc2012/')

opt = parser.parse_args()
orig_path = opt.in_path
print('Copying ILSVRC from...[%s]'%orig_path)

# Copy over part of training set (for initializer)
trn_small_path = os.path.join(opt.out_path,'train_small')
util.mkdirs(opt.out_path)
util.mkdirs(trn_small_path)
train_subdirs = os.listdir(os.path.join(opt.in_path,'train'))
# Only the first 10 class subdirectories are linked into the small set.
for train_subdir in train_subdirs[:10]:
	os.symlink(os.path.join(opt.in_path,'train',train_subdir),os.path.join(trn_small_path,train_subdir))
print('Making small training set in...[%s]'%trn_small_path)

# Copy over whole training set
trn_path = os.path.join(opt.out_path,'train')
util.mkdirs(opt.out_path)
# A single symlink exposes the full training directory under out_path.
os.symlink(os.path.join(opt.in_path,'train'),trn_path)
print('Making training set in...[%s]'%trn_path)

# Copy over subset of ILSVRC12 val set for colorization val set
val_path = os.path.join(opt.out_path,'val/imgs')
util.mkdirs(val_path)
Ejemplo n.º 8
0
import util.util as util
from torch.autograd import Variable
import torch.nn as nn

# Deterministic single-image configuration for feature precomputation.
# NOTE(review): TrainOptions / CreateDataLoader / create_model / os are used
# below but not defined in this fragment -- presumably imported elsewhere.
opt = TrainOptions().parse()
opt.nThreads = 1
opt.batchSize = 1
opt.serial_batches = True
opt.no_flip = True
opt.instance_feat = True

name = 'features'
save_path = os.path.join(opt.checkpoints_dir, opt.name)

############ Initialize #########
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
model = create_model(opt)
# Feature maps are written next to the data under <dataroot>/<phase>_feat.
util.mkdirs(os.path.join(opt.dataroot, opt.phase + '_feat'))

# Save precomputed feature maps for 1024p training #
for i, data in enumerate(dataset):
    print('%d / %d images' % (i + 1, dataset_size))
    # Encode image + instance map; Variable(..., volatile=True) is the
    # pre-0.4 PyTorch inference idiom (no autograd bookkeeping).
    feat_map = model.module.netE.forward(
        Variable(data['image'].cuda(), volatile=True), data['inst'].cuda())
    feat_map = nn.Upsample(scale_factor=2, mode='nearest')(feat_map)
    image_numpy = util.tensor2im(feat_map.data[0])
    # Save under the matching *_feat path derived from the label path.
    save_path = data['path'][0].replace('/train_label/', '/train_feat/')
    util.save_image(image_numpy, save_path)
Ejemplo n.º 9
0
                cv2.imwrite((output_path + "/images/image%d.tif" % index), src_roi)
                cv2.imwrite((output_path + "/labels/label%d.tif" % index), label_roi)
                index += 1


def change_label(label_dir):
    """Binarise every mask in *label_dir* in place: pixel 255 becomes 1.

    Each file is opened as 8-bit grayscale ("L"), every pixel value is
    integer-divided by 255, and the result overwrites the original as TIFF.
    """
    for file_name in tqdm(os.listdir(label_dir)):
        mask = m.open(label_dir + "/" + file_name).convert("L")
        # 255 // 255 == 1 while 0 // 255 == 0, so only foreground changes.
        binarized = mask.point(lambda value: value // 255)
        binarized.save(label_dir + "/" + file_name, 'tif')


if __name__ == "__main__":
    # create the paths of created datasets
    util.mkdirs(['./mass_inria/trainA/images', './mass_inria/trainA/labels',
                 './mass_inria/trainB/images', './mass_inria/trainB/labels'])
    # Tile the Massachusetts dataset into domain A.
    # NOTE(review): createSetsA/createSetsB and the mass_*/inria_* variables
    # are defined elsewhere in the full file -- not visible in this fragment.
    mass_output_path = "./mass_inria/trainA"
    createSetsA(mass_images_path, mass_labels_path, mass_image_size, mass_output_path)

    # Tile the Inria training split into domain B.
    inria_output_path_train = "./mass_inria/trainB"
    createSetsB(inria_images_path_train, inria_labels_path_train, inria_image_size, inria_output_path_train)

    # Convert both label sets from {0, 255} to {0, 1}.
    trainA_label_dir = "./mass_inria/trainA/labels"
    change_label(trainA_label_dir)
    trainB_label_dir = "./mass_inria/trainB/labels"
    change_label(trainB_label_dir)
import torchvision.utils as vutils
from util import util

# Test-time configuration: one image at a time, fixed order, no augmentation.
# NOTE(review): TestOptions / CreateDataLoader / create_model / os are not
# defined in this fragment -- presumably imported elsewhere in the full file.
opt = TestOptions().parse()
opt.nThreads = 1
opt.batchSize = 1
opt.serial_batches = True
opt.no_flip = True

data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
model = create_model(opt)
opt.is_psnr = True  # request PSNR computation from the model

# All result images are collected under <results_dir>/result.
summary_dir = os.path.join(opt.results_dir, 'result')
util.mkdirs([summary_dir])

for i, data in enumerate(dataset):
    if i >= opt.how_many:
        break
    model.set_input(data)
    model.test()

    print('\nimage: ', i, '/', len(dataset))

    visuals = model.get_current_visuals()
    print('%04d: process image... ' % (i))
    for key, val in visuals.items():
        vutils.save_image(val,
                          '{}/{}_{}.png'.format(summary_dir, i, key),
                          nrow=1,
Ejemplo n.º 11
0
            start_row = row * image_size
            end_row = start_row + image_size
            for colom in range(8):
                start_colom = colom * image_size
                end_colom = start_colom + image_size

                src_roi = image[start_colom: end_colom, start_row: end_row, :]
                label_roi = label[start_colom: end_colom, start_row: end_row, :]
                # 切割图像然后保存
                cv2.imwrite((output_path + "/images/image%d.tif" % index), src_roi)
                cv2.imwrite((output_path + "/labels/label%d.tif" % index), label_roi)
                index += 1


def change_B_label(label_dir):
    """Rewrite every label image in *label_dir* so foreground becomes 1.

    Opens each file as grayscale, divides each pixel by 255 (255 -> 1,
    0 -> 0), and saves the result back over the original in TIFF format.
    """
    entries = os.listdir(label_dir)
    for entry in tqdm(entries):
        grayscale = m.open(label_dir + "/" + entry).convert("L")
        # Integer division maps the two-valued mask {0, 255} onto {0, 1}.
        remapped = grayscale.point(lambda px: px // 255)
        remapped.save(label_dir + "/" + entry, 'tif')


if __name__ == "__main__":
    # Build the Inria validation split (domain B) and binarise its labels.
    # NOTE(review): createSets and the inria_* variables are defined elsewhere
    # in the full file -- not visible in this fragment.
    util.mkdirs(['./mass_inria/valB/images', './mass_inria/valB/labels'])
    inria_output_path_val = "./mass_inria/valB"
    createSets(inria_images_path_val, inria_labels_path_val, inria_image_size_val, inria_output_path_val)
    valB_label_dir = "./mass_inria/valB/labels"
    change_B_label(valB_label_dir)
Ejemplo n.º 12
0
    elif host in [
            'danbury', 'denton', 'elkin', 'elkpark', 'dublin', 'dobson',
            'eureka', 'erwin', 'enfield', 'elmcity'
    ]:
        opt.checkpoints_dir = '/data/zhenghan/checkpoints'
    else:
        raise ValueError(
            "cannot decide checkpoints_dir, server '%s' not recognized." %
            host)
# Echo every option to stdout and to <checkpoints_dir>/<name>/log.txt.
args = vars(opt)
print('------------ Options -------------')
for k, v in sorted(args.items()):
    print('%s: %s' % (str(k), str(v)))
print('-------------- End ----------------')
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
util.mkdirs(expr_dir)
opt.file_name = os.path.join(expr_dir, 'log.txt')
with open(opt.file_name, 'wt') as log_file:
    log_file.write('------------ Options -------------\n')
    for k, v in sorted(args.items()):
        log_file.write('%s: %s\n' % (str(k), str(v)))
    log_file.write('-------------- End ----------------\n')

# Instantiate the model class selected by the options.
# NOTE(review): getModel / getDataset are defined elsewhere in the full file.
Model = getModel(opt)
model = Model()
'''
model.initialize(opt)
print("model [%s] was created" % (model.name()))
'''
MRFDataset = getDataset(opt)
Ejemplo n.º 13
0
def batch_generation_from_update(batch_size,
                                 save_path,
                                 fname_list,
                                 features_mat,
                                 checkpoints_dir,
                                 classname,
                                 black=True):
    ''' Generate decoded segmentation map from input features
        Args: batch_size (int), batch size for the VAE to take in
              save_path (str), save generated masks to path
              fname_list (list of str), save generated masks with fname
              features_mat (numpy array): input features to be decoded,
                                          in the order of fname_list
              checkpoints_dir (str), load VAE weights from path
              classname (str), label taxonomy defined by dataset with classname
              black (boolean), black is True for regular generation;
                               black is False for debugging, thus the generated mask
                               is not in the format for cGAN input
        Raises: NotImplementedError for unsupported encoder/decoder configs.
    '''
    # Create and load model weights in
    vae_opt = initialize_option(classname)
    vae_opt.batchSize = batch_size
    vae_opt.checkpoints_dir = checkpoints_dir

    vae_util.mkdirs(save_path)

    if vae_opt.share_decoder and vae_opt.share_encoder:
        if vae_opt.separate_clothing_unrelated:
            from models.separate_clothing_encoder_models import create_model as vae_create_model
        else:
            print('Only supports separating clothing and clothing-irrelevant')
            raise NotImplementedError
    else:
        print('Only supports sharing encoder and decoder among all parts')
        raise NotImplementedError

    model = vae_create_model(vae_opt)

    # Choose the label renderer once instead of branching on `black` for
    # every single image (the two branches differed only in this call).
    to_image = vae_util.tensor2label_black if black else vae_util.tensor2label

    def _decode_and_save(feature_rows, fnames):
        # Decode one batch of feature rows and save one PNG per row.
        generated = model.generate_from_random(
            torch.Tensor(feature_rows).cuda())
        for tensor, fname in zip(generated.data, fnames):
            vae_util.save_image(
                to_image(tensor, vae_opt.output_nc, normalize=True),
                os.path.join(save_path, '%s.png' % fname))

    # Forward input into the model, one full batch at a time.
    dataset_size = len(fname_list)
    # Floor division: int(a / b) goes through float and can misround for
    # very large sizes; `//` is exact for non-negative ints.
    num_batch = dataset_size // batch_size
    elem_in_last_batch = dataset_size % batch_size
    for i in range(num_batch):
        _decode_and_save(
            features_mat[i * batch_size:(i + 1) * batch_size, :],
            fname_list[i * batch_size:(i + 1) * batch_size])
    # Remaining instances that do not fill a whole batch are generated here.
    if elem_in_last_batch > 0:
        _decode_and_save(features_mat[-elem_in_last_batch:, :],
                         fname_list[-elem_in_last_batch:])
Ejemplo n.º 14
0
    if h == 0:
        h = base
    if w == 0:
        w = base

    if (h == oh) and (w == ow):
        return img
    return img.resize((w, h), method)


# Build the model and switch it to inference mode.
# NOTE(review): create_model / opt / transforms / util / time come from
# elsewhere in the full file -- they are not defined in this fragment.
model, num_params_G, num_params_D = create_model(opt)
model.eval()

# All test results go under <results_dir>/test.
rlt_dir = os.path.join(opt.results_dir, 'test')
util.mkdirs([rlt_dir])

# Three separate transform pipelines for masked image, mask, and segmentation.
# NOTE(review): all three are currently identical (ToTensor only); they are
# kept separate presumably so per-input normalisation can be added later.
transform_list = []
transform_list += [transforms.ToTensor()]
mskimg_transform = transforms.Compose(transform_list)
transform_list = []
transform_list += [transforms.ToTensor()]
msk_transform = transforms.Compose(transform_list)
transform_list = []
transform_list += [transforms.ToTensor()]
seg_transform = transforms.Compose(transform_list)

start_time = time.time()
for i, data in enumerate(dataset):
    with torch.no_grad():
        msk_img_path = data['path_mskimg'][0]
Ejemplo n.º 15
0
from util.visualizer import Visualizer
from util.visualizer import save_segment_result
from util.metrics import RunningScore
from util import util
import time
import os
import numpy as np

if __name__ == '__main__':
    # Load the training configuration.
    # NOTE(review): TrainOptions / create_dataset / create_model come from
    # elsewhere in the full file -- they are not imported in this fragment.
    opt = TrainOptions().parse()

    # Set up directories for storing validation result visualisations.
    web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'val')
    image_dir = os.path.join(web_dir, 'images')
    util.mkdirs([web_dir, image_dir])

    # Load the training dataset.
    dataset_train = create_dataset(opt)
    dataset_train_size = len(dataset_train)
    print('The number of training images = %d' % dataset_train_size)

    # Switch the phase option to obtain the validation split.
    opt.phase = "val"
    dataset_val = create_dataset(opt)
    dataset_val_size = len(dataset_val)
    print('The number of valling images = %d' % dataset_val_size)


    # Create the model.
    model = create_model(opt)
Ejemplo n.º 16
0
from util import util
import numpy as np
import argparse

# Expose the Places365 training data under the project layout via symlinks.
# NOTE(review): 'os' is used below but not imported in this fragment --
# presumably imported elsewhere in the full file; verify.
parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--in_path', type=str, default='/data/big/dataset/')
parser.add_argument('--out_path', type=str, default='../datasets/Place/')

opt = parser.parse_args()
# NOTE(review): orig_path is hard-coded and ignores --in_path for this
# message only; the symlinks below still use opt.in_path.
orig_path = '/home/pzh/Place/places365_small/train'
print('Copying ILSVRC from...[%s]' % orig_path)

# Copy over part of training set (for initializer)
trn_small_path = os.path.join(opt.out_path, 'train_small')
util.mkdirs(opt.out_path)
util.mkdirs(trn_small_path)
train_subdirs = os.listdir(os.path.join(opt.in_path, 'train'))
# Only the first 5 class subdirectories are linked into the small set.
for train_subdir in train_subdirs[:5]:
    os.symlink(os.path.join(opt.in_path, 'train', train_subdir),
               os.path.join(trn_small_path, train_subdir))
print('Making small training set in...[%s]' % trn_small_path)

# Copy over whole training set
trn_path = os.path.join(opt.out_path, 'train')
util.mkdirs(opt.out_path)
os.symlink(os.path.join(opt.in_path, 'train'), trn_path)
print('Making training set in...[%s]' % trn_path)
'''
# Copy over subset of ILSVRC12 val set for colorization val set
val_path = os.path.join(opt.out_path, 'val/imgs')
Ejemplo n.º 17
0
    def parse(self):
        """Parse options, select a host-specific checkpoints directory,
        apply option-dependent fixups, then print and save all options.

        Returns the populated options namespace (also stored on self.opt).
        """
        if not self.initialized:
            self.initialize()
        self.opt = self.parser.parse_args()
        self.opt.isTrain = self.isTrain  # train or test
        '''
        str_ids = self.opt.gpu_ids.split(',')
        self.opt.gpu_ids = []
        for str_id in str_ids:
            id = int(str_id)
            if id >= 0:
                self.opt.gpu_ids.append(id)
        

        # set gpu ids
        if len(self.opt.gpu_ids) > 0:
            torch.cuda.set_device(self.opt.gpu_ids[0])
        '''
        # On shared servers, route checkpoints to host-specific storage;
        # [:-1] strips the trailing newline from the `hostname` output.
        if not self.opt.onMAC:
            host = subprocess.check_output('hostname').decode('utf-8')[:-1]

            if host == 'stilson' or host == 'andrew' or host == 'wiggin':
                self.opt.checkpoints_dir = '/raid/zhenghan/checkpoints'
            elif host == 'badin' or host == 'bogue' or host == 'burgaw':
                self.opt.checkpoints_dir = '/shenlab/local/zhenghan/checkpoints'
            else:
                raise ValueError(
                    "cannot decide checkpoints_dir, server '%s' not recognized."
                    % host)

        # if data is loaded on GPU, nThreads should be 0 and gpu_ids must not be []
        if self.opt.data_GPU:
            self.opt.nThreads = 0
            # assert len(self.opt.gpu_ids) > 0, "set data_GPU as true but gpu_ids is []"

        # Joint T1&T2 prediction requires exactly two output channels.
        if self.opt.goal_type == 'T1&T2':
            assert self.opt.output_nc == 2

        # This generator variant implies PCA preprocessing.
        if self.opt.which_model_netG == 'simple_conv_small_PCA':
            self.opt.PCA = True

        args = vars(self.opt)

        print('------------ Options -------------')
        for k, v in sorted(args.items()):
            print('%s: %s' % (str(k), str(v)))
        print('-------------- End ----------------')

        # save to the disk: checkpoints dir when training, results dir when testing
        if self.isTrain:
            expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
        else:
            expr_dir = os.path.join(self.opt.results_dir, self.opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt_' + self.opt.phase + '.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write('------------ Options -------------\n')
            for k, v in sorted(args.items()):
                opt_file.write('%s: %s\n' % (str(k), str(v)))
            opt_file.write('-------------- End ----------------\n')
        return self.opt