コード例 #1
0
def split_samples(args):
    """Randomly split a preprocessed dataset into test/train sample archives."""
    assets = AssetManager(args.base_dir)

    archive = np.load(assets.get_preprocess_file_path(args.input_data_name))
    imgs, classes, contents = archive['imgs'], archive['classes'], archive['contents']

    n_classes = np.unique(classes).size
    n_samples = imgs.shape[0]
    n_test = int(n_samples * args.test_split)

    # Draw test indices without replacement; every remaining sample is train.
    test_idx = np.random.choice(n_samples, size=n_test, replace=False)
    train_mask = ~np.isin(np.arange(n_samples), test_idx)

    for out_name, selector in ((args.test_data_name, test_idx),
                               (args.train_data_name, train_mask)):
        np.savez(
            file=assets.get_preprocess_file_path(out_name),
            imgs=imgs[selector],
            classes=classes[selector],
            contents=contents[selector],
            n_classes=n_classes
        )
コード例 #2
0
ファイル: lord.py プロジェクト: matech96/lord
def train_encoders(args):
	"""Train encoders for an already-trained Converter, backing up the
	latent-only (GLO) model first."""
	assets = AssetManager(args.base_dir)
	model_dir = assets.get_model_dir(args.model_name)
	tensorboard_dir = assets.get_tensorboard_dir(args.model_name)

	archive = np.load(assets.get_preprocess_file_path(args.data_name))
	classes = archive['classes']
	imgs = archive['imgs'].astype(np.float32) / 255.0

	converter = Converter.load(model_dir, include_encoders=False)

	# Snapshot the latent model once, before encoder training mutates it.
	glo_backup_dir = os.path.join(model_dir, args.glo_dir)
	if not os.path.exists(glo_backup_dir):
		os.mkdir(glo_backup_dir)
		converter.save(glo_backup_dir)

	encoder_cfg = default_config['train_encoders']
	converter.train_encoders(
		imgs=imgs,
		classes=classes,
		batch_size=encoder_cfg['batch_size'],
		n_epochs=encoder_cfg['n_epochs'],
		model_dir=model_dir,
		tensorboard_dir=tensorboard_dir
	)

	converter.save(model_dir)
コード例 #3
0
def train_encoders(args):
	"""Train encoders on top of the latent model, restoring from the
	'latent' backup directory when one already exists."""
	assets = AssetManager(args.base_dir)
	model_dir = assets.get_model_dir(args.model_name)
	tensorboard_dir = assets.get_tensorboard_dir(args.model_name)

	archive = np.load(assets.get_preprocess_file_path(args.data_name))
	imgs = archive['imgs'].astype(np.float32) / 255.0

	backup_dir = os.path.join(model_dir, 'latent')
	if os.path.exists(backup_dir):
		# A backup exists: resume from the pristine latent-only model.
		lord = Lord.load(backup_dir, include_encoders=False)
	else:
		# First run: snapshot the latent model before encoder training alters it.
		lord = Lord.load(model_dir, include_encoders=False)
		os.mkdir(backup_dir)
		lord.save(backup_dir)

	encoder_cfg = default_config['train_encoders']
	lord.train_encoders(
		imgs=imgs,
		classes=archive['classes'],
		batch_size=encoder_cfg['batch_size'],
		n_epochs=encoder_cfg['n_epochs'],
		model_dir=model_dir,
		tensorboard_dir=tensorboard_dir
	)

	lord.save(model_dir)
コード例 #4
0
ファイル: lord.py プロジェクト: lenechk/deepLearningLab
def train(args):
    """Train the latent stage of a Lord model, optionally resuming a prior run."""
    assets = AssetManager(args.base_dir)
    if args.retrain:
        # Resume: keep the existing model/tensorboard directories.
        model_dir = assets.get_model_dir(args.model_name)
        tensorboard_dir = assets.get_tensorboard_dir(args.model_name)
    else:
        # Fresh run: wipe and recreate both directories.
        model_dir = assets.recreate_model_dir(args.model_name)
        tensorboard_dir = assets.recreate_tensorboard_dir(args.model_name)

    archive = np.load(assets.get_preprocess_file_path(args.data_name))
    imgs = archive['imgs'].astype(np.float32) / 255.0

    config = dict(
        img_shape=imgs.shape[1:],
        n_imgs=imgs.shape[0],
        n_classes=archive['n_classes'].item(),
    )
    config.update(base_config)

    lord = Lord(config)
    if args.retrain:
        lord.load(model_dir, latent=True, amortized=False)
    # NOTE(review): config is re-assigned after load — presumably load
    # replaces it with the saved one; confirm against Lord.load.
    lord.config = config

    lord.train_latent(imgs=imgs,
                      classes=archive['classes'],
                      model_dir=model_dir,
                      tensorboard_dir=tensorboard_dir,
                      retrain=args.retrain)

    lord.save(model_dir, latent=True, amortized=False)
コード例 #5
0
def train(args):
    """Build a Lord model from the default configuration and train it."""
    assets = AssetManager(args.base_dir)
    model_dir = assets.recreate_model_dir(args.model_name)
    tensorboard_dir = assets.recreate_tensorboard_dir(args.model_name)

    archive = np.load(assets.get_preprocess_file_path(args.data_name))
    imgs = archive['imgs'].astype(np.float32) / 255.0

    perceptual = default_config['perceptual_loss']
    config = Config(
        img_shape=imgs.shape[1:],
        n_imgs=imgs.shape[0],
        n_classes=archive['n_classes'].item(),
        content_dim=default_config['content_dim'],
        class_dim=default_config['class_dim'],
        content_std=default_config['content_std'],
        content_decay=default_config['content_decay'],
        n_adain_layers=default_config['n_adain_layers'],
        adain_dim=default_config['adain_dim'],
        perceptual_loss_layers=perceptual['layers'],
        perceptual_loss_weights=perceptual['weights'],
        perceptual_loss_scales=perceptual['scales'])

    lord = Lord.build(config)
    lord.train(
        imgs=imgs,
        classes=archive['classes'],
        batch_size=default_config['train']['batch_size'],
        n_epochs=default_config['train']['n_epochs'],
        model_dir=model_dir,
        tensorboard_dir=tensorboard_dir)

    lord.save(model_dir)
コード例 #6
0
ファイル: lord.py プロジェクト: matech96/lord
def preprocess(args):
	"""Read a raw image dataset and cache it as a single .npz archive."""
	assets = AssetManager(args.base_dir)

	img_dataset = dataset.get_dataset(args.dataset_id, args.dataset_path)
	imgs, classes, contents = img_dataset.read_images()

	np.savez(
		file=assets.get_preprocess_file_path(args.data_name),
		imgs=imgs,
		classes=classes,
		contents=contents,
		n_classes=np.unique(classes).size
	)
コード例 #7
0
def split_samples(args):
	"""Split a preprocessed dataset into train/test by fixed sample index.

	NOTE: the 50000/10000 boundary is hard-coded and valid only for CIFAR-10.
	"""
	assets = AssetManager(args.base_dir)

	archive = np.load(assets.get_preprocess_file_path(args.input_data_name))
	imgs, classes, contents = archive['imgs'], archive['classes'], archive['contents']

	n_classes = np.unique(classes).size

	splits = (
		(args.test_data_name, np.arange(50000, 60000)),
		(args.train_data_name, np.arange(50000)),
	)
	for out_name, idx in splits:
		np.savez(
			file=assets.get_preprocess_file_path(out_name),
			imgs=imgs[idx], classes=classes[idx], contents=contents[idx], n_classes=n_classes
		)
コード例 #8
0
def split_classes(args):
	"""Split a preprocessed dataset into train/test by class identity."""
	assets = AssetManager(args.base_dir)

	archive = np.load(assets.get_preprocess_file_path(args.input_data_name))
	imgs, classes, contents = archive['imgs'], archive['classes'], archive['contents']

	# Fixed seed so the chosen held-out classes are reproducible across runs.
	np.random.seed(2020)
	n_classes = np.unique(classes).size
	test_classes = np.random.choice(n_classes, size=args.num_test_classes, replace=False)

	test_mask = np.isin(classes, test_classes)
	for out_name, mask in ((args.test_data_name, test_mask),
	                       (args.train_data_name, ~test_mask)):
		np.savez(
			file=assets.get_preprocess_file_path(out_name),
			imgs=imgs[mask], classes=classes[mask], contents=contents[mask], n_classes=n_classes
		)
コード例 #9
0
ファイル: lord.py プロジェクト: matech96/lord
def train(args):
	"""Build a Converter from CLI args plus defaults and train it, with
	configuration logged to wandb."""
	# Record both the default config and the CLI arguments in wandb.
	wandb.config.update(default_config)
	cli_args = vars(args)
	cli_args.pop('func')  # drop the argparse sub-command callable
	wandb.config.update(cli_args)

	assets = AssetManager(args.base_dir)
	model_dir = assets.recreate_model_dir(args.model_name)
	tensorboard_dir = assets.recreate_tensorboard_dir(args.model_name)

	archive = np.load(assets.get_preprocess_file_path(args.data_name))
	classes = archive['classes']
	imgs = archive['imgs'].astype(np.float32) / 255.0

	perceptual = default_config['perceptual_loss']
	converter = Converter.build(
		img_shape=imgs.shape[1:],
		n_imgs=imgs.shape[0],
		n_classes=archive['n_classes'],

		content_dim=args.content_dim,
		class_dim=args.class_dim,
		content_std=args.content_std,
		content_decay=args.content_decay,

		n_adain_layers=default_config['n_adain_layers'],
		adain_enabled=args.adain,
		adain_dim=default_config['adain_dim'],
		adain_normalize=args.adain_normalize,

		perceptual_loss_layers=perceptual['layers'],
		perceptual_loss_weights=perceptual['weights'],
		perceptual_loss_scales=perceptual['scales'],
	)

	converter.train(
		imgs=imgs,
		classes=classes,
		batch_size=default_config['train']['batch_size'],
		n_epochs=default_config['train']['n_epochs'],
		model_dir=model_dir,
		tensorboard_dir=tensorboard_dir
	)

	converter.save(model_dir)
コード例 #10
0
ファイル: lord.py プロジェクト: lenechk/deepLearningLab
def train_encoders(args):
    """Train amortized encoders for a model whose latent stage is already trained."""
    assets = AssetManager(args.base_dir)
    model_dir = assets.get_model_dir(args.model_name)
    tensorboard_dir = assets.get_tensorboard_dir(args.model_name)

    archive = np.load(assets.get_preprocess_file_path(args.data_name))
    imgs = archive['imgs'].astype(np.float32) / 255.0

    # Load only the latent stage; amortized encoders are trained below.
    lord = Lord()
    lord.load(model_dir, latent=True, amortized=False)

    lord.train_amortized(
        imgs=imgs,
        classes=archive['classes'],
        model_dir=model_dir,
        tensorboard_dir=tensorboard_dir)

    # Persist only the newly trained amortized part.
    lord.save(model_dir, latent=False, amortized=True)
コード例 #11
0
def train(args):
    """Create fresh model/tensorboard directories and train a Lord model.

    NOTE(review): unlike the sibling train functions, nothing is saved after
    training — confirm whether Lord.train persists checkpoints itself.
    """
    assets = AssetManager(args.base_dir)
    model_dir = assets.recreate_model_dir(args.model_name)
    tensorboard_dir = assets.recreate_tensorboard_dir(args.model_name)

    archive = np.load(assets.get_preprocess_file_path(args.data_name))
    imgs = archive['imgs'].astype(np.float32) / 255.0

    config = dict(
        img_shape=imgs.shape[1:],
        n_imgs=imgs.shape[0],
        n_classes=archive['n_classes'].item(),
    )
    config.update(base_config)

    lord = Lord(config)
    lord.train(
        imgs=imgs,
        classes=archive['classes'],
        model_dir=model_dir,
        tensorboard_dir=tensorboard_dir)
コード例 #12
0
    def __init__(self,
                 subset=None,
                 base_dir='results',
                 model_name='minst_10_model',
                 data_name='minst_10_test',
                 include_encoders=True):
        """Load a preprocessed dataset and a trained Converter, precompute
        content/class codes and flattened AdaIN parameters for (optionally a
        prefix subset of) the images, then shuffle everything in unison."""
        self.data_name = data_name

        assets = AssetManager(base_dir)
        archive = np.load(assets.get_preprocess_file_path(data_name))
        imgs = archive['imgs'].astype(np.float32) / 255.0
        classes = archive['classes']

        # Optionally restrict to the first `subset` samples.
        if subset is None:
            self.curr_imgs = imgs
            self.classes = classes
        else:
            self.curr_imgs = imgs[:subset]
            self.classes = classes[:subset]

        # One-hot encode the class labels; n_classes comes from the encoding.
        self.onehot_enc = OneHotEncoder()
        self.onehot_classes = self.onehot_enc.fit_transform(
            self.classes.reshape(-1, 1))
        self.n_classes = self.onehot_classes.shape[1]

        self.n_images = self.curr_imgs.shape[0]

        # Precompute latent codes with the trained converter.
        self.converter = Converter.load(assets.get_model_dir(model_name),
                                        include_encoders=include_encoders)
        self.content_codes = self.converter.content_encoder.predict(
            self.curr_imgs)
        self.class_codes = self.converter.class_encoder.predict(self.curr_imgs)
        adain = self.converter.class_modulation.predict(self.class_codes)
        # Flatten per-layer AdaIN parameters into one vector per image.
        self.class_adain_params = adain.reshape(adain.shape[0], -1)

        # Shuffle all precomputed arrays together so rows stay aligned.
        (self.curr_imgs, self.classes, self.onehot_classes,
         self.content_codes, self.class_codes, self.class_adain_params) = shuffle(
            self.curr_imgs, self.classes, self.onehot_classes,
            self.content_codes, self.class_codes, self.class_adain_params)
コード例 #13
0
    def __init__(self,
                 digit,
                 subset=None,
                 base_dir='results',
                 model_name='minst_10_model',
                 data_name='minst_10_test'):
        """Load all images of one digit class and precompute their content
        codes and class AdaIN parameters with a trained Converter.

        Args:
            digit: class label to select.
            subset: if given, only matches among the first `subset` samples
                are kept; if None, all matching images are used.
            base_dir, model_name, data_name: asset locations.
        """
        self.digit = digit
        assets = AssetManager(base_dir)
        data = np.load(assets.get_preprocess_file_path(data_name))
        imgs = data['imgs'].astype(np.float32) / 255.0
        classes = data['classes']

        mask = (classes == digit)
        # BUG FIX: the original unconditionally did `mask[subset:] = False`;
        # with the default subset=None that is `mask[None:] = False`, which
        # clears the ENTIRE mask and selects zero images. Truncate only when
        # a subset was actually requested.
        if subset is not None:
            mask[subset:] = False
        self.curr_imgs = imgs[mask]
        self.n_images = self.curr_imgs.shape[0]

        # Precompute latent codes with the trained converter.
        self.converter = Converter.load(assets.get_model_dir(model_name),
                                        include_encoders=True)
        self.content_codes = self.converter.content_encoder.predict(
            self.curr_imgs)
        class_codes = self.converter.class_encoder.predict(self.curr_imgs)
        self.class_adain_params = self.converter.class_modulation.predict(
            class_codes)