Example #1
def train_encoders(args):
	assets = AssetManager(args.base_dir)
	model_dir = assets.get_model_dir(args.model_name)
	tensorboard_dir = assets.get_tensorboard_dir(args.model_name)

	data = np.load(assets.get_preprocess_file_path(args.data_name))
	imgs, classes, contents, n_classes = data['imgs'], data['classes'], data['contents'], data['n_classes']
	imgs = imgs.astype(np.float32) / 255.0

	converter = Converter.load(model_dir, include_encoders=False)

	glo_backup_dir = os.path.join(model_dir, args.glo_dir)
	if not os.path.exists(glo_backup_dir):
		os.mkdir(glo_backup_dir)
		converter.save(glo_backup_dir)

	converter.train_encoders(
		imgs=imgs,
		classes=classes,

		batch_size=default_config['train_encoders']['batch_size'],
		n_epochs=default_config['train_encoders']['n_epochs'],

		model_dir=model_dir,
		tensorboard_dir=tensorboard_dir
	)

	converter.save(model_dir)
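
For orientation, a minimal sketch of how a training entry point like this might be wired to the command line; the flag names are assumptions inferred from the attributes read off `args` above (base_dir, model_name, data_name, glo_dir), not part of the original example:

import argparse

# Hypothetical CLI wiring for the example above; flag names are assumed.
parser = argparse.ArgumentParser()
parser.add_argument('--base-dir', dest='base_dir', required=True)
parser.add_argument('--model-name', dest='model_name', required=True)
parser.add_argument('--data-name', dest='data_name', required=True)
parser.add_argument('--glo-dir', dest='glo_dir', required=True)

train_encoders(parser.parse_args())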
Example #2
def train(args):
    assets = AssetManager(args.base_dir)
    # a fresh run recreates the model and tensorboard dirs; args.retrain reuses the existing ones
    if not args.retrain:
        model_dir = assets.recreate_model_dir(args.model_name)
        tensorboard_dir = assets.recreate_tensorboard_dir(args.model_name)
    else:
        model_dir = assets.get_model_dir(args.model_name)
        tensorboard_dir = assets.get_tensorboard_dir(args.model_name)
    data = np.load(assets.get_preprocess_file_path(args.data_name))
    imgs = data['imgs'].astype(np.float32) / 255.0

    config = dict(
        img_shape=imgs.shape[1:],
        n_imgs=imgs.shape[0],
        n_classes=data['n_classes'].item(),
    )

    config.update(base_config)

    lord = Lord(config)
    if args.retrain:
        lord.load(model_dir, latent=True, amortized=False)
    lord.config = config
    lord.train_latent(imgs=imgs,
                      classes=data['classes'],
                      model_dir=model_dir,
                      tensorboard_dir=tensorboard_dir,
                      retrain=args.retrain)

    lord.save(model_dir, latent=True, amortized=False)
Example #3
def train_encoders(args):
	assets = AssetManager(args.base_dir)
	model_dir = assets.get_model_dir(args.model_name)
	tensorboard_dir = assets.get_tensorboard_dir(args.model_name)

	data = np.load(assets.get_preprocess_file_path(args.data_name))
	imgs = data['imgs'].astype(np.float32) / 255.0

	backup_dir = os.path.join(model_dir, 'latent')
	if not os.path.exists(backup_dir):
		lord = Lord.load(model_dir, include_encoders=False)

		os.mkdir(backup_dir)
		lord.save(backup_dir)

	else:
		lord = Lord.load(backup_dir, include_encoders=False)

	lord.train_encoders(
		imgs=imgs,
		classes=data['classes'],

		batch_size=default_config['train_encoders']['batch_size'],
		n_epochs=default_config['train_encoders']['n_epochs'],

		model_dir=model_dir,
		tensorboard_dir=tensorboard_dir
	)

	lord.save(model_dir)
Example #4
def train_encoders(args):
    assets = AssetManager(args.base_dir)
    model_dir = assets.get_model_dir(args.model_name)
    tensorboard_dir = assets.get_tensorboard_dir(args.model_name)

    data = np.load(assets.get_preprocess_file_path(args.data_name))
    imgs = data['imgs'].astype(np.float32) / 255.0

    lord = Lord()
    lord.load(model_dir, latent=True, amortized=False)

    lord.train_amortized(imgs=imgs,
                         classes=data['classes'],
                         model_dir=model_dir,
                         tensorboard_dir=tensorboard_dir)

    lord.save(model_dir, latent=False, amortized=True)
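
Taken together with Example #2, the two stages might be chained roughly as follows; this is only a sketch, assuming both functions receive the same argparse namespace (base_dir, model_name, data_name, plus retrain for Example #2):

# Hypothetical two-stage run: latent stage (Example #2), then amortized encoders (this example).
train(args)            # fits the per-image latent codes and the generator, saved with latent=True
train_encoders(args)   # loads that checkpoint and trains the amortized encoders, saved with amortized=True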
Example #5
    def __init__(self,
                 subset=None,
                 base_dir='results',
                 model_name='minst_10_model',
                 data_name='minst_10_test',
                 include_encoders=True):
        self.data_name = data_name
        assets = AssetManager(base_dir)
        data = np.load(assets.get_preprocess_file_path(data_name))
        imgs, classes, contents, n_classes = data['imgs'], data[
            'classes'], data['contents'], data['n_classes']
        imgs = imgs.astype(np.float32) / 255.0
        if subset is not None:
            self.curr_imgs = imgs[:subset]
            self.classes = classes[:subset]
        else:
            self.curr_imgs = imgs
            self.classes = classes

        self.onehot_enc = OneHotEncoder()
        self.onehot_classes = self.onehot_enc.fit_transform(
            self.classes.reshape(-1, 1))
        self.n_classes = self.onehot_classes.shape[1]

        self.n_images = self.curr_imgs.shape[0]

        self.converter = Converter.load(assets.get_model_dir(model_name),
                                        include_encoders=include_encoders)
        self.content_codes = self.converter.content_encoder.predict(
            self.curr_imgs)
        self.class_codes = self.converter.class_encoder.predict(self.curr_imgs)
        class_adain_params = self.converter.class_modulation.predict(
            self.class_codes)
        self.class_adain_params = class_adain_params.reshape(
            class_adain_params.shape[0], -1)
        self.curr_imgs, self.classes, self.onehot_classes, self.content_codes, self.class_codes, self.class_adain_params = \
            shuffle(self.curr_imgs, self.classes, self.onehot_classes, self.content_codes, self.class_codes,
                    self.class_adain_params)
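
A possible instantiation of this constructor; the wrapper class name EncodedTestSet is hypothetical and chosen only for illustration, while the keyword arguments mirror the signature above:

# Hypothetical usage; the class name is assumed, the defaults come from the signature.
dataset = EncodedTestSet(subset=1000,
                         base_dir='results',
                         model_name='minst_10_model',
                         data_name='minst_10_test',
                         include_encoders=True)
print(dataset.n_images, dataset.content_codes.shape, dataset.class_adain_params.shape)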
Example #6
def train_encoders(args):
	assets = AssetManager(args.base_dir)
	model_dir = assets.get_model_dir(args.model_name)
	tensorboard_dir = assets.get_tensorboard_dir(args.model_name)
	data = np.load(assets.get_preprocess_file_path(args.data_name))
	imgs = data['imgs'].astype(np.float32) / 255.0

	config = dict(
		img_shape=imgs.shape[1:],
		n_imgs=imgs.shape[0],
		n_classes=data['n_classes'].item(),
	)

	config.update(base_config)

	lord = Lord(config)

	lord.train_encoders(
		imgs=imgs,
		classes=data['classes'],

		model_dir=model_dir,
		tensorboard_dir=tensorboard_dir
	)
Example #7
    def __init__(self,
                 digit,
                 subset=None,
                 base_dir='results',
                 model_name='minst_10_model',
                 data_name='minst_10_test'):
        self.digit = digit
        assets = AssetManager(base_dir)
        data = np.load(assets.get_preprocess_file_path(data_name))
        imgs, classes, contents, n_classes = data['imgs'], data[
            'classes'], data['contents'], data['n_classes']
        imgs = imgs.astype(np.float32) / 255.0
        curr_zero_idx = (classes == digit)
        if subset is not None:
            # keep only matches within the first `subset` images;
            # TODO: use np.cumsum to take the first `subset` matches instead
            curr_zero_idx[subset:] = False
        self.curr_imgs = imgs[curr_zero_idx]
        self.n_images = self.curr_imgs.shape[0]

        self.converter = Converter.load(assets.get_model_dir(model_name),
                                        include_encoders=True)
        self.content_codes = self.converter.content_encoder.predict(
            self.curr_imgs)
        class_codes = self.converter.class_encoder.predict(self.curr_imgs)
        self.class_adain_params = self.converter.class_modulation.predict(
            class_codes)
Example #8
def ligning_plot(model_name, base_dir, adain_enabled):
    assets = AssetManager(base_dir)
    converter = Converter.load(assets.get_model_dir(model_name),
                               include_encoders=True)

    data_base_dir = 'data/small_norb_lord'

    azimuths = []
    elevations = []
    lightings = []
    lt_rts = []
    classes = []
    img_paths = []

    regex = re.compile(r'azimuth(\d+)_elevation(\d+)_lighting(\d+)_(\w+)\.jpg')
    for category in tqdm(os.listdir(data_base_dir)):
        for instance in os.listdir(os.path.join(data_base_dir, category)):
            for file_name in os.listdir(
                    os.path.join(data_base_dir, category, instance)):
                img_path = os.path.join(data_base_dir, category, instance,
                                        file_name)
                azimuth, elevation, lighting, lt_rt = regex.match(
                    file_name).groups()

                class_id = (int(category) * 10) + int(instance)
                azimuths.append(int(azimuth))
                elevations.append(int(elevation))
                lightings.append(int(lighting))
                lt_rts.append(lt_rt)
                classes.append(class_id)
                img_paths.append(img_path)

    df = pd.DataFrame({
        'azimuth': azimuths,
        'elevation': elevations,
        'lighting': lightings,
        'lt_rt': lt_rts,
        'classe': classes,
        'img_path': img_paths,
    })

    df = df.sample(frac=1).reset_index(drop=True)

    fxd_content = [df[df.lighting == i]['img_path'].iloc[0] for i in range(6)]
    fxd_class = df[df.classe == 0]['img_path'][:10]
    def l2li(paths):
        # read each image, resize to 64x64, convert to single-channel grayscale in [0, 1]
        return [
            np.expand_dims(cv2.cvtColor(cv2.resize(plt.imread(p), dsize=(64, 64)),
                                        cv2.COLOR_BGR2GRAY),
                           axis=2).astype(np.float32) / 255.0 for p in paths
        ]

    fxd_content_img = l2li(fxd_content)
    fxd_class_img = l2li(fxd_class)

    fxd_content_cnt, fxd_content_cls = pred_imgs(converter, fxd_content_img,
                                                 adain_enabled)
    fxd_class_cnt, fxd_class_cls = pred_imgs(converter, fxd_class_img,
                                             adain_enabled)

    plt.rcParams["figure.figsize"] = (20, 20)
    blank = np.zeros_like(fxd_content_img[0])
    output = [np.concatenate([blank] + list(fxd_content_img), axis=1)]
    for i in tqdm(range(10)):
        generated_imgs = [
            converter.generator.predict(
                [fxd_content_cnt[[j]], fxd_class_cls[[i]]])[0]
            for j in range(6)
        ]

        converted_imgs = [fxd_class_img[i]] + generated_imgs

        output.append(np.concatenate(converted_imgs, axis=1))

    merged_img = np.concatenate(output, axis=0)

    plt.xlabel('Content')
    plt.ylabel('Class')
    plt.imshow(np.squeeze(merged_img), cmap='gray')
    wandb.log({"Lighting plot": plt})
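
The helper pred_imgs is not shown in this example. A plausible sketch, based on the encoder calls used in Examples #5 and #7 (content_encoder, class_encoder, class_modulation) and on how the return values are fed to converter.generator.predict above; the exact role of adain_enabled is an assumption:

import numpy as np

def pred_imgs(converter, img_list, adain_enabled):
    # Hypothetical implementation: encode a list of HxWx1 images into
    # content codes and class codes (or AdaIN parameters when enabled).
    imgs = np.stack(img_list, axis=0)
    content_codes = converter.content_encoder.predict(imgs)
    class_codes = converter.class_encoder.predict(imgs)
    if adain_enabled:
        # map class codes to the AdaIN parameters expected by the generator
        class_codes = converter.class_modulation.predict(class_codes)
    return content_codes, class_codes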