Example #1
def split_samples(args):
    assets = AssetManager(args.base_dir)

    data = np.load(assets.get_preprocess_file_path(args.input_data_name))
    imgs, classes, contents = data['imgs'], data['classes'], data['contents']

    n_classes = np.unique(classes).size
    n_samples = imgs.shape[0]

    n_test_samples = int(n_samples * args.test_split)

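    # Sample test indices uniformly without replacement; the complementary
    # boolean mask selects the remaining samples for training.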
    test_idx = np.random.choice(n_samples, size=n_test_samples, replace=False)
    train_idx = ~np.isin(np.arange(n_samples), test_idx)

    np.savez(file=assets.get_preprocess_file_path(args.test_data_name),
             imgs=imgs[test_idx],
             classes=classes[test_idx],
             contents=contents[test_idx],
             n_classes=n_classes)

    np.savez(file=assets.get_preprocess_file_path(args.train_data_name),
             imgs=imgs[train_idx],
             classes=classes[train_idx],
             contents=contents[train_idx],
             n_classes=n_classes)
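
A minimal sketch of how such a sub-command could be wired up with argparse. The set_defaults(func=...) pattern is suggested by Example #10's args_dict.pop('func'); the flag names beyond the attributes read from args above, and the default split ratio, are assumptions:

import argparse

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()

# Hypothetical sub-command; flag names mirror the attributes read from `args`.
split_parser = subparsers.add_parser('split-samples')
split_parser.add_argument('--base-dir', required=True)
split_parser.add_argument('--input-data-name', required=True)
split_parser.add_argument('--train-data-name', required=True)
split_parser.add_argument('--test-data-name', required=True)
split_parser.add_argument('--test-split', type=float, default=0.2)  # assumed default
split_parser.set_defaults(func=split_samples)

args = parser.parse_args()
args.func(args)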
Example #2
def train_encoders(args):
	assets = AssetManager(args.base_dir)
	model_dir = assets.get_model_dir(args.model_name)
	tensorboard_dir = assets.get_tensorboard_dir(args.model_name)

	data = np.load(assets.get_preprocess_file_path(args.data_name))
	imgs, classes, contents, n_classes = data['imgs'], data['classes'], data['contents'], data['n_classes']
	imgs = imgs.astype(np.float32) / 255.0

	converter = Converter.load(model_dir, include_encoders=False)

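	# Back up the latent (GLO) model once, before encoder training modifies it.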
	glo_backup_dir = os.path.join(model_dir, args.glo_dir)
	if not os.path.exists(glo_backup_dir):
		os.mkdir(glo_backup_dir)
		converter.save(glo_backup_dir)

	converter.train_encoders(
		imgs=imgs,
		classes=classes,

		batch_size=default_config['train_encoders']['batch_size'],
		n_epochs=default_config['train_encoders']['n_epochs'],

		model_dir=model_dir,
		tensorboard_dir=tensorboard_dir
	)

	converter.save(model_dir)
Example #3
def train_encoders(args):
	assets = AssetManager(args.base_dir)
	model_dir = assets.get_model_dir(args.model_name)
	tensorboard_dir = assets.get_tensorboard_dir(args.model_name)

	data = np.load(assets.get_preprocess_file_path(args.data_name))
	imgs = data['imgs'].astype(np.float32) / 255.0

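	# First run: snapshot the latent model; later runs resume from that clean snapshot.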
	backup_dir = os.path.join(model_dir, 'latent')
	if not os.path.exists(backup_dir):
		lord = Lord.load(model_dir, include_encoders=False)

		os.mkdir(backup_dir)
		lord.save(backup_dir)

	else:
		lord = Lord.load(backup_dir, include_encoders=False)

	lord.train_encoders(
		imgs=imgs,
		classes=data['classes'],

		batch_size=default_config['train_encoders']['batch_size'],
		n_epochs=default_config['train_encoders']['n_epochs'],

		model_dir=model_dir,
		tensorboard_dir=tensorboard_dir
	)

	lord.save(model_dir)
Example #4
def train(args):
    assets = AssetManager(args.base_dir)
    model_dir = assets.recreate_model_dir(args.model_name)
    tensorboard_dir = assets.recreate_tensorboard_dir(args.model_name)

    data = np.load(assets.get_preprocess_file_path(args.data_name))
    imgs = data['imgs'].astype(np.float32) / 255.0

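    # Assemble the configuration from the data shape plus the default hyperparameters.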
    config = Config(
        img_shape=imgs.shape[1:],
        n_imgs=imgs.shape[0],
        n_classes=data['n_classes'].item(),
        content_dim=default_config['content_dim'],
        class_dim=default_config['class_dim'],
        content_std=default_config['content_std'],
        content_decay=default_config['content_decay'],
        n_adain_layers=default_config['n_adain_layers'],
        adain_dim=default_config['adain_dim'],
        perceptual_loss_layers=default_config['perceptual_loss']['layers'],
        perceptual_loss_weights=default_config['perceptual_loss']['weights'],
        perceptual_loss_scales=default_config['perceptual_loss']['scales'])

    lord = Lord.build(config)
    lord.train(imgs=imgs,
               classes=data['classes'],
               batch_size=default_config['train']['batch_size'],
               n_epochs=default_config['train']['n_epochs'],
               model_dir=model_dir,
               tensorboard_dir=tensorboard_dir)

    lord.save(model_dir)
Example #5
def train(args):
	assets = AssetManager(args.base_dir)
	model_dir = assets.recreate_model_dir(args.model_name)
	tensorboard_dir = assets.recreate_tensorboard_dir(args.model_name)

	data = np.load(assets.get_preprocess_file_path(args.data_name))
	imgs = data['imgs'].astype(np.float32) / 255.0

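	# Only the data-dependent fields are set here; the shared hyperparameters
	# are merged in from base_config below.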
	config = dict(
		img_shape=imgs.shape[1:],
		n_imgs=imgs.shape[0],
		n_classes=data['n_classes'].item(),
	)

	config.update(base_config)

	lord = Lord(config)
	lord.train_latent(
		imgs=imgs,
		classes=data['classes'],

		model_dir=model_dir,
		tensorboard_dir=tensorboard_dir
	)

	lord.save(model_dir, latent=True, amortized=False)
Example #6
def preprocess(args):
	assets = AssetManager(args.base_dir)

	img_dataset = dataset.get_dataset(args.dataset_id, args.dataset_path)
	imgs, classes, contents = img_dataset.read_images()
	n_classes = np.unique(classes).size

	np.savez(
		file=assets.get_preprocess_file_path(args.data_name),
		imgs=imgs, classes=classes, contents=contents, n_classes=n_classes
	)
Example #7
def main():
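    # Pick the paper-trading or live environment file based on a CLI flag.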
    if '--paper' in sys.argv:
        load_dotenv('env/paper.env')
    elif '--live' in sys.argv:
        load_dotenv('env/live.env')

    asset_client = AssetManager()
    assets = asset_client.get_available_assets()
    charts = asset_client.get_charts(assets)
    charts.sort(key=lambda c: c.last_percent_change or 0)
    for chart in charts:
        print(str(chart))
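
For context, python-dotenv's load_dotenv reads plain KEY=VALUE lines into the process environment. A hypothetical env/paper.env could look like the following (variable names are illustrative, not taken from the source):

# env/paper.env (hypothetical contents)
API_KEY=your-paper-key
API_SECRET=your-paper-secret
BASE_URL=https://paper-api.example.com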
Example #8
def run():
    # initialize PyGame
    if pygame.mixer:
        pygame.mixer.pre_init(22050, -16, 2, 1024)

    os.environ['SDL_VIDEO_CENTERED'] = '1'
    pygame.init()
    pygame.display.set_icon(pygame.image.load('images/icon.png'))
    screen = pygame.display.set_mode(config.screen_size)
    pygame.display.set_caption("Super Mario")
    assets = AssetManager()

    state_stack.push(MainMenu(assets))

    # timer initialize
    game_timer.reset()
    accumulator = 0.0

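    # Main loop: run until the state stack empties, stepping the simulation
    # with a fixed physics timestep.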
    while state_stack.top is not None:
        state_stack.top.do_events()
        game_timer.update()

        # todo: fixed time step, or max time step?
        accumulator += game_timer.elapsed
        accumulator = min(0.10, accumulator)
        while accumulator > config.PHYSICS_DT:
            state_stack.update(config.PHYSICS_DT)
            accumulator -= config.PHYSICS_DT

        state_stack.draw(screen)
        pygame.display.flip()

    exit(0)
Example #10
def train(args):
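	# Record both the default configuration and the CLI arguments in wandb.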
	wandb.config.update(default_config)
	args_dict = vars(args)
	args_dict.pop('func')
	wandb.config.update(args_dict)
	assets = AssetManager(args.base_dir)
	model_dir = assets.recreate_model_dir(args.model_name)
	tensorboard_dir = assets.recreate_tensorboard_dir(args.model_name)

	data = np.load(assets.get_preprocess_file_path(args.data_name))
	imgs, classes, contents, n_classes = data['imgs'], data['classes'], data['contents'], data['n_classes']
	imgs = imgs.astype(np.float32) / 255.0

	converter = Converter.build(
		img_shape=imgs.shape[1:],
		n_imgs=imgs.shape[0],
		n_classes=n_classes,

		content_dim=args.content_dim,
		class_dim=args.class_dim,

		content_std=args.content_std,
		content_decay=args.content_decay,

		n_adain_layers=default_config['n_adain_layers'],
		adain_enabled=args.adain,
		adain_dim=default_config['adain_dim'],
		adain_normalize=args.adain_normalize,

		perceptual_loss_layers=default_config['perceptual_loss']['layers'],
		perceptual_loss_weights=default_config['perceptual_loss']['weights'],
		perceptual_loss_scales=default_config['perceptual_loss']['scales'],
	)

	converter.train(
		imgs=imgs,
		classes=classes,

		batch_size=default_config['train']['batch_size'],
		n_epochs=default_config['train']['n_epochs'],

		model_dir=model_dir,
		tensorboard_dir=tensorboard_dir
	)

	converter.save(model_dir)
Example #11
def train_encoders(args):
    assets = AssetManager(args.base_dir)
    model_dir = assets.get_model_dir(args.model_name)
    tensorboard_dir = assets.get_tensorboard_dir(args.model_name)

    data = np.load(assets.get_preprocess_file_path(args.data_name))
    imgs = data['imgs'].astype(np.float32) / 255.0

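    # Load the latent-stage model, then fit amortized encoders on top of it.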
    lord = Lord()
    lord.load(model_dir, latent=True, amortized=False)

    lord.train_amortized(imgs=imgs,
                         classes=data['classes'],
                         model_dir=model_dir,
                         tensorboard_dir=tensorboard_dir)

    lord.save(model_dir, latent=False, amortized=True)
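
Note the two-stage flow implied by the flags: Example #5's train writes the latent-stage checkpoint via save(model_dir, latent=True, amortized=False), which is exactly what load(model_dir, latent=True, amortized=False) picks up here before the amortized encoders are trained and saved.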
Example #12
def split_samples(args):
	assets = AssetManager(args.base_dir)

	data = np.load(assets.get_preprocess_file_path(args.input_data_name))
	imgs, classes, contents = data['imgs'], data['classes'], data['contents']

	n_classes = np.unique(classes).size

	# NOTE: This is a static split, valid only for CIFAR-10 (first 50,000 samples train, last 10,000 test).
	test_idx = np.arange(50000, 60000)
	train_idx = np.arange(50000)

	np.savez(
		file=assets.get_preprocess_file_path(args.test_data_name),
		imgs=imgs[test_idx], classes=classes[test_idx], contents=contents[test_idx], n_classes=n_classes
	)

	np.savez(
		file=assets.get_preprocess_file_path(args.train_data_name),
		imgs=imgs[train_idx], classes=classes[train_idx], contents=contents[train_idx], n_classes=n_classes
	)
Example #13
    def __init__(self,
                 subset=None,
                 base_dir='results',
                 model_name='minst_10_model',
                 data_name='minst_10_test',
                 include_encoders=True):
        self.data_name = data_name
        assets = AssetManager(base_dir)
        data = np.load(assets.get_preprocess_file_path(data_name))
        imgs, classes, contents, n_classes = (
            data['imgs'], data['classes'], data['contents'], data['n_classes'])
        imgs = imgs.astype(np.float32) / 255.0
        if subset is not None:
            self.curr_imgs = imgs[:subset]
            self.classes = classes[:subset]
        else:
            self.curr_imgs = imgs
            self.classes = classes

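        # One-hot encode the class labels (fit_transform returns a sparse matrix).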
        self.onehot_enc = OneHotEncoder()
        self.onehot_classes = self.onehot_enc.fit_transform(
            self.classes.reshape(-1, 1))
        self.n_classes = self.onehot_classes.shape[1]

        self.n_images = self.curr_imgs.shape[0]

        self.converter = Converter.load(assets.get_model_dir(model_name),
                                        include_encoders=include_encoders)
        self.content_codes = self.converter.content_encoder.predict(
            self.curr_imgs)
        self.class_codes = self.converter.class_encoder.predict(self.curr_imgs)
        class_adain_params = self.converter.class_modulation.predict(
            self.class_codes)
        self.class_adain_params = class_adain_params.reshape(
            class_adain_params.shape[0], -1)
        self.curr_imgs, self.classes, self.onehot_classes, self.content_codes, self.class_codes, self.class_adain_params = \
            shuffle(self.curr_imgs, self.classes, self.onehot_classes, self.content_codes, self.class_codes,
                    self.class_adain_params)
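
The trailing shuffle call (presumably sklearn.utils.shuffle, matching the OneHotEncoder used above) applies one shared permutation to all six arrays, keeping images aligned with their labels and precomputed codes.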
Example #14
def split_classes(args):
	assets = AssetManager(args.base_dir)

	data = np.load(assets.get_preprocess_file_path(args.input_data_name))
	imgs, classes, contents = data['imgs'], data['classes'], data['contents']

	# Fix the random seed so the class split is reproducible.
	np.random.seed(2020)
	n_classes = np.unique(classes).size
	test_classes = np.random.choice(n_classes, size=args.num_test_classes, replace=False)

	test_idx = np.isin(classes, test_classes)
	train_idx = ~np.isin(classes, test_classes)

	np.savez(
		file=assets.get_preprocess_file_path(args.test_data_name),
		imgs=imgs[test_idx], classes=classes[test_idx], contents=contents[test_idx], n_classes=n_classes
	)

	np.savez(
		file=assets.get_preprocess_file_path(args.train_data_name),
		imgs=imgs[train_idx], classes=classes[train_idx], contents=contents[train_idx], n_classes=n_classes
	)
Example #15
    def __init__(self,
                 digit,
                 subset=None,
                 base_dir='results',
                 model_name='minst_10_model',
                 data_name='minst_10_test'):
        self.digit = digit
        assets = AssetManager(base_dir)
        data = np.load(assets.get_preprocess_file_path(data_name))
        imgs, classes, contents, n_classes = (
            data['imgs'], data['classes'], data['contents'], data['n_classes'])
        imgs = imgs.astype(np.float32) / 255.0
        curr_zero_idx = (classes == digit)
        if subset is not None:
            # Drop matches beyond the first `subset` samples (TODO: use np.cumsum to cap the match count instead).
            curr_zero_idx[subset:] = False
        self.curr_imgs = imgs[curr_zero_idx]
        self.n_images = self.curr_imgs.shape[0]

        self.converter = Converter.load(assets.get_model_dir(model_name),
                                        include_encoders=True)
        self.content_codes = self.converter.content_encoder.predict(
            self.curr_imgs)
        class_codes = self.converter.class_encoder.predict(self.curr_imgs)
        self.class_adain_params = self.converter.class_modulation.predict(
            class_codes)
Example #16
def lighting_plot(model_name, base_dir, adain_enabled):
    assets = AssetManager(base_dir)
    converter = Converter.load(assets.get_model_dir(model_name),
                               include_encoders=True)

    data_base_dir = 'data/small_norb_lord'

    azimuths = []
    elevations = []
    lightings = []
    lt_rts = []
    classes = []
    img_paths = []

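    # Recover azimuth/elevation/lighting/side metadata from the small-NORB file names.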
    regex = re.compile(r'azimuth(\d+)_elevation(\d+)_lighting(\d+)_(\w+)\.jpg')
    for category in tqdm(os.listdir(data_base_dir)):
        for instance in os.listdir(os.path.join(data_base_dir, category)):
            for file_name in os.listdir(
                    os.path.join(data_base_dir, category, instance)):
                img_path = os.path.join(data_base_dir, category, instance,
                                        file_name)
                azimuth, elevation, lighting, lt_rt = regex.match(
                    file_name).groups()

                class_id = (int(category) * 10) + int(instance)
                azimuths.append(int(azimuth))
                elevations.append(int(elevation))
                lightings.append(int(lighting))
                lt_rts.append(lt_rt)
                classes.append(class_id)
                img_paths.append(img_path)

    df = pd.DataFrame({
        'azimuth': azimuths,
        'elevation': elevations,
        'lighting': lightings,
        'lt_rt': lt_rts,
        'classe': classes,
        'img_path': img_paths,
    })

    df = df.sample(frac=1).reset_index(drop=True)

    fxd_content = [df[df.lighting == i]['img_path'].iloc[0] for i in range(6)]
    fxd_class = df[df.classe == 0]['img_path'][:10]
    def l2li(paths):
        return [
            np.expand_dims(cv2.cvtColor(cv2.resize(plt.imread(p), dsize=(64, 64)),
                                        cv2.COLOR_BGR2GRAY),
                           axis=2).astype(np.float32) / 255.0
            for p in paths
        ]

    fxd_content_img = l2li(fxd_content)
    fxd_class_img = l2li(fxd_class)

    fxd_content_cnt, fxd_content_cls = pred_imgs(converter, fxd_content_img,
                                                 adain_enabled)
    fxd_class_cnt, fxd_class_cls = pred_imgs(converter, fxd_class_img,
                                             adain_enabled)

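    # Build the comparison grid: the top row holds the fixed-content source images;
    # each following row re-renders them with a single class code.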
    plt.rcParams["figure.figsize"] = (20, 20)
    blank = np.zeros_like(fxd_content_img[0])
    output = [np.concatenate([blank] + list(fxd_content_img), axis=1)]
    for i in tqdm(range(10)):
        generated_imgs = [
            converter.generator.predict(
                [fxd_content_cnt[[j]], fxd_class_cls[[i]]])[0]
            for j in range(6)
        ]

        converted_imgs = [fxd_class_img[i]] + generated_imgs

        output.append(np.concatenate(converted_imgs, axis=1))

    merged_img = np.concatenate(output, axis=0)

    plt.xlabel('Content')
    plt.ylabel('Class')
    plt.imshow(np.squeeze(merged_img), cmap='gray')
    wandb.log({"Lighting plot": plt})
Example #17
class MapEditor(QtGui.QMainWindow, Ui_MapEditor):
    def __init__(self, parent=None):
        super(MapEditor, self).__init__(parent)
        self.setupUi(self)

        self.setWindowIcon(QtGui.QIcon("assets/icon.png"))

        print("Working Directory: %s"%(WORKING_DIR))

        # Create our asset manager
        self.assetManager = AssetManager(WORKING_DIR)
        self.assetManager.load_asset("deny.png")

        # Create the canvas :D
        self.scene = EditorScene(self)
        self.graphicsView = GraphicsView(self.scene)
        self.graphicsView.setObjectName("graphicsView")
        self.horizontalLayout.addWidget(self.graphicsView)

        # We can now initialize the scene
        self.scene.initialize_scene()

        # The tool that is currently active
        self.current_tool = None
        self.current_status = None

        # Init all available tools
        self.initialize_tools()

    def initialize_tools(self):
        self.toolSetLayout = QtGui.QVBoxLayout(self.scrollAreaWidgetContents)

        # Add tool status
        self.current_status = QtGui.QLabel("Tool: None", self)
        self.toolSetLayout.addWidget(self.current_status)

        # Add world building elements
        self._add_tool_section("Brushes:",elements.ELEM_BRUSH_CLASSES)
        self._add_tool_section("Player:",elements.ELEM_PLAYER_CLASSES)
        self._add_tool_section("Building:",elements.ELEM_WORLD_CLASSES)
        self._add_tool_section("Enemies:",elements.ELEM_ENEMY_CLASSES)
        self._add_tool_section("Power-Ups:",elements.ELEM_POWERUP_CLASSES)

    def _add_tool_section(self, title, tools):
        self.toolSetLayout.addWidget(QtGui.QLabel(title, self))
        i = 0
        for tool in tools:
            # Create a new horizontal layout for every 2 tools
            if i == 0:
                currentContainer = QtGui.QWidget(self.centralwidget)
                currentSetLayout = QtGui.QHBoxLayout(currentContainer)
                self.toolSetLayout.addWidget(currentContainer)
            currentSetLayout.addWidget(ToolSet(tool, self))
            i = (i + 1) % 2

    def set_current_tool(self, tool):
        self.current_tool = tool
        self.current_status.setText("Tool: %s" % tool.get_tool_name())

    def element_clicked(self, element):
        if self.current_tool:
            self.current_tool.element_clicked(element)