def _prepare_generator(self, generator, noise_space, condition_distribution,
                       new_W_irange, input_source):
    """Retrofit a pretrained generator MLP so its first layer also accepts
    a condition vector alongside the noise input.

    The pretrained first-layer weight matrix is extended with freshly
    initialized rows for the condition dimensions, drawn uniformly from
    [-new_W_irange, new_W_irange], and the layer's input space is widened
    to a CompositeSpace of (noise, condition).

    Parameters
    ----------
    generator : object with an ``mlp`` attribute
        Pretrained generator whose MLP is modified in place.
    noise_space : Space
        Space of the noise input (its dimension is passed through to the
        returned ConditionalGenerator).
    condition_distribution : Distribution
        Distribution used by the returned ConditionalGenerator to sample
        conditions.
    new_W_irange : float
        Half-width of the uniform init range for the new condition rows.
    input_source : object
        Value forced onto ``generator.mlp._input_source`` (see HACK below).

    Returns
    -------
    ConditionalGenerator
        Wraps the (mutated) ``generator.mlp``.
    """
    noise_dim = noise_space.get_total_dimension()
    condition_dim = self.condition_space.get_total_dimension()

    first_layer = generator.mlp.layers[0]
    # NOTE(review): assumes get_param_values() yields (weights, biases)
    # for this layer type -- confirm against the layer implementation.
    pretrain_W, _ = first_layer.get_param_values()

    rng = generator.mlp.rng
    # Keep the pretrained rows for the noise inputs; append new uniformly
    # initialized rows (condition_dim x hidden) for the condition inputs.
    new_W = np.vstack((pretrain_W,
                       rng.uniform(-new_W_irange, new_W_irange,
                                   (condition_dim, pretrain_W.shape[1]))))
    new_W = sharedX(new_W)
    # Name the shared variable after the original weight parameter so the
    # retrained weights are distinguishable in the graph.
    new_W.name = first_layer.get_params()[0].name + '_retrain'

    # Swap in the extended weight matrix and widen the layer's input space
    # to take (noise, condition) pairs; the MLP's own input space must
    # agree with its first layer.
    first_layer.transformer = MatrixMul(new_W)
    first_layer.input_space = CompositeSpace(
        components=[noise_space, self.condition_space])
    generator.mlp.input_space = first_layer.input_space

    # HACK! Overwrites a private attribute; presumably needed so the MLP
    # reports the desired input source downstream -- verify against the
    # pylearn2 MLP implementation.
    generator.mlp._input_source = input_source

    return ConditionalGenerator(generator.mlp,
                                input_condition_space=self.condition_space,
                                condition_distribution=condition_distribution,
                                noise_dim=noise_dim)
def setUp(self):
    """Construct a minimal conditional generator conditioned on one-hot
    labels: a 10-dim noise input, a 10-class one-hot condition space, and
    a single-output linear MLP wrapped in a ConditionalGenerator.
    """
    noise_dim = 10
    num_labels = 10
    dtype = 'uint8'

    self.noise_dim = noise_dim
    self.num_labels = num_labels
    self.condition_dtype = dtype

    # Condition side: one-hot label vectors and their distribution.
    self.condition_space = VectorSpace(dim=num_labels, dtype=dtype)
    self.condition_formatter = OneHotFormatter(num_labels, dtype=dtype)
    self.condition_distribution = OneHotDistribution(self.condition_space)

    # TODO this nvis stuff is dirty. The ConditionalGenerator should handle it
    self.mlp_nvis = noise_dim + num_labels
    self.mlp_nout = 1

    # Set up model: the MLP consumes concatenated (noise, condition) input.
    output_layer = Linear(self.mlp_nout, 'out', irange=0.1)
    self.mlp = MLP(nvis=self.mlp_nvis, layers=[output_layer])
    self.G = ConditionalGenerator(
        mlp=self.mlp,
        input_condition_space=self.condition_space,
        condition_distribution=self.condition_distribution,
        noise_dim=noise_dim)