def __init__(self,
             latents: Union[RVconf, Layer] = RVconf(64, name="latents"),
             distribution: Literal['powerspherical', 'vonmisesfisher'] = 'vonmisesfisher',
             prior: Union[None, SphericalUniform, VonMisesFisher, PowerSpherical] = None,
             beta: Union[float, Interpolation] = linear(vmin=1e-6, vmax=1.,
                                                        steps=2000, delay_in=0),
             **kwargs):
  event_shape = latents.event_shape
  event_size = int(np.prod(event_shape))
  distribution = str(distribution).lower()
  assert distribution in ('powerspherical', 'vonmisesfisher'), \
    ('Only PowerSpherical or VonMisesFisher distribution is supported, '
     f'but given: {distribution}')
  # select the posterior distribution and its matching default prior
  if distribution == 'powerspherical':
    fn_distribution = _power_spherical(event_size)
    default_prior = SphericalUniform(dimension=event_size)
  else:
    fn_distribution = _von_mises_fisher(event_size)
    default_prior = VonMisesFisher(0, 10)
  if prior is None:
    prior = default_prior
  # the extra unit parameterizes the concentration of the spherical posterior
  latents = DistributionDense(
      event_shape,
      posterior=DistributionLambda(make_distribution_fn=fn_distribution),
      prior=prior,
      units=event_size + 1,
      name=latents.name)
  super().__init__(latents=latents, analytic=True, beta=beta, **kwargs)
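# NOTE (illustrative sketch, not odin's Interpolation class): the default
# `beta` above is a warm-up schedule rather than a constant, ramping the KL
# weight from ~0 to 1 over the first 2000 steps. A minimal stand-in:
import numpy as np

def linear_warmup(step, vmin=1e-6, vmax=1.0, steps=2000, delay_in=0):
  """Linearly anneal from `vmin` to `vmax` over `steps` updates, holding
  `vmin` for the first `delay_in` steps."""
  t = np.clip((step - delay_in) / float(steps), 0.0, 1.0)
  return vmin + t * (vmax - vmin)

# beta is tiny early in training and reaches 1.0 by step 2000
print([round(linear_warmup(s), 3) for s in (0, 500, 1000, 2000, 5000)])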
def __init__(
    self,
    beta: Union[float, Interpolation] = linear(vmin=1e-6, vmax=1.0,
                                               steps=2000, delay_in=0),
    **kwargs,
):
  super().__init__(beta=beta, **kwargs)
def __init__(
    self,
    n_components: int = 500,
    pseudoinputs_mean: float = -0.05,
    pseudoinputs_std: float = 0.01,
    beta: Union[float, Interpolation] = linear(vmin=1e-6, vmax=1.,
                                               steps=2000, delay_in=0),
    **kwargs):
  super().__init__(beta=beta, **kwargs)
  # number of pseudo-inputs and the Gaussian used to initialize them
  self.n_components = n_components
  self.pseudoinputs_mean = pseudoinputs_mean
  self.pseudoinputs_std = pseudoinputs_std
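# NOTE (hypothetical sketch): in a VampPrior, `pseudoinputs_mean` and
# `pseudoinputs_std` typically parameterize the initializer of trainable
# pseudo-inputs; the shapes and variable name below are assumptions.
import tensorflow as tf

n_components = 500
input_dim = 784  # assumed flattened input size
pseudo_inputs = tf.Variable(
    tf.random.normal([n_components, input_dim], mean=-0.05, stddev=0.01),
    trainable=True,
    name='pseudo_inputs')
# the prior is then the mixture (1 / K) * sum_k q(z | pseudo_inputs[k])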
def __init__(
    self,
    labels: RVconf = RVconf(10, 'onehot', projection=True, name="digits"),
    alpha: float = 10.0,
    mi_coef: Union[float, Interpolation] = linear(vmin=0.1, vmax=0.05,
                                                  steps=20000),
    reverse_mi: bool = False,
    steps_without_mi: int = 1000,
    **kwargs,
):
  super().__init__(**kwargs)
  self._separated_steps = False
  self.labels = _parse_layers(labels)
  self._mi_coef = mi_coef
  self.alpha = alpha
  # the mutual-information term is skipped for the first `steps_without_mi` updates
  self.steps_without_mi = int(steps_without_mi)
  self.reverse_mi = bool(reverse_mi)
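# NOTE (illustrative only): combining `steps_without_mi` with the `mi_coef`
# schedule; how the library actually merges the two is an assumption here.
import numpy as np

def effective_mi_coef(step, vmin=0.1, vmax=0.05, steps=20000,
                      steps_without_mi=1000):
  if step < steps_without_mi:
    return 0.0  # MI term disabled during the warm-up phase
  t = np.clip(step / float(steps), 0.0, 1.0)
  return vmin + t * (vmax - vmin)  # decays from 0.1 towards 0.05

print([round(effective_mi_coef(s), 4) for s in (0, 999, 1000, 10000, 20000)])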
def __init__(
    self,
    mi_coef: Coefficient = 0.2,
    latents: RVconf = RVconf(32, 'mvndiag', projection=True, name='latents'),
    mutual_codes: RVconf = RVconf(10, 'mvndiag', projection=True, name='codes'),
    steps_without_mi: int = 100,
    beta: Coefficient = linear(vmin=1e-6, vmax=1., steps=2000),
    beta_codes: Coefficient = 0.,
    name: str = 'MutualInfoVAE',
    **kwargs,
):
  super().__init__(beta=beta, latents=latents, name=name, **kwargs)
  self.is_binary_code = mutual_codes.is_binary
  if isinstance(mutual_codes, RVconf):
    mutual_codes = mutual_codes.create_posterior()
  self.mutual_codes = mutual_codes
  self._mi_coef = mi_coef
  self._beta_codes = beta_codes
  self.steps_without_mi = int(steps_without_mi)
# Configs
# ===========================================================================
ZDIM = 32
MAX_LENGTH = 48
BUFFER_SIZE = 100
PARALLEL = tf.data.experimental.AUTOTUNE
# GaussianLayer, GammaLayer, NegativeBinomialLayer
# POSTERIOR = partialclass(bay.layers.GammaLayer,
#                          concentration_activation='softplus1',
#                          rate_activation='softplus1')
# POSTERIOR = partialclass(bay.layers.NegativeBinomialLayer,
#                          count_activation='softplus1')
POSTERIOR = bay.layers.GaussianLayer
BETA = interpolation.linear(vmin=0,
                            vmax=100,
                            norm=500,
                            delayOut=50,
                            cyclical=True)
# ===========================================================================
# Load the data
# ===========================================================================
audio = AudioFeatureLoader()
train, test = audio.load_fsdd()
train = audio.create_dataset(train,
                             return_path=False,
                             max_length=MAX_LENGTH,
                             cache='/tmp/fsdd_train.cache',
                             shuffle=BUFFER_SIZE,
                             parallel=PARALLEL,
                             prefetch=-1)
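# NOTE (rough sketch, not odin's interpolation): with `cyclical=True` the
# KL weight restarts its ramp every cycle; here `norm=500` is assumed to be
# the cycle length and the `delayOut` handling is omitted.
import numpy as np

def cyclical_linear(step, vmin=0.0, vmax=100.0, period=500):
  t = (step % period) / float(period)
  return vmin + t * (vmax - vmin)

print([cyclical_linear(s) for s in (0, 250, 499, 500, 750)])  # 0, 50, ~100, 0, 50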
N_CPU = 2
OVERRIDE = False
configs = dict(
    vae=[
        SemafoVAE,
        RemafoVAE,
    ],
    py=[
        0.004,
        # 0.06,
        # 0.2,
        # 0.5,
        # 0.95,
    ],
    coef=[
        linear(vmin=0.1, vmax=0.01, length=20000, cyclical=False),
        # linear(vmin=0.2, vmax=0.02, length=20000, cyclical=True),
        # linear(vmin=0.2, vmax=0.02, length=20000, cyclical=False),
        # linear(vmin=0.5, vmax=0.1, length=20000, cyclical=True),
    ],
    ds=[
        # 'mnist',
        # 'fashionmnist',
        # 'shapes3d',
        # 'dsprites',
        'celeba'
    ],
)
outdir = '/home/trung/exp/hyperparams'
if not os.path.exists(outdir):
  os.makedirs(outdir)
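# NOTE (hypothetical): the grid above would typically be expanded into one
# job per combination; the actual runner consuming `configs` is not shown here.
from itertools import product

jobs = [
    dict(vae=vae, py=py, coef=coef, ds=ds)
    for vae, py, coef, ds in product(configs['vae'], configs['py'],
                                     configs['coef'], configs['ds'])
]
print(f'{len(jobs)} configurations to run')  # 2 * 1 * 1 * 1 = 2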
def _validate_color_marker_size_legend(max_n_points,
                                       color,
                                       marker,
                                       size,
                                       text_marker=False,
                                       is_colormap=False,
                                       size_range=8,
                                       random_seed=1):
  """Return: colors, markers, sizes, legends"""
  from odin.backend import interpolation
  from matplotlib.colors import LinearSegmentedColormap
  # check size range
  if isinstance(size, Number):
    size_range = interpolation.const(vmax=size)
  if isinstance(size_range, Number):
    size_range = interpolation.const(vmax=size_range)
  elif isinstance(size_range, interpolation.Interpolation):
    pass
  else:
    vmin, vmax = as_tuple(size_range, N=2)
    size_range = interpolation.linear(vmin=float(vmin), vmax=float(vmax))
  # check others
  default_color = 'b'
  if isinstance(color, (string_types, LinearSegmentedColormap)):
    default_color = color
    color = None
  # marker
  default_marker = '.'
  if isinstance(marker, string_types):
    default_marker = marker
    marker = None
  legend = [
      [None] * max_n_points,  # color
      [None] * max_n_points,  # marker
      [None] * max_n_points,  # size
  ]
  #
  create_label_map = lambda labs, default_val, fn_gen: \
      ({labs[0]: default_val} if len(labs) == 1 else
       {i: j for i, j in zip(labs, fn_gen(len(labs), seed=random_seed))})
  # ====== check arguments ====== #
  if color is None:
    color = [0] * max_n_points
  else:
    legend[0] = color
  #
  if marker is None:
    marker = [0] * max_n_points
  else:
    legend[1] = marker
  #
  if isinstance(size, Number):
    size = [0] * max_n_points
  elif size is None:
    size = [0] * max_n_points
  else:  # given a list of labels
    legend[2] = size
    size_range.norm = np.max(size)
  # ====== validate the length ====== #
  for name, arr in [("color", color), ("marker", marker), ("size", size)]:
    assert len(arr) == max_n_points, \
        "Given %d samples for `%s`, but require %d samples" % \
        (len(arr), name, max_n_points)
  # ====== labels set ====== #
  color_labels = np.unique(color)
  color_map = create_label_map(
      color_labels, default_color,
      generate_random_colormaps if is_colormap else generate_palette_colors)
  # generate_random_colors
  marker_labels = np.unique(marker)
  if text_marker:
    fn = lambda mrk, seed: marker_labels
  else:
    fn = generate_random_marker
  marker_map = create_label_map(marker_labels, default_marker, fn)
  #
  size_labels = np.unique(size)
  size_map = create_label_map(size_labels, size_range.vmax,
                              lambda n, seed: size_range(np.arange(n)).numpy())
  # ====== prepare legend ====== #
  legend_name = []
  legend_style = []
  for c, m, s in zip(*legend):
    name = []
    style = []
    if c is None:  # color
      name.append('')
      style.append(color_map[0])
    else:
      name.append(str(c))
      style.append(color_map[c])
    if m is None:  # marker style
      name.append('')
      style.append(marker_map[0])
    else:
      name.append(str(m))
      style.append(marker_map[m])
    if s is None:  # size
      name.append('')
      style.append(size_map[0])
    else:
      name.append(str(s))
      style.append(size_map[s])
    # name
    name = tuple(name)
    style = tuple(style)
    if name not in legend_name:
      legend_name.append(name)
      legend_style.append(style)
  #
  legend = OrderedDict([(i, j) for i, j in zip(legend_style, legend_name)])
  # ====== return ====== #
  return ([color_map[i] for i in color],
          [marker_map[i] for i in marker],
          [size_map[i] for i in size],
          legend)
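# NOTE (hypothetical usage): per-point label arrays drive the color, marker
# and size mapping; the data below is made up and assumes the odin color /
# marker generators this helper depends on are importable.
import numpy as np

n_points = 5
class_labels = np.array([0, 0, 1, 1, 2])   # one color per class
domain_labels = np.array([0, 1, 0, 1, 0])  # one marker per domain
weights = np.array([1., 2., 3., 4., 5.])   # scaled into `size_range`

colors, markers, sizes, legend = _validate_color_marker_size_legend(
    max_n_points=n_points,
    color=class_labels,
    marker=domain_labels,
    size=weights,
    size_range=(8, 80))
# `legend` maps (color, marker, size) styles to label names for the plot legend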