# --- TSC training run: generate bars data, configure logging, run EM --------
# NOTE(review): reconstructed from a source whose newlines were lost; statement
# order follows the original inline comment markers.

# Generate bars data; N is split evenly across MPI ranks (comm.size).
my_data = model.generate_data(params_gt, N // comm.size)

# Configure DataLogger: which quantities go to the terminal, a text file,
# and the HDF5 result file.
print_list = ('T', 'Q', 'pi', 'sigma', 'N', 'MAE')
store_list = ('*')  # a plain string wildcard, not a tuple -- presumably dlog treats '*' as "store everything"; confirm
dlog.set_handler(print_list, TextPrinter)
dlog.set_handler(print_list, StoreToTxt, output_path + '/terminal.txt')
dlog.set_handler(store_list, StoreToH5, output_path + '/result.h5')

# Initialize model parameters from the data statistics.
model_params = model.standard_init(my_data)

if 'anneal' in params:
    # Caller supplied a schedule via the params dict.
    anneal = params.get('anneal')
else:
    # Choose a default annealing schedule: 50 EM steps, temperature cooling
    # from 2.0 to 1.0 over the first 70% of the run, Ncut_factor ramping up
    # over the first 2/3, no prior annealing.
    anneal = LinearAnnealing(50)
    anneal['T'] = [(0, 2.), (.7, 1.)]
    anneal['Ncut_factor'] = [(0, 0.), (2. / 3, 1.)]
    anneal['anneal_prior'] = False

# Create and start EM annealing.
em = EM(model=model, anneal=anneal)
em.data = my_data
em.lparams = model_params
em.run()

dlog.close()
pprint("Done")
# --- Ternary sparse coding (TSC) bars demo: model + ground-truth setup ------
# NOTE(review): reconstructed from a source whose newlines were lost.

# Number of datapoints to generate
N = 1000

# Each datapoint is of D = size*size
size = 5

# Dimensionality of the model
H = 2 * size   # number of latents
D = size ** 2  # dimensionality of observed data

# Approximation parameters for Expectation Truncation
Hprime = 7
gamma = 5

# Import and instantiate a model
from prosper.em.camodels.tsc_et import Ternary_ET
model = Ternary_ET(D, H, Hprime, gamma)

# Ground truth parameters. Only used to generate training data.
params_gt = {
    'W': 10 * generate_bars_dict(H),
    'pi': 1.0 / size,
    'sigma': 2.0,
}

# Annealing schedule: 300 EM steps; entries are presumably
# (progress fraction, value) breakpoints -- confirm against LinearAnnealing.
from prosper.em.annealing import LinearAnnealing
anneal = LinearAnnealing(300)
anneal['T'] = [(0, 2.), (.7, 1.)]
anneal['Ncut_factor'] = [(0, 0.), (2. / 3, 1.)]
anneal['anneal_prior'] = False
else: model = BSC_ET(D, H, Hprime, gamma) # Configure DataLogger print_list = ('T', 'L', 'pi', 'sigma') dlog.set_handler(print_list, TextPrinter) # prints things to terminal txt_list = ('T', 'L', 'pi', 'sigma') dlog.set_handler(txt_list, StoreToTxt, output_path + '/results.txt') # stores things in a txt file h5_list = ('T', 'L', 'pi', 'sigma', 'W') dlog.set_handler(h5_list, StoreToH5, output_path + '/results.h5') # stores things in an h5 file # Choose annealing schedule from prosper.em.annealing import LinearAnnealing anneal = LinearAnnealing(20) # decrease anneal['T'] = [(0, 5.), (.8, 1.)] anneal['Ncut_factor'] = [(0, 0.), (0.5, 0.), (0.6, 1.)] # anneal['Ncut_factor'] = [(0,0.),(0.7,1.)] # anneal['Ncut_factor'] = [(0,0.),(0.7,1.)] anneal['W_noise'] = [(0, np.std(ts) / 2.), (0.7, 0.)] # anneal['pi_noise'] = [(0,0.),(0.2,0.1),(0.7,0.)] anneal['anneal_prior'] = False assert train_labels.shape[0] == ts.shape[0] my_data = {'y': ts, 'l': train_labels} model_params = model.standard_init(my_data) pp("model defined") em = EM(model=model, anneal=anneal) em.data = my_data em.lparams = model_params
# --- Discrete sparse coding (DSC) bars demo: model + ground-truth setup -----
# NOTE(review): reconstructed from a source whose newlines were lost.

# Number of datapoints to generate
N = 1000

# Each datapoint is of D = size*size
size = 5

# Dimensionality of the model
H = 2 * size   # number of latents
D = size ** 2  # dimensionality of observed data

# Approximation parameters for Expectation Truncation
Hprime = 7
gamma = 5

# Latent states of dsc and their prior probabilities (must sum to 1).
states = np.array([0., 1., 2.])
pi = np.array([0.8, 0.15, 0.05])

# Import and instantiate a model
from prosper.em.camodels.dsc_et import DSC_ET
model = DSC_ET(D, H, Hprime, gamma, states=states)

# Ground truth parameters. Only used to generate training data.
params_gt = {'W': 10 * generate_bars_dict(H), 'pi': pi, 'sigma': 2.0}

# Annealing schedule: 100 EM steps; entries are presumably
# (progress fraction, value) breakpoints -- confirm against LinearAnnealing.
from prosper.em.annealing import LinearAnnealing
anneal = LinearAnnealing(100)
anneal['T'] = [(0, 2.), (.7, 1.)]
anneal['Ncut_factor'] = [(0, 0.), (2. / 3, 1.)]
anneal['anneal_prior'] = False
# --- Binary sparse coding (BSC) bars demo: model + ground-truth setup -------
# NOTE(review): reconstructed from a source whose newlines were lost.

# Number of datapoints to generate
N = 1000

# Each datapoint is of D = size*size
size = 5

# Dimensionality of the model
H = 2 * size   # number of latents
D = size ** 2  # dimensionality of observed data

# Approximation parameters for Expectation Truncation
Hprime = 8
gamma = 5

# Import and instantiate a model
from prosper.em.camodels.bsc_et import BSC_ET
model = BSC_ET(D, H, Hprime, gamma)

# Ground truth parameters. Only used to generate training data.
params_gt = {'W': 10 * generate_bars_dict(H), 'pi': 1.0 / size, 'sigma': 2.0}

# Choose annealing schedule: 150 EM steps; entries are presumably
# (progress fraction, value) breakpoints -- confirm against LinearAnnealing.
from prosper.em.annealing import LinearAnnealing
anneal = LinearAnnealing(150)
anneal['T'] = [(0, 5.), (.8, 1.)]
anneal['Ncut_factor'] = [(0, 0.), (2. / 3, 1.)]
anneal['anneal_prior'] = False
else: model = BSC_ET(D, H, Hprime, gamma) # Configure DataLogger print_list = ('T', 'L', 'pi', 'sigma') dlog.set_handler(print_list, TextPrinter) # prints things to terminal txt_list = ('T', 'L', 'pi', 'sigma') dlog.set_handler(txt_list, StoreToTxt, output_path + '/results.txt') # stores things in a txt file h5_list = ('T', 'L', 'pi', 'sigma', 'W') dlog.set_handler(h5_list, StoreToH5, output_path + '/results.h5') # stores things in an h5 file # Choose annealing schedule from prosper.em.annealing import LinearAnnealing anneal = LinearAnnealing(120) # decrease anneal['T'] = [(0, 5.), (.8, 1.)] anneal['Ncut_factor'] = [(0, 0.), (0.5, 0.), (0.6, 1.)] # anneal['Ncut_factor'] = [(0,0.),(0.7,1.)] # anneal['Ncut_factor'] = [(0,0.),(0.7,1.)] anneal['W_noise'] = [(0, np.std(ts) / 2.), (0.7, 0.)] # anneal['pi_noise'] = [(0,0.),(0.2,0.1),(0.7,0.)] anneal['anneal_prior'] = False training_labels = np.array(training_labels) assert training_labels.shape[0] == ts.shape[0] my_data = {'y': ts, 'l': training_labels} model_params = model.standard_init(my_data) print("model defined") em = EM(model=model, anneal=anneal) em.data = my_data
""" Demonstrate how to use the annealing class to generate piecewise-linear annealing schedules. """ import sys from prosper.em.annealing import LinearAnnealing Tsteps = 80 Tstart = 20 Tend = 1.05 # Choose annealing schedule anneal = LinearAnnealing(Tsteps) anneal['T'] = [(10, Tstart) , (-10, Tend)] anneal['param_a'] = [(2/3, 0.) , (-10, 1.)] anneal['param_b'] = 0.5 assert anneal['param_c'] == 0.0 while not anneal.finished: print("[%3d] T=%.2f parameter_a=%.2f parameter_b=%.2f" % (anneal['step'], anneal['T'], anneal['param_a'], anneal["param_b"])) anneal.next()