# ET approximation parameters and annealing schedule length.
Hprime = 4
gamma = 3
Tsteps = 40
Tstart = 3.0
Tend = 1.0

# ============================================================================
# Main

if __name__ == "__main__":
    comm = MPI.COMM_WORLD

    banner = "=" * 78
    pprint(banner)
    pprint(" Running %d parallel processes" % comm.size)
    pprint(banner)

    # Configure DataLogger: scalar diagnostics as text, W as receptive-field
    # viewer on a D2 x D2 grid.
    dlog.start_gui(GUI)
    dlog.set_handler(
        ("T", "Qmean", "pi", "sigma", "Wmin", "Wmean", "Wmax"), TextPrinter
    )
    dlog.set_handler(
        "W", RFViewer, rf_shape=(D2, D2), symmetric=0, global_maximum=0
    )

    # Annealing schedule for the temperature T.
    # NOTE(review): (10, Tstart) / (-10, Tend) appear to follow
    # LinearAnnealing's (step, value) convention, a negative step counting
    # from the end -- confirm against pulp.em.annealing.
    anneal = LinearAnnealing(Tsteps)
    anneal["T"] = [(10, Tstart), (-10, Tend)]
# Build the BSC-ET model; W, sigma, pi and mu are all learned.
model = BSC_ET(D, H, Hprime, gamma, to_learn=['W', 'sigma', 'pi', 'mu'])
data = {'y': patches}
out_fname = output_path + "/data.h5"

# --- logging / output configuration ---
# Scalars echoed as text every EM step.
print_list = ('T', 'Q', 'pi', 'sigma', 'N', 'MAE', 'L')
dlog.set_handler(print_list, TextPrinter)
# Quantities persisted into the result HDF5 file.
h5store_list = ('W', 'pi', 'sigma', 'y', 'MAE', 'N', 'L', 'Q', 'mu')
dlog.set_handler(h5store_list, StoreToH5, output_path + '/result.h5')

# --- model initialisation ---
# Random standard initialisation from the data.
model_params = model.standard_init(data)
# model_params['mu'] = np.mean(data['y'], axis=0)

# --- EM with annealing ---
em = EM(model=model, anneal=anneal)
em.data = data
em.lparams = model_params
em.run()

dlog.close(True)
pprint("Done")
# Parameters
rf_shape = (26, 26)
H = 16

# Configure Data-Logger: W as receptive fields, S/C as y-over-time plots,
# plus a textual echo of T, S and C.
dlog.start_gui(GUI)
dlog.set_handler('W', RFViewer, rf_shape=rf_shape)
dlog.set_handler('S', YTPlotter)
dlog.set_handler('C', YTPlotter)
dlog.set_handler(('T', 'S', 'C'), TextPrinter)

# And GO!
D = rf_shape[0] * rf_shape[1]
Wshape = (H, D)

# Feed 50 synthetic records (random W, sin/cos of T) into the logger.
for i, T in enumerate(np.linspace(0., 20, 50), start=1):
    pprint("%i th iteration..." % i)
    W = np.random.normal(size=Wshape)
    dlog.append_all({
        'T': T,
        'W': W,
        'S': np.sin(T),
        'C': np.cos(T),
    })

dlog.close()
# Annealing:
# start_temp = 13.0
start_temp = 1.0
end_temp = 1.0
anneal_steps = 50
anneal_prior = False

# Configure data logger below
# ----------------------------- Parameters end --------------------------------

# Main
if __name__ == "__main__":
    comm = MPI.COMM_WORLD
    pprint("=" * 40)
    pprint(" Running %d parallel processes" % comm.size)
    pprint("=" * 40)

    # -------------------------------- Controls -------------------------------
    # Sanity-check the configuration; any violated condition disables the run.
    run_algorithm = True
    pprint("\n============================= Warnings =============================")
    # If sigma is NOT learned, the generating sigma must match the fixed
    # value the algorithm will use, otherwise the comparison is meaningless.
    # (fixed message typo: "euqal" -> "equal")
    if ("sigma" not in to_learn) and (sigma_gt != sigma_learn):
        pprint("Generating Sigma not equal to learned parameter. Aborting.")
        run_algorithm = False
    # Same consistency requirement for pi when it is not learned.
    if ("pi" not in to_learn) and (pi_gt != pi_learn):
        pprint("Generating Pi not equal to learned parameter. Aborting.")
        run_algorithm = False
    # Annealing from T > 1 while learning sigma is not supported here.
    if ("sigma" in to_learn) and (start_temp > 1.0):
        pprint("Starting temperature not 1 with activated Sigma learning. Aborting.")
        run_algorithm = False
#%% top-layer variables top_train = .5 #when does the training start top_iters = (.8, 50) #when does how many iterations happen per step top_mulIt = 0 #should there even be more top-layer iterations each step #%% -- imput parsing 2.0 xD -- n = 2 while n < len(sys.argv): n2 = 1 try: a = float(sys.argv[n + 1]) try: exec('%s = %f' % (sys.argv[n], a)) pprint('%s = %f' % (sys.argv[n], a)) except: pprint('Error: Bad argument name!') except: if sys.argv[n] == 'outpath' or sys.argv[n] == 'h5path' or sys.argv[ n] == 'h5_path': try: exec('h5path = "%s"' % (sys.argv[n + 1].strip())) pprint('h5-path = "%s"' % sys.argv[n + 1].strip()) except: pprint('Error: Bad h5-path name!') n2 = 1 elif sys.argv[n] == 'mlppath' or sys.argv[n] == 'h5_mlp': try: exec('mlppath = "%s"' % (sys.argv[n + 1].strip()))
# Parameters
rf_shape = (26, 26)
H = 16

# Configure Data-Logger: receptive-field view for W, time plots for S/C,
# text echo for T, S and C.
dlog.start_gui(GUI)
dlog.set_handler('W', RFViewer, rf_shape=rf_shape)
dlog.set_handler('S', YTPlotter)
dlog.set_handler('C', YTPlotter)
dlog.set_handler(('T', 'S', 'C'), TextPrinter)

# And GO!
D = rf_shape[0] * rf_shape[1]
Wshape = (H, D)

i = 0
for T in np.linspace(0., 20, 50):
    i += 1
    pprint("%i th iteration..." % i)
    record = {
        'T': T,
        'W': np.random.normal(size=Wshape),
        'S': np.sin(T),
        'C': np.cos(T),
    }
    dlog.append_all(record)

dlog.close()
# Annealing: #start_temp = 13.0 start_temp = 1.0 end_temp = 1.0 anneal_steps = 50 anneal_prior = False # Configure data logger below # ----------------------------- Parameters end -------------------------------- # Main if __name__ == "__main__": comm = MPI.COMM_WORLD pprint("=" * 40) pprint(" Running %d parallel processes" % comm.size) pprint("=" * 40) # -------------------------------- Controls ----------------------------------- run_algorithm = True pprint( "\n============================= Warnings =============================" ) if (not 'sigma' in to_learn) and (sigma_gt != sigma_learn): pprint("Generating Sigma not euqal to learned parameter. Aborting.") run_algorithm = False if (not 'pi' in to_learn) and (pi_gt != pi_learn): pprint("Generating Pi not euqal to learned parameter. Aborting.") run_algorithm = False if ('sigma' in to_learn) and (start_temp > 1.0):
# Model size and ET/annealing parameters (H derived from the patch side D2).
H = 2 * D2
Hprime = 6
gamma = 3
Tsteps = 50
Tstart = 1.1
Tend = 1.1

#============================================================================
# Main

if __name__ == "__main__":
    comm = MPI.COMM_WORLD

    banner = "=" * 78
    pprint(banner)
    pprint(" Running %d parallel processes" % comm.size)
    pprint(banner)

    # Configure DataLogger: text echo for the scalar diagnostics and a
    # symmetric, globally scaled receptive-field view of W.
    dlog.start_gui(GUI)
    dlog.set_handler(
        ('T', 'Qmean', 'pi', 'sigma', 'Wmin', 'Wmean', 'Wmax'), TextPrinter
    )
    dlog.set_handler(
        'W', RFViewer, rf_shape=(D2, D2), symmetric=1, global_maximum=1
    )
from pulp.utils.parallel import stride_data
from pulp.utils.datalog import dlog, StoreToH5, TextPrinter, StoreToTxt
from pulp.em import EM
from pulp.em.annealing import LinearAnnealing
from pulp.em.camodels.bsc_et import BSC_ET

plt.rcParams['image.cmap'] = 'viridis'

# Expect exactly three CLI arguments: data file, model spec, job name.
data_file, model_file, job_name = sys.argv[1:]

comm = MPI.COMM_WORLD
comm.Barrier()
pprint("=" * 40)
pprint(" Running %d parallel processes" % comm.size)
pprint("=" * 40)

# Memory-map the data and take this rank's contiguous stripe.  N_all is
# truncated so every rank gets the same number of samples.
data = joblib.load(data_file, mmap_mode='r')
N_all = data['data'].shape[0] - (data['data'].shape[0] % comm.size)
first_y, last_y = stride_data(N_all)
patches = np.array(data['data'][first_y:last_y])
# Drop the mmap handle now that the stripe has been copied.
del data

# Load the model specification.
# Fixed: yaml.load() without an explicit Loader is a TypeError on
# PyYAML >= 6 and unsafe on older versions (arbitrary object construction);
# safe_load is the correct choice for a plain config file.
with open(model_file, 'r') as model_fh:
    model_spec = yaml.safe_load(model_fh)
H, Hprime, gamma, n_anneal, Ncut, T = [
    model_spec[key]
    for key in ['H', 'Hprime', 'gamma', 'n_anneal', 'N_cut', 'T']
]