# --- setup for a "continuous output" sweep -----------------------------------
# Reconstructed from a whitespace-mangled paste: statements were collapsed onto
# one line (a syntax error as written). Logic and runtime strings unchanged.
# NOTE(review): `num_class`, `num_dich`, `ovlp`, `util`, `exp`, and `SAVE_DIR`
# are defined elsewhere in the original file/project — confirm before running.

N_out = 10
N_list = None
input_type = 'task_inp'
# output_type = 'factored'
# output_type = 'rotated1.0'
# active choice: a grid of interpolation/rotation values, presumably swept over
output_type = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]

task = util.RandomDichotomies(num_class, num_dich, overlap=ovlp)
sample_dichotomies = num_dich
# sample_dichotomies = None

# Random Gaussian input patterns, 100-dimensional, unit variance of the means.
this_exp = exp.random_patterns(task, SAVE_DIR,
                               num_class=num_class,
                               dim=100,
                               var_means=1)

# Legacy folder-discovery code, kept commented out in the original:
# FOLDERS = 'results/continuous/%d_%d/%s/%s/'%(num_class,num_dich,input_type, output_type)
# if (N_list is None):
#     files = os.listdir(SAVE_DIR+FOLDERS)
#     param_files = [f for f in files if 'parameters' in f]
#     if len(param_files)==0:
#         raise ValueError('No experiments in specified folder `^`')
#     Ns = np.array([re.findall(r"_(\d+)_(\d+)?",f)[0] \
#         for f in param_files]).astype(int)
this_exp = exp.mnist_multiclass(task, SAVE_DIR, z_prior=latent_dist, num_layer=num_layer, weight_decay=decay, decoder=readout_weights, nonlinearity=nonlinearity, good_start=good_start, init_coding=coding_level) elif which_task == 'mog': task = util.RandomDichotomies(num_cond,num_var) this_exp = exp.random_patterns(task, SAVE_DIR, num_class=num_cond, dim=100, var_means=1, z_prior=latent_dist, num_layer=num_layer, weight_decay=decay, decoder=readout_weights, nonlinearity=nonlinearity, good_start=good_start, init_coding=coding_level, rot=rotation) elif which_task == 'structured': inp_task = tasks.EmbeddedCube(tasks.StandardBinary(int(np.log2(num_cond))),100,noise_var=0.1) # inp_task = tasks.TwistedCube(tasks.StandardBinary(2), 100, f=rotation, noise_var=0.1) # inp_task = tasks.NudgedXOR(tasks.StandardBinary(2), 100, nudge_mag=rotation, noise_var=0.1, random=True) # task = tasks.LogicalFunctions(d=decs, function_class=num_var) task = tasks.RandomDichotomies(d=[(0,1,3,5),(0,2,3,6),(0,1,2,4)]) # task = tasks.RandomDichotomies(d=[(0,3)]) this_exp = exp.structured_inputs(task, input_task=inp_task, SAVE_DIR=SAVE_DIR, noise_var=0.1,
# Project-local modules (not reformatted away: they are real dependencies of
# this script). Reconstructed from a whitespace-mangled one-line paste;
# logic and runtime strings unchanged.
import students
import assistants
import util
import experiments as exp

#%%
num_cond = 8
num_var = 2
# Binary dichotomy task over 8 conditions; third positional arg is 0 in the
# original (presumably an overlap parameter — TODO confirm against util).
task = util.RandomDichotomies(num_cond, num_var, 0)
# task = util.ParityMagnitude()

# this_exp = exp.mnist_multiclass(task, SAVE_DIR, abstracts=abstract_variables)
# NOTE(review): SAVE_DIR must be defined earlier in the full file.
this_exp = exp.random_patterns(task, SAVE_DIR, num_class=8, dim=100,
                               var_means=1, var_noise=0.1)

#%% set up the task
p = 2**num_var
allowed_actions = [0, 1, 2]
# allowed_actions = [0]
p_action = [0.8, 0.1, 0.1]  # sampling probabilities for the actions above
# p_action = [1.0]

# output_states = this_exp.train_data[1].data
# output_states = util.decimal(this_exp.train_data[1])
# Active choice: raw input patterns serve as the output states.
output_states = this_exp.train_data[0].data
# output_states = ContinuousEmbedding(N_, 1.0)(this_exp.train_data[1])
elif this_task == 'mog': task = tasks.RandomDichotomies(num_class, num_dich, overlap=ovlp, use_mse=gaus_obs) exp = experiments.random_patterns(N=N, task=task, SAVE_DIR=SAVE_DIR, num_class=num_class, dim=100, var_means=1, H=H, nonlinearity=nonlinearity, num_layer=num_layer, z_prior=latent_dist, weight_decay=decay, decoder=readout_weights, bsz=64, lr=1e-3, nepoch=nepoch,\ sample_dichotomies=sample_dichotomies, init=init, skip_metrics=skip_metrics, init_coding=coding_level, good_start=ols_initialised, fix_decoder=fixed_decoder, rot=rot) elif this_task == 'structured': # bits = np.nonzero(1-np.mod(np.arange(num_class)[:,None]//(2**np.arange(np.log2(num_class))[None,:]),2)) # pos_conds = np.split(bits[0][np.argsort(bits[1])],int(np.log2(num_class))) # inp_task = tasks.EmbeddedCube(tasks.StandardBinary(int(np.log2(num_class)))) # inp_task = tasks.TwistedCube(tasks.StandardBinary(2), 100, f=coding_level, noise_var=0.1)
# --- build a factorized target representation and fit a linear readout -------
# Reconstructed from a whitespace-mangled one-line paste; logic and runtime
# strings unchanged. NOTE(review): `util`, `exp`, `SAVE_DIR`, `la` (linear
# algebra module, presumably scipy.linalg), `np`, and `linear_model`
# (presumably sklearn) are bound elsewhere in the original file — confirm.

abstract_variables = util.DigitsBitwise()
# abstract_variables = util.ParityMagnitude()
# abstract_variables = util.RandomDichotomies(2)
# task = util.RandomDichotomies(8,2,0)
task = util.ParityMagnitude()
Q = abstract_variables.num_var
dic_type = 'general'
# dic_type = 'simple'

# this_exp = exp.mnist_multiclass(task, SAVE_DIR, abstracts=abstract_variables)
this_exp = exp.random_patterns(task, SAVE_DIR, num_class=8, dim=100,
                               var_means=1, abstracts=abstract_variables)

#%% make factorized representation
N = 100
C = np.random.rand(N, N)
# QR decomposition of a random matrix gives random orthonormal columns; keep
# only as many columns as the experiment's output dimensionality.
W1 = la.qr(C)[0][:, :this_exp.dim_output]
# Map {0,1} labels to {-10,+10} and project through the orthonormal basis.
targ = ((2 * this_exp.train_data[1].numpy() - 1) * 10) @ W1.T
b = 0.8 * targ.min()  # offset so the regression target is shifted positive-ward

linreg = linear_model.LinearRegression()
# linreg.fit(this_exp.train_data[0],this_exp.train_data[1].numpy()@W1.T)
linreg.fit(this_exp.train_data[0], targ - b)
# --- locate previously-run experiments on disk -------------------------------
# Reconstructed from a whitespace-mangled one-line paste; logic and runtime
# strings unchanged. NOTE(review): `task`, `exp`, `SAVE_DIR`, `latent_dist`,
# `num_layer`, `decay`, and `nonlinearity` come from earlier in the full file.

Q = task.num_var

# N_list = None  # set to None if you want to automatically discover which N have been tested
# N_list = [2,3,4,5,6,7,8,9,10,11,20,25,50,100]
# N_list = None
# N_list = [2,3,5,10,50,100]
N_list = [100]

# find experiments
# this_exp = exp.mnist_multiclass(task, SAVE_DIR,
#                                 z_prior=latent_dist,
#                                 num_layer=num_layer,
#                                 weight_decay=decay)
this_exp = exp.random_patterns(task, SAVE_DIR, num_class=8, dim=100,
                               var_means=1,
                               z_prior=latent_dist,
                               num_layer=num_layer,
                               weight_decay=decay)

this_folder = SAVE_DIR + this_exp.folder_hierarchy()

# Auto-discover which network sizes N have saved parameter files by parsing
# filenames of the form "...N<digits>_<nonlinearity>...".
if N_list is None:
    files = os.listdir(this_folder)
    param_files = [f for f in files if 'parameters' in f]
    if len(param_files) == 0:
        raise ValueError('No experiments in specified folder `^`')
    Ns = np.array([re.findall(r"N(\d+)_%s" % nonlinearity, f)[0]
                   for f in param_files]).astype(int)