def main():
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)
    setup_log(options)

    # args_pre = [
    #     options.data_dir,
    # ]
    kwargs_pre = {
        "train_ratio": options.training_ratio,
        # "val_ratio": options.validation_ratio,
    }

    X_train, y_train, X_val, y_val = preprocess(**kwargs_pre)

    args = [
        X_train,
        y_train,
        X_val,
        y_val,
    ]
    kwargs = {
        "n_hidden1": options.n_hidden1,
        "n_hidden2": options.n_hidden2,
        "epochs": options.epochs,
        "batch_size": options.batch_size,
        "dropout_rate": options.dropout_rate,
        "learning_rate": options.learning_rate
    }

    optimize(*args, **kwargs)
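# --- Hedged sketch, not from the original source ---
# A minimal build_parser() consistent with the options main() reads above
# (training_ratio, n_hidden1, n_hidden2, epochs, batch_size, dropout_rate,
# learning_rate). Flag spellings, types, and defaults are assumptions.
import argparse

def build_parser():
    parser = argparse.ArgumentParser(description="Train and tune the model")
    parser.add_argument("--training-ratio", type=float, default=0.8)
    parser.add_argument("--n-hidden1", type=int, default=128)
    parser.add_argument("--n-hidden2", type=int, default=64)
    parser.add_argument("--epochs", type=int, default=100)
    parser.add_argument("--batch-size", type=int, default=32)
    parser.add_argument("--dropout-rate", type=float, default=0.5)
    parser.add_argument("--learning-rate", type=float, default=1e-3)
    return parser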
def main(t, pattern):
    CO2_emission = 0
    lngship_total = 0
    hfoship_total = 0
    mgoship_total = 0

    data.set_t(t, pattern)
    prob, var_inflow, var_outflow = optimize("cost", size, t)
    opt_cost = pulp.value(prob.objective)

    for v in prob.variables():
        i = v.name.split('_')[1]
        j = v.name.split('_')[3]

        # Compute total CO2 emissions
        if 'lng_out' in v.name:
            CO2_emission += v.varValue * CO2['lng']
        elif 'hfo_out' in v.name:
            CO2_emission += v.varValue * CO2['hfo']
        elif 'mgo_out' in v.name:
            CO2_emission += v.varValue * CO2['mgo']

        # Compute [vessel-km] for each vessel type
        if ('lngship' in v.name) and ('out' in v.name):
            lngship_total += v.varValue * distance.iloc[dict_rev[i], dict_rev[j]]
        elif ('scrbship' in v.name) and ('out' in v.name):
            hfoship_total += v.varValue * distance.iloc[dict_rev[i], dict_rev[j]]
        elif ('ordship' in v.name) and ('out' in v.name):
            mgoship_total += v.varValue * distance.iloc[dict_rev[i], dict_rev[j]]

    print("\n")
    print("Status:", pulp.LpStatus[prob.status])
    print("opt_cost: ", opt_cost, 'CO2_emission: ', CO2_emission, "\n")
    print('lngship_total_ton*km: ', lngship_total, '[vessel-km]')
    print('hfoship_total_ton*km: ', hfoship_total, '[vessel-km]')
    print('mgoship_total_ton*km: ', mgoship_total, '[vessel-km]')
    print("\n")

    # Display the value of each variable obtained from the optimization
    # for v in prob.variables():
    #     if v.varValue != 0:
    #         print(v.name, "=", v.varValue)

    return opt_cost, prob, CO2_emission
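# --- Hedged usage sketch, not from the original source ---
# Evaluate the cost-optimal plan for a few demand patterns at one time step
# and compare cost and CO2. The time step value and pattern names below are
# hypothetical placeholders.
if __name__ == "__main__":
    results = {}
    for pattern in ("base", "high_demand"):
        opt_cost, prob, co2 = main(t=0, pattern=pattern)
        results[pattern] = (opt_cost, co2)
    for pattern, (cost, co2) in results.items():
        print(pattern, "cost:", cost, "CO2:", co2)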
dataspec_command = MADELON_spec.generate_dataspec_command()
print netspec.to_string(dataspec_command)
retcode = subprocess.check_call(dataspec_command)
print 'data-spec result:', retcode

netgen_command = MADELON_spec.generate_netgen_command()
print netspec.to_string(netgen_command)
retcode = subprocess.check_call(netgen_command)
print 'net-gen result:', retcode

# Setup opt
lambdas = array([0.40, 2000.0])
fn = lambda x, item, epsilon: util.Gaussian_RBF_lambda(x, item, epsilon, lambdas)
opt = optimize(RBF_func=fn)

# First run
super_transition_steps = 20000

# Starter run setup
MADELON_spec.lf_step = 100
MADELON_spec.window_size = 4
MADELON_spec.epsilon = 0.02
MADELON_spec.repeat_iteration = 40
MADELON_spec.ceiling = 10
MADELON_spec.sample_sigmas = False
MADELON_spec.use_decay = False
MADELON_spec.negate = False
def tbn(ai,
        height_map=None,
        fdm=None,
        a=None,
        b=None,
        output_size=None,
        max_iter_time=4,
        neighborhoods=[7, 5, 3],
        wrap='no',
        init_size=64,
        init='random',
        init_log_dir=None,
        log_dir='./log/',
        optimize_log_dir=None,
        save_output=True):
    # Exactly one of output_size / b must be given; a and b must be given together.
    assert (output_size is None) ^ (b is None)
    assert (a is None) == (b is None)
    assert init in ('random', 'smart')

    if height_map is not None:
        assert ai.shape[:2] == height_map.shape[:2]
        height_map = height_map.reshape(height_map.shape + (1,))
        ai = np.concatenate([ai, height_map], axis=2)
    if fdm is not None:
        assert ai.shape[:2] == fdm.shape[:2]
    if a is not None:
        assert a.shape[:2] == ai.shape[:2]

    if output_size is None:
        if b is not None:
            output_size = b.shape[:2]
        else:
            output_size = ai.shape[:2]
    assert init_size <= output_size[0] and init_size <= output_size[1]

    def log(filename, image):
        if log_dir is not None:
            Image.fromarray(image.astype(np.uint8)).save(log_dir + filename)

    sizes = pyramid_sizes(init_size, ai.shape[:2], output_size)

    # Coarsest pyramid level: build the initial output, then optimize it.
    _ai, _fdm, _a, _b = tbn_pyramid_level(sizes[0], ai, fdm, a, b)
    if init == 'random':
        color_init_b, init_b = random_init(_ai, sizes[0][1])
    elif init == 'smart':
        color_init_b, init_b = generate_initial_output(_ai, _a, _b, init_log_dir)
    log('init_b.png', color_init_b)

    bi, bi_rgb = optimize(_ai, init_b, _fdm, _a, _b, max_iter_time,
                          neighborhoods, wrap, optimize_log_dir)

    # Coarse-to-fine refinement: upsample the previous result and re-optimize
    # at each finer pyramid level.
    old_size = sizes[0]
    for new_size in sizes[1:]:
        log('bi' + str(old_size[1]) + '.png', bi_rgb)
        bi = upsample(bi, old_size, new_size)
        _ai, _fdm, _a, _b = tbn_pyramid_level(new_size, ai, fdm, a, b)
        bi, bi_rgb = optimize(_ai, bi, _fdm, _a, _b, max_iter_time,
                              neighborhoods, wrap, optimize_log_dir)
        old_size = new_size

    if save_output:
        Image.fromarray(bi_rgb[:, :, :3].astype(np.uint8)).save('./bi.png')
        if height_map is not None:
            Image.fromarray(bi_rgb[:, :, 3].astype(np.uint8)).save('./bhm.png')

    return bi_rgb
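# --- Hedged usage sketch, not from the original source ---
# Synthesize a 256x256 texture from an exemplar image on disk using tbn()
# above. The file name 'exemplar.png' is hypothetical; log_dir=None disables
# intermediate logging so no log directory is required.
if __name__ == '__main__':
    exemplar = np.asarray(Image.open('exemplar.png').convert('RGB'), dtype=np.float64)
    bi_rgb = tbn(exemplar, output_size=(256, 256), init='random',
                 max_iter_time=4, neighborhoods=[7, 5, 3],
                 log_dir=None, save_output=True)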
#true = [{ 'theta' : np.dot( U.T , V1[i,:] ) ,
#          'M' : 0.1*np.dot( U.T * V1[i,:] , U ) } for i in range(iii)]
#
#for t in true:
#    w,v = eig( np.eye(t['M'].shape[0]) - t['M'] )
#    print 'eig true M' , w.real
#
#trupar = true
#for i in range(5):
#    trupar = optimize(init_params=trupar,args=data)
#    callback_one(trupar,data)
#trupar = objective.inflate(trupar)

params = np.concatenate( [term.flatten(ip) for ip in init_params] )
for i in range(8):
    params = optimize(init_params=params,args=data)
#    callback_one(params,data)
#params = objective.inflate(params)

#optU = [param['theta'] for param in params]

#print
#print 'stimulus sigma : ', sigma
#print 'true    ||subunit RF||^2 : ', np.sum(U*U,axis=1)
#print 'optimal ||subunit RF||^2 : ', [np.sum(optu*optu) for optu in optU]
#print
#
#def show(string,p):
#    print 'log-likelihood of %s = %f barrier = %f ldet = %f minw = %f' \
#        % ( string , objective.f(p,data), objective.barrier(p,data) ,
#            objective.ldet(p,data) , np.min(objective.eigs(p,data)) )
def run(self):
    self.preRun()
    optimize(DefaultResDir, DefaultCdnDir)
    self.postRun()
    return 0
        os.remove(file_remove)
        print("Deleted " + file_remove)


def pretrain(**kwargs):
    d = dsc.DeepSubspaceClustering(**kwargs)
    if 'save_path' in kwargs:
        clean(kwargs['save_path'].replace('{0:.4g}', '*.npz'))
    return d.pre_loss, 1.0


opt_params = {'model': pretrain,
              'dataset': 'Coil20',
              'n_rand': 10,
              'hidden_dims': [256, 64, 256],
              'sda_optimizer': 'Adam',
              'sda_decay': 'none',
              'weight_init': 'sda-normal',
              'weight_init_params': {'epochs_max': 10000,
                                     'sda_printstep': -100,
                                     'validation_step': 10,
                                     'stop_criteria': 3},
              'space': [Real(1.0E-05, 1.0E-01, "log-uniform", name='lr'),
                        Integer(1, 200, name='batch_num')],
              'save_path': "./saved/models/coil20/256.64_10000.10.3_{0:.4g}"}

data_loaded = loadmat("./saved/rescaled/" + opt_params.pop('dataset'))
opt_params['inputX'] = data_loaded['X']
opt_params['inputX_val'] = data_loaded['X_val']

result = optimize(forest_minimize, opt_params, 100, random_seed=0, verb_model=False, verb=True)
dump(result, "optims/pretrain/256.64_10000.10.3.opt")
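# --- Hedged sketch, not from the original source ---
# Inspecting the saved result afterwards, assuming dump/load come from
# scikit-optimize (skopt.dump / skopt.load) and the optimize() wrapper returns
# a scikit-optimize result object exposing .x (best parameters) and .fun
# (best objective value). Both points are assumptions about this codebase.
from skopt import load

best = load("optims/pretrain/256.64_10000.10.3.opt")
print("best params:", best.x, "best loss:", best.fun)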
def model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=False):
    """
    Builds the logistic regression model by calling the function you've implemented previously

    Arguments:
    X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
    Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
    X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
    Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
    num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
    learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
    print_cost -- Set to true to print the cost every 100 iterations

    Returns:
    d -- dictionary containing information about the model.
    """

    ### START CODE HERE ###

    # initialize parameters with zeros (≈ 1 line of code)
    w, b = initialize_with_zeros(X_train.shape[0])

    # Gradient descent (≈ 1 line of code); pass print_cost through so the flag
    # documented above actually controls cost printing
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)

    # Retrieve parameters w and b from dictionary "parameters"
    w = parameters["w"]
    b = parameters["b"]

    # Predict test/train set examples (≈ 2 lines of code)
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)

    ### END CODE HERE ###

    # Print train/test Errors
    print("train accuracy: {} %".format(
        100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy: {} %".format(
        100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))

    d = {"costs": costs,
         "Y_prediction_test": Y_prediction_test,
         "Y_prediction_train": Y_prediction_train,
         "w": w,
         "b": b,
         "learning_rate": learning_rate,
         "num_iterations": num_iterations}

    return d
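# --- Hedged usage sketch, not part of the assignment code ---
# Train model() on a small synthetic dataset. Shapes follow the docstring
# above; the random data and hyperparameter values are purely illustrative.
import numpy as np

np.random.seed(1)
X_train = np.random.randn(12288, 200)               # (num_px * num_px * 3, m_train)
Y_train = (np.random.rand(1, 200) > 0.5).astype(int)
X_test = np.random.randn(12288, 50)
Y_test = (np.random.rand(1, 50) > 0.5).astype(int)

d = model(X_train, Y_train, X_test, Y_test,
          num_iterations=2000, learning_rate=0.005, print_cost=True)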
                 Real(1.0E-04, 1.0E-01, "log-uniform", name='lambda2'),
                 Real(1.0E+02, 1.0E+05, "log-uniform", name='lambda3')]}

init = {'model': run_model, 'n_rand': 10, 'images_norm': images_norm, 'images_norm_val': images_norm_val, 'labels': labels,
        'load_path': load_path, 'hidden_dims': hidden_dims, 'trainC': True, 'giveC': False,
        'space': [Real(1.0E-05, 1.0E-02, "log-uniform", name='lr'),
                  Integer(1, 200, name='batch_num'),
                  Real(1.0E+00, 1.0E+03, "log-uniform", name='alpha1'),
                  Real(1.0E-02, 1.0E+01, "log-uniform", name='lambda1'),
                  Real(1.0E-04, 1.0E-01, "log-uniform", name='lambda2'),
                  Real(1.0E+02, 1.0E+05, "log-uniform", name='lambda3')]}

print("1: SSC")
result = optimize(forest_minimize, ssc, 100, random_seed=0, verb_model=False, verb=True)
dump(result, "optims/train/1_SSC/256.64_10000.10.3.opt")

print("2: Autoencoder+SSC")
result = optimize(forest_minimize, ae, 100, random_seed=0, verb_model=False, verb=True)
dump(result, "optims/train/2_SSC+AE/256.64_10000.10.3.opt")

print("3: Global")
result = optimize(forest_minimize,
from optimize import *
import util
from numpy import *
from numpy.random import *
from matplotlib.pyplot import *


def objective(seq):
    return float(0.5*cos(seq)+1)


lambdas = array([5])
fn = lambda x, item, epsilon: util.Gaussian_RBF_lambda(x, item, epsilon, lambdas)
opt = optimize(fn)
opt.bounds = [(2.0, 7.0)]
opt.num_basis = 50
opt.start_point = [3.0]
opt.maxeval = 100
opt.epsilons = arange(15.0, 16.0, 0.5)
opt.bf_opt_steps = [0.1]
opt.reinitialize()

seq = arange(2.0, 7.0, 0.1)
#y_value = -(0.5*seq*seq*seq - 0.1*seq*seq)
y_value = 0.5*cos(seq)+1.0

xs = []
ys = []
x = opt.start_point
for i in range(50):
    # x = opt.direct(float(i+1))  # Use direct to optimize