Example #1
import pickle

import numpy as np
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPRegressor


def train(filename, X_train, Y_train, Y_Label):

	nn = MLPRegressor(hidden_layer_sizes= (100,100,100))
	X = np.array(X_train[0:1000])
	Y = np.array(Y_train[0:1000])
	nn.fit(X, Y)
	test = nn.predict(np.array(X_train[1000:]))
	# plot predict data and real data
	train = np.array(Y_train[1000:])
	x = np.arange(0,len(X_train)-1000)
	plt.plot(x,train)
	plt.plot(x,test)
	plt.show()
	print (Y_train[1000:])

	train_pred = nn.score(np.array(X_train[0:1000]), np.array(Y_train[0:1000]), sample_weight=None)
	test_pred = nn.score(np.array(X_train[1000:]), np.array(Y_train[1000:]), sample_weight=None)
	print(train_pred)
	print(test_pred)
	# predict label using trained regression
	# label_pred = test.tolist()
	# for i in range(len(label_pred)-1, 0,-1):
	# 	if label_pred[i]-label_pred[i-1] > 0: label_pred[i] = 1
	# 	else: label_pred[i] = 0
	# label_pred[0] = 0
	# count = 0
	# for (i, j) in zip(label_pred,Y_Label[1000:]):
	# 	if label_pred[i] == Y_Label[i]: count+=1
	# label_score.append(count*1.0/len(label_pred))


	pickle.dump(nn, open(filename, 'wb'))
	print (nn.get_params())
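A short sketch of reloading the pickled regressor for later reuse (a plausible follow-up, not part of the original; the path is hypothetical):

loaded_nn = pickle.load(open('mlp_model.pkl', 'rb'))
print(loaded_nn.get_params())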
Example #2
    def fit(self, verbose=False):
        data = self.data

        # split into train and test data
        train = data.T[0:150000]
        test = data.T[150000:]
        x_train = train.T[0:6].T
        y_train = train.T[6].T
        x_test = test.T[0:6].T
        y_test = test.T[6].T

        # train the model
        mlp = MLPRegressor()
        mlp.hidden_layer_sizes = self.hidden_layer_sizes
        mlp.activation = self.activation
        mlp.solver = self.solver
        mlp.alpha = self.alpha
        mlp.fit(x_train, y_train)
        self.mlp = mlp

        # predict
        y_train_p = mlp.predict(x_train)
        y_test_p = mlp.predict(x_test)

        # print training and test error
        train_error = np.mean((y_train - y_train_p)**2)
        test_error = np.mean((y_test - y_test_p)**2)
        if verbose:
            print(mlp.get_params())
            print("average training error is : %f" % (train_error))
            print("average test error is : %f" % (test_error))
        return train_error, test_error
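The hard-coded 150000-row split above can also be written with scikit-learn's train_test_split; a rough sketch (shuffle=False keeps the original ordering, and `data` is the same array used in fit above):

from sklearn.model_selection import train_test_split

samples = data.T                      # rows are samples: 6 feature columns + 1 target
x_all, y_all = samples[:, 0:6], samples[:, 6]
x_train, x_test, y_train, y_test = train_test_split(
    x_all, y_all, train_size=150000, shuffle=False)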
Example #3
def NN_Skit(x_train, y_train, x_test): 
	from sklearn.neural_network import MLPRegressor
	m = MLPRegressor(hidden_layer_sizes=(100,100,100),alpha=0.001,solver='adam',activation='relu',max_iter=1,random_state=1,verbose=True)
	#m = MLPRegressor(hidden_layer_sizes=(100,100,100,100,),random_state=1)
	#m = MLPRegressor(random_state=1)
	m.fit(x_train,y_train)
	print("Neural Network Score: {}".format(m.score(x_train,y_train)))
	print("Hyperparameters: {}".format(m.get_params()))
	mean_train = m.predict(x_train)
	mean_test = m.predict(x_test)
	return mean_train, mean_test, m
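A minimal usage sketch for NN_Skit on synthetic data (shapes and data are assumptions; note that max_iter=1 above means the network is barely trained, so scores will be low):

import numpy as np

rng = np.random.RandomState(0)
x_train = rng.rand(200, 5)
y_train = x_train.sum(axis=1) + 0.1 * rng.randn(200)
x_test = rng.rand(50, 5)

mean_train, mean_test, model = NN_Skit(x_train, y_train, x_test)
print(mean_test[:5])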
Example #4
def train(filename, a, b, X_train, Y_train, Y_Label):
	m = len(X_train) * a // b

	# X_train = norm(X_train)

	nn = MLPRegressor()
	# shuffle(zip(X_train, Y_train))
	X = np.array(X_train[0:m])
	Y = np.array(Y_train[0:m])

	nn.fit(X, Y)
	test = nn.predict(np.array(X_train[m:]))
	# plot predict data and real data
	train = np.array(Y_train[m:])
	x = np.arange(0,len(Y_train[m:]))
	plt.plot(x,train)
	plt.plot(x,test)
	plt.show()
	print(nn.score(np.array(X_train[0:m]), np.array(Y_train[0:m]), sample_weight=None))
	print(nn.score(np.array(X_train[m:]), np.array(Y_train[m:]), sample_weight=None))
	pickle.dump(nn, open(filename, 'wb'))
	print (nn.get_params())

	X_class = []
	Y_class = []
	for i in range(0, len(X_train)):
		if Y_Label[i] == -1: continue
		X_class.append(X_train[i])
		Y_class.append(Y_Label[i])
	m = len(X_class) * a // b
	X_class = norm(X_class)
	nn1 = MLPClassifier(hidden_layer_sizes=(50,50,50,),max_iter=20000, learning_rate_init=0.02)
	nn2 = SVC()
	X = np.array(X_class[0:m])
	Y = np.array(Y_class[0:m])
	nn1.fit(X, Y)
	nn2.fit(X, Y)
	print(nn1.predict(np.array(X_class[:m])))
	print(nn2.predict(np.array(X_class[:m])))
	print(Y_class[:m])
	print(nn1.score(np.array(X_class[0:m]), np.array(Y_class[0:m]), sample_weight=None))
	print(nn1.score(np.array(X_class[m:]), np.array(Y_class[m:]), sample_weight=None))
	print(nn2.score(np.array(X_class[0:m]), np.array(Y_class[0:m]), sample_weight=None))
	print(nn2.score(np.array(X_class[m:]), np.array(Y_class[m:]), sample_weight=None))
	y_score = nn2.fit(np.array(X_class[0:m]), np.array(Y_class[0:m])).decision_function(X_class[m:])

	fpr, tpr, _ = roc_curve(np.array(Y_class[m:]), y_score)
	roc_auc = auc(fpr, tpr)
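	# A plausible continuation (not in the original): the ROC variables
	# computed above are otherwise unused, so plot the curve for the SVC.
	plt.plot(fpr, tpr, label='SVC (AUC = %0.3f)' % roc_auc)
	plt.plot([0, 1], [0, 1], linestyle='--', color='grey')
	plt.xlabel('False positive rate')
	plt.ylabel('True positive rate')
	plt.legend(loc='lower right')
	plt.show()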
Example #5
def run_experiment(hidden_layer_sizes, number_months=12, learning_rate=.001):
    """

    :param hidden_layer_sizes: The hidden layers, e.g. (40, 10)
    :return:
    """
    mlp_regressor = MLPRegressor(
        hidden_layer_sizes=hidden_layer_sizes,
        activation='relu',  # most likely linear effects
        solver='adam',  # good choice for large data sets
        alpha=0.0001,  # L2 penalty (regularization term) parameter.
        batch_size='auto',
        learning_rate_init=learning_rate,
        max_iter=200,

        shuffle=True,

        random_state=None,
        tol=0.0001,
        #verbose=True,
        verbose=False,
        warm_start=False,  # erase previous solution

        early_stopping=False,  # if True, stop when the validation score stops improving
        validation_fraction=0.1,  # only used when early_stopping=True

        beta_1=0.9,  # solver=adam
        beta_2=0.999,  # solver=adam
        epsilon=1e-08  # solver=adam
    )

    setup_logger(hidden_layer_sizes, learning_rate)
    logging.info("hidden_layer_sizes=%s" % str(hidden_layer_sizes))
    logging.info("number_months=%i" % number_months)
    logging.info("learning_rate=%f" % learning_rate)
    for month in range(1, number_months):
        month_learned = "2016-%02i" % month
        logging.info("learn month %s" % month_learned)
        train(mlp_regressor, month_learned, month_learned, verbose=(month == 1))
        month_not_yet_learned = "2016-%02i" % (month + 1)
        logging.info("validate with month %s" % month_not_yet_learned)
        evaluate(mlp_regressor, month_not_yet_learned, month_not_yet_learned)
    logging.info(mlp_regressor.get_params())
    logger = logging.getLogger()
    handlers = logger.handlers[:]
    for handler in handlers:
        handler.close()
        logger.removeHandler(handler)
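A usage sketch (the layer configurations are arbitrary; setup_logger, train and evaluate are assumed to be defined elsewhere in the same module):

if __name__ == '__main__':
    for layers in [(40, 10), (100, 50, 10)]:
        run_experiment(layers, number_months=12, learning_rate=0.001)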
Example #6
def main(argv): 

    # read in vehicles csv
    vehicles_df = pd.read_csv("../data/vehicles.csv", low_memory=False)
    
    vehicles_displ_mpg_all = vehicles_df[['displ', 'UHighway']]
    vehicles_displ_mpg = vehicles_displ_mpg_all[vehicles_displ_mpg_all.displ > 0]
    
    half = int(len(vehicles_displ_mpg) / 2)

    # create the training set with the first half of data
    first_half = vehicles_displ_mpg[:half]
    second_half = vehicles_displ_mpg[half:]
    
    first_half_sorted = first_half.sort_values(by=['displ', 'UHighway'])
    first_half_grouped_by_mean =  pd.DataFrame({'train_mean' : \
        first_half_sorted.groupby('displ')['UHighway'].mean()}).reset_index() 
    
    first_half_x = first_half_grouped_by_mean['displ'].values.reshape(-1,1)
    print(type(first_half_x))
    print(first_half_x.shape)
    first_half_y = first_half_grouped_by_mean['train_mean'].values.reshape(-1,1)
    print(first_half_y.shape)
        
    #ax = first_half_grouped_by_median.plot (x = "displ", y = "train_median", c = "b")
    #plt.show ()
    
    second_half_sorted = second_half.sort_values(by=['displ', 'UHighway'])
    second_half_grouped_by_mean =  pd.DataFrame({'test_mean' : \
        second_half_sorted.groupby('displ')['UHighway'].mean()}).reset_index() 
    
    second_half_x = second_half_grouped_by_mean['displ'].values.reshape(-1,1)
    second_half_y = second_half_grouped_by_mean['test_mean'].values.reshape(-1,1)
    #second_half_grouped_by_median.plot (ax=ax, x = "displ", y = "test_median", c = "gold")
    #plt.show ()
    
    # Create the MLP regressor and loop over the available solvers
    solvers = ['lbfgs', 'sgd', 'adam']
    for solver in solvers:
        regr = MLPRegressor(hidden_layer_sizes=(100,), max_iter=200, alpha=1e-4, 
            solver=solver, verbose=10, tol=1e-4, random_state=1,learning_rate_init=.1)

        # Train the model using the training sets
        regr.fit(first_half_x, first_half_y.ravel())
    
        # Make predictions using the testing set
        second_half_y_pred = regr.predict(second_half_x)
    
        print("\tModel parameters: ", regr.get_params(deep=False))
    
        # mean squared error
        mse = mean_squared_error(second_half_y, second_half_y_pred)
        rmse = np.sqrt(mse)
        print("Mean squared error: %.2f" % mse)
        print("Root mean squared error: %.2f" % rmse)
        # Explained variance score: 1 is perfect prediction
        print('R-squared score: %.2f' % r2_score(second_half_y, second_half_y_pred))
    
        # Plot outputs
        plt.scatter(first_half_x, first_half_y,  color='green', label = "training set")
        plt.scatter(second_half_x, second_half_y,  color='red', label = "testing set")
        plt.plot(second_half_x, second_half_y_pred, color='blue', linewidth=3)
        plt.legend(loc='upper right')
        plt.xlabel ("Engine displacement (liter)")
        plt.ylabel ("Fuel economy (MPG)")
        plt.text(2, 60, r'$solver=%s$' %solver, fontsize=10, fontweight='bold')
        plt.show()
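A standard entry point for the script above (a sketch; argv is accepted but not used inside main):

if __name__ == "__main__":
    import sys
    main(sys.argv[1:])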
Example #7
from sklearn.neural_network import MLPRegressor

# model = MLPRegressor(hidden_layer_sizes=(6,1),activation ='identity',solver = 'lbfgs', learning_rate = 'constant',
#                      learning_rate_init =0.2,max_iter = 2000, random_state =13)

model = MLPRegressor(random_state=13)

# In[47]:

#To get pre-training Weights and Output Values
model.partial_fit(x_train, y_train)

# In[48]:

# Get model parameters
Params = model.get_params(deep=True)
print(type(Params))
print(Params)

# In[49]:

# Get initial parameters to compare pre-training and post-training

InitialWeightsI_H = model.coefs_[0]
# print(InitialWeightsI_H)
print(np.shape(InitialWeightsI_H))

InitialWeightsH_O = model.coefs_[1]
# print(InitialWeightsH_O)
print(np.shape(InitialWeightsH_O))
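A sketch of the post-training half of the comparison (a plausible continuation, not in the original): keep training with partial_fit so the weights captured above remain the true starting point, then diff the matrices.

# In[50]:

# Continue training from the current state (a plain fit() would re-initialize the weights)
for _ in range(200):
    model.partial_fit(x_train, y_train)

FinalWeightsI_H = model.coefs_[0]
FinalWeightsH_O = model.coefs_[1]

print(np.abs(FinalWeightsI_H - InitialWeightsI_H).mean())
print(np.abs(FinalWeightsH_O - InitialWeightsH_O).mean())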
Example #8
scaler = StandardScaler()
# Fit only to the training data
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
X[columns] = scaler.transform(X[columns])

from sklearn.neural_network import MLPRegressor

mlp = MLPRegressor(hidden_layer_sizes=(30,30,30))

mlp.fit(X_train,y_train)

predictions = mlp.predict(X_test)
print(mlp.score(X_test, y_test))
print(mlp.get_params())

from sklearn.metrics import r2_score, explained_variance_score
print(r2_score(y_test,predictions))
print(explained_variance_score(y_test, predictions))

import seaborn as sns

sns.lmplot("x", "y", data=pd.DataFrame({"x":y_test, "y" : predictions}))


preds = mlp.predict(X)

data['preds'] = preds

data['expected_diff'] = [i-j for i,j in zip(data.price.tolist(), data.preds.tolist())]
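Because new rows must go through the same scaler before prediction, a common follow-up is to persist the scaler together with the model; a small sketch using joblib (the file name is an assumption):

import joblib

joblib.dump({"scaler": scaler, "model": mlp}, "mlp_price_model.joblib")

# later / elsewhere
bundle = joblib.load("mlp_price_model.joblib")
new_preds = bundle["model"].predict(bundle["scaler"].transform(X_test))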
Example #9
def main():

  # pull in the hyperparameters for the neural network
  parser = argparse.ArgumentParser()
  parser.add_argument('config_file')
  parser.add_argument('--retrain',help='retrain model',action='store_true')
  parser.add_argument('--val_only',help='generate validation output only',action='store_true')
  parser.add_argument('--train_fullres',help='train on full resolution spectra',action='store_true')
  parser.add_argument('--disable_wp',help='disable weight propagation',action='store_true')

  args       = parser.parse_args()
  retrain    = args.retrain
  use_wp     = (not args.disable_wp)
  val_only   = args.val_only
  full_res   = args.train_fullres

  configname     = pathsplit(args.config_file)[1].replace('.json','')
  config         = json_load_ascii(args.config_file)
  wavelengthfile = config["wavelength_file"]
  names          = config['inputvector_order']
  trainfile      = config['trainfile']
  trainfile_rs   = config.get('trainfile_rs',None)

  if 'alb' not in names:
    names.append('alb')

  # assume outdir is always the same as trainfile (not trainfile_rs)
  outdir,trainf = pathsplit(trainfile)
  
  wl_inst, fwhm_inst = load_wavelength_file(wavelengthfile)

  # prm20151026t173213_libRadtran.mat (full res) shape = (9072, 7101)
  # prm20151026t173213_libRadtran_PRISM_rs.mat (instrument) shape = (9072, 246)

  if full_res:
    # Train on full-resolution RTM channels, rather than (downsampled)
    # instrument channels
    print('loading trainfile: "%s"'%str((trainfile)))
    D = loadmat(trainfile)
    wl =  D['wl'].squeeze()
    assert(len(wl)>len(wl_inst))
  else:
    # Load the resampled instrument channels
    print('loading trainfile_rs: "%s"'%str((trainfile_rs)))
    D  = loadmat(trainfile_rs)
    wl =  D['wl'].squeeze()
    assert(len(wl)==len(wl_inst))
    assert((wl==wl_inst).all())
    
  inputs = s.float32(D['input'])
  print('inputs.shape: "%s"'%str((inputs.shape)))
    
  tgts = D['rho']
  
  # dimensionality of state space = n_inputs
  n_inputs = len(names)
  print('names: "%s"'%str((names)))
  assert(n_inputs == inputs.shape[1])
  
  n_samples = len(inputs)
  n_wl = len(wl)

  # construct train/test partitions
  random.seed(42)    
  samp_idx = s.arange(n_samples)    
  tr_mask = np.zeros(n_samples,dtype=np.bool8)
  stratify_inputs=True
  if stratify_inputs:
    uniq = []
    trinputs,teinputs = [],[]
    for i in range(n_inputs):
      # hold out central unique value of each variable, train on rest
      holdi = np.unique(inputs[:,i])
      teinputs.append(np.array([holdi[len(holdi)//2]]))
      trinputs.append(np.setdiff1d(holdi,teinputs[-1]))
      print('i,trinputs[i],teinputs[i]: "%s"'%str((i,trinputs[i],
                                                   teinputs[i])))
      uniq.append(holdi)
    
    # NOTE: leave all albedo values in training set
    # partition on remaining (4) states
    for i in range(n_inputs):
      if not names[i].startswith('alb'):
        tr_mask |= np.isin(inputs[:,i],teinputs[i])
    # invert mask to get training indices 
    tr_mask = ~tr_mask
  else:
    #  validate on (100*p)% of the data
    p = 0.2
    random.shuffle(samp_idx)
    tr_mask[:int(n_samples*(1-p))] = 1
    
  tr_idx = samp_idx[tr_mask]
  val_idx = samp_idx[~tr_mask]
  n_samplesv = len(val_idx)
  print('n_samples: %d'%(n_samples))
  print('n_train:   %d'%(len(tr_idx)))
  print('n_val:     %d'%(n_samplesv))
  print('n_inputs:  %d'%(n_inputs))
  print('n_wl:      %d'%(n_wl))
  
  # initialize model
  # n_layers = n_hidden_layers + 1 (output)
  n_layers = 2
  n_hidden = 55
  if n_layers==2:
    weight_labs = ['input','output']
    layers = (n_hidden,)
  else:
    weight_labs = ['input','hidden','output']
    layers = (n_hidden,n_hidden,)

  # halfwidth of overlapping channel input range
  # n_half == 0 -> monochromatic
  # n_half == 1->3 channel averaging, 2->5 channel averaging, ...
  n_half = 0
  average_over = True # only valid if n_half > 0
  n_over = 2*n_half+1

  # 'auto'== 200 samples/batch
  batch_size = 'auto' 

  # train first subnetwork for many more epochs to ensure convergence
  init_max_iter = 500
  max_iter = 500

  long_train = False
  if long_train:
    init_max_iter *= 4
    max_iter *= 4

  # compute validation accuracy every val_step epochs
  init_step = 100
  val_step = 25

  # early stopping for val models, init via weight propagation
  es_val = True
  # early stopping for final models, initialized with val weights
  es_fin = True 

  # tol == -1 -> train until max_iter 
  tol_init = -1 
  tol_val = 1e-200 if es_val else -1
  tol_fin = 1e-200 if es_fin else -1

  # use a small percentage of training data for early stopping
  es_percent = 0.1 

  # set early stopping=True and disable on case-by-case basis
  mlpparms = dict(hidden_layer_sizes=layers, activation='relu', solver='adam',
                  alpha=1e-5, batch_size=batch_size, learning_rate='adaptive',
                  learning_rate_init=0.001, power_t=0.5, max_iter=max_iter,
                  random_state=42, early_stopping=True, tol=tol_init,
                  warm_start=False, momentum=0.9, nesterovs_momentum=True,
                  shuffle=False, validation_fraction=es_percent,
                  beta_1=0.9, beta_2=0.999, epsilon=1e-10, verbose=False,
                  n_iter_no_change=10)
  model = MLP(**mlpparms) 

  # map state parameter values to unit range
  inputs = normalize_inputs(inputs,names,clip=False)
  
  val_loss = dict(mse=[],mae=[])

  # pick a few validation toa spectra at varying mad to show in detail
  val_tgts = tgts[val_idx]
  val_dtgts = np.diff(val_tgts,1)
  val_dtgts_med = np.median(val_dtgts,axis=0)
  val_dtgts_mad = abs(val_dtgts-val_dtgts_med).sum(axis=1)
  sorti = np.argsort(val_dtgts_mad)
  val_qper = [0.25,0.5,0.75]
  val_toaidx = [sorti[int(n_samplesv*qp)] for qp in val_qper] 
  val_toalab = ['argq%d'%(qi*100) for qi in val_qper]
  val_toalab = dict(zip([sorti[0]]+val_toaidx+[sorti[-1]],
                        ['argmin']+val_toalab+['argmax']))
  for ridx,rlab in val_toalab.items():
    print('val_dtgts_mad[%s]: "%s"'%(rlab,str((val_dtgts_mad[ridx]))))
    print('inputs[%s]: "%s"'%(rlab,str((inputs[val_idx[ridx]]))))

  # bookkeeping for selected val_toa spectra + predictions
  val_toatrue = dict([(idx,s.zeros(n_wl)) for idx in val_toaidx])
  val_toapred = dict([(idx,s.zeros(n_wl)) for idx in val_toaidx])
  val_toamse = dict([(idx,s.zeros(n_wl)) for idx in val_toaidx])

  plot_toadat=False
  if plot_toadat:
    dwl=wl[:val_dtgts.shape[1]]
    fig0,ax0 = pl.subplots(2,1,sharex=True,sharey=False)
    for ridx,rlab in val_toalab.items():
      ax0[0].plot(wl,val_tgts[ridx],label=rlab)
      ax0[1].plot(dwl,val_dtgts[ridx],label='diff(%s)'%rlab)

    ax0[0].legend()
    ax0[1].legend()
    pl.show()
  
  accum = s.zeros(n_wl)

  modelclass =  '_'.join(['mlp']+list(map(str,layers))+[str(n_over)])
  if long_train:
    modelclass += '_longtr'
  if not use_wp:
    modelclass += '_nowp'
    
  modeldir = pathjoin(outdir,modelclass)
  print('modeldir: "%s"'%str((modeldir)))  
  if not pathexists(modeldir):
    os.makedirs(modeldir)

  log_filename = 'train_pid%s.log'%str(os.getpid())
  log_file = pathjoin(modeldir,log_filename)
  print('Writing log_file=%s'%log_file)
  sleep(1)

  log_fid = open(log_file,'w')
  print('# c, iter, mse, mae, time',file=log_fid)
      
  modelprefix = pathjoin(modeldir,splitext(trainf)[0])
    
  modelbase = modelprefix+'_c%s.pkl'
  init_modelbase = modelprefix+'_init_c%s.pkl'
  fin_modelbase = modelprefix+'_fin_c%s.pkl'

  # weight/bias data bookkeeping
  W,b = {},{}
  
  # weight/bias output files
  Wf,bf = {},{}
  
  # validation weights/biases constructed on training set,
  # assessed on validation set
  W['val'] = [np.zeros([n_wl,n_inputs,n_hidden]),
              np.zeros([n_wl,n_hidden,n_over])]
  b['val'] = [np.zeros([n_wl,n_hidden]),
              np.zeros([n_wl,n_over])]
  
  # "final" weights/biases constructed using *all* states
  # NOTE: use these during deployment to isofit/rt_nn.py
  W['fin'] = [np.zeros([n_wl,n_inputs,n_hidden]),
              np.zeros([n_wl,n_hidden,n_over])]
  b['fin'] = [np.zeros([n_wl,n_hidden]),
              np.zeros([n_wl,n_over])]

  # output file paths for validation/final outputs
  # TODO (BDB, 04/17/19): make multi_layer consistent 
  Wf['val'] = [modelprefix+'_W%dv.npy'%l for l in [1,2]]
  bf['val'] = [modelprefix+'_b%dv.npy'%l for l in [1,2]]
  
  Wf['fin'] = [modelprefix+'_W%d.npy'%l for l in [1,2]]
  bf['fin'] = [modelprefix+'_b%d.npy'%l for l in [1,2]]

  if n_layers==3:
    Wf['val'] = [Wf['val'][0],modelprefix+'_Wmv.npy',Wf['val'][1]]
    bf['val'] = [bf['val'][0],modelprefix+'_bmv.npy',bf['val'][1]]
    Wf['fin'] = [Wf['fin'][0],modelprefix+'_Wm.npy',Wf['fin'][1]]
    bf['fin'] = [bf['fin'][0],modelprefix+'_bm.npy',bf['fin'][1]]
    
    W['val'] = [W['val'][0],np.zeros([n_wl,n_hidden,n_hidden]),W['val'][1]]
    b['val'] = [b['val'][0],np.zeros([n_wl,n_hidden]),b['val'][1]]
    W['fin'] = [W['fin'][0],np.zeros([n_wl,n_hidden,n_hidden]),W['fin'][1]]
    b['fin'] = [b['fin'][0],np.zeros([n_wl,n_hidden]),b['fin'][1]]
  
  abserr = dict(fin=np.zeros([n_samples,len(wl)]), val=np.zeros([n_samplesv,len(wl)]))
  sqderr = dict(fin=np.zeros([n_samples,len(wl)]), val=np.zeros([n_samplesv,len(wl)]))

  # initialize model on initial channel(s)
  cr = s.arange(0,n_over)

  tr_input = inputs[tr_idx]
  val_input = inputs[val_idx]
  tr_tgts = tgts[tr_idx,cr]
  val_tgts = tgts[val_idx,cr]

  init_model_file = init_modelbase%str(0)
  model_params = model.get_params(deep=True)
  model_init = None
  model_init_time = gettime()
  if not pathexists(init_model_file) or retrain:
    fit_init = fit(model, init_model_file, tr_input, tr_tgts, val_input,
                   val_tgts, init_max_iter, es_tol=tol_init,
                   val_step=init_step)
                   
    model_init,model_init_err = fit_init
    model_init_mse,model_init_mae = model_init_err
    model_init_time = gettime()-model_init_time
    model_init_iter = model_init.n_iter_
    print('%d, %d, %.16f, %.16f, %d'%(-1,model_init_iter,model_init_mse,
                                      model_init_mae,model_init_time),
          file=log_fid)
    log_fid.flush()
  else:
    model_init = joblib.load(init_model_file)
    print('loaded',init_model_file)
    
  models = [model_init]
  model_file = modelbase%str(0)
  
  # train channelwise subnetworks
  for c in range(n_wl):    
    # define channel range (cmin==cmax for monochromatic)
    n_off = n_wl-c
    if c < n_half+1:
      cmin,cmax = 0,n_over-1
    elif n_off < n_half+1:
      cmin,cmax = n_wl-n_over,n_wl-1
    else:
      cmin,cmax = c-n_half,c+n_half
      
    cr = s.arange(cmin,cmax+1) if (n_half==0 or average_over) else c
    
    print('\n##### Training subnetwork for center channel %d, wl=[%.2f,%.2f] ##########'%(c,wl[cmin],wl[cmax]))
    # grab the training/validation targets for the current channel(s)  
    fin_tgts = tgts[:,cr].reshape([-1,n_over])
    tr_tgts = tgts[tr_idx,cr].reshape([-1,n_over])
    val_tgts = tgts[val_idx,cr].reshape([-1,n_over])

    # update model file after storing the previous file
    prev_model_file = None
    if use_wp:
      prev_model_file = model_file if c!=0 else init_model_file
    model_file = modelbase%str(c)

    model_c_time = gettime()
    if not pathexists(model_file) or retrain:
      # train new model, save to model_file
      model_c = clone(model)      
      model_c.set_params(**model_params)
      fit_c = fit(model_c, model_file, tr_input, tr_tgts, val_input,
                  val_tgts, max_iter, val_step=val_step,
                  es_tol=tol_val, prev_model_file=prev_model_file)
      model_c,model_c_err = fit_c
      model_c_mse,model_c_mae = model_c_err
      model_c_iter = model_c.n_iter_
      model_c_time = gettime()-model_c_time
    else:
      # restore from model_file
      model_c = joblib.load(model_file)
      val_preds = model_c.predict(val_input).reshape(val_tgts.shape)
      model_c_iter = model_c.n_iter_
      model_c_mse = mse(val_tgts,val_preds)
      model_c_mae = mae(val_tgts,val_preds)
      model_c_time = gettime()-model_c_time

    print('%d, %d, %.16f, %.16f, %d'%(c,model_c_iter,model_c_mse,model_c_mae,
                                      model_c_time),file=log_fid)
    log_fid.flush()

    if c==0 and model_init is not None:
      # replace initial model with refined version
      models[0] = model_c
    else:
      models.append(model_c)

    # save the output validation weights / biases
    for l in range(n_layers):
      W['val'][l][c] = np.array(model_c.coefs_[l])
      b['val'][l][c] = np.array(model_c.intercepts_[l])


    val_preds = model_c.predict(val_input).reshape(val_tgts.shape)
    #print('cr,val_preds.shape,val_tgts.shape: "%s"'%str((cr,val_preds.shape,
    #                                                    val_tgts.shape)))
    
    # generate detailed summaries for val_toaidx spectra
    for ri in val_toaidx:
      if n_half==0 or average_over:
        # model generates predictions for 0 or more adjacent channels and
        # we average the predictions for all generated channels
        predi,truei = val_preds[ri],val_tgts[ri]
      else:
        # model generates predictions for 1 or more adjacent channels but
        # we only consider the target channel in generating output
        # target channel centered unless c first/prev channel
        predi,truei = val_preds[ri][c-cmin],val_tgts[ri][c-cmin]

      val_toaabserri = np.abs(predi-truei)
      val_toatrue[ri][cr] = truei
      val_toapred[ri][cr] += predi
      val_toamse[ri][cr] += (val_toaabserri*val_toaabserri)

    # keep track of how many times we generate a prediction for each channel
    # (accum==ones(n_wl) for the monochromatic case)
    accum[cr] += 1
    val_abserr = np.abs(val_tgts-val_preds)
    val_sqderr = val_abserr*val_abserr

    sqderr['val'][:,cr] += val_sqderr
    abserr['val'][:,cr] += val_abserr

    # track the mean/std/median/mad of the mse and mae
    val_loss['mse'].append([np.mean(val_sqderr),np.std(val_sqderr),
                            np.median(val_sqderr),mad(val_sqderr)])
    val_loss['mae'].append([np.mean(val_abserr),np.std(val_abserr),
                            np.median(val_abserr),mad(val_abserr)])

    if val_only: # skip training "final" model
      print('val_only==True, skipping production model fit')
      continue

    print('\n##### Training full subnetwork for center channel %d, wl=[%.3f,%.3f] ##########'%(c,wl[cmin],wl[cmax]))
    fin_model_file = fin_modelbase%(str(c))
    fin_model_filename = pathsplit(fin_model_file)[1]
    model_f_time = gettime()
    if not pathexists(fin_model_file) or retrain:
      # train "final" production model on *all* inputs
      # start with converged validation model for this channel
      model_f = deepcopy(model_c)
      model_f.fit(inputs, fin_tgts)
      model_f_time = gettime()-model_f_time
      joblib.dump(model_f, fin_model_file)
      print('saved',fin_model_filename)
    else:
      model_f = joblib.load(fin_model_file)
      print('loaded',fin_model_filename)

    fin_preds = model_f.predict(inputs).reshape(fin_tgts.shape)
    fin_abserr = np.abs(fin_tgts-fin_preds)
    fin_sqderr = fin_abserr*fin_abserr
    sqderr['fin'][:,cr] += fin_sqderr
    abserr['fin'][:,cr] += fin_abserr

    # extract the final model weights / biases
    for l in range(n_layers):
      W['fin'][l][c] = np.array(model_f.coefs_[l])
      b['fin'][l][c] = np.array(model_f.intercepts_[l])

  # end of channelwise training loop
  
  evalkeys = ['val'] if val_only else ['val','fin']
  for evalkey in evalkeys:
    abserr[evalkey] = abserr[evalkey] / accum 
    sqderr[evalkey] = sqderr[evalkey] / accum 

    outmat = modelprefix+'_%s_sqderr.mat'%evalkey
    savemat(outmat,{'training_idx':tr_idx,
                    'validation_idx':val_idx,
                    'sqderr':sqderr[evalkey],
                    'abserr':abserr[evalkey],
                    'wl':wl})
    print('saved',outmat)
    Woutf,boutf = Wf[evalkey],bf[evalkey]
    Wout,bout = W[evalkey],b[evalkey]    
    for l,(Wl,bl) in enumerate(zip(Woutf,boutf)):
      np.save(Wl,Wout[l])
      np.save(bl,bout[l])
      
    print('saved %s weights to: "%s"'%(evalkey,str(((Woutf[0],boutf[0]),(Woutf[1],boutf[1])))))
    
  # plot the selected val_toaidx predictions vs. actuals
  for idx in val_toaidx:
    toatitle = ', '.join('%s=%.2f'%(n,v) for n,v in zip(names,inputs[idx]))

    toatrue = val_toatrue[idx]
    toapred = val_toapred[idx] / accum
    toamse  = val_toamse[idx] / accum

    toafig = modelprefix+'_toa%d_%s_pred.pdf'%(val_idx[idx],
                                               val_toalab[idx])
    fig,ax = pl.subplots(3,1,sharex=True,sharey=False)
    ax[0].plot(wl,toatrue)
    ax[0].plot(wl,toapred,c='r',ls=':')
    ax[1].plot(wl[:-1],s.diff(toatrue))
    ax[1].plot(wl[:-1],s.diff(toapred),c='r',ls=':')
    ax[2].plot(wl,toamse)
    pl.suptitle(toatitle)
    pl.savefig(toafig)
    print('saved toafig: "%s"'%str((toafig)))

  # plot mse/mae error curves    
  for errkey in val_loss:
    errdat = np.array(val_loss[errkey])
    errmean,errstd = errdat[:,0],errdat[:,1]
    errstr='mean val_%s: %g, std: %g'%(errkey,s.mean(errdat[:,0]),s.std(errdat[:,0]))
    print(errstr)
  
    errfig = modelprefix+'_%s.pdf'%errkey
    fig,ax = pl.subplots(2,1,sharex=True,sharey=False)
    plotmeanstd(ax[0],wl,errmean,errstd)
    plotmeanstd(ax[1],wl,errmean,errstd,diff=True)  
    ax[0].set_ylabel(errkey)
    ax[1].set_ylabel('diff(%s)'%errkey)
    ax[0].set_title(errstr)
    pl.savefig(errfig)
    print('saved errfig: "%s"'%str((errfig)))
    
  # free some memory
  del D
Example #10
    MSE = np.sum((sample_validation.rings - prediccions)**2) / N_valid
    if mse is None or mse > MSE:
        size_ideal_mse = size
        mse = MSE
    mses.append(MSE)

size = size_ideal_mse
print("Best config with {}".format(size))

model_nnet = MLPRegressor(hidden_layer_sizes=size,
                          alpha=0,
                          activation="logistic",
                          max_iter=1000,
                          solver='lbfgs')
model_nnet = model_nnet.fit(sample.loc[:, "male":"shell_weight"], sample.rings)
print(model_nnet.get_params(), file=open('coeficients/mlp_singlelayer', 'w'))

prediccions = model_nnet.predict(sample.loc[:, "male":"shell_weight"])
# Compute R-squared on the training set
NMSE = sum((sample.rings - prediccions)**2) / ((N - 1) * np.var(sample.rings))
print("NMSE MLP:", NMSE)
R_squared = (1 - NMSE) * 100
print("Our model explains {}% of the train variance".format(R_squared))

# Compute the metrics used to compare the different models
prediccions = model_nnet.predict(sample_validation.loc[:,
                                                       "male":"shell_weight"])
MAE = np.sum(abs(sample_validation.rings - prediccions)) / N_valid
print("MAE on validation data:", MAE)
MSE_valid = np.sum((sample_validation.rings - prediccions)**2) / N_valid
print("MSE validation MLP:", MSE_valid)
Example #11
            price_vec.append(l[3])

    scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1))
    mat = numpy.array(feature_mat)
    mat = scaler.fit_transform(mat)
    price_vec = numpy.array(price_vec)

    ftrain, ftest, ptrain, ptest = train_test_split(mat, price_vec, test_size=0.1)

    clf = MLPRegressor(solver='lbfgs',
                       hidden_layer_sizes=(4, 4, 4),
                       random_state=1)
    clf.fit(ftrain, ptrain)
    result = clf.predict(ftest)
    print(result)
    print(len(result))

    print(ptest)
    print(len(ptest))
    print('score sklearn :', clf.score(ftest, ptest))
    print(clf.get_params())

    # pos = where(sign_vec == 1)
    # neg = where(sign_vec == 0)
    # scatter(mat[pos, 0], mat[pos, 1], marker='o', c='b')
    # scatter(mat[neg, 0], mat[neg, 1], marker='x', c='r')
    # xlabel('Exam 1 score')
    # ylabel('Exam 2 score')
    # legend(['Not Admitted', 'Admitted'])
    # show()
Example #12
def _mlp_regression_train(table,
                          feature_cols,
                          label_col,
                          hidden_layer_sizes=(100, ),
                          activation='relu',
                          solver='adam',
                          alpha=0.0001,
                          batch_size_auto=True,
                          batch_size='auto',
                          learning_rate='constant',
                          learning_rate_init=0.001,
                          max_iter=200,
                          random_state=None,
                          tol=0.0001):
    _, features = check_col_type(table, feature_cols)
    label = table[label_col]

    mlp_model = MLPRegressor(hidden_layer_sizes=hidden_layer_sizes,
                             activation=activation,
                             solver=solver,
                             alpha=alpha,
                             batch_size=batch_size,
                             learning_rate=learning_rate,
                             learning_rate_init=learning_rate_init,
                             max_iter=max_iter,
                             shuffle=True,
                             random_state=random_state,
                             tol=tol)
    mlp_model.fit(features, label)

    predict = mlp_model.predict(features)

    intercepts = mlp_model.intercepts_
    coefficients = mlp_model.coefs_
    loss = mlp_model.loss_

    _mean_absolute_error = mean_absolute_error(label, predict)
    _mean_squared_error = mean_squared_error(label, predict)
    _r2_score = r2_score(label, predict)

    result_table = pd.DataFrame.from_items(
        [['Metric', ['Mean Absolute Error', 'Mean Squared Error', 'R2 Score']],
         ['Score', [_mean_absolute_error, _mean_squared_error, _r2_score]]])

    label_name = {
        'hidden_layer_sizes': 'Hidden Layer Sizes',
        'activation': 'Activation Function',
        'solver': 'Solver',
        'alpha': 'Alpha',
        'batch_size': 'Batch Size',
        'learning_rate': 'Learning Rate',
        'learning_rate_init': 'Learning Rate Initial',
        'max_iter': 'Max Iteration',
        'random_state': 'Seed',
        'tol': 'Tolerance'
    }
    get_param = mlp_model.get_params()
    param_table = pd.DataFrame.from_items(
        [['Parameter', list(label_name.values())],
         ['Value', [get_param[x] for x in list(label_name.keys())]]])

    rb = BrtcReprBuilder()
    rb.addMD(
        strip_margin("""
    | ### MLP Regression Result
    | {result}
    | ### Parameters
    | {list_parameters}
    """.format(result=pandasDF2MD(result_table),
               list_parameters=pandasDF2MD(param_table))))

    model = _model_dict('mlp_regression_model')
    model['features'] = feature_cols
    model['label'] = label_col
    model['intercepts'] = mlp_model.intercepts_
    model['coefficients'] = mlp_model.coefs_
    model['loss'] = mlp_model.loss_
    model['mean_absolute_error'] = _mean_absolute_error
    model['mean_squared_error'] = _mean_squared_error
    model['r2_score'] = _r2_score
    model['activation'] = activation
    model['solver'] = solver
    model['alpha'] = alpha
    model['batch_size'] = batch_size
    model['learning_rate'] = learning_rate
    model['learning_rate_init'] = learning_rate_init
    model['max_iter'] = max_iter
    model['random_state'] = random_state
    model['tol'] = tol
    model['mlp_model'] = mlp_model
    model['_repr_brtc_'] = rb.get()

    return {'model': model}
Example #13
#poly = make_pipeline(PolynomialFeatures(3), Ridge())
mpl = MLPRegressor(beta_1=0.99)
'''
y_t = y[-1000:-2]
y = y[0:-1000]
X_t = X[-1000:-2]
X = X[0:-1000]
mpl.fit(X, y)
poly.fit(X, y)
mpl_pred = mpl.predict(X_t)
poly_pred = poly.predict(X_t)
'''
mpl_pred = cross_val_predict(mpl, X, y, cv=10)
#poly_pred = cross_val_predict(poly, X, y, cv=10)
#nn_pred = cross_val_predict(model, X, y, cv=10)
print(mpl.get_params())

def plot_cross():
    fig, ax = plt.subplots()
    ax.scatter(y, mpl_pred, c='b', marker='x')
#    ax.scatter(y, poly_pred, c='y', marker ='+')
    #ax.scatter(y, nn_pred, c='r')
    ax.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw=4)
    ax.set_xlabel('Measured')
    ax.set_ylabel('Predicted')
    plt.show()

#mpl_pred = shift(mpl_pred, 30, cval=0)
#poly_pred = shift(poly_pred, 30, cval=0)

def plot_time():
Example #14
from sklearn import ensemble

clf4=ensemble.RandomForestRegressor(random_state=0)
clf4.fit(input.T[0:384],targets[0:384])

print(clf4.score(input.T[384:],targets[384:]))
import matplotlib.pyplot as plt

parameters={ 'min_samples_split':[2,10], 'max_depth':[3, 15],'n_estimators':[10,50]}
from sklearn import grid_search
###### Step 5 Testing With Neural Networks
from sklearn.neural_network import MLPRegressor

clf5=MLPRegressor(random_state=0,hidden_layer_sizes=500,activation='logistic',max_iter=500,)
clf5.fit(input.T[0:384],targets[0:384])
pred=clf5.predict(input.T[384:])
pred_train=clf5.predict(input.T[0:384])
print(clf5.score(input.T[384:],targets[384:]))
print(clf5.get_params())

### Plotting the results

plt.figure(1)
plt.plot(range(0,len(targets[384:])),pred,'red',range(0,len(targets[384:])),targets[384:],'blue')
plt.figure(2)
plt.plot(range(0,len(targets[0:384])),pred_train,'red',range(0,len(targets[0:384])),targets[0:384],'blue')
plt.show()
from sklearn.externals import joblib

#### Dumping the obtained classifier
joblib.dump(clf5, 'clf5.pkl')
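A small sketch of loading the dumped regressor back in a later session (a plausible continuation; it reuses the variable names from above):

clf5_restored = joblib.load('clf5.pkl')
print(clf5_restored.score(input.T[384:], targets[384:]))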
Example #15
#RMSE Train Data
predict_train1 = mlp.predict(data_train)
rmse_train1 = rmse(target_train, predict_train1)

#RMSE Test Data
predict_test1 = mlp.predict(data_test)
rmse_test1 = rmse(target_test, predict_test1)

#Weight values
weights = mlp.coefs_
#Bias values
biases = mlp.intercepts_

#Write Params down
results = open("diabetesBaseResults.txt", "a")
results.write("----PARAMS----\n")
results.write(str(mlp.get_params()) + "\n" + "\n")

#Write RMSE down
results = open("diabetesBaseResults.txt", "a")
results.write("----RMSE----\n")
results.write("            Train Data            |    Test Data  " + "\n")
results.write("Pre-Train    " + str(rmse_train0) + "      " + str(rmse_test0) +
              "\n")
results.write("Post-Train   " + str(rmse_train1) + "      " + str(rmse_test1) +
              "\n" + "\n")

#Write down target values and actual values for TEST data
results = open("diabetesBaseResults.txt", "a")
results.write("----TARGETS----PREDICTED VALS----\n")
for elem in range(len(target_test)):
    results.write(
Example #16
        for lri in parameters["learning_rate_init"]:
            for lr in parameters["learning_rate"]:
                for alpha in parameters["alpha"]:
                    for hls in parameters["hidden_layer_sizes"]:
                        clf = MLPRegressor(solver='lbfgs',
                                           learning_rate_init=lri,
                                           learning_rate=lr,
                                           alpha=alpha,
                                           hidden_layer_sizes=hls,
                                           early_stopping=True,
                                           random_state=42)
                        clf.fit(X_train, T_train)
                        T_pred = clf.predict(X_val)
                        if mean_squared_error(T_val, T_pred) < min_mse:
                            min_mse = mean_squared_error(T_val, T_pred)
                            best_estimator = clf.get_params()

        print(
            f'The best parameters returned by the grid search are:\n {best_estimator}'
        )
        print(f'The minimal MSE on the validation set is: {min_mse}')

        # Fit the best estimator
        clf = MLPRegressor(**best_estimator)
        clf.fit(X_train, T_train)

        # Predict on the test set
        T_pred = clf.predict(X_test)

        # Plot T_pred against T_test
        plot_mg_time_series([T_test, T_pred],
                            solver=solver,
                            random_state=random_state)
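The nested loops above implement a manual grid search against a fixed validation set; the same idea can be expressed with GridSearchCV and a PredefinedSplit. A rough sketch, assuming X_train/X_val and T_train/T_val are numpy arrays and `parameters` is the dict iterated above:

import numpy as np
from sklearn.model_selection import GridSearchCV, PredefinedSplit
from sklearn.neural_network import MLPRegressor

X_all = np.concatenate([X_train, X_val])
T_all = np.concatenate([T_train, T_val])
# -1 marks rows kept in the training fold, 0 marks the single validation fold
fold = np.concatenate([np.full(len(X_train), -1), np.zeros(len(X_val))])
search = GridSearchCV(MLPRegressor(solver='lbfgs', early_stopping=True, random_state=42),
                      parameters, cv=PredefinedSplit(fold),
                      scoring='neg_mean_squared_error')
search.fit(X_all, T_all)
print(search.best_params_)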

Example #17
# training the model
mlpr.fit(x_train, y_train)

# looking at the attributes
loss = mlpr.loss_  # the loss computed with the loss function
coefs = mlpr.coefs_  # a list of the weight matrix for each layer
intercepts = mlpr.intercepts_  # a list of the bias vector for each layer
n_iter = mlpr.n_iter_  # the number of iterations the solver has run
n_layers = mlpr.n_layers_  # the number of layers of the model
n_outputs = mlpr.n_outputs_  # the number of outputs
out_activation = mlpr.out_activation_  # the name of the output activation function used

# looking at the methods
get_params = mlpr.get_params()  # returning the parameters for the model
prediction_array = mlpr.predict(
    x_test
)  # running the test dataset through the model, giving an array of predicted values
train_score = mlpr.score(
    x_train, y_train)  # returns the R^2 score on the training set
test_score = mlpr.score(x_test,
                        y_test)  # returns the R^2 score on the test set

print(
    'The R^2 score for the train dataset is: %.3f and the R^2 for the test dataset is: %.3f'
    % (train_score, test_score))

pdb.set_trace()
Example #18
    train = values[:n_train_hours, :]
    test = values[n_train_hours:-30, :]
    # split into input and outputs
    n_obs = n_hours * n_features
    train_X, train_y = train[:, :-5], train[:, -1]
    test_X, test_y = test[:, :-5], test[:, -1]
    print(train_X.shape, len(train_X), train_y.shape)

    eval_set = [(test_X, test_y)]
    # fit model on training data
    ####learning_rate={0.01,0.05,0.1,0.2}
    ####max_math={3,4,5}
    ####n_estimators={80,90,100,110,120}
    model = MLPRegressor()
    model.fit(train_X, train_y)
    print(model.get_params())
    #### score the model
    preds_train = model.predict(train_X)
    print("Model score:", model.score(train_X, train_y))

    yhat = model.predict(test_X)
    # invert scaling for forecast
    yhat = yhat.reshape(len(yhat), 1)
    inv_yhat = concatenate((test_X[:, -31:], yhat), axis=1)

    inv_yhat = scaler.inverse_transform(inv_yhat)
    inv_yhat = inv_yhat[:, -1]
    # invert scaling for actual
    test_y = test_y.reshape((len(test_y), 1))
    inv_y = concatenate((test_X[:, -31:], test_y), axis=1)
    inv_y = scaler.inverse_transform(inv_y)
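    # A typical next step is to score the forecast on the original scale; a short
    # sketch (a plausible continuation; mean_squared_error is assumed to be
    # imported from sklearn.metrics).
    inv_y = inv_y[:, -1]
    rmse = mean_squared_error(inv_y, inv_yhat) ** 0.5
    print('Test RMSE: %.3f' % rmse)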
Example #19
print("Coeficiente de R^2 asociado a la predicción dentro de la muestra")
print(regr.score(X_train,y_train))
en_pred = regr.predict(X_test)

print("R^2 para el conjunto de test", regr.score(X_test,y_test))
print("Error absoluto medio", mean_absolute_error(y_test,en_pred))

input("\n--- Pulsa una tecla para continuar ---\n")

print("Redes neuronales")

# Establezco un número alto de iteraciones máximas para que no haya problemas de convergencia
redes = MLPRegressor(max_iter = 2000)
redes.fit(X_train,y_train)
print("Parámetros de la red neuronal")
print(redes.get_params())
# R^2 para el conjunto de training
print("In R^2")
print(redes.score(X_train,y_train))

red_pred = redes.predict(X_test)
# R^2 on the test set
print("Out R^2")
print(redes.score(X_test,y_test))
print("Mean absolute error", mean_absolute_error(y_test,red_pred))

input("\n--- Press a key to continue ---\n")

print("SVR")

svr = SVR(C=1,epsilon=0.2)
Example #20
#clf = MLPRegressor(solver='lbfgs', alpha=1e-5,
#		                    hidden_layer_sizes=(50), random_state=1, max_iter=1000)

#pipeline = PMMLPipeline([
#		  ('clf', clf)
#		])
#pipeline.fit(df[df.columns.difference(["Open"])], df["Prediction"])
#sklearn2pmml(pipeline, "PredictiveModel.pmml")

estimator = MLPRegressor(hidden_layer_sizes=(1000, ) * 4,
                         activation='relu',
                         max_iter=int(1e2),
                         verbose=True,
                         random_state=1,
                         tol=0)
logging.info('Estimator: {}'.format(estimator.get_params()))

y_pred = sklearn.model_selection.cross_val_predict(estimator=estimator,
                                                   X=X,
                                                   y=y,
                                                   cv=6)
estimator.fit(X_train, y_train)

confidence = estimator.score(X_test, y_test)
print("confidence: ", confidence)

forecast_prediction = estimator.predict(X_forecast)
print(forecast_prediction)

z = y_pred.tolist()
price1 = pd.concat([pd.Series(df['Open']), pd.Series(z)],
Example #21
def cascade_prediction(train_file, test_file, val_file, embedding_file,
                       embedding_dim):
    embeddings = np.fromfile(embedding_file,
                             np.float32).reshape(-1, embedding_dim)
    #embeddings = read_embeddings_avg(embedding_file)
    label = 0

    x_test = []
    y_test = []
    index = 0
    rb = open(test_file, 'r')
    for line in rb.readlines():
        elems = line.strip().split('\t')
        x_test.append(embeddings[index])
        index += 1
        value = int(elems[-1].split(' ')[label])
        value = np.log(value + 1.0) / np.log(2.0)
        y_test.append(value)
    rb.close()

    x_train = []
    y_train = []
    rb = open(train_file, 'r')
    for line in rb.readlines():
        elems = line.strip().split('\t')
        x_train.append(embeddings[index])
        index += 1
        value = int(elems[-1].split(' ')[label])
        value = np.log(value + 1) / np.log(2.0)
        y_train.append(value)
    rb.close()

    x_val = []
    y_val = []
    rb = open(val_file, 'r')
    for line in rb.readlines():
        elems = line.strip().split('\t')
        x_val.append(embeddings[index])
        index += 1
        value = int(elems[-1].split(' ')[label])
        value = np.log(value + 1) / np.log(2.0)
        y_val.append(value)
    rb.close()

    parameter_space = {
        'hidden_layer_sizes': [(64, 32, 16), (64, 32, 64), (64, )],
        'activation': ['tanh', 'relu', 'logistic'],
        'solver': ['sgd', 'adam'],
        'alpha': [0.0001, 0.001, 0.05, 0.01, 0.5, 1],
        'learning_rate': ['constant', 'adaptive'],
        'shuffle': [False],
        'random_state': [0],
        'max_iter': [200]
    }
    x = x_train + x_val
    y = y_train + y_val
    fold = [-1 for i in x_train]
    fold.extend([0 for i in x_val])
    '''
    model = MLPRegressor()
    ps = PredefinedSplit(test_fold=fold)
    clf = GridSearchCV(model, parameter_space, n_jobs=10, cv=ps)
    clf.fit(x, y)
    '''
    clf = MLPRegressor(hidden_layer_sizes=(64, 32),
                       activation='relu',
                       solver='adam',
                       shuffle=False,
                       random_state=0)
    clf.fit(x, y)
    print(clf.get_params())
    y_pred = clf.predict(x_test)
    print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
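    # Since the targets are log2(count + 1), predictions can be mapped back to raw
    # cascade sizes for inspection; a short sketch (a plausible continuation, not
    # in the original).
    predicted_sizes = np.power(2.0, y_pred) - 1.0
    true_sizes = np.power(2.0, np.array(y_test)) - 1.0
    print('First few predicted vs. true cascade sizes:')
    print(list(zip(predicted_sizes[:5].round(1), true_sizes[:5])))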