def evaluate(method, model, test_gen):
    from sklearn.metrics import mean_absolute_error

    y_pred = predict(method, model, test_gen)
    m, v = get_mean_std()  # normalization mean and std

    test_df = pd.read_excel(os.path.join('rsna-bone-age', 'Bone age ground truth.xlsx'))
    y_true = test_df['Ground truth bone age (months)'].values.astype(np.float64)
    y_true = (y_true - m) / v

    if method == 'only_point':
        # MAE in months (rescale the normalized values by the std)
        mad = mean_absolute_error(v * y_true, v * y_pred)
        return -1, -1, mad

    y_l_pred, y_u_pred, y_point_pred = y_pred

    # PICP: fraction of targets inside the [lower, upper] interval
    K_u = y_u_pred > y_true
    K_l = y_l_pred < y_true
    picp = np.mean(K_l * K_u)

    # MPIW: mean prediction-interval width
    mpiw = np.mean(y_u_pred - y_l_pred)

    # MAE in months
    mad = mean_absolute_error(v * y_true, v * y_point_pred)
    return picp, mpiw, mad
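# A minimal standalone sketch of the two interval metrics used above (PICP and
# MPIW), assuming nothing beyond NumPy; the array names are illustrative, not
# part of the original pipeline.
import numpy as np

rng = np.random.default_rng(0)
y_true = rng.normal(size=1000)
y_l = y_true - rng.uniform(0.5, 1.5, size=1000)   # hypothetical lower bounds
y_u = y_true + rng.uniform(0.5, 1.5, size=1000)   # hypothetical upper bounds

picp = np.mean((y_l < y_true) & (y_true < y_u))   # coverage probability
mpiw = np.mean(y_u - y_l)                          # mean interval width
print(f'PICP={picp:.3f}, MPIW={mpiw:.3f}')         # PICP is 1.0 here by construction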
def mae_months(y_true, y_pred):
    # relies on `method` and `boneage_div` from the enclosing scope
    if method == 'only_point':
        return mean_absolute_error(boneage_div * y_true, boneage_div * y_pred)
    y_true = y_true[:, 0]
    y_u_pred = y_pred[:, 0]
    y_l_pred = y_pred[:, 1]
    if method == 'piven':
        y_v = y_pred[:, 2]
        y_eli = y_v * y_u_pred + (1 - y_v) * y_l_pred
    if method == 'qd':
        y_eli = 0.5 * y_u_pred + 0.5 * y_l_pred
    return mean_absolute_error(boneage_div * y_true, boneage_div * y_eli)
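# A toy illustration of the point estimates above: PIVEN's auxiliary output
# v in [0, 1] mixes the interval bounds into a point prediction, while the QD
# variant simply takes the interval midpoint. Values are made up.
import numpy as np

y_u = np.array([10.0, 12.0])   # upper bounds
y_l = np.array([6.0, 8.0])     # lower bounds
v = np.array([0.75, 0.5])      # PIVEN mixing coefficients

piven_point = v * y_u + (1 - v) * y_l   # -> [9.0, 10.0]
qd_point = 0.5 * (y_u + y_l)            # -> [8.0, 10.0]
print(piven_point, qd_point)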
def eva_matrics(y_true, y_pred):
    """Evaluate the predicted results of a regression model.

    # Arguments
        y_true: List/ndarray, true data.
        y_pred: List/ndarray, predicted data.

    Metrics reported:
        explained_variance_score: explained-variance score of the regression
            model, in [0, 1]; the closer to 1, the better the independent
            variables explain the variance of the target, and smaller values
            indicate a worse fit.
        mean_absolute_error: mean absolute error (MAE), measuring how close
            the predictions are to the ground truth; smaller is better.
        mean_squared_error: mean squared error (MSE), the mean of the squared
            per-sample errors between fitted and original data; smaller is
            better.
        r2_score: coefficient of determination, also an explained-variance
            score in [0, 1]; the closer to 1, the better.
    """
    # mape = MAPE(y_true, y_pred)
    vs = metrics.explained_variance_score(y_true, y_pred)
    mae = metrics.mean_absolute_error(y_true, y_pred)
    mse = metrics.mean_squared_error(y_true, y_pred)
    r2 = metrics.r2_score(y_true, y_pred)
    print('explained_variance_score:%f' % vs, " :larger is better")
    # print('mape:%f%%' % mape, " :smaller is better")
    print('mae:%f' % mae, " :smaller is better")
    print('mse:%f' % mse, " :smaller is better")
    print('rmse:%f' % math.sqrt(mse), " :smaller is better")
    print('r2:%f' % r2, " : =1 is better")
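# Hedged usage sketch for eva_matrics, assuming `from sklearn import metrics`
# and `import math` at module level as the function body implies; the sample
# arrays are illustrative.
from sklearn import metrics
import math

y_true = [3.0, -0.5, 2.0, 7.0]
y_pred = [2.5, 0.0, 2.0, 8.0]
eva_matrics(y_true, y_pred)
# mae here is 0.5 and mse is 0.375, so rmse is about 0.612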
def metric(y_true, y_pred):
    """
    Parameters
    ----------
    y_true : Keras tensor
        Keras tensor including the ground truth. Since the keras tensor
        includes an extra column to store the index of the data sample in
        the training set, this column is ignored.
    y_pred : Keras tensor
        Keras tensor with the predictions of the contamination model
        (no data index).
    """
    return mean_absolute_error(y_true[:, :nout], y_pred[:, :nout])
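# A minimal sketch of the column convention the docstring describes, assuming
# nout = 1: the ground-truth tensor carries one extra trailing column holding
# the sample index, which the metric slices away. Plain NumPy stands in for
# Keras tensors here.
import numpy as np

nout = 1
y_true_with_idx = np.array([[2.0, 0], [3.5, 1], [1.0, 2]])  # [target, sample index]
y_pred = np.array([[2.1], [3.0], [1.3]])

mae = np.mean(np.abs(y_true_with_idx[:, :nout] - y_pred[:, :nout]))
print(mae)  # 0.3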
def mae(y_true, y_pred):
    y_true = y_true[:, 0]
    y_u_pred = y_pred[:, 0]
    y_l_pred = y_pred[:, 1]
    if method == 'piven':
        y_v = y_pred[:, 2]
        y_eli = y_v * y_u_pred + (1 - y_v) * y_l_pred
    if method == 'qd':
        y_eli = 0.5 * y_u_pred + 0.5 * y_l_pred
    return mean_absolute_error(div * y_true, div * y_eli)
def metric(y_true, y_pred):
    """
    Parameters
    ----------
    y_true : Keras tensor
        Keras tensor including the ground truth
    y_pred : Keras tensor
        Keras tensor including the predictions of a heteroscedastic model.
        The predictions follow the order: (mean_0, S_0, mean_1, S_1, ...)
        with S_i the log of the variance for the ith output.
    """
    if nout > 1:
        # For the interleaved layout documented above, the means sit at even
        # columns, so the stride 0::nout coincides with 0::2 only when nout == 2.
        y_out = K.reshape(y_pred[:, 0::nout], K.shape(y_true))
    else:
        y_out = K.reshape(y_pred[:, 0], K.shape(y_true))
    return mean_absolute_error(y_true, y_out)
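# A NumPy sketch of slicing the means out of the interleaved heteroscedastic
# output (mean_0, S_0, mean_1, S_1) for nout = 2, the case where the stride in
# the metric above matches the documented layout. Values are illustrative.
import numpy as np

nout = 2
y_pred = np.array([[1.0, -2.3, 4.0, -1.1],    # mean_0, log-var_0, mean_1, log-var_1
                   [2.0, -0.7, 3.0, -2.0]])
means = y_pred[:, 0::2]       # -> [[1., 4.], [2., 3.]]
log_vars = y_pred[:, 1::2]    # -> [[-2.3, -1.1], [-0.7, -2. ]]
print(means, log_vars)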
def __init__(self, state_dim, action_dim, option_dim, max_action, action_space):
    self.alpha = 0.2
    self.lr = 0.0003
    self.option_num = option_dim
    self.policy_type = "Gaussian"
    self.target_update_interval = 1
    self.automatic_entropy_tuning = True
    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    """ critic network """
    self.critic = QNetwork(state_dim, action_dim, 400).to(device=self.device)
    self.critic_optim = Adam(self.critic.parameters(), lr=self.lr)
    self.critic_target = QNetwork(state_dim, action_dim, 400).to(self.device)
    hard_update(self.critic_target, self.critic)

    # NOTE: `state` is not defined in this scope; executing this line raises
    # a NameError. It is kept from the original source.
    self.sampling_prob = torch.FloatTensor(state).to(self.device)

    # ===================================================================== #
    #                            Option Model                               #
    # ===================================================================== #
    # This block mirrors the TensorFlow option model below; np.stop_gradient
    # and np.divide do not exist, so the tf equivalents are used. Several
    # attributes it needs (target_q_value, predicted_v_value, entropy_coeff,
    # c_reg, c_ent, option_lr) are only defined in that TF version.
    self.option_state_input, self.option_action_input, self.option_input_concat, self.option_out_dec, \
        self.option_out, self.option_out_noise, self.option_model = self.create_option_model()

    Advantage = tf.stop_gradient(self.target_q_value - self.predicted_v_value)
    Weight = tf.divide(tf.exp(Advantage - tf.reduce_max(Advantage)), self.sampling_prob)
    W_norm = Weight / K.mean(Weight)

    critic_conditional_entropy = weighted_entropy(self.option_out, tf.stop_gradient(W_norm))
    p_weighted_ave = weighted_mean(self.option_out, tf.stop_gradient(W_norm))
    self.critic_entropy = critic_conditional_entropy - self.c_ent * entropy(p_weighted_ave)

    self.vat_loss = kl(self.option_out, self.option_out_noise)
    self.reg_loss = metrics.mean_absolute_error(self.option_input_concat, self.option_out_dec)
    self.option_loss = self.reg_loss + self.entropy_coeff * self.critic_entropy + self.c_reg * self.vat_loss
    self.option_optimize = tf.train.AdamOptimizer(self.option_lr).minimize(self.option_loss)

    """ option network """
    self.it = 0

    if self.policy_type == "Gaussian":
        # Target Entropy = -dim(A) (e.g. -6 for HalfCheetah-v2) as given in the paper
        if self.automatic_entropy_tuning == True:
            self.target_entropy = -torch.prod(torch.Tensor(action_space.shape).to(self.device)).item()
            self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)
            self.alpha_optim = Adam([self.log_alpha], lr=self.lr)
        self.policy = GaussianPolicy(state_dim, action_dim, 400, max_action).to(self.device)
        self.policy_optim = Adam(self.policy.parameters(), lr=self.lr)
    elif self.policy_type == "Multi_Gaussian":
        if self.automatic_entropy_tuning == True:
            self.target_entropy = -torch.prod(torch.Tensor(action_space.shape).to(self.device)).item()
            self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)
            self.alpha_optim = Adam([self.log_alpha], lr=self.lr)
        self.policy = GaussianPolicy(state_dim, action_dim, 400, max_action).to(self.device)
        self.policy_optim = Adam(self.policy.parameters(), lr=self.lr)
    else:
        self.alpha = 0
        self.automatic_entropy_tuning = False
        self.policy = DeterministicPolicy(state_dim, action_dim, 400, max_action).to(self.device)
        self.policy_optim = Adam(self.policy.parameters(), lr=self.lr)
def mae_heteroscedastic(y_true, y_pred):
    y_out = K.reshape(y_pred[:, :-1], K.shape(y_true))
    return mean_absolute_error(y_true, y_out)
'''
ord_idx = np.argsort(test_Y)
ord_idx = ord_idx[np.linspace(0, len(ord_idx)-1, 4).astype(int)]  # take 4 evenly spaced ones
fig, m_axs = plt.subplots(2, 2, figsize=(8, 8))
for (idx, c_ax) in zip(ord_idx, m_axs.flatten()):
    cur_img = test_X[0][idx:(idx+1)]
    c_ax.imshow(cur_img[0, :, :, 0], cmap='bone')
    c_ax.set_title('Age: %2.1fY\nPredicted Age: %2.1fY' % (test_Y_months[idx]/42.0, pred_Y[idx]))
    c_ax.axis('off')
# fig.savefig('trained_img_predictions.png', dpi=300)
'''
from sklearn.metrics import mean_absolute_error

print(mean_absolute_error(test_Y_months, pred_Y))

p = pred_Y.reshape(-1, 1)
print(p.shape)
p = p.flatten()
print(p.shape)
# print(p)
print(type(p))
b = test_df['id']
pth = test_df['file']
b2 = test_df['boneage']
# gender1 = df['male']
new_df = quote_quote.filter(['Adj Close'])
# Get the last 60 days of closing prices and convert the dataframe to an array
last_60_days = new_df[-60:].values
# Scale the data to values between 0 and 1
last_60_days_scaled = scaler.transform(last_60_days)
# Create an empty list
X_test = []
# Append the past 60 days
X_test.append(last_60_days_scaled)
# Convert the X_test data set to a numpy array
X_test = np.array(X_test)
# Reshape the data
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
# Get the predicted scaled price
pred_price = model.predict(X_test)
# Undo the scaling
pred_price = scaler.inverse_transform(pred_price)
st.markdown("5. Predict the 253rd value:")
st.text(pred_price)

# Calculate and print MAE, MSE, MAPE and RMSE
st.markdown("6. Metrics")
st.text('Mean Absolute Error: %s' % metrics.mean_absolute_error(y_test, predictions))
st.text('Mean Squared Error: %s' % metrics.mean_squared_error(y_test, predictions))
st.text('Mean Absolute Percentage Error: %s' % np.mean(np.abs((predictions - y_test) / y_test) * 100))
st.text('Root Mean Squared Error: %s' % np.sqrt(np.mean((predictions - y_test) ** 2)))
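# A hedged sketch showing sklearn equivalents for the hand-rolled metrics
# above (mean_absolute_percentage_error requires scikit-learn >= 0.24 and
# returns a fraction rather than a percentage); the arrays are illustrative.
import numpy as np
from sklearn.metrics import mean_absolute_percentage_error, mean_squared_error

y_test = np.array([100.0, 110.0, 120.0])
predictions = np.array([98.0, 113.0, 119.0])

mape_pct = mean_absolute_percentage_error(y_test, predictions) * 100
rmse = np.sqrt(mean_squared_error(y_test, predictions))
print(mape_pct, rmse)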
def loss(x):
    A, B = x
    return K.mean(mean_absolute_error(A, B))
def mean_absolute_error(yl, yg):
    return KM.mean_absolute_error(yl, yg)
def mae_months(in_gt, in_pred):
    return mean_absolute_error(boneage_div * in_gt, boneage_div * in_pred)
def mae_loss(y_true, y_pred):
    return metrics.mean_absolute_error(K.flatten(y_true), K.flatten(y_pred))
def mae(y_true, y_pred):
    return mean_absolute_error(y_true, y_pred)
def __init__(self, sess, env, state_dim, action_dim, action_bound,
             batch_size=64, tau=0.001, option_num=5, actor_lr=0.0001,
             critic_lr=0.001, option_lr=0.001, gamma=0.99,
             hidden_dim=(400, 300), entropy_coeff=0.1, c_reg=1.0,
             vat_noise=0.005, c_ent=4):
    self.env = env
    self.sess = sess
    self.state_dim = state_dim
    self.action_dim = action_dim
    self.action_bound = action_bound
    self.actor_lr = actor_lr
    self.critic_lr = critic_lr
    self.gamma = gamma
    self.tau = tau
    self.batch_size = batch_size
    self.hidden_dim = hidden_dim
    self.option_num = option_num
    self.entropy_coeff = entropy_coeff
    self.c_reg = c_reg
    self.vat_noise = vat_noise
    self.c_ent = c_ent
    self.option_lr = option_lr

    # ===================================================================== #
    #                             Actor Model                               #
    # ===================================================================== #
    self.actor_state_input_list = []
    self.actor_out_list = []
    self.actor_model_list = []
    self.actor_weight_list = []
    for i in range(self.option_num):
        actor_state_input, actor_out, actor_model, actor_weights = self.create_actor_model()
        self.actor_state_input_list.append(actor_state_input)
        self.actor_out_list.append(actor_out)
        self.actor_model_list.append(actor_model)
        self.actor_weight_list.append(actor_weights)

    self.actor_target_state_input_list = []
    self.actor_target_out_list = []
    self.actor_target_model_list = []
    self.actor_target_weight_list = []
    for i in range(self.option_num):
        actor_target_state_input, actor_target_out, \
            actor_target_model, actor_target_weights = self.create_actor_model()
        self.actor_target_state_input_list.append(actor_target_state_input)
        self.actor_target_out_list.append(actor_target_out)
        self.actor_target_model_list.append(actor_target_model)
        self.actor_target_weight_list.append(actor_target_weights)

    self.action_gradient_list = []
    for i in range(self.option_num):
        action_gradient = tf.placeholder(tf.float32, [None, self.action_dim])
        self.action_gradient_list.append(action_gradient)

    self.actor_optimizer_list = []
    for i in range(self.option_num):
        actor_params_grad = tf.gradients(self.actor_model_list[i].output,
                                         self.actor_weight_list[i],
                                         -self.action_gradient_list[i])
        grads = zip(actor_params_grad, self.actor_weight_list[i])
        self.actor_optimizer_list.append(
            tf.train.AdamOptimizer(self.actor_lr).apply_gradients(grads))

    # ===================================================================== #
    #                            Critic Model                               #
    # ===================================================================== #
    self.critic_state_input, self.critic_action_input, \
        self.critic_out_Q1, self.critic_out_Q2, self.critic_model = self.create_critic_model()
    self.critic_target_state_input, self.critic_target_action_input, \
        self.critic_out_Q1_target, self.critic_out_Q2_target, self.target_critic_model = self.create_critic_model()

    self.target_q_value = tf.placeholder(tf.float32, [None, 1])
    self.predicted_v_value = tf.placeholder(tf.float32, [None, 1])
    self.sampling_prob = tf.placeholder(tf.float32, [None, 1])

    # Define loss and optimization Op
    self.critic_loss = metrics.mean_squared_error(self.target_q_value, self.critic_out_Q1) \
        + metrics.mean_squared_error(self.target_q_value, self.critic_out_Q2)
    self.critic_optimize = tf.train.AdamOptimizer(self.critic_lr).minimize(self.critic_loss)

    # Get the gradient of the net w.r.t. the action.
    self.action_grads = tf.gradients(self.critic_out_Q1, self.critic_action_input)

    # ===================================================================== #
    #                            Option Model                               #
    # ===================================================================== #
    self.option_state_input, self.option_action_input, self.option_input_concat, self.option_out_dec, \
        self.option_out, self.option_out_noise, self.option_model = self.create_option_model()

    Advantage = tf.stop_gradient(self.target_q_value - self.predicted_v_value)
    # np.max cannot reduce a symbolic tensor, so the graph op is used instead
    Weight = tf.divide(tf.exp(Advantage - tf.reduce_max(Advantage)), self.sampling_prob)
    W_norm = Weight / K.mean(Weight)

    # H(o|s, a)
    critic_conditional_entropy = weighted_entropy(self.option_out, tf.stop_gradient(W_norm))
    p_weighted_ave = weighted_mean(self.option_out, tf.stop_gradient(W_norm))
    self.critic_entropy = critic_conditional_entropy - self.c_ent * entropy(p_weighted_ave)

    self.vat_loss = kl(self.option_out, self.option_out_noise)
    self.reg_loss = metrics.mean_absolute_error(self.option_input_concat, self.option_out_dec)
    self.option_loss = self.reg_loss + self.entropy_coeff * self.critic_entropy + self.c_reg * self.vat_loss
    self.option_optimize = tf.train.AdamOptimizer(self.option_lr).minimize(self.option_loss)

    # Initialize for later gradient calculations
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
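# The option loss above relies on helpers (weighted_entropy, weighted_mean,
# entropy, kl) that are not shown in this snippet. Below is a minimal sketch
# of plausible definitions, assuming per-sample categorical probability
# tensors and TF1-era Keras backend ops; these are guesses at the intent,
# not the original code.
from tensorflow.keras import backend as K

def entropy(p):
    # Shannon entropy of a (batch of) categorical distribution(s)
    return -K.sum(p * K.log(p + K.epsilon()), axis=-1)

def weighted_entropy(p, w):
    # per-sample entropies averaged with per-sample weights w of shape [batch, 1]
    return K.mean(K.flatten(w) * entropy(p))

def weighted_mean(p, w):
    # weighted average distribution over the batch
    return K.mean(w * p, axis=0)

def kl(p, q):
    # mean KL(p || q) between per-sample categorical distributions
    return K.mean(K.sum(p * (K.log(p + K.epsilon()) - K.log(q + K.epsilon())), axis=-1))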
y_pred_train = network.predict(X_train, batch_size=batch_size,
                               verbose=0).flatten()  # first line truncated in source; reconstructed from the calls below
y_pred_test = network.predict(X_test, batch_size=batch_size, verbose=0).flatten()
y_pred = network.predict(X_all, batch_size=batch_size, verbose=0).flatten()

error_train = y_pred_train - y_train
error_test = y_pred_test - y_test
error = y - y_pred
abs_error = abs(error)

MASE_train = MASE(y_train, y_pred_train, 48)
MASE_test = MASE(y_test, y_pred_test, 48)
MASE_all = MASE(y, y_pred, 48)
MAE_train = mean_absolute_error(y_train, y_pred_train).eval(session=sess)
MAE_test = mean_absolute_error(y_test, y_pred_test).eval(session=sess)
MAE_all = mean_absolute_error(y, y_pred).eval(session=sess)

results_DF.loc[mdl, 'Model name'] = mdl
results_DF.loc[mdl, 'CV MASE'] = MASE_CV
results_DF.loc[mdl, 'Training MASE'] = MASE_train
results_DF.loc[mdl, 'Test MASE'] = MASE_test
results_DF.loc[mdl, 'MASE'] = MASE_all
results_DF.loc[mdl, 'Training MAE'] = MAE_train
results_DF.loc[mdl, 'Test MAE'] = MAE_test
results_DF.loc[mdl, 'MAE'] = MAE_all
results_DF.loc[mdl, 'CV Loss'] = loss_CV
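# MASE is called above but not defined in this snippet. A common definition
# scales the model's MAE by the in-sample MAE of a seasonal-naive forecast
# with period m (here m = 48, e.g. half-hourly daily seasonality); this sketch
# is an assumption about the helper, not the original implementation.
import numpy as np

def MASE(y_true, y_pred, m):
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    mae_model = np.mean(np.abs(y_true - y_pred))
    # seasonal-naive baseline: predict y[t] with y[t - m]
    mae_naive = np.mean(np.abs(y_true[m:] - y_true[:-m]))
    return mae_model / mae_naive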
def mae_in_months(self, x_p, y_p):
    return mean_absolute_error(
        (self.std_bone_age * x_p + self.mean_bone_age),
        (self.std_bone_age * y_p + self.mean_bone_age))
def mae_loss(y_true, y_pred, dims=dims):
    # need to scale up by number of pixels/voxels (turns the mean into a sum)
    scaling = np.prod(dims)
    return scaling * metrics.mean_absolute_error(K.flatten(y_true), K.flatten(y_pred))
def mae_in_months(x_p, y_p):
    '''function to return mae in months'''
    return mean_absolute_error((std_bone_age * x_p + mean_bone_age),
                               (std_bone_age * y_p + mean_bone_age))
def mae_months(in_gt, in_pred):
    return mean_absolute_error(mu + sigma * in_gt, mu + sigma * in_pred)
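# Several snippets above rescale z-scored predictions before taking the MAE.
# Because MAE is translation-invariant and scales linearly,
# MAE(mu + sigma*a, mu + sigma*b) == sigma * MAE(a, b) for sigma > 0, so the
# shift by mu is harmless and only sigma affects the result. A quick check
# with made-up values:
import numpy as np
from sklearn.metrics import mean_absolute_error

mu, sigma = 127.0, 41.0          # illustrative bone-age mean/std in months
a = np.array([-1.0, 0.2, 0.9])   # z-scored ground truth
b = np.array([-0.8, 0.1, 1.2])   # z-scored predictions

lhs = mean_absolute_error(mu + sigma * a, mu + sigma * b)
rhs = sigma * mean_absolute_error(a, b)
assert np.isclose(lhs, rhs)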
def run_cross_validation_create_models(nfolds=10):
    # input image dimensions
    batch_size = 48  # 24
    nb_epoch = 8  # 8
    random_state = 51
    first_rl = 96

    train_data, train_target, train_id = read_and_normalize_train_data()

    yfull_train = dict()
    # NOTE: legacy scikit-learn API (cross_validation.KFold); modern versions
    # use KFold(n_splits=...).split(X). raw_input() below is Python 2.
    kf = KFold(len(train_id), n_folds=nfolds, shuffle=True, random_state=random_state)
    # print('kf\n', kf)
    num_fold = 0
    sum_score = 0
    models = []
    for train_index, test_index in kf:
        # print(train_index, test_index)
        model = create_model()
        X_train = train_data[train_index]
        Y_train = train_target[train_index]
        X_valid = train_data[test_index]
        Y_valid = train_target[test_index]

        num_fold += 1
        print('Start KFold number {} from {}'.format(num_fold, nfolds))
        print('Split train: ', len(X_train), len(Y_train))
        print('Split valid: ', len(X_valid), len(Y_valid))

        callbacks = [
            EarlyStopping(monitor='val_loss', patience=50, verbose=0),
        ]  # patience=3
        # print(X_train)
        model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
                  shuffle=True, verbose=1, validation_data=(X_valid, Y_valid),
                  callbacks=callbacks)
        # print(X_valid)
        predictions_valid = model.predict(X_valid.astype('float32'),
                                          batch_size=batch_size, verbose=2)
        for i in range(len(predictions_valid)):
            print(predictions_valid[i], Y_valid[i])
            a = raw_input()  # pause for inspection (Python 2; use input() on Python 3)

        score = mean_absolute_error(Y_valid, predictions_valid)
        print('Score mae error: ', score)
        sum_score += score * len(test_index)

        # Store valid predictions
        for i in range(len(test_index)):
            yfull_train[test_index[i]] = predictions_valid[i]

        models.append(model)

    score = sum_score / len(train_data)
    print("mae train independent avg: ", score)

    info_string = '_' + str(np.round(score, 3)) + '_flds_' + str(nfolds) \
        + '_eps_' + str(nb_epoch) + '_fl_' + str(first_rl)
    return info_string, models
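# A hedged sketch of the same fold loop against the modern scikit-learn API
# (KFold moved to sklearn.model_selection and takes n_splits); only the CV
# plumbing is shown, with a stand-in model in place of create_model().
import numpy as np
from sklearn.model_selection import KFold
from sklearn.metrics import mean_absolute_error
from sklearn.linear_model import LinearRegression  # stand-in model

X = np.random.rand(100, 5)
y = np.random.rand(100)

kf = KFold(n_splits=10, shuffle=True, random_state=51)
sum_score = 0.0
for train_index, test_index in kf.split(X):
    model = LinearRegression().fit(X[train_index], y[train_index])
    preds = model.predict(X[test_index])
    sum_score += mean_absolute_error(y[test_index], preds) * len(test_index)
print('mae train independent avg:', sum_score / len(X))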
def mae_metric(in_gt, in_pred):
    return mean_absolute_error(in_gt, in_pred)
def mae_months(in_gt, in_pred):
    return mean_absolute_error(in_gt, in_pred)