def plot2d():
    xs = [random_normal() for _ in range(1000)]
    ys1 = [x + random_normal() / 2 for x in xs]
    ys2 = [-x + random_normal() / 2 for x in xs]

    print(correlation(xs, ys1))
    print(correlation(xs, ys2))

    plt.scatter(xs, ys1, marker='.', color='black', label='ys1')
    plt.scatter(xs, ys2, marker='.', color='gray', label='ys2')
    plt.xlabel('xs')
    plt.ylabel('ys')
    plt.legend(loc=9)
    plt.title('Very Different Joint Distributions')
    plt.show()
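# The `correlation` helper is not defined in this snippet. A minimal from-scratch
# sketch of a sample Pearson correlation, consistent with the call sites above
# (two equal-length lists, scalar result); an assumption, not the original
# implementation:
import math

def correlation(xs, ys):
    """Sample Pearson correlation of two equal-length sequences (sketch)."""
    n = len(xs)
    mean_x, mean_y = sum(xs) / n, sum(ys) / n
    cov = sum((x - mean_x) * (y - mean_y) for x, y in zip(xs, ys)) / (n - 1)
    std_x = math.sqrt(sum((x - mean_x) ** 2 for x in xs) / (n - 1))
    std_y = math.sqrt(sum((y - mean_y) ** 2 for y in ys) / (n - 1))
    return cov / (std_x * std_y) if std_x > 0 and std_y > 0 else 0.0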
def main():
    # compose data for xgboost
    train, train_target, val, val_target = load_dataset()  # np arrays [X 2048] [X 15] (large X)
    # train = np.array([[1,2,3,4,5], [2,3,4,5,6], [4,5,6,7,8], [5,6,7,8,9]])
    # train_target = np.array([3,4,5,6])
    print(train.shape, train_target.shape, val[0].shape, val_target[0].shape)

    regressor = xgb.XGBRegressor(tree_method='gpu_hist', predictor='gpu_predictor')
    regressor.fit(train, train_target)  # must fit before scoring or predicting
    score = regressor.score(train, train_target)
    print('Score:', score)
    # print(regressor.predict(np.array([[6,7,8,9,10]])))

    video_count = len(val)
    corr_mean = 0.0
    for i in range(video_count):
        preds = regressor.predict(val[i])
        preds = torch.from_numpy(preds).cuda()
        preds = torch.cat([preds, preds[-1:]])
        preds = preds.unsqueeze(1)  # T -> T 1
        preds = interpolate_output(preds, 1, 6)
        pred_len = val_target[i].shape[0]
        preds = preds[:pred_len]
        val_ti = torch.from_numpy(val_target[i]).cuda()
        val_ti = val_ti.unsqueeze(1)  # T -> T 1
        corr, _ = correlation(preds, val_ti)
        corr_mean += corr.item()
    print('Correlation:', corr_mean / video_count)
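# `interpolate_output` is not defined in this snippet. Given the call pattern
# interpolate_output(preds, 1, 6) on a [T, 1] tensor followed by truncation to
# the label length, it plausibly resamples along the time axis (here 1 Hz -> 6 Hz).
# A sketch under that assumption:
import torch
import torch.nn.functional as F

def interpolate_output(preds, src_hz, dst_hz):
    """Upsample a [T, C] tensor along time by dst_hz / src_hz (assumed behavior)."""
    x = preds.t().unsqueeze(0)                       # [T, C] -> [1, C, T] for 1-D interpolation
    new_len = int(preds.shape[0] * dst_hz / src_hz)
    x = F.interpolate(x, size=new_len, mode='linear', align_corners=False)
    return x.squeeze(0).t()                          # [1, C, T'] -> [T', C]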
def test_result(filt_emp):
    global best_conf
    # do a correlation evaluation
    filt_emp = np.concatenate((filt_emp, filt_emp[-1:]))
    output = interpolate_output(torch.from_numpy(filt_emp), 1, 6)
    cor, _ = correlation(output[:vid_labels.shape[0]], torch.from_numpy(vid_labels))
    print('Correlation:', cor.item())
    return cor.item()
def _generate_complete_graph(D: spn.Dataset, S: spn.Scope):
    n = len(S)
    G = graph_tool.generation.complete_graph(n, self_loops=False, directed=False)
    W = G.new_edge_property('double')
    G.edge_properties['weights'] = W
    for e in G.edges():
        s = int(e.source())
        t = int(e.target())
        # a graph-tool property map is defined for every edge, so membership
        # testing is not meaningful; each edge of the complete graph is visited
        # exactly once, so assign its weight unconditionally
        W[e] = utils.correlation(D, s, t)
    return G
def _generate_sample_graph(D: spn.Dataset, S: spn.Scope, M: VertexMap, sample_func):
    G = graph_tool.Graph(directed=False)
    W = G.new_edge_property('double')
    G.edge_properties['weights'] = W
    G.add_vertex(n=len(S))
    for v in S:
        N = sample_func(S, v)
        for u in N:
            s = M.g(v)
            t = M.g(u)
            # skip duplicates when both endpoints sample each other
            if G.edge(s, t) is None:
                e = G.add_edge(s, t)
                W[e] = utils.correlation(D, s, t)
    return G
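# Both graph builders weight edges with utils.correlation(D, s, t), presumably a
# pairwise correlation between two variables (columns) of the dataset. A numpy
# sketch under that assumption:
import numpy as np

def correlation(D, s, t):
    """Pairwise Pearson correlation between columns s and t of dataset D (sketch).

    Assumes D can be viewed as a 2-D array of shape [n_samples, n_variables].
    """
    X = np.asarray(D)
    return float(np.corrcoef(X[:, s], X[:, t])[0, 1])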
def selected_data_using_corr(self):
    # select features by their correlation with the target
    df = self._des2cat(self.df, self.category_cols)
    X_data = df.drop(['price'], axis=1)
    y_data = np.log(df['price'])
    top_corr = correlation(df)
    X_data = X_data.loc[:, top_corr[1:]]

    # visualize X_data
    self.max_X_data = np.max(X_data.max())
    vis_boxplot(X_data, save_path='./selected_data_using_corr', vmax=self.max_X_data)

    y_data = pd.DataFrame(y_data)
    return pd.concat((y_data, X_data), axis=1)
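# The `correlation(df)` helper here apparently returns column names ordered by
# correlation with 'price' (top_corr[0] being 'price' itself, hence the [1:]
# slice). A plausible pandas sketch of that assumed convention:
def correlation(df, target='price', top_k=10):
    """Column names most correlated (in absolute value) with the target (sketch)."""
    corr = df.corr()[target].abs().sort_values(ascending=False)
    return corr.index[:top_k + 1].tolist()  # index[0] is the target itself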
def extraFea(self, x):
    output = []
    for sample in x:
        result = []
        sig = abs(sample[3].cpu().numpy())
        result.append(utils.mean_abs_dev(sig))
        result.append(utils.average(sig))
        result.append(utils.fre_skewness(sig))
        result.append(utils.fre_kurtosis(sig))
        result.append(utils.energy(sig))
        result.append(utils.entropy(sig))
        # temp = utils.ar_coef(sig)
        # for i in temp:
        #     result.append(i)
        half = len(sig) // 2
        x1, x2 = sig[:half], sig[half:]
        result.append(utils.correlation(x1, x2))
        result.append(utils.fswa(sig))
        output.append([result])
    return output
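# utils.correlation(x1, x2) here compares the two halves of the signal; for an
# odd-length input the halves differ in length, so a Pearson correlation needs
# them trimmed to a common length. A small numpy sketch of that assumed helper:
import numpy as np

def correlation(x1, x2):
    """Pearson correlation of two 1-D arrays, trimmed to equal length (sketch)."""
    n = min(len(x1), len(x2))
    return float(np.corrcoef(x1[:n], x2[:n])[0, 1])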
def insight():
    scope = [
        'https://spreadsheets.google.com/feeds',
        'https://www.googleapis.com/auth/drive'
    ]
    creds = ServiceAccountCredentials.from_json_keyfile_name('SIOTfinal.json', scope)
    client = gspread.authorize(creds)
    sound_g = client.open('sound').sheet1
    room_g = client.open('room').sheet1
    outside_g = client.open('outside').sheet1

    room_df = get_room_temp(room_g)
    out_df = get_outside_temp(outside_g)
    sound_df = get_sound(sound_g)

    intervals = ["15min", "30min", "1h", "4h"]
    dates = []
    dates_obj = []
    all_date = sound_df.resample('D').sum()
    for i in range(len(all_date.index)):
        dates.append(str(all_date.index[i].strftime('%Y-%m-%d')))
        dates_obj.append(all_date.index[i])
    dates = dates[1:]
    dates_obj = dates_obj[1:]

    variables = [
        'room temperature', 'room humidity', 'local temperature',
        'local humidity', 'atmospheric pressure', 'wind speed', 'cloud'
    ]
    room_variables = ['room temperature', 'room humidity']
    out_variables = [
        'local temperature', 'local humidity', 'atmospheric pressure',
        'wind speed', 'cloud'
    ]

    current_date = request.args.get("dates")
    current_interval = request.args.get("intervals")
    current_variable = request.args.get("variables")
    if current_interval is None:
        current_interval = "15min"
    if current_date is None:
        current_date = str(datetime.today().strftime('%Y-%m-%d'))
    if current_variable is None:
        current_variable = 'room temperature'

    # 'YYYY-MM-DD' -> 'DDMMYYYY' key used by time_select
    date = current_date[8:10] + current_date[5:7] + current_date[0:4]
    sound_date = time_select(sound_df, date)
    room_date = time_select(room_df, date)
    out_date = time_select(out_df, date)

    if current_variable in room_variables:
        col = room_date[current_variable]
    elif current_variable in out_variables:
        col = out_date[current_variable]

    relation_plot = sound_line(sound_date['Trigger bool'], col, current_interval)
    correlation_plot = correlation(dates_obj, sound_df, room_df, out_df)
    s1, div1 = components(relation_plot)
    s2, div2 = components(correlation_plot)
    return render_template('insight.html',
                           title="Insight",
                           s1=s1, div1=div1,
                           s2=s2, div2=div2,
                           intervals=intervals,
                           current_variable=current_variable,
                           variables=variables,
                           current_interval=current_interval,
                           dates=dates,
                           current_date=current_date)
def _generate_complete_network(D: spn.Dataset, S: spn.Scope):
    G = nx.complete_graph(S)
    # complete_graph already created the edges; add_edge here only attaches
    # the correlation weight to each existing edge
    for i, u in enumerate(S):
        for j, v in enumerate(S[i + 1:]):
            G.add_edge(u, v, weight=utils.correlation(D, i, j + i + 1))
    return G
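# These correlation-weighted complete graphs are typically consumed by a
# spanning-tree step (for example a Chow-Liu-style structure learner). A minimal
# networkx usage sketch, assuming G came from _generate_complete_network:
import networkx as nx

# keep the most strongly correlated edges: the maximum spanning tree
# of the correlation-weighted complete graph
T = nx.maximum_spanning_tree(G, weight='weight')
for u, v, w in T.edges(data='weight'):
    print(u, v, w)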
def future_policy_value(self, x, a, trans, seq_len, seq_mask, agent, opt, create_summary=False):
    """
    Computes the value of a policy according to the critic when updated using the objective function
    :param x: observations
    :param a: actions
    :param trans: entire tuple of transition (s_t, a_t, r_t, d_t, s_{t+1})
    :param seq_len: Length of trajectories
    :param seq_mask: Binary mask of trajectories
    :param agent: agent to compute value for
    :param opt: optimizer to use for the policy update
    :param create_summary: whether to create summary ops
    :return: tensor of batched future policy value
    """
    with tf.variable_scope('future_policy_value'):
        policy = agent.main.policy
        policy_vars = policy.trainable_variables
        # The replace manager can replace the policy variables with updated variables
        replace_manager = policy.variable_scope.custom_getter

        use_adam = self.dconfig.obj_func_second_order_adam
        step_size = self.dconfig.obj_func_second_order_stepsize
        step_count = self.dconfig.obj_func_second_order_steps + 1
        batch_size = self.dconfig.buffer_sample_size

        # Split tensors according to number of inner gradient descent steps
        x_s = tf.split(x, step_count, axis=0)
        a_s = tf.split(a, step_count, axis=0)
        if seq_len is not None:
            seq_len_s = tf.split(seq_len, step_count, axis=0)
            seq_mask_s = tf.split(seq_mask, step_count, axis=0)
        else:
            seq_len_s = utils.ConstArray()
            seq_mask_s = utils.ConstArray(seq_mask)
        trans_s = list(zip(*(tf.split(e, step_count, axis=0) for e in trans)))

        objective_val = None
        policy_grads = None
        opt_args_dict = {}
        current_vars = policy_vars
        var_names = [var.op.name for var in policy_vars]

        for i in range(step_count - 1):
            # Run policy
            policy_result = policy(x_s[i], seq_len=seq_len_s[i])
            # Run objective
            objective_val = self.objective(x_s[i], a_s[i], trans_s[i], seq_len_s[i],
                                           seq_mask_s[i], agent, policy_result, create_summary)
            # Compute policy gradients
            policy_grads = tf.gradients(objective_val * seq_mask_s[i], current_vars)

            if use_adam:
                def grad_transform(grad, var, var_name):
                    if var_name in opt_args_dict:
                        opt_args = opt_args_dict[var_name]
                    else:
                        opt_args = []
                    new_grad, *opt_args = opt.adapt_gradients(grad, var, *opt_args, lr=step_size)
                    opt_args_dict[var_name] = opt_args
                    return new_grad
            else:
                def grad_transform(grad, *args):
                    return step_size * grad

            # Use adam or vanilla SGD for inner gradient step
            transformed_grads = [
                grad_transform(grad, var, var_name)
                for grad, var, var_name in zip(policy_grads, current_vars, var_names)
            ]
            one_step_updated_policy_vars = [
                var - grad for var, grad in zip(current_vars, transformed_grads)
            ]
            one_step_updated_policy_vars_dict = OrderedDict(
                zip(var_names, one_step_updated_policy_vars))

            # Update replace manager to run policy with updated variables in the next loop iteration
            replace_manager.replace_dict = one_step_updated_policy_vars_dict
            current_vars = one_step_updated_policy_vars

        # Run policy with final parameters
        future_policy = policy(x, seq_len=seq_len)
        replace_manager.replace_dict = None

        # Estimate the final policy value
        future_policy_value = agent.main.critic(x, future_policy.action) * seq_mask

        if create_summary:
            orig_policy = policy(x_s[-1], seq_len=seq_len_s[-1])
            partial_future_policy_value = future_policy_value[-batch_size:]
            tf.summary.histogram('objective_value', objective_val)
            tf.summary.histogram('policy_grads', utils.flat(policy_grads))
            tf.summary.histogram('policy_value', orig_policy.value)
            tf.summary.histogram('future_policy_value', partial_future_policy_value)
            tf.summary.histogram('policy_value_gain',
                                 partial_future_policy_value - orig_policy.value)
            sample_axis = [0, 1] if self.dconfig.recurrent_time_steps > 1 else 0
            cor = utils.correlation(-orig_policy.value, objective_val, sample_axis)
            tf.summary.scalar('objective_critic_correlation', tf.squeeze(cor))
            grad, = tf.gradients(objective_val, policy_result.value)
            if grad is not None:
                tf.summary.histogram('objective_critic_grads', grad)

        return future_policy_value
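# utils.correlation here operates on tensors and reduces over sample_axis. A
# TF1-style sketch of a Pearson correlation between two tensors, assuming that
# reduction convention; not the repository's actual implementation:
import tensorflow as tf

def correlation(x, y, sample_axis=0):
    """Pearson correlation of two tensors along sample_axis (sketch)."""
    axes = sample_axis if isinstance(sample_axis, (list, tuple)) else [sample_axis]
    mean_x, var_x = tf.nn.moments(x, axes=axes, keep_dims=True)
    mean_y, var_y = tf.nn.moments(y, axes=axes, keep_dims=True)
    cov = tf.reduce_mean((x - mean_x) * (y - mean_y), axis=axes)
    denom = tf.sqrt(tf.squeeze(var_x, axes) * tf.squeeze(var_y, axes)) + 1e-8
    return cov / denom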
def recursive_application_performance(self, net, dataset, split_point, samples=20):
    print('===> Evaluating performance of recursive application')
    path = os.path.join(config['output_dir'], self.output_name, 'recursive')
    mkdir(path)

    if split_point - samples / 2 < 0:
        start_index = 0
        end_index = int(split_point + samples)
    else:
        start_index = int(split_point - samples / 2)
        end_index = int(split_point + samples / 2)
    print('-- Start index:', start_index)
    print('-- End index:', end_index)

    mse = []
    cor = []
    psnr = []
    ssim = []
    diff_avrg = []
    diff_max = []
    diff_x = []
    diff_y = []
    change_psnr_x = []
    change_psnr_y = []
    change_diff_x = []
    change_diff_y = []

    input_img = dataset[start_index][0].expand(1, -1, -1, -1).to(self.device)
    if self.parameterized:
        params = dataset[start_index][2].expand(1, -1, -1, -1).to(self.device)

    for index in range(start_index, end_index):
        pred_input = self._prepare_tensor_img(input_img[0].clone(), is_input=True)
        if self.parameterized:
            predicted = net((input_img, params))
        else:
            predicted = net(input_img)
        target = dataset[index][1].expand(1, -1, -1, -1).to(self.device)

        if self.args.mask:
            for i, j in itertools.product(range(predicted.shape[0]),
                                          range(predicted.shape[1])):
                predicted[i][j] = self.MASK * predicted[i][j]
            input_img = torch.cat(
                (predicted.clone().detach()[0][0:3].expand(1, -1, -1, -1),
                 self.MASK.expand(1, -1, -1, -1)), 1)
        else:
            input_img = predicted.clone().detach()

        cur_mse = self.criterionMSE(predicted, target).item()

        if not self.args.use_pressure:
            predicted_x, predicted_y = self._prepare_tensor_img(predicted[0])
            target_x, target_y = self._prepare_tensor_img(dataset[index][1])
        else:
            predicted_x, predicted_y, predicted_p = self._prepare_tensor_img(predicted[0])
            target_x, target_y, target_p = self._prepare_tensor_img(dataset[index][1])

        merge_and_save(target_x, predicted_x, 'Real', 'Predicted',
                       os.path.join(path, 'x_recursive_{}.png'.format(index - start_index)))
        merge_and_save(target_y, predicted_y, 'Real', 'Predicted',
                       os.path.join(path, 'y_recursive_{}.png'.format(index - start_index)))

        predicted_img = self.denormalize_output(predicted).detach().cpu().numpy()
        target_img = self.denormalize_input(target).detach().cpu().numpy()

        mse += [cur_mse]
        psnr += [10 * math.log10(1 / cur_mse)]
        cor += [np.average(np.array([
            correlation(predicted_img[i], target_img[i])
            for i in range(predicted_img.shape[0])
        ]))]
        ssim += [np.average(np.array([
            ssim_metr(predicted_img[i].T, target_img[i].T, multichannel=True)
            for i in range(predicted_img.shape[0])
        ]))]

        diff_avrg_, _, diff_max_ = imgs_perc_diff(target_img, predicted_img)
        diff_avrg.append(diff_avrg_)
        diff_max.append(diff_max_)
        diff_x.append(imgs_perc_diff(target_img[0][0], predicted_img[0][0])[0])
        diff_y.append(imgs_perc_diff(target_img[0][1], predicted_img[0][1])[0])

        real_input = self._prepare_tensor_img(dataset[index][0], True)
        change_x_real = np.abs(target_x - real_input[0])
        change_x_predicted = np.abs(pred_input[0] - predicted_x)
        change_y_real = np.abs(target_y - real_input[1])
        change_y_predicted = np.abs(pred_input[1] - predicted_y)

        change_mse_x = (np.square(change_x_real - change_x_predicted)).mean(axis=None)
        change_mse_y = (np.square(change_y_real - change_y_predicted)).mean(axis=None)
        change_psnr_x += [10.0 * np.log10(255.0 / np.sqrt(change_mse_x))]
        change_psnr_y += [10.0 * np.log10(255.0 / np.sqrt(change_mse_y))]
        change_diff_x += [imgs_perc_diff(change_x_real, change_x_predicted)[0]]
        change_diff_y += [imgs_perc_diff(change_y_real, change_y_predicted)[0]]

        merge_and_save(change_x_real, change_x_predicted, 'Real', 'Predicted',
                       os.path.join(path, 'x_diff_{}.png'.format(index - start_index)))
        merge_and_save(change_y_real, change_y_predicted, 'Real', 'Predicted',
                       os.path.join(path, 'y_diff_{}.png'.format(index - start_index)))
        print('> Recursive application {} completed'.format(index - start_index))

    with open(os.path.join(self.root_dir, self.output_name,
                           'recursive_application.txt'), 'w') as list_hand:
        list_hand.write('Split index: {}\n'.format(str(samples / 2)))
        list_hand.write('{} {}\n'.format('mse: ', ','.join(str(i) for i in mse)))
        list_hand.write('{} {}\n'.format('cor: ', ','.join(str(i) for i in cor)))
        list_hand.write('{} {}\n'.format('psnr: ', ','.join(str(i) for i in psnr)))
        list_hand.write('{} {}\n'.format('ssim: ', ','.join(str(i) for i in ssim)))
        list_hand.write('{} {}\n'.format('diff_avrg: ', ','.join(str(i) for i in diff_avrg)))
        list_hand.write('{} {}\n'.format('diff_max: ', ','.join(str(i) for i in diff_max)))
        list_hand.write('{} {}\n'.format('x_diff_avrg: ', ','.join(str(i) for i in diff_x)))
        list_hand.write('{} {}\n'.format('y_diff_avrg: ', ','.join(str(i) for i in diff_y)))
        list_hand.write('{} {}\n'.format('change_psnr_x: ', ','.join(str(i) for i in change_psnr_x)))
        list_hand.write('{} {}\n'.format('change_psnr_y: ', ','.join(str(i) for i in change_psnr_y)))
        list_hand.write('{} {}\n'.format('change_diff_x: ', ','.join(str(i) for i in change_diff_x)))
        list_hand.write('{} {}\n'.format('change_diff_y: ', ','.join(str(i) for i in change_diff_y)))
def individual_images_performance(self, net, test_dataloader):
    print('===> Evaluating performance on individual images')
    mse = []
    cor = []
    psnr = []
    ssim = []
    diff_avrg = []
    diff_max = []
    diff_x = []
    diff_y = []
    change_mse_x = []
    change_mse_y = []
    change_psnr_x = []
    change_psnr_y = []
    change_diff_x = []
    change_diff_y = []
    change_psnr = []

    for iteration, batch in enumerate(test_dataloader, 1):
        real_a, real_b = batch[0].to(self.device), batch[1].to(self.device)
        if self.parameterized:
            params = batch[2].to(self.device)
            predicted = net((real_a, params))
        else:
            predicted = net(real_a)

        if self.args.mask:
            for i, j in itertools.product(range(predicted.shape[0]),
                                          range(predicted.shape[1])):
                predicted[i][j] = self.MASK * predicted[i][j]

        cur_mse = self.criterionMSE(predicted, real_b).item()
        predicted = self.denormalize_output(predicted).detach().cpu().numpy()
        real_a = self.denormalize_output(real_a).detach().cpu().numpy()
        real_b = self.denormalize_output(real_b).detach().cpu().numpy()

        mse += [cur_mse]
        psnr += [10 * math.log10(1 / cur_mse)]
        cor += [np.average(np.array([
            correlation(predicted[i], real_b[i])
            for i in range(predicted.shape[0])
        ]))]
        ssim += [np.average(np.array([
            ssim_metr(predicted[i].T, real_b[i].T, multichannel=True)
            for i in range(predicted.shape[0])
        ]))]

        diff_avrg_, _, diff_max_ = imgs_perc_diff(real_b, predicted)
        diff_avrg.append(diff_avrg_)
        diff_max.append(diff_max_)
        for i in range(predicted.shape[0]):
            diff_x.append(imgs_perc_diff(real_b[i][0], predicted[i][0])[0])
            diff_y.append(imgs_perc_diff(real_b[i][1], predicted[i][1])[0])

        # error images (this list holds PSNR values, so name it accordingly)
        batch_change_psnr = []
        batch_change_mse_x = []
        batch_change_mse_y = []
        batch_change_psnr_x = []
        batch_change_psnr_y = []
        batch_change_diff_x = []
        batch_change_diff_y = []
        for ind in range(real_a.shape[0]):
            if self.args.use_pressure:
                real_change_img = np.abs(real_a[ind][0:3] - real_b[ind])
            else:
                real_change_img = np.abs(real_a[ind][0:2] - real_b[ind])
            predicted_change_img = np.abs(predicted[ind] - real_b[ind])
            cur_mse = (np.square(real_change_img - predicted_change_img)).mean(axis=None)
            cur_psnr = 10 * np.log10(255.0 / np.sqrt(cur_mse))
            batch_change_psnr.append(cur_psnr)

            real_change_img_x = np.abs(real_a[ind][0] - real_b[ind][0])
            predicted_change_img_x = np.abs(predicted[ind][0] - real_b[ind][0])
            real_change_img_y = np.abs(real_a[ind][1] - real_b[ind][1])
            predicted_change_img_y = np.abs(predicted[ind][1] - real_b[ind][1])
            x_cur_mse = (np.square(real_change_img_x - predicted_change_img_x)).mean(axis=None)
            y_cur_mse = (np.square(real_change_img_y - predicted_change_img_y)).mean(axis=None)
            x_cur_psnr = 10 * np.log10(255.0 / np.sqrt(x_cur_mse))
            y_cur_psnr = 10 * np.log10(255.0 / np.sqrt(y_cur_mse))
            cur_diff_x, _, _ = imgs_perc_diff(real_change_img_x, predicted_change_img_x)
            cur_diff_y, _, _ = imgs_perc_diff(real_change_img_y, predicted_change_img_y)

            batch_change_mse_x.append(x_cur_mse)
            batch_change_mse_y.append(y_cur_mse)
            batch_change_psnr_x.append(x_cur_psnr)
            batch_change_psnr_y.append(y_cur_psnr)
            batch_change_diff_x.append(cur_diff_x)
            batch_change_diff_y.append(cur_diff_y)

        change_psnr.append(np.array(batch_change_psnr).mean())
        change_mse_x.append(np.array(batch_change_mse_x).mean())
        change_mse_y.append(np.array(batch_change_mse_y).mean())
        change_psnr_x.append(np.array(batch_change_psnr_x).mean())
        change_psnr_y.append(np.array(batch_change_psnr_y).mean())
        change_diff_x.append(np.array(batch_change_diff_x).mean())
        change_diff_y.append(np.array(batch_change_diff_y).mean())

        if iteration % 10 == 0:
            print('> Evaluation {} completed'.format(iteration))

    mse = np.array(mse)
    cor = np.array(cor)
    psnr = np.array(psnr)
    ssim = np.array(ssim)
    diff_avrg = np.array(diff_avrg)
    diff_max = np.array(diff_max)
    change_mse_x = np.array(change_mse_x)
    change_mse_y = np.array(change_mse_y)
    change_psnr_x = np.array(change_psnr_x)
    change_psnr_y = np.array(change_psnr_y)
    change_diff_x = np.array(change_diff_x)
    change_diff_y = np.array(change_diff_y)

    with open(os.path.join(self.root_dir, self.output_name,
                           'metrics_avrg.txt'), 'w') as avrg_hand:
        avrg_hand.write('{} {}\n'.format('Avrg mse: ', np.average(mse)))
        avrg_hand.write('{} {}\n'.format('Avrg cor: ', np.average(cor)))
        avrg_hand.write('{} {}\n'.format('Avrg psnr: ', np.average(psnr)))
        avrg_hand.write('{} {}\n'.format('Avrg ssim: ', np.average(ssim)))
        avrg_hand.write('{} {}\n'.format('Avrg avrg_diff_perc: ', np.average(diff_avrg)))
        avrg_hand.write('{} {}\n'.format('Avrg max_diff_perc: ', np.average(diff_max)))
        avrg_hand.write('{} {}\n'.format('Avrg avrg_diff_x: ', np.average(diff_x)))
        avrg_hand.write('{} {}\n'.format('Avrg avrg_diff_y: ', np.average(diff_y)))
        avrg_hand.write('{} {}\n'.format('Var mse: ', np.var(mse)))
        avrg_hand.write('{} {}\n'.format('Var cor: ', np.var(cor)))
        avrg_hand.write('{} {}\n'.format('Var psnr: ', np.var(psnr)))
        avrg_hand.write('{} {}\n'.format('Var ssim: ', np.var(ssim)))
        avrg_hand.write('{} {}\n'.format('Var avrg_diff_perc: ', np.var(diff_avrg)))
        avrg_hand.write('{} {}\n'.format('Var max_diff_perc: ', np.var(diff_max)))
        avrg_hand.write('{} {}\n'.format('Var avrg_diff_x: ', np.var(diff_x)))
        avrg_hand.write('{} {}\n'.format('Var avrg_diff_y: ', np.var(diff_y)))
        avrg_hand.write('{} {}\n'.format('avrg_Change_psnr: ', np.mean(change_psnr)))
        avrg_hand.write('{} {}\n'.format('avrg_Change_mse_x: ', np.mean(change_mse_x)))
        avrg_hand.write('{} {}\n'.format('avrg_Change_mse_y: ', np.mean(change_mse_y)))
        avrg_hand.write('{} {}\n'.format('avrg_Change_psnr_x: ', np.mean(change_psnr_x)))
        avrg_hand.write('{} {}\n'.format('avrg_Change_psnr_y: ', np.mean(change_psnr_y)))
        avrg_hand.write('{} {}\n'.format('avrg_Change_diff_x: ', np.mean(change_diff_x)))
        avrg_hand.write('{} {}\n'.format('avrg_Change_diff_y: ', np.mean(change_diff_y)))

    with open(os.path.join(self.root_dir, self.output_name,
                           'metrics_list.txt'), 'w') as list_hand:
        list_hand.write('{} {}\n'.format('mse: ', ','.join(str(i) for i in mse)))
        list_hand.write('{} {}\n'.format('cor: ', ','.join(str(i) for i in cor)))
        list_hand.write('{} {}\n'.format('psnr: ', ','.join(str(i) for i in psnr)))
        list_hand.write('{} {}\n'.format('ssim: ', ','.join(str(i) for i in ssim)))
        list_hand.write('{} {}\n'.format('diff_avrg: ', ','.join(str(i) for i in diff_avrg)))
        list_hand.write('{} {}\n'.format('diff_max: ', ','.join(str(i) for i in diff_max)))
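# The correlation(predicted[i], real_b[i]) image metric used by both evaluators
# above is presumably a Pearson correlation over flattened pixel values; a numpy
# sketch under that assumption:
import numpy as np

def correlation(img_a, img_b):
    """Pearson correlation of two images over flattened pixels (sketch)."""
    a = np.asarray(img_a, dtype=np.float64).ravel()
    b = np.asarray(img_b, dtype=np.float64).ravel()
    return float(np.corrcoef(a, b)[0, 1])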
import json

data = json.loads(open("titles.json").read())
counts = [x["count"] for x in data]
lengths = [len(x["title"]) for x in data]
words = [x["title"].count(" ") for x in data]
# guard against one-word titles (zero spaces) to avoid ZeroDivisionError
word_lengths = [len(x["title"]) / max(x["title"].count(" "), 1) for x in data]
colons = [1 if ":" in x["title"] else 0 for x in data]

with_colon = 0
with_num = 0
wo_colon = 0
wo_num = 0
for entry in data:
    if ":" in entry["title"]:
        with_colon += entry["count"]
        with_num += 1
    else:
        wo_colon += entry["count"]
        wo_num += 1

print("Correlation between length and count:", correlation(lengths, counts))
print("Correlation between num words and count:", correlation(words, counts))
print("Correlation between avg word length and count:", correlation(word_lengths, counts))
print("Correlation between colon and count:", correlation(colons, counts))
print("Avg count with colon:", with_colon / with_num)
print("Avg count without colon:", wo_colon / wo_num)
def matrix_entry(i, j):
    return correlation(data[:, i], data[:, j])
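# matrix_entry is the kernel for a full correlation matrix; a minimal usage
# sketch, assuming `data` is a 2-D numpy array with one variable per column:
import numpy as np

num_columns = data.shape[1]
correlation_matrix = np.array([[matrix_entry(i, j) for j in range(num_columns)]
                               for i in range(num_columns)])
# for Pearson correlation this is equivalent to np.corrcoef(data, rowvar=False)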
    # fragment: inside the dev-set evaluation loop over (ft, lb) batches;
    # the opening condition is reconstructed from the dangling else (assumption)
    if 'pert_idose' in ft:
        pert_idose = ft['pert_idose']
    else:
        pert_idose = None
    predict = model(drug, data.gene, mask, pert_type, cell_id, pert_idose)
    loss = model.loss(lb, predict)
    epoch_loss += loss.item()
    lb_np = np.concatenate((lb_np, lb.cpu().numpy()), axis=0)
    predict_np = np.concatenate((predict_np, predict.cpu().numpy()), axis=0)

print('Dev loss:')
print(epoch_loss / (i + 1))
rmse_score = rmse(lb_np, predict_np)
rmse_list_dev.append(rmse_score)
print('RMSE: %.4f' % rmse_score)
pearson, _ = correlation(lb_np, predict_np, 'pearson')
pearson_list_dev.append(pearson)
print('Pearson\'s correlation: %.4f' % pearson)
spearman, _ = correlation(lb_np, predict_np, 'spearman')
spearman_list_dev.append(spearman)
print('Spearman\'s correlation: %.4f' % spearman)
precision = []
for k in precision_degree:
    precision_neg, precision_pos = precision_k(lb_np, predict_np, k)
    print("Precision@%d Positive: %.4f" % (k, precision_pos))
    print("Precision@%d Negative: %.4f" % (k, precision_neg))
    precision.append([precision_pos, precision_neg])
precisionk_list_dev.append(precision)
if best_dev_pearson < pearson:
    best_dev_pearson = pearson
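# correlation(lb_np, predict_np, 'pearson'/'spearman') is unpacked as a pair
# above. A scipy sketch averaging the per-sample correlation between label and
# prediction rows, under that assumed convention; not the repository's actual code:
import numpy as np
from scipy import stats

def correlation(label, predict, method='pearson'):
    """Mean per-sample correlation between label and prediction rows (sketch).

    Returns (mean_statistic, per_sample_statistics), matching the
    `score, _ = correlation(...)` unpacking above.
    """
    corr_fn = stats.pearsonr if method == 'pearson' else stats.spearmanr
    scores = np.array([corr_fn(label[i], predict[i])[0] for i in range(label.shape[0])])
    return scores.mean(), scores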