from data import get_image_sample

# Modelname: CSRNet
# Based on https://github.com/Neerajj9/CSRNet-keras
# Related paper https://arxiv.org/abs/1802.10062
# Download dataset first:
# https://drive.google.com/file/d/16dhJn7k4FWVwByRsQAEpl9lwjuV03jVI/view

print("Preprocessing images (skipping all files that have already been processed)")
process_images()

print("Show one random image from the dataset including the created groundtruth heatmap")
sample_image = get_image_sample()
show_sample(sample_image)

print("Train the model and save the weights")
train("Model")

print("Show the same sample as before but including the generated heatmap")
show_sample(sample_image, "Model")

print("Analyze the model. Compute the Mean Absolute Error on different dataset subsets.")
analyze_model("Model")
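
# Illustrative sketch (not part of the CSRNet script above; the helper name is
# hypothetical): how the Mean Absolute Error reported by analyze_model is typically
# computed for crowd counting, i.e. the mean absolute difference between each
# predicted count (the sum of a predicted density map) and the ground-truth count.
import numpy as np

def mean_absolute_error_example(predicted_counts, true_counts):
    """Mean Absolute Error over per-image crowd counts (illustrative only)."""
    predicted_counts = np.asarray(predicted_counts, dtype=np.float64)
    true_counts = np.asarray(true_counts, dtype=np.float64)
    return float(np.mean(np.abs(predicted_counts - true_counts)))

# Example with made-up counts: |100.5 - 98.0| = 2.5 and |90.0 - 92.0| = 2.0, so MAE = 2.25
#print(mean_absolute_error_example([100.5, 90.0], [98.0, 92.0]))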
def main():

    """
    Reset TensorFlow before running anything
    """
    tf.reset_default_graph()

    """
    Create the stimulus class to generate trial parameters and input activity
    """
    stim = stimulus.Stimulus()

    n_input, n_hidden, n_output = par['shape']
    N = par['batch_train_size'] * par['num_batches']  # trials per iteration, calculate gradients after batch_train_size

    """
    Define all placeholders
    """
    mask = tf.placeholder(tf.float32, shape=[par['num_time_steps'], par['batch_train_size']])
    x = tf.placeholder(tf.float32, shape=[n_input, par['num_time_steps'], par['batch_train_size']])   # input data
    y = tf.placeholder(tf.float32, shape=[n_output, par['num_time_steps'], par['batch_train_size']])  # target data

    # enter "config=tf.ConfigProto(log_device_placement=True)" inside Session to check whether CPU/GPU in use
    with tf.Session() as sess:

        #with tf.device("/gpu:0"):
        model = Model(x, y, mask)
        init = tf.global_variables_initializer()
        sess.run(init)
        t_start = time.time()
        saver = tf.train.Saver()

        # Restore variables from previous model if desired
        if par['load_previous_model']:
            saver.restore(sess, par['save_dir'] + par['ckpt_load_fn'])
            print('Model ' + par['ckpt_load_fn'] + ' restored.')

        # keep track of the model performance across training
        model_performance = {'accuracy': [], 'loss': [], 'perf_loss': [], 'spike_loss': [], 'trial': [], 'time': []}

        for i in range(par['num_iterations']):

            # generate batch of N (batch_train_size x num_batches) trials
            trial_info = stim.generate_trial()

            # keep track of the model performance for this batch
            loss = np.zeros((par['num_batches']))
            perf_loss = np.zeros((par['num_batches']))
            spike_loss = np.zeros((par['num_batches']))
            accuracy = np.zeros((par['num_batches']))

            for j in range(par['num_batches']):

                """
                Select batches of size batch_train_size
                """
                ind = range(j * par['batch_train_size'], (j + 1) * par['batch_train_size'])
                target_data = trial_info['desired_output'][:, :, ind]
                input_data = trial_info['neural_input'][:, :, ind]
                train_mask = trial_info['train_mask'][:, ind]

                """
                Run the model
                If learning rate > 0, then also run the optimizer;
                if learning rate = 0, then skip the optimizer
                """
                if par['learning_rate'] > 0:
                    _, loss[j], perf_loss[j], spike_loss[j], y_hat, state_hist, W_rnn = \
                        sess.run([model.train_op, model.loss, model.perf_loss, model.spike_loss, model.y_hat,
                                  model.hidden_state_hist, model.WY],
                                 {x: input_data, y: target_data, mask: train_mask})
                else:
                    loss[j], perf_loss[j], spike_loss[j], y_hat, state_hist, W_rnn = \
                        sess.run([model.loss, model.perf_loss, model.spike_loss, model.y_hat,
                                  model.hidden_state_hist, model.WY],
                                 {x: input_data, y: target_data, mask: train_mask})

                accuracy[j] = analysis.get_perf(target_data, y_hat, train_mask)

            iteration_time = time.time() - t_start
            model_performance = append_model_performance(model_performance, accuracy, loss, perf_loss,
                                                         spike_loss, (i + 1) * N, iteration_time)

            """
            Save the network model and output model performance to screen
            """
            if (i + 1) % par['iters_between_outputs'] == 0 or i + 1 == par['num_iterations']:
                print_results(i, N, iteration_time, perf_loss, spike_loss, state_hist, accuracy)
                save_path = saver.save(sess, par['save_dir'] + par['ckpt_save_fn'])

        """
        Analyze the network model and save the results
        """
        if par['analyze_model']:
            weights = eval_weights(W_rnn)
            analysis.analyze_model(trial_info, y_hat, state_hist, model_performance, weights)
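
# Hedged sketch (an assumption, not the repository's actual helper): one plausible
# implementation of append_model_performance as it is called in main() above,
# averaging the per-batch metrics and recording the cumulative trial count and
# elapsed training time.
import numpy as np

def append_model_performance_sketch(model_performance, accuracy, loss, perf_loss,
                                    spike_loss, trial_num, iteration_time):
    model_performance['accuracy'].append(np.mean(accuracy))
    model_performance['loss'].append(np.mean(loss))
    model_performance['perf_loss'].append(np.mean(perf_loss))
    model_performance['spike_loss'].append(np.mean(spike_loss))
    model_performance['trial'].append(trial_num)
    model_performance['time'].append(iteration_time)
    return model_performance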
def main(gpu_id=None):

    if gpu_id is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id

    """
    Reset TensorFlow before running anything
    """
    tf.reset_default_graph()

    """
    Create the stimulus class to generate trial parameters and input activity
    """
    stim = stimulus.Stimulus()

    n_input, n_hidden, n_output = par['shape']
    N = par['batch_train_size']  # trials per iteration, calculate gradients after batch_train_size

    """
    Define all placeholders
    """
    mask = tf.placeholder(tf.float64, shape=[par['num_time_steps'], par['batch_train_size']])
    x = tf.placeholder(tf.float64, shape=[n_input, par['num_time_steps'], par['batch_train_size']])               # input data
    target = tf.placeholder(tf.float64, shape=[par['n_output'], par['num_time_steps'], par['batch_train_size']])  # target data
    actual_reward = tf.placeholder(tf.float64, shape=[par['num_time_steps'], par['batch_train_size']])
    pred_reward = tf.placeholder(tf.float64, shape=[par['num_time_steps'], par['batch_train_size']])
    actual_action = tf.placeholder(tf.float64, shape=[par['num_time_steps'], par['n_output'], par['batch_train_size']])

    config = tf.ConfigProto()
    #config.gpu_options.allow_growth = True

    # enter "config=tf.ConfigProto(log_device_placement=True)" inside Session to check whether CPU/GPU in use
    with tf.Session(config=config) as sess:

        if gpu_id is not None:
            model = Model(x, target, actual_reward, pred_reward, actual_action, mask)
        else:
            #with tf.device("/gpu:0"):
            model = Model(x, target, actual_reward, pred_reward, actual_action, mask)

        init = tf.global_variables_initializer()
        sess.run(init)

        # keep track of the model performance across training
        model_performance = {'accuracy': [], 'loss': [], 'perf_loss': [], 'spike_loss': [], 'trial': []}

        for i in range(par['num_iterations']):

            # generate batch of batch_train_size trials
            trial_info = stim.generate_trial()

            """
            Run the model
            """
            pol_out, val_out, pol_rnn, action, stacked_mask, reward = \
                sess.run([model.pol_out, model.val_out, model.h_pol, model.action, model.stacked_mask, model.reward],
                         {x: trial_info['neural_input'], target: trial_info['desired_output'],
                          mask: trial_info['train_mask']})

            trial_reward = np.squeeze(np.stack(reward))
            trial_action = np.stack(action)
            #plt.imshow(np.squeeze(trial_reward))
            #plt.colorbar()
            #plt.show()

            _, _, pol_loss, val_loss = \
                sess.run([model.train_pol, model.train_val, model.pol_loss, model.val_loss],
                         {x: trial_info['neural_input'], target: trial_info['desired_output'],
                          mask: trial_info['train_mask'], actual_reward: trial_reward,
                          pred_reward: np.squeeze(val_out), actual_action: trial_action})

            accuracy, _, _ = analysis.get_perf(trial_info['desired_output'], action, trial_info['train_mask'])
            #model_performance = append_model_performance(model_performance, accuracy, val_loss, pol_loss, spike_loss, (i+1)*N)

            """
            Save the network model and output model performance to screen
            """
            if i % par['iters_between_outputs'] == 0 and i > 0:
                print_results(i, N, pol_loss, 0., pol_rnn, accuracy)
                r = np.squeeze(np.sum(np.stack(trial_reward), axis=0))
                print('Mean mask', np.mean(stacked_mask), ' val loss ', val_loss, ' reward ', np.mean(r), np.max(r))
                #plt.imshow(np.squeeze(stacked_mask[:,:]))
                #plt.colorbar()
                #plt.show()
                #plt.imshow(np.squeeze(trial_reward))
                #plt.colorbar()
                #plt.show()

        """
        Save model, analyze the network model and save the results
        """
        #save_path = saver.save(sess, par['save_dir'] + par['ckpt_save_fn'])
        if par['analyze_model']:
            weights = eval_weights()
            analysis.analyze_model(trial_info, y_hat, state_hist, syn_x_hist, syn_u_hist, model_performance, weights,
                                   simulation=True, lesion=False, tuning=False, decoding=False,
                                   load_previous_file=False, save_raw_data=False)

            # Generate another batch of trials with test_mode = True (sample and test stimuli
            # are independently drawn), and then perform tuning and decoding analysis
            trial_info = stim.generate_trial(test_mode=True)
            y_hat, state_hist, syn_x_hist, syn_u_hist = \
                sess.run([model.y_hat, model.hidden_state_hist, model.syn_x_hist, model.syn_u_hist],
                         {x: trial_info['neural_input'], target: trial_info['desired_output'],
                          mask: trial_info['train_mask']})
            analysis.analyze_model(trial_info, y_hat, state_hist, syn_x_hist, syn_u_hist, model_performance, weights,
                                   simulation=False, lesion=False, tuning=par['analyze_tuning'], decoding=True,
                                   load_previous_file=True, save_raw_data=False)
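
# Hedged sketch (an assumption, not the repository's analysis.get_perf): masked
# decision accuracy as it is used above, i.e. the fraction of (time step, trial)
# entries where the network's argmax output matches the target's argmax output,
# counted only where the training mask is non-zero and a non-fixation response
# is required. Shapes follow the placeholders above:
#   desired_output, network_output: [n_output, num_time_steps, batch_size]
#   train_mask:                     [num_time_steps, batch_size]
import numpy as np

def masked_accuracy_sketch(desired_output, network_output, train_mask):
    desired_output = np.asarray(desired_output)
    network_output = np.asarray(network_output)
    target_dir = np.argmax(desired_output, axis=0)
    choice_dir = np.argmax(network_output, axis=0)
    # Assumption: output unit 0 is the fixation unit, so score only time points
    # where the target is not "maintain fixation".
    mask = np.asarray(train_mask) * (desired_output[0, :, :] == 0)
    return np.sum((target_dir == choice_dir) * mask) / np.sum(mask)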
def main(gpu_id):

    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id

    """
    Reset TensorFlow before running anything
    """
    tf.reset_default_graph()

    """
    Create the stimulus class to generate trial parameters and input activity
    """
    stim = stimulus.Stimulus()

    n_input, n_hidden, n_output = par['shape']
    N = par['batch_train_size']  # trials per iteration, calculate gradients after batch_train_size

    """
    Define all placeholders
    """
    mask = tf.placeholder(tf.float32, shape=[par['num_time_steps'], par['batch_train_size']])
    x = tf.placeholder(tf.float32, shape=[n_input, par['num_time_steps'], par['batch_train_size']])   # input data
    y = tf.placeholder(tf.float32, shape=[n_output, par['num_time_steps'], par['batch_train_size']])  # target data

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    # enter "config=tf.ConfigProto(log_device_placement=True)" inside Session to check whether CPU/GPU in use
    with tf.Session(config=config) as sess:

        with tf.device("/gpu:0"):
            model = Model(x, y, mask)

        init = tf.global_variables_initializer()
        sess.run(init)
        t_start = time.time()
        saver = tf.train.Saver()

        # Restore variables from previous model if desired
        if par['load_previous_model']:
            saver.restore(sess, par['save_dir'] + par['ckpt_load_fn'])
            print('Model ' + par['ckpt_load_fn'] + ' restored.')

        # keep track of the model performance across training
        model_performance = {'accuracy': [], 'loss': [], 'perf_loss': [], 'spike_loss': [], 'trial': [], 'time': []}

        for i in range(par['num_iterations']):

            # generate batch of batch_train_size trials
            trial_info = stim.generate_trial()

            """
            Run the model
            """
            _, loss, perf_loss, spike_loss, y_hat, state_hist, syn_x_hist, syn_u_hist = \
                sess.run([model.train_op, model.loss, model.perf_loss, model.spike_loss, model.y_hat,
                          model.hidden_state_hist, model.syn_x_hist, model.syn_u_hist],
                         {x: trial_info['neural_input'], y: trial_info['desired_output'],
                          mask: trial_info['train_mask']})

            accuracy, _, _ = analysis.get_perf(trial_info['desired_output'], y_hat, trial_info['train_mask'])

            iteration_time = time.time() - t_start
            model_performance = append_model_performance(model_performance, accuracy, loss, perf_loss,
                                                         spike_loss, (i + 1) * N, iteration_time)

            """
            Save the network model and output model performance to screen
            """
            if (i + 1) % par['iters_between_outputs'] == 0 or i + 1 == par['num_iterations']:
                print_results(i, N, iteration_time, perf_loss, spike_loss, state_hist, accuracy)

        """
        Save model, analyze the network model and save the results
        """
        #save_path = saver.save(sess, par['save_dir'] + par['ckpt_save_fn'])
        if par['analyze_model']:
            weights = eval_weights()
            analysis.analyze_model(trial_info, y_hat, state_hist, syn_x_hist, syn_u_hist, model_performance, weights,
                                   simulation=True, tuning=False, decoding=False,
                                   load_previous_file=False, save_raw_data=False)

            # Generate another batch of trials with decoding_test_mode = True (sample and test stimuli
            # are independently drawn), and then perform tuning and decoding analysis
            update = {'decoding_test_mode': True}
            update_parameters(update)
            trial_info = stim.generate_trial()
            y_hat, state_hist, syn_x_hist, syn_u_hist = \
                sess.run([model.y_hat, model.hidden_state_hist, model.syn_x_hist, model.syn_u_hist],
                         {x: trial_info['neural_input'], y: trial_info['desired_output'],
                          mask: trial_info['train_mask']})
            analysis.analyze_model(trial_info, y_hat, state_hist, syn_x_hist, syn_u_hist, model_performance, weights,
                                   simulation=False, tuning=par['analyze_tuning'], decoding=True,
                                   load_previous_file=True, save_raw_data=False)

            if par['trial_type'] == 'dualDMS':
                # run an additional session with probe stimuli
                save_fn = 'probe_' + par['save_fn']
                update = {'probe_trial_pct': 1, 'save_fn': save_fn}
                update_parameters(update)
                trial_info = stim.generate_trial()
                y_hat, state_hist, syn_x_hist, syn_u_hist = \
                    sess.run([model.y_hat, model.hidden_state_hist, model.syn_x_hist, model.syn_u_hist],
                             {x: trial_info['neural_input'], y: trial_info['desired_output'],
                              mask: trial_info['train_mask']})
                analysis.analyze_model(trial_info, y_hat, state_hist, syn_x_hist, syn_u_hist, model_performance, weights,
                                       simulation=False, tuning=False, decoding=True,
                                       load_previous_file=False, save_raw_data=False)
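
# Illustrative usage (the script name and argument handling are assumptions):
# main(gpu_id) expects the GPU id as a string, which it writes into
# CUDA_VISIBLE_DEVICES, e.g. `python main.py 0`.
if __name__ == '__main__':
    import sys
    main(sys.argv[1] if len(sys.argv) > 1 else '0')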