Example No. 1
def compare_brains_in_regions(brain_regions, regions_to_voxels):
  # Group this subject's voxel activations by brain region, keeping only
  # regions listed in best_brain_regions and voxels chosen by the feature
  # selector. Relies on i, brains, brain_fs, best_brain_regions, brain_labels
  # and the get_dists/compute_dist_of_dists/plot helpers from the enclosing scope.
  for region_name, voxels in regions_to_voxels[i].items():
    if region_name in best_brain_regions:
      if region_name not in brain_regions:
        brain_regions[region_name] = []
      voxels = [v for v in voxels if v in brain_fs[i].get_selected_indexes()]
      brain_regions[region_name].append(brains[-1][:, voxels])

  # For every region that has data, compute pairwise distances and report the
  # mean and standard deviation of the resulting klz scores.
  for region_name in brain_regions:
    if min(map(len, brain_regions[region_name])) > 0:
      x_brain, C_brain = get_dists(brain_regions[region_name])
      klz_brain, labels_brain = compute_dist_of_dists(x_brain, C_brain, brain_labels)
      print(region_name, '\t', np.mean(klz_brain), '\t', np.std(klz_brain))
      plot(klz_brain, labels_brain)
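A side note on the indexing used here and in later examples: restricting an activation matrix to a set of voxels is plain NumPy column selection. A tiny self-contained illustration with made-up data:

import numpy as np

activations = np.arange(20).reshape(4, 5)   # 4 timepoints x 5 voxels (dummy data)
voxels = [0, 2, 4]                          # indices of the voxels to keep
selected = activations[:, voxels]           # keep only those columns
print(selected.shape)                       # -> (4, 3)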
Example No. 2
    # Pool embeddings and labels across all keys, skipping each key's first entry.
    all_embeddings = []
    all_labels = []
    for key in embeddings.keys():
        all_embeddings += embeddings[key][1:]
        all_labels += labels[key][1:]

    #x, C = get_dists(brains + all_embeddings)
    #klz, prz, labels_ = compute_dist_of_dists(x, C, brain_labels + all_labels)
    #plot(prz, brain_regions_labels)
    #klz = np.asarray(klz)
    #print(klz.shape)

    #print(brain_regions_labels)

    import csv

    x, C = get_dists(all_embeddings)
    klz, prz, labels_ = compute_dist_of_dists(x, C, all_labels)
    with open('bert_com_sim_prz_' + '.csv', 'w') as f:
        writer = csv.writer(f)
        writer.writerow(all_labels)
        writer.writerows(prz)

    print(all_labels)
    plot(prz, all_labels)

    # The first half of all_labels pairs with the second half; print each pair's
    # suffix together with its similarity score.
    half = len(all_labels) // 2
    for i in np.arange(half):
        print(all_labels[i].split("_")[-1],
              all_labels[i + half].split("_")[-1], prz[i][i + half])
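For reference, the CSV written above holds one header row of labels followed by one similarity row per item. A minimal self-contained sketch of that layout with dummy data (the real prz matrix comes from compute_dist_of_dists):

import csv
import numpy as np

labels = ['bert_context_1', 'bert_context_2', 'elmo_context_1', 'elmo_context_2']
prz = np.random.rand(len(labels), len(labels))   # dummy similarity matrix

with open('example_prz.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(labels)   # header row: one column per label
    writer.writerows(prz)     # one row of similarities per item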
Example No. 3
                              str(i) + '_Brain'), 'rb'))
        for b in blocks:
            brains[-1].extend(a['brain_activations'][b][11 + delay:-3])

        brains[-1] = np.asarray(brains[-1])
        # Fit the feature selector on this subject's activations and keep only
        # the voxels it selects.
        brain_fs[i].fit(brains[-1])
        original_selected_voxels = brain_fs[i].get_selected_indexes()
        print(len(original_selected_voxels))
        brains[-1] = brains[-1][:, original_selected_voxels]
        brain_labels.append('brain_' + str(i))
        print(brains[-1].shape)

    print('avg brain shape:', )
    x, C = get_dists(brains + all_embeddings)
    klz, prz, labels_ = compute_dist_of_dists(x, C, brain_labels + all_labels)
    plot(prz, labels_)
    klz = np.asarray(klz)
    print(klz.shape)

    import csv

    with open('klz_' + str(delay) + '.csv', "w") as f:
        writer = csv.writer(f)
        writer.writerows(klz)

    with open('prz_' + str(delay) + '.csv', "w") as f:
        writer = csv.writer(f)
        writer.writerows(prz)
    #Get brain regions:

    for l in labels_:
Example No. 4
                labels[embedding_key].append(embedding_key + '_context_' +
                                             str(context_size))

    print("####################################")
    # Group embeddings and labels by context length (0 through 6).
    all_embeddings_per_context_length = {}
    all_labels_per_context_length = {}
    for context_size in np.arange(7):
        all_embeddings_per_context_length[context_size] = []
        all_labels_per_context_length[context_size] = []
        for key in embeddings.keys():
            all_embeddings_per_context_length[context_size] += [
                embeddings[key][context_size]
            ]
            all_labels_per_context_length[context_size] += [
                labels[key][context_size]
            ]

    import csv

    for context_size in np.arange(7):
        x, C = get_dists(all_embeddings_per_context_length[context_size])
        klz, prz, labels_ = compute_dist_of_dists(
            x, C, all_labels_per_context_length[context_size])
        print(context_size, ':', np.mean(prz))
        with open('com_sim_prz_' + str(context_size) + '.csv', 'w') as f:
            writer = csv.writer(f)
            writer.writerow(all_labels_per_context_length[context_size])
            writer.writerows(prz)

        plot(prz, all_labels_per_context_length[context_size])
Example No. 5
    def train(self, episodes=-1):

        # Hacky...
        if episodes < 0:
            episodes = self.max_episodes

        episode = 0
        all_rewards = []

        try:
            # Set this to "while True" for genuine convergence
            for e in range(episodes):

                # Start episode
                episode_reward = 0
                self.episode_count = e
                t = 0
                state = self.env.reset()
                state = np.reshape(state, [1, self.n_features])

                while True:
                    # self.env.render()

                    # Select action
                    action = self._select_action(state)

                    # Execute transition
                    next_state, reward, done, info = self.env.step(action)
                    episode_reward += reward
                    next_state = np.reshape(next_state, [1, self.n_features])

                    # Store experience tuple in memory
                    self.memory.append(
                        (state, action, reward, next_state, done))
                    state = next_state

                    # Replay using mini batch
                    self._update_Q()

                    # Copy learned Q function into target network
                    if t % self.net_replacement_freq == 0:
                        self.Q_ = clone_model(self.Q)
                        self.Q_.set_weights(self.Q.get_weights())

                    t += 1
                    if done:
                        break

                all_rewards.append(episode_reward)
                sma = np.mean(all_rewards[-SMA_WINDOW:])
                logger.info('{},{},{},{}'.format(episode, episode_reward,
                                                 self.epsilon, sma))
                episode += 1

                # Default case: episodic epsilon decay
                if not self.epsilon_special:
                    if self.epsilon > self.epsilon_min:
                        self.epsilon *= self.epsilon_decay_rate

                # Special case: stepwise epsilon decay
                else:
                    if episode < 150:
                        self.epsilon = 1.0
                    elif episode < 250:
                        self.epsilon = 0.5
                    else:
                        self.epsilon = 0.0

                # Convergence
                if sma >= 200:
                    self.solved = True
                    break

        except KeyboardInterrupt:
            logger.info('KeyboardInterrupt: halting training')
        finally:
            plot(all_rewards)
            self._save_model()
            return all_rewards
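The target-network update inside the loop (clone_model followed by set_weights) is the standard Keras pattern for syncing Q_ with Q. A minimal standalone sketch, assuming tf.keras and a toy architecture rather than the author's actual network:

import numpy as np
from tensorflow.keras.models import Sequential, clone_model
from tensorflow.keras.layers import Dense

# Toy Q-network: 4 state features in, 2 action values out (placeholder sizes).
Q = Sequential([Dense(24, activation='relu', input_shape=(4,)),
                Dense(2, activation='linear')])

# clone_model copies the architecture only; set_weights copies the parameters.
Q_target = clone_model(Q)
Q_target.set_weights(Q.get_weights())

assert all(np.array_equal(a, b)
           for a, b in zip(Q.get_weights(), Q_target.get_weights()))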
Example No. 6
    randoms = [
        uniform_random_300, normal_random_300, uniform_random_100,
        normal_random_100, uniform_random_1000, normal_random_1000
    ]

    random_labels = [
        'uniform_random_300', 'normal_random_300', 'uniform_random_100',
        'normal_random_100', 'uniform_random_1000', 'normal_random_1000'
    ]

    print("brain shapes:", brain_regions[0].shape)
    x, C = get_dists(brain_regions + all_embeddings + randoms)
    klz, prz, labels_, p_vals = compute_dist_of_dists(
        x, C, brain_regions_labels + all_labels + random_labels)
    plot(prz, labels_)
    plot(p_vals, labels_)

    klz = np.asarray(klz)
    print(klz.shape)

    dic2save = {"klz": klz, "prz": prz, "labels_": labels_, "p_vals": p_vals}
    np.save(
        '_'.join(list(map(str, FLAGS.blocks))) +
        "rsa_results_all_brain_regions_" + str(delay) + "_" +
        selecting_feature, dic2save)

    x, C = get_dists(brains + all_embeddings + randoms)
    klz, prz, labels_, p_vals = compute_dist_of_dists(
        x, C, brain_labels + all_labels + random_labels)
    plot(prz, labels_)
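Since dic2save is a plain dict, np.save stores it as a pickled 0-d object array; reading it back later needs allow_pickle=True and .item(). A minimal round-trip sketch with a placeholder filename:

import numpy as np

results = {'klz': np.zeros((2, 2)), 'prz': np.ones((2, 2)), 'labels_': ['a', 'b']}
np.save('rsa_results_example.npy', results)   # dict is pickled inside the .npy file

loaded = np.load('rsa_results_example.npy', allow_pickle=True).item()
print(loaded['labels_'])   # -> ['a', 'b']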
Example No. 7
                voxels = [v for v in voxels if v in original_selected_voxels]
                brain_regions.append(brains[-1][:, voxels])
                brain_regions_labels.append(
                    ['subject_' + str(i) + region_name])

        selected_voxels = [
            v for v in original_selected_voxels
            if voxel_to_regions[i][v] in best_brain_regions
        ]
        brains[-1] = brains[-1][:, selected_voxels]
        brain_labels.append('brain_' + str(i))
        print(brains[-1].shape)

    x, C = get_dists(brains + all_embeddings)
    klz, prz, labels_ = compute_dist_of_dists(x, C, brain_labels + all_labels)
    plot(prz, brain_regions_labels + all_labels)
    klz = np.asarray(klz)
    print(klz.shape)

    #print(brain_regions_labels)

    print(brain_regions_labels)
    for context_size in [1]:
        print('context_size:', context_size)
        for bi in np.arange(len(brain_regions_labels)):
            print(brain_regions_labels[bi])
            output = brain_regions_labels[bi][0] + ' '
            for key in embeddings:
                print(key, embeddings[key][context_size].shape)
                embedding_key = key + '_' + str(context_size)
                x, C = get_dists([brain_regions[bi]] +
Example No. 8
    def train(self, env):

        # Track rewards
        all_rewards = []

        try:
            while True:
                state = env.reset()
                state = [
                    round(s, PRECISION)
                    for s in state[:CONTINUOUS_OBSERVATIONS]
                ]
                action = self._query_initial(
                    state, env.discrete_obs_space
                )  # set the state and get first action
                episode_return = 0
                steps = 0
                total_Q_update = 0

                while True:
                    new_state, reward, done, details = env.step(action)
                    new_state = [
                        round(s, PRECISION)
                        for s in new_state[:CONTINUOUS_OBSERVATIONS]
                    ]
                    # env.render()
                    episode_return += reward

                    # if steps % 10 == 0:
                    #     print([x for x in new_state])
                    #     print("step {} total_reward {:+0.2f}".format(steps, episode_return))
                    steps += 1

                    if done:
                        break

                    action, delta_Q = self._query(state, action, new_state,
                                                  reward,
                                                  env.discrete_obs_space)
                    total_Q_update += delta_Q

                all_rewards.append(episode_return)

                sma = np.mean(all_rewards[-SMA_WINDOW:])

                if self.episodes % 10 == 0:
                    if self.episodes >= SMA_WINDOW:
                        logger.info(
                            'Episode {} | Reward = {} | SMA = {}'.format(
                                self.episodes, episode_return, sma))
                    else:
                        logger.info('Episode {} | Reward = {}'.format(
                            self.episodes, episode_return))

                # Convergence
                if self.episodes > SMA_WINDOW and sma >= SOLUTION_THRESHOLD:
                    break

                self.episodes += 1
        except KeyboardInterrupt:
            logger.warning('KeyboardInterrupt - halting training')

        plot(all_rewards,
             title='Rewards per episode',
             xlab='Episode',
             ylab='Reward')
        logger.info('{}% of actions were random'.format(
            round(100. * self.random_actions / self.total_actions, 2)))
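Both train() loops decide convergence with a simple moving average over the most recent episode rewards. A minimal sketch of that check (SMA_WINDOW and the threshold below are placeholder values, not the module's constants):

import numpy as np

SMA_WINDOW = 100            # placeholder window size
SOLUTION_THRESHOLD = 200    # placeholder reward threshold

all_rewards = list(np.random.uniform(150, 250, size=500))   # dummy episode rewards
sma = np.mean(all_rewards[-SMA_WINDOW:])
solved = len(all_rewards) > SMA_WINDOW and sma >= SOLUTION_THRESHOLD
print('SMA over last {} episodes: {:.1f}, solved: {}'.format(SMA_WINDOW, sma, solved))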