[0, 0, 0], [2, 1, 0], ]) partitions = np.vstack((day1partitions, day1partitions, day1partitions, day4partitions, day5partitions, day5partitions)) #%% alphas = np.power(10., np.arange(1, -50, -1)) posts = np.empty((len(alphas), 3)) * np.nan for idx, alpha in enumerate(alphas): animal = Animal(alpha, observationModelClass=Gauss) for i in range(observations.shape[0]): animal.experience(observations[i, :]) for i in range(3): # for each partition posts[idx, i] = animal.partition_prob(partitions[:, i]) posts #%% figInd += 1 #%% fig = plt.figure(figInd) fig.clear() bar_width = 0.2 idxs = np.array([0, 15, 30]) + 1 #+5 idxs = np.array([0, 20, 40]) + 1 #+5 for i in range(3): plt.bar( np.array([1, 2, 3]) + (i - 1) * bar_width, posts[idxs, i], bar_width) #plt.plot(np.array([1,2,3])+(i-1)*bar_width/10, posts[idxs,i], 'x') labels = []
np.pi).astype(int) # 2 clusters, animal1 partition2_1 = np.zeros((nExperiences, )) # 1 cluster, animal2 partition2_2 = np.mod(animal2.experiences[:, 0] - midpoint2, 2 * np.pi) > np.pi # 2 clusters, animal2 #%% # partition_prob figInd += 1 figInd = 2 fig = plt.figure(figInd) fig.clear() ax = plt.subplot(1, 2, 1) bar_width = 0.2 inds = np.array([0, 1]) - bar_width / 2 rat1 = animal1.partition_prob(partition1_1) - animal1.partition_prob( partition1_2) rat2 = animal1.partition_prob(partition2_1) - animal1.partition_prob( partition2_2) plt.bar(np.array([-bar_width / 2, +bar_width / 2]), -np.array([rat2, rat1]), bar_width, color='rb') m = np.max(np.abs(plt.ylim())) plt.ylim([-m, m]) adjustPlot(ax, fuzzyzero=True) plt.xticks(np.array([-bar_width / 2, +bar_width / 2]), ['Random', 'Directed']) plt.ylabel('Negative Partition Evidence Ratio') plt.savefig('figures/' + filename + '-' + str(figInd) + '.pdf', bbox_inches='tight')
def experiment(alpha=alpha_default, sd0=sd0_default, sd=sd_default,
               N=N_default, distance=distance_default):
    """Run N blocks of paired-cluster presentations and score partitions.

    Each block draws nPresentations stimuli from a Gaussian centred at
    -distance/2 and then nPresentations from one at +distance/2, and after
    each experience forces the animal's cluster assignments to the
    two-cluster partition.  Three candidate partitions are tracked:
    everything-in-one-cluster (partition1), the c1/c2 split (partition2),
    and every-experience-its-own-cluster (partitionN).

    Returns:
        (similarity, lps, _animal, partition1, partition2, c1, c2,
         experiences, posts) — per-block activity correlation, per-block
        partition log-probs, the trained animal, the partition vectors, the
        two stimulus distributions, the raw stimuli, and per-experience
        posteriors.
    """
    np.random.seed(1)  # fixed seed: deterministic draws for reproducibility
    _model = WorldModel(CRP(alpha), Gauss(sigma_0=sd0))
    _animal = Animal(worldModel=_model)
    # Flat record of every stimulus, interleaved c1/c2; NaN marks unfilled slots.
    # NOTE(review): c1.rvs() has shape (1, K) given the scale below, but each
    # draw is assigned to a scalar slot — this assumes K == 1; confirm.
    experiences = np.nan * np.ones((N * nPresentations * 2))
    partition1 = np.array([])  # all experiences in one cluster
    partition2 = np.array([])  # c1 draws -> cluster 0, c2 draws -> cluster 1
    partitionN = np.array([])  # each experience in its own cluster
    lps = np.nan * np.empty((N, 3))  # log prob of each partition, per block
    # NOTE(review): posts is filled block-wise (first-half/second-half) while
    # experiences is interleaved — confirm the differing layouts are intended.
    posts = np.nan * np.empty((N * nPresentations * 2, 3))
    similarity = np.nan * np.empty((N, 1))
    for i in range(N):
        c1 = stats.norm(loc=np.array([-distance / 2]), scale=sd * np.ones((1, K)))
        c2 = stats.norm(loc=np.array([+distance / 2]), scale=sd * np.ones((1, K)))
        for _i in range(nPresentations):
            experience1 = c1.rvs()
            experiences[i * nPresentations * 2 + _i * 2] = experience1
            post1 = _animal.experience(experience1)
            if i + _i == 0:
                # Very first experience: seed the partition column vectors.
                partition1 = np.array([[0]])
                partition2 = np.array([[0]])
                partitionN = np.array([[0]])
            else:
                partition1 = np.vstack((partition1, 0))
                partition2 = np.vstack((partition2, 0))
                partitionN = np.vstack((partitionN, 2 * i * nPresentations + _i))
            if i > 0:
                posts[i * nPresentations * 2 + _i, :] = post1.squeeze()
            # Clamp assignments to the two-cluster partition after each step.
            # NOTE(review): placement inside the presentation loop inferred
            # from the second loop below — confirm against the original layout.
            _animal.clusterAssignments = partition2
        for _i in range(nPresentations):
            experience2 = c2.rvs()
            experiences[i * nPresentations * 2 + _i * 2 + 1] = experience2
            post2 = _animal.experience(experience2)
            partition1 = np.vstack((partition1, 0))
            partition2 = np.vstack((partition2, 1))
            partitionN = np.vstack((partitionN, (2 * i + 1) * nPresentations + _i))
            if i > 0:
                posts[i * nPresentations * 2 + nPresentations + _i, :] = post2.squeeze()
            _animal.clusterAssignments = partition2
        #TODO: look over this for what to do about partitions2activity
        ret = partitions2activity(_animal, experience1, [partition1, partition2],
                                  [zeta0, zeta1A, zeta1B])
        activity1 = ret[0]
        tmp_lps = ret[1]
        ret = partitions2activity(_animal, experience2, [partition1, partition2],
                                  [zeta0, zeta1A, zeta1B])
        activity2 = ret[0]
        tmp_lps_2 = ret[1]
        # Both calls must report identical partition log-probs.
        np.testing.assert_array_equal(tmp_lps, tmp_lps_2)
        # (A dead store "lps[i, 0:2] = tmp_lps", immediately overwritten by the
        # explicit partition_prob assignments below, was removed.)
        lps[i, 0] = _animal.partition_prob(partition1)
        lps[i, 1] = _animal.partition_prob(partition2)
        lps[i, 2] = _animal.partition_prob(partitionN)
        # Representation similarity between the last c1 and c2 responses.
        similarity[i] = np.corrcoef(activity1, activity2)[0, 1]
    return similarity, lps, _animal, partition1, partition2, c1, c2, experiences, posts