Example #1
    def _q_argmax(self, tp, x, learn_params):
        """
        Calculate the parameters maximizing the Q function (lower bound)
        :param tp: Type posterior probability
        :param x: Observed matrix [GxC] of molecular counts
        :param learn_params: Which parameters the model should update: 'mu', 'tau', 'p', or any combination of these.
                             Default is ['mu', 'tau', 'p']. Parameters not in this list are returned unchanged.
        :return: A tuple of new parameters (log(tau), p, mu)
        """
        tau_ = self.log_tau
        mu_ = self.mu
        p_ = self.p
        tp_exp = np.exp(tp)  # was already normalized so OK to exponentiate

        if 'tau' in learn_params:
            tau_ = lse(tp, 0) + np.log(EPS)  # logsumexp the probabilities over all cells, and regularize
            tau_ = tau_ - lse(tau_)  # normalize in log space

        if 'p' in learn_params:
            Xcg = np.sum(x, axis=0)
            muTc = np.sum(mu_, axis=0).reshape((self.T, 1))
            p_ = Xcg / (Xcg + (1 - p_) * np.sum(tp_exp * muTc.T, 1))

        if 'mu' in learn_params:
            x_tile = np.tile(x, [self.T, 1, 1])
            mu_tile = np.transpose(np.tile(mu_, [self.C, 1, 1]))
            comp_p_tile = np.tile((1 - self.p), [self.T, self.G, 1])
            Wtc_tile = np.transpose(np.tile(tp_exp, [self.G, 1, 1]), [2,0,1])
            Wtc_norm_fact = tp_exp.sum(0).reshape((1, self.T)) + EPS
            mu_ = (Wtc_tile * (mu_tile * comp_p_tile + x_tile)).sum(2).T / Wtc_norm_fact

        return tau_, p_, mu_
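
The three updates above use the standard log-space normalization pattern: subtracting the logsumexp of a vector of log-weights yields log-probabilities that exponentiate to a distribution summing to one. A minimal self-contained sketch of that pattern, assuming `lse` is `scipy.special.logsumexp` and using toy shapes rather than the class above:

import numpy as np
from scipy.special import logsumexp as lse

rng = np.random.default_rng(0)

# Toy log-likelihoods: 5 cells (rows) x 3 types (columns).
tl = rng.normal(size=(5, 3))

# Per-cell log-posterior over types (cf. type_posterior in Example #8).
tp = tl - lse(tl, axis=1, keepdims=True)

# Mixture-weight update in the spirit of the 'tau' branch: pool the
# posteriors over cells, then renormalize in log space.
log_tau = lse(tp, axis=0)
log_tau = log_tau - lse(log_tau)

assert np.allclose(np.exp(tp).sum(axis=1), 1.0)
assert np.isclose(np.exp(log_tau).sum(), 1.0)
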
Example #2
 def Z_DP_scalar(self, x, W_u, W_b):
     z = np.vstack([-np.dot(W_u, x[0]), np.zeros(W_u.shape[0])])
     for i in xrange(1, len(x)):
         for n in xrange(self.n_labels):
             z[1, n] = lse(z[0] - np.dot(x[i], W_u[n]) - np.squeeze(W_b[:, n] * self.phi_B[:, n]))
         z[0], z[1] = z[1], 0.
     return lse(z[0])
Example #3
 def Z_DP_concat(self, x, W_u, W_b):
     z = np.vstack([-np.dot(W_u, x[0]), np.zeros(W_u.shape[0])])
     for i in xrange(1, len(x)):
         for n in xrange(self.n_labels):
             z[1, n] = lse(z[0] - np.dot(x[i], W_u[n]) - np.dot(W_b[:, n],
                 np.concatenate((x[i - 1], x[i]))))
         z[0], z[1] = z[1], 0.
     return lse(z[0])
Example #4
 def Z_full_DP_scalar(self, x, W_u, W_b):
     z = np.vstack([-np.dot(W_u, x[0]), np.zeros(W_u.shape[0])])
     for i in xrange(1, len(x)):
         for n in xrange(self.n_labels):
             q = np.zeros(self.n_labels)
             for m in xrange(self.n_labels):
                 q[m] = z[0, m] - np.dot(x[i], W_u[n]) - W_b[n, m] * self.phi_B[n, m]
             z[1, n] = lse(q)
         z[0], z[1] = z[1], 0.
     return lse(z[0])
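
Examples #2-#4 are variants of the same forward recursion: the log partition function of a linear chain is accumulated one position at a time, with lse playing the role of the sum in the sum-product algorithm. A self-contained sketch of that recursion over assumed generic log-potentials (not the classes above), checked against brute-force enumeration:

import numpy as np
from scipy.special import logsumexp as lse

def chain_log_partition(unary, pairwise):
    # unary: (T, K) log-potentials per position/label; pairwise: (K, K) per label pair
    alpha = unary[0]
    for t in range(1, len(unary)):
        # alpha_new[n] = lse_m(alpha[m] + pairwise[m, n]) + unary[t, n]
        alpha = lse(alpha[:, None] + pairwise, axis=0) + unary[t]
    return lse(alpha)

rng = np.random.default_rng(0)
T, K = 4, 3
unary, pairwise = rng.normal(size=(T, K)), rng.normal(size=(K, K))

# Brute force over all K**T labelings agrees with the dynamic program.
brute = lse([unary[np.arange(T), list(y)].sum()
             + sum(pairwise[y[t - 1], y[t]] for t in range(1, T))
             for y in np.ndindex(*([K] * T))])
assert np.isclose(chain_log_partition(unary, pairwise), brute)
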
Example #5
def get_proposal(args, n_propose=1, sample='topN'):
    """
    Get the proposal for an additional item from f, given S
    """
    Sorig, f = args

    assert isinstance(f, DiversityFun), "Model must be an instance of DiversityFun."
    assert n_propose == 1  # modified to work for this case only

    V = f.V
    N = len(V)

    S = Sorig[:]
    # Vred = np.array(list(set(V).difference(set(S))))
    Vred = np.delete(np.array(V), S)
    f_S = f(S)

    probs = []
    gains = []
    vals = f.all_singleton_adds(S)
    vals = np.delete(vals, S)
    gains = vals - f_S
    # for i in Vred:
    #     f_Si = f(list(set(S).union(set([i]))))
    #     probs.append(f_Si)
    #     gains.append(f_Si - f_S)
    probs = np.exp(vals - lse(vals))

    order = Vred[np.argsort(probs)[::-1]]
    probs = np.sort(probs)[::-1]

    return {'order': order, 'probs': probs}
Example #6
 def log_p(self, theta, resp):
     strokes = self._strokes
     s = len(strokes)
     A = self._params[:(s - 1)]
     B = self._params[(s - 1):]
     make_tens = T.Tensor in map(type, [A, B, theta, resp])
     if make_tens:
         theta = to_tens(theta)
     d = {stroke: i for i, stroke in enumerate(strokes)}
     resp = P.clip(resp, strokes[0], strokes[-1])
     temp = P.vectorize(d.get)(resp)
     if make_tens:
         resp = to_tens(temp)
     else:
         resp = temp
     lo = [0.0 * theta] * s  # log odds vs par
     I = P.searchsorted(strokes, self._par)
     for i in range(I + 1, s):
         lo[i] = lo[i - 1] - A[i - 1] * (theta - B[i - 1])
     for i in range(I - 1, -1, -1):
         lo[i] = lo[i + 1] + A[i] * (theta - B[i])
     if make_tens:
         lo = T.stack(lo)
         lpp = lo - T.logsumexp(lo, dim=0, keepdim=True)
         lp = sum([lpp[i] * to_tens(resp == i) for i in range(s)])
     else:
         lo = P.stack(lo)
         lpp = lo - lse(lo, 0, keepdims=True)
         lp = sum([lpp[i] * (resp == i) for i in range(s)])
     return lp
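
The two branches at the end compute the same quantity: a log-softmax of the log-odds over the s categories, lo - logsumexp(lo) along axis 0, in torch and NumPy respectively. A tiny standalone illustration of the NumPy branch with toy values:

import numpy as np
from scipy.special import logsumexp as lse

lo = np.array([[0.0, 0.5],
               [1.0, 0.5],
               [-2.0, 0.5]])               # (categories, items), toy log-odds

lpp = lo - lse(lo, 0, keepdims=True)        # log-softmax over categories (axis 0)
assert np.allclose(np.exp(lpp).sum(axis=0), 1.0)
# torch equivalent: lo - torch.logsumexp(lo, dim=0, keepdim=True)
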
Example #7
def compute_value_function(q_values, ent_wt=0.0):
    if ent_wt > 0:
        # soft max
        v_fn = ent_wt * lse((1.0 / ent_wt) * q_values, axis=1, keepdims=False)
    else:
        # hard max
        v_fn = np.max(q_values, axis=1)
    return v_fn
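
The soft value ent_wt * lse(q / ent_wt) is a smooth upper bound on the hard maximum and approaches it as the entropy weight goes to zero (the gap is at most ent_wt * log(n_actions)). A quick numeric check with toy Q-values:

import numpy as np
from scipy.special import logsumexp as lse

q_values = np.array([[1.0, 2.0, 3.0],
                     [0.5, 0.5, 4.0]])
hard = np.max(q_values, axis=1)

for ent_wt in (1.0, 0.1, 0.01):
    soft = ent_wt * lse((1.0 / ent_wt) * q_values, axis=1)
    assert np.all(soft >= hard)                                   # upper bound on the hard max
    assert np.all(soft - hard <= ent_wt * np.log(q_values.shape[1]) + 1e-12)
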
Example #8
 def type_posterior(self, x, tl=None):
     """
     Calculate the log-posterior probability of types
     :param x: Observed matrix [GxC] of molecular counts
      :param tl: Type likelihood. If not given, it is calculated from x; if given, x is ignored.
      :return: Log-posterior probability of types per cell
     """
     if tl is None: tl = self._type_likelihood(x)
     tp = tl - np.reshape(lse(tl, axis=1), [self.C, 1])
     return tp
Example #9
    def __pnk(self):
        self.pnk = np.zeros((self.n, self.k))
        gvs = {cluster: GV(d=self.GaussComp[cluster].d,
                           S=self.GaussComp[cluster].chol_prec_rvs(),
                           mu=self.GaussComp[cluster].mu_rvs(),
                           method='chol_prec')
               for cluster in xrange(self.k)}
        for datapoint in xrange(self.n):
            a = np.array([self.pi[cluster] + gvs[cluster].logp(self.Xm[datapoint])
                          for cluster in xrange(self.k)])
            self.pnk[datapoint] = np.exp(a - lse(a))

        return self.pnk
Example #10
 def loglikelihood(self, x, return_tl=False):
     """
     Compute the log likelihood of x, given this model
     :param x: Observed matrix [GxC] of molecular counts
     :param return_tl: whether type probability per cell should also be returned
     :return: Pr(X|model)[, log[Pr(X_c|T_c=t,model)], if requested]
     """
     tl = self._type_likelihood(x)
     ll = np.sum(lse(tl, 1))
     return (ll, tl) if return_tl else ll
Example #11
    def __coll_Gibbs_update(self, t=1):

        arr = np.arange(self.n)
        np.random.shuffle(arr)

        for datapoint in arr:
            self.nk[datapoint] = self.k + 1
            ppd = np.array([math.log(self.alpha / self.k + len(self.nk[self.nk == cluster]))
                            + GWM(GC(d=self.d, X=self.Xm[self.nk == cluster])).post_pred_lp_(self.Xm[datapoint])
                            for cluster in xrange(self.k)])
            ppd = np.exp(ppd - lse(ppd))
            self.nk[datapoint] = np.random.choice(np.arange(self.k), p=ppd)
Example #12
 def marginal_slow(self, x, W, pairs, normalize=False, logscale=True):
     """
     unnormalized
     """
     I, V = map(list, zip(*pairs))
     J, y = sorted(set(xrange(len(x))) - set(I)), self.set_idxs(np.zeros(len(x), int), I, V)
     prob = lse([-self.E(x, self.set_idxs(y, J, c), W) for c in self.configs(len(J))])
     if normalize:
         prob -= self.Z(x, *W)
     if not logscale:
         prob = np.exp(prob)
     return prob
Example #13
	def sampleK(self):
		"Samples a cluster assignment k from P(z_new=k|x_new,Z,X)"
		logPriorProbs = np.array( np.log(list(self.priorsOld())+list(self.priorNew())))
		logLikelihoods = self.loglikelihoodsOld()
		logLikelihoods.append(self.loglikelihoodNew())
		logLikelihoods = np.array(logLikelihoods)
		logPosteriorKs = logPriorProbs+logLikelihoods
		logPosteriorKs -= lse(logPosteriorKs)
		posteriorKs = np.exp(logPosteriorKs)
		if len(self.clusters) == 0:
			ks = [0]
		else:
			ks = self.clusters+[max(self.clusters)+1]
		return ks[r.multinomial(1,posteriorKs).argmax()]
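
The exp-normalize step above (subtract lse, then exponentiate) is the numerically stable way to turn unnormalized log-posteriors into the categorical distribution fed to the multinomial draw; exponentiating the raw values directly would underflow. A minimal sketch with assumed toy numbers:

import numpy as np
from scipy.special import logsumexp as lse

log_posterior = np.array([-1050.0, -1052.0, -1049.0])   # np.exp alone underflows to 0 here
probs = np.exp(log_posterior - lse(log_posterior))       # stable exp-normalize
assert np.isclose(probs.sum(), 1.0)

rng = np.random.default_rng(0)
k = rng.choice(len(probs), p=probs)                      # analogous to r.multinomial(1, probs).argmax()
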
Example #14
 def marg_helper(self, x, W_u, W_b, i, val, up=False):
     """ helper """
     if i in (0, len(x) - 1):
         return 0.
     j = i+1 if up else i-1
     k = i+1 if up else i
     pair = lambda m: (val, m) if up else (m, val)
     msgs = []
     for l in self.L:
         msg = -np.dot(x[j], W_u[l]) - self.B_term(W_b, x, k, pair(l))
         if 0 < j < len(x) - 1:
             msg += self.marg_helper(x, W_u, W_b, j, l, up)
         msgs.append(msg)
     return lse(msgs)
Example #15
	def mapK(self):
		"Samples MAP cluster assignment k from P(z_new=k|x_new,Z,X)"
		logPriorProbs = np.array( np.log(list(self.priorsOld())+list(self.priorNew())))
		#print "logPriorProbs",logPriorProbs
		logLikelihoods = self.loglikelihoodsOld()
		logLikelihoods.append(self.loglikelihoodNew())
		logLikelihoods = np.array(logLikelihoods)
		#print "logLikelihoods:", logLikelihoods
		logPosteriorKs = logPriorProbs+logLikelihoods
		logPosteriorKs -= lse(logPosteriorKs)
		posteriorKs = np.exp(logPosteriorKs)
		#print "kPosterior:", posteriorKs
		if len(self.clusters) == 0:
			ks = [0]
		else:
			ks = self.clusters+[max(self.clusters)+1]
		return ks[posteriorKs.argmax()]
Example #16
    def test(self,
             sess,
             beam=False,
             print_paths=False,
             save_model=True,
             auc=False):
        batch_counter = 0
        paths = defaultdict(list)
        answers = []
        feed_dict = {}
        all_final_reward_1 = 0
        all_final_reward_3 = 0
        all_final_reward_5 = 0
        all_final_reward_10 = 0
        all_final_reward_20 = 0
        auc = 0

        total_examples = self.test_environment.total_no_examples
        for episode in tqdm(self.test_environment.get_episodes()):
            batch_counter += 1

            temp_batch_size = episode.no_examples

            self.qr = episode.get_query_relation()
            feed_dict[self.query_relation] = self.qr
            # set initial beam probs
            beam_probs = np.zeros((temp_batch_size * self.test_rollouts, 1))
            # get initial state
            state = episode.get_state()
            mem = self.agent.get_mem_shape()
            agent_mem = np.zeros(
                (mem[0], mem[1], temp_batch_size * self.test_rollouts,
                 mem[3])).astype('float32')
            previous_relation = np.ones(
                (temp_batch_size * self.test_rollouts, ),
                dtype='int64') * self.relation_vocab['DUMMY_START_RELATION']
            feed_dict[self.range_arr] = np.arange(temp_batch_size *
                                                  self.test_rollouts)
            feed_dict[self.input_path[0]] = np.zeros(temp_batch_size *
                                                     self.test_rollouts)

            ####logger rl_code####
            if print_paths:
                self.entity_trajectory = []
                self.relation_trajectory = []
            ####################

            self.log_probs = np.zeros(
                (temp_batch_size * self.test_rollouts, )) * 1.0

            # for each time step
            for i in range(self.path_length):
                if i == 0:
                    feed_dict[self.first_state_of_test] = True
                feed_dict[self.next_relations] = state['next_relations']
                feed_dict[self.next_entities] = state['next_entities']
                feed_dict[self.current_entities] = state['current_entities']
                feed_dict[self.prev_state] = agent_mem
                feed_dict[self.prev_relation] = previous_relation

                loss, agent_mem, test_scores, test_action_idx, chosen_relation = sess.run(
                    [
                        self.test_loss, self.test_state, self.test_logits,
                        self.test_action_idx, self.chosen_relation
                    ],
                    feed_dict=feed_dict)

                if beam:
                    k = self.test_rollouts
                    new_scores = test_scores + beam_probs
                    if i == 0:
                        idx = np.argsort(new_scores)
                        idx = idx[:, -k:]
                        ranged_idx = np.tile([b for b in range(k)],
                                             temp_batch_size)
                        idx = idx[np.arange(k * temp_batch_size), ranged_idx]
                    else:
                        idx = self.top_k(new_scores, k)

                    y = idx // self.max_num_actions
                    x = idx % self.max_num_actions

                    y += np.repeat([b * k for b in range(temp_batch_size)], k)
                    state['current_entities'] = state['current_entities'][y]
                    state['next_relations'] = state['next_relations'][y, :]
                    state['next_entities'] = state['next_entities'][y, :]
                    agent_mem = agent_mem[:, :, y, :]
                    test_action_idx = x
                    chosen_relation = state['next_relations'][
                        np.arange(temp_batch_size * k), x]
                    beam_probs = new_scores[y, x]
                    beam_probs = beam_probs.reshape((-1, 1))
                    if print_paths:
                        for j in range(i):
                            self.entity_trajectory[j] = self.entity_trajectory[
                                j][y]
                            self.relation_trajectory[
                                j] = self.relation_trajectory[j][y]
                previous_relation = chosen_relation

                ####logger rl_code####
                if print_paths:
                    self.entity_trajectory.append(state['current_entities'])
                    self.relation_trajectory.append(chosen_relation)
                ####################
                state = episode(test_action_idx)
                self.log_probs += test_scores[
                    np.arange(self.log_probs.shape[0]), test_action_idx]
            if beam:
                self.log_probs = beam_probs

            ####Logger rl_code####

            if print_paths:
                self.entity_trajectory.append(state['current_entities'])

            # ask environment for final reward
            rewards = episode.get_reward()  # [B*test_rollouts]
            reward_reshape = np.reshape(
                rewards, (temp_batch_size,
                          self.test_rollouts))  # [orig_batch, test_rollouts]
            self.log_probs = np.reshape(self.log_probs,
                                        (temp_batch_size, self.test_rollouts))
            sorted_indx = np.argsort(-self.log_probs)
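            # Note: despite their suffixes, final_reward_1/3/5/10/20 count hits at
            # ranks 1/5/10/50/100, matching the Hits@K labels written to scores.txt below.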
            final_reward_1 = 0
            final_reward_3 = 0
            final_reward_5 = 0
            final_reward_10 = 0
            final_reward_20 = 0
            AP = 0
            ce = episode.state['current_entities'].reshape(
                (temp_batch_size, self.test_rollouts))
            se = episode.start_entities.reshape(
                (temp_batch_size, self.test_rollouts))
            for b in range(temp_batch_size):
                answer_pos = None
                seen = set()
                pos = 0
                if self.pool == 'max':
                    for r in sorted_indx[b]:
                        if reward_reshape[b, r] == self.positive_reward:
                            answer_pos = pos
                            break
                        if ce[b, r] not in seen:
                            seen.add(ce[b, r])
                            pos += 1
                if self.pool == 'sum':
                    scores = defaultdict(list)
                    answer = ''
                    for r in sorted_indx[b]:
                        scores[ce[b, r]].append(self.log_probs[b, r])
                        if reward_reshape[b, r] == self.positive_reward:
                            answer = ce[b, r]
                    final_scores = defaultdict(float)
                    for e in scores:
                        final_scores[e] = lse(scores[e])
                    sorted_answers = sorted(final_scores,
                                            key=final_scores.get,
                                            reverse=True)
                    if answer in sorted_answers:
                        answer_pos = sorted_answers.index(answer)
                        # print("answer: ", answer)
                        # print("sorted_answers: ", sorted_answers)
                    else:
                        answer_pos = None

                if answer_pos is not None:
                    if answer_pos < 100:
                        final_reward_20 += 1
                        if answer_pos < 50:
                            final_reward_10 += 1
                            if answer_pos < 10:
                                final_reward_5 += 1
                                if answer_pos < 5:
                                    final_reward_3 += 1
                                    if answer_pos < 1:
                                        final_reward_1 += 1
                if answer_pos is None:
                    AP += 0
                else:
                    AP += 1.0 / ((answer_pos + 1))
                if print_paths:
                    qr = self.train_environment.grapher.rev_relation_vocab[
                        self.qr[b * self.test_rollouts]]
                    start_e = self.rev_entity_vocab[episode.start_entities[
                        b * self.test_rollouts]]
                    end_e = self.rev_entity_vocab[episode.end_entities[
                        b * self.test_rollouts]]
                    paths[str(qr)].append(
                        str(start_e) + "\t" + str(end_e) + "\n")
                    paths[str(qr)].append("Reward:" + str(
                        1 if answer_pos is not None and answer_pos < 10 else 0) +
                                          "\n")
                    for r in sorted_indx[b]:
                        indx = b * self.test_rollouts + r
                        if rewards[indx] == self.positive_reward:
                            rev = 1
                        else:
                            rev = -1
                        answers.append(self.rev_entity_vocab[se[b, r]] + '\t' +
                                       self.rev_entity_vocab[ce[b, r]] + '\t' +
                                       str(self.log_probs[b, r]) + '\n')
                        paths[str(qr)].append('\t'.join([
                            str(self.rev_entity_vocab[e[indx]])
                            for e in self.entity_trajectory
                        ]) + '\n' + '\t'.join([
                            str(self.rev_relation_vocab[re[indx]])
                            for re in self.relation_trajectory
                        ]) + '\n' + str(rev) + '\n' +
                                              str(self.log_probs[b, r]) +
                                              '\n___' + '\n')
                    paths[str(qr)].append("#####################\n")

            all_final_reward_1 += final_reward_1
            all_final_reward_3 += final_reward_3
            all_final_reward_5 += final_reward_5
            all_final_reward_10 += final_reward_10
            all_final_reward_20 += final_reward_20
            auc += AP

        all_final_reward_1 /= total_examples
        all_final_reward_3 /= total_examples
        all_final_reward_5 /= total_examples
        all_final_reward_10 /= total_examples
        all_final_reward_20 /= total_examples
        auc /= total_examples
        if save_model:
            if all_final_reward_10 >= self.max_hits_at_10:
                self.max_hits_at_10 = all_final_reward_10
                self.save_path = self.model_saver.save(
                    sess, self.model_dir + "model" + '.ckpt')

        if print_paths:
            logger.info("[ printing paths at {} ]".format(self.output_dir +
                                                          '/test_beam/'))
            for q in paths:
                j = q.replace('/', '-')
                with codecs.open(self.path_logger_file_ + '_' + j, 'a',
                                 'utf-8') as pos_file:
                    for p in paths[q]:
                        pos_file.write(p)
            with open(self.path_logger_file_ + 'answers', 'w') as answer_file:
                for a in answers:
                    answer_file.write(a)

        with open(self.output_dir + '/scores.txt', 'a') as score_file:
            score_file.write("Hits@1: {0:7.4f}".format(all_final_reward_1))
            score_file.write("\n")
            score_file.write("Hits@5: {0:7.4f}".format(all_final_reward_3))
            score_file.write("\n")
            score_file.write("Hits@10: {0:7.4f}".format(all_final_reward_5))
            score_file.write("\n")
            score_file.write("Hits@50: {0:7.4f}".format(all_final_reward_10))
            score_file.write("\n")
            score_file.write("Hits@100: {0:7.4f}".format(all_final_reward_20))
            score_file.write("\n")
            score_file.write("auc: {0:7.4f}".format(auc))
            score_file.write("\n")
            score_file.write("\n")

        logger.info("Hits@1: {0:7.4f}".format(all_final_reward_1))
        logger.info("Hits@5: {0:7.4f}".format(all_final_reward_3))
        logger.info("Hits@10: {0:7.4f}".format(all_final_reward_5))
        logger.info("Hits@50: {0:7.4f}".format(all_final_reward_10))
        logger.info("Hits@100: {0:7.4f}".format(all_final_reward_20))
        logger.info("auc: {0:7.4f}".format(auc))
Example #17
 def Z_PL(self, x, y, W, s):
     return lse([-self.E_PL(x, y, W, s, l) for l in self.L])
Example #18
 def logZ_FacLoc_star(self, args):
     partial_list, order = args
     logZ_part = -float('inf')
     for ind in partial_list:
         logZ_part = lse([logZ_part, self.logZ_FacLoc(ind, order)])  # running log-sum accumulation
     return logZ_part  # self.logZ_FacLoc(*args)
Example #19
	def getMarginalLikelihood(self):
		priorMeanSamples = norm.rvs(self.mu0,self.sigma_0,size=1000).reshape(1000,1)
		bentObservations = np.tile(self.observations,1000).reshape(len(self.observations),1000)
		likelihoodUnderPriorSamples = norm.logpdf(self.observations,loc=priorMeanSamples,scale=self.sampleVariance)
		return np.exp(lse(likelihoodUnderPriorSamples)-np.log(1000))
Example #20
    def logZ_fast(self, parallel=False):
        if _debug:
            print('starting logZ_fast')

        logZ = self([]) - self.n_logz[0]

        W = self.W
        N = len(self.V)
        D = self.n_dim

        order = np.argsort(W, axis=0)
        for d in range(D):
            order[:, d] = order[::-1, d]

        import time
        time1 = time.time()
        if parallel:
            ind = np.zeros(D)
            ind_list = []
            for k in range(N**D):
                ind_list.append(list(ind))

                # increase indices
                ind[0] += 1
                for l in range(D):
                    if ind[l] >= N:
                        if l + 1 < D:
                            ind[l + 1] += 1
                        ind[l] = 0

            import multiprocessing
            import itertools
            N_CPU = multiprocessing.cpu_count()
            n_per_cpu = int(np.ceil(len(ind_list) / float(N_CPU)))
            partial_list = []
            for i in range(N_CPU):
                start = n_per_cpu * i
                end = n_per_cpu * (i + 1)
                end = min(end, len(ind_list))
                if start >= end:
                    continue
                partial_list.append(ind_list[start:end])

            pool = multiprocessing.Pool(N_CPU)
            logZ_list = pool.map(self.logZ_FacLoc_star,
                                 zip(partial_list, itertools.repeat(order)))
            pool.close()
            pool.join()

            for res in logZ_list:
                logZ = lse([logZ, res])
        else:
            ind = np.zeros(D)
            for k in range(N**D):
                logZ = lse([logZ, self.logZ_FacLoc(ind, order)])

                # increase indices
                ind[0] += 1
                for l in range(D):
                    if ind[l] >= N:
                        if l + 1 < D:
                            ind[l + 1] += 1
                        ind[l] = 0
        time2 = time.time()

        print("It took %f seconds." % ((time2 - time1)))

        return logZ + self.n_logz[0]
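
Both the loops above and logZ_FacLoc_star (Example #18) accumulate the partition function incrementally as logZ = lse([logZ, term]), starting from -inf (the log of zero); folding terms in one at a time gives the same result as a single lse over all of them. A tiny check of that identity:

import numpy as np
from scipy.special import logsumexp as lse

terms = np.random.default_rng(0).normal(size=50)

acc = -np.inf                      # log(0): the identity element for log-space addition
for t in terms:
    acc = lse([acc, t])            # fold one term in at a time

assert np.isclose(acc, lse(terms))
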
Example #21
 def Z_slow(self, x, *W):
     return lse([-self.E(x, y, W) for y in self.configs(len(x))])
Example #22
 fast log-scale node/edge marginals (optional: normalize with Z)
 pairs: 
  1) node marginal - [(i,v)] where i=node-index and v=clamped-value - P(y_i=v)
  2) edge marginal - [(i,u),(j,v)] for P(y_i=u, y_j=v)
 """
 if len(pairs) == 1:
     (i, u), (j, u2) = pairs * 2
     prob = -np.dot(x[i], W_u[u])
 else:
     (i, u), (j, u2) = pairs
     prob = -sum(np.dot(x[k],W_u[w]) for k,w in pairs) - self.B_term(W_b,x,j,(u,u2))
 if i > 0:
     left = np.zeros((2, self.n_labels))
     if i > 1:
         for m in self.L:
             left[0, m] = lse([-np.dot(x[0], W_u[l]) - self.B_term(W_b, x, 1, (l, m))
                 for l in self.L])
     for v in range(2, i):
         for m in self.L:
             left[1, m] = lse([left[0, l] - self.B_term(W_b, x, v, (l, m))
                 - np.dot(x[v - 1], W_u[l]) for l in self.L])
         left[0], left[1] = left[1], 0.
     prob += lse([left[0, l] - np.dot(x[i - 1], W_u[l]) - self.B_term(W_b, x, i, (l, u))
         for l in self.L])
 if j < len(x) - 1:
     right = np.zeros((2, self.n_labels))
     if j < len(x) - 2:
         for m in self.L:
             right[0, m] = lse([-np.dot(x[-1], W_u[l]) - self.B_term(W_b, x, -1, (m, l))
                 for l in self.L])
     for v in range(len(x) - 3, j, -1):
         for m in self.L:
Example #23
	def getMarginalLikelihood(self):
		priorMeans, priorVariances = norm.rvs(self.mu0,self.sigma0,size=100), 1./chi2.rvs(self.v0,size=100)
		bentObservations = np.tile(self.observations,100).reshape(len(self.observations),100)
		likelihoodUnderPriorSamples = norm.logpdf(bentObservations,loc=priorMeans,scale=priorVariances)[0]
		return np.exp(lse(likelihoodUnderPriorSamples)-np.log(100))
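
This and Example #19 are Monte Carlo estimates of the marginal likelihood: draw parameters from the prior and average the data likelihood over the draws, with the average computed stably as lse(log-likelihoods) - log(n_samples). A self-contained sketch of that estimator under assumed toy values (note that the per-observation log-densities are summed for each prior draw before pooling):

import numpy as np
from scipy.stats import norm
from scipy.special import logsumexp as lse

rng = np.random.default_rng(0)
observations = rng.normal(loc=1.0, scale=1.0, size=20)

n_samples = 1000
mu0, sigma0, obs_sigma = 0.0, 2.0, 1.0                   # assumed prior and noise scales
prior_means = rng.normal(mu0, sigma0, size=n_samples)

# log p(x | mu_s): sum per-observation log-densities for each prior draw
log_lik = norm.logpdf(observations[None, :],
                      loc=prior_means[:, None], scale=obs_sigma).sum(axis=1)

log_marginal = lse(log_lik) - np.log(n_samples)          # log of the Monte Carlo average
print(np.exp(log_marginal))
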
Example #24
 def Z(self, x, W_u, W_b):
     z = -np.dot(W_u, x[0])
     for i in xrange(1, len(x)):
         z = lse((z - np.dot(W_u, x[i, :, None]) - self.B_term(W_b, x, i))[self.L, :, self.L].T, 1)
     return lse(z)