Example #1
0
 def __E_step(self, times, Y, **args):
     """Compute the E-step expectations of the EM algorithm.

     For each demonstration n, accumulates the Gaussian posterior of the
     weight vector w given the observations Y[n] at the given times, under
     the current (or caller-overridden) model parameters.

     Parameters
     ----------
     times : list
         Per-demonstration time vectors; len(times) is the number of
         demonstrations N.
     Y : list
         Per-demonstration observations; Y[n][t] is the observation at
         time index t of demonstration n.
     args : dict
         Optional overrides: 'mu_w', 'Sigma_w', 'Sigma_y', and their
         precomputed factorizations 'Sigma_w_val' / 'Sigma_y_val'
         (dicts with at least an 'inv' entry).

     Returns
     -------
     dict
         {'w_means': [...], 'w_covs': [...]}, each a list of length N
         holding the posterior mean and covariance of w per demonstration.
     """
     #1) Set up default values
     args.setdefault('Sigma_w', self.Sigma_w)
     args.setdefault('Sigma_y', self.Sigma_y)
     # NOTE: dict.setdefault evaluates its second argument eagerly, so the
     # previous code ran cov_mat_precomp (which factorizes/inverts the
     # covariance) on every call even when the caller had already supplied
     # a precomputed value. Guarding with a membership test avoids that
     # redundant work while keeping identical results.
     if 'Sigma_w_val' not in args:
         args['Sigma_w_val'] = cov_mat_precomp(args['Sigma_w'])
     if 'Sigma_y_val' not in args:
         args['Sigma_y_val'] = cov_mat_precomp(args['Sigma_y'])
     args.setdefault('mu_w', self.mu_w)
     #2) Load some variables
     inv_sig_w = args['Sigma_w_val']['inv']  # Sigma_w^-1
     inv_sig_y = args['Sigma_y_val']['inv']  # Sigma_y^-1
     mu_w = args['mu_w']
     Phi = self.__get_Phi(times, **args)
     #3) Compute expectations: accumulate precision and precision-weighted
     #   mean over time steps (standard Gaussian conditioning), starting
     #   from the prior contribution.
     w_means = []
     w_covs = []
     for n, time in enumerate(times):
         Tn = len(Y[n])
         sum_mean = np.dot(inv_sig_w, mu_w)  # prior term of the mean
         sum_cov = inv_sig_w                 # prior precision
         for t in xrange(Tn):
             phi_nt = Phi[n][t]
             tmp1 = np.dot(np.transpose(phi_nt), inv_sig_y)
             sum_mean = sum_mean + np.dot(tmp1, Y[n][t])
             sum_cov = sum_cov + np.dot(tmp1, phi_nt)
         # Posterior covariance = inverse of accumulated precision;
         # force_sym guards against numerical asymmetry from inv().
         Swn = utils.force_sym(np.linalg.inv(sum_cov))
         wn = np.dot(Swn, sum_mean)
         w_means.append(wn)
         w_covs.append(Swn)
     return {'w_means': w_means, 'w_covs': w_covs}
Example #2
0
    def __M_step(self, times, Y, expectations, **args):
        """M-step of the EM algorithm: re-estimate mu_w, Sigma_w and Sigma_y.

        Uses the posterior moments computed by the E-step to update the
        model parameters in place (self.mu_w, self.Sigma_w, self.Sigma_y),
        and optionally re-optimizes the basis-function parameters.

        Parameters
        ----------
        times : list
            Per-demonstration time vectors; len(times) is N.
        Y : list
            Per-demonstration observations; Y[n][t] is the observation at
            time index t of demonstration n.
        expectations : dict
            Output of the E-step: 'w_means' and 'w_covs' lists of length N.
        args : dict
            Flags and priors read below: 'prior_mu_w', 'prior_Sigma_w',
            'no_Sw', 'print_inner_lb', 'joint_indep', 'num_joints',
            'diag_sy', 'opt_basis_pars'. NOTE(review): 'print_inner_lb',
            'joint_indep', 'diag_sy' and 'opt_basis_pars' are accessed with
            args[...] and so must be present — presumably filled in by the
            caller's defaults; verify against the calling code.
        """
        Phi = self.__get_Phi(times, **args)
        N = len(times)  # number of demonstrations
        w_means = expectations['w_means']
        w_covs = expectations['w_covs']
        # Sum of outer products x x^T over a list of vectors (scatter matrix).
        n_var = lambda X: sum(map(lambda x: np.outer(x, x), X))

        #1) Optimize mu_w
        wn_sum = sum(w_means)
        if 'prior_mu_w' in args:
            # MAP estimate with prior parameters k0 (strength) and m0 (mean);
            # looks like a conjugate Gaussian prior on mu_w — confirm.
            prior_mu_w = args['prior_mu_w']
            mu_w = (wn_sum + prior_mu_w['k0'] * prior_mu_w['m0']) / (
                N + prior_mu_w['k0'])
        else:
            # Maximum likelihood: plain average of the posterior means.
            mu_w = (wn_sum) / N

        #2) Optimize Sigma_w
        diff_w = map(lambda x: x - mu_w, w_means)
        if 'no_Sw' in args and args['no_Sw'] == True:
            # 'no_Sw' drops the posterior covariance terms, keeping only the
            # scatter of the posterior means.
            sw_sum = utils.force_sym(n_var(diff_w))
        else:
            sw_sum = utils.force_sym(sum(w_covs) + n_var(diff_w))

        self.__Sigma_w_mle = sw_sum / N  # Maximum likelihood estimate for Sigma_w
        if 'prior_Sigma_w' in args:
            # MAP estimate; the (v0 + D + 1) terms match an inverse-Wishart
            # prior with v0 degrees of freedom — confirm.
            prior_Sigma_w = args['prior_Sigma_w']
            v0 = prior_Sigma_w['v']
            D = np.shape(self.Sigma_w)[0]
            if 'mean_cov_mle' in prior_Sigma_w:
                # Prior scale matrix derived from the current MLE via a
                # user-supplied callback.
                S0 = prior_Sigma_w['mean_cov_mle'](
                    v0, self.__Sigma_w_mle) * (v0 + D + 1)
            else:
                S0 = prior_Sigma_w['invS0']
            Sigma_w = (S0 + sw_sum) / (N + v0 + D + 1)
        else:
            Sigma_w = self.__Sigma_w_mle

        #3) Optimize Sigma_y
        # Residuals of the observations w.r.t. the posterior-mean prediction,
        # plus (unless 'no_Sw') the observation-space projection of the
        # posterior weight uncertainty.
        diff_y = []
        uncert_w_y = []
        for n in xrange(N):
            for t in xrange(len(times[n])):
                diff_y.append(Y[n][t] - np.dot(Phi[n][t], w_means[n]))
                uncert_w_y.append(
                    np.dot(np.dot(Phi[n][t], w_covs[n]), Phi[n][t].T))
        if 'no_Sw' in args and args['no_Sw'] == True:
            Sigma_y = (n_var(diff_y)) / len(diff_y)
        else:
            Sigma_y = (n_var(diff_y) + sum(uncert_w_y)) / len(diff_y)

        #4) Update
        # The parameters are committed one at a time so that the optional
        # lower-bound prints show the effect of each individual update.
        self.mu_w = mu_w
        if args['print_inner_lb']:
            print 'lb(mu_w)=', self.__EM_lowerbound(times, Y, expectations,
                                                    **args)

        self.Sigma_w = utils.force_sym(Sigma_w)
        if args['joint_indep']:
            # Zero out cross-joint correlations by keeping only the
            # per-joint diagonal blocks.
            self.Sigma_w = utils.make_block_diag(self.Sigma_w,
                                                 args['num_joints'])
        if args['print_inner_lb']:
            print 'lb(Sigma_w)=', self.__EM_lowerbound(times, Y, expectations,
                                                       **args)

        if args['diag_sy']:
            # Keep only the diagonal of the observation noise covariance.
            self.Sigma_y = np.diag(np.diag(Sigma_y))
        else:
            self.Sigma_y = utils.force_sym(Sigma_y)
        if args['print_inner_lb']:
            print 'lb(Sigma_y)=', self.__EM_lowerbound(times, Y, expectations,
                                                       **args)

        #5) Update optional parameters
        if args['opt_basis_pars']:
            # Maximize the expected complete-data log-likelihood w.r.t. the
            # basis parameters (negated for a minimizer).
            obj = lambda pars: -self.__em_lb_likelihood(times, Y, expectations, mu_w=mu_w, \
                    Sigma_w=Sigma_w, Sigma_y=Sigma_y, basis_params=pars, q=True)
            obj_debug = lambda x: lambda_debug(obj, x, "cost")
            # NOTE(review): obj_debug and jac are computed but unused here —
            # Powell is derivative-free; they only serve the commented-out
            # gradient-based variants below.
            jac = autograd.grad(obj)
            #print "Objective at x0: ", obj(self.get_basis_pars())
            #print "Gradient at x0: ", jac(self.get_basis_pars())
            #o_basis_pars = opt.minimize(lambda x: lambda_debug(obj,x,"cost"), self.get_basis_pars(), method="CG", jac=lambda x: lambda_debug(jac,x,"grad"))
            o_basis_pars = opt.minimize(obj,
                                        self.get_basis_pars(),
                                        method="Powell")
            #o_basis_pars = opt.minimize(obj, self.get_basis_pars(), method="Nelder-Mead")
            if o_basis_pars.success:
                self.set_basis_pars(o_basis_pars.x)
            else:
                print "Warning: The optimization of the basis parameters failed. Message: ", o_basis_pars.message
            if args['print_inner_lb']:
                print 'lb(basis_params)=', self.__EM_lowerbound(
                    times, Y, expectations, **args)