Example #1
 def _rot_diff(self, state):
     # Compare against both orientations of `state` and keep the
     # smaller rotational distance.
     r_diff = util.matrix_distance(self.r, state.r)
     state.flip()
     r_diff_2 = util.matrix_distance(self.r, state.r)
     state.flip()  # restore the original orientation
     if r_diff > r_diff_2:
         r_diff = r_diff_2
     return r_diff
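The example above compares the rotation against both orientations of the other state. The helper `util.matrix_distance` is not shown in these snippets; a minimal sketch, assuming it is the Frobenius norm of the difference between two rotation matrices:

import numpy as np

def matrix_distance(m1, m2):
    # Assumed behavior: Frobenius norm of the elementwise difference,
    # zero when the two rotation matrices coincide.
    return np.linalg.norm(np.asarray(m1) - np.asarray(m2))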
Example #2
 def _rot_diff(self, bp):
     r_diff = util.matrix_distance(self.r(), bp.r())
     bp.flip()
     r_diff_2 = util.matrix_distance(self.r(), bp.r())
     bp.flip()
     if r_diff > r_diff_2:
         r_diff = r_diff_2
     return r_diff
Example #3
    def accept_score(self, node):
        best_score = 10000
        for i, bp_state in enumerate(node.cur_state.end_states):
            if i == 0:
                continue
            score = util.distance(bp_state.d, self.target.d)

            r_diff = util.matrix_distance(bp_state.r, self.target.r)
            r_diff_flip = util.matrix_distance(bp_state.r, self.target_flip.r)

            if r_diff > r_diff_flip:
                r_diff = r_diff_flip

            score += 2 * r_diff
            if score < best_score:
                best_score = score
        return best_score
Example #4
def new_score_function_new(current, end, endflip):
    #d_diff = util.distance(current.d,end.d)*.25
    d_diff = (util.distance(current.sugars[0], end.sugars[1]) + \
              util.distance(current.sugars[1], end.sugars[0]))*0.50

    if d_diff > 25:
        return d_diff

    r_diff = util.matrix_distance(current.r, end.r)
    r_diff_flip = util.matrix_distance(current.r, endflip.r)

    if r_diff > r_diff_flip:
        r_diff = r_diff_flip

    if d_diff < 0.0001:
        d_diff = 0.00001
    scale = (math.log(150 / d_diff) - 1)
    if scale > 2:
        scale = 2
    if scale < 0:
        scale = 0

    return d_diff + scale * r_diff
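The function above blends the rotational difference in only when the translation is already close: the weight log(150 / d_diff) - 1 is clamped to [0, 2], so d_diff = 5 gives roughly 2.4 (clamped to 2) while d_diff = 25 gives about 0.79. A standalone sketch of the same gating (rotation_weight is a hypothetical name):

import math

def rotation_weight(d_diff, lo=0.0, hi=2.0):
    # Weight on the rotational term: grows as the translational
    # distance shrinks, clamped to [lo, hi] as in the scoring above.
    # The floor avoids log of zero, echoing the small-d_diff guard.
    return min(hi, max(lo, math.log(150.0 / max(d_diff, 1e-5)) - 1.0))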
Example #5
def new_score_function(current, end, endflip):
    d_diff = util.distance(current.d, end.d)

    if d_diff > 25:
        return d_diff

    r_diff = util.matrix_distance(current.r, end.r)
    r_diff_flip = util.matrix_distance(current.r, endflip.r)
    print(current.r)
    print(end.r)

    if r_diff > r_diff_flip:
        r_diff = r_diff_flip

    if d_diff < 0.0001:
        d_diff = 0.00001
    scale = (math.log(150 / d_diff) - 1)
    if scale > 2:
        scale = 2
    if scale < 0:
        scale = 0

    print(d_diff, scale * r_diff, scale)
    return d_diff + scale * r_diff
Example #6
    def record(self, fname="summary.txt"):
        mst = self.mtst.to_mst()

        ranges = []
        for n in self.mtst:
            ranges.append(range(0, len(n.data.members)))

        df = pd.DataFrame(columns="alpha,beta,gamma,dist,rot_dist".split(","))

        combos = itertools.product(*ranges)
        last_combo = None
        j = 0
        org = [0, 0, 0]
        I = np.eye(3)
        for c in combos:
            if last_combo is None:
                last_combo = c

            for i in range(0, len(c)):
                if c[i] == last_combo[i]:
                    continue
                else:
                    mst.replace_state(
                        i,
                        self.mtst.get_node(i).data.members[c[i]].motif_state)

            d = mst.last_node().data.cur_state.end_states[1].d
            r = mst.last_node().data.cur_state.end_states[1].r
            euler = t.euler_from_matrix(r)
            dist = util.distance(d, org)

            rot_dist = util.matrix_distance(I, r)

            df.loc[j] = [
                euler[0] * 180 / math.pi, euler[1] * 180 / math.pi,
                euler[2] * 180 / math.pi, dist, rot_dist
            ]

            last_combo = c
            j += 1
            if j % 1000 == 0:
                print(j)

        df.to_csv("test.csv")
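record() walks every combination of member states and summarizes each resulting end state. `t.euler_from_matrix` suggests a transformations-style module; a minimal sketch of the per-combination row, assuming the `transformations` package and a Frobenius-norm matrix_distance:

import math
import numpy as np
import transformations as t  # assumed source of euler_from_matrix

def pose_row(r, d, origin=(0.0, 0.0, 0.0)):
    # Hypothetical helper mirroring the loop body above: Euler angles
    # in degrees, distance from the origin, and rotational distance
    # to the identity.
    alpha, beta, gamma = t.euler_from_matrix(r)
    dist = np.linalg.norm(np.asarray(d) - np.asarray(origin))
    rot_dist = np.linalg.norm(np.eye(3) - np.asarray(r))
    return [math.degrees(alpha), math.degrees(beta), math.degrees(gamma),
            dist, rot_dist]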
Example #7
    def learn(self,
              Observations,
              Actions,
              W,
              mu,
              trueA=None,
              trueB=None,
              onlyC=0,
              criterion=1e-3,
              nIter=None,
              print_freq=-1):
        nSubjects, nSamples = Observations.shape
        Xi = np.zeros((nSubjects, self.nStates, self.nStates, nSamples - 1))
        Gamma = np.zeros((nSubjects, self.nStates, nSamples))
        RECORD = {
            'pi': [],
            'A': [],
            'B': [],
            'C': [],
            'loglikelihood': None,
            'Q': None,
            'dist_A': None,
            'dist_B': None,
            'bias_A': None,
            'bias_B': None,
            'KL_A': None,
            'KL_B': None
        }

        D = np.diag(W.sum(axis=1))
        L = D - W

        itr = 0
        done = 0
        err1, err2, err3, err4 = 0, 0, 0, 0
        LL, Q, DISTA, DISTB, BIASA, BIASB, KLA, KLB = [], [], [], [], [], [], [], []
        while not done:
            ll, q = [], []
            for i in range(nSubjects):
                pi_ = util.distribute(self.C[i], self.pi)
                A_ = util.distribute(self.C[i], self.A)
                B_ = util.distribute(self.C[i], self.B)
                alpha, beta = util.forward_backward(Observations[i],
                                                    Actions[i], pi_, A_, B_)
                ll.append(np.log(util.elim_zero(alpha[:, -1].sum())))
                xi, gamma = util.exp_counts(Observations[i], Actions[i], A_,
                                            B_, alpha, beta)
                Xi[i] = xi
                Gamma[i] = gamma
            if not onlyC:
                newpi, newA, newB = self.update1(Observations, Actions, Xi,
                                                 Gamma)
                err1 = np.max(np.abs(self.pi - newpi))
                err2 = np.max(np.abs(self.A - newA))
                err3 = np.max(np.abs(self.B - newB))
                self.A[:], self.B[:], self.pi[:] = newA, newB, newpi

                RECORD['pi'].append(newpi)
                RECORD['A'].append(newA)
                RECORD['B'].append(newB)

            for i in range(nSubjects):
                pi_ = util.distribute(self.C[i], self.pi)
                A_ = util.distribute(self.C[i], self.A)
                B_ = util.distribute(self.C[i], self.B)
                alpha, beta = util.forward_backward(Observations[i],
                                                    Actions[i], pi_, A_, B_)
                # ll.append(np.log(alpha[:, -1].sum()))
                xi, gamma = util.exp_counts(Observations[i], Actions[i], A_,
                                            B_, alpha, beta)
                Xi[i] = xi
                Gamma[i] = gamma
            newC = self.update2(Observations, Actions, W, mu, Xi, Gamma)

            err4 = np.max(np.abs(self.C - newC))
            self.C[:] = newC
            regularity = mu * np.trace(self.C.T.dot(L).dot(self.C))
            for i in range(nSubjects):
                _q = np.dot(
                    Gamma[i, :, 0],
                    np.log(util.elim_zero(util.distribute(self.C[i],
                                                          self.pi))))  # for pi
                _q += np.sum(
                    Xi[i].sum(axis=2) *
                    np.log(util.elim_zero(util.distribute(self.C[i],
                                                          self.A))))  # for A
                for omega in range(self.nLevels):
                    _q += np.sum(
                        np.sum(Gamma[i, :, Observations[i] == omega]) * np.log(
                            util.elim_zero(
                                util.distribute(
                                    self.C[i], self.B[:, :, omega]))))  # for B
                q.append(_q - regularity / nSubjects)  # regularization term
            LL.append(ll)
            Q.append(q)
            if trueA is not None:
                distA, distB, biasA, biasB, klA, klB = [], [], [], [], [], []
                for i in range(nSubjects):
                    distA.append(
                        util.matrix_distance(
                            util.distribute(self.C[i], self.A[:, 0, :, :]),
                            trueA(i)))
                    distB.append(
                        util.matrix_distance(
                            util.distribute(self.C[i], self.B), trueB(i)))
                    biasA.append(
                        util.matrix_bias(
                            util.distribute(self.C[i], self.A[:, 0, :, :]),
                            trueA(i)))
                    biasB.append(
                        util.matrix_bias(util.distribute(self.C[i], self.B),
                                         trueB(i)))
                    klA.append(
                        util.matrix_KL(
                            util.distribute(self.C[i], self.A[:, 0, :, :]),
                            trueA(i)))
                    klB.append(
                        util.matrix_KL(util.distribute(self.C[i], self.B),
                                       trueB(i)))
                DISTA.append(distA)
                DISTB.append(distB)
                BIASA.append(biasA)
                BIASB.append(biasB)
                KLA.append(klA)
                KLB.append(klB)

            RECORD['C'].append(newC)
            if print_freq > 0:
                if (itr + 1) % print_freq == 0:
                    print("%3d %.4f %.4f %.4f %.4f; %.4f %.4f %.4f %.4f" %
                          (itr + 1, err1, err2, err3, err4, np.mean(ll),
                           np.mean(ll) - regularity / nSubjects,
                           np.mean(q) + regularity / nSubjects, np.mean(q)),
                          end=" ")
                    if trueA is not None:
                        print("%.4f %.4f %.4f %.4f %.4f %.4f" %
                              (np.mean(distA), np.mean(biasA), np.mean(klA),
                               np.mean(distB), np.mean(biasB), np.mean(klB)))
                    else:
                        print()
            if nIter is not None and itr > nIter:
                done = 1
            if max(err1, err2, err3, err4) < criterion:
                done = 1
            itr += 1
        RECORD['loglikelihood'] = np.array(LL).T
        RECORD['Q'] = np.array(Q).T
        RECORD['dist_A'] = np.array(DISTA).T
        RECORD['dist_B'] = np.array(DISTB).T
        RECORD['bias_A'] = np.array(BIASA).T
        RECORD['bias_B'] = np.array(BIASB).T
        RECORD['KL_A'] = np.array(KLA).T
        RECORD['KL_B'] = np.array(KLB).T
        return RECORD
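The regularity term mu * trace(C^T L C) above uses the graph Laplacian L = D - W; for symmetric W it equals (mu / 2) * sum over i, j of W[i, j] * ||C[i] - C[j]||^2, pulling subjects that are connected in W toward similar mixing weights. A minimal sketch verifying that identity (laplacian_penalty is a hypothetical name):

import numpy as np

def laplacian_penalty(C, W, mu):
    # mu * trace(C^T L C) with L = D - W, as in the loop above.
    L = np.diag(W.sum(axis=1)) - W
    return mu * np.trace(C.T @ L @ C)

# Quick check of the pairwise identity on random symmetric weights:
rng = np.random.default_rng(0)
W = rng.random((4, 4)); W = (W + W.T) / 2; np.fill_diagonal(W, 0)
C = rng.random((4, 3))
pairwise = 0.5 * sum(W[i, j] * np.sum((C[i] - C[j]) ** 2)
                     for i in range(4) for j in range(4))
assert np.isclose(laplacian_penalty(C, W, 1.0), pairwise)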
Example #8
    def learn(self,
              Observations,
              Actions,
              pi0=None,
              A0=None,
              B0=None,
              trueA=None,
              trueB=None,
              nIter=1000,
              criterion=1e-3,
              print_freq=-1):
        nSubjects, nSamples = Observations.shape
        Xi = np.zeros((nSubjects, self.nStates, self.nStates, nSamples - 1))
        Gamma = np.zeros((nSubjects, self.nStates, nSamples))
        RECORD = {
            'loglikelihood': [],
            'A': [],
            'B': [],
            'dist_A': None,
            'dist_B': None
        }

        itr = 0
        done = 0
        LL, DISTA, DISTB = [], [], []
        if pi0 is not None:
            pi, A, B = pi0, A0, B0
        else:
            pi, A, B = self.pi, self.A, self.B
        while not done:
            ll = []
            for i in range(nSubjects):
                alpha, beta = util.forward_backward(Observations[i],
                                                    Actions[i], pi[i], A, B)
                ll.append(np.log(alpha[:, -1].sum()))
                xi, gamma = util.exp_counts(Observations[i], Actions[i], A, B,
                                            alpha, beta)
                Xi[i] = xi
                Gamma[i] = gamma
            newpi, newA, newB = self.update(Observations, Actions, Xi, Gamma)
            err1 = np.max(np.abs(pi - newpi))
            err2 = np.max(np.abs(A - newA))
            err3 = np.max(np.abs(B - newB))
            LL.append(ll)
            if trueA is not None:
                distA = 0
                for a in range(self.nActions):
                    distA += util.matrix_distance(newA[a], trueA[a])
                distB = util.matrix_distance(newB, trueB)
                DISTA.append(distA)
                DISTB.append(distB)
            # update RECORD
            RECORD['A'].append(newA)
            RECORD['B'].append(newB)
            if print_freq > 0:
                if (itr + 1) % print_freq == 0:
                    print("%4d %.4f %.4f %.4f %.4f " %
                          (itr + 1, err1, err2, err3, np.mean(ll)),
                          end=" ")
                    if trueA is not None:
                        print("%.4f" % (distA), end=" ")
                        print("%.4f" % (distB), end=" ")
                    print()
            A[:], B[:], pi[:] = newA, newB, newpi

            if itr > nIter:
                done = 1
            if err1 < criterion and err2 < criterion and err3 < criterion:
                done = 1
            itr += 1
        RECORD['loglikelihood'] = np.array(LL).T
        RECORD['dist_A'] = np.array(DISTA).T
        RECORD['dist_B'] = np.array(DISTB).T
        return pi, A, B, RECORD
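Each learn() loop stops once the largest absolute parameter change falls below criterion (or the iteration budget runs out). The same max-abs-difference test as a small standalone helper (converged is a hypothetical name):

import numpy as np

def converged(old, new, criterion=1e-3):
    # EM stopping test used above: every entry of the parameter
    # array moved by less than `criterion` this iteration.
    return np.max(np.abs(np.asarray(old) - np.asarray(new))) < criterion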
Example #9
    def learn(self, Observations, W, mu, trueA=None, onlyC=0, criterion=1e-3, nIter=1000, print_freq=-1):
        nSubjects, nSamples = Observations.shape
        # pi, A, C = self.pi, self.A, self.C
        RECORD = {'pi': [], 'A': [], 'C': [],
                  'loglikelihood': None,
                  'dist_A': None, 'bias_A': None, 'KL_A': None}

        D = np.diag(W.sum(axis=1))
        L = D - W

        itr = 0
        done = 0
        err1, err2, err3 = 0, 0, 0
        LL, DISTA, BIASA, KLA = [], [], [], []
        while not done:
            # STEP 1
            if not onlyC:
                newpi, newA = self.update1(Observations)
                err1 = np.max(np.abs(self.pi - newpi))
                err2 = np.max(np.abs(self.A - newA))
                self.A[:], self.pi[:] = newA, newpi
                RECORD['pi'].append(newpi)
                RECORD['A'].append(newA)
            # STEP 2
            newC = self.update2(Observations, W, mu)
            err3 = np.max(np.abs(self.C - newC))
            self.C[:] = newC

            regularity = mu * np.trace(self.C.T.dot(L).dot(self.C))
            ll = []
            for i in range(nSubjects):
                ll.append(MC_likelihood(util.distribute(self.C[i], self.pi),
                                        util.distribute(self.C[i], self.A),
                                        Observations[i]))
            LL.append(ll)
            if trueA is not None:
                distA, biasA, klA = [], [], []
                for i in range(nSubjects):
                    distA.append(util.matrix_distance(util.distribute(self.C[i], self.A), trueA(i)))
                    biasA.append(util.matrix_bias(util.distribute(self.C[i], self.A), trueA(i)))
                    klA.append(util.matrix_KL(util.distribute(self.C[i], self.A), trueA(i)))
                DISTA.append(distA)
                BIASA.append(biasA)
                KLA.append(klA)

            RECORD['C'].append(newC)
            if print_freq > 0:
                if (itr + 1) % print_freq == 0:
                    print("%3d %.4f %.4f %.4f; %.4f %.4f"
                          % (itr + 1, err1, err2, err3,
                             np.mean(ll), np.mean(ll) - regularity / nSubjects), end=" ")
                    if trueA is not None:
                        print("%.4f %.4f %.4f" %(np.mean(distA), np.mean(biasA), np.mean(klA)))
                    else:
                        print()
            if itr > nIter - 2:
                done = 1
            if max(err1, err2, err3) < criterion:
                done = 1
            itr += 1
        RECORD['loglikelihood'] = np.array(LL).T
        RECORD['dist_A'] = np.array(DISTA).T
        RECORD['bias_A'] = np.array(BIASA).T
        RECORD['KL_A'] = np.array(KLA).T
        return RECORD
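Examples #7 and #9 also score the recovered matrices with util.matrix_bias and util.matrix_KL alongside util.matrix_distance (sketched after Example #1). Their definitions are not shown; plausible sketches, assumptions only:

import numpy as np

def matrix_bias(est, true):
    # Assumed: mean signed deviation; positive means overestimation.
    return np.mean(np.asarray(est) - np.asarray(true))

def matrix_KL(est, true, eps=1e-12):
    # Assumed: KL divergence of the true rows from the estimated
    # rows, summed over the (row-stochastic) matrices.
    p = np.clip(np.asarray(true), eps, None)
    q = np.clip(np.asarray(est), eps, None)
    return np.sum(p * np.log(p / q))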