    def __init__(self, vars, nObs, T, N, observed_jumps, model=None):
        # DES Temp:
        self.logp = []
        self.nObs = nObs
        self.T = T
        self.N = N
        self.zeroIndices = np.roll(self.T.cumsum(), 1)
        self.zeroIndices[0] = 0
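        # zeroIndices[n] is the starting row of subject n in the flattened
        # (nObs-long) arrays.  Illustration (assumed values, not from the
        # original code): if T = [3, 2, 4] then T.cumsum() = [3, 5, 9],
        # np.roll(..., 1) = [9, 3, 5], and after zeroing the first entry
        # zeroIndices = [0, 3, 5].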
        # self.max_obs = max_obs

        model = modelcontext(model)
        vars = inputvars(vars)
        shared = make_shared_replacements(vars, model)

        super(ForwardS, self).__init__(vars, shared)

        self.observed_jumps = observed_jumps
        step_sizes = np.sort(np.unique(observed_jumps))
        self.step_sizes = step_sizes = step_sizes[step_sizes > 0]
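        # step_sizes holds the distinct positive gaps between observation
        # times; e.g. (illustrative values) observed_jumps = [1, 1, 2, 0, 3]
        # gives step_sizes = [1, 2, 3], with the zero entries dropped.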

        # map the unconstrained (transformed) shared variables back to their
        # natural supports: pi via the stick-breaking transform, Q via the
        # one-way rate-matrix transform, B0 and B via the log-odds transform
        pi = stick_breaking.backward(self.shared["pi_stickbreaking"])
        # bounds for the rate-matrix transform, read from the corresponding free RV
        lower = model.free_RVs[1].distribution.dist.lower
        upper = model.free_RVs[1].distribution.dist.upper
        Q = rate_matrix_one_way(lower, upper).backward(self.shared["Q_ratematrixoneway"])
        B0 = logodds.backward(self.shared["B0_logodds"])
        B = logodds.backward(self.shared["B_logodds"])
        X = self.shared["X"]

        # at this point parameters are still symbolic so we
        # must create get_params function to actually evaluate them
        self.get_params = evaluate_symbolic_shared(pi, Q, B0, B, X)
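# Note: evaluate_symbolic_shared is defined elsewhere in this module.  A
# minimal sketch, assuming it only compiles the symbolic Theano expressions
# into a single callable that returns their current numeric values (the
# actual implementation may differ):
import theano

def evaluate_symbolic_shared(*symbolic_params):
    # Compile a no-argument Theano function; the expressions close over the
    # shared variables created by make_shared_replacements, so calling the
    # function returns their current numeric values.
    return theano.function([], list(symbolic_params))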
# Example #2: ForwardX.__init__
    def __init__(self, vars, N, T, K, D, Dd, O, nObs, model=None):
        # DES Temp:
        self.logp = []
        self.N = N
        self.T = T
        self.K = K
        self.D = D
        self.Dd = Dd
        self.O = O
        self.nObs = nObs
        #self.max_obs = max_obs
        self.zeroIndices = np.roll(self.T.cumsum(),1)
        self.zeroIndices[0] = 0

        #self.pos_O_idx = np.zeros((D,max_obs,N), dtype=np.bool_)
        #for n in xrange(N):
        #    for t in xrange(self.T[n]):
        #        self.pos_O_idx[:,t,n] = np.in1d(np.arange(self.D), self.O[:,t,n])

        #self.OO = np.zeros((self.nObs,self.Dd),dtype=np.int)
        #self.OO = np.zeros((self.Dd,self.N,self.max_obs),dtype=np.int)
        # negMask[i, d] == 1 when code d was NOT among the codes observed at
        # flattened visit i; posMask marks the valid (non-padding, != -1)
        # entries of O
        self.negMask = np.zeros((self.nObs, D), dtype=int)
        # self.negMask = np.zeros((self.N, self.max_obs, D), dtype=int)
        for n in range(self.N):
            n0 = self.zeroIndices[n]
            for t in range(self.T[n]):
            # for t in range(self.max_obs):
                # self.OO[n0+t,:] = self.O[n0+t,:]
                self.negMask[n0 + t, :] = 1 - np.in1d(np.arange(self.D), self.O[n0 + t, :]).astype(int)
        self.posMask = (self.O != -1).astype(int)
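        # Illustration (assumed shapes, not from the original code): with
        # D = 4 codes and an observation row O[i, :] = [0, 2, -1] (padded
        # with -1), negMask[i, :] = [0, 1, 0, 1] flags the codes that were
        # NOT observed at that visit, while posMask[i, :] = [1, 1, 0] flags
        # the valid (non-padding) entries of O[i, :].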

        #self.betaMask = np.zeros((max_obs,N,2))
        #for n in range(self.N):
        #    self.betaMask[:(T[n]-1),n,:] = 1

        model = modelcontext(model)
        vars = inputvars(vars)
        shared = make_shared_replacements(vars, model)

        super(ForwardX, self).__init__(vars, shared)

        # map the unconstrained (transformed) shared variables back to their
        # natural supports; Z's transform is looked up on the model variable
        # itself rather than referenced by name
        S = self.shared['S']
        B0 = logodds.backward(self.shared['B0_logodds'])
        B = logodds.backward(self.shared['B_logodds'])

        Z = model.vars[6].distribution.transform_used.backward(self.shared['Z_anchoredbeta'])
        # Z = anchoredbeta.backward(self.shared['Z_anchoredbeta'])
        # Z = logodds.backward(self.shared['Z_logodds'])
        L = logodds.backward(self.shared['L_logodds'])

        #at this point parameters are still symbolic so we
        #must create get_params function to actually evaluate them
        self.get_params = evaluate_symbolic_shared(S, B0, B, Z, L)
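# A hedged usage sketch (an assumption, not shown in the original file): inside
# the step method's per-iteration update the compiled function is called with
# no arguments and returns the current numeric parameter values, e.g.
#
#     S, B0, B, Z, L = self.get_params()
#
# after which the sampling update works with plain NumPy arrays.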