def __init__(self, sim, final_momentum=0.9, initial_momentum=0.5, momentum_switchover=5, lr_s=1e-6, lr_nu=1e-2, lr_Yslack=1e-2, maxIter=1000, initS=0.0, initSviaLineSearch=True):
     self.initSviaLineSearch=initSviaLineSearch
     self.sim=sim
     self.initYslack=0
     self.n=self.sim.N*2
     self.theta = self.sim.theta / (self.sim.L / self.sim.winSize)
     self.initC0 = np.ones(self.sim.numReplicates,dtype=floatX)*logit(sim.X0.min())
     self.Times=np.tile(sim.getGenerationTimes(),(self.sim.numReplicates,1)).T.astype(np.float32)
     self.momentum_ = T.scalar()
     self.final_momentum = final_momentum
     self.initial_momentum = initial_momentum
     self.momentum_switchover = momentum_switchover
     self.W = 3
     self.lr_s = lr_s
     self.lr_theta = lr_Yslack  # learning rate used for the Yslack updates below
     self.lr_nu = lr_nu
     self.maxIter = maxIter
     self.initS = initS
     self.lrS_ = T.scalar()
     self.lrNu_ = T.scalar()
     self.lrTheta_ = T.scalar()
     self.target_ = T.matrix()
     self.times_ = T.fmatrix("times")
     self.theta_ = T.scalar()
     self.Yslack__ = theano.shared(np.asarray(0, dtype=floatX), 'Yslack')
     self.n_ = T.scalar("n")
     self.S__=theano.shared(np.asarray(self.initS, dtype = floatX))
     self.c__=theano.shared(self.initC0, 'c')
     self.weightUpdateS__ = theano.shared(np.asarray(0, dtype = floatX))
     self.weightUpdatec__ = theano.shared(np.zeros(self.sim.numReplicates, dtype = floatX))
     self.weightUpdateYslack__ = theano.shared(np.asarray(0, dtype = floatX))
     self.pred_= Z(sig_(0.5*self.S__*self.times_ + self.c__),self.n_,self.theta_) + self.Yslack__
     self.Feedforward_ = theano.function(inputs=[self.times_,self.n_,self.theta_], outputs=self.pred_)
     self.cost_=0
     for j in range(self.sim.numReplicates):
         self.cost_ += 0.5*((self.target_[:,j] - self.pred_[:,j])**2).sum()
     self.Loss_ = theano.function(inputs=[self.target_,self.pred_], outputs=self.cost_)
     self.gS_,self.gc_, self.gYslack_ = T.grad(self.cost_, [self.S__,self.c__, self.Yslack__])
     
     self.updatesS = [(self.weightUpdateS__, self.momentum_ * self.weightUpdateS__ - self.lrS_ * self.gS_),
                      (self.S__, self.S__ + self.momentum_ * self.weightUpdateS__ - self.lrS_ * self.gS_)]
     self.updatesc = [(self.weightUpdatec__, self.momentum_ * self.weightUpdatec__ - self.lrNu_ * self.gc_),
                      (self.c__, self.c__ + self.momentum_ * self.weightUpdatec__ - self.lrNu_ * self.gc_)]
     self.updatesYslack = [(self.weightUpdateYslack__, self.momentum_ * self.weightUpdateYslack__ - self.lrTheta_ * self.gYslack_),
                           (self.Yslack__, self.Yslack__ + self.momentum_ * self.weightUpdateYslack__ - self.lrTheta_ * self.gYslack_)]
     self.updates = self.updatesc + self.updatesS + self.updatesYslack
     self.Objective_ = theano.function([ self.target_, self.lrS_, self.lrNu_, self.lrTheta_, self.times_,self.momentum_,self.n_,self.theta_], self.cost_, on_unused_input='warn',updates=self.updates,allow_input_downcast=True)
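The constructor above only builds the Theano graph and compiles the `Objective_` step; the gradient-descent driver itself is not part of this snippet. Below is a minimal sketch of how the compiled function would typically be called, assuming the momentum switches from `initial_momentum` to `final_momentum` once the iteration count passes `momentum_switchover`; the names `fitSketch`, `obj` and `Y` are hypothetical stand-ins, not taken from the original code.

def fitSketch(obj, Y):
    # obj: an instance of the class above; Y: observed (timepoints x replicates) matrix
    for it in range(obj.maxIter):
        momentum = obj.final_momentum if it > obj.momentum_switchover else obj.initial_momentum
        # Objective_ returns the current cost and applies the momentum steps
        # to S__, c__ and Yslack__ in place via the `updates` list
        cost = obj.Objective_(Y, obj.lr_s, obj.lr_nu, obj.lr_theta, obj.Times,
                              momentum, obj.n, obj.theta)
    return obj.S__.get_value(), obj.c__.get_value(), obj.Yslack__.get_value()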
Example #2
 def __init__(self, initNu0, final_momentum=0.9, initial_momentum=0.5, momentum_switchover=5, times=[10, 20, 30, 40, 50], S=3, lr=1e-2, maxIter=10000, initS=0.0, numReplicates=3):
     times=np.array(times)
     self.numReplicates=numReplicates
     self.initC0 = np.ones(self.numReplicates,dtype=floatX)*logit(initNu0)
     self.times=times[times!=0].astype(np.float32)
     self.times=np.tile(self.times,(self.numReplicates,1)).T.astype(np.float32)
     self.momentum_ = T.scalar()
     self.final_momentum = final_momentum
     self.initial_momentum = initial_momentum
     self.momentum_switchover = momentum_switchover
     self.W = 3
     self.lr = lr
     self.maxIter = maxIter
     self.initS = initS
     self.lr_ = T.scalar()
     self.target_ = T.matrix()
     self.times_ = T.fmatrix("times")
     self.S__=theano.shared(np.asarray(self.initS, dtype = floatX), 'S')
     self.c__=theano.shared(self.initC0, 'c')
     self.weightUpdateS__ = theano.shared(np.asarray(0, dtype = floatX))
     self.weightUpdatec__ = theano.shared(np.zeros(self.numReplicates, dtype = floatX))
     self.pred_= sig_(0.5*self.S__*self.times_ + self.c__)
     self.Feedforward_ = theano.function(inputs=[self.times_], outputs=self.pred_)
     self.cost_=0
     for j in range(self.numReplicates):
         self.cost_ += 0.5*((self.target_[:,j] - self.pred_[:,j])**2).sum()
     self.Loss_ = theano.function(inputs=[self.target_,self.pred_], outputs=self.cost_)
     self.gS_,self.gc_ = T.grad(self.cost_, [self.S__,self.c__])
     
     self.updatesS = [(self.weightUpdateS__, self.momentum_ * self.weightUpdateS__ - self.lr_ * self.gS_),
                      (self.S__, self.S__ + self.momentum_ * self.weightUpdateS__ - self.lr_ * self.gS_)]
     self.updatesc = [(self.weightUpdatec__, self.momentum_ * self.weightUpdatec__ - self.lr_ * self.gc_),
                      (self.c__, self.c__ + self.momentum_ * self.weightUpdatec__ - self.lr_ * self.gc_)]
     self.updates = self.updatesS + self.updatesc
     self.Objective_ = theano.function([ self.target_, self.lr_,self.times_,self.momentum_], self.cost_, on_unused_input='warn',updates=self.updates,allow_input_downcast=True)
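For reference, here is a self-contained, runnable sketch of the same momentum-SGD-in-Theano pattern that both constructors use, written against synthetic data; the generating values (s=0.05, c=-2) and all hyperparameters below are made up for illustration and are not taken from the original code.

import numpy as np
import theano
import theano.tensor as T

floatX = theano.config.floatX
np_sigmoid = lambda z: 1. / (1. + np.exp(-z))

# Synthetic single-replicate data from the logistic model nu(t) = sigmoid(s*t/2 + c)
t = np.array([[10.], [20.], [30.], [40.], [50.]], dtype=np.float32)
y = np_sigmoid(0.05 * t / 2 - 2.0).astype(floatX)

# Shared parameters and their momentum accumulators, as in the classes above
S = theano.shared(np.asarray(0.0, dtype=floatX), 'S')
c = theano.shared(np.asarray(-1.0, dtype=floatX), 'c')
dS = theano.shared(np.asarray(0.0, dtype=floatX))
dc = theano.shared(np.asarray(0.0, dtype=floatX))

times_, target_, lr_, mom_ = T.fmatrix(), T.matrix(), T.scalar(), T.scalar()
pred_ = T.nnet.sigmoid(0.5 * S * times_ + c)
cost_ = 0.5 * ((target_ - pred_) ** 2).sum()
gS, gc = T.grad(cost_, [S, c])
updates = [(dS, mom_ * dS - lr_ * gS), (S, S + mom_ * dS - lr_ * gS),
           (dc, mom_ * dc - lr_ * gc), (c, c + mom_ * dc - lr_ * gc)]
step = theano.function([target_, lr_, times_, mom_], cost_,
                       updates=updates, allow_input_downcast=True)

for it in range(10000):
    momentum = 0.9 if it > 5 else 0.5
    step(y, 1e-2, t, momentum)
print(S.get_value(), c.get_value())  # should end up close to the generating values s=0.05, c=-2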
Example #3
def getErr(i=96, s=0.01):
    sim = Simulation.Simulation.load(
        s=s,
        nu0=0.005,
        experimentID=i,
        ModelName="TimeSeries",
        numReplicates=10,
        step=10,
        startGeneration=0,
        maxGeneration=50,
    )
    bot = pd.read_pickle(home + "out/BottleneckedNeutrals.NotNormalized.df")[i].loc[[0, 10, 20, 30, 40, 50]]
    td = Estimate.Estimate.getEstimate(sim.X, n=200, method="tajimaD", removeFixedSites=True, normalizeTajimaD=False)
    td.index = sim.getTrueGenerationTimes()
    ctd = td.apply(lambda x: (x - bot).diff().iloc[1:])
    ctd[leaveOneOut(ctd)] = None
    ctd = ctd.apply(regularize)
    t = sim.getTrueGenerationTimes()
    nu = sig(s * t / 2 + logit(0.1))
    a = Estimate.Estimate.getEstimate(sim.X0, n=200, method="pi")
    b = -Estimate.Estimate.getEstimate(sim.X0, n=200, method="watterson") / (1.0 / np.arange(1, 201)).sum()
    f = pd.Series(b * np.log(1 - nu) - a * nu ** 2, index=t).diff().iloc[1:]
    # ctd.mean(1).plot(color='b',legend=False);f.plot(linewidth=3,color='k');
    # ctds.mean(1).plot(ax=plt.gca(),color='r',legend=False);
    return (ctd.mean(1).sum() - f.mean()) / neurtrality(i, s)
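The reference curve f in getErr comes from the deterministic logistic sweep and the approximate expectation of the statistic. A numpy-only sketch of that calculation is below; a and b are placeholder constants here, whereas in getErr they are estimated from sim.X0 via Estimate.getEstimate (pi, and the negative scaled Watterson estimate).

import numpy as np

def logistic_reference(t, s, nu0, a, b):
    logit = lambda p: np.log(p / (1 - p))
    sigmoid = lambda z: 1 / (1 + np.exp(-z))
    nu = sigmoid(s * t / 2 + logit(nu0))       # deterministic sweep trajectory
    f = b * np.log(1 - nu) - a * nu ** 2       # expected statistic along the sweep
    return np.diff(f)                          # consecutive differences, as in f.diff().iloc[1:]

# placeholder a, b; sampling times match getErr's generations 0..50 in steps of 10
print(logistic_reference(np.arange(0, 60, 10), s=0.01, nu0=0.1, a=0.01, b=-0.008))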
 def setSIM(self, sim): 
     if sim is not None:  
         self.sim=sim
         self.numReplicates=self.sim.numReplicates
         self.n=self.sim.N*2
          self.initTheta = self.sim.theta / (self.sim.L / self.sim.winSize)
         self.initC0 = np.ones(self.numReplicates,dtype=floatX)*logit(sim.X[0].mean(1).min())
         self.Times=np.tile(sim.getGenerationTimes(),(self.numReplicates,1)).T.astype(np.float32)
         self.replicateIndex=range(self.numReplicates)
     else:
         self.initC0 = np.ones(self.numReplicates,dtype=floatX)
     
     try:
         self.reset()
      except Exception:
          # reset() may fail or be unavailable before the object is fully configured; ignore
          pass
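As a standalone illustration of the Times layout built in setSIM (the values of gens and numReplicates below are made up; sim.getGenerationTimes() supplies the real sampling times):

import numpy as np

gens = np.array([10, 20, 30, 40, 50])   # hypothetical generation times
numReplicates = 3
Times = np.tile(gens, (numReplicates, 1)).T.astype(np.float32)
print(Times.shape)                       # (5, 3): rows are time points, columns are replicates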
Example #5
def load2(iii=96, s=0.01):
    sim = Simulation.Simulation.load(
        s=s,
        nu0=0.005,
        experimentID=iii,
        ModelName="TimeSeries",
        numReplicates=10,
        step=10,
        startGeneration=0,
        maxGeneration=50,
    )
    simn = Simulation.Simulation.load(
        s=0,
        nu0=0.005,
        experimentID=iii,
        ModelName="TimeSeries",
        numReplicates=10,
        step=10,
        startGeneration=0,
        maxGeneration=50,
    )
    bot = pd.read_pickle(home + "out/BottleneckedNeutrals.NotNormalized.df")[iii].loc[[0, 10, 20, 30, 40, 50]]
    td = Estimate.Estimate.getEstimate(
        sim.X, n=200, method="tajimaD", removeFixedSites=True, normalizeTajimaD=False
    ).mean(1)
    td.index = sim.getTrueGenerationTimes()
    tdn = Estimate.Estimate.getEstimate(
        simn.X, n=200, method="tajimaD", removeFixedSites=True, normalizeTajimaD=False
    ).mean(1)
    tdn.index = simn.getTrueGenerationTimes()
    ctd = [td - bot, regularize2(td - bot), regularize3(td - bot)]
    ctdn = [tdn - bot, regularize2(tdn - bot), regularize3(tdn - bot)]

    t = sim.getTrueGenerationTimes()
    nu = sig(s * t / 2 + logit(0.005))
    a = Estimate.Estimate.getEstimate(sim.X0, n=200, method="pi")
    b = -Estimate.Estimate.getEstimate(sim.X0, n=200, method="watterson") / (1.0 / np.arange(1, 201)).sum()
    D0 = Estimate.Estimate.getEstimate(sim.X0, n=200, method="tajimaD", normalizeTajimaD=False)
    Dt = pd.Series(b * np.log(1 - nu) - a * nu ** 2, index=t)
    return list(ctd), list(ctdn), Dt
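One possible way to consume load2's output; the matplotlib plotting below is an assumption added for illustration (it mirrors the commented-out plotting in getErr) and is not part of the original snippet.

import matplotlib.pyplot as plt

ctd, ctdn, Dt = load2(iii=96, s=0.01)
labels = ["raw", "regularize2", "regularize3"]
for series, label in zip(ctd, labels):
    series.plot(label="selected " + label)
for series, label in zip(ctdn, labels):
    series.plot(label="neutral " + label, linestyle="--")
Dt.plot(label="analytic Dt", color="k", linewidth=3)
plt.legend()
plt.show()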