Example #1
m_gam2 = r(gamcall)

ranova = r('anova')

print(r['summary'](m_gam2))
anovares = ranova(m_gam, m_gam2, test="Chisq")
print(anovares)     # the ANOVA correctly detects a better fit when the conditions are fitted separately
print(anovares[4])  # p-value
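# Hedged alternative (a sketch, assuming rpy2's robjects interface and that the
# ANOVA table carries a "Pr(>Chi)" column, as R produces for test = "Chisq"):
# extracting the p-value by column name is less fragile than a positional index.
pr_chi = anovares.rx2('Pr(>Chi)')
print(pr_chi[1])   # row 0 is NA for the reference model; row 1 holds the comparison's p-value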

def predict(i):
    r('m0.pred <- predict(m.gam2, newdata = nd%d, type = "link")' % i)  # predict from the m.gam2 object, using new data
    r('m0.pred <- m0.pred - mean(m0.pred)')  # centering at zero 
    #m_zero = r('m0.pred + mean(c(0, coef(m0.glm)))')  # zero as anchor
    m_zero = r('m0.pred - m0.pred[1]')                # zero as anchor 
    return np.array(m_zero)
    

mzeros = np.zeros((len(svec), len(files)))
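# one column per data file: each column holds the anchored GAM prediction for that file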
for i in range(len(files)):
    mzeros[:,i] = predict(i)

##
plt.figure()
plt.plot(svec, m_zero, label="GAM all", linewidth=2)
for i in range(len(files)):
    plt.plot(svec, mzeros[:,i], label="GAM %d" % i, linewidth=2)
    mlds.plotscale(objs[i], observer="GLM %d" % i, marker='o', linewidth=0)
plt.legend(loc=2)
plt.show()

Example #2
glms=[]
for f in files:
    O = mlds.MLDSObject(f, boot=True, standardscale=st, save=True)
    #O.parallel = True
    #O.run()
    O.load()
    glms.append(O)
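# Note (an inference from this snippet, not documented here): O.load() retrieves
# fits saved by an earlier run (save=True above); on fresh data the commented-out
# O.run() would perform the fit and bootstrap first.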

obsGAM = mlds_gam.MLDSGAMCompare(files, standardscale=st)
obsGAM.run()

pval = obsGAM.anovares[4]
print(pval)
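# (A small p-value here indicates that fitting a separate scale per condition
#  improves on a single common scale, as in the ANOVA comparison of Example #1.)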

fig, ax = plt.subplots()   # create the figure and axes referenced below
l = ax.plot(obsGAM.stim, obsGAM.scales, linewidth=2)

for o, obs in enumerate(glms):
    plotscale(obs, r'$\gamma = %.2f$' % gammas[o], l[o].get_color(), linewidth=0, marker='o')
    #plt.plot(obs.stim, obs.scale, linewidth=0, marker='o', markerfacecolor = l[o].get_color(), label='$\gamma = %.2f$' % gammas[o])

ax = setplotproperties(ax)   # setplotproperties: helper presumably defined elsewhere in the original script
plt.legend(loc=2, frameon=False)
plt.title('Example using power fn', {'fontsize': 10})
fig.savefig('example.pdf')
plt.show()
Example #3
######
# 3. Now we simulate the observer performing the method of triads.
# The simulated results are stored in the file fname
# (Reduce nblocks to 1 if you just want to see the result format.)
fname = mlds.simulateobserver(fn, stim, nblocks=15)
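# Optional sanity check (a sketch; it assumes only that fname points to a
# plain-text file, whatever column layout mlds.simulateobserver writes):
# print the first few lines to inspect the format of the simulated responses.
with open(fname) as f:
    for line in f.readlines()[:5]:
        print(line.rstrip())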


#######
# 4. Finally, we want to estimate the scale from the simulated data.
obs = mlds.MLDSObject(fname, boot=True, standardscale=False, verbose=True)
obs.load()  # this takes a while, as the bootstrap is computed
obs.printinfo()

# we can plot the scale
# the shape of the scale should coincide with a power function with exponent 2
mlds.plotscale(obs)
plt.xlim([-0.1, 1.1])
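# Optional visual check (a sketch, assuming numpy is imported as np as in
# Example #1, the stimulus values span [0, 1], and obs.stim / obs.scale hold
# the stimulus levels and the fitted scale values): overlay a quadratic,
# rescaled to the same maximum, to compare shapes by eye.
s = np.asarray(obs.stim)
plt.plot(s, s**2 * np.max(obs.scale), '--', color='gray', label='$x^2$ (rescaled)')
plt.legend(loc='upper left')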
plt.show()

# and the noise estimated by MLDS should be double the noise
# introduced at the sensory level in the simulation
print("noise estimate from MLDS: %.3f" % (1 / obs.mns[-1]))
print("which should be double the sensory noise, %.3f" % fn.sigmamax)

# finally, GoF measures should be OK, as we are simulating an observer
# that actually follows the decision model assumed by MLDS
obs.rundiagnostics()
print("GoF measures:")
print('AIC: %f, DAF: %f' % (obs.AIC, obs.DAF))
print('p-val: %f' % obs.prob)
Example #4
######
# 3. Now we simulate the observer performing the method of triads.
#    The simulated results are stored in the file fname
fname = mlds.simulateobserver(fn, stim, nblocks=15)


#######
# 4. Finally, we want to estimate the scale from the simulated data.
obs = mlds.MLDSObject(fname, boot=True, standardscale=False)
obs.load()  # this takes a while, as the bootstrap is computed
obs.printinfo()

# we can plot the scale
# the shape of the scale should coincide with a power function with 
# exponent 2
mlds.plotscale(obs)
plt.xlim([-0.1, 1.1])
plt.show()

# and the noise estimated by MLDS should be double the noise
# introduced at the sensory level in the simulation
print("noise estimate from MLDS: %.3f" % (1 / obs.mns[-1]))
print("which should be double the sensory noise, %.3f" % fn.sigmamax)


# finally, GoF measures should be OK, as we are simulating an observer
# that actually follows the decision model assumed by MLDS
# obs.rundiagnostics()
# print "GoF measures:"
# print 'AIC: %f, DAF: %f' % (obs.AIC, obs.DAF)
# print 'p-val: %f' % obs.prob