    inputImage[:,:,1] = startinputImage
    inputImage[:,:,2] = startinputImage
    rg, by, wb = image_edit.ConvertRGBtoOpponentColor(inputImage, gray)
    
        
    ########################### LGN CELLS ###################################
    # white black center-surround processing 
    # (but with current parameters it actually does very little)
    
    # padding
    PaddingSize = math.floor(np.max(wb.shape)/2) 
    PaddingColor = wb[0,0]
    wb2 = image_edit.im_padding(wb, PaddingSize, PaddingColor)
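    # image_edit.im_padding is a project helper; assuming it simply pads all four
    # borders with the given constant value (a sketch, the real helper may differ),
    # np.pad produces equivalent output:
    wb2_sketch = np.pad(wb, PaddingSize, mode='constant', constant_values=PaddingColor)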
    
    # convolution - mirrors of each other
    OnOff_Excite =   image_edit.conv2(wb2,C)
    OnOff_Inhibit =  image_edit.conv2(wb2,E)

    # shunting parameters
    paramA = 50 # 1 
    paramB = 90 # 90
    paramD = 60 # 60
    
    # shunting
    x_OnOff = (paramB*OnOff_Excite-paramD*OnOff_Inhibit)/(paramA+(OnOff_Excite+OnOff_Inhibit))
    x_OffOn = (paramB*OnOff_Inhibit-paramD*OnOff_Excite)/(paramA+(OnOff_Inhibit+OnOff_Excite))
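    # Sanity check of the shunting form (hypothetical inputs, not model data):
    # with no inhibition and large excitation the response approaches paramB,
    # with no excitation and large inhibition it approaches -paramD, so the
    # activity stays bounded in [-paramD, paramB].
    _ex = np.array([0.0, 1.0, 1e6])
    _in = np.array([1e6, 1.0, 0.0])
    _x_check = (paramB*_ex - paramD*_in)/(paramA + _ex + _in)  # ~[-60, 0.58, 90]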
    
    # cutting negative values
    x_pos = x_OnOff - x_OffOn
    x_pos[x_pos<0] = 0
    x_neg = x_OffOn - x_OnOff
    x_neg[x_neg<0] = 0
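    # x_pos and x_neg are the positive and negative half-wave rectified parts
    # of the same ON/OFF difference, so at most one of them is nonzero at any pixel:
    assert np.all(x_pos * x_neg == 0)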
#####################################################################
# Example #2
ax8.imshow(P2[:,:,3])#-P2[:,0:5,3])

fig, (ax1,ax2) = plt.subplots(ncols=2, figsize=(10,10))
ax1.imshow(temp-temp2)
ax2.imshow(temp2)

#ax5.imshow(y[:,:,0])
#ax6.imshow(y[:,:,1])
#ax7.imshow(y[:,:,2])
#ax8.imshow(y[:,:,3])
#####################################################################
# Convolution method comparison
from scipy import signal, ndimage
ypos24 = np.abs(signal.convolve(LGNwb2, F2[:,:,0], mode='same'))
ypos2 = np.abs(ndimage.convolve(LGNwb2, F2[:,:,0]))
ypos23 = np.abs(image_edit.conv2(LGNwb2, F2[:,:,0], mode='same'))
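# The three routines differ mainly in boundary handling and alignment:
# ndimage.convolve uses 'reflect' boundaries by default, while
# signal.convolve(..., mode='same') zero-pads. Away from the borders they
# should agree; a quick check on hypothetical test arrays (not LGNwb2/F2):
_img = np.random.rand(32, 32)
_kern = np.random.rand(5, 5)
_same = signal.convolve(_img, _kern, mode='same')
_ndi = ndimage.convolve(_img, _kern)
_agree = np.allclose(_same[3:-3, 3:-3], _ndi[3:-3, 3:-3])  # True in the interior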


#####################################################################
# Move the first row and first column to the end (one-pixel circular shift) to match Matlab's 1-based indexing
store = np.vstack((startinputImage[1::,:],startinputImage[0,:]))
startinputImage = np.vstack((store[:,1::].T,store[:,0])).T
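# np.roll performs the same one-pixel circular shift of rows and columns in a
# single call; a small check that the two constructions agree:
_a = np.arange(12).reshape(3, 4)
_s = np.vstack((_a[1:, :], _a[0, :]))
_s = np.vstack((_s[:, 1:].T, _s[:, 0])).T
_same_shift = np.array_equal(_s, np.roll(_a, shift=(-1, -1), axis=(0, 1)))  # True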

#####################################################################
# COMPARE DIFFERENT METHODS' TRUTH 

S_wb1 = np.zeros((sX, sY))
S_rg1 = np.zeros((sX, sY))
S_by1 = np.zeros((sX, sY)) 
# Number of pixels in this FIDO
for i in np.arange(0,numFIDOs[0]):
#####################################################################
# Example #3
def multi_conv2(A):
    print(image_edit.conv2(wb2, A))  # convolve the padded white-black channel with kernel A
#####################################################################
# Example #4
    negF[negF > 90] = 0   # zero out values above threshold
    negnorm = np.sum(np.sum(negF*negF))
    negF = negF/np.sqrt(negnorm)
    
    F[:,:,k] = posF + negF
    
    # normalize full kernel
    normalizer = np.sum(np.sum( F[:,:,k]*F[:,:,k] ) )
    F[:,:,k] = F[:,:,k]/np.sqrt(normalizer)
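    # Both normalizations above divide by the square root of the summed squared
    # coefficients, i.e. the L2 (Frobenius) norm, so each kernel now has unit norm:
    assert np.isclose(np.linalg.norm(F[:,:,k]), 1.0)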


# Convolve the orientation filters with the White's illusion stimulus

y=np.zeros((i_x,i_y,K))
for i in range(K):
    Ini = np.abs(image_edit.conv2(wb2, F[:,:,i]))   # convolve
    Ini = image_edit.im_cropping(Ini, PaddingSize)  # crop away the padding
    Ini[Ini<0] = 0                                  # half-wave rectify
    y[:,:,i] = Ini
    

# pool across contrast polarity
inI=5
planeSize = y.shape

z1= np.zeros((planeSize[0], planeSize[1], nOrient))
for k in np.arange(0, K//2):
    z1[:,:,k] = y[:,:,k] + y[:,:,k+K//2]
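# Vectorized form of the polarity pooling above (assumes K == 2*nOrient, so the
# two contrast polarities of an orientation sit K//2 channels apart):
z1_vec = y[:, :, :K//2] + y[:, :, K//2:]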

# set upper limit for boundary activity
#boundaryUpperLimit=25;