Example #1
def do_fast_ica(pca_first):
	mo1_cj_inverse = numpy.array(mo1_cj).T
	mo2_cj_inverse = numpy.array(mo2_cj).T
	if pca_first:
		mo1_cj_array = mdp.pca(mo1_cj_inverse, input_dim=4, output_dim=3)
		mo2_cj_array = mdp.pca(mo2_cj_inverse, input_dim=4, output_dim=3)
	else:
		mo1_cj_array = mo1_cj_inverse
		mo2_cj_array = mo2_cj_inverse
	a = mdp.fastica(mo1_cj_array)
	b = mdp.fastica(mo2_cj_array)
	return a,b
Example #2
def do_fast_ica(pca_first):
    mo1_cj_inverse = numpy.array(mo1_cj).T
    mo2_cj_inverse = numpy.array(mo2_cj).T
    if pca_first:
        mo1_cj_array = mdp.pca(mo1_cj_inverse, input_dim=4, output_dim=3)
        mo2_cj_array = mdp.pca(mo2_cj_inverse, input_dim=4, output_dim=3)
    else:
        mo1_cj_array = mo1_cj_inverse
        mo2_cj_array = mo2_cj_inverse
    a = mdp.fastica(mo1_cj_array)
    b = mdp.fastica(mo2_cj_array)
    return a, b
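
Examples #1 and #2 above depend on module-level arrays mo1_cj and mo2_cj that are not shown. A minimal self-contained sketch of the same PCA-then-FastICA pipeline, with a synthetic 4-channel mixture standing in for mo1_cj (the 4-channel layout and the 3-component reduction are assumptions carried over from the snippets above):

import numpy
import mdp

# Synthetic stand-in for mo1_cj: 4 observed channels (rows) built by mixing
# 3 non-Gaussian sources.
t = numpy.linspace(0, 10, 2000)
s = numpy.column_stack([numpy.sin(3 * t),
                        numpy.sign(numpy.sin(5 * t)),
                        numpy.random.uniform(-1, 1, t.size)])
mixing = numpy.random.uniform(-1, 1, (3, 4))
mo1_cj = numpy.dot(s, mixing).T               # 4 x 2000, channels as rows

mo1_cj_inverse = numpy.array(mo1_cj).T        # observations as rows, as MDP expects
reduced = mdp.pca(mo1_cj_inverse, input_dim=4, output_dim=3)
recovered = mdp.fastica(reduced)              # estimated independent components
print(recovered.shape)                        # (2000, 3)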
Example #3
    def perform(self):
        self.source_list = numpy.array(self.source_list, 'double')
        self.source_list = self.source_list.transpose()

        guess_matrix = None
        self.source_list = mdp.fastica(self.source_list,
                                       approach='defl',
                                       g='gaus',
                                       guess=guess_matrix,
                                       fine_g='gaus',
                                       mu=0.01,
                                       stabilization=0.001,
                                       sample_size=1,
                                       fine_tanh=1,
                                       max_it=50000000,
                                       max_it_fine=100,
                                       failures=500,
                                       limit=0.001,
                                       verbose=False,
                                       whitened=False,
                                       white_comp=self.nout,
                                       white_parm={'svd': True})

        result = []
        print self.source_list.shape
        for i in range(self.nout):
            result.append(self.one_to_two(self.source_list[:, i]))

        #sources=self.source_list
        self.source_list = []
        return result
Example #4
 def GET(self, name):
     min,max=[],[]
     if not name: 
       names="&artist=the+beatles"
     else:
       names=""
       for n in name.split(','):
         names = names + "&artist=" +n
     rawdata = json.loads(urllib.urlopen("http://developer.echonest.com/api/v4/playlist/static?api_key=N6E4NIOVYMTHNDM8J&artist=%s&format=json&results=100&type=artist-radio&bucket=id:7digital&bucket=audio_summary&audio=true&variety=0.999"%(names)).read())['response']["songs"]
     keys = [u'key', u'tempo', u'mode', u'time_signature', u'duration', u'loudness']
     x=[]
     for s in rawdata:
         x.append([])
         for k in keys:
           x[-1].append(s['audio_summary'][k])
     y = mdp.fastica(scipy.array(x))
     maxc,minc = [],[]
     for col in range(len(keys)):
       maxc.append(y[:, col].max())
       minc.append( y[:, col].min())
     for col in range(len(keys)):
       rang = maxc[col] - minc[col]
       for row in range(100):
         y[row,col] = (y[row,col] - minc[col])/rang
     return json.dumps({'characteristics_keys':keys, 'characteristics':x, 'ica':y.tolist() , 'echo':rawdata})
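
The two loops at the end of this example rescale each column of the ICA output to [0, 1] and assume exactly 100 rows. The same rescaling can be done with numpy broadcasting and without the hard-coded row count; a small sketch (y below is just a random stand-in for the mdp.fastica result above):

import numpy as np

y = np.random.uniform(-3, 3, (100, 6))            # stand-in for the ICA output
y_min = y.min(axis=0)
y_scaled = (y - y_min) / (y.max(axis=0) - y_min)  # each column now spans [0, 1]
print(y_scaled.min(axis=0), y_scaled.max(axis=0))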
Example #5
 def compute(self, waveforms, sampling_rate, output_dim=2):
     
     #~ waveforms2 = empty( (waveforms.shape[0] , waveforms.shape[1]*waveforms.shape[2] ) )
     #~ for i in range(waveforms.shape[0]):
         #~ waveforms2[i,:] = waveforms[i].reshape( (waveforms.shape[1]*waveforms.shape[2]) )
     
     waveforms2 = waveforms.reshape(waveforms.shape[0], -1)
     
     ica_mat = mdp.fastica(waveforms2, whitened=0,
                           white_comp=output_dim,
                           g='tanh',
                           approach='symm')
     return ica_mat
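
The reshape is the key step in this example: each waveform, originally an (n_samples, n_channels) block, is flattened into one row so that mdp.fastica again sees one observation per row. A self-contained sketch with made-up sizes (300 spikes, 32 samples, 4 electrodes, and two underlying templates are all assumptions for illustration):

import numpy as np
import mdp

# Hypothetical spike data generated from two fixed templates with
# non-Gaussian amplitudes, shaped (n_spikes, n_samples, n_channels).
rng = np.random.RandomState(0)
amplitudes = np.column_stack([rng.laplace(size=300),
                              rng.uniform(-1, 1, 300)])
templates = rng.standard_normal((2, 32 * 4))
waveforms = np.dot(amplitudes, templates).reshape(300, 32, 4)

waveforms2 = waveforms.reshape(waveforms.shape[0], -1)     # shape (300, 128)
ica_mat = mdp.fastica(waveforms2, whitened=0, white_comp=2,
                      g='tanh', approach='symm')
print(ica_mat.shape)                                       # (300, 2)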
Example #6
    def Main(self, model):
        # self.model = model
        data = array(model.GetCurrentData()[:])

        k = wx.GetNumberFromUser("ICA Dialog", "Enter number of independent components", "k", 1)

        # Keep k whitened components so the output matches the `fields` list below
        ica_node = mdp.nodes.FastICANode(white_comp=k)
        ica_node.train(data)
        ica_node.stop_training()
        ica_data = ica_node.execute(data)

        # ica_data = r.fastICA(data, k, alg_typ = "deflation", fun = "logcosh", alpha = 1, method = "R", row_norm = 0, maxit = 200, tol = 0.0001, verbose = 1)
        fields = ["Comp%02d" % c for c in range(1, k + 1)]
        model.updateHDF("IcaPY", ica_data, fields=fields)
Example #7
def apply_ica(x, i, g):
    '''
        Separate the original RGB sources into 3 different channels.
        i is between 0 and 2 and selects which channel to return;
        g is the nonlinearity FastICA uses to converge.
    '''

    #x1 = numpy.transpose(x)
    x1 = numpy.array(x)
    #channels = mdp.fastica(x1, input_dim=3)
    #channels = mdp.fastica(x1, input_dim=3, verbose=True, g=g)
    channels = mdp.fastica(x1, input_dim=3, g=g)
    tmp = channels.transpose()
    return tmp[i]
Example #8
def apply_ica(x, i, g):
    '''
        Separate the original RGB sources into 3 different channels.
        i is between 0 and 2 and selects which channel to return;
        g is the nonlinearity FastICA uses to converge.
    '''

    #x1 = numpy.transpose(x)
    x1 = numpy.array(x)
    #channels = mdp.fastica(x1, input_dim=3)
    #channels = mdp.fastica(x1, input_dim=3, verbose=True, g=g)
    channels = mdp.fastica(x1, input_dim=3, g=g)
    tmp = channels.transpose()
    return tmp[i]
Example #9
    def split_audio(self):
        self.fs, self.data = wav.read(os.getcwd() + '\\content\\' +
                                      self.file_name + '.wav')

        ww = wave.open(os.getcwd() + "\\content\\" + self.file_name + ".wav",
                       'wb')
        voices = []
        #FastICA - to separate channels and waves
        source = fastica(self.data.astype(float))
        #normalize
        source = np.int16(source / np.max(abs(source), axis=0) * 32767)

        #plt.plot(np.array(source))
        #plt.savefig(os.getcwd() + "\\content\\fastICA_plot.png")

        # Creating mono channel files:
        wav.write(
            os.getcwd() + '\\content\\' + self.file_name + '_ICAleft.wav',
            44100, source[:, 0])
        wav.write(
            os.getcwd() + '\\content\\' + self.file_name + '_ICAright.wav',
            44100, source[:, 1])
        ##########################PLOTTING#######################################
        plt.figure(1)
        plt.subplot(511)
        plt.title("fastICA")
        plt.plot(source)
        plt.subplot(513)
        plt.title("fastICA-left channel")
        plt.plot(source[:, 0])
        plt.subplot(515)
        plt.title("fastICA - right channel")

        plt.plot(source[:, 1], color='orange')

        plt.savefig(os.getcwd() + "\\content\\fastICA.png")
        ###########################SEPARATE VOICES##############################################
        #chunk = 1024#buffer
        #self.treshold = audioop.avg(self.data, 2) #treshold for silence
        #rms = 0 ## root-mean-square -> measure of the power in an audio signal.
        #i = 0
        eps = 200
        #for i in range(len(source)):
        pos = []
        pos.append(0)
        i = 1
        value = 0
Example #10
def fastICA(mix_file, jamming_file):
    sig1, fs1, enc1 = wavread(mix_file)
    sig2, fs2, enc2 = wavread(jamming_file)
    sig1, sig2 = chop_sig(sig1, sig2)
    wavwrite(array([sig1, sig2]).T, "mixed.wav", fs1, enc1)
    # Load in the stereo file
    recording, fs, enc = wavread("mixed.wav")

    # Perform FastICA algorithm on the two channels
    sources = fastica(recording)

    # The output levels of this algorithm are arbitrary, so normalize them to 1.0.

    m = []
    for k in sources:
        m.append(k[0])
    # Write back to a file
    wavwrite(array(m), "sources.wav", fs, enc)
def pid_pca(args):
    #  import modular data processing toolkit
    import mdp
    # load data file
    tblfilename = "bf_optimize_mavlink.h5"
    h5file = tb.open_file(tblfilename, mode = "a")
    # table = h5file.root.v1.evaluations
    # get table handle
    table = h5file.root.v2.evaluations

    # sort rows
    if not table.cols.mse.is_indexed:
        table.cols.mse.createCSIndex()
        
    if not args.sorted:
        pids = [ [x["alt_p"], x["alt_i"], x["alt_d"], x["vel_p"], x["vel_i"], x["vel_d"]]
             for x in table.iterrows() ]
        mses = [ [x["mse"]] for x in table.iterrows() ]
    else:
        pids = [ [x["alt_p"], x["alt_i"], x["alt_d"], x["vel_p"], x["vel_i"], x["vel_d"]] for x in table.itersorted("mse")]
        mses = [ [x["mse"]] for x in table.itersorted("mse")]
    print "best two", pids
    mses_a = np.log(np.clip(np.array(mses), 0, 200000.))
    mses_a /= np.max(mses_a)
    # FIXME: try kernel pca on this
    from sklearn.decomposition import PCA, KernelPCA, SparsePCA
    kpca = KernelPCA(n_components = None,
                     kernel="rbf", degree=6, fit_inverse_transform=True,
                     gamma=1/6., alpha=1.)
    # kpca = SparsePCA(alpha=2., ridge_alpha=0.1)
    X_kpca = kpca.fit_transform(np.asarray(pids).astype(float))
    # X_back = kpca.inverse_transform(X_kpca)

    Z_kpca = kpca.transform(np.asarray(pids).astype(float))

    print Z_kpca.shape, X_kpca.shape
    print "|Z_kpca|", np.linalg.norm(Z_kpca, 2, axis=1)
    # for i in range(8):
    #     pl.subplot(8,1,i+1)
    #     pl.plot(Z_kpca[:,i])
    #     pl.legend()
    # pl.show()

    
    # fast PCA
    # pid_p = mdp.pca(np.array(pids).astype(float))
    pid_array = np.array(pids).astype(float)
    print "pid_array.shape", pid_array.shape
    pcanode = mdp.nodes.PCANode(output_dim = 6)
    # pcanode.desired_variance = 0.75
    pcanode.train(np.array(pids).astype(float))
    pcanode.stop_training()
    print "out dim", pcanode.output_dim

    pid_p = pcanode.execute(np.array(pids).astype(float))

    # pid_array_mse = np.hstack((np.array(pids).astype(float), mses_a))
    pid_ica = mdp.fastica(np.array(pids).astype(float))
    print "ica.shape", pid_ica.shape
    # pid_p = np.asarray(pids)[:,[0, 3]]
    # pid_p = pids[:,0:2]
    # [:,0:2]
    sl_start = 0
    sl_end = 100
    sl = slice(sl_start, sl_end)

    print "expl var", pcanode.explained_variance
    pl.subplot(111)
    colors = np.zeros((100, 3))
    # colors = np.hstack((colors, 1-(0.5*mses_a)))
    colors = np.hstack((colors, 1-(0.8*mses_a)))
    # print colors.shape
    # pl.scatter(pid_p[sl,0], pid_p[sl,1], color=colors)

    # ica spectrum
    pid_ica_sum = np.sum(np.square(pid_ica), axis=0)
    # pid_ica_sum_sort = np.sort(pid_ica_sum)
    pid_ica_sum_0 = np.argmax(pid_ica_sum)
    pid_ica_sum[pid_ica_sum_0] = 0
    pid_ica_sum_1 = np.argmax(pid_ica_sum)
    
    # pl.scatter(pid_p[sl,0], pid_p[sl,1], color=colors)
    pl.scatter(pid_ica[sl,pid_ica_sum_0], pid_ica[sl,pid_ica_sum_1], color=colors)
    # pl.scatter(X_kpca[:,0], X_kpca[:,1], color=colors)
    pl.gca().set_aspect(1)
    # pl.scatter(pid_p[:,0], pid_p[:,1], alpha=1.)
    # pl.show()

    # plot raw pid values     
    pl.subplot(411)
    pl.plot(pid_array[sl,[0,3]], "o")
    pl.xlim((sl_start - 0.2, sl_end + 0.2))
    pl.subplot(412)
    pl.plot(pid_array[sl,[1,4]], "o")
    pl.xlim((sl_start - 0.2, sl_end + 0.2))
    pl.subplot(413)
    pl.plot(pid_array[sl,[2,5]], "o")

    # plot compressed pid values: pca, ica, ...
    # pl.subplot(211)
    # pl.plot(pid_p, ".")
    # pl.plot(pid_p[sl], "o")
    # pl.plot(pid_ica[sl] + np.random.uniform(-0.01, 0.01, size=pid_ica[sl].shape), "o")
    pl.xlim((sl_start - 0.2, sl_end + 0.2))
    # pl.plot(Z_kpca[:,:], "-o", label="kpca")
    # pl.plot(Z_kpca[:,:], ".", label="kpca")
    # pl.legend()
        
    # pl.subplot(212)
    pl.subplot(414)
    pl.plot(mses_a[sl], "ko")
    # pl.gca().set_yscale("log")
    pl.xlim((sl_start - 0.2, sl_end + 0.2))
    pl.show()

    # gp fit
    x = mses_a[sl]
    x_sup = np.atleast_2d(np.arange(0, x.shape[0])).T
    x_ones = x != 1.
    x_ones[0:20] = False
    print x, x_sup, x_ones, x_ones.shape
    print "x[x_ones]", x[x_ones].shape
    print "x_sup[x_ones]", x_sup[x_ones].shape

    from sklearn.gaussian_process import GaussianProcess
    # gp = GaussianProcess(regr='constant', corr='absolute_exponential',
    #                  theta0=[1e-4] * 1, thetaL=[1e-12] * 1,
    #                  thetaU=[1e-2] * 1, nugget=1e-2, optimizer='Welch')
    gp = GaussianProcess(corr="squared_exponential",
                         theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
                         nugget=1e-1/x[x_ones])
    gp.fit(x_sup[x_ones,np.newaxis], x[x_ones,np.newaxis])
    x_pred, sigma2_pred = gp.predict(x_sup, eval_MSE=True)
    print x_pred, sigma2_pred

    from sklearn import linear_model
    clf = linear_model.Ridge (alpha = .5)
    clf.fit(x_sup[x_ones,np.newaxis], x[x_ones,np.newaxis])
    x_pred = clf.predict(x_sup[20:100])
        
    pl.subplot(111)
    pl.plot(mses_a[sl], "ko")
    x_mean = np.mean(x[0:20])
    pl.plot(np.arange(0, 20), np.ones((20, )) * x_mean, "k-", alpha=0.5)
    pl.plot(np.arange(20, 100), x_pred, "k-", alpha=0.5)
    pl.axhspan(0.5, 1.1, 0, 0.19, facecolor="0.5", alpha=0.25)
    # pl.plot(x_pred + sigma2_pred, "k-", alpha=0.5)
    # pl.plot(x_pred - sigma2_pred, "k-", alpha=0.5)
    # pl.gca().set_yscale("log")
    pl.xlim((sl_start - 0.2, sl_end + 0.2))
    pl.ylim((0.5, 1.1))
    pl.text(5, 0.6, "Random\ninitialization")
    pl.text(40, 0.6, "Optimizer\nsuggestions")
    pl.xlabel("Episode #")
    pl.ylabel("MSE")
    if args.plotsave:
        pl.gcf().set_size_inches((10, 3))
        pl.gcf().savefig("%s-mse.pdf" % (sys.argv[0][:-3]), dpi=300,
                        bbox_inches="tight")
    pl.show()
Example #12
# Mostly from http://www.endolith.com/wordpress/2009/11/22/a-simple-fastica-example/

from mdp import fastica
from scikits.audiolab import wavread, wavwrite
from numpy import abs, max, array

# Load in the microphone audio files
mic1, fs1, enc1 = wavread('channel1.wav')
mic2, fs2, enc2 = wavread('channel2.wav')
mic3, fs3, enc3 = wavread('channel3.wav')
mic4, fs4, enc4 = wavread('channel4.wav')

# transpose() because MDP expects each COLUMN to be an
# observed data stream.  Each ROW is one observation of that data across
# all data streams.
sources = fastica(array([mic1, mic2, mic3, mic4]).transpose())
 
# The output levels of this algorithm are arbitrary, so normalize them to 1.0.
sources /= max(abs(sources), axis = 0)

(frames, inputs) = sources.shape

# Write one output file for each resulting ICA transform.
for i in range(inputs):
    sourceColumn = sources[:,[i]]  # extract a column from the numpy ndarray
    wavwrite(sourceColumn, "resolved-source%d.wav" % i, fs1, enc1)
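
The transpose comment above is the detail to keep in mind when adapting this example: mdp.fastica expects one observation (time sample) per row and one observed signal per column. A self-contained check with synthetic tones, so it runs without wav files or scikits.audiolab:

import numpy as np
import mdp

# Two synthetic "microphones", each a different mixture of a sine and a square wave.
t = np.linspace(0, 1, 8000)
s1 = np.sin(2 * np.pi * 5 * t)
s2 = np.sign(np.sin(2 * np.pi * 3 * t))
mic1 = 0.6 * s1 + 0.4 * s2
mic2 = 0.3 * s1 + 0.7 * s2

# Rows are observations (time samples), columns are data streams -- hence the transpose.
observations = np.array([mic1, mic2]).transpose()   # shape (8000, 2)
sources = mdp.fastica(observations)
sources /= np.max(np.abs(sources), axis=0)          # normalize each column to 1.0
print(sources.shape)                                # (8000, 2)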
Example #13
def ica(data, precision = 'float32'):
	return(mdp.fastica(data, dtype=precision))
Example #14
def HRF_ICA(arq_withSS,arq,cromophore,MinIc=2,condition=1):
    """
        Computes the ICA algorithm and uses the criteria to define an HRF.
        You should give as inputs:
            -a file name string (arq)
            -which chromophore to compute the ICs for (cromophore = 'HbO' or 'HbR')
            -the minimum number of ICs to be computed (MinIc)
            -which condition to analyze (condition)
    """
    cla()
    close()
    
    pretime = 2 
    posttime = 20
    
#    arq = '6'
#    cromophore = 'HbR'
    if cromophore == 'HbO':
        crmo = 0
    if cromophore == 'HbR':
        crmo = 1
#    MinIc = 3
    IcFor = range(MinIc,4)
    
    # Define the file paths and file names
    nonlin = 'gaus'
    
    # Set the name of the folder to save the results in
    
    #The figures folder
    fol = 'Output/Min{2}/{0}/{1}'.format(os.path.split(arq)[1],cromophore,MinIc) 
    folder = '{0}/{1}'.format(fol, nonlin)
    if not os.path.exists(folder):
        os.makedirs(folder)
        
    #The .mat files folder    
    folder2 = 'Output/MatFiles/Min = {0}'.format(MinIc) 
    if not os.path.exists(folder2):
        os.makedirs(folder2)
    
    
    #Read the group averages
#    avgFiles = glob.glob('Input/*.mat')
#    
#    group_avg = sio.loadmat(avgFiles[0])
#    group_avg_withoutss  = sio.loadmat(avgFiles[1])
#    
#    #Get only the group HRF results
#    avgHRF = read_group(group_avg)
#    avgHRF_withoutss = read_group(group_avg_withouss)
    
    
    #Load the Homer2 data into a hmr2 object
    filecw6 = sio.loadmat(arq,appendmat = False)
    
    d1 = hmr2(filecw6,condition)
    
    del filecw6
    
    filecw6 = sio.loadmat(arq_withSS,appendmat = False)
    
    d1.HRF_withSS = filecw6['dcAvg'][:,:,:,condition]
    
    del filecw6
    # Resample the data to remove the unwanted stimulus
    if condition == 1:
        resample_aux = find(d1.s == 1)
        resample_pts = range(resample_aux[0]-15*d1.fs,resample_aux[-1]+30*d1.fs)
    elif condition == 0:
        resample_aux = find(d1.s == 1)
        resample_pts = range(0,resample_aux[-2]+30*d1.fs)
        
    d1 = hmr2.Resample(d1,resample_pts)
    
    d1 = hmr2.CalcOD(d1,100)
    d1.dod = d1.dOD
    
    fb,fa = sp.signal.butter(4,array([0.0001,2])*2/d1.fs,btype='bandpass')
    d1.dod = sp.signal.filtfilt(fb,fa,d1.dod)
    
    d1.conc = hmr2.MBLL(d1,d1.dod,6)
    del d1.dOD
    
    schan = []
    for i in range(len(d1.Chan)):
            rho = np.linalg.norm(d1.SrcPos[d1.ml[i,0]-1,:]-d1.DetPos[d1.ml[i,1]-1,:])
            if rho <= 15:
                schan.append(i)
    
    ###########################################################################    
   
    canais = array([29,32,34])

    ###########################################################################
    
    ## Mean HRF for the large channels
    mHRF_large = mean(d1.HRF[:,crmo,canais],1)
    mHRF_large_withSS = mean(d1.HRF_withSS[:,crmo,canais],1)
        
    close()
    cla()
#    mHRF_large /= max(abs(mHRF_large))
    plot(d1.tHRF,mHRF_large,'red')
    plot(d1.tHRF,mHRF_large_withSS,'black')    
    ## Mean HRF for the short channels
    mHRF_short = mean(d1.HRF[:,crmo,schan],1)
#    mHRF_short /= max(abs(mHRF_short))
    plot(d1.tHRF,mHRF_short,'blue')
    savefig('{0}/mHRF.png'.format(fol),format='png')
    plt.show()
    close()
    cla()

    contralateral_channels = range(21,42)

    HRF_MITC_HbO = np.zeros((len(d1.Chan)))
    HRF_MITC_HbR = np.zeros((len(d1.Chan)))
    for i in range(len(d1.Chan)):
        HRF_MITC_HbO[i] = MITC(d1,d1.conc[:,i,0],pretime,posttime)
        HRF_MITC_HbR[i] = MITC(d1,d1.conc[:,i,1],pretime,posttime)
        
    close()
    cla()
#    
#    return d1, HRF_MITC_HbO, HRF_MITC_HbR
    magica = 0
    for nic in IcFor:
        print nic               
        y = mdp.fastica(d1.conc[:,canais,crmo],g='tanh',white_comp= nic,mu=0.8)
        close()
        cla()


            
        freq = np.fft.rfftfreq(d1.t.size,d = 1/d1.fs)
        amin = find(freq>= 0.005)[0]
        amax = find(freq>= 20)[0]
        yfft = np.zeros((size(y,0),size(y,1)))
        yfft2 = np.zeros((size(y,0)/2+1,size(y,1)))       
        
        resp_frac = np.zeros((size(y,1)))    
        
        fb,fa = sp.signal.butter(5,array([3])*2/d1.fs,btype='low')
    
        for i in range(size(y,1)):
            y[:,i] = sp.signal.filtfilt(fb,fa,y[:,i])
            y[:,i] /= max(abs(y[:,i]))
#            yfft2[:,i] = abs(np.fft.fft(y[:,i]))**2
            yfft2[:,i] = abs(np.fft.rfft(y[:,i]))**2
#            yfft2[:,i] /= max(abs(yfft2[:,i]))
#            yfft2[:,i] = abs(real(np.fft.fft(y[:,i])))
            dummy1 = find(freq>=0.01)
            dummy2 = find(freq>=0.1)
            dummy3 = find(freq>=2)
            
            fnoise = 1/freq**2
            yfft2[:,i] -= fnoise 
#            #Define the threshold (1/f area)            
            fthr_aux1 = sp.integrate.simps(y=fnoise[dummy1[0]:dummy2[0]]**2, x=freq[dummy1[0]:dummy2[0]], dx=freq[1]-freq[0])
            fthr_aux2 = sp.integrate.simps(y=fnoise[dummy2[0]:dummy3[0]]**2, x=freq[dummy2[0]:dummy3[0]], dx=freq[1]-freq[0])
            
            fthr = fthr_aux2/fthr_aux1
            #Area for the activation region
            activ_area = sp.integrate.simps(yfft2[dummy1[0]:dummy2[0],i],freq[dummy1[0]:dummy2[0]], dx=freq[1]-freq[0])
            resp_area = sp.integrate.simps(yfft2[dummy2[0]:dummy3[0],i],freq[dummy2[0]:dummy3[0]], dx=freq[1]-freq[0])
            
            resp_frac[i] = round((resp_area/activ_area) - fthr,2)
#            
                        
#            print 'areas:', fthr, resp_frac[i]
            ## Original Resp_frac
#            resp = max(yfft2[dummy1[0]:dummy2[0],i])
#            dummy1 = find(freq>=0.01)
#            dummy2 = find(freq>=0.05)
#            activ = max(yfft2[dummy1[0]:dummy2[0],i])
#            resp_frac[i] = round(resp/activ,2)

        mitc = np.zeros((size(y,1)))
        y_full = np.zeros(y.shape)
        for i in range(size(y,1)):
#            y_full[:,i] = y[:,i]
#            y[:,i] = lob.BandPassFilt(y[:,i],d1.fs,0.01,0.5,3,5)
            mitc[i] = MITC(d1,y[:,i],pretime,posttime)
        
        corr_stim = mitc
                   
        ts, mICA = lob.HRFAvgICA(d1, y, pretime,posttime)
        mHRF_short /= max(abs(mHRF_short))
        mHRF_large /= max(abs(mHRF_large))
        mHRF_large_withSS /= max(abs(mHRF_large_withSS))
        
        for i in range(nic):
#            mICA[:,i] = lob.BandPassFilt(mICA[:,i],d1.fs,0.01,0.5,3,5)
#            mICA[:,i] = ndimage.gaussian_filter(mICA[:,i],3)
            mICA[:,i] /= max(abs(mICA[:,i]))
        ##Cross correlation
        
        corr_large = np.zeros((size(y,1)))
        for i in range(size(y,1)):
            corr_large[i] = round(np.corrcoef(mICA[:,i],mHRF_large)[0,1],2)
            
        corr_short = np.zeros((size(y,1)))
        for i in range(size(y,1)):
            corr_short[i] = round(np.corrcoef(mICA[:,i],mHRF_short)[0,1],2)
            
        corr_withss = np.zeros((size(y,1)))
        for i in range(size(y,1)):
            corr_withss[i] = round(np.corrcoef(mICA[:,i],mHRF_large_withSS)[0,1],2)
#        corr_hmr = np.zeros((size(y,1)))
#        for i in range(size(y,1)):
#            corr_hmr[i] = round(np.corrcoef(mICA[:,i],mHRF_hmr)[0,1],2)
    
        close()
        cla()
#        for i in range(size(y,1)):
#            mICA[:,i] = ndimage.gaussian_filter(mICA[:,i],)
            
        fig, axes = plt.subplots(nrows=size(y,1), ncols=3, figsize=(13, 18))
        fig.subplots_adjust(wspace=0.24, hspace=0.33, bottom=0.05)
        
        
            
            
#        aux1 = max(max(yfft2[amin:amax,i]) for i in range(size(y,1)))
#        aux2 = min(min(yfft2[amin:amax,i]) for i in range(size(y,1)))
        
        
        # Turn off tick labels everywhere
        #    for ax in axes.flat: 
        #        for axis in [ax.xaxis, ax.yaxis]:
        #            axis.set_ticklabels([])
        
        for (i,j), ax in np.ndenumerate(axes):
                if j==0:
                    ax.plot(d1.t,y[:,i])
#                    ax.plot(d1.t,y[:,i],'red')
                    ax.plot(d1.t,(d1.s*max(max(y[:,i]),max(y_full[:,i]))),'black',linewidth=3.0)
                    ax.set_yticklabels([])
                    ax.set_xlim(left=d1.t[0],right=d1.t[-1])
                    ax.annotate('{0}'.format(corr_stim[i]), xy=(0.01, 0.5), 
                                xycoords='axes fraction', rotation=90, va='center',
                                ha='right', size = 15) 
                if j==1:
                    dummy1 = find(freq>=0.005)
                    dummy2 = find(freq>=5)                          
#                    fnoise /= max(abs(fnoise)) 
        #                ax.semilogx(freq,yfft[:,i])
#                    f, Pxx_den = sp.signal.periodogram(y_full[:,i],d1.fs)
#                    ax.semilogx(f,Pxx_den)
                    ax.loglog(freq,yfft2[:,i])
                    ax.loglog(freq,fnoise,'black')
                    ax.set_xlim(left=0.005,right=25)
#                    ax.set_ylim(0,1)
                    ax.annotate('{0}'.format(resp_frac[i]), xy=(0.01, 0.5), 
                                xycoords='axes fraction', rotation=90, va='center',
                                ha='right', size = 15)                
#                    ax.set_ylim(bottom=aux2,top=aux1)
#                    ax.set_yticks((aux1,aux2))
                    ax.set_yticklabels([])
        #                ax.set_xticks(ticks)
        #                ax.set_xticklabels(tickslabel)
                if j==2:
                    bux1 = max(mICA[:,i])
                    bux2 = min(mICA[:,i])
                    ax.plot(ts,mICA[:,i])
                    ax.set_yticklabels([])
                    ax.annotate('M {0}'.format(corr_large[i]), xy=(1, 0.5), 
                                xycoords='axes fraction', rotation=90, va='center',
                                ha='right', size = 15) 
                    ax.annotate('SS {0}'.format(corr_withss[i]), xy=(1.2, 0.5), 
                                xycoords='axes fraction', rotation=90, va='center',
                                ha='right', size = 15) 
                    ax.annotate('SH {0}'.format(corr_short[i]), xy=(0, 0.5), 
                                xycoords='axes fraction', rotation=90, va='center', 
                                ha='right', size = 15) 
                    ax.axvspan(0, 5, facecolor='0.8', edgecolor ='0.8', alpha=0.5)
        #            if j==2:
        #                cor = abs(corr[i,:])
        #                ax.plot(atraso,cor)
        #                a = min(cor)
        #                b = max(cor)
        #                ax.set_ylim(bottom = a-0.01*a, top = b+0.01*b)
        #                ax.set_yticks((a,b))
        #                ax.set_yticklabels(('%.2f'%a,'%.2f'%b))
        #                
                    
        #    corr = np.zeros(nic)            
        #    for i in range(nic):
        #        corr = np.corrcoef(d1.s[:,0],y[:,i])[0,1]
        #        axes[i,0.5].annotate('{0}'.format("%.2f" %corr), xy=(-0.01, 0.5), 
        #            xycoords='axes fraction', rotation=90, va='center', ha='right')
        
        savefig('{0}/{1}.png'.format(folder,nic),format='png')
        close()
        cla()
    
    
        aaa = find(corr_stim>=0.4)
        bbb = find(resp_frac<=0)
        
        ics_final = [val for val in aaa if val in bbb]
        
      
            
        if (len(ics_final) != 0):
            
            if (len(ics_final) != 1):
                print('WARNING: Two or more results are being plotted.')
            
#            with file('{0}/results.txt'.format(fol), 'a') as outfile:
#                for jj in ics_final:
#                    outfile.write('Results for the IC on the line n {0}\n'.format(jj+1))
#                    outfile.write('Corellation with large channels = {0}\n'.format(corr_large[jj]))
#                    outfile.write('Correlation with short channels= {0}\n'.format(corr_short[jj]))
#                    outfile.write('Correlation with stimulus = {0}\n'.format(corr_stim[jj]))                
#                    outfile.write('R frac = {0}\n'.format(resp_frac[jj]))
#                    outfile.write('Number of ICs needed {0}\n\n\n'.format(nic))
        
        #                        
            fig, axes = plt.subplots(ncols=len(ics_final), nrows=1,figsize=(len(ics_final)*9,5))#,figsize=(9,7))
            fig.subplots_adjust(wspace=0, hspace=0.7, bottom=0.2)
            
    
            if len(ics_final) != 1:
                MinShort = find(abs(corr_short) == min(abs(corr_short[ics_final])))
                for kk in range(len(ics_final)):
                    ## Correction for negative correlation
                    
#                         corr_large[ics_final[kk]] = -corr_large[ics_final[kk]]
#                         corr_short[ics_final[kk]] = -corr_short[ics_final[kk]]
#                         corr_large[ics_final[kk]] = -corr_large[ics_final[kk]]
                    if len(ics_final) == 2:
                        hh = max(abs(corr_short[ics_final[kk]]),abs(corr_large[ics_final[kk]]))
                        if hh == abs(corr_short[ics_final[kk]]) and corr_short[ics_final[kk]]<0 or hh == abs(corr_large[ics_final[kk]]) and corr_large[ics_final[kk]]<0:
                             mICA[:,ics_final[kk]]=-mICA[:,ics_final[kk]]
                             corr_short[ics_final[kk]] = -corr_short[ics_final[kk]]
                             corr_large[ics_final[kk]] = -corr_large[ics_final[kk]]
#                             corr_hmr[ics_final[kk]] = -corr_hmr[ics_final[kk]]
                         
                         
                        if ics_final[kk] == MinShort.all() and corr_large[ics_final[kk]]<0:
                            mICA[:,ics_final[kk]]=-mICA[:,ics_final[kk]]
                            corr_short[ics_final[kk]] = -corr_short[ics_final[kk]]
                            corr_large[ics_final[kk]] = -corr_large[ics_final[kk]]
#                            corr_hmr[ics_final[kk]] = -corr_hmr[ics_final[kk]]                            
                            
                        if ics_final[kk] != MinShort.all() and corr_short[ics_final[kk]] <0:
                            mICA[:,ics_final[kk]]=-mICA[:,ics_final[kk]]
                            corr_short[ics_final[kk]] = -corr_short[ics_final[kk]]
                            corr_large[ics_final[kk]] = -corr_large[ics_final[kk]]
#                            corr_hmr[ics_final[kk]] = -corr_hmr[ics_final[kk]] 
                            
                            
                            
                            
#                    if abs(corr_short[ics_final[kk]]) == min(abs(corr_short[ics_final])) and corr_large[ics_final[kk]]>0 and inverteu ==1:
#                        mICA[:,ics_final[kk]]=-mICA[:,ics_final[kk]]
#                    if abs(corr_short[ics_final[kk]]) == min(abs(corr_short[ics_final])) and corr_large[ics_final[kk]]<0 and inverteu ==0:
#                        mICA[:,ics_final[kk]]=-mICA[:,ics_final[kk]]
                    ## Normalization
#                    mICA[:,ics_final[kk]] -= min(mICA[:,ics_final[kk]])
#                    mICA[:,ics_final[kk]] /= max(mICA[:,ics_final[kk]])
#                    mHRF_large -= min(mHRF_large)                                
#                    mHRF_large /= max(mHRF_large)
#                    mHRF_short -= min(mHRF_short)                                
#                    mHRF_short /= max(mHRF_short)    
                    mHRF_short /= max(abs(mHRF_short))
                    mHRF_large /= max(abs(mHRF_large))
                    mHRF_large_withSS /= max(abs(mHRF_large_withSS))
                    mICA[:,ics_final[kk]] /= max(abs(mICA[:,ics_final[kk]]))                   
                    
                    axes[kk].plot(ts,mICA[:,ics_final[kk]],'black',label='HRF with ICA')
                    axes[kk].plot(ts,mHRF_large,'red', label = 'Block-Average HRF from long channels',lw = 3)
                    axes[kk].plot(ts,mHRF_short,'blue', label = 'Block-Average HRF from short Channels',lw = 3)
                    axes[kk].plot(ts,mHRF_large_withSS,'green', label = 'HRF with SS', lw=3)
                    axes[kk].set_ylim(-1.05,1.05)
                    axes[kk].set_xlim(-pretime,posttime)
                    box = axes[kk].get_position()
                    axes[kk].set_position([box.x0, box.y0, box.width * 0.8, box.height])
        #            axes[kk,0].legend()
                    axes[kk].set_xlabel('Time(s)', size='xx-large')
                    axes[kk].set_ylabel('Normalized Concentration', size='xx-large')
                    axes[kk].set_title('IC {0}'.format(kk+1), size='xx-large')
                    axes[kk].axvspan(0, 5, facecolor='0.8', edgecolor ='0.8', alpha=0.5)
                    for tick in axes[kk].xaxis.get_major_ticks():
                        tick.label.set_fontsize('x-large')
                    for tick in axes[kk].yaxis.get_major_ticks():
                        tick.label.set_fontsize('x-large')
                
                
                #            
                #                                    axes[1].plot(ts,mICA[:,a],label='mICA')
                #                        #            axes[1].plot(ts,mHRF2,'red', label = 'mHRF',lw = 3)
                #                        #            axes[1].set_ylim(min(min(mICA[:,a]),min(mHRF2))-0.05,1.05)
                #                                    axes[1].legend()
                #                              with file('Output/test.txt', 'a') as outfile:
                #                                    axes[1].set_ylabel('Normalized Intensity', size='x-large')
                #                                    axes[1].set_title('(B)', size='xx-large')
                #                                    axes[1].axvspan(0, 30, facecolor='0.8', edgecolor ='0.8', alpha=0.5)
                #                        
    #                axes[kk,1].loglog(freq,yfft2[:,ics_final[kk]],label='FFT of the IC')
    #                axes[kk,1].set_ylim(1,1.15*max(yfft2[:,ics_final[kk]]))
    #                axes[kk,1].set_xlim(0,2)
    #                axes[kk,1].set_xlabel('Frequency(Hz)', size='x-large')
    #                axes[kk,1].set_ylabel('Intensity', size='x-large')
    #                axes[kk,1].set_title('(B,{0})'.format(ics_final[kk]), size='xx-large')
    #    #            axes[kk,1].legend()
                #            axes[2].plot(ts,mHRF)
                #                                    axes[2].set_xlabel('Time(s)', size='x-large')
                #                                    axes[2].set_ylabel('Intensity', size='x-large')
                #                                    axes[2].set_title('(C)', size='xx-large')
                lgd = axes[1].legend(loc='center left', bbox_to_anchor=(1.05,0.5))
                plt.savefig('{0}/RESULT, NIC = {1}.pdf'.format(fol,nic),format='pdf',bbox_extra_artists=(lgd,), bbox_inches='tight')
                plt.savefig('{0}/RESULT, NIC = {1}.png'.format(fol,nic),format='png',bbox_extra_artists=(lgd,), bbox_inches='tight')
                close()
                cla()
            else:        
                # Correction for negative correlation
                hh = max(abs(corr_short[ics_final]),abs(corr_large[ics_final]))
                if hh == abs(corr_short[ics_final]) and corr_short[ics_final]<0 or hh == abs(corr_large[ics_final]) and corr_large[ics_final]<0:
                    mICA[:,ics_final]=-mICA[:,ics_final]
                    corr_short[ics_final] = -corr_short[ics_final]
                    corr_large[ics_final] = -corr_large[ics_final]
#                    corr_hmr[ics_final] = -corr_hmr[ics_final]
   
                ## Normalization
#                mICA[:,ics_final] -= min(mICA[:,ics_final])
#                mICA[:,ics_final] /= max(mICA[:,ics_final])
#                mHRF_large -= min(mHRF_large)                                
#                mHRF_large /= max(mHRF_large)
#                mHRF_short -= min(mHRF_short)                                
#                mHRF_short /= max(mHRF_short) 
                mHRF_short /= max(abs(mHRF_short))
                mHRF_large /= max(abs(mHRF_large))
#                mHRF_hmr /= max(abs(mHRF_hmr))
                mICA[:,ics_final] /= max(abs(mICA[:,ics_final]))   
    
                axes.plot(ts,mICA[:,ics_final],'black',label='Mean ICA')
                axes.plot(ts,mHRF_large,'red', label = 'Block-Average HRF from long channels',lw = 3)
                axes.plot(ts,mHRF_short,'blue', label = 'Block-Average HRF from short channels',lw = 3)
                axes.plot(ts,mHRF_large_withSS,'green', label = 'HRF with SS', lw=3)
                axes.set_ylim(-1.05,1.05)
                axes.set_xlim(-pretime,posttime)
                box = axes.get_position()
                axes.set_position([box.x0, box.y0, box.width * 0.8, box.height])
                lgd = axes.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    #            axes[kk,0].legend()
                axes.set_xlabel('Time(s)', size='x-large')
                axes.set_ylabel('Normalized Concentration', size='x-large')
                axes.axvspan(0, 5, facecolor='0.8', edgecolor ='0.8', alpha=0.5)
            
            
            #            
            #                                    axes[1].plot(ts,mICA[:,a],label='mICA')
            #                        #            axes[1].plot(ts,mHRF2,'red', label = 'mHRF',lw = 3)
            #                        #            axes[1].set_ylim(min(min(mICA[:,a]),min(mHRF2))-0.05,1.05)
            #                                    axes[1].legend()
            #                              with file('Output/test.txt', 'a') as outfile:
            #                                    axes[1].set_ylabel('Normalized Intensity', size='x-large')
            #                                    axes[1].set_title('(B)', size='xx-large')
            #                                    axes[1].axvspan(0, 30, facecolor='0.8', edgecolor ='0.8', alpha=0.5)
    #        #                        
    #            axes[1].loglog(freq,yfft2[:,ics_final],label='FFT of the IC')
    #            axes[1].set_ylim(1,1.15*max(yfft2[:,ics_final]))
    #            axes[1].set_xlim(0,2)
    #            axes[1].set_xlabel('Frequency(Hz)', size='x-large')
    #            axes[1].set_ylabel('Intensity', size='x-large')
    #            axes[1].set_title('(B)', size='xx-large')
    #            axes[kk,1].legend()
            #            axes[2].plot(ts,mHRF)
            #                                    axes[2].set_xlabel('Time(s)', size='x-large')
            #                                    axes[2].set_ylabel('Intensity', size='x-large')
            #                                    axes[2].set_title('(C)', size='xx-large')
                plt.savefig('{0}/RESULT, NIC = {1}.pdf'.format(fol,nic),format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
                plt.savefig('{0}/RESULT, NIC = {1}.png'.format(fol,nic),format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')               
                close()
                cla()
            magica += 1
            if magica == 1:
                
#                scipy.io.savemat('OutputArtigo/MatFiles/Min = {2}/Min{2}_{1}-{0}({3}).mat'.format(arq,cromophore,MinIc,len(ics_final)),
#                                 mdict={'ts':ts, 'large_HRF': mHRF_large, 'short_HRF': mHRF_short,
#                                        'change': change, 'mICA': mICA, 'corr_large': corr_large,
#                                 'corr_short': corr_short, 'corr_stim': corr_stim,
#                                 'resp_frac':resp_frac, 'ics_final': ics_final, 'mHRF_large': mHRF_large,
#                                 'mHRF_short': mHRF_short, 'mHRF_hmr': mHRF_hmr, 'corr_hmr': corr_hmr})
                return                
                sys.exit()
Example #15
def decompose_ica(spectrum_array):
	print '\nDecomposing spectra using ICA...'
	ics=n.transpose(mdp.fastica(spectrum_array,whitened=True))
	return ics
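
This example passes whitened=True, which tells FastICA that spectrum_array has already been whitened, so the node skips its own whitening step. A sketch of what that pre-whitening could look like with MDP's WhiteningNode (the spectrum_array below is a random placeholder):

import numpy as np
import mdp

spectrum_array = np.random.uniform(0, 1, (500, 10))   # placeholder spectra

whitener = mdp.nodes.WhiteningNode()
whitener.train(spectrum_array)
whitener.stop_training()
white = whitener.execute(spectrum_array)              # zero mean, unit covariance

ics = np.transpose(mdp.fastica(white, whitened=True))
print(ics.shape)                                      # (10, 500): one IC per row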
Example #16
from mdp import fastica
from scikits.audiolab import wavread, wavwrite
from numpy import abs, max

# Load in the stereo file
recording, fs, enc = wavread('./train/input/mixed_1.wav')

# Perform FastICA algorithm on the two channels
sources = fastica(recording)

# The output levels of this algorithm are arbitrary, so normalize them to 1.0.
sources /= max(abs(sources), axis=0)

# Write back to a file
wavwrite(sources, 'sources.wav', fs, enc)
Example #17
# Mostly from http://www.endolith.com/wordpress/2009/11/22/a-simple-fastica-example/

from mdp import fastica
from scikits.audiolab import wavread, wavwrite
from numpy import abs, max, array

# Load in the microphone audio files
mic1, fs1, enc1 = wavread('channel1.wav')
mic2, fs2, enc2 = wavread('channel2.wav')
mic3, fs3, enc3 = wavread('channel3.wav')
mic4, fs4, enc4 = wavread('channel4.wav')

# transpose() because MDP expects each COLUMN to be an
# observed data stream.  Each ROW is one observation of that data across
# all data streams.
sources = fastica(array([mic1, mic2, mic3, mic4]).transpose())

# The output levels of this algorithm are arbitrary, so normalize them to 1.0.
sources /= max(abs(sources), axis=0)

(frames, inputs) = sources.shape

# Write one output file for each resulting ICA transform.
for i in range(inputs):
    sourceColumn = sources[:, [i]]  # extract a column from the numpy ndarray
    wavwrite(sourceColumn, "resolved-source%d.wav" % i, fs1, enc1)
Example #18
from mdp import fastica
from scikits.audiolab import wavread, wavwrite
from numpy import abs, max
 
# Load in the stereo file
recording, fs, enc = wavread('test.wav')
 
# Perform FastICA algorithm on the two channels
sources = fastica(recording)
 
# The output levels of this algorithm are arbitrary; scale each channel down to 0.2 of full scale to leave headroom.
sources /= (5 * max(abs(sources), axis = 0))
 
# Write back to a file
wavwrite(sources, 'testout.wav', fs, enc)
Example #19
def ica(data, precision='float32'):
    return (mdp.fastica(data, dtype=precision))
Example #20
 gaussian_pca.learn(xG)
 xg = gaussian_pca.transform(xG, k=2)
 
 ########################################################################
 
 """-------Implement kernel PCA with polynomial kernel------- """
 xP= mlpy.kernel_polynomial(z,z, gamma=1.0, b=1.0, d=2.0) # polynomial kernel matrix
 polynomial_pca = mlpy.KPCA()
 polynomial_pca.learn(xP)
 xp = polynomial_pca.transform(xP, k=3)
 print 'Implementing Kernel PCA'
 
 ########################################################################
 
 """-------Implement FastICA------- """
 ica = mdp.fastica(z, dtype='float32') 
 """ Implement PCA """
 pcan1 = mdp.nodes.CuBICANode()
 pcar1 = pcan1.execute(z)
 pcan2 = mdp.nodes.PCANode(output_dim=3)
 pcar2 = pcan2.execute(z)
 
 """-------Select pcar1 as the principal component------- """
 x_pc=pcar1
 
 ########################################################################
 
 """-------Implement KMeans------- """
 
 (res1,idx1,plot_id1)=calc_kmeans.getplotid(z,2)
 (res2,idx2,plot_id2)=calc_kmeans.getplotid(z,3)
Example #21
import mdp
import numpy as np

A = np.array(
    [[1, 2, 3, 4, 5, 6, 7, 8],
     [3, 4, 4, 5, 5, 6, 7, 9],
     [1, 8, 2, 7, 3, 6, 4, 5],
     [9, 8, 7, 6, 5, 4, 3, 2],
     [9, 4, 8, 3, 7, 2, 6, 1],
     [2, 3, 2, 4, 2, 5, 2, 6],
     [3, 4, 3, 4, 4, 3, 4, 3],
     [3, 2, 4, 3, 2, 4, 3, 2],
     [5, 5, 4, 4, 6, 6, 2, 2],
     [2, 3, 6, 5, 4, 6, 7, 2],
     [1, 6, 5, 3, 8, 2, 3, 9]])
     
# perform ICA on some data x using single precision
y = mdp.fastica(A, dtype='float32') 
print y