def log_pdf(self, thetas):
        assert(len(shape(thetas)) == 2)
        assert(shape(thetas)[1] == self.dimension)
        
        result=zeros(len(thetas))
        for i in range(len(thetas)):
            labels=BinaryLabels(self.y)
            feats_train=RealFeatures(self.X.T)

            # ARD: set theta, which is in log-scale, as kernel weights
            kernel=GaussianARDKernel(10,1)
            kernel.set_weights(exp(thetas[i]))
            
            mean=ZeroMean()
            likelihood=LogitLikelihood()
            inference=LaplacianInferenceMethod(kernel, feats_train, mean, labels, likelihood)
            
            # fix kernel scaling for now
            inference.set_scale(exp(0))
            
            if self.ridge is not None:
                log_ml_estimate=inference.get_marginal_likelihood_estimate(self.n_importance, self.ridge)
            else:
                log_ml_estimate=inference.get_marginal_likelihood_estimate(self.n_importance)
            
            # prior is also in log-domain, so no exp of theta
            log_prior=self.prior.log_pdf(thetas[i].reshape(1,len(thetas[i])))
            result[i]=log_ml_estimate+log_prior
            
        return result
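Example no. 2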
 def adapt(self, mcmc_chain, step_output):
     # this is an extension of the base adapt call
     KameleonWindow.adapt(self, mcmc_chain, step_output)
     
     iter_no = mcmc_chain.iteration
     
     if iter_no > self.sample_discard and iter_no < self.stop_adapt:
         learn_scale = 1.0 / sqrt(iter_no - self.sample_discard + 1.0)
         # Robbins-Monro update of nu2 towards the target acceptance rate accstar
         self.nu2 = exp(log(self.nu2) + learn_scale * (exp(step_output.log_ratio) - self.accstar))
Example no. 3
 def get_likelihood(self, evidence, hypothesis):
     param = hypothesis
     # normalising factor of an exponential truncated to [low, high];
     # it is independent of x, so compute it once
     factor = exp(-self.low * param) - exp(-self.high * param)
     likelihood = 1
     for x in evidence:
         if factor:
             likelihood *= param * exp(-param * x) / factor
         # if factor == 0, leave the likelihood unchanged to avoid dividing by zero
     return likelihood
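A quick sanity check of the factor used above: it is the normalising constant of an exponential density truncated to [low, high]. A minimal sketch with illustrative values:

from numpy import exp
from scipy.integrate import quad

low, high, lam = 0.0, 5.0, 1.3  # illustrative values
factor = exp(-low * lam) - exp(-high * lam)
# the truncated density integrates to one over [low, high]
mass, _ = quad(lambda x: lam * exp(-lam * x) / factor, low, high)
print(mass)  # ~1.0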
Example no. 4
 def probabilityChart(self, X):
     '''
     y values for the density plot
     '''
     # TODO: check how this relates to the characteristic function
     g = []
     for x in X:
         P = self.lamb*exp(-self.lamb*x)
         g.append(P)
     return g
Example no. 5
def ssim(img1,img2):

    # convert both images to float to avoid integer overflow in the squares below
    img1=img1.astype(float)
    img2=img2.astype(float)

    # 11x11 Gaussian kernel with sigma=1.5, centred on the middle pixel
    gaussian_kernel_size=11
    gaussian_kernel_sigma=1.5
    center=gaussian_kernel_size//2
    gaussian_kernel = numpy.zeros((gaussian_kernel_size,gaussian_kernel_size))
    for i in range(gaussian_kernel_size):
        for j in range(gaussian_kernel_size):
            gaussian_kernel[i,j]=( 1 / (2*pi*(gaussian_kernel_sigma**2)) )*\
                                 exp(-(((i-center)**2)+((j-center)**2))/(2*(gaussian_kernel_sigma**2)))

    # local means, variances and covariance via Gaussian filtering
    mu1=scipy.ndimage.filters.convolve(img1,gaussian_kernel)
    mu2=scipy.ndimage.filters.convolve(img2,gaussian_kernel)
    var1=scipy.ndimage.filters.convolve((img1-mu1)**2,gaussian_kernel)#,mode='constant',cval=0)
    var2=scipy.ndimage.filters.convolve((img2-mu2)**2,gaussian_kernel)
    mu12=mu1*mu2
    var12=scipy.ndimage.filters.convolve((img1-mu1)*(img2-mu2),gaussian_kernel)

    # stabilising constants from the SSIM paper; L is the dynamic range
    L=255
    K1=0.01
    K2=0.03

    C1=(K1*L)**2
    C2=(K2*L)**2

    SSIM = (2*mu12+C1)*(2*var12+C2)/((mu1**2+mu2**2+C1)*(var1+var2+C2))

    return numpy.average(SSIM)
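Example no. 6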
    def test_predict(self):
        # define some easy training data and predict predictive distribution
        circle1 = Ring(variance=1, radius=3)
        circle2 = Ring(variance=1, radius=10)
        
        n = 100
        X = circle1.sample(n / 2).samples
        X = vstack((X, circle2.sample(n / 2).samples))
        y = ones(n)
        y[:n / 2] = -1.0
        
#        plot(X[:n/2,0], X[:n/2,1], 'ro')
#        hold(True)
#        plot(X[n/2:,0], X[n/2:,1], 'bo')
#        hold(False)
#        show()

        covariance = SquaredExponentialCovariance(1, 1)
        likelihood = LogitLikelihood()
        gp = GaussianProcess(y, X, covariance, likelihood)

        # predict on mesh
        n_test = 20
        P = linspace(X[:, 0].min() - 1, X[:, 0].max() + 1, n_test)
        Q = linspace(X[:, 1].min() - 1, X[:, 1].max() + 1, n_test)
        X_test = asarray(list(itertools.product(P, Q)))
#        Y_test = exp(LaplaceApproximation(gp).predict(X_test).reshape(n_test, n_test))
        Y_train = exp(LaplaceApproximation(gp).predict(X))
        print Y_train
        
        print Y_train>0.5
        print y
Example no. 7
def eval_queries(world):
    """
    Evaluates the queries given a possible world.
    """
    numerators = [0] * len(global_enumAsk.queries)
    denominator = 0
    expsum = 0
    for gf in global_enumAsk.grounder.itergroundings():
        if global_enumAsk.soft_evidence_formula(gf):
            expsum += gf.noisyor(world) * gf.weight
        else:
            truth = gf(world)
            if gf.weight == HARD:
                if truth in Interval(']0,1['):
                    raise Exception('No real-valued degrees of truth are allowed in hard constraints.')
                if truth == 1:
                    continue
                else:
                    return numerators, 0
            expsum += truth * gf.weight  # reuse the truth value computed above
    expsum = exp(expsum)
    # update numerators
    for i, query in enumerate(global_enumAsk.queries):
        if query(world):
            numerators[i] += expsum
    denominator += expsum
    return numerators, denominator
Example no. 8
 def probabilityChart(self, X):
     '''
     y values for the probability plot
     '''
     g = []
     for l in X:
         P = exp(-self.lamb)*(pow(self.lamb,l)/factorial(l))
         g.append(P)
     return g
Example no. 9
 def calculate_response(self, argument):
     """
     Calculate the response of the threshold (logistic sigmoid) activation
     function for a given argument.

     :param argument: input value for the activation function
     :type argument: float
     :return: value calculated by the activation function
     :rtype: float
     """
     return 1.0 / (1 + exp(-ThresholdActivator.FACTOR * argument))
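A minimal usage sketch, assuming a hypothetical FACTOR of 1.0 (the real class attribute may differ): the response is 0.5 at zero and saturates towards 0 and 1 in the tails.

from numpy import exp

FACTOR = 1.0  # assumed steepness, stand-in for ThresholdActivator.FACTOR

def response(argument):
    return 1.0 / (1 + exp(-FACTOR * argument))

print(response(0.0))    # 0.5
print(response(10.0))   # ~1.0
print(response(-10.0))  # ~0.0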
Example no. 10
    def GetTaoFromQ(self,el):
        '''
        Compute wall shear stress from the flow rate,
        using the inverse Womersley method of Cezeaux et al. (1997).
        '''
        self.radius = mean(el.Radius)        
        self.Res = el.R
        self.length = el.Length
        self.Name = el.Name
        
        #WOMERSLEY NUMBER
        self.alpha = self.radius * sqrt((2.0 *pi*self.density)/(self.tPeriod*self.viscosity))
        
        #FOURIER SIGNAL
        k = len(self.signal)
        n = 0
        while n < (self.nHarmonics):
            An = 0
            Bn = 0
            for i in arange(k):
                An += self.signal[i] * cos(n*(2.0*pi/self.tPeriod)*self.dt*self.nSteps[i])
                Bn += self.signal[i] * sin(n*(2.0*pi/self.tPeriod)*self.dt*self.nSteps[i])
            An = An * (2.0/k)
            Bn = Bn * (2.0/k)
            self.fourierModes.append(complex(An, Bn))
            n+=1
        
        self.Steps = linspace(0,self.tPeriod,self.samples)
        self.WssSignal = []  
        self.Tauplot = []
       
        for step in self.Steps:
            self.tao = -self.fourierModes[0].real * 2.0 
            
            k=1
            while k < self.nHarmonics:  
                cI = complex(0.,1.)
                cA = (self.alpha * pow((1.0*k),0.5)) * pow(cI,1.5)  
                c1 = 2.0 * jn(1, cA)
                c0 = cA * jn(0, cA)
                cT = complex(0, -2.0*pi*k*self.t/self.tPeriod)  
                '''tao computation'''
                taoNum = self.alpha**2*cI**3*jn(1,cA)
                taoDen = c0-c1
                taoFract = taoNum/taoDen
                cTao = self.fourierModes[k] * exp(cT) * taoFract
                self.tao += cTao.real
                k+=1

            self.tao *= -(self.viscosity/(self.radius**3*pi))
            self.Tauplot.append(self.tao*10) #dynes/cm2
            self.WssSignal.append(self.tao)
            self.t += self.dtPlot
            
        return self.WssSignal #Pascal
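An aside on the Fourier loop above: when the sample times dt*nSteps[i] are uniform over one period, the An/Bn sums are equivalent to (2/k) times the conjugate FFT coefficient. A small check of that identity:

import numpy as np

signal = np.random.rand(64)
k = len(signal)
n = 3
An = (2.0 / k) * sum(signal[i] * np.cos(n * 2 * np.pi * i / k) for i in range(k))
Bn = (2.0 / k) * sum(signal[i] * np.sin(n * 2 * np.pi * i / k) for i in range(k))
mode = (2.0 / k) * np.conj(np.fft.fft(signal))[n]
print(np.allclose(complex(An, Bn), mode))  # True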
Example no. 11
 def prepare(self):
     figure(figsize=(18, 10))
     
     if self.distribution is not None:
         self.P = zeros((len(self.Xs), len(self.Ys)))
         for i in range(len(self.Xs)):
             for j in range(len(self.Ys)):
                 x = array([[self.Xs[i], self.Ys[j]]])
                 self.P[j, i] = self.distribution.log_pdf(x)  # [j, i]: rows correspond to y values
         
         self.P = exp(self.P)
Example no. 12
def get_gaussian_kernel(gaussian_kernel_width=11, gaussian_kernel_sigma=1.5):
    """Generate a gaussian kernel."""
    # 1D Gaussian kernel definition
    gaussian_kernel_1d = numpy.ndarray((gaussian_kernel_width))
    norm_mu = int(gaussian_kernel_width / 2)

    # Fill Gaussian kernel
    for i in range(gaussian_kernel_width):
        gaussian_kernel_1d[i] = (exp(-(((i - norm_mu) ** 2)) /
                                     (2 * (gaussian_kernel_sigma ** 2))))
    return gaussian_kernel_1d / numpy.sum(gaussian_kernel_1d)
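A short usage sketch: the returned kernel sums to one, and a 2D kernel can be formed as its outer product (the usual separable-filter construction; whether the calling code does this is an assumption):

import numpy

k1d = get_gaussian_kernel(11, 1.5)
print(numpy.sum(k1d))        # 1.0 (normalised)
k2d = numpy.outer(k1d, k1d)  # separable 2D Gaussian
print(k2d.shape)             # (11, 11)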
Example no. 13
def create_gaussian_kernel(gaussian_kernel_sigma = 1.5, gaussian_kernel_width = 11):
    # 1D Gaussian kernel definition
    gaussian_kernel = np.ndarray((gaussian_kernel_width))
    mu = int(gaussian_kernel_width / 2)

    #Fill Gaussian kernel
    for i in range(gaussian_kernel_width):
        gaussian_kernel[i] = (1 / (sqrt(2 * pi) * (gaussian_kernel_sigma))) * \
            exp(-(((i - mu) ** 2)) / (2 * (gaussian_kernel_sigma ** 2)))

    return gaussian_kernel
Example no. 14
 def generateInt(self, k):
     '''
     generate k numbers
     '''
     # Knuth's Poisson sampler: multiply uniforms until the product drops
     # below exp(-lambda); the number of factors needed is Poisson-distributed
     T = [0]*k
     for i in range(k):
         q = exp(-self.lamb)
         X = -1
         S = 1
         while S > q:
             U = uniform(low=0, high=1)
             S = S*U
             X = X + 1
         T[X] = T[X] + 1
     return T
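Example no. 15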
 def log_pdf(self, X, component_index_given=None):
     """
     If component_index_given is given, condition on that component;
     otherwise compute the overall log_pdf of the mixture.
     """
     if component_index_given is None:
         rez = zeros([len(X)])
         for ii in range(len(X)):
             logpdfs = zeros([self.num_components])
             for jj in range(self.num_components):
                 logpdfs[jj] = self.components[jj].log_pdf([X[ii]])
             # log-sum-exp trick: subtract the max for numerical stability
             lmax = max(logpdfs)
             rez[ii] = lmax + log(sum(self.mixing_proportion.omega * exp(logpdfs - lmax)))
         return rez
     else:
         assert(component_index_given < self.num_components)
         return self.components[component_index_given].log_pdf(X)
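The stabilised mixture sum above is the log-sum-exp trick; a standalone sketch of why the max is subtracted:

import numpy as np

logpdfs = np.asarray([-1000.0, -1001.0])  # naive exp() underflows to zero here
omega = np.asarray([0.5, 0.5])
lmax = logpdfs.max()
stable = lmax + np.log(np.sum(omega * np.exp(logpdfs - lmax)))
print(stable)  # about -1000.38, while log(sum(omega*exp(logpdfs))) gives -inf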
Example no. 16
 def kernel(self, X, Y=None):
     """
     Computes the standard Gaussian kernel k(x,y)=exp(-0.5* ||x-y||**2 / sigma**2)
     
     X - 2d array, samples on right hand side
     Y - 2d array, samples on left hand side, can be None in which case
         it is replaced by X
     """
     
     # bring to 2d array form if 1d
     assert(len(shape(X))==2)
         
      if Y is not None:
          assert(len(shape(Y))==2)
             
     # if X=Y, use more efficient pdist call which exploits symmetry
     if Y is None:
         sq_dists = squareform(pdist(X, 'sqeuclidean'))
     else:
         sq_dists = cdist(X, Y, 'sqeuclidean')
 
     K = exp(-0.5 * (sq_dists) / self.sigma ** 2)
     return K
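A standalone usage sketch of the same computation, with an assumed sigma of 2.0:

import numpy as np
from scipy.spatial.distance import pdist, squareform

sigma = 2.0  # illustrative bandwidth
X = np.random.randn(5, 3)
K = np.exp(-0.5 * squareform(pdist(X, 'sqeuclidean')) / sigma ** 2)
print(K.shape, np.allclose(np.diag(K), 1.0))  # (5, 5) True, since k(x, x) = 1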
Example no. 17
 def test_log_mean_exp(self):
     X = asarray([-1, 1])
     X = reshape(X, (len(X), 1))
     y = asarray([+1. if x >= 0 else -1. for x in X])
     covariance = SquaredExponentialCovariance(sigma=1, scale=1)
     likelihood = LogitLikelihood()
     gp = GaussianProcess(y, X, covariance, likelihood)
     laplace = LaplaceApproximation(gp, newton_start=asarray([3, 3]))
     proposal=laplace.get_gaussian()
     
     n=200
     prior = gp.get_gp_prior()
     samples = proposal.sample(n).samples
     
     log_likelihood=asarray([gp.log_likelihood(f) for f in samples])
     log_prior = prior.log_pdf(samples)
     log_proposal = proposal.log_pdf(samples)
     
     X=log_likelihood+log_prior-log_proposal
     
     a=log(mean(exp(X)))
     b=GPTools.log_mean_exp(X)
     
     self.assertLessEqual(a-b, 1e-5)
Example no. 18
    def predict_proba(self, X):
        Ps = exp(self.predict_log_proba(X))

        return array([p / s for p, s in zip(Ps, Ps.sum(1))])
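The comprehension row-normalises the exponentiated log-probabilities; a vectorised equivalent, with a stand-in array for the output of predict_log_proba:

import numpy as np

Ps = np.exp(np.asarray([[-1.0, -2.0], [-0.5, -3.0]]))  # stand-in log-probabilities, exponentiated
probs = Ps / Ps.sum(axis=1, keepdims=True)
print(probs.sum(axis=1))  # [1. 1.]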
Example no. 19
def ssim_v2(img_mat_1, img_mat_2):
    #Variables for Gaussian kernel definition
    gaussian_kernel_sigma = 1.5
    gaussian_kernel_width = 11
    gaussian_kernel = numpy.zeros(
        (gaussian_kernel_width, gaussian_kernel_width))

    #Fill Gaussian kernel (centred at width // 2, i.e. index 5 for width 11)
    center = gaussian_kernel_width // 2
    for i in range(gaussian_kernel_width):
        for j in range(gaussian_kernel_width):
            gaussian_kernel[i,j]=\
            (1/(2*pi*(gaussian_kernel_sigma**2)))*\
            exp(-(((i-center)**2)+((j-center)**2))/(2*(gaussian_kernel_sigma**2)))

    #Convert image matrices to double precision (like in the Matlab version)
    img_mat_1 = img_mat_1.astype(numpy.float)
    img_mat_2 = img_mat_2.astype(numpy.float)

    #Squares of input matrices
    img_mat_1_sq = img_mat_1**2
    img_mat_2_sq = img_mat_2**2
    img_mat_12 = img_mat_1 * img_mat_2

    #Means obtained by Gaussian filtering of inputs
    img_mat_mu_1 = scipy.ndimage.filters.convolve(img_mat_1, gaussian_kernel)
    img_mat_mu_2 = scipy.ndimage.filters.convolve(img_mat_2, gaussian_kernel)

    #Squares of means
    img_mat_mu_1_sq = img_mat_mu_1**2
    img_mat_mu_2_sq = img_mat_mu_2**2
    img_mat_mu_12 = img_mat_mu_1 * img_mat_mu_2

    #Variances obtained by Gaussian filtering of inputs' squares
    img_mat_sigma_1_sq = scipy.ndimage.filters.convolve(
        img_mat_1_sq, gaussian_kernel)
    img_mat_sigma_2_sq = scipy.ndimage.filters.convolve(
        img_mat_2_sq, gaussian_kernel)

    #Covariance
    img_mat_sigma_12 = scipy.ndimage.filters.convolve(img_mat_12,
                                                      gaussian_kernel)

    #Centered squares of variances
    img_mat_sigma_1_sq = img_mat_sigma_1_sq - img_mat_mu_1_sq
    img_mat_sigma_2_sq = img_mat_sigma_2_sq - img_mat_mu_2_sq
    img_mat_sigma_12 = img_mat_sigma_12 - img_mat_mu_12

    #c1/c2 constants
    #First use: manual fitting
    c_1 = 6.5025
    c_2 = 58.5225

    #Second use: change k1,k2 & c1,c2 depend on L (width of color map)
    l = 255
    k_1 = 0.01
    c_1 = (k_1 * l)**2
    k_2 = 0.03
    c_2 = (k_2 * l)**2

    #Numerator of SSIM
    num_ssim = (2 * img_mat_mu_12 + c_1) * (2 * img_mat_sigma_12 + c_2)
    #Denominator of SSIM
    den_ssim=(img_mat_mu_1_sq+img_mat_mu_2_sq+c_1)*\
    (img_mat_sigma_1_sq+img_mat_sigma_2_sq+c_2)
    #SSIM
    ssim_map = num_ssim / den_ssim
    index = numpy.average(ssim_map)

    return index
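A small usage sketch (assuming the numpy/scipy imports used by the function are in scope): identical images score exactly 1.0, and added noise lowers the index.

import numpy

img_a = numpy.random.rand(32, 32) * 255
img_b = img_a + 5 * numpy.random.randn(32, 32)
print(ssim_v2(img_a, img_a))  # 1.0
print(ssim_v2(img_a, img_b))  # slightly below 1.0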
Example no. 20
 def inverseOfIncreasingExponentialFunction(p, y):
     ''' Inverse exponential function: x = e^(log(y/p[0])/p[1])    '''
     return exp(log(y / p[0]) / p[1])
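Since e^(log(y/p[0])/p[1]) equals (y/p[0])**(1/p[1]), this inverts y = p[0] * x**p[1]; a quick check with illustrative parameters:

from numpy import exp, log

p = [2.0, 3.0]
x = 2.0
y = p[0] * x ** p[1]              # 16.0
print(exp(log(y / p[0]) / p[1]))  # 2.0, recovers x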
Example no. 21
 def inverseOfIncreasingExponentialFunction(p, y):
     ''' Inverse exponential function: x = e^(log(y/p[0])/p[1])    '''
     return exp(log(y/p[0])/p[1])
Example no. 22
    def compute_ssim(self, im1, im2):
        """
        The function to compute SSIM
        @param im1: PIL Image object, or grayscale ndarray
        @param im2: PIL Image object, or grayscale ndarray
        @return: SSIM float value
        """

        # 2D Gaussian kernel definition (gaussian() builds the full 2D kernel
        # directly; an earlier element-wise 1D fill here was dead code)
        gshape = (self.gaussian_kernel_width, self.gaussian_kernel_width)
        gsigma = (self.gaussian_kernel_sigma, self.gaussian_kernel_sigma)
        gaussian_kernel_2d = gaussian(shape=gshape, sigma=gsigma)
        # convert the images to grayscale
        if im1.__class__.__name__ == 'Image':
            img_mat_1, img_alpha_1 = _to_grayscale(im1)
            # don't count pixels where both images are both fully transparent
            #if img_alpha_1 is not None:
            #img_mat_1[img_alpha_1 == 255] = 0
        else:
            img_mat_1 = im1
        if im2.__class__.__name__ == 'Image':
            img_mat_2, img_alpha_2 = _to_grayscale(im2)
            #if img_alpha_2 is not None:
            #img_mat_2[img_alpha_2 == 255] = 0
        else:
            img_mat_2 = im2

        #Squares of input matrices
        img_mat_1_sq = img_mat_1**2
        img_mat_2_sq = img_mat_2**2
        img_mat_12 = img_mat_1 * img_mat_2

        #Means obtained by Gaussian filtering of inputs
        img_mat_mu_1 = self.convolve_gaussian_2d(img_mat_1, gaussian_kernel_2d)
        img_mat_mu_2 = self.convolve_gaussian_2d(img_mat_2, gaussian_kernel_2d)

        #Squares of means
        img_mat_mu_1_sq = img_mat_mu_1**2
        img_mat_mu_2_sq = img_mat_mu_2**2
        img_mat_mu_12 = img_mat_mu_1 * img_mat_mu_2

        #Variances obtained by Gaussian filtering of inputs' squares
        img_mat_sigma_1_sq = self.convolve_gaussian_2d(img_mat_1_sq,
                                                       gaussian_kernel_2d)
        img_mat_sigma_2_sq = self.convolve_gaussian_2d(img_mat_2_sq,
                                                       gaussian_kernel_2d)

        #Covariance
        img_mat_sigma_12 = self.convolve_gaussian_2d(img_mat_12,
                                                     gaussian_kernel_2d)

        #Centered squares of variances
        img_mat_sigma_1_sq -= img_mat_mu_1_sq
        img_mat_sigma_2_sq -= img_mat_mu_2_sq
        img_mat_sigma_12 = img_mat_sigma_12 - img_mat_mu_12

        #set k1,k2 & c1,c2 to depend on L (width of color map)
        #l = 255
        k_1 = self.K[0]
        c_1 = (k_1 * self.L)**2
        k_2 = self.K[1]
        c_2 = (k_2 * self.L)**2

        #Numerator of SSIM
        num_ssim = (2 * img_mat_mu_12 + c_1) * (2 * img_mat_sigma_12 + c_2)

        #Denominator of SSIM
        den_ssim = (img_mat_mu_1_sq + img_mat_mu_2_sq + c_1) * \
          (img_mat_sigma_1_sq + img_mat_sigma_2_sq + c_2)

        #SSIM
        ssim_map = num_ssim / den_ssim
        index = np.average(ssim_map)

        return index
Example no. 23
 def _getAtomProbMB(self, idxGndAtom, wt, relevantGroundFormulas=None):
     '''            
     gets the probability of the ground atom with index idxGndAtom when given its Markov blanket (evidence set)
     using the specified weight vector
     '''
     #old_tv = self._getEvidence(idxGndAtom)
     # check if the ground atom is in a block
     block = None
     if idxGndAtom in self.mrf.gndBlockLookup and self.pmbMethod != 'old':
         blockname = self.mrf.gndBlockLookup[idxGndAtom]
         block = self.mrf.gndBlocks[blockname]   # list of gnd atom indices that are in the block
         sums = [0 for i in range(len(block))]   # init sum of weights for each possible assignment of block
                                                 # sums[i] = sum of weights for assignment where the block[i] is set to true
         idxBlockMainGA = block.index(idxGndAtom)
         # find out which one of the ground atoms in the block is true
         idxGATrueone = -1
         for i in block:
             if self.mrf._getEvidence(i):
                 if idxGATrueone != -1: raise Exception("More than one true ground atom in block %s!" % blockname)
                 idxGATrueone = i                    
         if idxGATrueone == -1: raise Exception("No true gnd atom in block %s!" % blockname)
         mainAtomIsTrueone = idxGATrueone == idxGndAtom
     else: # not in block
         wts_inverted = 0
         wts_regular = 0
         wr, wi = [], []
     # determine the set of ground formulas to consider
     checkRelevance = False
      if relevantGroundFormulas is None:
         try:
             relevantGroundFormulas = self.atomRelevantGFs[idxGndAtom]
         except:
             relevantGroundFormulas = self.mrf.gndFormulas
             checkRelevance = True
     # check the ground formulas
     #print self.gndAtomsByIdx[idxGndAtom]
      if self.pmbMethod == 'old' or block is None: # old method (only consider formulas that contain the ground atom)
         for gf in relevantGroundFormulas:
             if checkRelevance:
                 if not gf.containsGndAtom(idxGndAtom):
                     continue
             # gnd atom maintains regular truth value
             prob1 = self._getTruthDegreeGivenEvidence(gf)
             #print "gf: ", str(gf), " index: ", gf.idxFormula, ", wt size:", len(wt), " formula size:", len(self.formulas)
             if prob1 > 0:
                 wts_regular += wt[gf.idxFormula] * prob1
                 wr.append(wt[gf.idxFormula] * prob1)
             # flipped truth value
             #self._setTemporaryEvidence(idxGndAtom, not old_tv)
             self._setInvertedEvidence(idxGndAtom)
             #if self._isTrueGndFormulaGivenEvidence(gf):
             #    wts_inverted += wt[gf.idxFormula]
             #    wi.append(wt[gf.idxFormula])
             prob2 = self._getTruthDegreeGivenEvidence(gf)
             if prob2 > 0:
                 wts_inverted += wt[gf.idxFormula] * prob2
                 wi.append(wt[gf.idxFormula] * prob2)
             #self._removeTemporaryEvidence()
             #print "  F%d %f %s %f -> %f" % (gf.idxFormula, wt[gf.idxFormula], str(gf), prob1, prob2)
             self._setInvertedEvidence(idxGndAtom)
         #print "  %s %s" % (wts_regular, wts_inverted)
         return exp(wts_regular) / (exp(wts_regular) + exp(wts_inverted))
     elif self.pmbMethod == 'excl' or self.pmbMethod == 'excl2': # new method (consider all the formulas that contain one of the ground atoms in the same block as the ground atom)
         for gf in relevantGroundFormulas: # !!! here the relevant ground formulas may not be sufficient!!!! they are different than in the other case
             # check if one of the ground atoms in the block appears in the ground formula
             if checkRelevance:
                 gfRelevant = False
                 for i in block:
                     if gf.containsGndAtom(i):
                         gfRelevant = True
                         break
                 if not gfRelevant: continue
             # make each one of the ground atoms in the block true once
             idxSum = 0
             for i in block:
                 # set the i-th variable in the block to true
                 if i != idxGATrueone:
                     self.mrf._setTemporaryEvidence(i, True)
                     self.mrf._setTemporaryEvidence(idxGATrueone, False)
                 # is the formula true?
                 if self.mrf._isTrueGndFormulaGivenEvidence(gf):
                     sums[idxSum] += wt[gf.idxFormula]
                 # restore truth values
                 self.mrf._removeTemporaryEvidence()
                 idxSum += 1
         expsums = map(exp, sums)
         if self.pmbMethod == 'excl':
             if mainAtomIsTrueone:
                 return expsums[idxBlockMainGA] / sum(expsums)
             else:
                 s = sum(expsums)
                 return (s - expsums[idxBlockMainGA]) / s
         elif self.pmbMethod == 'excl2':
             if mainAtomIsTrueone:
                 return expsums[idxBlockMainGA] / sum(expsums)
             else:
                 idxBlockTrueone = block.index(idxGATrueone)
                 return expsums[idxBlockTrueone] / (expsums[idxBlockTrueone] + expsums[idxBlockMainGA])
     else:
         raise Exception("Unknown pmbMethod '%s'" % self.pmbMethod)
Example no. 24
def compute_ssim(im1, im2, gaussian_kernel_sigma=1.5, gaussian_kernel_width=11):
    """
    The function to compute SSIM
    @param im1: PIL Image object
    @param im2: PIL Image object
    @return: SSIM float value
    """

    # 1D Gaussian kernel definition
    gaussian_kernel_1d = numpy.ndarray((gaussian_kernel_width))
    mu = int(gaussian_kernel_width / 2)

    #Fill Gaussian kernel
    for i in xrange(gaussian_kernel_width):
        gaussian_kernel_1d[i] = (1 / (sqrt(2 * pi) * (gaussian_kernel_sigma))) * \
            exp(-(((i - mu) ** 2)) / (2 * (gaussian_kernel_sigma ** 2)))

    # convert the images to grayscale
    img_mat_1, img_alpha_1 = _to_grayscale(im1)
    img_mat_2, img_alpha_2 = _to_grayscale(im2)

    # don't count pixels where both images are both fully transparent
    if img_alpha_1 is not None and img_alpha_2 is not None:
        img_mat_1[img_alpha_1 == 255] = 0
        img_mat_2[img_alpha_2 == 255] = 0

    #Squares of input matrices
    img_mat_1_sq = img_mat_1 ** 2
    img_mat_2_sq = img_mat_2 ** 2
    img_mat_12 = img_mat_1 * img_mat_2

    #Means obtained by Gaussian filtering of inputs
    img_mat_mu_1 = convolve_gaussian_2d(img_mat_1, gaussian_kernel_1d)
    img_mat_mu_2 = convolve_gaussian_2d(img_mat_2, gaussian_kernel_1d)

    #Squares of means
    img_mat_mu_1_sq = img_mat_mu_1 ** 2
    img_mat_mu_2_sq = img_mat_mu_2 ** 2
    img_mat_mu_12 = img_mat_mu_1 * img_mat_mu_2

    #Variances obtained by Gaussian filtering of inputs' squares
    img_mat_sigma_1_sq = convolve_gaussian_2d(img_mat_1_sq, gaussian_kernel_1d)
    img_mat_sigma_2_sq = convolve_gaussian_2d(img_mat_2_sq, gaussian_kernel_1d)

    #Covariance
    img_mat_sigma_12 = convolve_gaussian_2d(img_mat_12, gaussian_kernel_1d)

    #Centered squares of variances
    img_mat_sigma_1_sq -= img_mat_mu_1_sq
    img_mat_sigma_2_sq -= img_mat_mu_2_sq
    img_mat_sigma_12 = img_mat_sigma_12 - img_mat_mu_12

    #set k1,k2 & c1,c2 to depend on L (width of color map)
    l = 255
    k_1 = 0.01
    c_1 = (k_1 * l) ** 2
    k_2 = 0.03
    c_2 = (k_2 * l) ** 2

    #Numerator of SSIM
    num_ssim = (2 * img_mat_mu_12 + c_1) * (2 * img_mat_sigma_12 + c_2)

    #Denominator of SSIM
    den_ssim = (img_mat_mu_1_sq + img_mat_mu_2_sq + c_1) * \
               (img_mat_sigma_1_sq + img_mat_sigma_2_sq + c_2)

    #SSIM
    ssim_map = num_ssim / den_ssim
    index = numpy.average(ssim_map)

    return index
Example no. 25
 def test_log_sum_exp(self):
     X=asarray([0.1,0.2,0.3,0.4])
     direct=log(sum(exp(X)))
     indirect=GPTools.log_sum_exp(X)
     self.assertLessEqual(norm(direct-indirect), 1e-10)
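A minimal sketch of what GPTools.log_sum_exp presumably computes (the actual implementation is an assumption):

from numpy import asarray, exp, log

def log_sum_exp(X):
    # shift by the max so the largest exponent is zero, avoiding overflow
    m = X.max()
    return m + log(sum(exp(X - m)))

print(log_sum_exp(asarray([0.1, 0.2, 0.3, 0.4])))  # matches log(sum(exp(X)))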
Example no. 26
 def log_lik_grad_vector(self, y, f):
     s = asarray([min(x, 0) for x in f])
     # numerically stable sigmoid(f): division, not multiplication
     p = exp(s) / (exp(s) + exp(s - f))
     dlp = (y + 1) / 2 - p
     return dlp
Example no. 27
 def log_lik_hessian_vector(self, y, f):
     s = asarray([min(x, 0) for x in f])
     # numerically stable -sigmoid(f)*(1-sigmoid(f))
     d2lp = -exp(2 * s - f) / (exp(s) + exp(s - f))**2
     return d2lp
Example no. 28
 def getP(self, sample):
     p = self._multConst * exp(-(1 / 2.0) * (transpose(sample - self.mu)*self._invCov*(sample - self.mu)))
     return p
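For reference, a standalone sketch of the same density, assuming _multConst is the usual multivariate Gaussian normalisation constant (names here are hypothetical):

import numpy as np

def gaussian_density(sample, mu, cov):
    # (2*pi)^(-d/2) |cov|^(-1/2) exp(-0.5 (x-mu)^T cov^{-1} (x-mu))
    d = len(mu)
    mult_const = 1.0 / np.sqrt((2 * np.pi) ** d * np.linalg.det(cov))
    diff = np.asarray(sample) - np.asarray(mu)
    return mult_const * np.exp(-0.5 * diff.dot(np.linalg.solve(cov, diff)))

print(gaussian_density(np.zeros(2), np.zeros(2), np.eye(2)))  # 1/(2*pi) ~ 0.159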
Example no. 29
def compute_ssim(img_mat_1, img_mat_2):
    #Variables for Gaussian kernel definition
    gaussian_kernel_sigma=1.5
    gaussian_kernel_width=11
    gaussian_kernel=numpy.zeros((gaussian_kernel_width,gaussian_kernel_width))
    
    #Fill Gaussian kernel (centred at width // 2, i.e. index 5 for width 11)
    center = gaussian_kernel_width // 2
    for i in range(gaussian_kernel_width):
        for j in range(gaussian_kernel_width):
            gaussian_kernel[i,j]=\
            (1/(2*pi*(gaussian_kernel_sigma**2)))*\
            exp(-(((i-center)**2)+((j-center)**2))/(2*(gaussian_kernel_sigma**2)))

    #Convert image matrices to double precision (like in the Matlab version)
    img_mat_1=img_mat_1.astype(numpy.float)
    img_mat_2=img_mat_2.astype(numpy.float)
    
    #Squares of input matrices
    img_mat_1_sq=img_mat_1**2
    img_mat_2_sq=img_mat_2**2
    img_mat_12=img_mat_1*img_mat_2
    
    #Means obtained by Gaussian filtering of inputs
    img_mat_mu_1=scipy.ndimage.filters.convolve(img_mat_1,gaussian_kernel)
    img_mat_mu_2=scipy.ndimage.filters.convolve(img_mat_2,gaussian_kernel)
        
    #Squares of means
    img_mat_mu_1_sq=img_mat_mu_1**2
    img_mat_mu_2_sq=img_mat_mu_2**2
    img_mat_mu_12=img_mat_mu_1*img_mat_mu_2
    
    #Variances obtained by Gaussian filtering of inputs' squares
    img_mat_sigma_1_sq=scipy.ndimage.filters.convolve(img_mat_1_sq,gaussian_kernel)
    img_mat_sigma_2_sq=scipy.ndimage.filters.convolve(img_mat_2_sq,gaussian_kernel)
    
    #Covariance
    img_mat_sigma_12=scipy.ndimage.filters.convolve(img_mat_12,gaussian_kernel)
    
    #Centered squares of variances
    img_mat_sigma_1_sq=img_mat_sigma_1_sq-img_mat_mu_1_sq
    img_mat_sigma_2_sq=img_mat_sigma_2_sq-img_mat_mu_2_sq
    img_mat_sigma_12=img_mat_sigma_12-img_mat_mu_12
    
    #c1/c2 constants
    #First use: manual fitting
    c_1=6.5025
    c_2=58.5225
    
    #Second use: change k1,k2 & c1,c2 depend on L (width of color map)
    l=255
    k_1=0.01
    c_1=(k_1*l)**2
    k_2=0.03
    c_2=(k_2*l)**2
    
    #Numerator of SSIM
    num_ssim=(2*img_mat_mu_12+c_1)*(2*img_mat_sigma_12+c_2)
    #Denominator of SSIM
    den_ssim=(img_mat_mu_1_sq+img_mat_mu_2_sq+c_1)*\
    (img_mat_sigma_1_sq+img_mat_sigma_2_sq+c_2)
    #SSIM
    ssim_map=num_ssim/den_ssim
    index=numpy.average(ssim_map)

    return index
Example no. 30
 def log_lik_vector(self, y, f):
     s = -y * f
     # stable evaluation of log(sigmoid(y*f)) = -log(1 + exp(s)):
     # use max(s, 0) so every exponent below is non-positive
     ps = asarray([max(x, 0) for x in s])
     lp = -(ps + log(exp(-ps) + exp(s - ps)))
     return lp
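A quick numerical check of the identity used above, in standalone form:

import numpy as np

y = np.asarray([1.0, -1.0, 1.0])
f = np.asarray([800.0, 800.0, 0.5])  # y*f of +/-800 would break a naive exp
s = -y * f
ps = np.maximum(s, 0)
lp = -(ps + np.log(np.exp(-ps) + np.exp(s - ps)))
print(np.allclose(lp, -np.logaddexp(0.0, s)))  # True: both equal log(sigmoid(y*f))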
Example no. 31
 def log_lik_vector_multiple(self, y, F):
     S = -y * F
     # stable softplus per element: max(s, 0) keeps all exponents non-positive
     PS = asarray([asarray([max(x, 0) for x in s]) for s in S])
     LP = -(PS + log(exp(-PS) + exp(S - PS)))
     return LP
Example no. 32
 def log_lik_grad_vector(self, y, f):
     s = asarray([min(x, 0) for x in f])
     # numerically stable sigmoid(f): division, not multiplication
     p = exp(s) / (exp(s) + exp(s - f))
     dlp = (y + 1) / 2 - p
     return dlp
Example no. 33
 def log_lik_vector(self, y, f):
     s = -y * f
     # stable evaluation of log(sigmoid(y*f)) = -log(1 + exp(s))
     ps = asarray([max(x, 0) for x in s])
     lp = -(ps + log(exp(-ps) + exp(s - ps)))
     return lp
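Example no. 34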
 def scale_adapt(self, learn_scale, step_output):
     which_component = step_output.sample.which_component
     # Robbins-Monro update of the per-component scale towards the target acceptance rate accstar
     self.dwscale[which_component] = exp(log(self.dwscale[which_component]) + learn_scale * (exp(step_output.log_ratio) - self.accstar))
Example no. 35
def compute_ssim(im1, im2, gaussian_kernel_sigma=1.5, gaussian_kernel_width=11):
    """
    The function to compute SSIM
    @param im1: PIL Image object
    @param im2: PIL Image object
    @return: SSIM float value
    """
    
    # 1D Gaussian kernel definition
    gaussian_kernel_1d = numpy.ndarray((gaussian_kernel_width))
    mu = int(gaussian_kernel_width / 2)

    #Fill Gaussian kernel
    for i in xrange(gaussian_kernel_width):
        gaussian_kernel_1d[i] = (1 / (sqrt(2 * pi) * (gaussian_kernel_sigma))) * \
            exp(-(((i - mu) ** 2)) / (2 * (gaussian_kernel_sigma ** 2)))

    # convert the images to grayscale
    img_mat_1, img_alpha_1 = _to_grayscale(im1)
    img_mat_2, img_alpha_2 = _to_grayscale(im2)
    
    # don't count pixels where both images are both fully transparent
    if img_alpha_1 is not None and img_alpha_2 is not None:
        img_mat_1[img_alpha_1 == 255] = 0
        img_mat_2[img_alpha_2 == 255] = 0
    
    #Squares of input matrices
    img_mat_1_sq = img_mat_1 ** 2
    img_mat_2_sq = img_mat_2 ** 2
    img_mat_12 = img_mat_1 * img_mat_2
    
    #Means obtained by Gaussian filtering of inputs
    img_mat_mu_1 = convolve_gaussian_2d(img_mat_1, gaussian_kernel_1d)
    img_mat_mu_2 = convolve_gaussian_2d(img_mat_2, gaussian_kernel_1d)
    
    #Squares of means
    img_mat_mu_1_sq = img_mat_mu_1 ** 2
    img_mat_mu_2_sq = img_mat_mu_2 ** 2
    img_mat_mu_12 = img_mat_mu_1 * img_mat_mu_2
    
    #Variances obtained by Gaussian filtering of inputs' squares
    img_mat_sigma_1_sq = convolve_gaussian_2d(img_mat_1_sq, gaussian_kernel_1d)
    img_mat_sigma_2_sq = convolve_gaussian_2d(img_mat_2_sq, gaussian_kernel_1d)
    
    #Covariance
    img_mat_sigma_12 = convolve_gaussian_2d(img_mat_12, gaussian_kernel_1d)
    
    #Centered squares of variances
    img_mat_sigma_1_sq -= img_mat_mu_1_sq
    img_mat_sigma_2_sq -= img_mat_mu_2_sq
    img_mat_sigma_12 = img_mat_sigma_12 - img_mat_mu_12
    
    #set k1,k2 & c1,c2 to depend on L (width of color map)
    l = 255
    k_1 = 0.01
    c_1 = (k_1 * l) ** 2
    k_2 = 0.03
    c_2 = (k_2 * l) ** 2
    
    #Numerator of SSIM
    num_ssim = (2 * img_mat_mu_12 + c_1) * (2 * img_mat_sigma_12 + c_2)
    
    #Denominator of SSIM
    den_ssim = (img_mat_mu_1_sq + img_mat_mu_2_sq + c_1) * \
               (img_mat_sigma_1_sq + img_mat_sigma_2_sq + c_2)
    
    #SSIM
    ssim_map = num_ssim / den_ssim
    index = numpy.average(ssim_map)

    return index
Example no. 36
 def inverseOfDecreasingExponentialFunction(p, y):
     """ Inverse exponential funcion: x = e^-(log(y/p[0])/p[1])    """
     return exp(-1 * log(y / p[0]) / p[1])
Example no. 37
 def test_log_sum_exp(self):
     X = asarray([0.1, 0.2, 0.3, 0.4])
     direct = log(sum(exp(X)))
     indirect = GPTools.log_sum_exp(X)
     self.assertLessEqual(norm(direct - indirect), 1e-10)
Example no. 38
    def GetVelFromQ(self, el):
        '''
        Compute the velocity profile from the flow rate,
        using the inverse Womersley method of Cezeaux et al. (1997).
        '''
        self.radius = mean(el.Radius)
        self.Res = el.R
        self.length = el.Length
        self.Name = el.Name
        Flow = mean(self.signal)

        #WOMERSLEY NUMBER
        self.alpha = self.radius * sqrt(
            (2.0 * pi * self.density) / (self.tPeriod * self.viscosity))
        self.Wom = self.alpha
        self.Re = (2.0 * Flow * self.SimulationContext.Context['blood_density']
                   ) / (pi * self.radius *
                        self.SimulationContext.Context['dynamic_viscosity'])

        #FOURIER SIGNAL
        k = len(self.signal)
        n = 0
        while n < (self.nHarmonics):
            An = 0
            Bn = 0
            for i in arange(k):
                An += self.signal[i] * cos(
                    n * (2.0 * pi / self.tPeriod) * self.dt * self.nSteps[i])
                Bn += self.signal[i] * sin(
                    n * (2.0 * pi / self.tPeriod) * self.dt * self.nSteps[i])
            An = An * (2.0 / k)
            Bn = Bn * (2.0 / k)
            self.fourierModes.append(complex(An, Bn))
            n += 1

        self.fourierModes[0] *= 0.5  # mean flow, as expected; defined in the xml input file

        self.Steps = linspace(0, self.tPeriod, self.samples)

        self.VelRadius = {}
        self.VelRadiusSteps = {}
        self.VelocityPlot = {}
        for step in self.Steps:
            self.Velocity = {}
            y = -1  # radius from -1 to 1, 200 points.
            while y <= 1.:
                self.VelRadius[y] = 2 * (1.0**2 - y**2) * self.fourierModes[0]
                y += 0.01

            k = 1
            while k < self.nHarmonics:
                cI = complex(0., 1.)
                cA = (self.alpha * pow((1.0 * k), 0.5)) * pow(cI, 1.5)
                c1 = 2.0 * jn(1, cA)
                c0 = cA * jn(0, cA)
                cT = complex(0, -2.0 * pi * k * self.t / self.tPeriod)
                y = -1  # from -1 to 1 (y = 0 is the centerline)
                while y <= 1.0:
                    '''vel computation'''
                    c0_y = cA * jn(0, (cA * y))
                    vNum = c0 - c0_y
                    vDen = c0 - c1
                    vFract = vNum / vDen
                    cV = self.fourierModes[k] * exp(cT) * vFract
                    self.VelRadius[y] += cV.real  # velocity at the non-dimensionalised radius
                    self.Velocity[y] = self.VelRadius[y].real
                    y += 0.01
                k += 1

            unsortedRadii = []
            for rad, vel in self.Velocity.iteritems():
                unsortedRadii.append(rad)
            radii = sorted(unsortedRadii)

            self.VelPlot = []
            for x in radii:
                for rad, vel in self.Velocity.iteritems():
                    if x == rad:
                        self.VelPlot.append(vel * (100.0 /
                                                   (self.radius**2 * pi)))
            self.VelocityPlot[step] = self.VelPlot
            self.t += self.dtPlot
Example no. 39
 def likelihood(self, wt):
     l = self.f(wt)
     l = exp(l)
     return l
Example no. 40
    def GetWssPeaks(self, el, flowsig):
        '''
        Return the WSS peaks along the element;
        WSS at s=0 and s=1 is computed.
        '''
        r0 = el.Radius[0]
        r1 = el.Radius[len(el.Radius) - 1]
        r01Signal = []

        for sig in flowsig:
            r01Signal.append(sig)

        self.nSteps = arange(0, len(r01Signal), 1)
        self.dt = self.tPeriod / (len(self.nSteps) - 1)
        self.dtPlot = self.tPeriod / self.samples
        fourierModes = []

        #Computing for s=0
        r0WssSignal = []
        #WOMERSLEY NUMBER
        r0Alpha = r0 * sqrt(
            (2.0 * pi * self.density) / (self.tPeriod * self.viscosity))

        #Computing for s=1
        r1WssSignal = []
        #WOMERSLEY NUMBER
        r1Alpha = r1 * sqrt(
            (2.0 * pi * self.density) / (self.tPeriod * self.viscosity))

        k01 = len(r01Signal)
        n = 0
        while n < (self.nHarmonics):
            An = 0
            Bn = 0
            for i in arange(k01):
                An += r01Signal[i] * cos(
                    n * (2.0 * pi / self.tPeriod) * self.dt * self.nSteps[i])
                Bn += r01Signal[i] * sin(
                    n * (2.0 * pi / self.tPeriod) * self.dt * self.nSteps[i])
            An = An * (2.0 / k01)
            Bn = Bn * (2.0 / k01)
            fourierModes.append(complex(An, Bn))
            n += 1

        self.Steps = linspace(0, self.tPeriod, self.samples)
        for step in self.Steps:
            tao0 = -fourierModes[0].real * 2.0
            tao1 = -fourierModes[0].real * 2.0
            k = 1
            while k < self.nHarmonics:
                cI = complex(0., 1.)
                cA_0 = (r0Alpha * pow((1.0 * k), 0.5)) * pow(cI, 1.5)
                c1_0 = 2.0 * jn(1, cA_0)
                c0_0 = cA_0 * jn(0, cA_0)
                cA_1 = (r1Alpha * pow((1.0 * k), 0.5)) * pow(cI, 1.5)
                c1_1 = 2.0 * jn(1, cA_1)
                c0_1 = cA_1 * jn(0, cA_1)
                cT = complex(0, -2.0 * pi * k * self.t / self.tPeriod)
                '''R0: Wall shear stress computation'''
                taoNum_0 = r0Alpha**2 * cI**3 * jn(1, cA_0)
                taoDen_0 = c0_0 - c1_0
                taoFract_0 = taoNum_0 / taoDen_0
                cTao_0 = fourierModes[k] * exp(cT) * taoFract_0
                tao0 += cTao_0.real
                '''R1: Wall shear stress computation'''
                taoNum_1 = r1Alpha**2 * cI**3 * jn(1, cA_1)
                taoDen_1 = c0_1 - c1_1
                taoFract_1 = taoNum_1 / taoDen_1
                cTao_1 = fourierModes[k] * exp(cT) * taoFract_1
                tao1 += cTao_1.real

                k += 1

            tao0 *= -(self.viscosity / (r0**3 * pi))
            r0WssSignal.append(tao0)
            tao1 *= -(self.viscosity / (r1**3 * pi))
            r1WssSignal.append(tao1)
            self.t += self.dtPlot

        r0Peak = max(r0WssSignal)
        r1Peak = max(r1WssSignal)
        return r0Peak, r1Peak
Example no. 41
def compute_ssim(img_mat_1, img_mat_2):
    """Compute the Structure Similary Index between two images
    
    See:  Z. Wang, A. C. Bovik, H. R. Sheikh and E. P. Simoncelli, "Image quality assessment: From error visibility to structural similarity," IEEE Transactions on Image Processing, vol. 13, no. 4, pp. 600-612, Apr. 2004.
    Implementation after https://ece.uwaterloo.ca/~z70wang/research/ssim/
    """
    #Variables for Gaussian kernel definition
    gaussian_kernel_sigma=1.5
    gaussian_kernel_width=11
    gaussian_kernel=np.zeros((gaussian_kernel_width,gaussian_kernel_width))
    
    #Fill Gaussian kernel (centred at width // 2, i.e. index 5 for width 11)
    center = gaussian_kernel_width // 2
    for i in range(gaussian_kernel_width):
        for j in range(gaussian_kernel_width):
            gaussian_kernel[i,j]=\
            (1/(2*pi*(gaussian_kernel_sigma**2)))*\
            exp(-(((i-center)**2)+((j-center)**2))/(2*(gaussian_kernel_sigma**2)))

    #Convert image matrices to double precision (like in the Matlab version)
    img_mat_1=img_mat_1.astype(np.float)
    img_mat_2=img_mat_2.astype(np.float)
    
    #Squares of input matrices
    img_mat_1_sq=img_mat_1**2
    img_mat_2_sq=img_mat_2**2
    img_mat_12=img_mat_1*img_mat_2
    
    #Means obtained by Gaussian filtering of inputs
    img_mat_mu_1=scipy.ndimage.filters.convolve(img_mat_1,gaussian_kernel)
    img_mat_mu_2=scipy.ndimage.filters.convolve(img_mat_2,gaussian_kernel)
        
    #Squares of means
    img_mat_mu_1_sq=img_mat_mu_1**2
    img_mat_mu_2_sq=img_mat_mu_2**2
    img_mat_mu_12=img_mat_mu_1*img_mat_mu_2
    
    #Variances obtained by Gaussian filtering of inputs' squares
    img_mat_sigma_1_sq=scipy.ndimage.filters.convolve(img_mat_1_sq,gaussian_kernel)
    img_mat_sigma_2_sq=scipy.ndimage.filters.convolve(img_mat_2_sq,gaussian_kernel)
    
    #Covariance
    img_mat_sigma_12=scipy.ndimage.filters.convolve(img_mat_12,gaussian_kernel)
    
    #Centered squares of variances
    img_mat_sigma_1_sq=img_mat_sigma_1_sq-img_mat_mu_1_sq
    img_mat_sigma_2_sq=img_mat_sigma_2_sq-img_mat_mu_2_sq
    img_mat_sigma_12=img_mat_sigma_12-img_mat_mu_12
    
    #c1/c2 constants
    #First use: manual fitting
    c_1=6.5025
    c_2=58.5225
    
    #Second use: change k1,k2 & c1,c2 depend on L (width of color map)
    l=255
    k_1=0.01
    c_1=(k_1*l)**2
    k_2=0.03
    c_2=(k_2*l)**2
    
    #Numerator of SSIM
    num_ssim=(2*img_mat_mu_12+c_1)*(2*img_mat_sigma_12+c_2)
    #Denominator of SSIM
    den_ssim=(img_mat_mu_1_sq+img_mat_mu_2_sq+c_1)*\
    (img_mat_sigma_1_sq+img_mat_sigma_2_sq+c_2)
    #SSIM
    ssim_map=num_ssim/den_ssim
    index=np.average(ssim_map)

    return index
Example no. 42
def find_bursts(duration, dt, transient, N, M_t, M_i, max_freq):
    base = 2  #round lgbinwidth to nearest 2 so will always divide into durations
    expnum = 2.0264 * exp(-0.2656 * max_freq + 2.9288) + 5.7907
    lgbinwidth = (int(base * round(
        (-max_freq + 33) / base))) * ms  #23-good for higher freq stuff
    #lgbinwidth=(int(base*round((expnum)/base)))/1000   #use exptl based on some fit of choice binwidths
    #lgbinwidth=10*ms

    numlgbins = int(ceil(duration / lgbinwidth))
    #totspkhist=zeros((numlgbins,1))
    totspkhist = zeros(numlgbins)
    #totspkdist_smooth=zeros((numlgbins,1))
    skiptime = transient * ms
    skipbin = int(ceil(skiptime / lgbinwidth))

    inc_past_thresh = []
    dec_past_thresh = []

    #Create histogram given the bins calculated
    for i in xrange(numlgbins):
        step_start = (i) * lgbinwidth
        step_end = (i + 1) * lgbinwidth
        totspkhist[i] = len(M_i[logical_and(M_t > step_start, M_t < step_end)])

    ###smooth plot first so thresholds work better
    #totspkhist_1D=reshape(totspkhist,len(totspkhist))  #first just reshape so single row not single colm
    #b,a=butter(3,0.4,'low')
    #totspkhist_smooth=filtfilt(b,a,totspkhist_1D)

    #totspkhist_smooth=reshape(totspkhist,len(totspkhist))  #here we took out the actual smoothing and left it as raw distn. here just reshape so single row not single colm
    totspkdist_smooth = totspkhist / max(
        totspkhist[skipbin:]
    )  #create distn based on hist, but skip first skiptime to cut out transient excessive spiking

    #    ####### FOR MOVING THRESHOLD #################
    ## find points where increases and decreases over some threshold
    dist_thresh = []
    thresh_plot = []

    mul_fac = 0.35
    switch = 0  #keeps track of whether inc or dec last
    elim_noise = 1 / (max_freq * 2.5 * Hz)
    #For line 95, somehow not required in previous version?
    #elim_noise_units = 1/(max_freq*Hz*2.5)

    thresh_time = 5 / (max_freq)  #capture 5 cycles
    thresh_ind = int(floor(
        (thresh_time / lgbinwidth) /
        2))  #the number of indices on each side of the window

    #dist_thresh moves with window capturing approx 5 cycles (need special cases for borders) Find where increases and decreases past threshold (as long as a certain distance apart, based on "elim_noise" which is based on avg freq of bursts
    dist_thresh.append(
        totspkdist_smooth[skipbin:skipbin + thresh_ind].mean(0) +
        mul_fac * totspkdist_smooth[skipbin:skipbin + thresh_ind].std(0))

    for i in xrange(1, numlgbins):
        step_start = (i) * lgbinwidth
        step_end = (i + 1) * lgbinwidth

        #moving threshold
        if i > (skipbin +
                thresh_ind) and (i + thresh_ind) < len(totspkdist_smooth):
            #print(totspkdist_smooth[i-thresh_ind:i+thresh_ind])
            dist_thresh.append(
                totspkdist_smooth[i - thresh_ind:i + thresh_ind].mean(0) +
                mul_fac *
                totspkdist_smooth[i - thresh_ind:i + thresh_ind].std(0))
        elif (i + thresh_ind) >= len(totspkdist_smooth):
            dist_thresh.append(totspkdist_smooth[-thresh_ind:].mean(0) +
                               mul_fac *
                               totspkdist_smooth[-thresh_ind:].std(0))
        else:
            dist_thresh.append(
                totspkdist_smooth[skipbin:skipbin + thresh_ind].mean(0) +
                mul_fac *
                totspkdist_smooth[skipbin:skipbin + thresh_ind].std(0))

        if (totspkdist_smooth[i - 1] <
                dist_thresh[i]) and (totspkdist_smooth[i] >= dist_thresh[i]):
            #inc_past_thresh.append(step_start-0.5*lgbinwidth)
            if (inc_past_thresh):  #there has already been at least one inc,
                if (
                        abs(inc_past_thresh[-1] -
                            (step_start - 0.5 * lgbinwidth)) > elim_noise
                ) and switch == 0:  #must be at least x ms apart (yHz), and it was dec last..
                    inc_past_thresh.append(
                        step_start - 0.5 * lgbinwidth
                    )  #take lower point (therefore first) when increasing. Need to -0.5binwidth to adjust for shift between index of bin width and index of bin distn
                    #print (['incr=%f'%inc_past_thresh[-1]])
                    thresh_plot.append(dist_thresh[i])
                    switch = 1
            else:
                inc_past_thresh.append(
                    step_start - 0.5 * lgbinwidth
                )  #take lower point (therefore first) when increasing. Need to -0.5binwidth to adjust for shift between index of bin width and index of bin distn
                thresh_plot.append(dist_thresh[i])
                switch = 1  #keeps track of that it was inc. last
        elif (totspkdist_smooth[i - 1] >=
              dist_thresh[i]) and (totspkdist_smooth[i] < dist_thresh[i]):
            # dec_past_thresh.append(step_end-0.5*lgbinwidth)  #take lower point (therefore second) when decreasing
            if (dec_past_thresh):  #there has already been at least one dec
                if (
                        abs(dec_past_thresh[-1] -
                            (step_end - 0.5 * lgbinwidth)) > elim_noise
                ) and switch == 1:  #must be at least x ms apart (y Hz), and it was inc last
                    dec_past_thresh.append(
                        step_end - 0.5 * lgbinwidth
                    )  #take lower point (therefore second) when decreasing
                    #print (['decr=%f'%dec_past_thresh[-1]])
                    switch = 0
            else:
                dec_past_thresh.append(
                    step_end - 0.5 * lgbinwidth
                )  #take lower point (therefore second) when decreasing
                switch = 0  #keeps track of that it was dec last

    if totspkdist_smooth[0] < dist_thresh[
            0]:  #if you are starting below thresh, then pop first inc.  otherwise, don't (since will decrease first)
        if inc_past_thresh:  #if list is not empty
            inc_past_thresh.pop(0)
#

#####################################################################
#
######### TO DEFINE A STATIC THRESHOLD AND FIND CROSSING POINTS

#    dist_thresh=0.15 #static threshold
#    switch=0  #keeps track of whether inc or dec last
#    overall_freq=3.6 #0.9
#    elim_noise=1/(overall_freq*5)#2.5)
#
#
#    for i in xrange(1,numlgbins):
#        step_start=(i)*lgbinwidth
#        step_end=(i+1)*lgbinwidth
#
#        if (totspkdist_smooth[i-1]<dist_thresh) and (totspkdist_smooth[i]>=dist_thresh):   #if cross threshold (increasing)
#            if (inc_past_thresh):    #there has already been at least one inc,
#                if (abs(dec_past_thresh[-1]-(step_start-0.5*lgbinwidth))>elim_noise) and switch==0:   #must be at least x ms apart (yHz) from the previous dec, and it was dec last..
#                    inc_past_thresh.append(step_start-0.5*lgbinwidth)  #take lower point (therefore first) when increasing. Need to -0.5binwidth to adjust for shift between index of bin width and index of bin distn
#                    #print (['incr=%f'%inc_past_thresh[-1]])     #-0.5*lgbinwidth
#                    switch=1
#            else:
#                inc_past_thresh.append(step_start-0.5*lgbinwidth)  #take lower point (therefore first) when increasing. Need to -0.5binwidth to adjust for shift between index of bin width and index of bin distn
#                switch=1   #keeps track of that it was inc. last
#        elif (totspkdist_smooth[i-1]>=dist_thresh) and (totspkdist_smooth[i]<dist_thresh):
#            if (dec_past_thresh):    #there has already been at least one dec
#                if (abs(inc_past_thresh[-1]-(step_end-0.5*lgbinwidth))>elim_noise) and switch==1:    #must be at least x ms apart (y Hz) from the previous incr, and it was inc last
#                    dec_past_thresh.append(step_end-0.5*lgbinwidth)  #take lower point (therefore second) when decreasing
#                    #print (['decr=%f'%dec_past_thresh[-1]])
#                    switch=0
#            else:
#                dec_past_thresh.append(step_end-0.5*lgbinwidth)  #take lower point (therefore second) when decreasing
#                switch=0    #keeps track of that it was dec last
#
#
#    if totspkdist_smooth[0]<dist_thresh:   #if you are starting below thresh, then pop first inc.  otherwise, don't (since will decrease first)
#        if inc_past_thresh:  #if list is not empty
#            inc_past_thresh.pop(0)

################################################################
###############################################################

######## DEFINE INTER AND INTRA BURSTS ########

#since always start with dec, intraburst=time points from 1st inc:2nd dec, from 2nd inc:3rd dec, etc.
#interburst=time points from 1st dec:1st inc, from 2nd dec:2nd inc, etc.

    intraburst_time_ms_compound_list = []
    interburst_time_ms_compound_list = []
    intraburst_bins = []  #in seconds
    interburst_bins = []

    #print(inc_past_thresh)
    if len(inc_past_thresh) < len(dec_past_thresh):  #if you end on a decrease
        for i in xrange(len(inc_past_thresh)):
            intraburst_time_ms_compound_list.append(
                arange(inc_past_thresh[i] / ms, dec_past_thresh[i + 1] / ms,
                       1))  #10 is timestep
            interburst_time_ms_compound_list.append(
                arange((dec_past_thresh[i] + dt) / ms,
                       (inc_past_thresh[i] - dt) / ms, 1))  #10 is timestep
            intraburst_bins.append(inc_past_thresh[i])
            intraburst_bins.append(dec_past_thresh[i + 1])
            interburst_bins.append(dec_past_thresh[i])
            interburst_bins.append(inc_past_thresh[i])
    else:  #if you end on an increase
        for i in xrange(len(inc_past_thresh) - 1):
            intraburst_time_ms_compound_list.append(
                arange(inc_past_thresh[i] / ms, dec_past_thresh[i + 1] / ms,
                       1))  #10 is timestep
            interburst_time_ms_compound_list.append(
                arange((dec_past_thresh[i] + dt) / ms,
                       (inc_past_thresh[i] - dt) / ms, 1))  #10 is timestep
            intraburst_bins.append(inc_past_thresh[i])
            intraburst_bins.append(dec_past_thresh[i + 1])
            interburst_bins.append(dec_past_thresh[i] + dt)
            interburst_bins.append(inc_past_thresh[i] - dt)
        if dec_past_thresh and inc_past_thresh:  #if neither dec_past_thresh nor inc_past_thresh is empty
            interburst_bins.append(dec_past_thresh[-1] +
                                   dt)  #will have one more inter than intra
            interburst_bins.append(inc_past_thresh[-1] + dt)

    interburst_bins = interburst_bins / second
    intraburst_bins = intraburst_bins / second

    intraburst_time_ms = [
        num for elem in intraburst_time_ms_compound_list for num in elem
    ]  #flatten list
    interburst_time_ms = [
        num for elem in interburst_time_ms_compound_list for num in elem
    ]  #flatten list

    num_intraburst_bins = len(
        intraburst_bins
    ) / 2  #/2 since have both start and end points for each bin
    num_interburst_bins = len(interburst_bins) / 2

    intraburst_bins_ms = [x * 1000 for x in intraburst_bins]
    interburst_bins_ms = [x * 1000 for x in interburst_bins]

    ######################################
    #bin_s=[((inc_past_thresh-dec_past_thresh)/2+dec_past_thresh) for inc_past_thresh, dec_past_thresh in zip(inc_past_thresh,dec_past_thresh)]
    bin_s = [((x - y) / 2 + y)
             for x, y in zip(inc_past_thresh, dec_past_thresh)] / second

    binpt_ind = [int(floor(x / lgbinwidth)) for x in bin_s]

    ########## FIND PEAK TO TROUGH AND SAVE VALUES  ###################
    ########## CATEGORIZE BURSTING BASED ON PEAK TO TROUGH VALUES ###################
    ########## DISCARD BINPTS IF PEAK TO TROUGH IS TOO SMALL ###################

    peaks = []
    trough = []
    peak_to_trough_diff = []
    min_burst_size = 0.2  #defines a burst as 0.2 or larger.

    for i in xrange(len(binpt_ind) - 1):
        peaks.append(max(totspkdist_smooth[binpt_ind[i]:binpt_ind[i + 1]]))
        trough.append(min(totspkdist_smooth[binpt_ind[i]:binpt_ind[i + 1]]))

    peak_to_trough_diff = [
        max_dist - min_dist for max_dist, min_dist in zip(peaks, trough)
    ]

    #to delete all bins following any < min_burst_size
    first_ind_not_burst = next(
        (x[0] for x in enumerate(peak_to_trough_diff) if x[1] < min_burst_size), None)
    #    if first_ind_not_burst:
    #        del bin_s[first_ind_not_burst+1:]   #needs +1 since bin_s has one additional value (since counts edges)

    #to keep track of any bins < min_burst_size so can ignore in stats later
    all_ind_not_burst = [
        x[0] for x in enumerate(peak_to_trough_diff) if x[1] < min_burst_size
    ]

    bin_ms = [x * 1000 for x in bin_s]
    binpt_ind = [int(floor(x / lgbinwidth)) for x in bin_s]

    #for moving threshold only
    thresh_plot = []
    thresh_plot = [dist_thresh[x] for x in binpt_ind]

    #for static threshold
    #thresh_plot=[dist_thresh]*len(bin_ms)
    #
    #
    #    bin_s=[((inc_past_thresh-dec_past_thresh)/2+dec_past_thresh) for inc_past_thresh, dec_past_thresh in zip(inc_past_thresh,dec_past_thresh)]
    #    bin_ms=[x*1000 for x in bin_s]
    #    thresh_plot=[]
    #    binpt_ind=[int(floor(x/lgbinwidth)) for x in bin_s]
    #    thresh_plot=[dist_thresh[x] for x in binpt_ind]
    #
    binpts = xrange(int(lgbinwidth * 1000 / 2),
                    int(numlgbins * lgbinwidth * 1000), int(lgbinwidth * 1000))
    totspkhist_list = totspkhist.tolist(
    )  #[val for subl in totspkhist for val in subl]

    #find first index after transient to see if have enough bins to do stats
    bin_ind_no_trans = bisect.bisect(bin_ms, transient)
    intrabin_ind_no_trans = bisect.bisect(intraburst_bins, transient /
                                          1000)  #transient to seconds
    if intrabin_ind_no_trans % 2 != 0:  #index must be even since format is ind0=start_bin, ind1=end_bin, ind2=start_bin, .... .
        intrabin_ind_no_trans += 1
    interbin_ind_no_trans = bisect.bisect(interburst_bins, transient / 1000)
    if interbin_ind_no_trans % 2 != 0:
        interbin_ind_no_trans += 1

    return [
        bin_s, bin_ms, binpts, totspkhist, totspkdist_smooth, dist_thresh,
        totspkhist_list, thresh_plot, binpt_ind, lgbinwidth, numlgbins,
        intraburst_bins, interburst_bins, intraburst_bins_ms,
        interburst_bins_ms, intraburst_time_ms, interburst_time_ms,
        num_intraburst_bins, num_interburst_bins, bin_ind_no_trans,
        intrabin_ind_no_trans, interbin_ind_no_trans
    ]
Example no. 43
 def log_lik_vector_multiple(self, y, F):
     S = -y * F
     # stable softplus per element: max(s, 0) keeps all exponents non-positive
     PS = asarray([asarray([max(x, 0) for x in s]) for s in S])
     LP = -(PS + log(exp(-PS) + exp(S - PS)))
     return LP
Example no. 44
 def likelihood(self, wt):
     l = self.f(wt)
     l = exp(l)
     return l
Example no. 45
 def log_lik_hessian_vector(self, y, f):
     s = asarray([min(x, 0) for x in f])
     # numerically stable -sigmoid(f)*(1-sigmoid(f))
     d2lp = -exp(2 * s - f) / (exp(s) + exp(s - f)) ** 2
     return d2lp
Example no. 46
 def scale_adapt(self,learn_scale,step_output):
     # learn_scale follows a 1/iterations schedule and acts as the learning rate
     # for a Robbins-Monro update towards the target acceptance rate accstar
     self.globalscale = exp(log(self.globalscale) + learn_scale * (exp(step_output.log_ratio) - self.accstar))