def test_add(self):
     gaussian_one = Gaussian(25, 3)
     gaussian_two = Gaussian(30, 4)
     gaussian_sum = gaussian_one + gaussian_two
     
     self.assertEqual(gaussian_sum.mean, 55)
     self.assertEqual(gaussian_sum.stdev, 5)
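The test above assumes a Gaussian class whose + operator adds the means and combines the standard deviations in quadrature (sqrt(3**2 + 4**2) = 5). A minimal sketch of such a class, purely illustrative and not the implementation under test:

import math

class Gaussian:
    """Minimal Gaussian distribution with a mean, a standard deviation and a sum operator."""

    def __init__(self, mean=0.0, stdev=1.0):
        self.mean = mean
        self.stdev = stdev

    def __add__(self, other):
        # Sum of independent Gaussians: means add, variances add.
        result = Gaussian()
        result.mean = self.mean + other.mean
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result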
Example #2
def test():

    nn = 101.
    x, y = np.mgrid[0:nn, 0:nn]
    xx = sorted(list(set(x.flatten().tolist())))

    GaussPars = 3., 15., 190. * np.pi / 180
    #GaussPars=0.001,0.001,30.*np.pi/180

    Sm, SM, PA = GaussPars

    #########
    psfPars = 1, 10., 120. * np.pi / 180
    #psfPars=0.001,.001,30.*np.pi/180
    PSm, PSM, PPA = psfPars
    z1 = Gaussian.GaussianXY(x, y, 1., off=(50, 50), sig=(PSm, PSM), pa=PPA)
    #########

    CPSF = ClassConfPSF(psfPars)
    GaussParsConv = CPSF.GiveConvGaussPars(GaussPars)
    Sm2, SM2, PA2 = GaussParsConv

    z0 = Gaussian.GaussianXY(x, y, 1., off=(50, 50), sig=(Sm, SM), pa=PA)
    z2 = Gaussian.GaussianXY(x, y, 1., off=(50, 50), sig=(Sm2, SM2), pa=PA2)

    pylab.clf()
    pylab.subplot(1, 3, 1)
    pylab.imshow(z0, interpolation="nearest")
    pylab.subplot(1, 3, 2)
    pylab.imshow(z1, interpolation="nearest")
    pylab.subplot(1, 3, 3)
    pylab.imshow(z2, interpolation="nearest")
    pylab.draw()
    pylab.show(block=False)
Example #3
    def marginals(self, *args):
        self.do_inference()

        n = self.n
        proj = Gauss.Map(np.zeros((0, n)))  # n -> 0
        for t in args:
            proj = Gauss.copy(n).then(Gauss.tensor(proj, self.eval_term(t)))
        return self.state.then(proj)
Example #4
    def nu(self):
        # make new variable
        v = Term.var(self.n)

        # extend state
        self.state = Gauss.tensor(self.state, Gauss.N(1))

        self.n += 1
        return v
Example #5
class BayesianNeuralNetLayer(nn.Module):
    """
Defining the funtionality of 1 Bayesian Layer
Input:
    in_features: no.of inputs in a layer
    out_features: output channels desired in a layer
Output:
    layer with variational inference bundeled in
    """
    def __init__(self, in_features, out_features, PI, SIGMA1, SIGMA2):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        # Weight parameters
        self.weight_mu = nn.Parameter(
            torch.Tensor(out_features, in_features).normal_(0, 0.2))
        self.weight_rho = nn.Parameter(
            torch.Tensor(out_features, in_features).uniform_(-5, -4))
        self.weight = Gaussian(self.weight_mu, self.weight_rho)
        # Bias parameters: initialised from uniform distributions
        self.bias_mu = nn.Parameter(
            torch.Tensor(self.out_features).uniform_(0.1, 0.2))
        self.bias_rho = nn.Parameter(
            torch.Tensor(self.out_features).uniform_(-5, -4))
        self.bias = Gaussian(self.bias_mu, self.bias_rho)
        # Scaled Distributions
        self.weight_prior = ScaledMixtureGaussian(PI, SIGMA1, SIGMA2)
        self.bias_prior = ScaledMixtureGaussian(PI, SIGMA1, SIGMA2)
        self.log_prior = 0
        self.log_variational_posterior = 0

    def forward(self, input, sample=False, calculate_log_probs=False):
        if self.training or sample:
            weight = self.weight.sample()
            bias = self.bias.sample()
        else:
            weight = self.weight.mu
            bias = self.bias.mu
        if self.training or calculate_log_probs:
            self.log_prior = self.weight_prior.log_prob(
                weight) + self.bias_prior.log_prob(bias)
            self.log_variational_posterior = self.weight.log_prob(
                weight) + self.bias.log_prob(bias)
        else:
            self.log_prior, self.log_variational_posterior = 0, 0

        return F.linear(input, weight, bias)
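The layer relies on a Gaussian variational-posterior helper exposing mu, sample() and log_prob(). A plausible sketch in the style of Blundell et al.'s "Weight Uncertainty in Neural Networks", offered as an assumption rather than the code actually imported here:

import math
import torch

class Gaussian:
    """Diagonal Gaussian parameterised by mu and rho, with sigma = log(1 + exp(rho))."""

    def __init__(self, mu, rho):
        self.mu = mu
        self.rho = rho
        self.normal = torch.distributions.Normal(0, 1)

    @property
    def sigma(self):
        return torch.log1p(torch.exp(self.rho))

    def sample(self):
        # Reparameterisation trick: mu + sigma * eps, with eps ~ N(0, 1).
        epsilon = self.normal.sample(self.rho.size())
        return self.mu + self.sigma * epsilon

    def log_prob(self, input):
        return (-math.log(math.sqrt(2 * math.pi))
                - torch.log(self.sigma)
                - ((input - self.mu) ** 2) / (2 * self.sigma ** 2)).sum()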
Example #6
    def GiveGuess(self, Nsources):
        x, y, z = self.data.copy()
        S = []

        #self.plotIter(self.data[2],z)

        DicoGuess = {}
        for i in range(Nsources):
            DicoGuess[i] = {}
            ind = np.argmax(z)
            x0, y0, s0 = x[ind], y[ind], z[ind]
            #print s0,np.max(z)
            z -= Gaussian.GaussianXY(x0 - x,
                                     y0 - y,
                                     s0,
                                     sig=(self.psf[0], self.psf[1]),
                                     pa=self.psf[2])
            S.append([x0, y0, s0])
            DicoGuess[i]["l"] = x0
            DicoGuess[i]["m"] = y0
            DicoGuess[i]["s"] = s0
            #self.plotIter2(x,y,self.data[2],z)
            #self.plotIter(self.data[2],z)
            self.AddMissingDefault(DicoGuess[i])
        #S=np.array(S).T.flatten()
        #S=np.array(S.tolist()+[.1])

        Lout = self.DicoToListFreePars(DicoGuess)
        return Lout
Example #7
    def sst_classic(self, kernel_size=5, sigma=2, tau=0.002,
                    box_filter=[[1, 0], [-1, 0], [0, 1], [0, -1]], iter=5):
        img = self.image
        self.sigma = sigma
        height,width,channel = img.shape
        dimage = np.float32(img)

        gray = cv.cvtColor(dimage, cv.COLOR_BGR2GRAY)

        sobelx = cv.Sobel(gray, cv.CV_32F, 1, 0, ksize=3)
        sobely = cv.Sobel(gray, cv.CV_32F, 0, 1, ksize=3)

        tensor_image = np.zeros((height,width,3))
        for j in range(height):
            for i in range(width):
                fx = sobelx[j,i]
                fy = sobely[j,i]
                tensor_image[j,i] = (fx*fx,fy*fy,fx*fy)
        
        #sigma = 2 * self.sigma * self.sigma     
        #smooth_structure_tensor = cv.GaussianBlur(tensor_image,(kernel_size,kernel_size),sigma)

        gaussian_func = Gaussian.Gaussian()
        smooth_structure_tensor = gaussian_func.calc(tensor_image,2,height,width,channel)
        print("generated smooth structure tensor!")
        self.smooth_structure_tensor = smooth_structure_tensor
        cv.imwrite("./img/smooth_structure_tensor.png",smooth_structure_tensor)
        return smooth_structure_tensor
Example #8
    def GiveCovMat(self):

        x = self.x
        y = self.y

        N = x.size

        dN = 10
        N = 2 * dN + 1
        x, y = np.mgrid[-dN:dN:N * 1j, -dN:dN:N * 1j]
        x = x.flatten()
        y = y.flatten()

        N = x.size
        GG = np.zeros((N, N), np.float32)
        for i in range(N):
            GG[i, :] = Gaussian.GaussianXY(x[i] - x,
                                           y[i] - y,
                                           self.noise**2,
                                           sig=(self.psf[0], self.psf[1]),
                                           pa=self.psf[2])

        Ginv = ModLinAlg.invSVD(GG)

        pylab.clf()
        pylab.subplot(1, 2, 1)
        pylab.imshow(GG, interpolation="nearest")
        pylab.subplot(1, 2, 2)
        pylab.imshow(np.real(Ginv), interpolation="nearest")
        pylab.draw()
        pylab.show(block=False)
        stop  # undefined name: deliberately halts execution here for inspection
Example #9
def genSpectrum(energies, intensities, widths):
    """ Gaussianifies the points on the spectrum using the input widths
    """

    maxE = max(energies)
    minE = min(energies)
    print "maxE", maxE
    print "minE", minE
    energyRange = np.linspace(minE-1000, maxE+1000, 10000)

    intensityRange = [0]*len(energyRange)
    print "Number of points to plot:", len(energyRange)

    # for i in range(len(energies)):
#         print "E: ", energies[i], " I: ", intensities[i]
    for i in range(0,len(energies)):
       # print "Gaussian for intensity i", intensities[i]
        if intensities[i]:
            gauss = G.gaussianGenerator(intensities[i], widths[i], energies[i])
            for x in range(len(energyRange)):
                intensityRange[x] += gauss(energyRange[x])

    ypoints = [gauss(x) for x in energyRange]
    #print "Intensities Gaussian"
   # print sorted(intensityRange, reverse=True)[:60]
    print "Finished Gaussian"
    return (energyRange, intensityRange)
Example #10
def genSpectrum(energies, intensities, widths):
    """ Gaussianifies the points on the spectrum using the input widths
    """

    maxE = max(energies)
    minE = min(energies)
    print("maxE", maxE)
    print("minE", minE)
    energyRange = np.linspace(minE - 1000, maxE + 1000, 10000)

    intensityRange = [0] * len(energyRange)
    print("Number of points to plot:", len(energyRange))

    # for i in range(len(energies)):
    #         print "E: ", energies[i], " I: ", intensities[i]
    for i in range(0, len(energies)):
        # print "Gaussian for intensity i", intensities[i]
        if intensities[i]:
            gauss = G.gaussianGenerator(intensities[i], widths[i], energies[i])
            for x in range(len(energyRange)):
                intensityRange[x] += gauss(energyRange[x])

    ypoints = [gauss(x) for x in energyRange]
    #print "Intensities Gaussian"
    # print sorted(intensityRange, reverse=True)[:60]
    # print "Finished Gaussian"
    return (energyRange, intensityRange)
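Both versions of genSpectrum assume G.gaussianGenerator(intensity, width, centre) returns a callable Gaussian profile. A hedged sketch of such a generator (the real module may define "width" differently, e.g. as FWHM rather than sigma):

import numpy as np

def gaussianGenerator(intensity, width, centre):
    """Return a callable evaluating a Gaussian of given peak intensity, width (sigma) and centre."""
    def gauss(x):
        return intensity * np.exp(-((x - centre) ** 2) / (2.0 * width ** 2))
    return gauss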
Example #11
def crossValidate(DataMatrix, fold, patternDimension, labelIndex):
    labels = DataMatrix[:, labelIndex]
    labels = np.unique(labels)
    ## manipulate the input data for cross validation
    errorRateList = []
    gaussianMatrix = []
    for cvtimes in range(0, fold):
        trainingSet, testingPatterns , testingLabels =  \
            getCrossValidationSet(fold, cvtimes, DataMatrix, patternDimension, labelIndex)
        ## group training data, return a dictionary whose
        ## key is class label, and value is corresponding set of
        ## training data
        groupedTrainingSet = groupMessyData(trainingSet, patternDimension,
                                            labelIndex)
        ## construct gaussian objects
        gaussians = []
        for tempLabel in groupedTrainingSet:
            tempGaussian = gau.Gaussian(tempLabel, patternDimension)
            ## fitting the gaussian model
            tempGaussian.fitParameters(groupedTrainingSet.get(tempLabel))
            gaussians.append(tempGaussian)
        ## validate by traversing the whole testing set
        prediction = getPrediction(testingPatterns, gaussians)
        ## calculate the error rate of this time of validation
        assert len(prediction) == testingLabels.shape[0]
        numOfError = 0
        for i in range(0, len(prediction)):
            #print prediction[i], testingLabels[i]
            if prediction[i] != testingLabels[i]:
                numOfError = numOfError + 1
        errorRate = 1.0 * numOfError / testingLabels.shape[0]
        errorRateList.append(errorRate)
        gaussianMatrix.append(gaussians)
    return errorRateList, gaussianMatrix
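The helper getPrediction is not shown; conceptually it assigns each test pattern the label of the fitted class Gaussian with the highest likelihood. A sketch using hypothetical label and logLikelihood members (the actual gau.Gaussian API may differ):

def getPrediction(testingPatterns, gaussians):
    """For every pattern, pick the label of the Gaussian giving the highest log-likelihood."""
    predictions = []
    for pattern in testingPatterns:
        # 'label' and 'logLikelihood' are hypothetical names used only for illustration.
        scores = [(g.label, g.logLikelihood(pattern)) for g in gaussians]
        predictions.append(max(scores, key=lambda pair: pair[1])[0])
    return predictions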
Example #12
    def from_dist(self, dist):
        assert (dist.dom == 0)
        k = dist.cod

        self.state = Gauss.tensor(self.state, dist)

        vs = [Term.var(i) for i in range(self.n, self.n + k)]

        self.n += k
        return vs
Example #13
    def do_inference(self):
        n_eqs = len(self.equations)

        if n_eqs > 0:
            n = self.n
            joint = Gauss.Map(np.zeros((0, n)))  # n -> 0
            for (s, t) in self.equations:
                diff = self.eval_term(s + (-1.0) * t)
                joint = Gauss.copy(n).then(Gauss.tensor(joint, diff))

            zz = np.zeros((n_eqs, 1))
            zeros = Gauss.Map(np.zeros((n_eqs, 0)), zz)  # 0 -> n_eqs

            if not Gauss.in_support(self.state.then(joint), zz):
                raise InferenceError("conditions cannot be satisfied")
            else:
                dis = self.state.disintegrate(joint)
                self.state = dis.dot(zeros)
                self.equations = []
Example #14
 def func(self, pars, xyz):
     x, y, z = xyz
     l, m, s = self.GetPars(pars)
     G = np.zeros_like(x)
     for i in range(l.shape[0]):
         G += Gaussian.GaussianXY(l[i] - x,
                                  m[i] - y,
                                  s[i],
                                  sig=(self.psf[0], self.psf[1]),
                                  pa=self.psf[2])
     return G
Example #15
 def __init__(self, in_features, out_features, PI, SIGMA1, SIGMA2):
     super().__init__()
     self.in_features = in_features
     self.out_features = out_features
     # Weight parameters
     self.weight_mu = nn.Parameter(
         torch.Tensor(out_features, in_features).normal_(0, 0.2))
     self.weight_rho = nn.Parameter(
         torch.Tensor(out_features, in_features).uniform_(-5, -4))
     self.weight = Gaussian(self.weight_mu, self.weight_rho)
     # Bias parameters: initialised from uniform distributions
     self.bias_mu = nn.Parameter(
         torch.Tensor(self.out_features).uniform_(0.1, 0.2))
     self.bias_rho = nn.Parameter(
         torch.Tensor(self.out_features).uniform_(-5, -4))
     self.bias = Gaussian(self.bias_mu, self.bias_rho)
     # Scaled Distributions
     self.weight_prior = ScaledMixtureGaussian(PI, SIGMA1, SIGMA2)
     self.bias_prior = ScaledMixtureGaussian(PI, SIGMA1, SIGMA2)
     self.log_prior = 0
     self.log_variational_posterior = 0
Example #16
def rayleigh():
	kernel1, escalar = Gaussian.get_rayleigh_filter()
	nuevaMatriz = kernel.aplicarKernel(pixArray1, kernel1,escalar)

	for widget in ventanaFiltro.winfo_children():
		widget.destroy()

	figure = plt.Figure()
	subPlot = figure.add_subplot(111)
	subPlot.imshow(nuevaMatriz, cmap=plt.cm.gray)
	imagesTemp = FigureCanvasTkAgg(figure, master=ventanaFiltro)
	imagesTemp.draw()
	imagesTemp.get_tk_widget().pack(padx=5, pady=15)
Example #17
def init():
    nn = 101.
    x, y = np.mgrid[0:nn, 0:nn]
    xx = sorted(list(set(x.flatten().tolist())))
    dx = xx[1] - xx[0]
    dx = 1.5
    z = Gaussian.GaussianXY(x,
                            y,
                            1.,
                            off=(50, 50),
                            sig=(1.2 * dx, 1.2 * dx),
                            pa=20. * np.pi / 180)
    z += Gaussian.GaussianXY(x,
                             y,
                             1.,
                             off=(55, 50),
                             sig=(1.2 * dx, 1.2 * dx),
                             pa=20. * np.pi / 180)
    z += Gaussian.GaussianXY(x,
                             y,
                             .5,
                             off=(25, 25),
                             sig=(1.2 * dx, 1.2 * dx),
                             pa=20. * np.pi / 180)
    z += Gaussian.GaussianXY(x,
                             y,
                             .5,
                             off=(75, 25),
                             sig=(1.2 * dx, 1.2 * dx),
                             pa=20. * np.pi / 180)
    noise = 0.01
    #z+=np.random.randn(nn,nn)*noise
    # z+=Gaussian.GaussianXY(x,y,1.,off=(50,50),sig=(1*dx,1*dx),pa=20.*np.pi/180)
    #pylab.clf()
    dx *= 1.5
    pylab.imshow(z, interpolation="nearest")
    pylab.show()
    Fit = ClassPointFit(x, y, z, psf=(dx, dx, 0.), noise=noise)
    Fit.DoAllFit()
Example #18
    def func(self, pars, xyz):
        x, y, z = xyz
        l, m, s, dp = self.GetPars(pars)
        psf = self.givePsf(dp)
        # print "psf0: %s"%str(self.psf)
        # print "psf1: %s"%str(psf)

        G = np.zeros_like(x)
        for i in range(l.shape[0]):
            G += Gaussian.GaussianXY(l[i] - x,
                                     m[i] - y,
                                     s[i],
                                     sig=(psf[0], psf[1]),
                                     pa=self.psf[2])
        return G
Example #19
def test():
    nn = 101.
    x, y = np.mgrid[0:nn, 0:nn]
    xx = sorted(list(set(x.flatten().tolist())))
    dx = xx[1] - xx[0]
    dx = 1.
    adp = 1.

    psfPars = 1, 10., 120. * np.pi / 180
    PSm, PSM, PPA = psfPars
    CPSF = ClassConvPSF(psfPars)

    GaussPars = 3, 10, 10. * np.pi / 180.
    GaussParsConv = CPSF.GiveConvGaussPars(GaussPars)
    Sm2, SM2, PA2 = GaussParsConv

    # z=Gaussian.GaussianXY(x,y,1.,off=(50,50),sig=(adp*dx,adp*dx),pa=20.*np.pi/180)
    # z+=Gaussian.GaussianXY(x,y,1.,off=(55,50),sig=(adp*dx,adp*dx),pa=20.*np.pi/180)
    # z+=Gaussian.GaussianXY(x,y,.5,off=(25,25),sig=(adp*dx,adp*dx),pa=20.*np.pi/180)
    # z+=Gaussian.GaussianXY(x,y,.5,off=(75,25),sig=(adp*dx,adp*dx),pa=20.*np.pi/180)

    # #z+=Gaussian.GaussianXY(x,y,.5,off=(75,75),sig=(5*adp*dx,adp*dx),pa=20.*np.pi/180)
    # z+=Gaussian.GaussianXY(x,y,.5,off=(50,50),sig=(5*adp*dx,adp*dx),pa=20.*np.pi/180)

    z = Gaussian.GaussianXY(x, y, .5, off=(75, 75), sig=(Sm2, SM2), pa=PA2)

    noise = 0.01
    #z+=np.random.randn(nn,nn)*noise
    # z+=Gaussian.GaussianXY(x,y,1.,off=(50,50),sig=(1*dx,1*dx),pa=20.*np.pi/180)
    #pylab.clf()
    #dx*=1.5
    #pylab.ion()

    pylab.clf()
    pylab.imshow(z, interpolation="nearest")
    pylab.draw()
    pylab.show(block=False)
    #Fit=ClassGaussFit(x,y,z,psf=(dx,dx,0.),noise=noise,FreePars=["l", "m","s","Sm","SM","PA"])
    Fit = ClassGaussFit(x,
                        y,
                        z,
                        psf=psfPars,
                        noise=noise,
                        FreePars=["l", "m", "s", "Sm", "SM", "PA"])
    Fit.DoAllFit()
Example #20
    def GiveGuess(self, Nsources):
        x, y, z = self.data.copy()
        S = []

        #self.plotIter(self.data[2],z)
        for i in range(Nsources):
            ind = np.argmax(z)
            x0, y0, s0 = x[ind], y[ind], z[ind]
            #print s0,np.max(z)
            z -= Gaussian.GaussianXY(x0 - x,
                                     y0 - y,
                                     s0,
                                     sig=(self.psf[0], self.psf[1]),
                                     pa=self.psf[2])
            S.append([x0, y0, s0])
            #self.plotIter2(x,y,self.data[2],z)
            #self.plotIter(self.data[2],z)
        return S
Example #21
    def funcNoPSF(self, pars, xyz):
        x, y, z = xyz

        #l,m,s,dp=self.GetPars(pars)
        DicoPars = self.ListToDicoPars(pars)
        l, m, s, Sm, SM, PA = self.DicoToListPars(DicoPars)

        dp = 0.
        psf = self.givePsf(dp)

        G = np.zeros_like(x)
        for i in range(l.shape[0]):
            G += Gaussian.GaussianXY(l[i] - x,
                                     m[i] - y,
                                     s[i],
                                     sig=(Sm[i], SM[i]),
                                     pa=PA[i])

        return G
Example #22
    def func(self, pars, xyz):
        x, y, z = xyz

        #l,m,s,dp=self.GetPars(pars)
        DicoPars = self.ListToDicoPars(pars)
        l, m, s, Sm, SM, PA = self.DicoToListPars(DicoPars)
        #print Sm,SM,pars
        dp = 0.
        psf = self.givePsf(dp)

        G = np.zeros_like(x)
        for i in range(l.shape[0]):
            #G+=Gaussian.GaussianXY(l[i]-x,m[i]-y,s[i],sig=(psf[0],psf[1]),pa=self.psf[2])
            ThisSm, ThisSM, ThisPA = self.giveConvPsf((Sm[i], SM[i], PA[i]))
            G += Gaussian.GaussianXY(l[i] - x,
                                     m[i] - y,
                                     s[i],
                                     sig=(ThisSm, ThisSM),
                                     pa=ThisPA)

        return G
Example #23
def main(filename, ExpNMR, nfiles):

    print "=========================="
    print "PyDP4 script,\nintegrating Tinker/MacroModel,"
    print "Gaussian/NWChem/Jaguar and DP4\nv0.7"
    print "\nCopyright (c) 2015-2018 Kristaps Ermanis, Jonathan M. Goodman"
    print "Distributed under MIT license"
    print "==========================\n\n"

    if nfiles < settings.NTaut or nfiles % settings.NTaut != 0:
        print "Invalid number of tautomers/input files - number of input files\
        must be a multiple of number of tautomers"

        quit()

    SetTMSConstants()

    #Check the number of input files, generate some if necessary
    if nfiles == 1:
        import InchiGen
        if len(settings.SelectedStereocentres) > 0:
            numDS, inpfiles = InchiGen.GenSelectDiastereomers(
                filename, settings.SelectedStereocentres)
        else:
            numDS, inpfiles = InchiGen.GenDiastereomers(filename)
        if settings.GenTaut:
            newinpfiles = []
            for ds in inpfiles:
                print "Generating tautomers for " + ds
                settings.NTaut, files = InchiGen.GenTautomers(ds)
                newinpfiles.extend(files)
            inpfiles = list(newinpfiles)
        if settings.GenProt:
            newinpfiles = []
            for ds in inpfiles:
                print "Generating protomers for " + ds
                settings.NTaut, files = InchiGen.GenProtomers(
                    ds, settings.BasicAtoms)
                newinpfiles.extend(files)
            inpfiles = list(newinpfiles)
    else:
        inpfiles = filename
        if settings.GenTaut:
            numDS = nfiles
            import InchiGen
            newinpfiles = []
            for ds in inpfiles:
                print "Generating tautomers for " + ds
                settings.NTaut, files = InchiGen.GenTautomers(ds)
                newinpfiles.extend(files)
            inpfiles = list(newinpfiles)
        else:
            numDS = int(nfiles / settings.NTaut)
            if numDS == 1:
                import InchiGen
                for f in filename:
                    tdiastereomers = []
                    numDS, tinpfiles = InchiGen.GenDiastereomers(f)
                    tdiastereomers.append(tinpfiles)
                tinpfiles = zip(*tdiastereomers)
                inpfiles = []
                for ds in tinpfiles:
                    inpfiles.extend(list(ds))

    print inpfiles

    #Check the existence of mm output files
    MMRun = False

    if settings.MMTinker:
        #Check if there already are Tinker output files with the right names
        tinkfiles = glob.glob('*.tout')
        mminpfiles = []
        for f in inpfiles:
            if f + '.tout' in tinkfiles and (f + 'rot.tout' in tinkfiles
                                             or settings.Rot5Cycle is False):
                if len(mminpfiles) == 0:
                    MMRun = True
            else:
                MMRun = False
                mminpfiles.append(f)
    else:
        #Check if there already are MacroModel output files with the right names
        mmfiles = glob.glob('*.log')
        mminpfiles = []
        for f in inpfiles:
            if f + '.log' in mmfiles and (f + 'rot.log' in mmfiles
                                          or settings.Rot5Cycle is False):
                if len(mminpfiles) == 0:
                    MMRun = True
            else:
                MMRun = False
                mminpfiles.append(f)

    if MMRun or settings.AssumeDone or settings.UseExistingInputs:
        print 'Conformation search has already been run for these inputs.\
                \nSkipping...'

        if settings.GenOnly:
            print "Input files generated, quitting..."
            quit()
    else:
        if settings.MMTinker:
            print 'Some Tinker files missing.'
            print '\nSetting up Tinker files...'
            Tinker.SetupTinker(len(inpfiles), settings, *mminpfiles)
            if settings.GenOnly:
                print "Input files generated, quitting..."
                quit()
            print '\nRunning Tinker...'
            Tinker.RunTinker(len(inpfiles), settings, *mminpfiles)
        else:
            print 'Some Macromodel files missing.'
            print '\nSetting up Macromodel files...'
            MacroModel.SetupMacromodel(len(inpfiles), settings, *mminpfiles)
            if settings.GenOnly:
                print "Input files generated, quitting..."
                quit()
            print '\nRunning Macromodel...'
            MacroModel.RunMacromodel(len(inpfiles), settings, *mminpfiles)

    if settings.OnlyConfS:
        print "Conformational search completed, quitting as instructed."
        quit()

    if (not settings.AssumeDone) and (not settings.UseExistingInputs):
        if settings.ConfPrune and not settings.Cluster:
            if settings.DFT == 'z' or settings.DFT == 'g' or settings.DFT == 'd':
                adjRMSDcutoff = Gaussian.AdaptiveRMSD(inpfiles[0], settings)
            elif settings.DFT == 'n' or settings.DFT == 'w' or settings.DFT == 'm':
                adjRMSDcutoff = NWChem.AdaptiveRMSD(inpfiles[0], settings)
            elif settings.DFT == 'j':
                adjRMSDcutoff = Jaguar.AdaptiveRMSD(inpfiles[0], settings)

            print 'RMSD cutoff adjusted to ' + str(adjRMSDcutoff)
        else:
            adjRMSDcutoff = settings.InitialRMSDcutoff

        #Run DFT setup script for every diastereomer
        print '\nRunning DFT setup...'
        i = 1
        for ds in inpfiles:
            if settings.DFT == 'z' or settings.DFT == 'g' or settings.DFT == 'd':
                print "\nGaussian setup for file " + ds + " (" + str(i) +\
                    " of " +  str(len(inpfiles)) + ")"
                if settings.Cluster == False:
                    Gaussian.SetupGaussian(ds, ds + 'ginp', 3, settings,
                                           adjRMSDcutoff)
                else:
                    Gaussian.SetupGaussianCluster(ds, ds + 'ginp', 3, settings)
            elif settings.DFT == 'n' or settings.DFT == 'w' or settings.DFT == 'm':
                print "\nNWChem setup for file " + ds +\
                    " (" + str(i) + " of " + str(len(inpfiles)) + ")"
                NWChem.SetupNWChem(ds, ds + 'nwinp', 3, settings,
                                   adjRMSDcutoff)
            elif settings.DFT == 'j':
                print "\nJaguar setup for file " + ds +\
                    " (" + str(i) + " of " + str(len(inpfiles)) + ")"
                Jaguar.SetupJaguar(ds, ds + 'jinp', 3, settings, adjRMSDcutoff)

            i += 1
        QRun = False
    elif settings.AssumeDone:
        QRun = True
    else:
        QRun = False

    if settings.DFT == 'z' or settings.DFT == 'g' or settings.DFT == 'd':
        Files2Run = Gaussian.GetFiles2Run(inpfiles, settings)
    elif settings.DFT == 'n' or settings.DFT == 'w' or settings.DFT == 'm':
        Files2Run = NWChem.GetFiles2Run(inpfiles, settings)
    elif settings.DFT == 'j':
        Files2Run = Jaguar.GetFiles2Run(inpfiles, settings)

    print Files2Run

    if len(Files2Run) == 0:

        if (settings.DFT == 'z' or settings.DFT == 'g' or settings.DFT == 'd') and\
            (settings.DFTOpt or settings.PM6Opt or settings.HFOpt or settings.M06Opt)\
     and not settings.AssumeDone:
            print "Checking if all geometries have converged"
            Ngeoms, Nunconverged, unconverged = Gaussian.CheckConvergence(
                inpfiles)
            if Nunconverged > 0:
                print "WARNING: Not all geometries have achieved convergence!"
                print ','.join([x[:-8] for x in unconverged])
                print "Number of geometries: " + str(Ngeoms)
                print "Unconverged: " + str(Nunconverged)
                Gaussian.ResubGeOpt(unconverged, settings)
                Files2Run = Gaussian.GetFiles2Run(inpfiles, settings)
                QRun = False
            else:
                QRun = True
        else:
            QRun = True

        #QRun = True

    if len(Files2Run) > settings.HardConfLimit:
        print "Hard conformation count limit exceeded, DFT calculations aborted."
        quit()

    if QRun:
        print 'DFT has already been run for these inputs. Skipping...'
    else:
        if settings.DFT == 'g':
            print '\nRunning Gaussian locally...'
            Gaussian.RunLocally(Files2Run, settings)

        elif settings.DFT == 'z':
            print '\nRunning Gaussian on Ziggy...'

            #Run Gaussian jobs on Ziggy cluster in folder named after date
            #and time in the short 1processor job queue
            #and wait until the last file is completed
            MaxCon = settings.MaxConcurrentJobs
            if settings.DFTOpt or settings.PM6Opt or settings.HFOpt or settings.M06Opt:
                for i in range(len(Files2Run)):
                    Files2Run[i] = Files2Run[i][:-5] + '.com'
            if len(Files2Run) < MaxCon:
                Gaussian.RunOnZiggy(0, settings.queue, Files2Run, settings)
            else:
                print "The DFT calculations will be done in " +\
                    str(math.ceil(len(Files2Run)/MaxCon)) + " batches"
                i = 0
                while (i + 1) * MaxCon < len(Files2Run):
                    print "Starting batch nr " + str(i + 1)
                    Gaussian.RunOnZiggy(
                        str(i + 1), settings.queue,
                        Files2Run[(i * MaxCon):((i + 1) * MaxCon)], settings)
                    i += 1
                print "Starting batch nr " + str(i + 1)
                Gaussian.RunOnZiggy(str(i + 1), settings.queue,
                                    Files2Run[(i * MaxCon):], settings)
        elif settings.DFT == 'd':
            print '\nRunning Gaussian on Darwin...'

            #Run Gaussian jobs on Darwin cluster in folder named after date
            #and title and wait until the last file is completed
            MaxCon = settings.MaxConcurrentJobsDarwin

            if settings.DFTOpt or settings.PM6Opt or settings.HFOpt or settings.M06Opt:
                for i in range(len(Files2Run)):
                    Files2Run[i] = Files2Run[i][:-5] + '.com'

            if len(Files2Run) < MaxCon:
                Gaussian.RunOnDarwin(0, Files2Run, settings)
            else:
                print "The DFT calculations will be done in " +\
                    str(math.ceil(len(Files2Run)/MaxCon)) + " batches"
                i = 0
                while (i + 1) * MaxCon < len(Files2Run):
                    print "Starting batch nr " + str(i + 1)
                    Gaussian.RunOnDarwin(
                        str(i + 1), Files2Run[(i * MaxCon):((i + 1) * MaxCon)],
                        settings)
                    i += 1
                print "Starting batch nr " + str(i + 1)
                Gaussian.RunOnDarwin(str(i + 1), Files2Run[(i * MaxCon):],
                                     settings)

        elif settings.DFT == 'n':
            print '\nRunning NWChem locally...'
            NWChem.RunNWChem(Files2Run, settings)

        elif settings.DFT == 'w':
            print '\nRunning NWChem on Ziggy...'

            #Run NWChem jobs on Ziggy cluster in folder named after date
            #and time in the short 1 processor job queue
            #and wait until the last file is completed
            now = datetime.datetime.now()
            MaxCon = settings.MaxConcurrentJobs
            if len(Files2Run) < MaxCon:
                NWChem.RunOnZiggy(now.strftime('%d%b%H%M'), settings.queue,
                                  Files2Run, settings)
            else:
                print "The DFT calculations will be done in " +\
                    str(math.ceil(len(Files2Run)/MaxCon)) + " batches"
                i = 0
                while (i + 1) * MaxCon < len(Files2Run):
                    print "Starting batch nr " + str(i + 1)
                    NWChem.RunOnZiggy(
                        now.strftime('%d%b%H%M') + str(i + 1), settings.queue,
                        Files2Run[(i * MaxCon):((i + 1) * MaxCon)], settings)
                    i += 1
                print "Starting batch nr " + str(i + 1)
                NWChem.RunOnZiggy(
                    now.strftime('%d%b%H%M') + str(i + 1), settings.queue,
                    Files2Run[(i * MaxCon):], settings)

        elif settings.DFT == 'm':
            print '\nRunning NWChem on Medivir cluster...'

            #Run NWChem jobs on Medivir cluster
            MaxCon = settings.MaxConcurrentJobs
            if len(Files2Run) < MaxCon:
                NWChem.RunOnMedivir(Files2Run, settings)
            else:
                print "The DFT calculations will be done in " +\
                    str(math.ceil(len(Files2Run)/MaxCon)) + " batches"
                i = 0
                while (i + 1) * MaxCon < len(Files2Run):
                    print "Starting batch nr " + str(i + 1)
                    NWChem.RunOnMedivir(
                        Files2Run[(i * MaxCon):((i + 1) * MaxCon)], settings)
                    i += 1
                print "Starting batch nr " + str(i + 1)
                NWChem.RunOnMedivir(Files2Run[(i * MaxCon):], settings)

        elif settings.DFT == 'j':
            print '\nRunning Jaguar locally...'
            Jaguar.RunJaguar(Files2Run, settings)

    if numDS < 2:
        print "DP4 requires at least 2 candidate structures!"
    else:
        allargs = []
        for i in range(numDS):
            allargs.append(settings.NTaut)
            allargs.extend(inpfiles[i * settings.NTaut:(i + 1) *
                                    settings.NTaut])
        allargs.append(ExpNMR)
        DP4outp = NMRAnalysis.main(numDS, settings, *allargs)
        print '\nWriting the DP4 output to DP4outp'
        DP4_ofile = open(allargs[-1] + '.dp4', 'w')
        DP4_ofile.write(DP4outp)
        DP4_ofile.close()
        print 'DP4 process completed successfully.'
Example #24
 def setUp(self):
     self.gaussian = Gaussian(25, 2)
     self.gaussian.read_data_file('numbers.txt')
Example #25
 def eval_term(self, term):
     (A, b) = self.eval_term_rec(term)
     return Gauss.Map(A, np.array([[b]]))
Example #26
 def __init__(self):
     self.n = 0  # number of variables
     self.equations = []
     self.state = Gauss.N(0)
Example #27
    print('\t1. Decision tree')
    print('\t2. K nearest neighbours')
    print('\t3. Gaussian distribution')
    print('\t4. Neural networks')
    while True:
        metoda = int(input(''))
        if metoda in (1, 2, 3, 4):
            break
        else:
            print('Invalid option')
            print('You selected method number ' + str(metoda))
            print('--------------------------------------------------------')

    if metoda == 1:
        print('You selected decision-tree analysis')
        DTree.DTree(data)
    elif metoda == 2:
        print('You selected k-nearest-neighbours analysis')
        KNeighbors.KNeighboors(data)
    elif metoda == 3:
        print('You selected Gaussian analysis')
        Gaussian.Gaussian(data)
    elif metoda == 4:
        print('You selected neural-network analysis')
        MLP.MLP(data)
    else:
        print('Invalid option')

    metoda = input('Would you like to try another method? (y/n)')
    if metoda == 'n':
        break
Example #28
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg

# Basic window interface
aplicacion = tk.Tk()
aplicacion.geometry("600x750")
text_frame = Frame(aplicacion)
text_frame.configure(bg='#177497')
text = tk.Text(text_frame, width=90, height=8)
text.pack(side=LEFT, padx=20, pady=10)

fig = Figure()
fig.set_facecolor('#177497')
canvas = FigureCanvasTkAgg(fig, master=aplicacion)

gaussian_kernel = Gaussian.get_gaussian_filter()[0]
gaussian_scalar = Gaussian.get_gaussian_filter()[1]
rayleigh_kernel = Gaussian.get_rayleigh_filter()[0]
rayleigh_scalar = Gaussian.get_rayleigh_filter()[1]
gradient_x_kernel = np.asarray([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
gradient_y_kernel = np.asarray([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])

title = 'medical image processing'
back = tk.Frame(master=aplicacion, bg='#177497')
back.master.title(title)
back.pack_propagate(0)  # Don't allow the widgets inside to determine the frame's width / height
back.pack(fill=tk.BOTH, expand=1)  # Expand the frame to fill the root window

img = PhotoImage(file='./medical2.png')
Example #29
    def get_optimized_geometry(self):
        self.thefile.set(self.entry.get())
        gaussian, orca = False, False
        files = self.thefile.get().split()
        if len(files) > 1:
            self.log_update("Entering batch mode")

        # determine if file is from Gaussian or from ORCA
        for outputfile in files:
            self.log_update("{}".format(outputfile))
            try:
                output = G.GaussianOut(outputfile)
                if "Gaussian" in output.content().next():
                    gaussian = True
                    self.log_update("Gaussian file detected")
            except IOError:
                self.log_update("File not found. ErrorCode_teq18")
                return
            try:
                output = O.OrcaOut(outputfile)
                if "Program Version" in list(output.content())[20]:
                    orca = True
                    self.log_update("ORCA file detected.")

                elif "An Ab Initio, DFT and Semiempirical electronic structure package" in list(
                        output.content())[5]:
                    orca = True
                    self.log_update("ORCA file detected")
                elif "TOTAL RUN TIME" in list(output.content())[-1]:
                    orca = True
                    self.log_update("ORCA file detected")
            except IOError:
                self.log_update("File not found. ErrorCode_cax18")
                return

            # Return if neither a Gaussian nor an ORCA file was detected.
            if gaussian == False and orca == False:
                self.log_update(
                    "Neither Gaussian or ORCA output file type was detected. ErrorCode_xud25"
                )
                return

            ### GETTING GAUSSIAN OPT GEOM ###
            if gaussian:
                output = G.GaussianOut(outputfile)
                self.log_update("Getting Gaussian optimized geometry.")
                optgeom = output.geometry_trajectory()[-1]

                with open(
                        output.filename.split(",")[-1] + "_optimized.xyz",
                        "w") as f:
                    f.write("{}\n".format(output.no_atoms()))
                    f.write("Generated by the ToolBox\n")
                    for atom in optgeom:
                        f.write(' '.join(atom) + "\n")
                self.log_update("File written to {}".format(
                    output.filename.split(",")[-1] + "_optimized.xyz"))

            ### GETTING ORCA OPT GEOM ###
            elif orca:
                output = O.OrcaOut(outputfile)
                self.log_update("Getting ORCA optimized geometry.")
                optgeom = output.geometry_trajectory()[-1]
                with open(output.filename.replace(".out", "_optimized.xyz"),
                          "w") as f:
                    f.write(str(output.no_atoms()) + "\n")
                    f.write("Generated by the ToolBox\n")
                    for atom in optgeom:
                        f.write(atom + "\n")
                self.log_update("File written to {}".format(
                    output.filename.replace(".out", "_optimized.xyz")))
Example #30
from Kirsh import *
from Gaussian import *
def compare(f1, f2, i=None, val=0):
    '''
    Recursive function to compare two images by their LH code;
    the most similar photo has the lowest value.
    '''
    if i is None: i = len(f1)
    if i == 0: return val
    i -= 1
    if f1[i] + f2[i] != 0:
        val += ((f1[i] - f2[i])**2 / (f1[i] + f2[i]))
    return compare(f1, f2, i, val)

a = Gaussian("mirror.jpg", [7], [0.3])
c = a.code()
print c

print
print "K I R S H"
print
uma = Kirsh("mirror.jpg")
duas = Kirsh("mirror2.jpg")
tres = Kirsh("people.jpg")
f1 = uma.code()
f2 = duas.code()
f3 = tres.code()
f1_f2 = compare(f1,f2)
print "f1 - f2: %s" % f1_f2
f1_f3 = compare(f1,f3)
Example #31
def gp(xs, kernel):
    K = [[ kernel(x1,x2) for x1 in xs] for x2 in xs ]
    return Gaussian.N(np.array(K))
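gp builds the Gram (covariance) matrix of the kernel over the inputs and wraps it as a zero-mean Gaussian. Assuming Gaussian.N accepts a covariance matrix, a usage sketch with a squared-exponential kernel might look like this:

import numpy as np

def rbf_kernel(lengthscale=1.0, variance=1.0):
    """Squared-exponential kernel k(x1, x2) = variance * exp(-(x1 - x2)^2 / (2 * lengthscale^2))."""
    def k(x1, x2):
        return variance * np.exp(-((x1 - x2) ** 2) / (2.0 * lengthscale ** 2))
    return k

xs = np.linspace(0.0, 1.0, 5)
prior = gp(xs, rbf_kernel(lengthscale=0.5))  # zero-mean GP prior over the five inputs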
Example #32
"""
compute gradients along a trajectory of geometries using Gaussian
"""
from DFTB import XYZ
import Gaussian

if __name__ == "__main__":
    import sys
    import os
    if len(sys.argv) < 3:
        print "Usage: %s <geometry file> <gradient file>" % sys.argv[0]
        print "Compute gradients for all geometries in <geometry file> and write them to <gradient file>"
        exit(-1)
    geom_file = sys.argv[1]
    grad_file = sys.argv[2]
    tmp_dir = "/scratch/humeniuka/dftb/"
    os.system("module load g09")
    for atomlist in XYZ.read_xyz(geom_file):
        tmp_com = tmp_dir + "gaussian.com"
        tmp_out = tmp_dir + "gaussian.log"
        Gaussian.write_input(tmp_com, atomlist, \
                        route="# PBE/6-311G(3df,3pd)++ Force", \
                        title="Gradients for fitting repulsive potential")
        # compute forces with gaussian
        os.system("g09 < %s > %s" % (tmp_com, tmp_out))
        try:
            forces = Gaussian.read_forces(tmp_com)
        except Gaussian.FormatError as e:
            print e
        XYZ.write(grad_file, [forces], title="forces", units="forces", mode='a')