def setup(self):
        """ Sets up nonlinear optimization machinery
        """
        unix.mkdir(PATH.OPTIMIZE)

        # prepare output writers
        self.writer = Writer(path=PATH.OUTPUT)

        self.stepwriter = StepWriter(path=PATH.SUBMIT)

        # prepare algorithm machinery
        if PAR.SCHEME in ['NLCG']:
            self.NLCG = NLCG(path=PATH.OPTIMIZE,
                             maxiter=PAR.NLCGMAX,
                             thresh=PAR.NLCGTHRESH,
                             precond=self.precond())

        elif PAR.SCHEME in ['LBFGS']:
            self.LBFGS = LBFGS(path=PATH.OPTIMIZE,
                               memory=PAR.LBFGSMEM,
                               maxiter=PAR.LBFGSMAX,
                               thresh=PAR.LBFGSTHRESH,
                               precond=self.precond())

        # write initial model
        if exists(PATH.MODEL_INIT):
            import solver
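            # merge the initial model into a single vector and save it as 'm_new'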
            src = PATH.MODEL_INIT
            dst = join(PATH.OPTIMIZE, 'm_new')
            savenpy(dst, solver.merge(solver.load(src)))
Example #2
    def write_gradient(self, path):
        super(regularize, self).write_gradient(path)

        g = solver.load(path + '/' + 'gradient', suffix='_kernel')
        if not PAR.LAMBDA:
            return solver.merge(g)

        m = solver.load(path + '/' + 'model')
        mesh = self.getmesh()

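        # add the PAR.LAMBDA-weighted regularization term to each parameter's
        # gradient, processor by processor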
        for key in solver.parameters:
            for iproc in range(PAR.NPROC):
                g[key][iproc] += PAR.LAMBDA *\
                    self.nabla(mesh, m[key][iproc], g[key][iproc])

        self.save(path, solver.merge(g), backup='noregularize')
Example #3
    def setup(self):
        """ Sets up nonlinear optimization machinery
        """
        unix.mkdir(PATH.OPTIMIZE)

        # prepare output writers
        self.writer = Writer(path=PATH.OUTPUT)

        self.stepwriter = StepWriter(path=PATH.SUBMIT)

        # prepare algorithm machinery
        if PAR.SCHEME in ["NLCG"]:
            self.NLCG = NLCG(path=PATH.OPTIMIZE, maxiter=PAR.NLCGMAX, thresh=PAR.NLCGTHRESH, precond=self.precond)

        elif PAR.SCHEME in ["LBFGS"]:
            self.LBFGS = LBFGS(
                path=PATH.OPTIMIZE,
                memory=PAR.LBFGSMEM,
                maxiter=PAR.LBFGSMAX,
                thresh=PAR.LBFGSTHRESH,
                precond=self.precond,
            )

        # write initial model
        if exists(PATH.MODEL_INIT):
            src = PATH.MODEL_INIT
            dst = join(PATH.OPTIMIZE, "m_new")
            savenpy(dst, solver.merge(solver.load(src)))
Example #4
    def regularize3d(self, path):
        assert exists(path)

        g = solver.load(path + '/' + 'gradient', suffix='_kernel')
        if not PAR.LAMBDA:
            return solver.merge(g)

        m = solver.load(path + '/' + 'model')
        mesh = self.getmesh()

        for key in solver.parameters:
            for iproc in range(PAR.NPROC):
                g[key][iproc] += PAR.LAMBDA *\
                    self.nabla(mesh, m[key][iproc], g[key][iproc])

        return solver.merge(g)
Example #5
    def regularize(self, path):
        assert exists(path)

        g = solver.load(path + '/' + 'gradient', suffix='_kernel')
        if not PAR.LAMBDA:
            return solver.merge(g)

        m = solver.load(path + '/' + 'model')
        mesh = self.getmesh()

        for key in solver.parameters:
            for iproc in range(PAR.NPROC):
                g[key][iproc] += PAR.LAMBDA *\
                    self.nabla(mesh, m[key][iproc], g[key][iproc])
                    #self.nabla(m[key][iproc], g[key][iproc], mesh, h)

        return solver.merge(g)
Example #6
    def evaluate_gradient(self):
        """ Performs adjoint simulation to evaluate gradient
        """
        system.run('solver',
                   'eval_grad',
                   hosts='all',
                   path=PATH.GRAD,
                   export_traces=divides(optimize.iter, PAR.SAVETRACES))

        postprocess.write_gradient(path=PATH.GRAD)

        src = join(PATH.GRAD, 'gradient')
        dst = join(PATH.OPTIMIZE, 'g_new')
        savenpy(dst, solver.merge(solver.load(src, suffix='_kernel')))
Example #7
    def evaluate_gradient(self):
        """ Performs adjoint simulation to evaluate gradient
        """
        system.run('solver', 'eval_grad',
                   hosts='all',
                   path=PATH.GRAD,
                   export_traces=divides(optimize.iter, PAR.SAVETRACES))

        postprocess.write_gradient(path=PATH.GRAD)

        src = join(PATH.GRAD, 'gradient')
        dst = join(PATH.OPTIMIZE, 'g_new')
        savenpy(dst, solver.merge(solver.load(src, suffix='_kernel')))
Example #8
    def write_gradient(self, path):
        """ Reads kernels and writes gradient of objective function
        """
        if not exists(path):
            raise Exception('gradient path does not exist: ' + path)

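        # sum the event kernels and apply any additional kernel processing steps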
        self.combine_kernels(path, solver.parameters)
        self.process_kernels(path, solver.parameters)

        g = solver.merge(solver.load(
                 path + '/' + 'kernels/sum',
                 suffix='_kernel',
                 verbose=True))

        if PAR.LOGARITHMIC:
            # convert from logarithmic to absolute perturbations
            g *= solver.merge(solver.load(path + '/' + 'model'))
        self.save(path, g)

        if PATH.MASK:
            # apply mask
            g *= solver.merge(solver.load(PATH.MASK))
            self.save(path, g, backup='nomask')
Example #9
 def search(self):
     self.bestWords = []
     self.prog.value = 0
     sekwencja = self.literki.text.replace(' ', '')
     if len(sekwencja) < 3:
         self.opis.text = 'Wprowadź 3 lub więcej liter i wyszukaj ' + \
                          'jeszcze raz.'  # "Enter 3 or more letters and search again."
     else:
         if not self.BAZY:
             self.importFiles()
         self.hand = getFrequencyDict(sekwencja)
         lettersNr = len(sekwencja)
         self.opis.text = 'Obliczanie... \n\nCzekaj'  # "Calculating... Wait"
         wordlists = self.wordlist[:lettersNr - 2]
         crpWordlist = list(merge(*wordlists))
         # TODO implement thread search
         work = chooseWords(
             self, crpWordlist, lettersNr + 1,
             lambda x: Clock.schedule_once(partial(self.update_bar, x)))
         work.start()
         self.prog.value = 1.0
Example #10
    def fix_near_field(self, path=''):
        """ Masks the event kernels around the source location and applies a
            depth weighting to suppress near-field artifacts
        """
        import preprocess
        preprocess.setup()

        name = solver.check_source_names()[solver.getnode]
        fullpath = path + '/' + name

        g = solver.load(fullpath, suffix='_kernel')
        g_vec = solver.merge(g)
        nproc = solver.mesh.nproc

        if not PAR.FIXRADIUS:
            return

        x, y, z = self.getcoords()

        # estimate average grid spacings from the mesh bounding box and point count
        lx = x.max() - x.min()
        ly = y.max() - y.min()
        lz = z.max() - z.min()
        nn = x.size
        nx = np.around(np.sqrt(nn * lx / (lz * ly)))
        ny = np.around(np.sqrt(nn * ly / (lx * lz)))
        nz = np.around(np.sqrt(nn * lz / (lx * ly)))
        dx = lx / nx * 1.25
        dy = ly / ny * 1.25
        dz = lz / nz * 1.25

        # Gaussian mask width: PAR.FIXRADIUS times the average grid spacing
        sigma = PAR.FIXRADIUS * (dx + dz + dy) / 3.0
        _, h = preprocess.load(solver.getpath + '/' + 'traces/obs')

        # mask sources
        mask = np.exp(-0.5 * ((x - h.sx[0])**2. + (y - h.sy[0])**2. +
                              (z - h.sz[0])**2.) / sigma**2.)

        # depth weighting: scale by sqrt(|z|), reducing values near z = 0
        z_factor = np.power(abs(z), 0.5)

        # split the masks across processors to match the kernel storage layout
        mask_depth = solver.split(z_factor)
        mask_d = solver.split(mask)

        for key in solver.parameters:
            for iproc in range(nproc):
                # mean kernel value inside the source mask (used only by the
                # optional add-back commented out below)
                weight = np.sum(mask_d['vp'][iproc] * g[key][iproc]) / np.sum(
                    mask_d['vp'][iproc])
                # zero out the kernel near the source, then apply depth weighting
                g[key][iproc] *= 1. - mask_d['vp'][iproc]
                g[key][iproc] *= mask_depth['vp'][iproc]
                #g[key][iproc] += mask_d['vp'][iproc]*weight

        # mask receivers
        #for ir in range(h.nr):
        #    mask = np.exp(-0.5*((x-h.rx[ir])**2.+(z-h.ry[ir])**2.)/sigma**2.)
        #    for key in solver.parameters:
        #        weight = np.sum(mask*g[key][0])/np.sum(mask)
        #        g[key][0] *= 1.-mask
        #        g[key][0] += mask*weight

        solver.save(fullpath, g, suffix='_kernel')
Example #11
 def importFiles(self):
     self.ltrVals = letterValues(PUNKTACJA)
     self.wordlist = loadWords(DICTIONARY)
     self.BAZY = True
     printDbg('imported {:d} words.'.format(len(list(
         merge(*self.wordlist)))))
Example #12
    def fix_near_field(self, path=''):
        """
        """
        import preprocess
        preprocess.setup()

        name = solver.check_source_names()[solver.getnode]
        fullpath = path + '/' + name

        g = solver.load(fullpath, suffix='_kernel')
        g_vec = solver.merge(g)
        nproc = solver.mesh.nproc

        if not PAR.FIXRADIUS:
            return

        x, y, z = self.getcoords()

        lx = x.max() - x.min()
        ly = y.max() - y.min()
        lz = z.max() - z.min()
        nn = x.size
        nx = np.around(np.sqrt(nn * lx / (lz * ly)))
        ny = np.around(np.sqrt(nn * ly / (lx * lz)))
        nz = np.around(np.sqrt(nn * lz / (lx * ly)))
        dx = lx / nx * 1.25
        dy = ly / ny * 1.25
        dz = lz / nz * 1.25

        sigma = PAR.FIXRADIUS * (dx + dz + dy) / 3.0
        _, h = preprocess.load(solver.getpath + '/' + 'traces/obs')
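        # Gaussian mask centered on the source position read from the trace headers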
        mask = np.exp(-0.5 * ((x - h.sx[0])**2. + (y - h.sy[0])**2. +
                              (z - h.sz[0])**2.) / sigma**2.)

        scale_z = np.power(abs(z), 0.5)  # depth weighting: scale by sqrt(|z|)

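        # edge tapers: high-power windows, normalized and mirrored so the
        # weights roll off toward both ends of each coordinate axis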
        power_win = 10
        win_x = np.power(x, power_win)
        win_y = np.power(y, power_win)
        win_z = np.power(z, power_win)

        win_x = win_x / win_x.max()
        win_y = win_y / win_y.max()
        win_z = win_z / win_z.max()

        win_x = 1.0 - win_x[::-1]
        win_y = 1.0 - win_y[::-1]
        win_z = 1.0 - win_z[::-1]

        win_x_rev = win_x[::-1]
        win_y_rev = win_y[::-1]
        win_z_rev = win_z[::-1]

        taper_x = x * 0.0 + 1.0
        taper_y = y * 0.0 + 1.0
        taper_z = z * 0.0 + 1.0

        taper_x *= win_x
        taper_y *= win_y
        taper_z *= win_z
        taper_x *= win_x_rev
        taper_y *= win_y_rev
        taper_z *= win_z_rev

        scale_z = scale_z * taper_z + 0.1

        mask_x = solver.split(taper_x)
        mask_y = solver.split(taper_y)
        mask_z = solver.split(scale_z)
        mask_d = solver.split(mask)

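        # zero the kernels near the source, then apply the depth and edge tapers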
        for key in solver.parameters:
            for iproc in range(nproc):
                weight = np.sum(mask_d['vp'][iproc] * g[key][iproc]) / np.sum(
                    mask_d['vp'][iproc])
                g[key][iproc] *= 1. - mask_d['vp'][iproc]
                g[key][iproc] *= mask_z['vp'][iproc]
                g[key][iproc] *= mask_x['vp'][iproc]
                g[key][iproc] *= mask_y['vp'][iproc]

        #sigma = 1.0
        ## mask receivers
        #for ir in range(h.nr):
        #    mask = np.exp(-0.5*((x-h.rx[ir])**2.+(y-h.ry[ir])**2.+(z-h.rz[ir])**2.)/sigma**2.)
        #    mask_d = solver.split(mask)
        #    #mask = np.exp(-0.5*((x-h.rx[ir])**2.+(z-h.ry[ir])**2.)/sigma**2.)
        #    for key in solver.parameters:
        #        for iproc in range(nproc):
        #            #weight = np.sum(mask*g[key][0])/np.sum(mask)
        #            g[key][iproc] *= 1.-mask_d['vp'][iproc]
        #            #g[key][0] += mask*weight

        solver.save(fullpath, g, suffix='_kernel')