Example No. 1
    def Dependencies(self, params):
        approx_params = {
            'dataset': {
                'N': params['N'],
                'covariate_distribution': 'gaussian',
                'theta': [1] * params['d'],
                'seed': 0,
                'noise': 'bernoulli',
            },
            'model': {
                'type': 'lr'
            },
        }
        sample_params = copy.copy(approx_params)
        sample_params.update({
            'seed': 0,
            'iterations': 1000000,
            'alg': {
                'type': 'fmh',
                'params': {'taylor_order': 2},
            },
            'proposal': {
                'type': 'random_walk',
            },
        })
        return [
            jobs.Job('approximation', approx_params),
            jobs.Job('sample', sample_params),
        ]
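
This `Dependencies` method builds child `jobs.Job` instances rather than executing anything itself; the same pattern recurs in several examples below, and it implies a driver that walks the dependency graph before running a job. A minimal sketch of such a driver, assuming hypothetical `kind`, `params`, and `run()` members on `jobs.Job` (none of which appear in these snippets):

import json

def resolve(job, done=None):
    """Depth-first: run a job's dependencies, then the job itself, once each."""
    if done is None:
        done = set()
    key = (job.kind, json.dumps(job.params, sort_keys=True))  # hypothetical attributes
    if key in done:
        return
    for dep in job.Dependencies(job.params):
        resolve(dep, done)
    job.run()  # hypothetical execution hook
    done.add(key)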
Example No. 2
def addAdsorbate(constraint="1"):
    constrnts = AND(COMPLETED, RELAX, SURFACE, SYMMETRIC(False), constraint)

    ads = {'H': ['O1']}

    output = db.query(['fwid', 'params_json', 'finaltraj_pckl'], constrnts)
    question = 'Are you sure you want to add adsorbates to %d slabs?' % len(
        output)

    if ask(question):
        for fw, paramStr, ftraj in output:
            params = json.loads(paramStr)

            newsurf = surfFuncs.adsorbedSurface(
                ftraj, json.loads(params['facet_json']), ads)
            if jobs.Job(params).spinpol():
                newsurf.set_initial_magnetic_moments([
                    3 if e in misc.magElems else 0
                    for e in newsurf.get_chemical_symbols()
                ])
            ase.visualize.view(newsurf)

            params['name'] += '_' + printAds(ads)
            params['surfparent'] = fw
            params['inittraj_pckl'] = pickle.dumps(newsurf)
            params['adsorbates_json'] = json.dumps(ads)
            job = jobs.Job(params)
            if job.new():
                viz.view(newsurf)
                question = 'Does this structure look right?\n' + abbreviateDict(
                    params)
                if ask(question):
                    job.check()
                    job.submit()
        misc.launch()
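
Several of these scripts gate batch actions behind an `ask` helper whose definition is not shown. A plausible stand-in, assuming it is a plain yes/no prompt returning a boolean:

def ask(question):
    """Interactive yes/no confirmation (assumed behavior of the helper above)."""
    return input(question + ' [y/n] ').strip().lower().startswith('y')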
Example No. 3
def varies(constraint, param, rang):
    """
	varies("xc='BEEF' and name like '%Pd%'",'pw',range(500,1000,100)) 
		--> create jobs varying planewave_cutoff (500,600,...,1000) for all BEEF jobs with Pd
	"""
    jbs = []
    defaults = RELAXORLAT
    cnst = AND(defaults, constraint)
    output = db.queryCol('storage_directory', cnst)
    existing = manage.listOfIncompleteJobStrs()

    if ask('Do you want to vary %s over range %s for %d jobs?' %
           (param, str(rang), len(output))):
        for stordir in output:
            params = json.loads(misc.readOnSherOrSlac(stordir + 'params.json'))
            for v in rang:
                ps = copy.deepcopy(params)
                assert not jobs.Job(ps).new(), \
                    "varying a job that isn't complete or doesn't exist"
                ps[param] = v
                job = jobs.Job(ps)
                if job.new(existing): jbs.append(job)

        if ask('Do you want to launch %d new jobs?' % len(jbs)):
            for j in jbs:
                j.submit()
            misc.launch()
Example No. 4
    def Dependencies(self, params):
        return [
            jobs.Job('dataset', params['dataset']),
            jobs.Job('approximation', {
                'dataset': params['dataset'],
                'model': params['model'],
            }),
        ]
Example No. 5
    def Dependencies(self, params):
        deps = []
        for lN, (alg_type, alg_params) in itertools.product(
                range(6, 18),
                [
                    ('mh', {}),
                    ('fmh', {'taylor_order': 1}),
                    ('fmh', {'taylor_order': 2}),
                ]):
            N = 2 ** lN
            dep_params = {  # renamed to avoid shadowing the function argument
                'dataset': {
                    'N': N,
                    'covariate_distribution': 'gaussian',
                    'theta': [1] * 10,
                    'seed': 0,
                    'noise': 'bernoulli',
                },
                'model': {
                    'type': 'lr',
                },
                'alg': {
                    'type': alg_type,
                    'params': alg_params,
                },
                'seed': 0,
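                # 'iterations' is assumed to be a module-level constant (not shown in this snippet).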
                'iterations': iterations,
                'proposal': {
                    'type': 'random_walk',
                },
            }
            deps.append(jobs.Job('sample', dep_params))

        return deps
Example No. 6
def relaunch(predicate=lambda x: True):
    """Relaunch timed out jobs and unconverged GPAW jobs"""
    fizzleLostRuns()
    lpad.detect_unreserved(expiration_secs=3600 * 24 * 7, rerun=True)
    timeouts = list(filter(predicate, errFWIDS('timeout')))
    unconvergeds = list(filter(predicate, errFWIDS('kohnsham')))
    tQuestion = "Do you want to relaunch %d timed out runs?" % len(timeouts)
    uQuestion = "Do you want to relaunch %d unconverged jobs?" % len(unconvergeds)
    if misc.ask(tQuestion):
        for fwid in timeouts:
            q = lpad.get_fw_dict_by_id(fwid)['spec']['_queueadapter']
            q['walltime'] = misc.doubleTime(q['walltime'])
            lpad.update_spec([fwid], {'_queueadapter': q})
            lpad.rerun_fw(fwid)
            if q['walltime'][:2] == '40':
                print('warning, 40h job with fwid', fwid)
    if misc.ask(uQuestion):
        inc = listOfIncompleteJobStrs()

        for fwid in unconvergeds:
            p = fwid2params(fwid)
            p['sigma'] += 0.05
            p['mixing'] *= 0.5
            job = jobs.Job(p)
            lpad.archive_wf(fwid)
            if job.new(inc): job.submit()

    misc.launch()
Example No. 7
    def Dependencies(self, params):
        d = params['d']
        deps = []

        algs = [
            ('mh', {}),
            ('fmh', {
                'taylor_order': 1
            }),
            ('fmh', {
                'taylor_order': 2
            }),
            ('flymc', {
                'qdb': 0.001
            }),
        ]

        for lN, (alg_type, alg_params), rho in itertools.product(
                range(6, 18),
                algs,
                self.rhos,
        ):
            N = 2**lN
            dep_params = {
                'alg': {
                    'type': alg_type,
                    'params': alg_params,
                },
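                # 'num_runs' and 'iterations' are assumed to be module-level constants (not shown).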
                'num_runs': num_runs,
                'iterations': iterations,
                'proposal': {
                    'type': 'pcn',
                    'params': {
                        'rho': rho,
                    },
                },
            }

            if params['model_type'] == 'lr':
                dep_params['dataset'] = {
                    'N': N,
                    'covariate_distribution': 'gaussian',
                    'theta': [1] * d,
                    'seed': 0,
                    'noise': 'bernoulli',
                }
                dep_params['model'] = {'type': 'lr'}
            elif params['model_type'] == 'rlr':
                dep_params['dataset'] = {
                    'N': N,
                    'covariate_distribution': 'gaussian',
                    'theta': [1] * d,
                    'seed': 0,
                    'noise': 'gaussian',
                }
                dep_params['model'] = {'type': 'rlr', 'params': {'nu': 4.0}}

            deps.append(jobs.Job('iact_multiple', dep_params))

        return deps
Example No. 8
    def Dependencies(self, params):
        deps = []
        for i in range(params['num_runs']):
            dep_params = copy.copy(params)
            del dep_params['num_runs']
            dep_params['seed'] = i
            deps.append(jobs.Job('iact', dep_params))
        return deps
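
The `copy.copy` here is safe only because every mutation (the `del` and the `'seed'` assignment) touches top-level keys; a shallow copy still shares any nested dicts with the original. A small demonstration of that boundary:

import copy

params = {'num_runs': 3, 'dataset': {'N': 16}}
dep_params = copy.copy(params)   # shallow copy: nested dicts are shared
del dep_params['num_runs']       # safe: affects only the copy
dep_params['seed'] = 1           # safe: new top-level key on the copy
dep_params['dataset']['N'] = 32  # NOT safe: also rewrites params['dataset']['N']
assert 'num_runs' in params and params['dataset']['N'] == 32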
Example No. 9
def calculateBulkModulus(constraint="1"):

    cons = AND(LATTICEOPT,
               constraint)  # necessary for bulkmodulus calc to be valid
    output = db.query([
        'fwid', 'storage_directory', 'job_name', 'structure_ksb',
        'bulkvacancy_ksb', 'bulkscale_ksb', 'system_type_ksb',
        'planewave_cutoff', 'xc', 'kptden_ksb', 'psp_ksb', 'dwrat_ksb',
        'econv_ksb', 'mixing_ksb', 'nmix_ksb', 'maxstep_ksb', 'nbands_ksb',
        'sigma_ksb', 'fmax_ksb', 'dftcode'
    ], cons)
    existing = manage.listOfIncompleteJobStrs()

    question = 'Are you sure you want to calculate bulk modulus for %d structures?' % len(
        output)
    if ask(question):
        newjbs = []
        for fwid, stor_dir, name, structure, bvj, bsj, kind, pw, xc, kptden, psp, dwrat, econv, mixing, nmix, maxstep, nbands, sigma, fmax, dftcode in output:

            params = {
                'jobkind': 'bulkmod',
                'strain': 0.03,
                'inittraj_pckl':
                misc.restoreMagmom(misc.storageDir2pckl(stor_dir)),
                'name': name + '_bulkmod',
                'structure': structure,
                'bulkvacancy_json': bvj,
                'bulkscale_json': bsj,
                'kind': kind,
                'dftcode': dftcode,
                'pw': pw,
                'xc': xc,
                'kptden': kptden,
                'psp': psp,
                'dwrat': dwrat,
                'econv': econv,
                'mixing': mixing,
                'nmix': nmix,
                'maxstep': maxstep,
                'nbands': nbands,
                'sigma': sigma,
                'fmax': fmax,
                'parent': fwid
            }
            job = jobs.Job(params)

            if job.new(existing): newjbs.append(job)
        if ask("launch %d new jobs?" % len(newjbs)):
            for j in newjbs:
                j.submit()
            misc.launch()
Example No. 10
async def postjob(ctx, income: float, *args):
    [name, *description] = (' '.join(args)).split(':')
    new_job = jobs.Job(
        db.globals.find_one_and_update({
            'key': 'job_counter'
        }, {
            '$inc': {
                'value': 1
            }
        }).get('value'))
    new_job.income = income
    new_job.name = name
    new_job.description = ':'.join(description)
    new_job.employer = ctx.author.id
    new_job.save()

    embed = discord.Embed()
    embed.add_field(name=name, value=await utils.get_job_output(ctx, new_job))
    await ctx.send('Successfully Added Job', embed=embed)
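
The `find_one_and_update` call with `$inc` is the usual MongoDB auto-increment counter. One subtlety: pymongo's `find_one_and_update` returns the document as it was before the update by default, so the code above hands out the counter's previous value. A sketch that returns the incremented value instead (connection, collection, and key names are illustrative):

from pymongo import MongoClient, ReturnDocument

db = MongoClient().botdb  # illustrative connection
doc = db.globals.find_one_and_update(
    {'key': 'job_counter'},
    {'$inc': {'value': 1}},
    upsert=True,                           # create the counter on first use
    return_document=ReturnDocument.AFTER,  # hand back the post-increment value
)
next_job_id = doc['value']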
Example No. 11
def main(args):

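    # NOTE: signal.SIGBREAK exists only on Windows builds of CPython; on POSIX
    # this registration raises AttributeError (see the sketch after this example).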
    for sig in (signal.SIGBREAK, signal.SIGINT, signal.SIGTERM):
        signal.signal(sig, on_stop)

    jobs_array.append(
        jobs.Job(interval=timedelta(seconds=jobs.JOBS_WAIT_TIME_SECONDS),
                 execute=process_messages_job))
    # jobs_array.append(jobs.Job(interval=timedelta(seconds=jobs.JOBS_WAIT_TIME_SECONDS), execute=update_user_detector_model))

    for job in jobs_array:
        job.start()

    updater = Updater(args.token,
                      request_kwargs=get_telegram_proxy_kwargs(args),
                      use_context=True)

    # Get the dispatcher to register handlers
    dp = updater.dispatcher

    id = ImageDownloader(args.flickr_key, args.flickr_secret)
    # dp.add_handler(MessageHandler(Filters.regex('[сС]лыш[ь]* (.*)(дай|запости|скинь|покажи|скинь|ещё|давай) ([а-я]+)'), photo))
    dp.add_handler(MessageHandler(Filters.regex('(^[сС]лыш[ь]?$)'), che))
    dp.add_handler(CommandHandler('help', help_handler))
    dp.add_handler(CommandHandler('koto', callback=id.koto_handler))
    dp.add_handler(CommandHandler('macho', callback=id.macho_handler))
    dp.add_handler(CommandHandler('chick', callback=id.chick_handler))
    dp.add_handler(DefaultTelegramHandler(callback=message_handler))
    # log all errors
    dp.add_error_handler(error_handler)

    # Start the Bot
    updater.start_polling()

    while not SIG_EXIT:
        sleep(1)

    updater.stop()
    # sql_communicator.disconnect()
    for job in jobs_array:
        job.stop()
    exit(0)
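
A portable version of the signal registration, guarding the Windows-only SIGBREAK:

import signal

def on_stop(signum, frame):  # stand-in for the module's real handler
    raise SystemExit

stop_signals = [signal.SIGINT, signal.SIGTERM]
if hasattr(signal, 'SIGBREAK'):  # Ctrl+Break; defined only on Windows
    stop_signals.append(signal.SIGBREAK)

for sig in stop_signals:
    signal.signal(sig, on_stop)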
Example No. 12
async def accept(ctx, job_id: int):
    job_doc = db.jobs.find_one({'_id': job_id})
    if not job_doc:
        await ctx.send("Could not find job")
        return

    juser = user.JUser(ctx.author.id)
    juser.save()

    job = jobs.Job()
    job.load(job_doc)

    if job.accepted:
        await ctx.send("Job is already taken")
        return

    employer = await bot.fetch_user(job.employer)
    embed = discord.Embed()
    embed.add_field(name=job.name, value=await get_job_output(job))
    if (job.income <= 0 and juser.jbucks < -1 * job.income):
        await ctx.send(
            'Sorry, you do not have enough jbux for this service (You have {} jbux)'
            .format(juser.jbucks))
        return
    emp_juser = user.JUser(job.employer)  # the discord.User above has no jbucks; look up the wallet
    if (job.income > 0 and emp_juser.jbucks < job.income):
        await ctx.send(
            'Sorry, your employer does not have enough jbux to hire you (They have {} jbux)'
            .format(emp_juser.jbucks))
        return

    await ctx.send('Hey {}, {} has accepted your job:'.format(
        employer.mention if employer else job.employer, ctx.author.mention),
                   embed=embed)

    if job.repeats == 'never':
        await transfer(ctx, user.JUser(job.employer), employer.mention, juser,
                       ctx.author.mention, job.income)
    else:
        db.jobs.update_one({'_id': job_id},
                           {'$set': {
                               'accepted': ctx.author.id
                           }})
Example No. 13
def getXCcontribs(constraint="1"):
    """NOT YET DEBUGGED"""
    cons = AND(GPAW, RELAXORLAT, constraint)
    output = db.query([
        'fwid', 'storage_directory', 'job_name', 'system_type_ksb',
        'planewave_cutoff', 'xc', 'kptden_ksb', 'psp_ksb', 'dwrat_ksb',
        'econv_ksb', 'mixing_ksb', 'nmix_ksb', 'maxstep_ksb', 'nbands_ksb',
        'sigma_ksb', 'fmax_ksb', 'dftcode'
    ], cons)
    question = 'Are you sure you want to calculate XC contributions for %d structures?' % len(
        output)
    if ask(question):
        newjbs = []
        for fwid, stor_dir, name, kind, pw, xc, kptden, psp, dwrat, econv, mixing, nmix, maxstep, nbands, sigma, fmax, dftcode in output:
            params = {
                'jobkind': 'xc',
                'inittraj_pckl': storageDir2pckl(stor_dir),
                'name': name + '_xc',
                'kind': kind,
                'dftcode': dftcode,
                'pw': pw,
                'xc': xc,
                'kptden': kptden,
                'psp': psp,
                'dwrat': dwrat,
                'econv': econv,
                'mixing': mixing,
                'nmix': nmix,
                'maxstep': maxstep,
                'nbands': nbands,
                'sigma': sigma,
                'fmax': fmax,
                'parent': fwid
            }
            job = jobs.Job(params)
            if job.new() and ask('does this look right %s' % str(params)):
                newjbs.append(job)
        if ask("launch %d new jobs?" % len(newjbs)):
            for j in newjbs:
                j.submit()
            misc.launch()
Example No. 14
def molecule(molname=None):
    """
	Give molecule name to choose initial structure, modify dictionary parameters for relaxation job
	"""

    existing = manage.listOfIncompleteJobStrs()

    for name, m in gas.aseMolDict.items():
        if molname is None or name == molname:
            mol = pickle.dumps(m)
            params = {
                'pw': 500,
                'xc': 'BEEF',
                'psp': 'gbrv15pbe',
                'fmax': 0.05,
                'dftcode': 'quantumespresso',
                'jobkind': 'relax',
                'inittraj_pckl': mol,
                'name': name,
                'relaxed': 0,
                'kind': 'molecule',
                'kptden': 1,  # doesn't matter, will be ignored; use 1 to be safe
                'dwrat': 10,
                'econv': 5e-4,
                'mixing': 0.1,
                'nmix': 10,
                'maxstep': 500,
                'nbands': -12,
                'sigma': 0.1
            }

            if molname is None or ask(
                    'Do you want to run a gas phase calculation with these params?\n%s'
                    % (misc.abbreviateDict(params))):
                jobs.Job(params).submit(existing)

    misc.launch()
Example No. 15
async def quitjob(ctx, job_id: int):
    juser = user.JUser(ctx.author.id)
    job_doc = db.jobs.find_one({'_id': job_id})

    if not job_doc:
        await ctx.send("Could not find job")
        return

    job = jobs.Job()
    job.load(job_doc)

    if job.accepted != ctx.author.id:
        await ctx.send("This is not your job")
        return

    juser.save()
    db.jobs.update_one({'_id': job_id}, {'$set': {'accepted': 0}})

    embed = discord.Embed()
    job.accepted = 0
    embed.add_field(name=job.name, value=await utils.get_job_output(ctx, job))
    await ctx.send('You have quit your job:', embed=embed)
Example No. 16
async def view(ctx, type, mine=None):
    fil = {}
    fil['income'] = {'$gt': 0} if type == 'jobs' else {'$lte': 0}
    if mine == 'posted':
        fil['accepted'] = 0
        fil['employer'] = ctx.author.id
    elif mine == 'accepted':
        juser = user.JUser(ctx.author.id)
        fil['accepted'] = juser.user_id
    elif mine == 'all':
        pass
    else:
        fil['accepted'] = 0

    embed = discord.Embed(title='Current {}'.format(type))
    if db.jobs.count_documents(fil) == 0:
        await ctx.send("No jobs found")
        return
    for j in db.jobs.find(fil):
        job = jobs.Job()
        job.load(j)
        embed.add_field(name=job.name, value=await get_job_output(job))
    await ctx.send(embed=embed)
Example No. 17
async def postjob(ctx, income: float, repeats, *args):
    if repeats not in ['never', 'daily']:
        await ctx.send("Please specify if the job pays once or daily")
        return
    [name, description] = (' '.join(args)).split(':')
    new_job = jobs.Job(
        db.globals.find_one_and_update({
            'key': 'job_counter'
        }, {
            '$inc': {
                'value': 1
            }
        }).get('value'))
    new_job.income = income
    new_job.repeats = repeats
    new_job.name = name
    new_job.description = description
    new_job.employer = ctx.author.id
    new_job.save()

    embed = discord.Embed()
    embed.add_field(name=name, value=await get_job_output(new_job))
    await ctx.send('Successfully Added Job', embed=embed)
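
This variant's two-element unpack raises ValueError when the joined arguments contain no colon or more than one (the earlier `postjob` tolerates extras via its starred unpack but silently accepts a missing colon). A defensive helper sketch, assuming a discord.py-style `ctx`; the usage string is illustrative:

async def parse_job_text(ctx, args):
    """Split 'name: description' safely; returns (name, description) or None."""
    text = ' '.join(args)
    if ':' not in text:
        await ctx.send('Usage: postjob <income> <never|daily> <name>: <description>')
        return None
    name, description = text.split(':', 1)  # maxsplit=1 keeps colons in the description
    return name.strip(), description.strip()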
Example No. 18
async def view(ctx, type, mine=None):
    fil = {}
    fil['income'] = {'$gt': 0} if type == 'requests' else {'$lte': 0}
    if mine == 'posted':
        fil['accepted'] = 0
        fil['employer'] = ctx.author.id
    elif mine == 'accepted':
        juser = user.JUser(ctx.author.id)
        fil['accepted'] = juser.user_id
    elif mine == 'all':
        pass
    else:
        fil['accepted'] = 0

    if db.jobs.count_documents(fil) == 0:
        await ctx.send("No {} found".format(type))
        return

    data = []
    for j in db.jobs.find(fil).sort('_id', -1):
        job = jobs.Job()
        job.load(j)
        data.append({'name': job.name, 'value': await get_job_output(ctx, job)})
    await paginate(ctx, 'Current {}'.format(type), data)
Example No. 19
def makeStrJob(user, storedpath):
    if user != 'ksb': return None
    import jobs, json
    params = json.loads(readOnSherOrSlac(storedpath + 'params.json'))
    return str(jobs.Job(params))
Example No. 20
def getVibs(constraint="1"):
    """
	Launch vibration calculation for all latticeopt or (vc-)relax jobs that meet some (SQL) constraint in the shared database
	Only (and all) nonmetal atoms will be vibrated (defined in misc.py)
	"""

    existing = manage.listOfIncompleteJobStrs()

    cons = AND(
        QE, RELAXORLAT, constraint
    )  #could we add a column to quickly filter things with nonmetals?

    output = db.query(
        [
            'fwid', 'storage_directory', 'job_name',
            'system_type_ksb',  # generic parameters
            'planewave_cutoff', 'xc', 'kptden_ksb', 'psp_ksb', 'dwrat_ksb',
            'econv_ksb', 'mixing_ksb', 'nmix_ksb', 'maxstep_ksb',
            'nbands_ksb', 'sigma_ksb', 'fmax_ksb',
            'dftcode',  # calc parameters
            'structure_ksb', 'bulkvacancy_ksb',
            'bulkscale_ksb',  # possible bulk parameters
            'facet_ksb', 'xy_ksb', 'layers_ksb', 'constrained_ksb',
            'symmetric_ksb', 'vacuum_ksb', 'vacancies_ksb',
            'adsorbates_ksb', 'sites_ksb',  # possible surf parameters
        ],
        cons)

    question = 'Are you sure you want to calculate vibrations for %d structures?' % len(
        output)

    if ask(question):
        newjbs = []
        for fwid, stor_dir, name, kind, pw, xc, kptden, psp, dwrat, econv, mixing, nmix, maxstep, nbands, sigma, fmax, dftcode, structure, bv, bs, facet, xy, lay, const, sym, vac, vacan, ads, sites in output:
            inittraj_pckl = storageDir2pckl(stor_dir)
            atoms = pickle.loads(inittraj_pckl)

            nonmetal_inds = [
                i for i, x in enumerate(atoms.get_chemical_symbols())
                if x in misc.nonmetalSymbs
            ]

            if len(nonmetal_inds) > 0:
                params = {
                    'jobkind': 'vib',
                    'inittraj_pckl': inittraj_pckl,
                    'name': name + '_vib',
                    'kind': kind,
                    'dftcode': dftcode,
                    'structure': structure,
                    'pw': pw,
                    'xc': xc,
                    'kptden': kptden,
                    'psp': psp,
                    'dwrat': dwrat,
                    'econv': econv,
                    'mixing': mixing,
                    'nmix': nmix,
                    'maxstep': maxstep,
                    'nbands': nbands,
                    'sigma': sigma,
                    'fmax': fmax,
                    'bulkvacancy_json': bv,
                    'bulkscale_json': bs,
                    'facet_json': facet,
                    'xy_json': xy,
                    'layers': lay,
                    'constrained': const,
                    'symmetric': sym,
                    'vacuum': vac,
                    'vacancies_json': vacan,
                    'adsorbates_json': ads,
                    'sites_base64': sites,
                    'parent': fwid,
                    'vibids_json': json.dumps(nonmetal_inds),
                    'delta': 0.04
                }

                job = jobs.Job(params)

                if job.new() and ask('does this look right %s' % str(params)):
                    newjbs.append(job)

        if ask("launch %d new jobs?" % len(newjbs)):
            for j in newjbs:
                j.submit(existing)
            misc.launch()
Example No. 21
def fwid2strjob(fwid):
    return str(jobs.Job(lpad.get_fw_dict_by_id(fwid)['spec']['params']))
Example No. 22
    def Dependencies(self, params):
        return [
            jobs.Job('dataset', params['dataset']),
        ]
Example No. 23
    def Dependencies(self, params):
        d = 2
        N = 16

        deps = []

        base_params = {
            'seed': 0,
            'iterations': 10000000,
            'dataset': {
                'N': N,
                'covariate_distribution': 'gaussian',
                'theta': [1] * d,
                'seed': 0,
                'noise': 'bernoulli',
            },
            'model': {
                'type': 'lr'
            },
        }

        # Zig-zag has no proposal so is separated here.
        zigzag_params = copy.copy(base_params)
        zigzag_params['iterations'] *= 50
        zigzag_params.update({'alg': {'type': 'zigzag', 'params': {}}})
        deps.append(jobs.Job('sample', zigzag_params))

        # All discrete-time algorithms.
        algs = [
            ('mh', {}),
            ('fmh', {'taylor_order': 1}),
            ('fmh', {'taylor_order': 2}),
            ('flymc', {'qdb': 0}),
            ('flymc', {'qdb': 0.001}),
            ('flymc', {'qdb': 0.1}),
            ('flymc', {'qdb': 1.0}),
        ]

        props = [
            ('random_walk', {}),
            ('pcn', {'rho': 0.5}),
        ]

        for (alg_type, alg_params), (proposal_type, proposal_params) in itertools.product(algs, props):
            dep_params = copy.copy(base_params)
            dep_params.update({
                'alg': {
                    'type': alg_type,
                    'params': alg_params,
                },
                'proposal': {
                    'type': proposal_type,
                    'params': proposal_params,
                },
            })
            deps.append(jobs.Job('sample', dep_params))

        deps.append(
            jobs.Job(
                'approximation',
                {
                    'dataset': {
                        'N': N,
                        'covariate_distribution': 'gaussian',
                        'theta': [1] * d,
                        'seed': 0,
                        'noise': 'bernoulli',
                    },
                    'model': {
                        'type': 'lr',
                    },
                }
            )
        )

        return deps
Example No. 24
def getBareSlab(constraint="1", check=True):
    """Create a bare slab for some set of relaxed bulk structures. Alter parameters below as necessary"""
    cons = AND(RELAXORLAT, BULK, MBEEF, PAWPSP, BCC, PW(1500), KPTDEN(2),
               constraint)

    facet = [1, 0, 0]
    xy = [1, 1]
    layers = 6
    constrained = 2
    symmetric = 1
    vacuum = 10
    vacancies = []

    output = db.query([
        'fwid', 'storage_directory', 'job_name', 'structure_ksb',
        'planewave_cutoff', 'xc', 'kptden_ksb', 'psp_ksb', 'dwrat_ksb',
        'econv_ksb', 'mixing_ksb', 'nmix_ksb', 'maxstep_ksb', 'nbands_ksb',
        'sigma_ksb', 'fmax_ksb', 'dftcode'
    ], cons)
    question = 'Are you sure you want to create bare slabs for %d structures?' % len(
        output)

    if ask(question):
        existing = manage.listOfIncompleteJobStrs()

        newjbs = []
        for fwid, stor_dir, name, structure, pw, xc, kptden, psp, dwrat, econv, mixing, nmix, maxstep, nbands, sigma, fmax, dftcode in output:
            ftraj = misc.readOnSherOrSlac(stor_dir + 'raw.pckl')

            surf, img = surfFuncs.bulk2surf(ftraj, facet, xy, layers,
                                            constrained, symmetric, vacuum,
                                            vacancies)
            name += '_' + ','.join(map(str, facet)) + '_' + 'x'.join(
                map(str, (xy + [layers])))
            params = {
                'jobkind': 'relax',
                'inittraj_pckl': pickle.dumps(surf),
                'name': name,
                'kind': 'surface',
                'dftcode': dftcode,
                'structure': structure,  # don't need bulk vacancies/scale?
                'pw': pw,
                'xc': xc,
                'kptden': kptden,
                'psp': psp,
                'bulkparent': fwid,
                'dwrat': dwrat,
                'econv': econv,
                'mixing': mixing,
                'nmix': nmix,
                'maxstep': maxstep,
                'nbands': nbands,
                'sigma': sigma,
                'fmax': fmax,
                'sites_base64': img,
                'facet_json': json.dumps(facet),
                'xy_json': json.dumps(xy),
                'layers': layers,
                'constrained': constrained,
                'symmetric': symmetric,
                'vacuum': vacuum,
                'vacancies_json': json.dumps(vacancies),
                'adsorbates_json': json.dumps({})
            }

            job = jobs.Job(params)
            if job.new(existing):
                if check: viz.view(surf)
                question = 'Does this structure look right?\n' + misc.abbreviateDict(
                    params)
                if not check or ask(question): job.submit()
        misc.launch()
Example No. 25
def addInterstitial(constraint="1", emttol=0.2, vischeck=True, limit=1):
    """
	Specify an interstitial and the number (up to which) you want to add them to the cell
	Filter duplicates via EMT-calculated energy (stipulate a tolerance)
	Assume final structure is triclinic
	"""

    inter, num = 'H', 2

    cons = AND(LATTICEOPT, QE, PW(500), KPTDEN(2), constraint)

    output = db.query(
        [
            'fwid', 'storage_directory', 'job_name',
            'system_type_ksb',  # generic parameters
            'planewave_cutoff', 'xc', 'kptden_ksb', 'psp_ksb', 'dwrat_ksb',
            'econv_ksb', 'mixing_ksb', 'nmix_ksb', 'maxstep_ksb',
            'nbands_ksb', 'sigma_ksb', 'fmax_ksb',
            'dftcode',  # calc parameters
            'bulkvacancy_ksb', 'bulkscale_ksb',  # bulk parameters
        ],
        cons,
        limit=limit)

    existing = manage.listOfIncompleteJobStrs()  # check running/fizzled jobs to avoid duplicates

    if ask('Are you sure you want to create interstitials for %d structures?' %
           len(output)):
        newjbs, totJbs, totStruct = [], 0, 0  #initialize variables

        for fwid, stor_dir, name, kind, pw, xc, kptden, psp, dwrat, econv, mixing, nmix, maxstep, nbands, sigma, fmax, dftcode, bv, bs in output:

            params = {
                'jobkind': 'vcrelax',
                'kind': 'bulk',
                'dftcode': dftcode,
                'name': name,
                'pw': pw,
                'xc': xc,
                'kptden': kptden,
                'psp': psp,
                'bulkparent': fwid,
                'dwrat': dwrat,
                'econv': econv,
                'mixing': mixing,
                'nmix': nmix,
                'maxstep': maxstep,
                'nbands': nbands,
                'sigma': sigma,
                'fmax': fmax,
                'structure': 'triclinic',
                'bulkvacancy_json': bv,
                'bulkscale_json': bs
            }  #bulk

            ftraj = misc.storageDir2pckl(stor_dir)
            spnpl = any([
                x > 0
                for x in pickle.loads(ftraj).get_initial_magnetic_moments()
            ])

            trajs = [[(pickle.loads(ftraj), '')]]  # initial state

            for i in range(num):

                # All structures with n - 1 interstitials
                # (initially there is only one, with 0 interstitials).
                lastround = trajs[-1]
                # Container for all new structures with n interstitials.
                trajs.append([])
                for inputtraj, strname in lastround:
                    for newtraj, newname in interstitialFuncs.getInterstitials(
                            inputtraj, inter, spnpl):
                        # Record every new traj, appending its suffix to the name.
                        trajs[-1].append((newtraj, strname + newname))

            def modParams(par, trj, nam):
                """For a given new traj and name, create input parameters."""
                p = copy.deepcopy(par)  # all parameters common to all of these jobs
                p['name'] += nam  # unique job name
                p['inittraj_pckl'] = pickle.dumps(trj)  # initial structure
                return p

            def getEMT(x):
                a = pickle.loads(x['inittraj_pckl'])
                a.set_calculator(emt.EMT())
                return a.get_potential_energy()

            # Collapse the list of lists into a single list of input parameters.
            onelevel = [
                modParams(params, item[0], item[1])
                for sublist in trajs[1:] for item in sublist
            ]

            # Pairs of (energy, params) ordered by energy.
            emtPairs = sorted(zip(map(getEMT, onelevel), onelevel))

            emtCounter, jbs = 0, []
            for e, p in emtPairs:
                if e - emtCounter > emttol:
                    jbs.append(jobs.Job(p))
                    emtCounter = e

            newjbs.extend([x for x in jbs if x.new(existing)])
            totJbs += len(jbs)
            totStruct += len(onelevel)

        check = ask(
            'Do you want to check %d/%d new jobs? (%d filtered by emt)' %
            (len(newjbs), totStruct, totStruct - totJbs))
        if not check and ask('Do you want to exit?'): return 0
        for jb in newjbs:
            question = 'Does this structure look right?\n' + misc.abbreviateDict(
                jb.params)
            if vischeck: viz.view(pickle.loads(jb.params['inittraj_pckl']))
            if not vischeck or ask(question): jb.submit()
        misc.launch()
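
The EMT filter at the end deserves a note: `emtCounter` starts at 0, so any structure whose EMT energy is below `emttol` is dropped outright rather than compared with its neighbors, and the bare tuple sort compares params dicts on energy ties (a TypeError under Python 3). A standalone sketch that keeps the lowest-energy member of each band of width `tol` regardless of sign:

def dedup_by_energy(pairs, tol):
    """pairs: iterable of (energy, params). Keep the first structure in each
    energy band wider than tol; near-duplicates are assumed to have near-equal
    EMT energy."""
    kept, last = [], None
    for e, p in sorted(pairs, key=lambda ep: ep[0]):  # sort on energy only
        if last is None or e - last > tol:
            kept.append(p)
            last = e
    return kept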
Example No. 26
    def Dependencies(self, params):
        return [
            jobs.Job('sample', params),
        ]
Example No. 27
# (truncated: this example begins mid-function, inside a matplotlib savefig call
# that ends with bbox_inches='tight', pad_inches=0)


if __name__ == '__main__':
    for dirname in ['figs', 'output']:
        if not os.path.exists(dirname):
            os.mkdir(dirname)

    root_jobs = []
    for model, d in [('lr', 10)]:
        root_jobs.append(
            jobs.Job('ess_random_walk', {
                'model_type': model,
                'd': d,
            })
        )
        root_jobs.append(
            jobs.Job('ess_pcn', {
                'model_type': model,
                'd': d,
            })
        )
        root_jobs.append(
            jobs.Job('acceptance_pcn', {
                'model_type': model,
                'd': d,
            })
        )
Example No. 28
def parseConfigFile(fname, Config=Config, Jobs=Jobs):
    global DefaultToolList

    CP = ConfigParser.ConfigParser()
    CP.readfp(file(fname.rstrip(), 'rt'))

    # First parse global options
    if CP.has_section('Options'):
        for opt in CP.options('Options'):
            # Is it one we expect
            if Config.has_key(opt):
                # Yup...override it
                Config[opt] = CP.get('Options', opt)

            elif CP.defaults().has_key(opt):
                pass  # Ignore DEFAULTS section keys

            elif opt in ('fabricationdrawing', 'outlinelayer'):
                print '*' * 73
                print '\nThe FabricationDrawing and OutlineLayer configuration options have been'
                print 'renamed as of GerbMerge version 1.0. Please consult the documentation for'
                print 'a description of the new options, then modify your configuration file.\n'
                print '*' * 73
                sys.exit(1)
            else:
                raise RuntimeError, "Unknown option '%s' in [Options] section of configuration file" % opt
    else:
        raise RuntimeError, "Missing [Options] section in configuration file"

    # Ensure we got a tool list
    if not Config.has_key('toollist'):
        raise RuntimeError, "INTERNAL ERROR: Missing tool list assignment in [Options] section"

    # Make integers integers, floats floats
    for key, val in Config.items():
        try:
            val = int(val)
            Config[key] = val
        except:
            try:
                val = float(val)
                Config[key] = val
            except:
                pass

    # Process lists of strings
    if Config['cutlinelayers']:
        Config['cutlinelayers'] = parseStringList(Config['cutlinelayers'])
    if Config['cropmarklayers']:
        Config['cropmarklayers'] = parseStringList(Config['cropmarklayers'])


# setup default x & y spacing, taking into account metric units
#    if (xspacing == 0):
#      if (Config['measurementunits'] == 'inch'):
#        xspacing = 0.125
#      else:
#        xspacing = 3

#    if (yspacing == 0):
#      if (Config['measurementunits'] == 'inch'):
#        yspacing = 0.125
#      else:
#        yspacing = 3

# Process list of minimum feature dimensions
    if Config['minimumfeaturesize']:
        temp = Config['minimumfeaturesize'].split(",")
        try:
            for index in range(0, len(temp), 2):
                MinimumFeatureDimension[temp[index]] = float(temp[index + 1])
        except:
            raise RuntimeError, "Illegal configuration string:" + Config[
                'minimumfeaturesize']

    # Process MergeOutputFiles section to set output file names
    if CP.has_section('MergeOutputFiles'):
        for opt in CP.options('MergeOutputFiles'):
            # Each option is a layer name and the output file for this name
            if opt[0] == '*' or opt in ('boardoutline', 'drills', 'placement',
                                        'toollist'):
                MergeOutputFiles[opt] = CP.get('MergeOutputFiles', opt)

    # Now, we go through all jobs and collect Gerber layers
    # so we can construct the Global Aperture Table.
    apfiles = []

    for jobname in CP.sections():
        if jobname == 'Options': continue
        if jobname == 'MergeOutputFiles': continue
        if jobname == 'GerbMergeGUI': continue

        # Ensure all jobs have a board outline
        if not CP.has_option(jobname, 'boardoutline'):
            raise RuntimeError, "Job '%s' does not have a board outline specified" % jobname

        if not CP.has_option(jobname, 'drills'):
            raise RuntimeError, "Job '%s' does not have a drills layer specified" % jobname

        for layername in CP.options(jobname):
            if layername[0] == '*' or layername == 'boardoutline':
                fname = CP.get(jobname, layername)
                apfiles.append(fname)

                if layername[0] == '*':
                    LayerList[layername] = 1

    # Now construct global aperture tables, GAT and GAMT. This step actually
    # reads in the jobs for aperture data but doesn't store Gerber
    # data yet.
    aptable.constructApertureTable(apfiles)
    del apfiles

    if 0:
        keylist = GAMT.keys()
        keylist.sort()
        for key in keylist:
            print '%s' % GAMT[key]
        sys.exit(0)

    # Parse the tool list
    if Config['toollist']:
        DefaultToolList = parseToolList(Config['toollist'])

    # Now get jobs. Each job implies layer names, and we
    # expect consistency in layer names from one job to the
    # next. Two reserved layer names, however, are
    # BoardOutline and Drills.

    Jobs.clear()

    do_abort = 0
    errstr = 'ERROR'
    if Config['allowmissinglayers']:
        errstr = 'WARNING'

    for jobname in CP.sections():
        if jobname == 'Options': continue
        if jobname == 'MergeOutputFiles': continue
        if jobname == 'GerbMergeGUI': continue

        print ''  # empty line before hand for readability
        print 'Reading data from', jobname, '...'

        J = jobs.Job(jobname)

        # Parse the job settings, like tool list, first, since we are not
        # guaranteed to have ConfigParser return the layers in the same order that
        # the user wrote them, and we may get Gerber files before we get a tool
        # list! Same thing goes for ExcellonDecimals. We need to know what this is
        # before parsing any Excellon files.
        for layername in CP.options(jobname):
            fname = CP.get(jobname, layername)

            if layername == 'toollist':
                J.ToolList = parseToolList(fname)
            elif layername == 'excellondecimals':
                try:
                    J.ExcellonDecimals = int(fname)
                except:
                    raise RuntimeError, "Excellon decimals '%s' in config file is not a valid integer" % fname
            elif layername == 'repeat':
                try:
                    J.Repeat = int(fname)
                except:
                    raise RuntimeError, "Repeat count '%s' in config file is not a valid integer" % fname

        for layername in CP.options(jobname):
            fname = CP.get(jobname, layername)

            if layername == 'boardoutline':
                J.parseGerber(fname, layername, updateExtents=1)
            elif layername[0] == '*':
                J.parseGerber(fname, layername, updateExtents=0)
            elif layername == 'drills':
                J.parseExcellon(fname)

        # Emit warnings if some layers are missing
        LL = LayerList.copy()
        for layername in J.apxlat.keys():
            assert LL.has_key(layername)
            del LL[layername]

        if LL:
            if errstr == 'ERROR':
                do_abort = 1

            print '%s: Job %s is missing the following layers:' % (errstr,
                                                                   jobname)
            for layername in LL.keys():
                print '  %s' % layername

        # Store the job in the global Jobs dictionary, keyed by job name
        Jobs[jobname] = J

    if do_abort:
        raise RuntimeError, 'Exiting since jobs are missing layers. Set AllowMissingLayers=1\nto override.'
Example No. 29
    def add_job(self):
        self.job_button = jobs.Job(self)
Example No. 30
def parseConfigFile(configFilePath, Config=Config, Jobs=Jobs):
    global DefaultToolList

    CP = configparser.ConfigParser()
    CP.read(configFilePath)

    # Store the base directory that all files are referenced from (the one the config file is in).
    configDir = os.path.dirname(configFilePath)

    # First parse global options and merge them into the global Config options object.
    if CP.has_section('Options'):
        for opt in CP.options('Options'):
            # Is it one we expect
            if opt in Config:
                # Yup...override it
                Config[opt] = CP.get('Options', opt)

            elif opt in CP.defaults():
                pass   # Ignore DEFAULTS section keys

            elif opt in ('fabricationdrawing', 'outlinelayer'):
                print('*' * 73)
                print('\nThe FabricationDrawing and OutlineLayer configuration options have been')
                print('renamed as of GerbMerge version 1.0. Please consult the documentation for')
                print('a description of the new options, then modify your configuration file.\n')
                print('*' * 73)
                sys.exit(1)
            else:
                raise RuntimeError("Unknown option '{:s}' in [Options] section of configuration file".format(opt))
    else:
        raise RuntimeError("Missing [Options] section in configuration file")

    # Ensure we got a tool list
    if 'toollist' not in Config:
        raise RuntimeError("INTERNAL ERROR: Missing tool list assignment in [Options] section")

    # Make integers integers, floats floats
    for key, val in Config.items():
        try:
            val = int(val)
            Config[key] = val
        except:
            try:
                val = float(val)
                Config[key] = val
            except:
                pass

    # Process lists of strings
    if Config['cutlinelayers']:
        Config['cutlinelayers'] = parseStringList(Config['cutlinelayers'])
    if Config['cropmarklayers']:
        Config['cropmarklayers'] = parseStringList(Config['cropmarklayers'])
    if Config['outlinelayers']:
        Config['outlinelayers'] = parseStringList(Config['outlinelayers'])

    # Process list of minimum feature dimensions
    if Config['minimumfeaturesize']:
        temp = Config['minimumfeaturesize'].split(",")
        try:
            for index in range(0, len(temp), 2):
                MinimumFeatureDimension[temp[index]] = float(temp[index + 1])
        except:
            raise RuntimeError("Illegal configuration string:" + Config['minimumfeaturesize'])

    # Process MergeOutputFiles section to set output file names
    if CP.has_section('MergeOutputFiles'):
        for opt in CP.options('MergeOutputFiles'):
            # Each option is a layer name and the output file for this name
            if opt[0] == '*' or opt in ('boardoutline', 'drills', 'placement', 'toollist'):
                MergeOutputFiles[opt] = CP.get('MergeOutputFiles', opt)

    # Now, we go through all jobs and collect Gerber layers
    # so we can construct the Global Aperture Table.
    apfiles = []

    for jobname in CP.sections():
        if jobname == 'Options' or jobname == 'MergeOutputFiles' or jobname == 'GerbMergeGUI':
            continue

        # Ensure all jobs have a board outline
        if not CP.has_option(jobname, 'boardoutline'):
            raise RuntimeError("Job '{:s}' does not have a board outline specified".format(jobname))

        if not CP.has_option(jobname, 'drills'):
            raise RuntimeError("Job '{:s}' does not have a drills layer specified".format(jobname))

        for layername in CP.options(jobname):
            if layername[0] == '*' or layername == 'boardoutline':
                fname = CP.get(jobname, layername)
                apfiles.append(fname)

                if layername[0] == '*':
                    LayerList[layername] = 1

    # Now construct global aperture tables, GAT and GAMT. This step actually
    # reads in the jobs for aperture data but doesn't store Gerber
    # data yet.
    aptable.constructApertureTable([os.path.join(configDir, x) for x in apfiles], GAT, GAMT)
    del apfiles

    # Parse the tool list
    if Config['toollist']:
        DefaultToolList = excellon.parseToolList(Config['toollist'])

    # Now get jobs. Each job implies layer names, and we
    # expect consistency in layer names from one job to the
    # next. Two reserved layer names, however, are
    # BoardOutline and Drills.

    Jobs.clear()

    do_abort = False
    errstr = 'ERROR'
    if Config['allowmissinglayers']:
        errstr = 'WARNING'

    for jobname in CP.sections():
        if jobname == 'Options' or jobname == 'MergeOutputFiles' or jobname == 'GerbMergeGUI':
            continue

        print('Reading data from', jobname, '...')

        J = jobs.Job(jobname)

        # Parse the job settings, like tool list, first, since we are not
        # guaranteed to have ConfigParser return the layers in the same order that
        # the user wrote them, and we may get Gerber files before we get a tool
        # list! Same thing goes for ExcellonDecimals. We need to know what this is
        # before parsing any Excellon files.
        for layername in CP.options(jobname):
            fname = CP.get(jobname, layername)

            if layername == 'toollist':
                fname = os.path.join(configDir, CP.get(jobname, layername))
                J.ToolList = excellon.parseToolList(fname)
            elif layername == 'excellondecimals':
                try:
                    J.ExcellonDecimals = int(fname)
                except:
                    raise RuntimeError("Excellon decimals '{:s}' in config file is not a valid integer".format(fname))
            elif layername == 'repeat':
                try:
                    J.Repeat = int(fname)
                except:
                    raise RuntimeError("Repeat count '{:s}' in config file is not a valid integer".format(fname))

        for layername in CP.options(jobname):
            fname = os.path.join(configDir, CP.get(jobname, layername))

            if layername == 'boardoutline':
                J.parseGerber(fname, layername, updateExtents=1)
            elif layername[0] == '*':
                J.parseGerber(fname, layername, updateExtents=0)
            elif layername == 'drills':
                J.parseExcellon(fname)

        # Emit warnings if some layers are missing
        LL = LayerList.copy()
        for layername in J.apxlat.keys():
            assert layername in LL
            del LL[layername]

        if LL:
            if errstr == 'ERROR':
                do_abort = True

            print("{:s}: Job {:s} is missing the following layers:".format(errstr, jobname))
            for layername in LL.keys():
                print("  {:s}".format(layername))

        # Store the job in the global Jobs dictionary, keyed by job name
        Jobs[jobname] = J

    if do_abort:
        raise RuntimeError("Exiting since jobs are missing layers. Set AllowMissingLayers=1\nto override.")
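
Both versions of `parseConfigFile` coerce option values with nested bare `except:` clauses, which also swallow `KeyboardInterrupt` and any typo inside the `try` body. A narrower sketch of the same int-then-float coercion (the sample `Config` contents are illustrative):

def coerce(val):
    """Return val as int if possible, else float, else unchanged."""
    for cast in (int, float):
        try:
            return cast(val)
        except (TypeError, ValueError):
            pass
    return val

Config = {'xspacing': '0.125', 'rotation': '90', 'toollist': 'tools.drl'}  # illustrative
for key, val in Config.items():
    Config[key] = coerce(val)
assert Config == {'xspacing': 0.125, 'rotation': 90, 'toollist': 'tools.drl'}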