Example #1
import os
import subprocess

from bgqtools import get_bootable_blocks, boot_blocks, block_corner_iter

print 'Nodes per job: %d' % npj
print 'MPI tasks per node: %d' % ppn
print 'Number of sub-block jobs: %d' % njobs
print 'Number of nodes: %d' % nodes

blocks = get_bootable_blocks(partition, nodes)
print 'Available blocks: %s' % blocks
boot_blocks(blocks)
# start sub-block jobs with background runjob helper processes
jobs = []
logs = []

# Each job is a tuple of the form
# (jobdir, rc, i_v, i_bxv), matching the job[0]..job[3] indexing below.
for job, (block, corner, shape) in zip(jobdirs, block_corner_iter(blocks, npj)):
    print job, (block, corner, shape)
    os.chdir(os.path.join(scratch, job[0]))
    log = open('gbrelax.out', 'a')
    locargs = '--block %s --corner %s --shape %s' % (block, corner, shape)
    # runjob_args = ('python %s -n %d -p %d %s' % (locargs, npj*ppn, ppn, envargs)).split()
    pyargs = 'python /home/lambert/pymodules/imeall/imeall/run_dyn.py -rc {rc} -i_v {i_v} -i_bxv {i_bxv} '.format(
        rc=job[1], i_v=job[2], i_bxv=job[3])
    runjob_args = ('runjob %s -n %d -p %d %s : %s' %
                   (locargs, 1, 1, envargs, pyargs)).split()
    print ' '.join(runjob_args)
    jobs.append(subprocess.Popen(runjob_args, stdout=log))
    logs.append(log)

# wait for all background jobs to finish, then flush their logs
for (job, log) in zip(jobs, logs):
    job.wait()
    log.close()
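
# ----------------------------------------------------------------------
# Hypothetical sketch: the loop above assumes a pre-built `jobdirs` list
# indexed as job[0]..job[3] = (jobdir, rc, i_v, i_bxv). One way such a
# list might be assembled (names and parameter values illustrative only):
jobdirs = []
for rc in [1.4, 1.6]:
    for i_v in [0, 1]:
        for i_bxv in [0, 1]:
            jobdirs.append(('gb_rc%.1f_v%d_bxv%d' % (rc, i_v, i_bxv),
                            rc, i_v, i_bxv))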
Example #2
# pick up the starting structure and constrain every atom to move only
# along the z-axis
job_xyz = glob.glob('feb*.xyz')[0]
bulk = Atoms(job_xyz)

line = []
for at in bulk:
    line.append(FixedLine(at.index, (0, 0, 1)))
bulk.set_constraint(line)

# boot the Blue Gene/Q partition and take the first available sub-block
hostname, ip = get_hostname_ip()
partsize, partition, job_name = get_cobalt_info()
blocks = get_bootable_blocks(partition, nodes)
print('Available blocks: %s' % blocks)
boot_blocks(blocks)

block, corner, shape = list(block_corner_iter(blocks, npj))[0]
print block, corner, shape

vasp_client = VaspClient(client_id=0,
                         kpts=[16, 16, 1],
                         amix=0.01,
                         amin=0.001,
                         bmix=0.001,
                         amix_mag=0.01,
                         bmix_mag=0.001,
                         npj=npj,
                         ppn=ppn,
                         block=block,
                         corner=corner,
                         shape=shape,
                         exe=vasp,
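
# ----------------------------------------------------------------------
# Sketch: Example #2 consumes only the first sub-block. When npj divides
# the booted partition into several sub-blocks, the same iterator yields
# one (block, corner, shape) triple per job, and the enumerate index can
# serve as a client_id (mirroring the qm_subblocks pattern in Example #5):
for client_id, (block, corner, shape) in enumerate(block_corner_iter(blocks, npj)):
    print 'client %d -> block=%s corner=%s shape=%s' % (client_id, block, corner, shape)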
Example #3
assert nodes == partsize

print 'Nodes per job: %d' % npj
print 'MPI tasks per node: %d' % ppn
print 'Number of sub-block jobs: %d' % njobs
print 'Number of nodes: %d' % nodes

blocks = get_bootable_blocks(partition, nodes)
print 'Available blocks: %s' % blocks

boot_blocks(blocks)

# start sub-block jobs with background runjob helper processes
jobs = []
logs = []
for job, (block, corner, shape) in zip(jobdirs, block_corner_iter(blocks, npj)):
    print job, (block, corner, shape)
    os.chdir(os.path.join(scratch, job))
    log = open('%d.vasp.stdout' % jobid, 'w')

    locargs = '--block %s --corner %s --shape %s' % (block, corner, shape)
    runjob_args = ('runjob %s -n %d -p %d %s : %s' % (locargs, npj*ppn, ppn, envargs, vasp)).split()
    print ' '.join(runjob_args)
    print

    jobs.append(subprocess.Popen(runjob_args, stdout=log))
    logs.append(log)
    
# wait for all background jobs to finish, then flush their logs
for (job, log) in zip(jobs, logs):
    job.wait()
    log.close()
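
# ----------------------------------------------------------------------
# Optional hedged extension: flag any runjob helper that exited non-zero
# (subprocess.Popen.returncode is populated once wait() has returned).
failed = [j.pid for j in jobs if j.returncode != 0]
if failed:
    print 'runjob processes with non-zero exit status: %r' % failed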
Example #4
    os.system(qsub_args)
    sys.exit(1)

  if args.block != '0':
    block  = args.block
    corner = args.corner
    shape  = args.shape
  else:
    # no block information available, so grab it from the Cobalt
    # environment and boot the blocks ourselves
    partsize  = int(os.environ['COBALT_PARTSIZE'])
    partition = os.environ['COBALT_PARTNAME']
    jobid     = int(os.environ['COBALT_JOBID'])
    blocks    = get_bootable_blocks(partition, nodes)
    print 'Available blocks: %s' % blocks
    boot_blocks(blocks)
    print list(block_corner_iter(blocks, nodes))
    # keep the last (block, corner, shape) triple the iterator yields
    for b, c, s in block_corner_iter(blocks, nodes):
      block, corner, shape = b, c, s
  npj    = int(args.npj)
  ppn    = int(args.ppn)
  hostname, ip = get_hostname_ip()

  reference_file = 'ref_slab.xyz'  # Reference file for Nye tensor
  continuation = args.continuation # If true, restart from last frame of most recent *.traj.xyz file
  test_mode    = False
  classical    = False             # If true, do classical MD instead of QM/MM
  sim_T        = 300.0*units.kB    # Simulation temperature
  rescale_velo = True              # Rescale velocities to 2*sim_T
  timestep     = 1.0*units.fs      # Timestep (NB: time base units are not fs!)
  cutoff_skin  = 2.0*units.Ang     # Amount by which potential cutoff is increased
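
  # --------------------------------------------------------------------
  # Hedged sketch: how the MD parameters above would typically feed into
  # a standard ASE Langevin run. `atoms` and the friction value (0.002)
  # are assumptions for illustration, not part of the original script.
  from ase.md.langevin import Langevin
  from ase.md.velocitydistribution import MaxwellBoltzmannDistribution

  if rescale_velo:
    # start at 2*sim_T so that, after equipartition between kinetic and
    # potential energy, the system relaxes towards sim_T
    MaxwellBoltzmannDistribution(atoms, 2.0*sim_T)
  dyn = Langevin(atoms, timestep, sim_T, 0.002)
  dyn.run(100)  # number of steps is illustrative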
Example #5
    try:
        partsize, partition, job_name = get_cobalt_info()
        jobname = '%s.%s' % (job_name, os.path.splitext(os.path.basename(sys.argv[0]))[0])
    except KeyError:
        # Not running under cobalt, so let's qsub ourselves
        print 'NOT RUNNING COBALT'
        qsub_args = 'qsub -A %s -n %d -t %d -q %s --mode script --disable_preboot %s' % \
            (acct, nodes, runtime, queue, ' '.join(sys.argv))
        print qsub_args
        os.system(qsub_args)
        sys.exit(1)

    blocks = get_bootable_blocks(partition, nodes)
    print('Available blocks: %s' % blocks)
    boot_blocks(blocks)

    qm_subblocks = list(enumerate(block_corner_iter(blocks, qm_npj)))
    print 'qm_subblocks', qm_subblocks
    qm_clients = [VaspClient(client_id, qm_exe, qm_env, qm_npj, qm_ppn,
                             block, corner, shape, jobname, **vasp_args)
                  for client_id, (block, corner, shape) in qm_subblocks]
else:
    qm_clients = []
    hostname, ip = '<dummy>', 0

print 'FEN hostname: %s' % hostname
print 'FEN server IP: %s' % ip
print 'QM nodes per job: %r' % qm_npj
print 'QM MPI tasks per node: %r' % qm_ppn
print 'Number of QM jobs: %d' % n_qm_jobs
print 'Total number of sub-block jobs: %d' % njobs
print 'Total number of nodes: %d' % nodes
# ******* End of parameters *************