Example #1
    def __init__(
        self,
        ssh_session,
        local_root,
        job_uuid=None,
    ):
        self.local_root = os.path.abspath(local_root)
        if job_uuid:
            self.job_uuid = job_uuid
        else:
            self.job_uuid = str(uuid.uuid4())

        self.remote_root = os.path.join(ssh_session.get_session_root(),
                                        self.job_uuid)
        dlog.info("local_root is %s" % local_root)
        dlog.info("remote_root is %s" % self.remote_root)
        self.ssh = ssh_session.get_ssh_client()
        # keep ssh alive
        transport = self.ssh.get_transport()
        transport.set_keepalive(60)
        try:
            sftp = self.ssh.open_sftp()
            sftp.mkdir(self.remote_root)
            sftp.close()
        except OSError:
            # the remote directory may already exist
            pass
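
A minimal usage sketch, not part of the original: the enclosing class is not named in the snippet, so SSHContext and its import path are assumptions here; the machine-profile keys follow the SSHSession usage seen in Example #25.

from dpgen.dispatcher.SSHContext import SSHSession, SSHContext  # assumed location, not shown above

machine = {'hostname': '10.0.0.1', 'port': 22, 'username': 'user', 'password': 'secret'}
ssh_session = SSHSession(machine)            # as in Example #25: SSHSession(fp_machine)
context = SSHContext(ssh_session, './work')  # creates <session_root>/<job_uuid> on the remote side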
Example #2
def make_fp(iter_index, jdata, mdata):
    iter_name = make_iter_name(iter_index)
    work_path = os.path.join(iter_name, fp_name)
    create_path(work_path)
    picked_data_path = os.path.join(iter_name, model_devi_name,
                                    picked_data_name)
    if jdata.get("labeled", False):
        dlog.info("already labeled, skip make_fp and link data directly")
        os.symlink(os.path.abspath(picked_data_path),
                   os.path.abspath(os.path.join(work_path, "task.%03d" % 0)))
        os.symlink(os.path.abspath(picked_data_path),
                   os.path.abspath(os.path.join(work_path, "data.%03d" % 0)))
        return
    systems = get_systems(picked_data_path, jdata)
    fp_style = jdata['fp_style']
    if 'user_fp_params' in jdata:
        fp_params = jdata['user_fp_params']
    else:
        fp_params = jdata['fp_params']
    jj = 0
    for system in systems:
        for subsys in system:
            sys_data = subsys.data
            task_name = "task.%03d.%06d" % (0, jj)
            task_path = os.path.join(work_path, task_name)
            create_path(task_path)
            if fp_style == "gaussian":
                ret = make_gaussian_input(sys_data, fp_params)
                with open(os.path.join(task_path, 'input'), 'w') as fp:
                    fp.write(ret)
            else:
                # TODO: support other formats
                raise RuntimeError("unsupported fp style")
            jj += 1
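
The keys this function reads can be summarized with a minimal jdata sketch; only "labeled", "fp_style", "user_fp_params" and "fp_params" come from the code above, the parameter values are illustrative assumptions.

jdata = {
    "labeled": False,                # True short-circuits into the symlink branch
    "fp_style": "gaussian",          # the only style this snippet supports
    "user_fp_params": {"nproc": 4},  # takes precedence over "fp_params" when present
}
make_fp(iter_index=0, jdata=jdata, mdata={})  # mdata is accepted but unused here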
Example #3
def make_fp_labeled(iter_index, jdata):
    dlog.info("already labeled, skip make_fp and link data directly")
    pick_data = jdata['pick_data']
    use_clusters = jdata.get('use_clusters', False)
    iter_name = make_iter_name(iter_index)
    work_path = os.path.join(iter_name, fp_name)
    create_path(work_path)
    picked_data_path = os.path.join(iter_name, model_devi_name,
                                    picked_data_name)
    if use_clusters:
        os.symlink(
            os.path.abspath(picked_data_path),
            os.path.abspath(
                os.path.join(work_path, "task." + data_system_fmt % 0)))
        os.symlink(
            os.path.abspath(picked_data_path),
            os.path.abspath(
                os.path.join(work_path, "data." + data_system_fmt % 0)))
    else:
        picked_data_path = os.path.abspath(picked_data_path)
        sys_path = glob.glob(os.path.join(picked_data_path, sys_name_pattern))
        cwd = os.getcwd()
        os.chdir(work_path)
        for ii in sys_path:
            sys_idx = os.path.basename(ii).split('.')[1]
            data_dir = 'data.' + data_system_fmt % int(sys_idx)
            task_dir = 'task.' + data_system_fmt % int(sys_idx)
            os.symlink(os.path.relpath(ii), data_dir)
            os.symlink(os.path.relpath(ii), task_dir)
        os.chdir(cwd)
Example #4
def make_dispatcher(mdata,
                    mdata_resource=None,
                    work_path=None,
                    run_tasks=None,
                    group_size=None):
    if 'ali_auth' in mdata:
        from dpgen.dispatcher.ALI import ALI
        nchunks = len(_split_tasks(run_tasks, group_size))
        dispatcher = ALI(mdata['ali_auth'], mdata_resource, mdata, nchunks)
        dispatcher.init(work_path, run_tasks, group_size)
        return dispatcher
    else:
        hostname = mdata.get('hostname', None)
        #use_uuid = mdata.get('use_uuid', False)
        if hostname:
            context_type = 'ssh'
        else:
            context_type = 'local'
        try:
            batch_type = mdata['batch']
        except KeyError:
            dlog.info(
                'cannot find key "batch" in machine file, try to use deprecated key "machine_type"'
            )
            batch_type = mdata['machine_type']
        lazy_local = mdata.get('lazy-local', False) or mdata.get('lazy_local', False)
        if lazy_local and context_type == 'local':
            dlog.info('Dispatcher switches to the lazy local mode')
            context_type = 'lazy-local'
        disp = Dispatcher(mdata,
                          context_type=context_type,
                          batch_type=batch_type)
        return disp
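
How the branches above select a dispatcher can be shown with small machine dicts; a sketch, where the batch values are examples rather than an exhaustive list.

# No 'ali_auth' and no 'hostname': local context; 'batch' picks the batch system.
disp_local = make_dispatcher({'batch': 'shell'})
# A 'hostname' switches the context to ssh.
disp_remote = make_dispatcher({'hostname': '10.0.0.1', 'batch': 'slurm'})
# 'lazy_local' (or the deprecated 'lazy-local') downgrades a local context to lazy-local.
disp_lazy = make_dispatcher({'batch': 'shell', 'lazy_local': True})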
Example #5
def gen_simplify(args):
    if args.PARAM and args.MACHINE:
        if args.debug:
            dlog.setLevel(logging.DEBUG)
        dlog.info("start simplifying")
        run_iter(args.PARAM, args.MACHINE)
        dlog.info("finished")
Example #6
    def delete(self, ii):
        '''delete one machine'''
        request = DeleteInstancesRequest()
        request.set_accept_format('json')
        request.set_InstanceIds(
            [self.dispatcher_list[ii]["entity"].instance_id])
        request.set_Force(True)
        count = 0
        flag = 0
        while count < 10:
            try:
                response = self.client.do_action_with_exception(request)
                flag = 1
                break
            except ServerException as e:
                time.sleep(10)
                count += 1

        if flag:
            status_list = [
                item["dispatcher_status"] for item in self.dispatcher_list
            ]
            running_num = status_list.count("running")
            running_num += status_list.count("unsubmitted")
            self.change_apg_capasity(running_num)
        else:
            dlog.info("delete failed, exit")
            sys.exit()
Example #7
 def run_jobs(self,
              resources,
              command,
              work_path,
              tasks,
              group_size,
              forward_common_files,
              forward_task_files,
              backward_task_files,
              forward_task_deference=True,
              mark_failure=False,
              outlog='log',
              errlog='err'):
     ratio_failure = self.mdata_resources.get("ratio_failue", 0)  # sic: the resource key is spelled "ratio_failue"
     while True:
         if self.check_all_dispatchers_finished(ratio_failure):
             self.clean()
             break
         self.exception_handling(ratio_failure)
         for ii in range(self.nchunks):
             dispatcher_status = self.check_dispatcher_status(ii)
             if dispatcher_status == "unsubmitted":
                 dlog.info(self.dispatcher_list[ii]["entity"].ip)
                 self.dispatcher_list[ii][
                     "entity"].job_handler = self.dispatcher_list[
                         ii]["dispatcher"].submit_jobs(
                             resources, command, work_path,
                             self.task_chunks[ii], group_size,
                             forward_common_files, forward_task_files,
                             backward_task_files, forward_task_deference,
                             outlog, errlog)
                 self.dispatcher_list[ii][
                     "entity"].job_record = self.dispatcher_list[ii][
                         "entity"].job_handler["job_record"]
                 self.dispatcher_list[ii]["dispatcher_status"] = "running"
             elif dispatcher_status == "finished" and self.dispatcher_list[
                     ii]["entity"]:
                 # no jobs in queue, delete current machine
                 # else add current machine to server_pool
                 entity = self.dispatcher_list[ii]["entity"]
                 status_list = [
                     item["dispatcher_status"]
                     for item in self.dispatcher_list
                 ]
                 flag = "unallocated" in status_list
                 if not flag:
                     self.delete(ii)
                 else:
                     self.dispatcher_list[ii]["entity"] = None
                     self.server_pool.append(entity.instance_id)
                     self.ip_pool.append(entity.ip)
             elif dispatcher_status == "running":
                 pass
             elif dispatcher_status == "unallocated":
                 # if len(server_pool) > 0: make_dispatcher
                 # else: pass
                 self.create(ii)
             elif dispatcher_status == "terminated":
                 pass
         self.update()
         time.sleep(10)
Example #8
    def create_apg(self):
        request = CreateAutoProvisioningGroupRequest()
        request.set_accept_format('json')
        request.set_TotalTargetCapacity(str(self.nchunks_limit))
        request.set_LaunchTemplateId(self.cloud_resources["template_id"])
        request.set_AutoProvisioningGroupName(
            self.cloud_resources["instance_name"] +
            ''.join(random.choice(string.ascii_uppercase) for _ in range(20)))
        request.set_AutoProvisioningGroupType("maintain")
        request.set_SpotAllocationStrategy("lowest-price")
        request.set_SpotInstanceInterruptionBehavior("terminate")
        request.set_SpotInstancePoolsToUseCount(1)
        request.set_ExcessCapacityTerminationPolicy("termination")
        request.set_TerminateInstances(True)
        request.set_PayAsYouGoTargetCapacity("0")
        request.set_SpotTargetCapacity(str(self.nchunks_limit))
        config = self.generate_config()
        request.set_LaunchTemplateConfigs(config)

        try:
            response = self.client.do_action_with_exception(request)
            response = json.loads(response)
            with open('apg_id.json', 'w') as fp:
                json.dump({'apg_id': response["AutoProvisioningGroupId"]},
                          fp,
                          indent=4)
            return response["AutoProvisioningGroupId"]
        except (ServerException, ClientException) as e:
            dlog.info("create apg failed, err msg: %s" % e)
            sys.exit()
Example #9
 def describe_apg_instances(self):
     request = DescribeAutoProvisioningGroupInstancesRequest()
     request.set_accept_format('json')
     request.set_AutoProvisioningGroupId(self.cloud_resources["apg_id"])
     request.set_PageSize(100)
     iteration = self.nchunks // 100
     instance_list = []
     for i in range(iteration + 1):
         request.set_PageNumber(i + 1)
         count = 0
         flag = 0
         err_msg = None
         while count < 10:
             try:
                 response = self.client.do_action_with_exception(request)
                 response = json.loads(response)
                 for ins in response["Instances"]["Instance"]:
                     instance_list.append(ins["InstanceId"])
                 flag = 1
                 break
             except ServerException as e:
                 # dlog.info(e)
                 err_msg = e
                 count += 1
             except ClientException as e:
                 # dlog.info(e)
                 err_msg = e
                 count += 1
         if not flag:
             dlog.info("describe_apg_instances failed, err msg: %s" %
                       err_msg)
             sys.exit()
     return instance_list
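
The retry idiom above (up to ten attempts, remember the last error, bail out if none succeeds) recurs throughout these ALI methods; a standalone sketch of the same pattern, not taken from the original:

import time

def retry_action(action, max_tries=10, wait=10):
    '''Call action() up to max_tries times, sleeping between failures.'''
    last_err = None
    for _ in range(max_tries):
        try:
            return action()
        except Exception as e:  # the originals catch ServerException/ClientException
            last_err = e
            time.sleep(wait)
    raise RuntimeError('action failed after %d tries: %s' % (max_tries, last_err))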
Example #10
 def create_template(self, image_id, sg_id, vpc_id):
     request = CreateLaunchTemplateRequest()
     request.set_accept_format('json')
     request.set_LaunchTemplateName(''.join(
         random.choice(string.ascii_uppercase) for _ in range(20)))
     request.set_ImageId(image_id)
     request.set_ImageOwnerAlias("self")
     request.set_PasswordInherit(True)
     if "address" in self.cloud_resources and self.cloud_resources[
             'address'] == "public":
         request.set_InternetMaxBandwidthIn(100)
         request.set_InternetMaxBandwidthOut(100)
     request.set_InstanceType("ecs.c6.large")
     request.set_InstanceName(self.cloud_resources["instance_name"])
     request.set_SecurityGroupId(sg_id)
     request.set_VpcId(vpc_id)
     request.set_SystemDiskCategory("cloud_efficiency")
     request.set_SystemDiskSize(70)
     request.set_IoOptimized("optimized")
     request.set_InstanceChargeType("PostPaid")
     request.set_NetworkType("vpc")
     request.set_SpotStrategy("SpotWithPriceLimit")
     request.set_SpotPriceLimit(100)
     try:
         response = self.client.do_action_with_exception(request)
         response = json.loads(response)
         return response["LaunchTemplateId"]
     except (ServerException, ClientException) as e:
         dlog.info(e)
         sys.exit()
Example #11
def make_super_cell_poscar(jdata) :
    out_dir = jdata['out_dir']
    super_cell = jdata['super_cell']
    path_sc = os.path.join(out_dir, global_dirname_02)    
    create_path(path_sc)
    from_poscar_path = jdata['from_poscar_path']
    assert os.path.isfile(from_poscar_path), "file %s should exist" % from_poscar_path
    
    from_file = os.path.join(path_sc, 'POSCAR.copied')
    shutil.copy2(from_poscar_path, from_file)
    to_path = path_sc
    to_file = os.path.join(to_path, 'POSCAR')
  
    # minor bug for element symbols behind the coordinates
    from_struct = Structure.from_file(from_file)
    from_struct.make_supercell(super_cell)
    from_struct.to('poscar', to_file)

    # make system dir (copy)
    with open(to_file, 'r') as fp:
        lines = fp.read().split('\n')
    natoms_str = lines[6]
    natoms_list = [int(ii) for ii in natoms_str.split()]
    dlog.info(natoms_list)
    comb_name = "sys-"
    for idx,ii in enumerate(natoms_list) :
        comb_name += "%04d" % ii
        if idx != len(natoms_list)-1 :
            comb_name += "-"
    path_work = os.path.join(path_sc, comb_name)
    create_path(path_work)
    cwd = os.getcwd()
    to_file = os.path.abspath(to_file)
    os.chdir(path_work)
    os.symlink(os.path.relpath(to_file), 'POSCAR')
    os.chdir(cwd)
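
The jdata keys this function consumes, with illustrative values (a sketch, not a complete dpgen parameter file):

jdata = {
    'out_dir': 'init_bulk',          # root of the working tree
    'super_cell': [2, 2, 2],         # replication passed to make_supercell
    'from_poscar_path': './POSCAR',  # source structure; asserted to exist
}
make_super_cell_poscar(jdata)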
Example #12
    def get_image_id(self, img_name):
        request = DescribeImagesRequest()
        request.set_accept_format('json')
        request.set_ImageOwnerAlias("self")
        request.set_PageSize(20)
        response = self.client.do_action_with_exception(request)
        response = json.loads(response)
        totalcount = response["TotalCount"]

        iteration = totalcount // 20
        if iteration * 20 < totalcount:
            iteration += 1

        flag = 0  # defined before the loop so the final check cannot hit an undefined name
        for ii in range(1, iteration + 1):
            count = 0
            request.set_PageNumber(ii)
            while count < 10:
                try:
                    response = self.client.do_action_with_exception(request)
                    response = json.loads(response)
                    for img in response["Images"]["Image"]:
                        if img["ImageName"] == img_name:
                            return img["ImageId"]
                    flag = 1
                    break
                except Exception:
                    count += 1
                    time.sleep(10)
        if not flag:
            dlog.info("get image failed, exit")
            sys.exit()
Example #13
 def all_finished(self, job_handler, mark_failure, clean=True):
     task_chunks = job_handler['task_chunks']
     task_chunks_str = ['+'.join(ii) for ii in task_chunks]
     task_hashes = [
         sha1(ii.encode('utf-8')).hexdigest() for ii in task_chunks_str
     ]
     job_list = job_handler['job_list']
     job_record = job_handler['job_record']
     command = job_handler['command']
     tag_failure_list = [
         'tag_failure_%d' % ii for ii in range(len(command))
     ]
     resources = job_handler['resources']
     outlog = job_handler['outlog']
     errlog = job_handler['errlog']
     backward_task_files = job_handler['backward_task_files']
     dlog.debug('checking jobs')
     nchunks = len(task_chunks)
     for idx in range(nchunks):
         cur_hash = task_hashes[idx]
         rjob = job_list[idx]
         if not job_record.check_finished(cur_hash):
             # chunk not finished according to record
             status = rjob['batch'].check_status()
             job_uuid = rjob['context'].job_uuid
             dlog.debug('checked job %s' % job_uuid)
             if status == JobStatus.terminated:
                 job_record.increase_nfail(cur_hash)
                 if job_record.check_nfail(cur_hash) > 3:
                     raise RuntimeError(
                         'Job %s failed for more than 3 times' % job_uuid)
                 dlog.info('job %s terminated, submit again' % job_uuid)
                 dlog.debug('tried %s times for %s' %
                            (job_record.check_nfail(cur_hash), job_uuid))
                 rjob['batch'].submit(task_chunks[idx],
                                      command,
                                      res=resources,
                                      outlog=outlog,
                                      errlog=errlog,
                                      restart=True)
             elif status == JobStatus.finished:
                 dlog.info('job %s finished' % job_uuid)
                 if mark_failure:
                     rjob['context'].download(task_chunks[idx],
                                              tag_failure_list,
                                              check_exists=True,
                                              mark_failure=False)
                     rjob['context'].download(task_chunks[idx],
                                              backward_task_files,
                                              check_exists=True)
                 else:
                     rjob['context'].download(task_chunks[idx],
                                              backward_task_files)
                 if clean:
                     rjob['context'].clean()
                 job_record.record_finish(cur_hash)
                 job_record.dump()
     job_record.dump()
     return job_record.check_all_finished()
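
Since all_finished reports whether every chunk's record is complete, callers can poll it; a hedged sketch, where dispatcher and job_handler stand for objects built elsewhere:

import time

while not dispatcher.all_finished(job_handler, mark_failure=False):
    time.sleep(30)  # each pass resubmits terminated jobs and downloads finished ones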
Example #14
def convert_data(jdata):
    s = dpdata.MultiSystems(*[
        dpdata.LabeledSystem(x, fmt="gaussian/log")
        for x in glob.glob(os.path.join(fp_path, "*", "output"))
    ],
                            type_map=jdata["type_map"])
    s.to_deepmd_npy(data_path)
    dlog.info("Initial data is avaiable in %s" % os.path.abspath(data_path))
Example #15
def sepline(ch='-', sp='-', screen=False):
    r'''
    separate the output by '-'
    '''
    if screen:
        print(ch.center(MaxLength, sp))
    else:
        dlog.info(ch.center(MaxLength, sp))
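
Usage sketch (MaxLength is a module-level constant in the original; its value is not shown):

sepline()             # a full-width line of '-' sent to dlog.info
sepline('3', '-')     # '3' centered in dashes, as used in gen_init_reaction (Example #27)
sepline(screen=True)  # print to stdout instead of logging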
Example #16
 def block_checkcall(self, cmd):
     stdin, stdout, stderr = self.ssh.exec_command(('cd %s ;' % self.remote_root) + cmd)
     exit_status = stdout.channel.recv_exit_status()
     if exit_status != 0:
         dlog.info("Error info: %s" % (stderr.readlines()[0]))
         raise RuntimeError("Got error code %d in calling %s through ssh with job: %s" % (exit_status, cmd, self.job_uuid))
     return stdin, stdout, stderr
Example #17
 def download(self,
              job_dirs,
              remote_down_files,
              check_exists=False,
              mark_failure=True,
              back_error=False):
     cwd = os.getcwd()
     for ii in job_dirs:
         local_job = os.path.join(self.local_root, ii)
         remote_job = os.path.join(self.remote_root, ii)
         flist = list(remote_down_files)  # copy, so the "+=" below cannot mutate the caller's list
         if back_error:
             os.chdir(remote_job)
             flist += glob('error*')
             os.chdir(cwd)
         for jj in flist:
             rfile = os.path.join(remote_job, jj)
             lfile = os.path.join(local_job, jj)
             if not os.path.realpath(rfile) == os.path.realpath(lfile):
                 if (not os.path.exists(rfile)) and (
                         not os.path.exists(lfile)):
                     if check_exists:
                         if mark_failure:
                             with open(
                                     os.path.join(
                                         self.local_root, ii,
                                         'tag_failure_download_%s' % jj),
                                     'w') as fp:
                                 pass
                         else:
                             pass
                     else:
                         raise RuntimeError('do not find download file ' +
                                            rfile)
                 elif (not os.path.exists(rfile)) and (
                         os.path.exists(lfile)):
                     # already downloaded
                     pass
                 elif (os.path.exists(rfile)) and (
                         not os.path.exists(lfile)):
                     # trivial case, download happily
                     shutil.move(rfile, lfile)
                 elif (os.path.exists(rfile)) and (os.path.exists(lfile)):
                     # both exists, replace!
                     dlog.info('find existing %s, replacing by %s' %
                               (lfile, rfile))
                     if os.path.isdir(lfile):
                         shutil.rmtree(lfile, ignore_errors=True)
                     elif os.path.isfile(lfile) or os.path.islink(lfile):
                         os.remove(lfile)
                     shutil.move(rfile, lfile)
                 else:
                     raise RuntimeError('should not reach here!')
             else:
                 # do nothing in the case of linked files
                 pass
     os.chdir(cwd)
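
A hedged usage sketch for download; context stands for an instance of the (unnamed) class above, and the directory and file names are placeholders:

context.download(['task.000.000000', 'task.000.000001'],  # job_dirs under both roots
                 ['log', 'err', 'OUTCAR'],                # remote_down_files to pull back
                 check_exists=True,                       # tolerate missing files,
                 mark_failure=True)                       # writing tag_failure_download_* markers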
Example #18
def _main(path, calculator, output, id_prefix):
    calculator = calculator.lower()  # normalize once; the assert below is case-insensitive
    assert calculator in SUPPORTED_CACULATOR
    dlog.info('data collection from: %s' % path)
    if calculator == "vasp":
        parsing_vasp(path, output, id_prefix)
    elif calculator == 'gaussian':
        parsing_gaussian(path, output)
    else:
        parsing_pwscf(path, output)
Example #19
 def delete_apg(self):
     request = DeleteAutoProvisioningGroupRequest()
     request.set_accept_format('json')
     request.set_AutoProvisioningGroupId(self.apg_id)
     request.set_TerminateInstances(True)
     try:
         response = self.client.do_action_with_exception(request)
     except (ServerException, ClientException) as e:
         dlog.info(e)
Example #20
 def _rmtree(self, sftp, remotepath, level=0, verbose=False):
     for f in sftp.listdir_attr(remotepath):
         rpath = os.path.join(remotepath, f.filename)
         if stat.S_ISDIR(f.st_mode):
             # recurse into subdirectories, propagating the verbose flag
             self._rmtree(sftp, rpath, level=(level + 1), verbose=verbose)
         else:
             if verbose: dlog.info('removing %s%s' % ('    ' * level, rpath))
             sftp.remove(rpath)
     if verbose: dlog.info('removing %s%s' % ('    ' * level, remotepath))
     sftp.rmdir(remotepath)
Example #21
 def prepare(self):
     restart = False
     if os.path.exists('apg_id.json'):
         with open('apg_id.json') as fp:
             apg = json.load(fp)
             self.cloud_resources["apg_id"] = apg["apg_id"]
         task_chunks_str = ['+'.join(ii) for ii in self.task_chunks]
         task_hashes = [
             sha1(ii.encode('utf-8')).hexdigest() for ii in task_chunks_str
         ]
         for ii in range(self.nchunks):
             fn = 'jr.%.06d.json' % ii
             if os.path.exists(
                     os.path.join(os.path.abspath(self.work_path), fn)):
                 cur_hash = task_hashes[ii]
                 job_record = JobRecord(self.work_path,
                                        self.task_chunks[ii], fn)
                 if not job_record.check_finished(cur_hash):
                     if not self.check_spot_callback(
                             job_record.record[cur_hash]['context']
                         ['instance_id']):
                         self.dispatcher_list[ii]["entity"] = Entity(
                             job_record.record[cur_hash]['context']['ip'],
                             job_record.record[cur_hash]['context']
                             ['instance_id'], job_record)
                         self.make_dispatcher(ii)
                         self.dispatcher_list[ii][
                             "dispatcher_status"] = "unsubmitted"
                     else:
                         os.remove(
                             os.path.join(os.path.abspath(self.work_path),
                                          fn))
                 else:
                     self.dispatcher_list[ii][
                         "dispatcher_status"] = "finished"
         self.server_pool = self.get_server_pool()
         self.ip_pool = self.get_ip(self.server_pool)
         restart = True
     img_id = self.get_image_id(self.cloud_resources["img_name"])
     sg_id, vpc_id = self.get_sg_vpc_id()
     self.cloud_resources["template_id"] = self.create_template(
         img_id, sg_id, vpc_id)
     self.cloud_resources["vsw_id"] = self.get_vsw_id(vpc_id)
     if not restart:
         dlog.info("begin to create apg")
         self.cloud_resources["apg_id"] = self.create_apg()
         time.sleep(120)
         self.server_pool = self.get_server_pool()
         self.ip_pool = self.get_ip(self.server_pool)
     else:
         dlog.info("restart dpgen")
Example #22
def check_apikey():
    try:
        apikey = os.environ['MAPI_KEY']
    except KeyError:
        print("You have to get a MAPI_KEY from " + web)
        print("and execute the following command:")
        print('echo "export MAPI_KEY=yourkey" >> ~/.bashrc')
        print("source ~/.bashrc")
        os._exit(0)
    try:
        return MPRester(apikey)
    except MPRestError:
        dlog.info("MPRester Error, you need to prepare POSCAR manually")
        os._exit(0)
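
Setup sketch: the function expects MAPI_KEY in the environment (export MAPI_KEY=yourkey before starting Python); afterwards the returned client is a normal pymatgen MPRester:

mpr = check_apikey()
structure = mpr.get_structure_by_material_id('mp-149')  # e.g. silicon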
Example #23
def parsing_vasp(path,
                 config_info_dict,
                 skip_init,
                 output=OUTPUT,
                 id_prefix=None):

    fp_iters = os.path.join(path, ITERS_PAT)
    dlog.debug(fp_iters)
    f_fp_iters = glob(fp_iters)
    dlog.info("len iterations data: %s" % len(f_fp_iters))
    fp_init = os.path.join(path, INIT_PAT)
    dlog.debug(fp_init)
    f_fp_init = glob(fp_init)
    if skip_init:
        entries = _parsing_vasp(f_fp_iters, config_info_dict, id_prefix)
        dlog.info("len collected data: %s" % len(entries))
    else:
        dlog.info("len initialization data: %s" % len(f_fp_init))
        entries = _parsing_vasp(f_fp_init,
                                config_info_dict,
                                id_prefix,
                                iters=False)
        entries.extend(_parsing_vasp(f_fp_iters, config_info_dict, id_prefix))
        dlog.info("len collected data: %s" % len(entries))
    #print(output)
    #print(entries)
    dumpfn(entries, output, indent=4)
Example #24
 def __init__(self,
              remote_root,
              work_path,
              job_uuid=None,
              ):
     self.remote_root = os.path.join(remote_root, work_path)
     self.local_root = os.path.abspath(work_path)
     if job_uuid:
         self.job_uuid = job_uuid
     else:
         self.job_uuid = str(uuid.uuid4())

     dlog.info("local_root is %s" % self.local_root)
     dlog.info("remote_root is %s" % self.remote_root)
Example #25
def gen_init_surf(args):
    try:
        import ruamel
        from monty.serialization import loadfn, dumpfn
        warnings.simplefilter('ignore',
                              ruamel.yaml.error.MantissaNoDotYAML1_1Warning)
        jdata = loadfn(args.PARAM)
        if args.MACHINE is not None:
            mdata = loadfn(args.MACHINE)
    except Exception:
        with open(args.PARAM, 'r') as fp:
            jdata = json.load(fp)
        if args.MACHINE is not None:
            with open(args.MACHINE, "r") as fp:
                mdata = json.load(fp)

    out_dir = out_dir_name(jdata)
    jdata['out_dir'] = out_dir
    dlog.info("# working dir %s" % out_dir)

    if args.MACHINE is not None:
        # Decide a proper machine
        mdata = decide_fp_machine(mdata)
        fp_machine = mdata['fp_machine']
        fp_ssh_sess = SSHSession(fp_machine)
    #stage = args.STAGE
    stage_list = [int(i) for i in jdata['stages']]
    for stage in stage_list:
        if stage == 1:
            create_path(out_dir)
            make_super_cell_pymatgen(jdata)
            place_element(jdata)
            make_vasp_relax(jdata)
            if args.MACHINE is not None:
                run_vasp_relax(jdata, mdata, fp_ssh_sess)
        # elif stage == 0 :
        #     # create_path(out_dir)
        #     # make_super_cell(jdata)
        #     # place_element(jdata)
        #     # make_vasp_relax(jdata)
        #     # make_scale(jdata)
        #     # pert_scaled(jdata)
        #     # poscar_elong('POSCAR', 'POSCAR.out', 3)
        #     pert_scaled(jdata)
        elif stage == 2:
            make_scale(jdata)
            pert_scaled(jdata)
        else:
            raise RuntimeError("unknown stage %d" % stage)
Example #26
 def ensure_alive(self, max_check=10, sleep_time=10):
     count = 1
     while not self._check_alive():
         if count == max_check:
             raise RuntimeError(
                 'cannot connect ssh after %d failures at interval %d s' %
                 (max_check, sleep_time))
         dlog.info('connection check failed, try to reconnect to ' +
                   self.remote_host)
         self._setup_ssh(self.remote_host,
                         self.remote_port,
                         username=self.remote_uname,
                         password=self.remote_password)
         count += 1
         time.sleep(sleep_time)
Example #27
def gen_init_reaction(args):
    try:
        import ruamel
        from monty.serialization import loadfn, dumpfn
        warnings.simplefilter('ignore',
                              ruamel.yaml.error.MantissaNoDotYAML1_1Warning)
        jdata = loadfn(args.PARAM)
        if args.MACHINE is not None:
            mdata = loadfn(args.MACHINE)
    except Exception:
        with open(args.PARAM, 'r') as fp:
            jdata = json.load(fp)
        if args.MACHINE is not None:
            with open(args.MACHINE, "r") as fp:
                mdata = json.load(fp)

    record = "record.reaction"
    iter_rec = -1
    numb_task = 7
    if os.path.isfile(record):
        with open(record) as frec:
            for line in frec:
                iter_rec = int(line.strip())
        dlog.info("continue from task %02d" % iter_rec)
    for ii in range(numb_task):
        sepline(str(ii), '-')
        if ii <= iter_rec:
            continue
        elif ii == 0:
            link_reaxff(jdata)
        elif ii == 1:
            dispatcher = make_dispatcher(mdata["reaxff_machine"])
            run_reaxff(jdata, mdata, dispatcher)
        elif ii == 2:
            link_trj(jdata)
        elif ii == 3:
            dispatcher = make_dispatcher(mdata["build_machine"])
            run_build_dataset(jdata, mdata, dispatcher)
        elif ii == 4:
            link_fp_input()
        elif ii == 5:
            dispatcher = make_dispatcher(mdata["fp_machine"])
            run_fp(jdata, mdata, dispatcher)
        elif ii == 6:
            convert_data(jdata)
        with open(record, "a") as frec:
            frec.write(str(ii) + '\n')
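
The record-file idiom above (replay the last completed task index, skip what is done, append after each step) works for any staged pipeline; a standalone sketch, not taken from the original:

import os

def run_stages(stages, record='record.reaction'):
    done = -1
    if os.path.isfile(record):
        with open(record) as frec:
            for line in frec:
                done = int(line.strip())  # the last line wins
    for ii, stage in enumerate(stages):
        if ii <= done:
            continue  # finished in a previous run
        stage()
        with open(record, 'a') as frec:
            frec.write(str(ii) + '\n')  # checkpoint after every stage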
Example #28
 def create_ess(self):
     img_id = self.get_image_id(self.adata["img_name"])
     sg_id, vpc_id = self.get_sg_vpc_id()
     self.template_id = self.create_template(img_id, sg_id, vpc_id)
     self.vsw_id = self.get_vsw_id(vpc_id)
     self.apg_id = self.create_apg()
     dlog.info("begin to create ess, please wait two minutes")
     time.sleep(120)
     new_server_list = self.describe_apg_instances()
     new_ip_list = self.get_ip(new_server_list)
     for ii in range(len(new_server_list)):
         profile = self.mdata_machine.copy()
         profile['hostname'] = new_ip_list[ii]
         profile['instance_id'] = new_server_list[ii]
         if self.check_server(profile):
             self.instance_list[ii] = new_server_list[ii]
             self.ip_list[ii] = new_ip_list[ii]
Example #29
def _main():
    parser = argparse.ArgumentParser(description="gen init confs")
    parser.add_argument('PARAM', type=str, help="parameter file, json format")
    parser.add_argument(
        'STAGE',
        type=int,
        help="the stage of init, can be 1 or 2 "
        "1: Setup vasp jobs for relaxation. "
        "2: Collect vasp relaxed confs (if relax is not skiped). Perturb system. "
    )
    args = parser.parse_args()
    try:
        import ruamel
        from monty.serialization import loadfn, dumpfn
        warnings.simplefilter('ignore',
                              ruamel.yaml.error.MantissaNoDotYAML1_1Warning)
        jdata = loadfn(args.PARAM)
    except Exception:
        with open(args.PARAM, 'r') as fp:
            jdata = json.load(fp)

    out_dir = out_dir_name(jdata)
    jdata['out_dir'] = out_dir
    dlog.info("# working dir %s" % out_dir)

    stage = args.STAGE

    if stage == 1:
        create_path(out_dir)
        make_super_cell_pymatgen(jdata)
        place_element(jdata)
        make_vasp_relax(jdata)
    # elif stage == 0 :
    #     # create_path(out_dir)
    #     # make_super_cell(jdata)
    #     # place_element(jdata)
    #     # make_vasp_relax(jdata)
    #     # make_scale(jdata)
    #     # pert_scaled(jdata)
    #     # poscar_elong('POSCAR', 'POSCAR.out', 3)
    #     pert_scaled(jdata)
    elif stage == 2:
        make_scale(jdata)
        pert_scaled(jdata)
    else:
        raise RuntimeError("unknow stage %d" % stage)
Example #30
def make_super_cell_pymatgen(jdata):
    make_unit_cell(jdata)

    out_dir = jdata['out_dir']
    path_uc = os.path.join(out_dir, global_dirname_02)
    from_path = path_uc
    from_file = os.path.join(from_path, 'POSCAR.unit')
    ss = Structure.from_file(from_file)
    # ase only supports X-type elements
    for i in range(len(ss)):
        ss[i] = 'X'
    ss = AseAtomsAdaptor.get_atoms(ss)

    all_millers = jdata['millers']
    path_sc = os.path.join(out_dir, global_dirname_02)
    #z_min = jdata['z_min']
    layer_numb = jdata['layer_numb']
    super_cell = jdata['super_cell']

    cwd = os.getcwd()
    path_work = (path_sc)
    path_work = os.path.abspath(path_work)
    os.chdir(path_work)
    for miller in all_millers:
        miller_str = ""
        for ii in miller:
            miller_str += str(ii)
        path_cur_surf = create_path('surf-' + miller_str)
        os.chdir(path_cur_surf)
        #slabgen = SlabGenerator(ss, miller, z_min, 1e-3)
        slab = general_surface.surface(ss,
                                       indices=miller,
                                       vacuum=1e-3,
                                       layers=layer_numb)
        #all_slabs = slabgen.get_slabs()
        dlog.info(os.getcwd())
        #dlog.info("Miller %s: The slab has %s termination, use the first one" %(str(miller), len(all_slabs)))
        #all_slabs[0].to('POSCAR', 'POSCAR')
        slab.write('POSCAR', vasp5=True)
        if super_cell[0] > 1 or super_cell[1] > 1:
            st = Structure.from_file('POSCAR')
            st.make_supercell([super_cell[0], super_cell[1], 1])
            st.to('POSCAR', 'POSCAR')
        os.chdir(path_work)
    os.chdir(cwd)
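
The jdata keys this function consumes, with illustrative values (a sketch, not a complete parameter file):

jdata = {
    'out_dir': 'init_surf',
    'millers': [[1, 0, 0], [1, 1, 1]],  # one surf-* directory per Miller index
    'layer_numb': 3,                    # layers handed to general_surface.surface
    'super_cell': [2, 2, 1],            # only the x/y factors are applied here
}
make_super_cell_pymatgen(jdata)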