def test_static_isinstance(self):
    self.assertTrue(
        static_isinstance(obj=list(),
                          obj_type=["builtins.list", "__builtin__.list"]))
    self.assertTrue(
        any([
            static_isinstance(obj=list(), obj_type="builtins.list"),
            static_isinstance(obj=list(), obj_type="__builtin__.list"),
        ]))
    self.assertRaises(TypeError, static_isinstance, list(), 1)
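As a point of reference, the behavior exercised by this test can be reproduced with a minimal, self-contained sketch; this illustrates the name-based approach and is not the pyiron_base implementation itself. It walks the MRO of the object's class and compares fully qualified names, which is why the test accepts either the Python 3 spelling ("builtins.list") or the Python 2 spelling ("__builtin__.list"):

def static_isinstance_sketch(obj, obj_type):
    # Resolve the class if an instance was passed.
    cls = obj if hasattr(obj, "__mro__") else obj.__class__
    # Fully qualified names of the class and all of its bases.
    names = [c.__module__ + "." + c.__qualname__ for c in cls.__mro__]
    if isinstance(obj_type, str):
        return obj_type in names
    if isinstance(obj_type, (list, tuple)):
        return any(t in names for t in obj_type)
    raise TypeError("obj_type must be a string or a list of strings")

print(static_isinstance_sketch(list(), "builtins.list"))  # True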
Example #2
def _kill_child(job):
    """
    Internal helper function to kill a child process.

    Args:
        job (JobCore): job whose child process should be killed
    """
    if (
        static_isinstance(
            obj=job.__class__, obj_type="pyiron_base.master.GenericMaster"
        )
        and not job.server.run_mode.queue
        and (job.status.running or job.status.submitted)
    ):
        for proc in psutil.process_iter():
            try:
                pinfo = proc.as_dict(attrs=["pid", "cwd"])
            except psutil.NoSuchProcess:
                pass
            else:
                if pinfo["cwd"] is not None and pinfo["cwd"].startswith(
                    job.working_directory
                ):
                    job_process = psutil.Process(pinfo["pid"])
                    job_process.kill()
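The psutil pattern used here (iterate over processes, snapshot only the attributes you need, and tolerate processes that vanish mid-iteration) is generic. A standalone sketch of the same scan, with the target directory as a placeholder argument, might look like this:

import psutil

def processes_under(directory):
    # Collect processes whose working directory lies under `directory`.
    matches = []
    for proc in psutil.process_iter():
        try:
            pinfo = proc.as_dict(attrs=["pid", "cwd"])
        except psutil.NoSuchProcess:
            continue  # the process ended between listing and inspection
        if pinfo["cwd"] is not None and pinfo["cwd"].startswith(directory):
            matches.append(psutil.Process(pinfo["pid"]))
    return matches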
Example #3
def validate_que_request(item):
    """
    Internal function to convert the job_ID or hamiltonian to the queuing system ID.

    Args:
        item (int, pyiron_base.job.generic.GenericJob): Provide either the job_ID or the full hamiltonian

    Returns:
        int/list: queuing system ID, or the list of queue IDs of the child
            jobs for a master job that has no queue ID of its own
    """

    if isinstance(item, int):
        que_id = item
    elif static_isinstance(item.__class__,
                           "pyiron_base.master.generic.GenericMaster"):
        if item.server.queue_id:
            que_id = item.server.queue_id
        else:
            queue_id_lst = [
                item.project.load(child_id).server.queue_id
                for child_id in item.child_ids
            ]
            que_id = [
                queue_id for queue_id in queue_id_lst if queue_id is not None
            ]
            if len(que_id) == 0:
                raise ValueError("This job does not have a queue ID.")
    elif static_isinstance(item.__class__,
                           "pyiron_base.job.generic.GenericJob"):
        if item.server.queue_id:
            que_id = item.server.queue_id
        else:
            raise ValueError("This job does not have a queue ID.")
    elif static_isinstance(item.__class__, "pyiron_base.job.core.JobCore"):
        if "server" in item.project_hdf5.list_nodes():
            server_hdf_dict = item.project_hdf5["server"]
            if "qid" in server_hdf_dict.keys():
                que_id = server_hdf_dict["qid"]
            else:
                raise ValueError("This job does not have a queue ID.")
        else:
            raise ValueError("This job does not have a queue ID.")
    else:
        raise TypeError(
            "The queue can be queried either with job IDs or with pyiron GenericJob objects."
        )
    return que_id
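A hypothetical call site (job stands for any loaded pyiron job object; the ID is made up) shows the two accepted input forms:

que_id = validate_que_request(item=123)   # pass the queuing-system ID through
que_id = validate_que_request(item=job)   # or resolve it from a job object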
Example #4
def export_database(project_instance, directory_to_transfer,
                    archive_directory):
    # here we first check whether the archive directory is a path
    # or a project object
    if isinstance(archive_directory, str):
        if archive_directory.endswith(".tar.gz"):
            archive_directory = archive_directory[:-7]
        archive_directory = os.path.basename(archive_directory)
    # if the archive_directory is a project
    elif static_isinstance(
            obj=archive_directory.__class__,
            obj_type=[
                "pyiron_base.project.generic.Project",
            ],
    ):
        archive_directory = archive_directory.path
    else:
        raise RuntimeError(
            "The given path to export to does not have the correct format; "
            "paths as strings or pyiron Project objects are expected."
        )
    directory_to_transfer = os.path.basename(directory_to_transfer)
    pr = project_instance.open(os.curdir)
    df = pr.job_table()
    job_ids_sorted = sorted(df.id.values)
    new_job_ids = list(range(len(job_ids_sorted)))
    job_translate_dict = {j: n for j, n in zip(job_ids_sorted, new_job_ids)}
    df["id"] = [
        new_job_id(job_id=job_id, job_translate_dict=job_translate_dict)
        for job_id in df.id
    ]
    df["masterid"] = [
        new_job_id(job_id=job_id, job_translate_dict=job_translate_dict)
        for job_id in df.masterid
    ]
    df["parentid"] = [
        new_job_id(job_id=job_id, job_translate_dict=job_translate_dict)
        for job_id in df.parentid
    ]
    df["project"] = update_project(
        project_instance,
        directory_to_transfer=directory_to_transfer,
        archive_directory=archive_directory,
        df=df,
    )
    del df["projectpath"]
    return df
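A sketch of a typical call, with the project object and all paths as placeholders; the returned DataFrame is the renumbered job table that import_jobs later expects as its df argument:

df = export_database(
    project_instance=pr,                          # a pyiron Project (placeholder)
    directory_to_transfer="my_project",
    archive_directory="my_project_archive.tar.gz",
)
df.to_csv("export.csv")                           # e.g. persist alongside the archive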
Example #5
def _get_project_for_copy(job, project, new_job_name):
    """
    Internal helper function to generate a project and hdf5 project for copying

    Args:
        job (JobCore): Job object used for comparison
        project (JobCore/ProjectHDFio/Project/None): The project to copy the job to.
            (Default is None, use the same project.)
        new_job_name (str): The new name to assign the duplicate job. Required if the
            project is `None` or the same project as the copied job. (Default is None,
            try to keep the same name.)

    Returns:
        Project, ProjectHDFio
    """
    if static_isinstance(
        obj=project.__class__,
        obj_type="pyiron_base.job.core.JobCore"
    ):
        file_project = project.project
        hdf5_project = project.project_hdf5.open(new_job_name)
    elif isinstance(project, job.project.__class__):
        file_project = project
        hdf5_project = job.project_hdf5.__class__(
            project=project,
            file_name=new_job_name,
            h5_path="/" + new_job_name
        )
    elif isinstance(project, job.project_hdf5.__class__):
        file_project = project.project
        hdf5_project = project.open(new_job_name)
    elif project is None:
        file_project = job.project
        hdf5_project = job.project_hdf5.__class__(
            project=file_project,
            file_name=new_job_name,
            h5_path="/" + new_job_name
        )
    else:
        raise ValueError("Project should be JobCore/ProjectHDFio/Project/None")
    return file_project, hdf5_project
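A hedged usage sketch (job is an existing pyiron job object; the name is a placeholder): passing project=None copies within the job's own project, which is exactly the case where a new name is required:

file_project, hdf5_project = _get_project_for_copy(
    job=job, project=None, new_job_name="job_copy"
)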
def import_jobs(project_instance,
                directory_to_import_to,
                archive_directory,
                df,
                compressed=True):
    """
    Copy the HDF5 files of an exported archive into the current project and
    register the jobs from the job table in the database.

    Args:
        project_instance (Project): project to import the jobs into
        directory_to_import_to (str): name of the destination directory
        archive_directory (str/Project): path to the archive or a pyiron Project
        df (pandas.DataFrame): job table created during the export
        compressed (bool): whether the archive still needs to be extracted
    """
    # Copy HDF5 files
    # if the archive_directory is a path(string)/name of the compressed file
    if isinstance(archive_directory, str):
        archive_directory = os.path.basename(archive_directory)
    # if the archive_directory is a project
    elif static_isinstance(obj=archive_directory.__class__,
                           obj_type=[
                               "pyiron_base.project.generic.Project",
                           ]):
        archive_directory = archive_directory.path
    else:
        raise RuntimeError(
            "The given path to import from does not have the correct format; "
            "paths as strings or pyiron Project objects are expected."
        )
    if compressed:
        extract_archive(archive_directory)
    archive_name = getdir(path=archive_directory)
    # strip a trailing slash before extracting the directory name
    if directory_to_import_to[-1] != '/':
        directory_to_import_to = os.path.basename(directory_to_import_to)
    else:
        directory_to_import_to = os.path.basename(directory_to_import_to[:-1])
    # destination folder
    des = os.path.abspath(os.path.join(os.curdir, directory_to_import_to))
    # source folder; archive folder
    src = os.path.abspath(os.path.join(os.curdir, archive_directory))
    copy_tree(src, des)
    if compressed:
        rmtree(archive_directory)

    # Update Database
    pr_import = project_instance.open(os.curdir)
    df["project"] = [
        os.path.join(pr_import.project_path, os.path.relpath(p, archive_name))
        + "/" for p in df["project"].values
    ]
    df['projectpath'] = len(df) * [pr_import.root_path]
    # Add jobs to database
    job_id_lst = []
    for entry in df.dropna(axis=1).to_dict(orient="records"):
        if 'id' in entry:
            del entry['id']
        if 'parentid' in entry:
            del entry['parentid']
        if 'masterid' in entry:
            del entry['masterid']
        if 'timestart' in entry:
            entry["timestart"] = pandas.to_datetime(entry["timestart"])
        if 'timestop' in entry:
            entry["timestop"] = pandas.to_datetime(entry["timestop"])
        if 'username' not in entry:
            entry["username"] = s.login_user
        job_id = pr_import.db.add_item_dict(par_dict=entry)
        job_id_lst.append(job_id)

    # Update parent and master ids
    for job_id, masterid, parentid in zip(
            job_id_lst,
            update_id_lst(record_lst=df["masterid"].values,
                          job_id_lst=job_id_lst),
            update_id_lst(record_lst=df["parentid"].values,
                          job_id_lst=job_id_lst),
    ):
        if masterid is not None or parentid is not None:
            pr_import.db.item_update(item_id=job_id,
                                     par_dict={
                                         "parentid": parentid,
                                         "masterid": masterid
                                     })
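A sketch of the import side, mirroring the export example above; all arguments are placeholders, and df is the job table produced by export_database:

import_jobs(
    project_instance=pr,                  # a pyiron Project (placeholder)
    directory_to_import_to="imported/",
    archive_directory="my_project_archive",
    df=df,
    compressed=True,
)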