Example #1
    def test_1(self):
        import os
        import shutil
        import tempfile

        from aiida.orm import DataFactory
        from aiida.orm import load_node
        from aiida.orm.calculation.job import JobCalculation
        from aiida.orm.importexport import export, import_data

        # Creating a folder for the import/export files
        temp_folder = tempfile.mkdtemp()
        try:
            StructureData = DataFactory('structure')
            sd = StructureData()
            sd.store()

            calc = JobCalculation()
            calc.set_computer(self.computer)
            calc.set_resources({
                "num_machines": 1,
                "num_mpiprocs_per_machine": 1
            })
            calc.store()

            calc.add_link_from(sd)

            pks = [sd.pk, calc.pk]

            attrs = {}
            for pk in pks:
                node = load_node(pk)
                attrs[node.uuid] = dict()
                for k in node.attrs():
                    attrs[node.uuid][k] = node.get_attr(k)

            filename = os.path.join(temp_folder, "export.tar.gz")

            export([calc.dbnode], outfile=filename, silent=True)

            self.clean_db()

            # NOTE: it is better to load the new nodes by uuid rather than
            # assuming they will get the first pks: a common database policy
            # is that pks always increment, even after rows have been deleted
            import_data(filename, silent=True)
            for uuid in attrs.keys():
                node = load_node(uuid)
                for k in attrs[uuid].keys():
                    self.assertEqual(attrs[uuid][k], node.get_attr(k))
        finally:
            # Deleting the created temporary folder
            shutil.rmtree(temp_folder, ignore_errors=True)
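For reference, the export/import round trip exercised above reduces to this minimal sketch (the node and the archive path are placeholders):

    from aiida.orm.importexport import export, import_data

    # Export one or more stored nodes to a compressed archive
    export([node.dbnode], outfile='/tmp/export.tar.gz', silent=True)
    # ... clean the database or switch to another profile ...
    import_data('/tmp/export.tar.gz', silent=True)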
Example #2
    def test_unexpected_calc_states(self):
        import logging
        from django.utils import timezone

        # Have to use this ugly way of importing because the django migration
        # files start with numbers which are not a valid package name
        state_change = __import__(
            'aiida.backends.djsite.db.migrations.0002_db_state_change',
            fromlist=['fix_calc_states'])
        from aiida.common.datastructures import calc_states
        from aiida.backends.djsite.db.models import DbCalcState, DbLog
        from aiida.orm.calculation.job import JobCalculation

        calc_params = {
            'computer': self.computer,
            'resources': {
                'num_machines': 1,
                'num_mpiprocs_per_machine': 1
            }
        }

        for state in ['NOTFOUND', 'UNDETERMINED']:
            # Let's create a dummy job calculation
            job = JobCalculation(**calc_params)
            job.store()
            # Now save the errant state
            DbCalcState(dbnode=job.dbnode, state=state).save()

            time_before_fix = timezone.now()

            # Call the code that deals with updating these states
            state_change.fix_calc_states(None, None)

            current_state = job.get_state()
            self.assertNotEqual(
                current_state, state,
                "Migration code failed to change removed state {}".format(
                    state))
            self.assertEqual(
                current_state, calc_states.FAILED,
                "Migration code failed to set removed state {} to {}".format(
                    current_state, calc_states.FAILED))

            result = DbLog.objects.filter(
                objpk__exact=job.pk,
                levelname__exact=logging.getLevelName(logging.WARNING),
                time__gt=time_before_fix)

            self.assertEqual(
                len(result), 1,
                "Couldn't find a warning message with the change "
                "from {} to {}, or found too many: I got {} log "
                "messages".format(state, calc_states.FAILED, len(result)))
Example #3
def calculation_list(calculations, past_days, groups, all_entries,
                     calculation_state, process_state, exit_status, failed,
                     limit, order_by, project, all_users, raw, absolute_time):
    """Return a list of job calculations that are still running."""
    from aiida.cmdline.utils.common import print_last_process_state_change
    from aiida.orm.calculation.job import JobCalculation
    from aiida.work.processes import ProcessState

    if all_entries:
        process_state = None
        calculation_state = None

    PROCESS_STATE_KEY = 'attributes.{}'.format(
        JobCalculation.PROCESS_STATE_KEY)
    EXIT_STATUS_KEY = 'attributes.{}'.format(JobCalculation.EXIT_STATUS_KEY)

    filters = {}

    if calculation_state:
        process_state = None

    if process_state:
        calculation_state = None
        filters[PROCESS_STATE_KEY] = {'in': process_state}

    if failed:
        calculation_state = None
        filters[PROCESS_STATE_KEY] = {'==': ProcessState.FINISHED.value}
        filters[EXIT_STATUS_KEY] = {'>': 0}

    if exit_status is not None:
        calculation_state = None
        filters[PROCESS_STATE_KEY] = {'==': ProcessState.FINISHED.value}
        filters[EXIT_STATUS_KEY] = {'==': exit_status}

    JobCalculation._list_calculations(
        states=calculation_state,
        past_days=past_days,
        pks=[calculation.pk for calculation in calculations],
        all_users=all_users,
        groups=groups,
        relative_ctime=not absolute_time,
        order_by=order_by,
        limit=limit,
        filters=filters,
        projections=project,
        raw=raw,
    )

    if not raw:
        print_last_process_state_change(process_type='calculation')
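The filters assembled above follow the QueryBuilder operator syntax: each value is a dict mapping an operator ('==', 'in', '>') to an operand. For orientation, an equivalent direct query for failed calculations could look like this sketch (the attribute keys mirror PROCESS_STATE_KEY and EXIT_STATUS_KEY above and are illustrative):

    from aiida.orm.querybuilder import QueryBuilder
    from aiida.orm.calculation.job import JobCalculation

    qb = QueryBuilder()
    qb.append(JobCalculation, filters={
        'attributes.process_state': {'==': 'finished'},  # illustrative key
        'attributes.exit_status': {'>': 0},
    })
    failed = qb.all()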
Example #4
def wipe_all_scratch(w, results_to_save):
    """
    Wipe out all the scratch on the remote cluster used by a workflow and all 
    its subworkflows (found recursively)
    
    :param results_to_save: a list of Calculation objects that will be skipped
    :w: the workflow instance to clean
    """
    from aiida.orm.workflow import Workflow
    from aiida.orm.calculation.job import JobCalculation
    
    if not isinstance(w, Workflow):
        raise TypeError("Parameter w should be a workflow")
    try:
        if not all( [ isinstance(_,JobCalculation) for _ in results_to_save ] ):
            raise TypeError("Parameter results_to_save should be a list of calculations")
    except TypeError:
        raise TypeError("Parameter results_to_save should be a list of calculations")
    
    steps = w.dbworkflowinstance.steps.all()  
    this_calcs = JobCalculation.query(workflow_step__in=steps)
    this_wfs = Workflow.query(parent_workflow_step__in=steps)
    
    for c in this_calcs:
        if c.pk not in [_.pk for _ in results_to_save]:
            try:
                c.out.remote_folder._clean()
            except AttributeError:
                # remote folder does not exist (probably submission of calc. failed)
                pass
            except OSError:
                # work directory was already removed
                pass
    for this_wf in this_wfs:
        wipe_all_scratch(this_wf, results_to_save)
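A typical call keeps the remote folders of a few result calculations and wipes everything else produced by the workflow and its subworkflows; a sketch, assuming the load_workflow helper and a stored workflow with pk 1234 (both hypothetical here):

    from aiida.orm import load_workflow

    wf = load_workflow(1234)      # hypothetical workflow pk
    results = [calc_a, calc_b]    # JobCalculation nodes whose scratch must survive
    wipe_all_scratch(wf, results)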
Example #5
    def test_change_updatable_attrs_after_store(self):
        """
        Verify that a Sealable node can alter updatable attributes even after storing
        """
        from aiida.orm.calculation.job import JobCalculation
        from aiida.orm.mixins import Sealable

        resources = {'num_machines': 1, 'num_mpiprocs_per_machine': 1}
        job = JobCalculation(computer=self.computer, resources=resources)
        job.store()

        for attr in JobCalculation._updatable_attributes:
            if attr != Sealable.SEALED_KEY:
                job._set_attr(attr, 'a')
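The complementary behaviour, that a regular (non-updatable) attribute is rejected once the node is stored, could be asserted along these lines; a sketch, assuming _set_attr raises ModificationNotAllowed in that case:

    from aiida.common.exceptions import ModificationNotAllowed

    with self.assertRaises(ModificationNotAllowed):
        job._set_attr('some_regular_attribute', 'a')  # hypothetical attribute name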
Example #6
    def calculation_list(self, *args):
        """
        Return a list of calculations on screen.
        """
        # These helpers were imported at module level in the original file
        from aiida.backends.utils import is_dbenv_loaded, load_dbenv
        if not is_dbenv_loaded():
            load_dbenv()

        from aiida.common.datastructures import calc_states

        import argparse
        from aiida.orm.calculation.job import JobCalculation as C
        from aiida.common.setup import get_property

        parser = argparse.ArgumentParser(
            prog=self.get_full_command_name(),
            description='List AiiDA calculations.')
        # The default states are those that are shown if no option is given
        parser.add_argument('-s', '--states', nargs='+', type=str,
                            help="show only the AiiDA calculations with given state",
                            default=[calc_states.WITHSCHEDULER,
                                     calc_states.NEW,
                                     calc_states.TOSUBMIT,
                                     calc_states.SUBMITTING,
                                     calc_states.COMPUTED,
                                     calc_states.RETRIEVING,
                                     calc_states.PARSING,
                                     ])

        parser.add_argument('-p', '--past-days', metavar='N',
                            help="add a filter to show only calculations created in the past N days",
                            action='store', type=int)
        parser.add_argument('-g', '--group', '--group-name',
                            metavar='GROUPNAME',
                            help="add a filter to show only calculations within a given group",
                            action='store', type=str)
        parser.add_argument('-G', '--group-pk', metavar='GROUPPK',
                            help="add a filter to show only calculations within a given group",
                            action='store', type=int)
        parser.add_argument('pks', type=int, nargs='*',
                            help="a list of calculations to show. If empty, all running calculations are shown. If non-empty, ignores the -p and -r options.")
        parser.add_argument('-a', '--all-states',
                            dest='all_states', action='store_true',
                            help="Overwrite manual set of states if present, and look for calculations in every possible state")
        parser.set_defaults(all_states=False)
        parser.add_argument('-A', '--all-users',
                            dest='all_users', action='store_true',
                            help="Show calculations for all users, rather than only for the current user")
        parser.set_defaults(all_users=False)
        parser.add_argument('-t', '--absolute-time',
                            dest='relative_ctime', action='store_false', default=True,
                            help="Print the absolute creation time, rather than the relative creation time")
        parser.add_argument('-l', '--limit',
                            type=int, default=None,
                            help='set a limit to the number of rows returned')
        parser.add_argument('-o', '--order-by',
                            choices=['id', 'ctime'],
                            default='ctime',
                            help='order the results')
        parser.add_argument('--project',
                            choices=(
                                    'pk', 'state', 'ctime', 'sched', 'computer',
                                    'type', 'description', 'label', 'uuid',
                                    'mtime', 'user'
                                ),
                            nargs='+',
                            default=get_property("verdishell.calculation_list"),
                            #('pk', 'ctime', 'state', 'sched', 'computer', 'type', 'label'),
                            help="Define the list of properties to show"
                        )

        args = list(args)
        parsed_args = parser.parse_args(args)

        capital_states = [i.upper() for i in parsed_args.states]
        parsed_args.states = capital_states

        if parsed_args.all_states:
            parsed_args.states = None

        C._list_calculations(
            states=parsed_args.states,
            past_days=parsed_args.past_days,
            pks=parsed_args.pks,
            all_users=parsed_args.all_users,
            group=parsed_args.group,
            group_pk=parsed_args.group_pk,
            relative_ctime=parsed_args.relative_ctime,
            # with_scheduler_state=parsed_args.with_scheduler_state,
            order_by=parsed_args.order_by,
            limit=parsed_args.limit,
            projections=parsed_args.project,
        )
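Because the positional *args go straight to argparse, the method can be driven programmatically exactly like the command line; a sketch, with cmd standing in for the (hypothetical) verdi command instance:

    # Equivalent to running: verdi calculation list -p 5 -A
    cmd.calculation_list('-p', '5', '-A')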
Example #7
    def test_different_computer_same_name_import(self):
        """
        This test checks that if there is a name collision, the imported
        computers are renamed accordingly.
        """
        import os
        import shutil
        import tempfile

        from aiida.orm.importexport import export, import_data, COMP_DUPL_SUFFIX
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.computer import Computer
        from aiida.orm.calculation.job import JobCalculation

        # Creating a folder for the import/export files
        export_file_tmp_folder = tempfile.mkdtemp()
        unpack_tmp_folder = tempfile.mkdtemp()

        try:
            # Set the computer name
            comp1_name = "localhost_1"
            self.computer.set_name(comp1_name)

            # Store a calculation
            calc1_label = "calc1"
            calc1 = JobCalculation()
            calc1.set_computer(self.computer)
            calc1.set_resources({
                "num_machines": 1,
                "num_mpiprocs_per_machine": 1
            })
            calc1.label = calc1_label
            calc1.store()
            calc1._set_state(u'RETRIEVING')

            # Export the first job calculation
            filename1 = os.path.join(export_file_tmp_folder, "export1.tar.gz")
            export([calc1.dbnode], outfile=filename1, silent=True)

            # Reset the database
            self.clean_db()
            self.insert_data()

            # Set the computer name to the same name as before
            self.computer.set_name(comp1_name)

            # Store a second calculation
            calc2_label = "calc2"
            calc2 = JobCalculation()
            calc2.set_computer(self.computer)
            calc2.set_resources({
                "num_machines": 2,
                "num_mpiprocs_per_machine": 2
            })
            calc2.label = calc2_label
            calc2.store()
            calc2._set_state(u'RETRIEVING')

            # Export the second job calculation
            filename2 = os.path.join(export_file_tmp_folder, "export2.tar.gz")
            export([calc2.dbnode], outfile=filename2, silent=True)

            # Reset the database
            self.clean_db()
            self.insert_data()

            # Set the computer name to the same name as before
            self.computer.set_name(comp1_name)

            # Store a third calculation
            calc3_label = "calc3"
            calc3 = JobCalculation()
            calc3.set_computer(self.computer)
            calc3.set_resources({
                "num_machines": 2,
                "num_mpiprocs_per_machine": 2
            })
            calc3.label = calc3_label
            calc3.store()
            calc3._set_state(u'RETRIEVING')

            # Export the third job calculation
            filename3 = os.path.join(export_file_tmp_folder, "export3.tar.gz")
            export([calc3.dbnode], outfile=filename3, silent=True)

            # Clean the local database
            self.clean_db()

            # Check that there are no computers
            qb = QueryBuilder()
            qb.append(Computer, project=['*'])
            self.assertEqual(
                qb.count(), 0, "There should not be any computers "
                "in the database at this point.")

            # Check that there are no calculations
            qb = QueryBuilder()
            qb.append(JobCalculation, project=['*'])
            self.assertEqual(
                qb.count(), 0, "There should not be any "
                "calculations in the database at "
                "this point.")

            # Import all the calculations
            import_data(filename1, silent=True)
            import_data(filename2, silent=True)
            import_data(filename3, silent=True)

            # Retrieve the calculation-computer pairs
            qb = QueryBuilder()
            qb.append(JobCalculation, project=['label'], tag='jcalc')
            qb.append(Computer, project=['name'], computer_of='jcalc')
            self.assertEqual(qb.count(), 3, "Three combinations expected.")
            res = qb.all()
            self.assertIn([calc1_label, comp1_name], res,
                          "Calc-Computer combination not found.")
            self.assertIn(
                [calc2_label, comp1_name + COMP_DUPL_SUFFIX.format(0)], res,
                "Calc-Computer combination not found.")
            self.assertIn(
                [calc3_label, comp1_name + COMP_DUPL_SUFFIX.format(1)], res,
                "Calc-Computer combination not found.")
        finally:
            # Deleting the created temporary folders
            shutil.rmtree(export_file_tmp_folder, ignore_errors=True)
            shutil.rmtree(unpack_tmp_folder, ignore_errors=True)
Example #8
    def test_same_computer_different_name_import(self):
        """
        This test checks that if the computer is re-imported with a different
        name to the same database, then the original computer will not be
        renamed. It also checks that the names were correctly imported (without
        any change since there is no computer name collision)
        """
        import os
        import shutil
        import tempfile

        from aiida.orm.importexport import export, import_data
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.computer import Computer
        from aiida.orm.calculation.job import JobCalculation

        # Creating a folder for the import/export files
        export_file_tmp_folder = tempfile.mkdtemp()
        unpack_tmp_folder = tempfile.mkdtemp()

        try:
            # Store a calculation
            calc1_label = "calc1"
            calc1 = JobCalculation()
            calc1.set_computer(self.computer)
            calc1.set_resources({
                "num_machines": 1,
                "num_mpiprocs_per_machine": 1
            })
            calc1.label = calc1_label
            calc1.store()
            calc1._set_state(u'RETRIEVING')

            # Store locally the computer name
            comp1_name = unicode(self.computer.name)

            # Export the first job calculation
            filename1 = os.path.join(export_file_tmp_folder, "export1.tar.gz")
            export([calc1.dbnode], outfile=filename1, silent=True)

            # Rename the computer
            self.computer.set_name(comp1_name + "_updated")

            # Store a second calculation
            calc2_label = "calc2"
            calc2 = JobCalculation()
            calc2.set_computer(self.computer)
            calc2.set_resources({
                "num_machines": 2,
                "num_mpiprocs_per_machine": 2
            })
            calc2.label = calc2_label
            calc2.store()
            calc2._set_state(u'RETRIEVING')

            # Export the second job calculation
            filename2 = os.path.join(export_file_tmp_folder, "export2.tar.gz")
            export([calc2.dbnode], outfile=filename2, silent=True)

            # Clean the local database
            self.clean_db()

            # Check that there are no computers
            qb = QueryBuilder()
            qb.append(Computer, project=['*'])
            self.assertEqual(
                qb.count(), 0, "There should not be any computers "
                "in the database at this point.")

            # Check that there are no calculations
            qb = QueryBuilder()
            qb.append(JobCalculation, project=['*'])
            self.assertEqual(
                qb.count(), 0, "There should not be any "
                "calculations in the database at "
                "this point.")

            # Import the first calculation
            import_data(filename1, silent=True)

            # Check that the calculation computer is imported correctly.
            qb = QueryBuilder()
            qb.append(JobCalculation, project=['label'])
            self.assertEqual(qb.count(), 1, "Only one calculation should be "
                             "found.")
            self.assertEqual(unicode(qb.first()[0]), calc1_label,
                             "The calculation label is not correct.")

            # Check that the referenced computer is imported correctly.
            qb = QueryBuilder()
            qb.append(Computer, project=['name', 'uuid', 'id'])
            self.assertEqual(qb.count(), 1, "Only one computer should be "
                             "found.")
            self.assertEqual(unicode(qb.first()[0]), comp1_name,
                             "The computer name is not correct.")

            # Import the second calculation
            import_data(filename2, silent=True)

            # Check that the number of computers remains the same and its data
            # did not change.
            qb = QueryBuilder()
            qb.append(Computer, project=['name'])
            self.assertEqual(qb.count(), 1, "Only one computer should be "
                             "found.")
            self.assertEqual(unicode(qb.first()[0]), comp1_name,
                             "The computer name is not correct.")

        finally:
            # Deleting the created temporary folders
            shutil.rmtree(export_file_tmp_folder, ignore_errors=True)
            shutil.rmtree(unpack_tmp_folder, ignore_errors=True)
Example #9
    def test_same_computer_import(self):
        """
        Test that you can import nodes in steps without any problems. In this
        test we will import a first calculation and then a second one. The
        import should work as expected and have in the end two job
        calculations.

        Each calculation is related to the same computer. In the end we should
        have only one computer
        """
        import os
        import shutil
        import tempfile

        from aiida.orm.importexport import export, import_data
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.computer import Computer
        from aiida.orm.calculation.job import JobCalculation

        # Creating a folder for the import/export files
        export_file_tmp_folder = tempfile.mkdtemp()
        unpack_tmp_folder = tempfile.mkdtemp()

        try:
            # Store two job calculation related to the same computer
            calc1_label = "calc1"
            calc1 = JobCalculation()
            calc1.set_computer(self.computer)
            calc1.set_resources({
                "num_machines": 1,
                "num_mpiprocs_per_machine": 1
            })
            calc1.label = calc1_label
            calc1.store()
            calc1._set_state(u'RETRIEVING')

            calc2_label = "calc2"
            calc2 = JobCalculation()
            calc2.set_computer(self.computer)
            calc2.set_resources({
                "num_machines": 2,
                "num_mpiprocs_per_machine": 2
            })
            calc2.label = calc2_label
            calc2.store()
            calc2._set_state(u'RETRIEVING')

            # Store locally the computer name
            comp_name = unicode(self.computer.name)
            comp_uuid = unicode(self.computer.uuid)

            # Export the first job calculation
            filename1 = os.path.join(export_file_tmp_folder, "export1.tar.gz")
            export([calc1.dbnode], outfile=filename1, silent=True)

            # Export the second job calculation
            filename2 = os.path.join(export_file_tmp_folder, "export2.tar.gz")
            export([calc2.dbnode], outfile=filename2, silent=True)

            # Clean the local database
            self.clean_db()

            # Check that there are no computers
            qb = QueryBuilder()
            qb.append(Computer, project=['*'])
            self.assertEqual(
                qb.count(), 0, "There should not be any computers "
                "in the database at this point.")

            # Check that there are no calculations
            qb = QueryBuilder()
            qb.append(JobCalculation, project=['*'])
            self.assertEqual(
                qb.count(), 0, "There should not be any "
                "calculations in the database at "
                "this point.")

            # Import the first calculation
            import_data(filename1, silent=True)

            # Check that the calculation computer is imported correctly.
            qb = QueryBuilder()
            qb.append(JobCalculation, project=['label'])
            self.assertEqual(qb.count(), 1, "Only one calculation should be "
                             "found.")
            self.assertEqual(unicode(qb.first()[0]), calc1_label,
                             "The calculation label is not correct.")

            # Check that the referenced computer is imported correctly.
            qb = QueryBuilder()
            qb.append(Computer, project=['name', 'uuid', 'id'])
            self.assertEqual(qb.count(), 1, "Only one computer should be "
                             "found.")
            self.assertEqual(unicode(qb.first()[0]), comp_name,
                             "The computer name is not correct.")
            self.assertEqual(unicode(qb.first()[1]), comp_uuid,
                             "The computer uuid is not correct.")

            # Store the id of the computer
            comp_id = qb.first()[2]

            # Import the second calculation
            import_data(filename2, silent=True)

            # Check that the number of computers remains the same and its data
            # did not change.
            qb = QueryBuilder()
            qb.append(Computer, project=['name', 'uuid', 'id'])
            self.assertEqual(qb.count(), 1, "Only one computer should be "
                             "found.")
            self.assertEqual(unicode(qb.first()[0]), comp_name,
                             "The computer name is not correct.")
            self.assertEqual(unicode(qb.first()[1]), comp_uuid,
                             "The computer uuid is not correct.")
            self.assertEqual(qb.first()[2], comp_id,
                             "The computer id is not correct.")

            # Check that now you have two calculations attached to the same
            # computer.
            qb = QueryBuilder()
            qb.append(Computer, tag='comp')
            qb.append(JobCalculation, has_computer='comp', project=['label'])
            self.assertEqual(qb.count(), 2, "Two calculations should be "
                             "found.")
            ret_labels = set(_ for [_] in qb.all())
            self.assertEqual(
                ret_labels, set([calc1_label, calc2_label]),
                "The labels of the calculations are not correct.")

        finally:
            # Deleting the created temporary folders
            shutil.rmtree(export_file_tmp_folder, ignore_errors=True)
            shutil.rmtree(unpack_tmp_folder, ignore_errors=True)
Example #10
    def test_complex_graph_import_export(self):
        """
        This test checks that a small and bit complex graph can be correctly
        exported and imported.

        It will create the graph, store it to the database, export it to a file
        and import it. In the end it will check if the initial nodes are present
        at the imported graph.
        """
        import tempfile
        import shutil
        import os

        from aiida.orm.calculation.job import JobCalculation
        from aiida.orm.data.folder import FolderData
        from aiida.orm.data.parameter import ParameterData
        from aiida.orm.data.remote import RemoteData
        from aiida.common.links import LinkType
        from aiida.orm.importexport import export, import_data
        from aiida.orm.utils import load_node
        from aiida.common.exceptions import NotExistent

        temp_folder = tempfile.mkdtemp()
        try:
            calc1 = JobCalculation()
            calc1.set_computer(self.computer)
            calc1.set_resources({
                "num_machines": 1,
                "num_mpiprocs_per_machine": 1
            })
            calc1.label = "calc1"
            calc1.store()
            calc1._set_state(u'RETRIEVING')

            pd1 = ParameterData()
            pd1.label = "pd1"
            pd1.store()

            pd2 = ParameterData()
            pd2.label = "pd2"
            pd2.store()

            rd1 = RemoteData()
            rd1.label = "rd1"
            rd1.set_remote_path("/x/y.py")
            rd1.set_computer(self.computer)
            rd1.store()
            rd1.add_link_from(calc1, link_type=LinkType.CREATE)

            calc2 = JobCalculation()
            calc2.set_computer(self.computer)
            calc2.set_resources({
                "num_machines": 1,
                "num_mpiprocs_per_machine": 1
            })
            calc2.label = "calc2"
            calc2.store()
            calc2.add_link_from(pd1, link_type=LinkType.INPUT)
            calc2.add_link_from(pd2, link_type=LinkType.INPUT)
            calc2.add_link_from(rd1, link_type=LinkType.INPUT)
            calc2._set_state(u'SUBMITTING')

            fd1 = FolderData()
            fd1.label = "fd1"
            fd1.store()
            fd1.add_link_from(calc2, link_type=LinkType.CREATE)

            node_uuids_labels = {
                calc1.uuid: calc1.label,
                pd1.uuid: pd1.label,
                pd2.uuid: pd2.label,
                rd1.uuid: rd1.label,
                calc2.uuid: calc2.label,
                fd1.uuid: fd1.label
            }

            filename = os.path.join(temp_folder, "export.tar.gz")
            export([fd1.dbnode], outfile=filename, silent=True)

            self.clean_db()

            import_data(filename, silent=True, ignore_unknown_nodes=True)

            for uuid, label in node_uuids_labels.iteritems():
                try:
                    load_node(uuid)
                except NotExistent:
                    self.fail("Node with UUID {} and label {} was not "
                              "found.".format(uuid, label))

        finally:
            # Deleting the created temporary folder
            shutil.rmtree(temp_folder, ignore_errors=True)
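For orientation, the provenance graph assembled in this test is:

    calc1 --CREATE--> rd1 ---INPUT---> calc2 --CREATE--> fd1
    pd1 -------------------INPUT-----> calc2
    pd2 -------------------INPUT-----> calc2

Exporting fd1 alone is expected to pull in the rest of the graph, which is exactly what the final load_node loop verifies.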
Example #11
    def test_6(self):
        """
        This test checks that nodes belonging to user A (which is not the
        default user) can be correctly exported, imported, enriched with nodes
        from the default user, re-exported & re-imported and that in the end
        all the nodes that have been finally imported belonging to the right
        users.
        """
        import os
        import shutil
        import tempfile

        from aiida.orm import load_node
        from aiida.orm.calculation.job import JobCalculation
        from aiida.orm.data.structure import StructureData
        from aiida.orm.importexport import export, import_data
        from aiida.common.datastructures import calc_states
        from aiida.common.links import LinkType
        from aiida.common.utils import get_configured_user_email
        from aiida.orm.user import User

        # Creating a folder for the import/export files
        temp_folder = tempfile.mkdtemp()
        try:
            # Create another user
            new_email = "[email protected]"
            user = User(email=new_email)
            user.force_save()

            # Create a structure data node that has a calculation as output
            sd1 = StructureData()
            sd1.dbnode.user = user._dbuser
            sd1.label = 'sd1'
            sd1.store()

            jc1 = JobCalculation()
            jc1.set_computer(self.computer)
            jc1.set_resources({"num_machines": 1, "num_mpiprocs_per_machine": 1})
            jc1.dbnode.user = user._dbuser
            jc1.label = 'jc1'
            jc1.store()
            jc1.add_link_from(sd1)
            jc1._set_state(calc_states.PARSING)

            # Create some nodes from a different user
            sd2 = StructureData()
            sd2.dbnode.user = user._dbuser
            sd2.label = 'sd2'
            sd2.store()
            sd2.add_link_from(jc1, label='l1', link_type=LinkType.RETURN)

            # Set the jc1 to FINISHED
            jc1._set_state(calc_states.FINISHED)

            # At this point we export the generated data
            filename1 = os.path.join(temp_folder, "export1.tar.gz")
            export([sd2.dbnode], outfile=filename1, silent=True)
            uuids1 = [sd1.uuid, jc1.uuid, sd2.uuid]
            self.clean_db()
            self.insert_data()
            import_data(filename1, silent=True)

            # Check that the imported nodes are correctly imported and that
            # the user assigned to the nodes is the right one
            for uuid in uuids1:
                self.assertEqual(load_node(uuid).get_user().email, new_email)

            # Now we continue to generate more data based on the imported
            # data
            sd2_imp = load_node(sd2.uuid)

            jc2 = JobCalculation()
            jc2.set_computer(self.computer)
            jc2.set_resources({"num_machines": 1, "num_mpiprocs_per_machine": 1})
            jc2.label = 'jc2'
            jc2.store()
            jc2.add_link_from(sd2_imp, label='l2')
            jc2._set_state(calc_states.PARSING)

            sd3 = StructureData()
            sd3.label = 'sd3'
            sd3.store()
            sd3.add_link_from(jc2, label='l3', link_type=LinkType.RETURN)

            # Set the jc2 to FINISHED
            jc2._set_state(calc_states.FINISHED)

            # Store the UUIDs of the nodes that should be checked
            # if they can be imported correctly.
            uuids2 = [jc2.uuid, sd3.uuid]

            filename2 = os.path.join(temp_folder, "export2.tar.gz")
            export([sd3.dbnode], outfile=filename2, silent=True)
            self.clean_db()
            self.insert_data()
            import_data(filename2, silent=True)

            # Check that the imported nodes are correctly imported and that
            # the user assigned to the nodes is the right one
            for uuid in uuids1:
                self.assertEqual(load_node(uuid).get_user().email, new_email)
            for uuid in uuids2:
                self.assertEqual(load_node(uuid).get_user().email,
                                 get_configured_user_email())

        finally:
            # Deleting the created temporary folder
            shutil.rmtree(temp_folder, ignore_errors=True)
Example #12
    def test_5(self):
        """
        This test checks that nodes belonging to different users are correctly
        exported & imported.
        """
        import os
        import shutil
        import tempfile

        from aiida.orm import load_node
        from aiida.orm.calculation.job import JobCalculation
        from aiida.orm.data.structure import StructureData
        from aiida.orm.importexport import export, import_data
        from aiida.common.datastructures import calc_states
        from aiida.common.links import LinkType
        from aiida.orm.user import User
        from aiida.common.utils import get_configured_user_email

        # Creating a folder for the import/export files
        temp_folder = tempfile.mkdtemp()
        try:
            # Create another user
            new_email = "[email protected]"
            user = User(email=new_email)
            user.force_save()

            # Create a structure data node that has a calculation as output
            sd1 = StructureData()
            sd1.dbnode.user = user._dbuser
            sd1.label = 'sd1'
            sd1.store()

            jc1 = JobCalculation()
            jc1.set_computer(self.computer)
            jc1.set_resources({"num_machines": 1, "num_mpiprocs_per_machine": 1})
            jc1.dbnode.user = user._dbuser
            jc1.label = 'jc1'
            jc1.store()
            jc1.add_link_from(sd1)
            jc1._set_state(calc_states.PARSING)

            # Create some nodes from a different user
            sd2 = StructureData()
            sd2.dbnode.user = user._dbuser
            sd2.label = 'sd2'
            sd2.store()
            sd2.add_link_from(jc1, label='l1', link_type=LinkType.RETURN)

            jc2 = JobCalculation()
            jc2.set_computer(self.computer)
            jc2.set_resources({"num_machines": 1, "num_mpiprocs_per_machine": 1})
            jc2.label = 'jc2'
            jc2.store()
            jc2.add_link_from(sd2, label='l2')
            jc2._set_state(calc_states.PARSING)

            sd3 = StructureData()
            sd3.label = 'sd3'
            sd3.store()
            sd3.add_link_from(jc2, label='l3', link_type=LinkType.RETURN)

            uuids_u1 = [sd1.uuid, jc1.uuid, sd2.uuid]
            uuids_u2 = [jc2.uuid, sd3.uuid]

            filename = os.path.join(temp_folder, "export.tar.gz")

            export([sd3.dbnode], outfile=filename, silent=True)
            self.clean_db()
            import_data(filename, silent=True)

            # Check that the imported nodes are correctly imported and that
            # the user assigned to the nodes is the right one
            for uuid in uuids_u1:
                self.assertEqual(load_node(uuid).get_user().email, new_email)
            for uuid in uuids_u2:
                self.assertEqual(load_node(uuid).get_user().email,
                                 get_configured_user_email())
        finally:
            # Deleting the created temporary folder
            shutil.rmtree(temp_folder, ignore_errors=True)
Example #13
    def test_unexpected_calc_states(self):
        import logging

        from django.utils import timezone

        # Have to use this ugly way of importing because the django migration
        # files start with numbers which are not a valid package name
        state_change = __import__(
            'aiida.backends.djsite.db.migrations.0002_db_state_change',
            fromlist=['fix_calc_states']
        )
        from aiida.common.datastructures import calc_states
        from aiida.backends.djsite.db.models import DbCalcState, DbLog
        from aiida.orm.calculation.job import JobCalculation

        calc_params = {
            'computer': self.computer,
            'resources': {'num_machines': 1,
                          'num_mpiprocs_per_machine': 1}
        }

        for state in ['NOTFOUND', 'UNDETERMINED']:
            # Let's create a dummy job calculation
            job = JobCalculation(**calc_params)
            job.store()
            # Now save the errant state
            DbCalcState(dbnode=job.dbnode, state=state).save()

            time_before_fix = timezone.now()

            # First of all, I re-enable logging in case it was disabled by
            # mistake by a previous test (e.g. one that disables and reenables
            # again, but that failed)
            logging.disable(logging.NOTSET)
            # Temporarily disable logging to the stream handler (i.e. screen)
            # because otherwise fix_calc_states will print warnings
            handler = next((h for h in logging.getLogger('aiida').handlers if
                            isinstance(h, logging.StreamHandler)), None)

            if handler:
                original_level = handler.level
                handler.setLevel(logging.ERROR)

            # Call the code that deals with updating these states
            state_change.fix_calc_states(None, None)

            if handler:
                handler.setLevel(original_level)

            current_state = job.get_state()
            self.assertNotEqual(current_state, state,
                                "Migration code failed to change removed state {}".
                                format(state))
            self.assertEqual(current_state, calc_states.FAILED,
                             "Migration code failed to set removed state {} to {}".
                             format(current_state, calc_states.FAILED))

            result = DbLog.objects.filter(
                objpk__exact=job.pk,
                levelname__exact=logging.getLevelName(logging.WARNING),
                time__gt=time_before_fix
            )

            self.assertEqual(len(result), 1,
                             "Couldn't find a warning message with the change "
                             "from {} to {}, or found too many: I got {} log "
                             "messages".format(state, calc_states.FAILED, len(result)))
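The handler-silencing dance above is a recurring pattern; under the same assumptions it could be factored into a small reusable context manager, e.g.:

    from contextlib import contextmanager
    import logging

    @contextmanager
    def stream_handler_level(logger_name, level):
        # Temporarily change the level of the logger's first StreamHandler, if any
        handler = next((h for h in logging.getLogger(logger_name).handlers
                        if isinstance(h, logging.StreamHandler)), None)
        original_level = handler.level if handler else None
        if handler:
            handler.setLevel(level)
        try:
            yield
        finally:
            if handler:
                handler.setLevel(original_level)

    # Usage in the test body:
    # with stream_handler_level('aiida', logging.ERROR):
    #     state_change.fix_calc_states(None, None)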
Example #14
    def calculation_list(self, *args):
        """
        Return a list of calculations on screen.
        """
        # These helpers were imported at module level in the original file
        from aiida.backends.utils import is_dbenv_loaded, load_dbenv
        if not is_dbenv_loaded():
            load_dbenv()

        import argparse

        from aiida.common.datastructures import calc_states
        from aiida.orm.calculation.job import JobCalculation as C

        parser = argparse.ArgumentParser(
            prog=self.get_full_command_name(),
            description='List AiiDA calculations.')
        # The default states are those that are shown if no option is given
        parser.add_argument(
            '-s',
            '--states',
            nargs='+',
            type=str,
            help="show only the AiiDA calculations with given state",
            default=[
                calc_states.WITHSCHEDULER,
                calc_states.NEW,
                calc_states.TOSUBMIT,
                calc_states.SUBMITTING,
                calc_states.COMPUTED,
                calc_states.RETRIEVING,
                calc_states.PARSING,
            ])

        parser.add_argument(
            '-p',
            '--past-days',
            metavar='N',
            help=
            "add a filter to show only calculations created in the past N days",
            action='store',
            type=int)
        parser.add_argument(
            '-g',
            '--group',
            '--group-name',
            metavar='GROUPNAME',
            help="add a filter to show only calculations within a given group",
            action='store',
            type=str)
        parser.add_argument(
            '-G',
            '--group-pk',
            metavar='GROUPPK',
            help="add a filter to show only calculations within a given group",
            action='store',
            type=int)
        parser.add_argument(
            'pks',
            type=int,
            nargs='*',
            help=
            "a list of calculations to show. If empty, all running calculations are shown. If non-empty, ignores the -p and -r options."
        )
        parser.add_argument(
            '-a',
            '--all-states',
            dest='all_states',
            action='store_true',
            help=
            "Overwrite manual set of states if present, and look for calculations in every possible state"
        )
        parser.set_defaults(all_states=False)
        parser.add_argument(
            '-A',
            '--all-users',
            dest='all_users',
            action='store_true',
            help=
            "Show calculations for all users, rather than only for the current user"
        )
        parser.set_defaults(all_users=False)
        parser.add_argument(
            '-t',
            '--absolute-time',
            dest='relative_ctime',
            action='store_false',
            help=
            "Print the absolute creation time, rather than the relative creation time"
        )
        # ~ parser.add_argument('-w', '--with-scheduler-state',
        # ~ action='store_true',
        # ~ help='Print the scheduler state (slow)')
        parser.add_argument('-l',
                            '--limit',
                            type=int,
                            default=None,
                            help='set a limit to the number of rows returned')
        parser.add_argument('-o',
                            '--order-by',
                            choices=['id', 'ctime'],
                            default='ctime',
                            help='order the results')
        parser.set_defaults(relative_ctime=True)

        args = list(args)
        parsed_args = parser.parse_args(args)

        capital_states = [i.upper() for i in parsed_args.states]
        parsed_args.states = capital_states

        if parsed_args.all_states:
            parsed_args.states = None

        try:
            C._list_calculations(
                states=parsed_args.states,
                past_days=parsed_args.past_days,
                pks=parsed_args.pks,
                all_users=parsed_args.all_users,
                group=parsed_args.group,
                group_pk=parsed_args.group_pk,
                relative_ctime=parsed_args.relative_ctime,
                # with_scheduler_state=parsed_args.with_scheduler_state,
                order_by=parsed_args.order_by,
                limit=parsed_args.limit)
        except Exception:
            # Report the exception type, message and full traceback (with the
            # offending line number) in one go, instead of piecemeal dumps
            import traceback
            traceback.print_exc()