Example #1
# Assumed imports for this snippet: `text` from SQLAlchemy and the `dumps_json`
# helper from aiida.backends.general.migrations.utils (see Example #5).
from sqlalchemy import text

from aiida.backends.general.migrations.utils import dumps_json


def get_serialized_legacy_workflow_logs(connection):
    """Get the serialized log records that correspond to legacy workflows."""
    query = connection.execute(
        text("""
        SELECT db_dblog.id, db_dblog.time, db_dblog.loggername, db_dblog.levelname, db_dblog.objpk, db_dblog.objname,
        db_dblog.message, db_dblog.metadata FROM db_dblog
        WHERE
            (db_dblog.objname LIKE 'aiida.workflows.user.%')
        """))
    # Turn each result row into a column-name -> value mapping before serializing.
    return dumps_json([dict(zip(row.keys(), row)) for row in query])
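
A minimal usage sketch, assuming the function is driven from an Alembic data migration. The `upgrade` step and the surrounding migration module are assumptions, not part of the original snippet; only `op.get_bind()` is standard Alembic API.

from alembic import op

def upgrade():
    # `op.get_bind()` returns the SQLAlchemy connection of the running migration.
    connection = op.get_bind()
    serialized_logs = get_serialized_legacy_workflow_logs(connection)
    # `serialized_logs` is a JSON string, ready to be written to an export file.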
Example #2
def get_serialized_logs_with_no_nodes(connection):
    """Get the serialized log records that correspond to nodes that were deleted."""
    # Same assumed imports as Example #1 (`text`, `dumps_json`).
    query = connection.execute(
        text("""
        SELECT db_dblog.id, db_dblog.time, db_dblog.loggername, db_dblog.levelname, db_dblog.objpk, db_dblog.objname,
        db_dblog.message, db_dblog.metadata FROM db_dblog
        WHERE
            (db_dblog.objname LIKE 'node.%') AND NOT EXISTS
            (SELECT 1 FROM db_dbnode WHERE db_dbnode.id = db_dblog.objpk LIMIT 1)
        """))
    # Serialize only the log rows whose objpk no longer matches a DbNode record.
    return dumps_json([dict(zip(row.keys(), row)) for row in query])
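
Both snippets rely on a `dumps_json` helper whose implementation is not shown in this listing. A minimal sketch of what such a helper might look like, assuming it only needs to stringify `datetime` values such as the `db_dblog.time` column; this is an illustration, not the actual AiiDA utility.

import datetime
import json

def dumps_json(records):
    """Serialize a list of row dictionaries to JSON, stringifying datetimes."""
    def default(obj):
        # DbLog.time comes back as a datetime object, which the stdlib json
        # encoder cannot handle natively.
        if isinstance(obj, datetime.datetime):
            return obj.isoformat()
        raise TypeError(f'Object of type {type(obj)} is not JSON serializable')

    return json.dumps(records, default=default)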
Example #3
def get_serialized_legacy_workflow_logs(schema_editor):
    """Get the serialized log records that correspond to legacy workflows."""
    # Django variant of Example #1: raw SQL through the schema editor's cursor.
    # `dumps_json` is assumed to come from aiida.backends.general.migrations.utils.
    with schema_editor.connection.cursor() as cursor:
        cursor.execute("""
            SELECT db_dblog.id, db_dblog.time, db_dblog.loggername, db_dblog.levelname, db_dblog.objpk,
            db_dblog.objname, db_dblog.message, db_dblog.metadata FROM db_dblog
            WHERE
                (db_dblog.objname LIKE 'aiida.workflows.user.%')
            """)
        # The raw cursor returns plain tuples, so the column names are restated
        # here in the same order as the SELECT list above.
        keys = [
            'id', 'time', 'loggername', 'levelname', 'objpk', 'objname',
            'message', 'metadata'
        ]
        return dumps_json([dict(zip(keys, row)) for row in cursor.fetchall()])
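
A sketch of how this helper could be wired into a Django data migration. The `_export_dblog_data` name and the dependency entry are illustrative placeholders; `migrations.RunPython` and the `(apps, schema_editor)` signature are standard Django API.

from django.db import migrations

def _export_dblog_data(apps, schema_editor):
    # Forward step of a data migration: serialize the legacy workflow logs
    # before the corresponding DbLog rows are altered or removed.
    serialized_logs = get_serialized_legacy_workflow_logs(schema_editor)
    # ... write `serialized_logs` somewhere for later inspection/export ...

class Migration(migrations.Migration):
    # The dependency below is a placeholder for the preceding migration.
    dependencies = [('db', '0023_previous_migration')]
    operations = [
        migrations.RunPython(_export_dblog_data,
                             reverse_code=migrations.RunPython.noop),
    ]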
Example #4
def get_serialized_logs_with_no_nodes(schema_editor):
    """Get the serialized log records that don't correspond to a node."""
    # Django variant of Example #2; same assumptions as Example #3.
    with schema_editor.connection.cursor() as cursor:
        cursor.execute("""
            SELECT db_dblog.id, db_dblog.time, db_dblog.loggername, db_dblog.levelname, db_dblog.objpk,
            db_dblog.objname, db_dblog.message, db_dblog.metadata FROM db_dblog
            WHERE
                (db_dblog.objname LIKE 'node.%') AND NOT EXISTS
                (SELECT 1 FROM db_dbnode WHERE db_dbnode.id = db_dblog.objpk LIMIT 1)
            """)
        # Column names restated in the same order as the SELECT list above.
        keys = [
            'id', 'time', 'loggername', 'levelname', 'objpk', 'objname',
            'message', 'metadata'
        ]
        return dumps_json([dict(zip(keys, row)) for row in cursor.fetchall()])
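
Example #5 below also calls counting companions such as `get_logs_with_no_nodes_number`. Their implementations are not shown in this listing; the following is a plausible sketch following the same cursor pattern, with the body inferred from the usage rather than taken from the source.

def get_logs_with_no_nodes_number(schema_editor):
    """Count the log records that don't correspond to a node (sketch)."""
    with schema_editor.connection.cursor() as cursor:
        cursor.execute("""
            SELECT COUNT(*) FROM db_dblog
            WHERE
                (db_dblog.objname LIKE 'node.%') AND NOT EXISTS
                (SELECT 1 FROM db_dbnode WHERE db_dbnode.id = db_dblog.objpk LIMIT 1)
            """)
        return cursor.fetchone()[0]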
Example #5
    def setUpBeforeMigration(self):  # pylint: disable=too-many-locals
        import json
        import importlib
        from aiida.backends.general.migrations.utils import dumps_json

        update_024 = importlib.import_module(
            'aiida.backends.djsite.db.migrations.0024_dblog_update')

        DbNode = self.apps.get_model('db', 'DbNode')
        DbWorkflow = self.apps.get_model('db', 'DbWorkflow')
        DbLog = self.apps.get_model('db', 'DbLog')

        # Creating the needed nodes & workflows
        calc_1 = DbNode(type='node.process.calculation.CalculationNode.',
                        user_id=self.default_user.id)
        param = DbNode(type='data.dict.Dict.', user_id=self.default_user.id)
        leg_workf = DbWorkflow(label='Legacy WorkflowNode',
                               user_id=self.default_user.id)
        calc_2 = DbNode(type='node.process.calculation.CalculationNode.',
                        user_id=self.default_user.id)

        # Storing them
        calc_1.save()
        param.save()
        leg_workf.save()
        calc_2.save()

        # Creating the corresponding log records and storing them
        log_1 = DbLog(loggername='CalculationNode logger',
                      objpk=calc_1.pk,
                      objname='node.calculation.job.quantumespresso.pw.',
                      message='calculation node 1',
                      metadata=json.dumps({
                          'msecs': 719.0849781036377,
                          'objpk': calc_1.pk,
                          'lineno': 350,
                          'thread': 140011612940032,
                          'asctime': '10/21/2018 12:39:51 PM',
                          'created': 1540118391.719085,
                          'levelno': 23,
                          'message': 'calculation node 1',
                          'objname': 'node.calculation.job.quantumespresso.pw.',
                      }))
        log_2 = DbLog(loggername='something.else logger',
                      objpk=param.pk,
                      objname='something.else.',
                      message='parameter data with log message')
        log_3 = DbLog(
            loggername='TopologicalWorkflow logger',
            objpk=leg_workf.pk,
            objname='aiida.workflows.user.topologicalworkflows.topo.TopologicalWorkflow',
            message='legacy workflow log message')
        log_4 = DbLog(loggername='CalculationNode logger',
                      objpk=calc_2.pk,
                      objname='node.calculation.job.quantumespresso.pw.',
                      message='calculation node 2',
                      metadata=json.dumps({
                          'msecs': 719.0849781036377,
                          'objpk': calc_2.pk,
                          'lineno': 360,
                          'levelno': 23,
                          'message': 'calculation node 2',
                          'objname': 'node.calculation.job.quantumespresso.pw.',
                      }))
        # Creating two more log records that don't correspond to a node
        log_5 = DbLog(loggername='CalculationNode logger',
                      objpk=(calc_2.pk + 1000),
                      objname='node.calculation.job.quantumespresso.pw.',
                      message='calculation node 1000',
                      metadata=json.dumps({
                          'msecs': 718,
                          'objpk': (calc_2.pk + 1000),
                          'lineno': 361,
                          'levelno': 25,
                          'message': 'calculation node 1000',
                          'objname': 'node.calculation.job.quantumespresso.pw.',
                      }))
        log_6 = DbLog(loggername='CalculationNode logger',
                      objpk=(calc_2.pk + 1001),
                      objname='node.calculation.job.quantumespresso.pw.',
                      message='calculation node 1001',
                      metadata=json.dumps({
                          'msecs': 722,
                          'objpk': (calc_2.pk + 1001),
                          'lineno': 362,
                          'levelno': 24,
                          'message': 'calculation node 1001',
                          'objname': 'node.calculation.job.quantumespresso.pw.',
                      }))

        # Storing the log records
        log_1.save()
        log_2.save()
        log_3.save()
        log_4.save()
        log_5.save()
        log_6.save()

        # Temporarily storing the information needed for the checks in the test
        self.to_check = dict()

        # Keeping calculation & calculation log ids
        self.to_check['CalculationNode'] = (
            calc_1.pk,
            log_1.pk,
            calc_2.pk,
            log_4.pk,
        )

        # Getting the serialized Dict logs
        param_data = DbLog.objects.filter(objpk=param.pk).filter(
            objname='something.else.').values(*update_024.values_to_export)[:1]
        serialized_param_data = dumps_json(list(param_data))
        # Getting the serialized logs for the unknown entity logs (as the export migration function
        # provides them) - this should coincide with the above
        serialized_unknown_exp_logs = update_024.get_serialized_unknown_entity_logs(
            self.schema_editor)
        # Getting their number
        unknown_exp_logs_number = update_024.get_unknown_entity_log_number(
            self.schema_editor)
        self.to_check['Dict'] = (serialized_param_data,
                                 serialized_unknown_exp_logs,
                                 unknown_exp_logs_number)

        # Getting the serialized legacy workflow logs
        leg_wf = DbLog.objects.filter(objpk=leg_workf.pk).filter(
            objname='aiida.workflows.user.topologicalworkflows.topo.TopologicalWorkflow'
        ).values(*update_024.values_to_export)[:1]
        serialized_leg_wf_logs = dumps_json(list(leg_wf))
        # Getting the serialized logs for the legacy workflow logs (as the export migration function
        # provides them) - this should coincide with the above
        serialized_leg_wf_exp_logs = update_024.get_serialized_legacy_workflow_logs(
            self.schema_editor)
        leg_wf_exp_logs_number = update_024.get_legacy_workflow_log_number(
            self.schema_editor)
        self.to_check['WorkflowNode'] = (serialized_leg_wf_logs,
                                         serialized_leg_wf_exp_logs,
                                         leg_wf_exp_logs_number)

        # Getting the serialized logs that don't correspond to a DbNode record
        logs_no_node = DbLog.objects.filter(
            id__in=[log_5.id, log_6.id]).values(*update_024.values_to_export)
        serialized_logs_no_node = dumps_json(list(logs_no_node))
        # Getting the serialized logs that don't correspond to a node (as the export migration function
        # provides them) - this should coincide with the above
        serialized_logs_exp_no_node = update_024.get_serialized_logs_with_no_nodes(
            self.schema_editor)
        logs_no_node_number = update_024.get_logs_with_no_nodes_number(
            self.schema_editor)
        self.to_check['NoNode'] = (serialized_logs_no_node,
                                   serialized_logs_exp_no_node,
                                   logs_no_node_number)
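
For context, `self.to_check` is consumed after the migration has run. A hedged sketch of what that comparison might look like, assuming `dumps_json` produces plain JSON; the method name and exact assertions are assumptions, not part of the original test.

    def test_dblog_export_data(self):
        # Sketch: each entry pairs the directly-queried serialization with the
        # one produced by the migration helpers, so the two should match.
        import json
        serialized_param_data, serialized_unknown_exp_logs, unknown_exp_logs_number = self.to_check['Dict']
        self.assertEqual(json.loads(serialized_param_data),
                         json.loads(serialized_unknown_exp_logs))
        self.assertEqual(unknown_exp_logs_number,
                         len(json.loads(serialized_unknown_exp_logs)))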