Esempio n. 1
0
 def _update_spec(self, spec):
     self._last_spec = spec
     spec_tuple = (spec.id, self._run_oid, spec.iterations, spec.label, spec.workload_name,
                   json.dumps(spec.boot_parameters.to_pod()),
                   json.dumps(spec.runtime_parameters.to_pod()),
                   json.dumps(spec.workload_parameters.to_pod()))
     with self._open_connection() as conn:
         conn.execute('INSERT INTO workload_specs VALUES (?,?,?,?,?,?,?,?)', spec_tuple)
         conn.commit()
         c = conn.execute('SELECT OID FROM workload_specs WHERE run_oid=? AND id=?', (self._run_oid, spec.id))
         self._spec_oid = c.fetchone()[0]
Esempio n. 2
0
 def _update_spec(self, spec):
     self._last_spec = spec
     spec_tuple = (spec.id, self._run_oid, spec.iterations, spec.label,
                   spec.workload_name,
                   json.dumps(spec.boot_parameters.to_pod()),
                   json.dumps(spec.runtime_parameters.to_pod()),
                   json.dumps(spec.workload_parameters.to_pod()))
     with self._open_connection() as conn:
         conn.execute('INSERT INTO workload_specs VALUES (?,?,?,?,?,?,?,?)',
                      spec_tuple)
         conn.commit()
         c = conn.execute(
             'SELECT OID FROM workload_specs WHERE run_oid=? AND id=?',
             (self._run_oid, spec.id))
         self._spec_oid = c.fetchone()[0]
Esempio n. 3
0
    def export_run_output(self, run_output, target_info):  # pylint: disable=unused-argument, too-many-locals
        ''' A final export of the RunOutput that updates existing parameters
            and uploads ones which are only generated after jobs have run.
        '''
        # A failed database connection leaves no cursor; nothing to export to.
        if not self.cursor:
            return

        # Record the final status of every job in this run.
        for job in run_output.jobs:
            self.cursor.execute(
                "UPDATE Jobs SET status=%s WHERE job_id=%s and run_oid=%s",
                (job.status, job.id, self.run_uuid))

        # Refresh the run row now that timing and state are final.
        info_pod = run_output.info.to_pod()
        self.cursor.execute(self.sql_command['update_run'], (
            run_output.event_summary,
            run_output.status,
            info_pod['start_time'],
            info_pod['end_time'],
            info_pod['duration'],
            json.dumps(run_output.state.to_pod()),
            self.run_uuid,
        ))

        # Upload the run-level collections that only exist post-run, then
        # commit everything in one transaction.
        self.sql_upload_events(run_output)
        self.sql_upload_artifacts(run_output, check_uniqueness=True)
        self.sql_upload_metrics(run_output, check_uniqueness=True)
        self.sql_upload_augmentations(run_output)
        self.conn.commit()
Esempio n. 4
0
    def sql_upload_parameters(self, parameter_type, parameter_dict, owner_id=None, job_uuid=None):
        """Insert one Parameters row per entry of *parameter_dict*.

        Values are JSON-serialized; the Python type name of each value is
        stored alongside it.
        """
        # Note, currently no augmentation parameters are workload specific, but in the future
        # this may change
        if parameter_type not in ['workload', 'resource_getter', 'augmentation', 'runtime']:
            # boot parameters are not yet implemented
            # device parameters are redundant due to the targets table
            raise NotImplementedError("{} is not a valid parameter type.".format(parameter_type))

        # Only resource-getter / augmentation parameters carry an owner id;
        # the other types leave both owner columns NULL.
        resource_getter_id = owner_id if parameter_type == "resource_getter" else None
        augmentation_id = owner_id if parameter_type == "augmentation" else None

        for name, value in parameter_dict.items():
            self.cursor.execute(
                self.sql_command['create_parameter'],
                (
                    uuid.uuid4(),
                    self.run_uuid,
                    job_uuid,
                    augmentation_id,
                    resource_getter_id,
                    name,
                    json.dumps(value),
                    str(type(value)),
                    parameter_type,
                ))
Esempio n. 5
0
    def export_job_output(self, job_output, target_info, run_output):  # pylint: disable=too-many-branches, too-many-statements, too-many-locals, unused-argument
        ''' Run once for each job to upload information that is
            updated on a job by job basis.

            Inserts the job row and its classifiers, refreshes the run row,
            then uploads events, artifacts, metrics, augmentations and
            parameters for the job. Commits once at the end.
        '''
        # Ensure we're still connected to the database.
        self.connect_to_database()
        job_uuid = uuid.uuid4()
        # Create a new job
        self.cursor.execute(
            self.sql_command['create_job'],
            (
                job_uuid,
                self.run_uuid,
                job_output.status,
                job_output.retry,
                job_output.label,
                job_output.id,
                job_output.iteration,
                job_output.spec.workload_name,
                job_output.metadata,
                job_output.spec._pod_version,  # pylint: disable=protected-access
                job_output.spec._pod_serialization_version,  # pylint: disable=protected-access
            ))

        # Job-level classifiers: owned by the job, so only the job owner
        # column of the classifier row is populated.
        for classifier in job_output.classifiers:
            classifier_uuid = uuid.uuid4()
            self.cursor.execute(
                self.sql_command['create_classifier'],
                (classifier_uuid, None, None, job_uuid, None, classifier,
                 job_output.classifiers[classifier]))
        # Update the run table and run-level parameters
        self.cursor.execute(
            self.sql_command['update_run'],
            (run_output.event_summary, run_output.status,
             run_output.state.timestamp, run_output.info.end_time, None,
             json.dumps(run_output.state.to_pod()), self.run_uuid))
        # Run-level classifiers: owned by the run, so only the run owner
        # column is populated. Fixed to pass seven values like the
        # job-level create_classifier call above; previously an extra None
        # made this an eight-tuple against the same SQL statement.
        for classifier in run_output.classifiers:
            classifier_uuid = uuid.uuid4()
            self.cursor.execute(
                self.sql_command['create_classifier'],
                (classifier_uuid, None, None, None, self.run_uuid,
                 classifier, run_output.classifiers[classifier]))
        self.sql_upload_artifacts(run_output, record_in_added=True)
        self.sql_upload_metrics(run_output, record_in_added=True)
        self.sql_upload_augmentations(run_output)
        self.sql_upload_resource_getters(run_output)
        self.sql_upload_events(job_output, job_uuid=job_uuid)
        self.sql_upload_artifacts(job_output, job_uuid=job_uuid)
        self.sql_upload_metrics(job_output, job_uuid=job_uuid)
        self.sql_upload_job_augmentations(job_output, job_uuid=job_uuid)
        self.sql_upload_parameters("workload",
                                   job_output.spec.workload_parameters,
                                   job_uuid=job_uuid)
        self.sql_upload_parameters("runtime",
                                   job_output.spec.runtime_parameters,
                                   job_uuid=job_uuid)
        self.conn.commit()
Esempio n. 6
0
    def _process_sections(self, state, sections, seen_sect_ids, seen_wkl_ids):
        """Validate each section entry and register it with the jobs config."""
        for section in sections:
            # Normalise the section's workload entries first.
            workloads = [
                _process_workload_entry(entry, seen_wkl_ids, state.jobs_config)
                for entry in section.pop("workloads", [])
            ]

            # "params" is accepted as an alias for "runtime_params", but
            # giving both in one section is ambiguous and rejected.
            if 'params' in section:
                if 'runtime_params' in section:
                    msg = 'both "params" and "runtime_params" specified in a '\
                          'section: "{}"'
                    raise ConfigError(msg.format(json.dumps(section, indent=None)))
                section['runtime_params'] = section.pop('params')

            group = section.pop('group', None)
            section = _construct_valid_entry(section, seen_sect_ids,
                                             "s", state.jobs_config)
            state.jobs_config.add_section(section, workloads, group)
Esempio n. 7
0
    def _process_sections(self, state, sections, seen_sect_ids, seen_wkl_ids):
        """Process section entries and add each one to the jobs configuration."""
        for entry in sections:
            workloads = []
            for wkl_entry in entry.pop("workloads", []):
                workloads.append(
                    _process_workload_entry(wkl_entry, seen_wkl_ids,
                                            state.jobs_config))

            if 'params' in entry:
                # "params" and "runtime_params" are mutually exclusive aliases.
                if 'runtime_params' in entry:
                    msg = 'both "params" and "runtime_params" specified in a '\
                          'section: "{}"'
                    raise ConfigError(
                        msg.format(json.dumps(entry, indent=None)))
                entry['runtime_params'] = entry.pop('params')

            group = entry.pop('group', None)
            valid_entry = _construct_valid_entry(entry, seen_sect_ids, "s",
                                                 state.jobs_config)
            state.jobs_config.add_section(valid_entry, workloads, group)
Esempio n. 8
0
    def initialize(self, context):
        """Set up the database for this run.

        Connects to the PostgreSQL database, verifies the schema version,
        registers psycopg2 adapters/typecasters for the project's custom
        types, then inserts the initial Runs and Targets rows.

        :param context: execution context providing ``run_output`` and
            ``target_info``.
        :raises ImportError: if the psycopg2 module is not available.
        """
        if not psycopg2:
            raise ImportError(
                'The psycopg2 module is required for the ' +
                'Postgresql Output Processor: {}'.format(import_error_msg))
        # N.B. Typecasters are for postgres->python and adapters the opposite
        self.connect_to_database()
        self.cursor = self.conn.cursor()
        self.verify_schema_versions()

        # Register the adapters and typecasters for enum types.
        # Selecting NULL cast to the enum type is a trick to discover the
        # OID postgres assigned to that enum, which new_type() requires.
        self.cursor.execute("SELECT NULL::status_enum")
        status_oid = self.cursor.description[0][1]
        self.cursor.execute("SELECT NULL::param_enum")
        param_oid = self.cursor.description[0][1]
        LEVEL = psycopg2.extensions.new_type((status_oid, ), "LEVEL",
                                             cast_level)
        psycopg2.extensions.register_type(LEVEL)
        PARAM = psycopg2.extensions.new_type((param_oid, ), "PARAM",
                                             cast_vanilla)
        psycopg2.extensions.register_type(PARAM)
        psycopg2.extensions.register_adapter(level, return_as_is(adapt_level))
        psycopg2.extensions.register_adapter(ListOfLevel,
                                             adapt_ListOfX(adapt_level))
        psycopg2.extensions.register_adapter(KernelVersion, adapt_vanilla)
        psycopg2.extensions.register_adapter(CpuInfo, adapt_vanilla)
        # Dicts (ordered or plain) are stored as json columns.
        psycopg2.extensions.register_adapter(collections.OrderedDict,
                                             extras.Json)
        psycopg2.extensions.register_adapter(dict, extras.Json)
        psycopg2.extensions.register_adapter(
            KernelConfig, create_iterable_adapter(2, explicit_iterate=True))
        # Register ready-made UUID type adapter
        extras.register_uuid()

        # Insert a run_uuid which will be globally accessible during the run
        self.run_uuid = uuid.UUID(str(uuid.uuid4()))
        run_output = context.run_output
        retry_on_status = ListOfLevel(run_output.run_config.retry_on_status)
        self.cursor.execute(
            self.sql_command['create_run'],
            (
                self.run_uuid,
                run_output.event_summary,
                run_output.basepath,
                run_output.status,
                run_output.state.timestamp,
                run_output.info.run_name,
                run_output.info.project,
                run_output.info.project_stage,
                retry_on_status,
                run_output.run_config.max_retries,
                run_output.run_config.bail_on_init_failure,
                run_output.run_config.allow_phone_home,
                run_output.info.uuid,
                run_output.info.start_time,
                run_output.metadata,
                json.dumps(run_output.state.to_pod()),
                run_output.result._pod_version,  # pylint: disable=protected-access
                run_output.result._pod_serialization_version,  # pylint: disable=protected-access
            ))
        # Record the target device this run executed on, linked to the run.
        self.target_uuid = uuid.uuid4()
        target_info = context.target_info
        target_pod = target_info.to_pod()
        self.cursor.execute(
            self.sql_command['create_target'],
            (
                self.target_uuid,
                self.run_uuid,
                target_pod['target'],
                target_pod['cpus'],
                target_pod['os'],
                target_pod['os_version'],
                target_pod['hostid'],
                target_pod['hostname'],
                target_pod['abi'],
                target_pod['is_rooted'],
                # Important caveat: kernel_version is the name of the column in the Targets table
                # However, this refers to kernel_version.version, not to kernel_version as a whole
                target_pod['kernel_version'],
                target_pod['kernel_release'],
                target_info.kernel_version.sha1,
                target_info.kernel_config,
                target_pod['sched_features'],
                target_pod['page_size_kb'],
                # Android Specific
                list(target_pod.get('screen_resolution', [])),
                target_pod.get('prop'),
                target_pod.get('android_id'),
                target_pod.get('pod_version'),
                target_pod.get('pod_serialization_version'),
            ))

        # Commit cursor commands
        self.conn.commit()