Example #1
    def get_case(self, case_id):
        """
        Get a case from the database.

        Parameters
        ----------
        case_id : int or str
            The integer index or string-identifier of the case to be retrieved.

        Returns
        -------
        SolverCase
            An instance of a solver Case populated with data from the
            specified case/iteration.
        """
        iteration_coordinate = self.get_iteration_coordinate(case_id)

        with sqlite3.connect(self.filename) as con:
            cur = con.cursor()
            cur.execute("SELECT * FROM solver_iterations WHERE "
                        "iteration_coordinate=:iteration_coordinate",
                        {"iteration_coordinate": iteration_coordinate})
            # Initialize the Case object from the iterations data
            row = cur.fetchone()
        # sqlite3's context manager only wraps the transaction; close the
        # connection explicitly
        con.close()

        idx, counter, iteration_coordinate, timestamp, success, msg, abs_err, rel_err, \
            output_blob, residuals_blob = row

        output_array = blob_to_array(output_blob)
        residuals_array = blob_to_array(residuals_blob)

        case = SolverCase(self.filename, counter, iteration_coordinate, timestamp, success, msg,
                          abs_err, rel_err, output_array, residuals_array, self._prom2abs)

        return case
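A minimal usage sketch (not part of the snippet above), assuming an OpenMDAO 2.x-style CaseReader that exposes solver cases through a solver_cases collection; the filename and printed attributes are illustrative.

from openmdao.api import CaseReader

cr = CaseReader('cases.sql')                    # illustrative filename
case = cr.solver_cases.get_case(0)              # look up by integer index
same = cr.solver_cases.get_case(case.iteration_coordinate)  # or by coordinate
print(case.abs_err, case.rel_err)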
def assertSystemIterDataRecorded(test, expected, tolerance, prefix=None):
    """
        Expected can be from multiple cases.
    """
    with database_cursor(test.filename) as db_cur:
        f_version, abs2meta = get_format_version_abs2meta(db_cur)

        # iterate through the cases
        for coord, (t0, t1), inputs_expected, outputs_expected, residuals_expected in expected:
            iter_coord = format_iteration_coordinate(coord, prefix=prefix)

            # from the database, get the actual data recorded
            db_cur.execute("SELECT * FROM system_iterations WHERE "
                           "iteration_coordinate=:iteration_coordinate",
                           {"iteration_coordinate": iter_coord})
            row_actual = db_cur.fetchone()
            test.assertTrue(row_actual, 'System iterations table does not contain the requested '
                                        'iteration coordinate: "{}"'.format(iter_coord))

            counter, global_counter, iteration_coordinate, timestamp, success, msg, inputs_text, \
                outputs_text, residuals_text = row_actual

            if f_version >= 3:
                inputs_actual = json_to_np_array(inputs_text, abs2meta)
                outputs_actual = json_to_np_array(outputs_text, abs2meta)
                residuals_actual = json_to_np_array(residuals_text, abs2meta)
            elif f_version in (1, 2):
                inputs_actual = blob_to_array(inputs_text)
                outputs_actual = blob_to_array(outputs_text)
                residuals_actual = blob_to_array(residuals_text)

            # Does the timestamp make sense?
            test.assertTrue(t0 <= timestamp and timestamp <= t1)

            test.assertEqual(success, 1)
            test.assertEqual(msg, '')

            for vartype, actual, expected in (
                ('inputs', inputs_actual, inputs_expected),
                ('outputs', outputs_actual, outputs_expected),
                ('residuals', residuals_actual, residuals_expected),
            ):

                if expected is None:
                    if f_version >= 3:
                        test.assertIsNone(actual)
                    if f_version in (1, 2):
                        test.assertEqual(actual, np.array(None, dtype=object))
                else:
                    # Check to see if the number of values in actual and expected match
                    test.assertEqual(len(actual[0]), len(expected))
                    for key, value in iteritems(expected):
                        # Check to see if the keys in the actual and expected match
                        test.assertTrue(key in actual[0].dtype.names,
                                        '{} variable not found in actual data '
                                        'from recorder'.format(key))
                        # Check to see if the values in actual and expected match
                        assert_rel_error(test, actual[0][key], expected[key], tolerance)
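For reference, a sketch of the expected structure these assert helpers consume; the iteration coordinate, variable names, and values are made up for illustration and would normally come from the test's own model run.

import time

t0 = time.time()
# ... build the problem, run the model, and record a case here ...
t1 = time.time()

# each entry: (raw coordinate, (t0, t1), inputs, outputs, residuals)
coordinate = [0, 'Driver', (0,), 'root._solve_nonlinear', (0,)]
expected = [
    (coordinate, (t0, t1),
     {'comp.x': [3.0]},     # inputs_expected (hypothetical names/values)
     {'comp.y': [6.0]},     # outputs_expected
     {'comp.y': [0.0]}),    # residuals_expected
]
# assertSystemIterDataRecorded(self, expected, tolerance=1e-6)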
def assertDriverIterationDataRecorded(test, db_cur, expected, tolerance):
    """
        Expected can be from multiple cases.
    """
    # iterate through the cases
    for coord, (t0, t1), desvars_expected, responses_expected, objectives_expected, \
            constraints_expected, sysincludes_expected in expected:
        iter_coord = format_iteration_coordinate(coord)

        # from the database, get the actual data recorded
        db_cur.execute("SELECT * FROM driver_iterations WHERE "
                       "iteration_coordinate=:iteration_coordinate",
                       {"iteration_coordinate": iter_coord})
        row_actual = db_cur.fetchone()

        test.assertTrue(row_actual,
                        'Driver iterations table does not contain the requested '
                        'iteration coordinate: "{}"'.format(iter_coord))

        counter, global_counter, iteration_coordinate, timestamp, success, msg, desvars_blob,\
            responses_blob, objectives_blob, constraints_blob, sysincludes_blob = row_actual

        desvars_actual = blob_to_array(desvars_blob)
        responses_actual = blob_to_array(responses_blob)
        objectives_actual = blob_to_array(objectives_blob)
        constraints_actual = blob_to_array(constraints_blob)
        sysincludes_actual = blob_to_array(sysincludes_blob)

        # Does the timestamp make sense?
        test.assertTrue(t0 <= timestamp and timestamp <= t1)

        test.assertEqual(success, 1)
        test.assertEqual(msg, '')

        for vartype, actual, expected in (
            ('desvars', desvars_actual, desvars_expected),
            ('responses', responses_actual, responses_expected),
            ('objectives', objectives_actual, objectives_expected),
            ('constraints', constraints_actual, constraints_expected),
            ('sysincludes', sysincludes_actual, sysincludes_expected),
        ):

            if expected is None:
                test.assertEqual(actual, np.array(None, dtype=object))
            else:
                # Check to see if the number of values in actual and expected match
                test.assertEqual(len(actual[0]), len(expected))
                for key, value in iteritems(expected):
                    # Check to see if the keys in the actual and expected match
                    test.assertTrue(key in actual[0].dtype.names,
                                    '{} variable not found in actual data'
                                    ' from recorder'.format(key))
                    # Check to see if the values in actual and expected match
                    assert_rel_error(test, actual[0][key], expected[key], tolerance)
def assertSolverIterationDataRecorded(test, db_cur, expected, tolerance):
    """
        Expected can be from multiple cases.
    """

    # iterate through the cases
    for coord, (t0, t1), expected_abs_error, expected_rel_error, expected_output, \
            expected_solver_residuals in expected:

        iter_coord = format_iteration_coordinate(coord)

        # from the database, get the actual data recorded
        db_cur.execute("SELECT * FROM solver_iterations WHERE iteration_coordinate=:iteration_coordinate",
                       {"iteration_coordinate": iter_coord})
        row_actual = db_cur.fetchone()
        test.assertTrue(row_actual, 'Solver iterations table does not contain the requested iteration coordinate: "{}"'.format(iter_coord))

        counter, global_counter, iteration_coordinate, timestamp, success, msg, abs_err, rel_err, \
            output_blob, residuals_blob = row_actual

        output_actual = blob_to_array(output_blob)
        residuals_actual = blob_to_array(residuals_blob)
        # Does the timestamp make sense?
        test.assertTrue(t0 <= timestamp and timestamp <= t1, 'timestamp should be between when the model '
                                                             'started and stopped')

        test.assertEqual(success, 1)
        test.assertEqual(msg, '')
        if expected_abs_error:
            test.assertTrue(abs_err, 'Expected absolute error but none recorded')
            assert_rel_error(test, abs_err, expected_abs_error, tolerance)
        if expected_rel_error:
            test.assertTrue(rel_err, 'Expected relative error but none recorded')
            assert_rel_error(test, rel_err, expected_rel_error, tolerance)

        for vartype, actual, expected in (
                ('outputs', output_actual, expected_output),
                ('residuals', residuals_actual, expected_solver_residuals),
        ):

            if expected is None:
                test.assertEqual(actual, np.array(None, dtype=object))
            else:
                # Check to see if the number of values in actual and expected match
                test.assertEqual(len(actual[0]), len(expected))
                for key, value in iteritems(expected):
                    # Check to see if the keys in the actual and expected match
                    test.assertTrue(key in actual[0].dtype.names, '{} variable not found in actual '
                                                                  'data from recorder'.format(key))
                    # Check to see if the values in actual and expected match
                    assert_rel_error(test, actual[0][key], expected[key], tolerance)
def print_blob(name, blob):
    # NOTE: 'indent' is assumed to be a module-level indentation string.
    print(indent, name + ':')
    array = blob_to_array(blob)

    if array.dtype.names:
        for varname in array[0].dtype.names:
            print(indent, indent, varname, array[0][varname])
    else:
        print(indent, indent, 'None')
    print()
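A usage sketch for print_blob, assuming the solver_iterations layout shown in get_case above (the last two columns hold the output and residual blobs); the filename is illustrative and indent must exist at module level.

import sqlite3

indent = '    '  # print_blob relies on a module-level indentation string

con = sqlite3.connect('cases.sql')   # illustrative filename
cur = con.cursor()
cur.execute("SELECT * FROM solver_iterations LIMIT 1")
row = cur.fetchone()
con.close()

if row is not None:
    # per the row unpacking in get_case above, the last two columns are blobs
    print_blob('outputs', row[-2])
    print_blob('residuals', row[-1])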
def assertDriverDerivDataRecorded(test, expected, tolerance, prefix=None):
    """
    Expected can be from multiple cases.
    """
    with database_cursor(test.filename) as db_cur:

        # iterate through the cases
        for coord, (t0, t1), totals_expected in expected:

            iter_coord = format_iteration_coordinate(coord, prefix=prefix)

            # from the database, get the actual data recorded
            db_cur.execute("SELECT * FROM driver_derivatives WHERE "
                           "iteration_coordinate=:iteration_coordinate",
                           {"iteration_coordinate": iter_coord})
            row_actual = db_cur.fetchone()

            db_cur.execute("SELECT abs2meta FROM metadata")
            row_abs2meta = db_cur.fetchone()

            test.assertTrue(row_actual,
                            'Driver iterations table does not contain the requested '
                            'iteration coordinate: "{}"'.format(iter_coord))

            counter, global_counter, iteration_coordinate, timestamp, success, msg,\
                totals_blob = row_actual
            abs2meta = json.loads(row_abs2meta[0]) if row_abs2meta[0] is not None else None
            test.assertTrue(isinstance(abs2meta, dict))

            totals_actual = blob_to_array(totals_blob)

            # Does the timestamp make sense?
            test.assertTrue(t0 <= timestamp and timestamp <= t1)

            test.assertEqual(success, 1)
            test.assertEqual(msg, '')

            if totals_expected is None:
                test.assertEqual(totals_actual, np.array(None, dtype=object))
            else:
                actual = totals_actual[0]
                # Check to see if the number of values in actual and expected match
                test.assertEqual(len(actual), len(totals_expected))
                for key, value in iteritems(totals_expected):
                    # Check to see if the keys in the actual and expected match
                    test.assertTrue(key in actual.dtype.names,
                                    '{} variable not found in actual data'
                                    ' from recorder'.format(key))
                    # Check to see if the values in actual and expected match
                    assert_rel_error(test, actual[key], totals_expected[key], tolerance)
def assertProblemDataRecorded(test, expected, tolerance):
    """
    Expected can be from multiple cases.
    """
    with database_cursor(test.filename) as db_cur:
        f_version, abs2meta = get_format_version_abs2meta(db_cur)

        # iterate through the cases
        for case, (t0, t1), outputs_expected in expected:
            # from the database, get the actual data recorded
            db_cur.execute("SELECT * FROM problem_cases WHERE case_name=:case_name",
                           {"case_name": case})
            row_actual = db_cur.fetchone()

            test.assertTrue(row_actual, 'Problem table does not contain the requested '
                            'case name: "{}"'.format(case))

            counter, global_counter, case_name, timestamp, success, msg, outputs_text = row_actual

            if f_version >= 3:
                outputs_actual = json_to_np_array(outputs_text, abs2meta)
            elif f_version in (1, 2):
                outputs_actual = blob_to_array(outputs_text)

            test.assertEqual(success, 1)
            test.assertEqual(msg, '')

            for vartype, actual, expected in (
                ('outputs', outputs_actual, outputs_expected),
            ):

                if expected is None:
                    if f_version >= 3:
                        test.assertIsNone(actual)
                    if f_version in (1, 2):
                        test.assertEqual(actual, np.array(None, dtype=object))
                else:
                    actual = actual[0]
                    # Check to see if the number of values in actual and expected match
                    test.assertEqual(len(actual), len(expected))
                    for key, value in iteritems(expected):
                        # Check to see if the keys in the actual and expected match
                        test.assertTrue(key in actual.dtype.names,
                                        '{} variable not found in actual data'
                                        ' from recorder'.format(key))
                        # Check to see if the values in actual and expected match
                        assert_rel_error(test, actual[key], expected[key], tolerance)
Example #8
    def __init__(self, source, data, prom2abs, abs2prom, abs2meta, voi_meta, data_format=None):
        """
        Initialize.

        Parameters
        ----------
        source : str
            The unique id of the system/solver/driver/problem that did the recording.
        data : dict-like
            Dictionary of data for a case.
        prom2abs : {'input': dict, 'output': dict}
            Dictionary mapping promoted names of all variables to absolute names.
        abs2prom : {'input': dict, 'output': dict}
            Dictionary mapping absolute names of all variables to promoted names.
        abs2meta : dict
            Dictionary mapping absolute names of all variables to variable metadata.
        voi_meta : dict
            Dictionary mapping absolute names of variables of interest to variable metadata.
        data_format : int
            A version number specifying the format of array data, if not numpy arrays.
        """
        self.source = source
        self._format_version = data_format

        if 'iteration_coordinate' in data.keys():
            self.name = data['iteration_coordinate']
            parts = self.name.split('|')
            if len(parts) > 2:
                self.parent = '|'.join(parts[:-2])
            else:
                self.parent = None
        elif 'case_name' in data.keys():
            self.name = data['case_name']  # problem cases
            self.parent = None
        else:
            self.name = None
            self.parent = None

        self.counter = data['counter']
        self.timestamp = data['timestamp']
        self.success = data['success']
        self.msg = data['msg']

        # for a solver case
        self.abs_err = data['abs_err'] if 'abs_err' in data.keys() else None
        self.rel_err = data['rel_err'] if 'rel_err' in data.keys() else None

        # rename solver keys
        if 'solver_inputs' in data.keys():
            if not isinstance(data, dict):
                data = dict(zip(data.keys(), data))
            data['inputs'] = data.pop('solver_inputs')
            data['outputs'] = data.pop('solver_output')
            data['residuals'] = data.pop('solver_residuals')

        # default properties to None
        self.inputs = None
        self.outputs = None
        self.residuals = None
        self.jacobian = None

        if 'inputs' in data.keys():
            if data_format >= 3:
                inputs = json_to_np_array(data['inputs'], abs2meta)
            elif data_format in (1, 2):
                inputs = blob_to_array(data['inputs'])
                if type(inputs) is np.ndarray and not inputs.shape:
                    inputs = None
            else:
                inputs = data['inputs']
            if inputs is not None:
                self.inputs = PromAbsDict(inputs[0], prom2abs, abs2prom, output=False)

        if 'outputs' in data.keys():
            if data_format >= 3:
                outputs = json_to_np_array(data['outputs'], abs2meta)
            elif self._format_version in (1, 2):
                outputs = blob_to_array(data['outputs'])
                if type(outputs) is np.ndarray and not outputs.shape:
                    outputs = None
            else:
                outputs = data['outputs']
            if outputs is not None:
                self.outputs = PromAbsDict(outputs[0], prom2abs, abs2prom)

        if 'residuals' in data.keys():
            if data_format >= 3:
                residuals = json_to_np_array(data['residuals'], abs2meta)
            elif data_format in (1, 2):
                residuals = blob_to_array(data['residuals'])
                if type(residuals) is np.ndarray and not residuals.shape:
                    residuals = None
            else:
                residuals = data['residuals']
            if residuals is not None:
                self.residuals = PromAbsDict(residuals[0], prom2abs, abs2prom)

        if 'jacobian' in data.keys():
            if data_format >= 2:
                jacobian = blob_to_array(data['jacobian'])
                if type(jacobian) is np.ndarray and not jacobian.shape:
                    jacobian = None
            else:
                jacobian = data['jacobian']
            if jacobian is not None:
                self.jacobian = PromAbsDict(jacobian[0], prom2abs, abs2prom, output=True)

        # save var name & meta dict references for use by self._get_variables_of_type()
        self._prom2abs = prom2abs
        self._abs2prom = abs2prom
        self._abs2meta = abs2meta

        # save VOI dict reference for use by self._scale()
        self._voi_meta = voi_meta
def assertDriverIterDataRecorded(test, expected, tolerance, prefix=None):
    """
        Expected can be from multiple cases.
    """
    with database_cursor(test.filename) as db_cur:

        # iterate through the cases
        for coord, (t0, t1), outputs_expected, inputs_expected in expected:
            iter_coord = format_iteration_coordinate(coord, prefix=prefix)

            # from the database, get the actual data recorded
            db_cur.execute(
                "SELECT * FROM driver_iterations WHERE "
                "iteration_coordinate=:iteration_coordinate",
                {"iteration_coordinate": iter_coord})
            row_actual = db_cur.fetchone()

            db_cur.execute("SELECT abs2meta FROM metadata")
            row_abs2meta = db_cur.fetchone()

            test.assertTrue(
                row_actual,
                'Driver iterations table does not contain the requested '
                'iteration coordinate: "{}"'.format(iter_coord))

            counter, global_counter, iteration_coordinate, timestamp, success, msg,\
                input_blob, output_blob = row_actual

            if PY2:
                abs2meta = pickle.loads(str(
                    row_abs2meta[0])) if row_abs2meta[0] is not None else None
            else:
                abs2meta = pickle.loads(
                    row_abs2meta[0]) if row_abs2meta[0] is not None else None

            inputs_actual = blob_to_array(input_blob)
            outputs_actual = blob_to_array(output_blob)

            # Does the timestamp make sense?
            test.assertTrue(t0 <= timestamp and timestamp <= t1)

            test.assertEqual(success, 1)
            test.assertEqual(msg, '')

            for vartype, actual, expected in (('outputs', outputs_actual,
                                               outputs_expected),
                                              ('inputs', inputs_actual,
                                               inputs_expected)):

                if expected is None:
                    test.assertEqual(actual, np.array(None, dtype=object))
                else:
                    actual = actual[0]
                    # Check to see if the number of values in actual and expected match
                    test.assertEqual(len(actual), len(expected))
                    for key, value in iteritems(expected):
                        # Check to see if the keys in the actual and expected match
                        test.assertTrue(
                            key in actual.dtype.names,
                            '{} variable not found in actual data'
                            ' from recorder'.format(key))
                        # Check to see if the values in actual and expected match
                        assert_rel_error(test, actual[key], expected[key],
                                         tolerance)
def assertSolverIterationDataRecorded(test, db_cur, expected, tolerance):
    """
        Expected can be from multiple cases.
    """

    # iterate through the cases
    for coord, (t0, t1), expected_abs_error, expected_rel_error, expected_output, \
            expected_solver_residuals in expected:

        iter_coord = format_iteration_coordinate(coord)

        # from the database, get the actual data recorded
        db_cur.execute(
            "SELECT * FROM solver_iterations WHERE iteration_coordinate=:iteration_coordinate",
            {"iteration_coordinate": iter_coord})
        row_actual = db_cur.fetchone()
        test.assertTrue(
            row_actual,
            'Solver iterations table does not contain the requested iteration coordinate: "{}"'
            .format(iter_coord))

        counter, global_counter, iteration_coordinate, timestamp, success, msg, abs_err, rel_err, \
            output_blob, residuals_blob = row_actual

        output_actual = blob_to_array(output_blob)
        residuals_actual = blob_to_array(residuals_blob)
        # Does the timestamp make sense?
        test.assertTrue(
            t0 <= timestamp and timestamp <= t1,
            'timestamp should be between when the model '
            'started and stopped')

        test.assertEqual(success, 1)
        test.assertEqual(msg, '')
        if expected_abs_error:
            test.assertTrue(abs_err,
                            'Expected absolute error but none recorded')
            assert_rel_error(test, abs_err, expected_abs_error, tolerance)
        if expected_rel_error:
            test.assertTrue(rel_err,
                            'Expected relative error but none recorded')
            assert_rel_error(test, rel_err, expected_rel_error, tolerance)

        for vartype, actual, expected in (
            ('outputs', output_actual, expected_output),
            ('residuals', residuals_actual, expected_solver_residuals),
        ):

            if expected is None:
                test.assertEqual(actual, np.array(None, dtype=object))
            else:
                # Check to see if the number of values in actual and expected match
                test.assertEqual(len(actual[0]), len(expected))
                for key, value in iteritems(expected):
                    # Check to see if the keys in the actual and expected match
                    test.assertTrue(
                        key in actual[0].dtype.names,
                        '{} variable not found in actual '
                        'data from recorder'.format(key))
                    # Check to see if the values in actual and expected match
                    assert_rel_error(test, actual[0][key], expected[key],
                                     tolerance)
Example #11
def assertSystemIterDataRecorded(test, expected, tolerance, prefix=None):
    """
        Expected can be from multiple cases.
    """
    with database_cursor(test.filename) as db_cur:
        f_version, abs2meta = get_format_version_abs2meta(db_cur)

        # iterate through the cases
        for coord, (
                t0, t1
        ), inputs_expected, outputs_expected, residuals_expected in expected:
            iter_coord = format_iteration_coordinate(coord, prefix=prefix)

            # from the database, get the actual data recorded
            db_cur.execute(
                "SELECT * FROM system_iterations WHERE "
                "iteration_coordinate=:iteration_coordinate",
                {"iteration_coordinate": iter_coord})
            row_actual = db_cur.fetchone()
            test.assertTrue(
                row_actual,
                'System iterations table does not contain the requested '
                'iteration coordinate: "{}"'.format(iter_coord))

            counter, global_counter, iteration_coordinate, timestamp, success, msg, inputs_text, \
                outputs_text, residuals_text = row_actual

            if f_version >= 3:
                inputs_actual = deserialize(inputs_text, abs2meta)
                outputs_actual = deserialize(outputs_text, abs2meta)
                residuals_actual = deserialize(residuals_text, abs2meta)
            elif f_version in (1, 2):
                inputs_actual = blob_to_array(inputs_text)
                outputs_actual = blob_to_array(outputs_text)
                residuals_actual = blob_to_array(residuals_text)

            # Does the timestamp make sense?
            test.assertTrue(t0 <= timestamp and timestamp <= t1)

            test.assertEqual(success, 1)
            test.assertEqual(msg, '')

            for vartype, actual, expected in (
                ('inputs', inputs_actual, inputs_expected),
                ('outputs', outputs_actual, outputs_expected),
                ('residuals', residuals_actual, residuals_expected),
            ):

                if expected is None:
                    if f_version >= 3:
                        test.assertIsNone(actual)
                    if f_version in (1, 2):
                        test.assertEqual(actual, np.array(None, dtype=object))
                else:
                    # Check to see if the number of values in actual and expected match
                    test.assertEqual(len(actual[0]), len(expected))
                    for key, value in iteritems(expected):
                        # Check to see if the keys in the actual and expected match
                        test.assertTrue(
                            key in actual[0].dtype.names,
                            '{} variable not found in actual data '
                            'from recorder'.format(key))
                        # Check to see if the values in actual and expected match
                        assert_rel_error(test, actual[0][key], expected[key],
                                         tolerance)
Example #12
def assertSolverIterDataRecorded(test, expected, tolerance, prefix=None):
    """
        Expected can be from multiple cases.
    """
    with database_cursor(test.filename) as db_cur:
        f_version, abs2meta, prom2abs, conns = get_format_version_abs2meta(
            db_cur)

        # iterate through the cases
        for coord, (t0, t1), expected_abs_error, expected_rel_error, expected_output, \
                expected_solver_residuals in expected:

            iter_coord = format_iteration_coordinate(coord, prefix=prefix)

            # from the database, get the actual data recorded
            db_cur.execute(
                "SELECT * FROM solver_iterations "
                "WHERE iteration_coordinate=:iteration_coordinate",
                {"iteration_coordinate": iter_coord})
            row_actual = db_cur.fetchone()
            test.assertTrue(
                row_actual,
                'Solver iterations table does not contain the requested '
                'iteration coordinate: "{}"'.format(iter_coord))

            counter, global_counter, iteration_coordinate, timestamp, success, msg, \
                abs_err, rel_err, input_blob, output_text, residuals_text = row_actual

            if f_version >= 3:
                output_actual = deserialize(output_text, abs2meta, prom2abs,
                                            conns)
                residuals_actual = deserialize(residuals_text, abs2meta,
                                               prom2abs, conns)
            elif f_version in (1, 2):
                output_actual = blob_to_array(output_text)
                residuals_actual = blob_to_array(residuals_text)

            # Does the timestamp make sense?
            test.assertTrue(
                t0 <= timestamp and timestamp <= t1,
                'timestamp should be between when the model started and stopped'
            )

            test.assertEqual(success, 1)
            test.assertEqual(msg, '')
            if expected_abs_error:
                test.assertTrue(abs_err,
                                'Expected absolute error but none recorded')
                assert_near_equal(abs_err, expected_abs_error, tolerance)
            if expected_rel_error:
                test.assertTrue(rel_err,
                                'Expected relative error but none recorded')
                assert_near_equal(rel_err, expected_rel_error, tolerance)

            for vartype, actual, expected in (
                ('outputs', output_actual, expected_output),
                ('residuals', residuals_actual, expected_solver_residuals),
            ):

                if expected is None:
                    if f_version >= 3:
                        test.assertIsNone(actual)
                    if f_version in (1, 2):
                        test.assertEqual(actual, np.array(None, dtype=object))
                else:
                    # Check to see if the number of values in actual and expected match
                    test.assertEqual(len(actual[0]), len(expected))
                    for key, value in expected.items():
                        # Check to see if the keys in the actual and expected match
                        test.assertTrue(
                            key in actual[0].dtype.names,
                            '{} variable not found in actual data '
                            'from recorder'.format(key))
                        # Check to see if the values in actual and expected match
                        assert_near_equal(actual[0][key], expected[key],
                                          tolerance)
Example #13
    def __init__(self,
                 source,
                 data,
                 prom2abs,
                 abs2prom,
                 abs2meta,
                 conns,
                 auto_ivc_map,
                 var_info,
                 data_format=None):
        """
        Initialize.

        Parameters
        ----------
        source : str
            The unique id of the system/solver/driver/problem that did the recording.
        data : dict-like
            Dictionary of data for a case.
        prom2abs : {'input': dict, 'output': dict}
            Dictionary mapping promoted names of all variables to absolute names.
        abs2prom : {'input': dict, 'output': dict}
            Dictionary mapping absolute names of all variables to promoted names.
        abs2meta : dict
            Dictionary mapping absolute names of all variables to variable metadata.
        conns : dict
            Dictionary of all model connections.
        auto_ivc_map : dict
            Dictionary that maps all auto_ivc sources to either an absolute input name for single
            connections or a promoted input name for multiple connections. This is for output
            display.
        var_info : dict
            Dictionary with information about variables (scaling, indices, execution order).
        data_format : int
            A version number specifying the format of array data, if not numpy arrays.
        """
        self.source = source
        self._format_version = data_format

        if 'iteration_coordinate' in data.keys():
            self.name = data['iteration_coordinate']
            parts = self.name.split('|')
            if len(parts) > 2:
                self.parent = '|'.join(parts[:-2])
            else:
                self.parent = None
        elif 'case_name' in data.keys():
            self.name = data['case_name']  # problem cases
            self.parent = None
        else:
            self.name = None
            self.parent = None

        self.counter = data['counter']
        self.timestamp = data['timestamp']
        self.success = data['success']
        self.msg = data['msg']

        # for a solver or problem case
        self.abs_err = data['abs_err'] if 'abs_err' in data.keys() else None
        self.rel_err = data['rel_err'] if 'rel_err' in data.keys() else None

        # rename solver keys
        if 'solver_inputs' in data.keys():
            if not isinstance(data, dict):
                data = dict(zip(data.keys(), data))
            data['inputs'] = data.pop('solver_inputs')
            data['outputs'] = data.pop('solver_output')
            data['residuals'] = data.pop('solver_residuals')

        # default properties to None
        self.inputs = None
        self.outputs = None
        self.residuals = None
        self.derivatives = None

        if 'inputs' in data.keys():
            if data_format >= 3:
                inputs = deserialize(data['inputs'], abs2meta, prom2abs, conns)
            elif data_format in (1, 2):
                inputs = blob_to_array(data['inputs'])
                if type(inputs) is np.ndarray and not inputs.shape:
                    inputs = None
            else:
                inputs = data['inputs']
            if inputs is not None:
                self.inputs = PromAbsDict(inputs, prom2abs['input'],
                                          abs2prom['input'])

        if 'outputs' in data.keys():
            if data_format >= 3:
                outputs = deserialize(data['outputs'], abs2meta, prom2abs,
                                      conns)
            elif self._format_version in (1, 2):
                outputs = blob_to_array(data['outputs'])
                if type(outputs) is np.ndarray and not outputs.shape:
                    outputs = None
            else:
                outputs = data['outputs']
            if outputs is not None:
                self.outputs = PromAbsDict(outputs,
                                           prom2abs['output'],
                                           abs2prom['output'],
                                           in_prom2abs=prom2abs['input'],
                                           auto_ivc_map=auto_ivc_map)

        if 'residuals' in data.keys():
            if data_format >= 3:
                residuals = deserialize(data['residuals'], abs2meta, prom2abs,
                                        conns)
            elif data_format in (1, 2):
                residuals = blob_to_array(data['residuals'])
                if type(residuals) is np.ndarray and not residuals.shape:
                    residuals = None
            else:
                residuals = data['residuals']
            if residuals is not None:
                self.residuals = PromAbsDict(residuals,
                                             prom2abs['output'],
                                             abs2prom['output'],
                                             in_prom2abs=prom2abs['input'],
                                             auto_ivc_map=auto_ivc_map)

        if 'jacobian' in data.keys():
            if data_format >= 2:
                jacobian = blob_to_array(data['jacobian'])
                if type(jacobian) is np.ndarray and not jacobian.shape:
                    jacobian = None
            else:
                jacobian = data['jacobian']
            if jacobian is not None:
                self.derivatives = PromAbsDict(jacobian,
                                               prom2abs['output'],
                                               abs2prom['output'],
                                               in_prom2abs=prom2abs['input'],
                                               auto_ivc_map=auto_ivc_map)

        # save var name & meta dict references for use by self._get_variables_of_type()
        self._prom2abs = prom2abs
        self._abs2prom = abs2prom
        self._abs2meta = abs2meta
        self._conns = conns
        self._auto_ivc_map = auto_ivc_map

        # save VOI dict reference for use by self._scale()
        self._var_info = var_info
def assertDriverDerivDataRecorded(test, expected, tolerance, prefix=None):
    """
    Expected can be from multiple cases.
    """
    with database_cursor(test.filename) as db_cur:

        # iterate through the cases
        for coord, (t0, t1), totals_expected in expected:

            iter_coord = format_iteration_coordinate(coord, prefix=prefix)

            # from the database, get the actual data recorded
            db_cur.execute(
                "SELECT * FROM driver_derivatives WHERE "
                "iteration_coordinate=:iteration_coordinate",
                {"iteration_coordinate": iter_coord})
            row_actual = db_cur.fetchone()

            db_cur.execute("SELECT abs2meta, format_version FROM metadata")
            row_abs2meta = db_cur.fetchone()
            f_version = row_abs2meta[1]

            test.assertTrue(
                row_actual,
                'Driver iterations table does not contain the requested '
                'iteration coordinate: "{}"'.format(iter_coord))

            counter, global_counter, iteration_coordinate, timestamp, success, msg,\
                totals_blob = row_actual

            if row_abs2meta[0] is None:
                abs2meta = None
            else:
                if f_version >= 14:
                    abs2meta = zlib_blob_to_json(row_abs2meta[0])
                else:
                    abs2meta = json.loads(row_abs2meta[0])

            test.assertTrue(isinstance(abs2meta, dict))

            totals_actual = blob_to_array(totals_blob)

            # Does the timestamp make sense?
            test.assertTrue(t0 <= timestamp and timestamp <= t1)

            test.assertEqual(success, 1)
            test.assertEqual(msg, '')

            if totals_expected is None:
                test.assertEqual(totals_actual, np.array(None, dtype=object))
            else:
                actual = totals_actual[0]
                # Check to see if the number of values in actual and expected match
                test.assertEqual(len(actual), len(totals_expected))
                for key, value in totals_expected.items():
                    # Check to see if the keys in the actual and expected match
                    test.assertTrue(
                        key in actual.dtype.names,
                        '{} variable not found in actual data'
                        ' from recorder'.format(key))
                    # Check to see if the values in actual and expected match
                    assert_near_equal(actual[key], totals_expected[key],
                                      tolerance)
Example #15
    def __init__(self, source, data, prom2abs, abs2prom, abs2meta, voi_meta, data_format=None):
        """
        Initialize.

        Parameters
        ----------
        source : str
            The unique id of the system/solver/driver/problem that did the recording.
        data : dict-like
            Dictionary of data for a case.
        prom2abs : {'input': dict, 'output': dict}
            Dictionary mapping promoted names of all variables to absolute names.
        abs2prom : {'input': dict, 'output': dict}
            Dictionary mapping absolute names of all variables to promoted names.
        abs2meta : dict
            Dictionary mapping absolute names of all variables to variable metadata.
        voi_meta : dict
            Dictionary mapping absolute names of variables of interest to variable metadata.
        data_format : int
            A version number specifying the format of array data, if not numpy arrays.
        """
        self.source = source
        self._format_version = data_format

        if 'iteration_coordinate' in data.keys():
            self.iteration_coordinate = data['iteration_coordinate']
            parts = self.iteration_coordinate.split('|')
            if len(parts) > 2:
                self.parent = '|'.join(parts[:-2])
            else:
                self.parent = None
        elif 'case_name' in data.keys():
            self.iteration_coordinate = data['case_name']  # problem cases
            self.parent = None
        else:
            self.iteration_coordinate = None
            self.parent = None

        self.counter = data['counter']
        self.timestamp = data['timestamp']
        self.success = data['success']
        self.msg = data['msg']

        # for a solver case
        self.abs_err = data['abs_err'] if 'abs_err' in data.keys() else None
        self.rel_err = data['rel_err'] if 'rel_err' in data.keys() else None

        # rename solver keys
        if 'solver_inputs' in data.keys():
            if not isinstance(data, dict):
                data = dict(zip(data.keys(), data))
            data['inputs'] = data.pop('solver_inputs')
            data['outputs'] = data.pop('solver_output')
            data['residuals'] = data.pop('solver_residuals')

        # default properties to None
        self.inputs = None
        self.outputs = None
        self.residuals = None
        self.jacobian = None

        if 'inputs' in data.keys():
            if data_format >= 3:
                inputs = json_to_np_array(data['inputs'], abs2meta)
            elif data_format in (1, 2):
                inputs = blob_to_array(data['inputs'])
                if type(inputs) is np.ndarray and not inputs.shape:
                    inputs = None
            else:
                inputs = data['inputs']
            if inputs is not None:
                self.inputs = PromotedToAbsoluteMap(inputs[0], prom2abs, abs2prom, output=False)

        if 'outputs' in data.keys():
            if data_format >= 3:
                outputs = json_to_np_array(data['outputs'], abs2meta)
            elif self._format_version in (1, 2):
                outputs = blob_to_array(data['outputs'])
                if type(outputs) is np.ndarray and not outputs.shape:
                    outputs = None
            else:
                outputs = data['outputs']
            if outputs is not None:
                self.outputs = PromotedToAbsoluteMap(outputs[0], prom2abs, abs2prom)

        if 'residuals' in data.keys():
            if data_format >= 3:
                residuals = json_to_np_array(data['residuals'], abs2meta)
            elif data_format in (1, 2):
                residuals = blob_to_array(data['residuals'])
                if type(residuals) is np.ndarray and not residuals.shape:
                    residuals = None
            else:
                residuals = data['residuals']
            if residuals is not None:
                self.residuals = PromotedToAbsoluteMap(residuals[0], prom2abs, abs2prom)

        if 'jacobian' in data.keys():
            if data_format >= 2:
                jacobian = blob_to_array(data['jacobian'])
                if type(jacobian) is np.ndarray and not jacobian.shape:
                    jacobian = None
            else:
                jacobian = data['jacobian']
            if jacobian is not None:
                self.jacobian = PromotedToAbsoluteMap(jacobian[0], prom2abs, abs2prom, output=True)

        # save var name & meta dict references for use by self._get_variables_of_type()
        self._prom2abs = prom2abs
        self._abs2prom = abs2prom
        self._abs2meta = abs2meta

        # save VOI dict reference for use by self._scale()
        self._voi_meta = voi_meta