Code example #1
0
File: test_vartree.py  Project: cephdon/meta-core
 def test_flatten(self):
     """A DumbVT flattens to a set of dotted (name, value) pairs."""
     tree = DumbVT()
     expected = {
         ('foo.vt2.vt3.a', 1.),
         ('foo.vt2.vt3.b', 12.),
         ('foo.vt2.x', -1.),
         ('foo.vt2.y', -2.),
         ('foo.v1', 1.),
         ('foo.v2', 2.),
     }
     self.assertEqual(set(flatten_obj('foo', tree)), expected)
Code example #2
0
 def test_flatten(self):
     """Flattening a DumbVT also yields the 'data' entry at each level."""
     tree = DumbVT()
     expected = {
         ('foo.vt2.vt3.a', 1.),
         ('foo.vt2.vt3.b', 12.),
         ('foo.vt2.x', -1.),
         ('foo.vt2.y', -2.),
         ('foo.v1', 1.),
         ('foo.v2', 2.),
         ('foo.vt2.vt3.data', ''),
         ('foo.vt2.data', ''),
         ('foo.data', ''),
     }
     self.assertEqual(set(flatten_obj('foo', tree)), expected)
Code example #3
0
 def test_flatten(self):
     """A 2-D array flattens to indexed keys in row-major order."""
     arr = array([[1, 2], [3, 4], [5, 6]])
     vals = iter(range(1, 7))
     expected = [('foo[%d][%d]' % (r, c), next(vals))
                 for r in range(3) for c in range(2)]
     self.assertEqual(flatten_obj('foo', arr), expected)
Code example #4
0
 def test_flatten(self):
     """flatten_obj indexes every element of a 2-D array, row by row."""
     matrix = array([[1, 2], [3, 4], [5, 6]])
     expected = []
     for row in range(3):
         for col in range(2):
             expected.append(('foo[%d][%d]' % (row, col),
                              2 * row + col + 1))
     self.assertEqual(flatten_obj('foo', matrix), expected)
Code example #5
0
File: vartree.py  Project: MrShoks/OpenMDAO-Framework
def _flatten_vartree(name, vt):
    """Flatten variable tree *vt*, prefixing each flattened key with *name*."""
    flat = []
    for child_name, child_val in vt._items(set()):
        for key, value in flatten_obj(child_name, child_val):
            flat.append((name + '.' + key, value))
    return flat
Code example #6
0
def caseset_query_to_csv(data, filename='cases.csv', delimiter=',', quotechar='"'):
    """
    Post-processing function that takes a case_data_set and outputs a csv
    file. Should be able to pass tests of current csv case recorder (column
    ordering, meta column, etc...) Assume query by case (not variable).

    Inputs:

    data - results of fetch on Query object

    """

    cds = data.cds

    # NOTE(review): this driver-id -> name map is built but never used
    # below; kept for fidelity with the original recorder code.
    drivers = {}
    for driver in cds.drivers:
        drivers[driver['_id']] = driver['name']

    # Determine inputs & outputs, map pseudos to expression names.
    expressions = cds.simulation_info['expressions']
    metadata = cds.simulation_info['variable_metadata']
    inputs = []
    outputs = []
    pseudos = {}  # pcomp name -> "data_type(expression)" display name
    for name in sorted(data[0].keys()):

        # All inputs and outputs that change.
        if name in metadata:
            if metadata[name]['iotype'] == 'in':
                inputs.append(name)
            else:
                outputs.append(name)

        # Include objectives and constraints from all simulation levels.
        elif '_pseudo_' in name and not name.endswith('.out0'):
            for exp_name, exp_dict in expressions.items():
                if exp_dict['pcomp_name'] == name:
                    pseudos[name] = '%s(%s)' % (exp_dict['data_type'], exp_name)
                    break
            else:
                raise RuntimeError('Cannot find %r in expressions' % name)
            outputs.append(name)

        # Allow private vars from components.
        elif '.' in name:
            outputs.append(name)

    # Open the CSV file with a context manager so the handle is closed even
    # if a write raises (the original leaked the file on error, and carried
    # a dead "outfile is None" check inside the loop).
    # 'wb' matches the original (Python 2) csv.writer usage.
    with open(filename, 'wb') as outfile:
        csv_writer = csv.writer(outfile, delimiter=delimiter,
                                         quotechar=quotechar,
                                         quoting=csv.QUOTE_NONNUMERIC)
        # No automatic data type conversion is performed unless the
        # QUOTE_NONNUMERIC format option is specified (in which case unquoted
        # fields are transformed into floats).

        # data is a list of lists where the inner list is the values and
        # metadata for a case.  The sorted column lists persist across
        # iterations so a row with no keys reuses the previous ordering
        # (matches the original behavior).
        sorted_input_keys = []
        sorted_input_values = []
        sorted_output_keys = []
        sorted_output_values = []
        header_size = 0

        for i, row in enumerate(data):

            input_keys = []
            input_values = []
            for name in inputs:
                obj = row[row.name_map[name]]
                for key, value in flatten_obj(name, obj):
                    input_keys.append(key)
                    input_values.append(value)

            output_keys = []
            output_values = []
            for name in outputs:
                obj = row[row.name_map[name]]
                # Report pseudo-components under their expression names.
                if name in pseudos:
                    name = pseudos[name]
                for key, value in flatten_obj(name, obj):
                    output_keys.append(key)
                    output_values.append(value)

            # This should not be necessary, however python's csv writer
            # is not writing boolean variables correctly as strings.
            input_values = [str(v) if isinstance(v, bool) else v
                            for v in input_values]
            output_values = [str(v) if isinstance(v, bool) else v
                             for v in output_values]

            # Sort the columns alphabetically, keeping keys/values paired.
            if input_keys:
                sorted_input_keys, sorted_input_values = \
                    (list(item) for item in zip(*sorted(zip(input_keys,
                                                            input_values))))
            if output_keys:
                sorted_output_keys, sorted_output_values = \
                    (list(item) for item in zip(*sorted(zip(output_keys,
                                                            output_values))))

            # Write the header row once, before the first data row.
            if i == 0:
                headers = ['timestamp', '/INPUTS']
                headers.extend(sorted_input_keys)
                headers.append('/OUTPUTS')
                headers.extend(sorted_output_keys)
                headers.extend(['/METADATA', 'uuid', 'parent_uuid', 'msg'])

                csv_writer.writerow(headers)
                header_size = len(headers)

            timestamp = row[row.name_map['timestamp']]
            csv_data = [timestamp]
            csv_data.append('')          # '/INPUTS' spacer column
            csv_data.extend(sorted_input_values)
            csv_data.append('')          # '/OUTPUTS' spacer column
            csv_data.extend(sorted_output_values)
            exc = row[row.name_map['error_message']]
            msg = '' if exc is None else str(exc)
            case_uuid = row[row.name_map['_id']]
            parent_uuid = row[row.name_map['_parent_id']]
            csv_data.extend(['', case_uuid, parent_uuid, msg])

            # Guard against a row whose column set differs from the header.
            if header_size != len(csv_data):
                raise RuntimeError("number of data points (%d) doesn't match"
                                   " header size (%d) in CSV recorder"
                                   % (len(csv_data), header_size))

            csv_writer.writerow(csv_data)
Code example #7
0
def _flatten_vartree(name, vt):
    """Flatten variable tree *vt*, prefixing flattened keys with *name*."""
    return [('%s.%s' % (name, key), value)
            for child_name, child_val in vt._items(set())
            for key, value in flatten_obj(child_name, child_val)]
Code example #8
0
    def record(self, driver, inputs, outputs, exc, case_uuid, parent_uuid):
        """Store the case in a csv file. The format for a line of data
        follows:

        Field 1      - timestamp
        Field 2      - [Empty]
        Field 3      - Input 1
        ...
        Field i+2    - Input i
        Field i+3    - [Empty]
        Field i+4    - Output 1
        ...
        Field i+j+4  - Output j
        Field i+j+5  - [Empty]
        Field i+j+6  - uuid
        Field i+j+7  - parent_uuid
        Field i+j+8  - msg
        """
        # Fail fast: the original only checked for a closed recorder after
        # flattening and sorting all of the case data.
        if self.outfile is None:
            raise RuntimeError('Attempt to record on closed recorder')

        sorted_input_keys = []
        sorted_input_values = []
        sorted_output_keys = []
        sorted_output_values = []

        in_cfg, out_cfg = self._cfg_map[driver]

        input_keys = []
        input_values = []
        for name, obj in zip(in_cfg, inputs):
            for key, value in flatten_obj(name, obj):
                input_keys.append(key)
                input_values.append(value)

        output_keys = []
        output_values = []
        for name, obj in zip(out_cfg, outputs):
            for key, value in flatten_obj(name, obj):
                output_keys.append(key)
                output_values.append(value)

        # This should not be necessary, however python's csv writer
        # is not writing boolean variables correctly as strings.
        input_values = [str(v) if isinstance(v, bool) else v
                        for v in input_values]
        output_values = [str(v) if isinstance(v, bool) else v
                         for v in output_values]

        # Sort the columns alphabetically, keeping keys/values paired.
        if input_keys:
            sorted_input_keys, sorted_input_values = \
                (list(item) for item in zip(*sorted(zip(input_keys,
                                                        input_values))))
        if output_keys:
            sorted_output_keys, sorted_output_values = \
                (list(item) for item in zip(*sorted(zip(output_keys,
                                                        output_values))))

        # Write the header row once per recorder configuration.
        if self._write_headers or self._header_size == 0:
            headers = ['timestamp', '/INPUTS']
            headers.extend(sorted_input_keys)
            headers.append('/OUTPUTS')
            headers.extend(sorted_output_keys)
            headers.extend(['/METADATA', 'uuid', 'parent_uuid', 'msg'])

            self.csv_writer.writerow(headers)
            self._write_headers = False
            self._header_size = len(headers)

        msg = '' if exc is None else str(exc)

        data = [time.time()]
        data.append('')                  # '/INPUTS' spacer column
        data.extend(sorted_input_values)
        data.append('')                  # '/OUTPUTS' spacer column
        data.extend(sorted_output_values)
        data.extend(['', case_uuid, parent_uuid, msg])

        # Guard against a case whose column set differs from the header.
        if self._header_size != len(data):
            raise RuntimeError("number of data points (%d) doesn't match header"
                               " size (%d) in CSV recorder"
                               % (len(data), self._header_size))

        self.csv_writer.writerow(data)
Code example #9
0
def caseset_query_to_csv(data,
                         filename='cases.csv',
                         delimiter=',',
                         quotechar='"'):
    """
    Post-processing function that takes a case_data_set and outputs a csv
    file. Should be able to pass tests of current csv case recorder (column
    ordering, meta column, etc...) Assume query by case (not variable).

    Inputs:

    data - results of fetch on Query object

    """

    cds = data.cds

    # NOTE(review): this driver-id -> name map is built but never used
    # below; kept for fidelity with the original recorder code.
    drivers = {}
    for driver in cds.drivers:
        drivers[driver['_id']] = driver['name']

    # Determine inputs & outputs, map pseudos to expression names.
    expressions = cds.simulation_info['expressions']
    metadata = cds.simulation_info['variable_metadata']
    inputs = []
    outputs = []
    pseudos = {}  # pcomp name -> "data_type(expression)" display name
    for name in sorted(data[0].keys()):

        # All inputs and outputs that change.
        if name in metadata:
            if metadata[name]['iotype'] == 'in':
                inputs.append(name)
            else:
                outputs.append(name)

        # Include objectives and constraints from all simulation levels.
        elif '_pseudo_' in name and not name.endswith('.out0'):
            for exp_name, exp_dict in expressions.items():
                if exp_dict['pcomp_name'] == name:
                    pseudos[name] = '%s(%s)' % (exp_dict['data_type'],
                                                exp_name)
                    break
            else:
                raise RuntimeError('Cannot find %r in expressions' % name)
            outputs.append(name)

        # Allow private vars from components.
        elif '.' in name:
            outputs.append(name)

    # Open the CSV file with a context manager so the handle is closed even
    # if a write raises (the original leaked the file on error, and carried
    # a dead "outfile is None" check inside the loop).
    # 'wb' matches the original (Python 2) csv.writer usage.
    with open(filename, 'wb') as outfile:
        csv_writer = csv.writer(outfile,
                                delimiter=delimiter,
                                quotechar=quotechar,
                                quoting=csv.QUOTE_NONNUMERIC)
        # No automatic data type conversion is performed unless the
        # QUOTE_NONNUMERIC format option is specified (in which case unquoted
        # fields are transformed into floats).

        # data is a list of lists where the inner list is the values and
        # metadata for a case.  The sorted column lists persist across
        # iterations so a row with no keys reuses the previous ordering
        # (matches the original behavior).
        sorted_input_keys = []
        sorted_input_values = []
        sorted_output_keys = []
        sorted_output_values = []
        header_size = 0

        for i, row in enumerate(data):

            input_keys = []
            input_values = []
            for name in inputs:
                obj = row[row.name_map[name]]
                for key, value in flatten_obj(name, obj):
                    input_keys.append(key)
                    input_values.append(value)

            output_keys = []
            output_values = []
            for name in outputs:
                obj = row[row.name_map[name]]
                # Report pseudo-components under their expression names.
                if name in pseudos:
                    name = pseudos[name]
                for key, value in flatten_obj(name, obj):
                    output_keys.append(key)
                    output_values.append(value)

            # This should not be necessary, however python's csv writer
            # is not writing boolean variables correctly as strings.
            input_values = [str(v) if isinstance(v, bool) else v
                            for v in input_values]
            output_values = [str(v) if isinstance(v, bool) else v
                             for v in output_values]

            # Sort the columns alphabetically, keeping keys/values paired.
            if input_keys:
                sorted_input_keys, sorted_input_values = \
                    (list(item) for item in zip(*sorted(zip(input_keys,
                                                            input_values))))
            if output_keys:
                sorted_output_keys, sorted_output_values = \
                    (list(item) for item in zip(*sorted(zip(output_keys,
                                                            output_values))))

            # Write the header row once, before the first data row.
            if i == 0:
                headers = ['timestamp', '/INPUTS']
                headers.extend(sorted_input_keys)
                headers.append('/OUTPUTS')
                headers.extend(sorted_output_keys)
                headers.extend(['/METADATA', 'uuid', 'parent_uuid', 'msg'])

                csv_writer.writerow(headers)
                header_size = len(headers)

            timestamp = row[row.name_map['timestamp']]
            csv_data = [timestamp]
            csv_data.append('')          # '/INPUTS' spacer column
            csv_data.extend(sorted_input_values)
            csv_data.append('')          # '/OUTPUTS' spacer column
            csv_data.extend(sorted_output_values)
            exc = row[row.name_map['error_message']]
            msg = '' if exc is None else str(exc)
            case_uuid = row[row.name_map['_id']]
            parent_uuid = row[row.name_map['_parent_id']]
            csv_data.extend(['', case_uuid, parent_uuid, msg])

            # Guard against a row whose column set differs from the header.
            if header_size != len(csv_data):
                raise RuntimeError(
                    "number of data points (%d) doesn't match header"
                    " size (%d) in CSV recorder" % (len(csv_data), header_size))

            csv_writer.writerow(csv_data)