Example #1
def test_recarray():
    # check roundtrip of structured array
    dt = [('f1', 'f8'),
          ('f2', 'S10')]
    arr = np.zeros((2,), dtype=dt)
    arr[0]['f1'] = 0.5
    arr[0]['f2'] = 'python'
    arr[1]['f1'] = 99
    arr[1]['f2'] = 'not perl'
    stream = BytesIO()
    savemat(stream, {'arr': arr})
    d = loadmat(stream, struct_as_record=False)
    a20 = d['arr'][0,0]
    yield assert_equal, a20.f1, 0.5
    yield assert_equal, a20.f2, 'python'
    d = loadmat(stream, struct_as_record=True)
    a20 = d['arr'][0,0]
    yield assert_equal, a20['f1'], 0.5
    yield assert_equal, a20['f2'], 'python'
    # structs always come back as object types
    yield assert_equal, a20.dtype, np.dtype([('f1', 'O'),
                                             ('f2', 'O')])
    a21 = d['arr'].flat[1]
    yield assert_equal, a21['f1'], 99
    yield assert_equal, a21['f2'], 'not perl'
Example #2
def test_read_opts():
    # tests if read is seeing option sets, at initialization and after
    # initialization
    arr = np.arange(6).reshape(1,6)
    stream = BytesIO()
    savemat(stream, {'a': arr})
    rdr = MatFile5Reader(stream)
    back_dict = rdr.get_variables()
    rarr = back_dict['a']
    assert_array_equal(rarr, arr)
    rdr = MatFile5Reader(stream, squeeze_me=True)
    assert_array_equal(rdr.get_variables()['a'], arr.reshape((6,)))
    rdr.squeeze_me = False
    assert_array_equal(rarr, arr)
    rdr = MatFile5Reader(stream, byte_order=boc.native_code)
    assert_array_equal(rdr.get_variables()['a'], arr)
    # inverted byte code leads to error on read because of swapped
    # header etc
    rdr = MatFile5Reader(stream, byte_order=boc.swapped_code)
    assert_raises(Exception, rdr.get_variables)
    rdr.byte_order = boc.native_code
    assert_array_equal(rdr.get_variables()['a'], arr)
    arr = np.array(['a string'])
    stream.truncate(0)
    stream.seek(0)
    savemat(stream, {'a': arr})
    rdr = MatFile5Reader(stream)
    assert_array_equal(rdr.get_variables()['a'], arr)
    rdr = MatFile5Reader(stream, chars_as_strings=False)
    carr = np.atleast_2d(np.array(list(arr.item()), dtype='U1'))
    assert_array_equal(rdr.get_variables()['a'], carr)
    rdr.chars_as_strings = True
    assert_array_equal(rdr.get_variables()['a'], arr)
Example #3
def save(self, path=None, full_save=False):

    import os
    try:
        import cPickle as pickle  # Python 2
    except ImportError:
        import pickle             # Python 3

    path = Analyzer.save(self, path=path)

    for roi, scores in self.scores.items():

        # Save full object!
        if full_save:
            with open(os.path.join(path, '%s_scores.pickle' % (roi)), 'wb') as output:
                pickle.dump(scores, output)

        for p, score in enumerate(scores):

            mat_score = self._save_score(score)

            # TODO: Better use of cv and attributes for leave-one-subject-out
            filename = "%s_perm_%04d_data.mat" % (roi, p)
            logger.info("Saving %s" % (filename))

            savemat(os.path.join(path, filename), mat_score)

    return
Example #4
def test_gzip_simple():
    xdense = np.zeros((20,20))
    xdense[2,3] = 2.3
    xdense[4,5] = 4.5
    x = SP.csc_matrix(xdense)

    name = 'gzip_test'
    expected = {'x':x}
    format = '4'

    tmpdir = mkdtemp()
    try:
        fname = pjoin(tmpdir,name)
        mat_stream = gzip.open(fname,mode='wb')
        savemat(mat_stream, expected, format=format)
        mat_stream.close()

        mat_stream = gzip.open(fname,mode='rb')
        actual = loadmat(mat_stream, struct_as_record=True)
        mat_stream.close()
    finally:
        shutil.rmtree(tmpdir)

    assert_array_almost_equal(actual['x'].todense(),
                              expected['x'].todense(),
                              err_msg=repr(actual))
Example #5
def test_empty_string():
    # make sure reading empty string does not raise error
    estring_fname = pjoin(test_data_path, 'single_empty_string.mat')
    fp = open(estring_fname, 'rb')
    rdr = MatFile5Reader(fp)
    d = rdr.get_variables()
    fp.close()
    assert_array_equal(d['a'], np.array([], dtype='U1'))
    # empty string round trip.  Matlab cannot distinguish
    # between a string array that is empty, and a string array
    # containing a single empty string, because it stores strings as
    # arrays of char.  There is no way of having an array of char that
    # is not empty, but contains an empty string.
    stream = BytesIO()
    savemat(stream, {'a': np.array([''])})
    rdr = MatFile5Reader(stream)
    d = rdr.get_variables()
    assert_array_equal(d['a'], np.array([], dtype='U1'))
    stream.truncate(0)
    stream.seek(0)
    savemat(stream, {'a': np.array([], dtype='U1')})
    rdr = MatFile5Reader(stream)
    d = rdr.get_variables()
    assert_array_equal(d['a'], np.array([], dtype='U1'))
    stream.close()
Example #6
def test_unicode_mat4():
    # Mat4 should save unicode as latin1
    bio = BytesIO()
    var = {'second_cat': u('Schrödinger')}
    savemat(bio, var, format='4')
    var_back = loadmat(bio)
    assert_equal(var_back['second_cat'], var['second_cat'])
Example #7
def test_fieldnames():
    # Check that field names are as expected
    stream = BytesIO()
    savemat(stream, {"a": {"a": 1, "b": 2}})
    res = loadmat(stream)
    field_names = res["a"].dtype.names
    assert_equal(set(field_names), set(("a", "b")))
Example #8
def test_unicode_mat4():
    # Mat4 should save unicode as latin1
    bio = BytesIO()
    var = {"second_cat": u("Schrödinger")}
    savemat(bio, var, format="4")
    var_back = loadmat(bio)
    assert_equal(var_back["second_cat"], var["second_cat"])
Example #9
def test_save_dict():
    # Test that dict can be saved (as recarray), loaded as matstruct
    d = {'a':1, 'b':2}
    stream = StringIO()
    savemat(stream, {'dict':d})
    stream.seek(0)
    vals = loadmat(stream)
Example #10
def test_fieldnames():
    # Check that field names are as expected
    stream = BytesIO()
    savemat(stream, {'a': {'a':1, 'b':2}})
    res = loadmat(stream)
    field_names = res['a'].dtype.names
    assert_equal(set(field_names), set(('a', 'b')))
Example #11
def test_save_dict():
    # Test that dict can be saved (as recarray), loaded as matstruct
    dict_types = ((dict, False),)
    try:
        from collections import OrderedDict
    except ImportError:
        pass
    else:
        dict_types += ((OrderedDict, True),)
    ab_exp = np.array([[(1, 2)]], dtype=[('a', object), ('b', object)])
    ba_exp = np.array([[(2, 1)]], dtype=[('b', object), ('a', object)])
    for dict_type, is_ordered in dict_types:
        # Initialize with tuples to keep order for OrderedDict
        d = dict_type([('a', 1), ('b', 2)])
        stream = BytesIO()
        savemat(stream, {'dict': d})
        stream.seek(0)
        vals = loadmat(stream)['dict']
        assert_equal(set(vals.dtype.names), set(['a', 'b']))
        if is_ordered:  # Input was ordered, output in ab order
            assert_array_equal(vals, ab_exp)
        else:  # Not ordered input, either order output
            if vals.dtype.names[0] == 'a':
                assert_array_equal(vals, ab_exp)
            else:
                assert_array_equal(vals, ba_exp)
Example #12
def test_scalar_squeeze():
    stream = BytesIO()
    in_d = {"scalar": [[0.1]], "string": "my name", "st": {"one": 1, "two": 2}}
    savemat(stream, in_d)
    out_d = loadmat(stream, squeeze_me=True)
    assert_(isinstance(out_d["scalar"], float))
    assert_(isinstance(out_d["string"], string_types))
    assert_(isinstance(out_d["st"], np.ndarray))
Example #13
def test_mat_struct_squeeze():
    stream = BytesIO()
    in_d = {"st": {"one": 1, "two": 2}}
    savemat(stream, in_d)
    # no error without squeeze
    out_d = loadmat(stream, struct_as_record=False)
    # previous error was with squeeze, with mat_struct
    out_d = loadmat(stream, struct_as_record=False, squeeze_me=True)
Example #14
def test_scalar_squeeze():
    stream = BytesIO()
    in_d = {'scalar': [[0.1]], 'string': 'my name', 'st':{'one':1, 'two':2}}
    savemat(stream, in_d)
    out_d = loadmat(stream, squeeze_me=True)
    assert_(isinstance(out_d['scalar'], float))
    assert_(isinstance(out_d['string'], string_types))
    assert_(isinstance(out_d['st'], np.ndarray))
Example #15
def test_sparse_in_struct():
    # reproduces bug found by DC where Cython code was insisting on
    # ndarray return type, but getting sparse matrix
    st = {'sparsefield': SP.coo_matrix(np.eye(4))}
    stream = BytesIO()
    savemat(stream, {'a':st})
    d = loadmat(stream, struct_as_record=True)
    yield assert_array_equal, d['a'][0,0]['sparsefield'].todense(), np.eye(4)
Example #16
def test_long_field_names():
    # Test limit for length of field names in structs
    lim = 63
    fldname = "a" * lim
    st1 = np.zeros((1, 1), dtype=[(fldname, object)])
    savemat(BytesIO(), {"longstruct": st1}, format="5", long_field_names=True)
    fldname = "a" * (lim + 1)
    st1 = np.zeros((1, 1), dtype=[(fldname, object)])
    assert_raises(ValueError, savemat, BytesIO(), {"longstruct": st1}, format="5", long_field_names=True)
Example #17
def test_save_empty_dict():
    # saving empty dict also gives empty struct
    stream = BytesIO()
    savemat(stream, {'arr': {}})
    d = loadmat(stream)
    a = d['arr']
    assert_equal(a.shape, (1,1))
    assert_equal(a.dtype, np.dtype(object))
    assert_(a[0,0] is None)
Example #18
def test_regression_653():
    # Saving a dictionary with only invalid keys used to raise an error. Now we
    # save this as an empty struct in matlab space.
    sio = BytesIO()
    savemat(sio, {'d':{1:2}}, format='5')
    back = loadmat(sio)['d']
    # Check we got an empty struct equivalent
    assert_equal(back.shape, (1,1))
    assert_equal(back.dtype, np.dtype(object))
    assert_(back[0,0] is None)
Example #19
def test_structname_len():
    # Test limit for length of field names in structs
    lim = 31
    fldname = "a" * lim
    st1 = np.zeros((1, 1), dtype=[(fldname, object)])
    mat_stream = BytesIO()
    savemat(BytesIO(), {"longstruct": st1}, format="5")
    fldname = "a" * (lim + 1)
    st1 = np.zeros((1, 1), dtype=[(fldname, object)])
    assert_raises(ValueError, savemat, BytesIO(), {"longstruct": st1}, format="5")
Example #20
def test_round_types():
    # Check that saving, loading preserves dtype in most cases
    arr = np.arange(10)
    stream = BytesIO()
    for dts in ("f8", "f4", "i8", "i4", "i2", "i1", "u8", "u4", "u2", "u1", "c16", "c8"):
        stream.truncate(0)
        stream.seek(0)  # needed for BytesIO in python 3
        savemat(stream, {"arr": arr.astype(dts)})
        vars = loadmat(stream)
        assert_equal(np.dtype(dts), vars["arr"].dtype)
Example #21
def test_empty_sparse():
    # Can we read empty sparse matrices?
    sio = BytesIO()
    import scipy.sparse
    empty_sparse = scipy.sparse.csr_matrix([[0,0],[0,0]])
    savemat(sio, dict(x=empty_sparse))
    sio.seek(0)
    res = loadmat(sio)
    assert_array_equal(res['x'].shape, empty_sparse.shape)
    assert_array_equal(res['x'].todense(), 0)
Example #22
def test_structname_len():
    # Test limit for length of field names in structs
    lim = 31
    fldname = 'a' * lim
    st1 = np.zeros((1,1), dtype=[(fldname, object)])
    savemat(BytesIO(), {'longstruct': st1}, format='5')
    fldname = 'a' * (lim+1)
    st1 = np.zeros((1,1), dtype=[(fldname, object)])
    assert_raises(ValueError, savemat, BytesIO(),
                  {'longstruct': st1}, format='5')
Example #23
def test_mat4_3d():
    # test behavior when writing 3D arrays to matlab 4 files
    stream = StringIO()
    arr = np.arange(24).reshape((2,3,4))
    warnings.simplefilter('error')
    yield (assert_raises, DeprecationWarning, savemat, 
           stream, {'a': arr}, True, '4')
    warnings.resetwarnings()
    savemat(stream, {'a': arr}, format='4')
    d = loadmat(stream)
    yield assert_array_equal, d['a'], arr.reshape((6,4))
Example #24
def test_round_types():
    # Check that saving, loading preserves dtype in most cases
    arr = np.arange(10)
    stream = BytesIO()
    for dts in ('f8','f4','i8','i4','i2','i1',
                'u8','u4','u2','u1','c16','c8'):
        stream.truncate(0)
        stream.seek(0)  # needed for BytesIO in python 3
        savemat(stream, {'arr': arr.astype(dts)})
        vars = loadmat(stream)
        assert_equal(np.dtype(dts), vars['arr'].dtype)
Example #25
def test_long_field_names():
    # Test limit for length of field names in structs
    lim = 63
    fldname = 'a' * lim
    st1 = np.zeros((1,1), dtype=[(fldname, object)])
    mat_stream = StringIO()
    savemat(StringIO(), {'longstruct': st1}, format='5',long_field_names=True)
    fldname = 'a' * (lim+1)
    st1 = np.zeros((1,1), dtype=[(fldname, object)])
    assert_raises(ValueError, savemat, StringIO(),
                  {'longstruct': st1}, format='5',long_field_names=True)
Example #26
def test_mat_struct_squeeze():
    stream = BytesIO()
    in_d = {'st':{'one':1, 'two':2}}
    savemat(stream, in_d)
    # no error without squeeze
    out_d = loadmat(stream, struct_as_record=False)
    # previous error was with squeeze, with mat_struct
    out_d = loadmat(stream,
                    struct_as_record=False,
                    squeeze_me=True,
                    )
Example #27
def test_cell_with_one_thing_in_it():
    # Regression test - make a cell array that's 1 x 2 and put two
    # strings in it.  It works. Make a cell array that's 1 x 1 and put
    # a string in it. It should work but, in the old days, it didn't.
    cells = np.ndarray((1,2),dtype=object)
    cells[0,0] = 'Hello'
    cells[0,1] = 'World'
    savemat(BytesIO(), {'x': cells}, format='5')

    cells = np.ndarray((1,1),dtype=object)
    cells[0,0] = 'Hello, world'
    savemat(BytesIO(), {'x': cells}, format='5')
Example #28
def test_1d_shape():
    # Current 5 behavior is 1D -> column vector
    arr = np.arange(5)
    stream = StringIO()
    savemat(stream, {'oned':arr}, format='5')
    vals = loadmat(stream)
    yield assert_equal, vals['oned'].shape, (5,1)
    # Current 4 behavior is 1D -> row vector
    arr = np.arange(5)
    stream = StringIO()
    savemat(stream, {'oned':arr}, format='4')
    vals = loadmat(stream)
    yield assert_equal, vals['oned'].shape, (1, 5)
    for format in ('4', '5'):
        # can be explicitly 'column' for oned_as
        stream = StringIO()
        savemat(stream, {'oned':arr}, 
                format=format,
                oned_as='column')
        vals = loadmat(stream)
        yield assert_equal, vals['oned'].shape, (5,1)
        # but different from 'row'
        stream = StringIO()
        savemat(stream, {'oned':arr}, 
                format=format,
                oned_as='row')
        vals = loadmat(stream)
        yield assert_equal, vals['oned'].shape, (1,5)
Example #29
def test_1d_shape():
    # Current 5 behavior is 1D -> column vector
    arr = np.arange(5)
    stream = BytesIO()
    warn_ctx = WarningManager()
    warn_ctx.__enter__()
    try:
        # silence warnings for tests
        warnings.simplefilter('ignore')
        savemat(stream, {'oned':arr}, format='5')
        vals = loadmat(stream)
        assert_equal(vals['oned'].shape, (5,1))
        # Current 4 behavior is 1D -> row vector
        stream = BytesIO()
        savemat(stream, {'oned':arr}, format='4')
        vals = loadmat(stream)
        assert_equal(vals['oned'].shape, (1, 5))
        for format in ('4', '5'):
            # can be explicitly 'column' for oned_as
            stream = BytesIO()
            savemat(stream, {'oned':arr},
                    format=format,
                    oned_as='column')
            vals = loadmat(stream)
            assert_equal(vals['oned'].shape, (5,1))
            # but different from 'row'
            stream = BytesIO()
            savemat(stream, {'oned':arr},
                    format=format,
                    oned_as='row')
            vals = loadmat(stream)
            assert_equal(vals['oned'].shape, (1,5))
    finally:
        warn_ctx.__exit__()
Example #30
def test_compression():
    arr = np.zeros(100).reshape((5,20))
    arr[2,10] = 1
    stream = BytesIO()
    savemat(stream, {'arr':arr})
    raw_len = len(stream.getvalue())
    vals = loadmat(stream)
    yield assert_array_equal, vals['arr'], arr
    stream = BytesIO()
    savemat(stream, {'arr':arr}, do_compression=True)
    compressed_len = len(stream.getvalue())
    vals = loadmat(stream)
    yield assert_array_equal, vals['arr'], arr
    yield assert_, raw_len > compressed_len
    # Concatenate, test later
    arr2 = arr.copy()
    arr2[0,0] = 1
    stream = BytesIO()
    savemat(stream, {'arr':arr, 'arr2':arr2}, do_compression=False)
    vals = loadmat(stream)
    yield assert_array_equal, vals['arr2'], arr2
    stream = BytesIO()
    savemat(stream, {'arr':arr, 'arr2':arr2}, do_compression=True)
    vals = loadmat(stream)
    yield assert_array_equal, vals['arr2'], arr2
Example #31
def test_write_opposite_endian():
    # We don't support writing opposite endian .mat files, but we need to behave
    # correctly if the user supplies an other-endian NumPy array to write out.
    float_arr = np.array([[2., 3.], [3., 4.]])
    int_arr = np.arange(6).reshape((2, 3))
    uni_arr = np.array(['hello', 'world'], dtype='U')
    stream = BytesIO()
    savemat(
        stream, {
            'floats': float_arr.byteswap().newbyteorder(),
            'ints': int_arr.byteswap().newbyteorder(),
            'uni_arr': uni_arr.byteswap().newbyteorder()
        })
    rdr = MatFile5Reader(stream)
    d = rdr.get_variables()
    assert_array_equal(d['floats'], float_arr)
    assert_array_equal(d['ints'], int_arr)
    assert_array_equal(d['uni_arr'], uni_arr)
    stream.close()
Example #32
def test_long_field_names_in_struct():
    # Regression test - long_field_names was erased if you passed a struct
    # within a struct
    lim = 63
    fldname = 'a' * lim
    cell = np.ndarray((1, 2), dtype=object)
    st1 = np.zeros((1, 1), dtype=[(fldname, object)])
    cell[0, 0] = st1
    cell[0, 1] = st1
    mat_stream = BytesIO()
    savemat(BytesIO(), {'longstruct': cell}, format='5', long_field_names=True)
    #
    # Check to make sure it fails with long field names off
    #
    assert_raises(ValueError,
                  savemat,
                  BytesIO(), {'longstruct': cell},
                  format='5',
                  long_field_names=False)
Example #33
def test_save_dict():
    # Test that dict can be saved (as recarray), loaded as matstruct
    dict_types = ((dict, False), (OrderedDict, True),)
    ab_exp = np.array([[(1, 2)]], dtype=[('a', object), ('b', object)])
    ba_exp = np.array([[(2, 1)]], dtype=[('b', object), ('a', object)])
    for dict_type, is_ordered in dict_types:
        # Initialize with tuples to keep order for OrderedDict
        d = dict_type([('a', 1), ('b', 2)])
        stream = BytesIO()
        savemat(stream, {'dict': d})
        stream.seek(0)
        vals = loadmat(stream)['dict']
        assert_equal(set(vals.dtype.names), set(['a', 'b']))
        if is_ordered:  # Input was ordered, output in ab order
            assert_array_equal(vals, ab_exp)
        else:  # Not ordered input, either order output
            if vals.dtype.names[0] == 'a':
                assert_array_equal(vals, ab_exp)
            else:
                assert_array_equal(vals, ba_exp)
Example #34
def test_varmats_from_mat():
    # Make a mat file with several variables, write it, read it back
    names_vars = (('arr', mlarr(np.arange(10))), ('mystr', mlarr('a string')),
                  ('mynum', mlarr(10)))

    # Dict like thing to give variables in defined order
    class C(object):
        def items(self):
            return names_vars

    stream = BytesIO()
    savemat(stream, C())
    varmats = varmats_from_mat(stream)
    assert_equal(len(varmats), 3)
    for i in range(3):
        name, var_stream = varmats[i]
        exp_name, exp_res = names_vars[i]
        assert_equal(name, exp_name)
        res = loadmat(var_stream)
        assert_array_equal(res[name], exp_res)
Example #35
def test_multiple_open():
    # Ticket #1039, on Windows: check that files are not left open
    tmpdir = mkdtemp()
    try:
        x = dict(x=np.zeros((2, 2)))

        fname = pjoin(tmpdir, "a.mat")

        # Check that file is not left open
        savemat(fname, x, oned_as='row')
        os.unlink(fname)
        savemat(fname, x, oned_as='row')
        loadmat(fname)
        os.unlink(fname)

        # Check that stream is left open
        f = open(fname, 'wb')
        savemat(f, x, oned_as='column')
        f.seek(0)
        f.close()

        f = open(fname, 'rb')
        loadmat(f)
        f.seek(0)
        f.close()
    finally:
        shutil.rmtree(tmpdir)
Example #36
def test_str_round():
    # from report by Angus McMorland on mailing list 3 May 2010
    stream = BytesIO()
    in_arr = np.array(['Hello', 'Foob'])
    out_arr = np.array(['Hello', 'Foob '])
    savemat(stream, dict(a=in_arr))
    res = loadmat(stream)
    # resulted in ['HloolFoa', 'elWrdobr']
    assert_array_equal(res['a'], out_arr)
    stream.truncate(0)
    stream.seek(0)
    # Make Fortran ordered version of string
    in_str = in_arr.tostring(order='F')
    in_from_str = np.ndarray(shape=in_arr.shape,
                             dtype=in_arr.dtype,
                             order='F',
                             buffer=in_str)
    savemat(stream, dict(a=in_from_str))
    res = loadmat(stream)
    assert_array_equal(res['a'], out_arr)
    # unicode save did lead to buffer too small error
    stream.truncate(0)
    stream.seek(0)
    in_arr_u = in_arr.astype('U')
    out_arr_u = out_arr.astype('U')
    savemat(stream, {'a': in_arr_u})
    res = loadmat(stream)
    assert_array_equal(res['a'], out_arr_u)
Example #37
def test_recarray():
    # check roundtrip of structured array
    dt = [('f1', 'f8'), ('f2', 'S10')]
    arr = np.zeros((2, ), dtype=dt)
    arr[0]['f1'] = 0.5
    arr[0]['f2'] = 'python'
    arr[1]['f1'] = 99
    arr[1]['f2'] = 'not perl'
    stream = BytesIO()
    savemat(stream, {'arr': arr})
    d = loadmat(stream, struct_as_record=False)
    a20 = d['arr'][0, 0]
    assert_equal(a20.f1, 0.5)
    assert_equal(a20.f2, 'python')
    d = loadmat(stream, struct_as_record=True)
    a20 = d['arr'][0, 0]
    assert_equal(a20['f1'], 0.5)
    assert_equal(a20['f2'], 'python')
    # structs always come back as object types
    assert_equal(a20.dtype, np.dtype([('f1', 'O'), ('f2', 'O')]))
    a21 = d['arr'].flat[1]
    assert_equal(a21['f1'], 99)
    assert_equal(a21['f2'], 'not perl')
Example #38
def test_gzip_simple():
    xdense = np.zeros((20, 20))
    xdense[2, 3] = 2.3
    xdense[4, 5] = 4.5
    x = SP.csc_matrix(xdense)

    name = 'gzip_test'
    expected = {'x': x}
    format = '4'

    tmpdir = mkdtemp()
    try:
        fname = join(tmpdir, name)
        mat_stream = gzip.open(fname, mode='wb')
        savemat(mat_stream, expected, format=format)
        mat_stream.close()

        mat_stream = gzip.open(fname, mode='rb')
        actual = loadmat(mat_stream, struct_as_record=True)
        mat_stream.close()
    finally:
        shutil.rmtree(tmpdir)

    assert_array_almost_equal(actual['x'].todense(), expected['x'].todense())
Example #39
def test_1d_shape():
    # Current 5 behavior is 1D -> column vector
    arr = np.arange(5)
    stream = StringIO()
    savemat(stream, {'oned': arr}, format='5')
    vals = loadmat(stream)
    yield assert_equal, vals['oned'].shape, (5, 1)
    # Current 4 behavior is 1D -> row vector
    arr = np.arange(5)
    stream = StringIO()
    savemat(stream, {'oned': arr}, format='4')
    vals = loadmat(stream)
    yield assert_equal, vals['oned'].shape, (1, 5)
    for format in ('4', '5'):
        # can be explicitly 'column' for oned_as
        stream = StringIO()
        savemat(stream, {'oned': arr}, format=format, oned_as='column')
        vals = loadmat(stream)
        yield assert_equal, vals['oned'].shape, (5, 1)
        # but different from 'row'
        stream = StringIO()
        savemat(stream, {'oned': arr}, format=format, oned_as='row')
        vals = loadmat(stream)
        yield assert_equal, vals['oned'].shape, (1, 5)
Example #40
def test_1d_shape():
    # New 5 behavior is 1D -> row vector
    arr = np.arange(5)
    for format in ('4', '5'):
        # Row is the default
        stream = BytesIO()
        savemat(stream, {'oned': arr}, format=format)
        vals = loadmat(stream)
        assert_equal(vals['oned'].shape, (1, 5))
        # can be explicitly 'column' for oned_as
        stream = BytesIO()
        savemat(stream, {'oned': arr}, format=format, oned_as='column')
        vals = loadmat(stream)
        assert_equal(vals['oned'].shape, (5, 1))
        # but different from 'row'
        stream = BytesIO()
        savemat(stream, {'oned': arr}, format=format, oned_as='row')
        vals = loadmat(stream)
        assert_equal(vals['oned'].shape, (1, 5))
Example #41
        if each in exclude_list:
            continue
        mdict[each] = input_file.variables[each][:]
        print each,
    except Exception:
        missing.append(each)
        continue
print ""

if len(missing) > 0:
    print "Warning! the following variable(s) was(were) not found in '%s':" % (input_name), missing

print "Excluded variables: ", exclude_list

if mdict.keys() == []:
    print "WARNING:  No data to write. Exiting ..."
    print_options_and_exit(-2)

print "Writing data to %s..." % (output_name)
try:
    from scipy.io.matlab.mio import savemat
except:
    print "ERROR! Can't import 'savemat' from scipy.io.matlab.mio.  Exiting ..."
    exit(-3)
try:
    savemat(output_name, mdict, appendmat=False, format='5')
except:
    print "ERROR! Can't write to %s. Exiting ..." % (output_name)
    exit(-4)
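
The fragment above starts mid-loop, so the setup that defines input_file, mdict, missing, exclude_list and the list of requested variables is not shown. For orientation, here is a minimal self-contained sketch of the same NetCDF-variables-to-.mat pattern; the netCDF4 package, the file names and the variable lists are illustrative assumptions, not part of the original script:

from netCDF4 import Dataset   # assumed reader; any object exposing a .variables mapping works
from scipy.io import savemat

input_name = 'input.nc'                 # illustrative file names
output_name = 'output.mat'
variables = ['thk', 'usurf', 'time']    # illustrative list of requested variable names
exclude_list = ['time_bounds']          # illustrative exclusions

input_file = Dataset(input_name)
mdict = {}
missing = []
for each in variables:
    try:
        if each in exclude_list:
            continue
        mdict[each] = input_file.variables[each][:]   # read the full array for this variable
    except Exception:
        missing.append(each)

if missing:
    print("Warning! the following variable(s) were not found in '%s': %s" % (input_name, missing))

savemat(output_name, mdict, appendmat=False, format='5')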
Example #42
    def setUp(self):

        self.prior = np.array([1, 0, 0])
        self.transmat = np.matrix([[0, 1, 0], [0, 0, 1], [0, 0, 1]])

        self.mu = np.zeros((2, 3, 2))
        self.mu[:, :, 0] = np.array([[1, 2, 3], [1, 2, 3]])
        self.mu[:, :, 1] = np.array([[4, 5, 6], [4, 5, 6]])

        self.Sigma = np.zeros((2, 2, 3, 2))
        for i in range(3):
            self.Sigma[:, :, i, 0] = np.diag(np.ones((2, )) * 0.01)
            self.Sigma[:, :, i, 1] = np.diag(np.ones((2, )) * 0.01)

        self.mixmat = np.array([[.5, .5], [.5, .5], [.5, .5]])

        try:
            with open('MhmmEM2DTest.cache', 'rb') as f:
                cache = load(f)

            self.obs = cache['obs']
            self.prior0 = cache['prior0']
            self.transmat0 = cache['transmat0']
            self.mu0 = cache['mu0']
            self.Sigma0 = cache['Sigma0']
            self.mixmat0 = cache['mixmat0']

        except:

            self.obs, hidden = mhmm_sample(T=4,
                                           numex=100,
                                           initial_prob=self.prior,
                                           transmat=self.transmat,
                                           mu=self.mu,
                                           Sigma=self.Sigma,
                                           mixmat=self.mixmat)

            self.prior0, _ = mk_stochastic(np.random.rand(3))
            self.transmat0, _ = mk_stochastic(np.random.rand(3, 3))

            self.mu0 = np.zeros((2, 3, 2))
            self.mu0[:, :, 0] = np.array([[1.5, 2.5, 3.5], [1.5, 2.5, 3.5]])
            self.mu0[:, :, 1] = np.array([[4.5, 5.5, 6.5], [4.5, 5.5, 6.5]])

            self.Sigma0 = np.zeros((2, 2, 3, 2))
            for i in range(3):
                self.Sigma0[:, :, i, 0] = np.diag(np.ones((2, )) * 1.0)
                self.Sigma0[:, :, i, 1] = np.diag(np.ones((2, )) * 1.0)

            self.mixmat0 = np.array([[.2, .8], [.2, .8], [.2, .8]])

            cache = {
                'obs': self.obs,
                'prior0': self.prior0,
                'transmat0': self.transmat0,
                'mu0': self.mu0,
                'Sigma0': self.Sigma0,
                'mixmat0': self.mixmat0
            }

            with open('MhmmEM2DTest.cache', 'wb') as f:
                dump(cache, f)
            savemat('MhmmEM2DTest.mat', cache)
Example #43
        #         IApos=InertialAxis
        #         rho=1.02
        AELAOPTS = AeroelasticOps(EApos, IApos, 0.08891)
        #         AELAOPTS = AeroelasticOps(EApos,IApos,rho)

        Settings.OutputFileRoot = 'M' + str(M) + 'N' + str(N) + '_V' + str(
            U_mag) + '_alpha' + str(alpha)

        # solve static problem
        PosDefor, PsiDefor, Zeta, ZetaStar, Gamma, GammaStar, iForceStep, NumNodes_tot, NumDof, XBELEM, XBNODE, PosIni, PsiIni, Uext = Solve_Py(
            XBINPUT, XBOPTS, VMOPTS, VMINPUT, AELAOPTS)

        # save reference gamma
        if True:
            fileName = Settings.OutputDir + Settings.OutputFileRoot + '_Gamma0'
            savemat(fileName, {'Gamma0': Gamma}, True)

        ### linearized beam model
        A, B, C, kMat, phiSort = genSSbeam(XBINPUT,
                                           NumNodes_tot,
                                           NumDof,
                                           XBELEM,
                                           XBNODE,
                                           PosIni,
                                           PsiIni,
                                           PosDefor,
                                           PsiDefor,
                                           XBOPTS,
                                           chord,
                                           U_mag,
                                           modal=10)
Example #44
            'ssim': ssim,
            'CoordinateChannel2D': CoordinateChannel2D
        })

    output_path = './output_file/'
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    testImagePath = args.test_path

    fileName = os.listdir(testImagePath)

    for i in range(len(fileName)):

        start_time = time.time()

        img = cv2.imread(testImagePath + fileName[i])

        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255
        results = generate_output(img, model)
        end_time = time.time()

        print('predicted time', end_time - start_time)
        print(fileName[i].split('clean')[0][0:-1] + '.mat')
        savemat(output_path + fileName[i].split('clean')[0][0:-1] + '.mat',
                {'cube': results})

        print(i)

    print("output files saved in " + output_path)
Example #45
    n_im, h, w, c = udc_mat.shape
    results = udc_mat.copy()
    for i in range(n_im):
        udc = np.reshape(udc_mat[i, :, :, :], (h, w, c))
        restored, run_time = restoration(opt, generator, udc, i, run_time)
        results[i, :, :, :] = restored
    print(run_time)

    # create results directory
    res_dir = 'res_dir'
    os.makedirs(os.path.join(work_dir, res_dir), exist_ok=True)

    # save images in a .mat file with dictionary key "results"
    res_fn = os.path.join(work_dir, res_dir, 'results.mat')
    res_key = 'results'  # Note: do not change this key, the evaluation code will look for this key
    savemat(res_fn, {res_key: results})

    # submission information
    # TODO: update the values below; the evaluation code will parse them
    runtime = run_time / 30  # seconds / image
    cpu_or_gpu = 0  # 0: GPU, 1: CPU
    method = 1  # 0: traditional methods, 1: deep learning method
    other = '(optional) any additional description or information'

    # prepare and save readme file
    readme_fn = os.path.join(work_dir, res_dir,
                             'readme.txt')  # Note: do not change 'readme.txt'
    with open(readme_fn, 'w') as readme_file:
        readme_file.write('Runtime (seconds / megapixel): %s\n' % str(runtime))
        readme_file.write('CPU[1] / GPU[0]: %s\n' % str(cpu_or_gpu))
        readme_file.write('Method: %s\n' % str(method))
Example #46
def prep_Result(opt):

    net = set_model(opt)
    _, net, _ = load_model(opt, net)

    if opt.n_channels == 1:
        from skimage.external.tifffile import imsave, imread
    else:
        from skimage.io import imsave, imread

    opt = set_gpu(opt)

    if opt.use_cuda:
        net = net.to(opt.device)

    if opt.multi_gpu:
        net = nn.DataParallel(net)

    set_test_dir(opt)
    if not os.path.exists(opt.test_result_dir):
        os.makedirs(opt.test_result_dir)

    res_img_dir = os.path.join(opt.test_result_dir, 'result_img_dir')
    if not os.path.exists(res_img_dir):
        os.makedirs(res_img_dir)

    # create results directory
    res_dir = os.path.join(opt.test_result_dir, 'res_dir')
    os.makedirs(os.path.join(res_dir), exist_ok=True)

    print('\ntest_result_dir : ', opt.test_result_dir)
    print('\nresult_img_dir : ', res_img_dir)
    print('\nresult_dir : ', res_dir)

    # total_psnr = 0
    # total_ssim = 0

    loss_criterion = nn.MSELoss()
    total_psnr = 0.0

    if opt.n_channels == 1:
        # load noisy images
        noisy_fn = 'siddplus_test_noisy_raw.mat'
        noisy_key = 'siddplus_test_noisy_raw'
        noisy_mat = loadmat(os.path.join(opt.test_dir, opt.dataset,
                                         noisy_fn))[noisy_key]

        # denoise
        n_im, h, w = noisy_mat.shape
        results = noisy_mat.copy()

        start_time = time.time()
        for i in range(n_im):
            print('\n[*]PROCESSING..{}/{}'.format(i, n_im))

            noisy = np.reshape(noisy_mat[i, :, :], (h, w))
            denoised = denoiser(opt, net, noisy)
            results[i, :, :] = denoised

            result_name = str(i) + '.tiff'
            concat_img = np.concatenate((denoised, noisy), axis=1)
            imsave(os.path.join(res_img_dir, result_name), concat_img)

            denoised = torch.Tensor(denoised)
            noisy = torch.Tensor(noisy)

            mse_loss = loss_criterion(denoised, noisy)
            psnr = 10 * math.log10(1 / mse_loss.item())
            total_psnr += psnr
            print('%.5fs .. [%d/%d] psnr : %.5f, avg_psnr : %.5f' %
                  (time.time() - start_time, i, n_im, psnr, total_psnr /
                   (i + 1)))

    else:
        # load noisy images
        noisy_fn = 'siddplus_test_noisy_srgb.mat'
        noisy_key = 'siddplus_test_noisy_srgb'
        noisy_mat = loadmat(os.path.join(opt.test_dir, opt.dataset,
                                         noisy_fn))[noisy_key]

        # denoise
        n_im, h, w, c = noisy_mat.shape
        results = noisy_mat.copy()

        start_time = time.time()
        for i in range(n_im):
            print('\n[*]PROCESSING..{}/{}'.format(i, n_im))

            noisy = np.reshape(noisy_mat[i, :, :, :], (h, w, c))
            denoised = denoiser(opt, net, noisy)
            results[i, :, :, :] = denoised

            result_name = str(i) + '.png'
            concat_img = np.concatenate((denoised, noisy), axis=1)
            imsave(os.path.join(res_img_dir, result_name), concat_img)

            denoised = torch.Tensor(denoised).float() / 255.0
            noisy = torch.Tensor(noisy).float() / 255.0

            mse_loss = loss_criterion(noisy, denoised)
            psnr = 10 * math.log10(1 / mse_loss.item())
            total_psnr += psnr
            print('%.5fs .. [%d/%d] psnr : %.5f, avg_psnr : %.5f' %
                  (time.time() - start_time, i, n_im, psnr, total_psnr /
                   (i + 1)))

    print("****total avg psnr : %.10f", total_psnr / (n_im))
    # save denoised images in a .mat file with dictionary key "results"
    res_fn = os.path.join(res_dir, 'results.mat')
    res_key = 'results'  # Note: do not change this key, the evaluation code will look for this key
    savemat(res_fn, {res_key: results})

    runtime = 0.0  # seconds / megapixel
    cpu_or_gpu = 0  # 0: GPU, 1: CPU
    use_metadata = 0  # 0: no use of metadata, 1: metadata used
    other = '(optional) any additional description or information'

    # prepare and save readme file
    readme_fn = os.path.join(res_dir,
                             'readme.txt')  # Note: do not change 'readme.txt'
    with open(readme_fn, 'w') as readme_file:
        readme_file.write('Runtime (seconds / megapixel): %s\n' % str(runtime))
        readme_file.write('CPU[1] / GPU[0]: %s\n' % str(cpu_or_gpu))
        readme_file.write('Metadata[1] / No Metadata[0]: %s\n' %
                          str(use_metadata))
        readme_file.write('Other description: %s\n' % str(other))

    # compress results directory
    res_zip_fn = 'results_dir'
    shutil.make_archive(os.path.join(opt.test_result_dir, res_zip_fn), 'zip',
                        res_dir)
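
The function above bundles denoising, evaluation, and packaging. For reference, a minimal self-contained sketch of just the packaging step it ends with (a results.mat keyed by 'results', a readme.txt, and a zip of the results directory) is given below; the directory names and the placeholder array are illustrative, not taken from the project:

import os
import shutil
import numpy as np
from scipy.io import savemat

res_dir = 'res_dir'                               # illustrative results directory
os.makedirs(res_dir, exist_ok=True)

results = np.zeros((2, 8, 8, 3), dtype=np.uint8)  # placeholder for the denoised images
savemat(os.path.join(res_dir, 'results.mat'), {'results': results})

with open(os.path.join(res_dir, 'readme.txt'), 'w') as readme_file:
    readme_file.write('Runtime (seconds / megapixel): 0.0\n')
    readme_file.write('CPU[1] / GPU[0]: 0\n')
    readme_file.write('Metadata[1] / No Metadata[0]: 0\n')
    readme_file.write('Other description: none\n')

shutil.make_archive('results_dir', 'zip', res_dir)  # creates results_dir.zip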
Example #47
        model.cuda()
        output = model(images)
        pred_tot.append(output.cpu().data.numpy())
        brut_tot.append(brut.data.numpy())
        true_tot.append(target.data.numpy())
        if save:
            tosave = {}
            name_save = 'prediction' + str(i) + '.mat'
            target = target.cpu().data.numpy()
            brut = brut.cpu().data.numpy()
            img = output.cpu().data.numpy()
            tosave['yt'] = target
            tosave['y'] = img
            tosave['x'] = brut
            mio.savemat(save_path + name_save, tosave)
#%%
plt.figure(1)

img = output.cpu().detach().numpy()

handle = plt.subplot(311)
handle.set_title('raw image')
plt.imshow(brut[0, 0, :, :], cmap='hot')

handle = plt.subplot(312)
handle.set_title('target image')
plt.imshow(target[0, 0, :, :], cmap='hot')

handle = plt.subplot(313)
handle.set_title('reconstructed image')
Example #48
def test_roundtrip_zero_dimensions():
    stream = BytesIO()
    savemat(stream, {'d':np.empty((10, 0))})
    d = loadmat(stream)
    assert d['d'].shape == (10, 0)
Example #49
def genLinearAerofoil(m, mW, writeToMat=False, e=0.25, f=0.75):
    """@brief Generate linear model of aerofoil.
    @param m Chordwise panels.
    @param mW Chordwise panels in wake.
    @param delS Non-dim time step.
    @param e location of pitch axis aft of LE [0,1], default 0.25.
    @param f location of flap hinge aft of LE [0,1], default 0.75.
    @param writeToMat write to file in Settings.OutputDir.
    """

    n = 1  # number of spanwise panels

    # infer delS from body discretization
    delS = 2.0 / m

    # hack
    #     factor = 4
    #     mW=factor*mW
    #     delS = delS/float(factor)

    # initialise states and inputs
    gam = np.zeros((m * n))
    gamW = np.zeros((mW * n))
    gamPri = np.zeros((m * n))
    chords = np.linspace(0.0, 1.0, m + 1, True)
    chordsW = np.linspace(1.0, 1.0 + mW * delS / 2.0, mW + 1, True)
    spans = np.linspace(0.0, 2000.0, n + 1, True)
    zeta = np.zeros(3 * len(chords) * len(spans))
    zetaW = np.zeros(3 * len(chordsW) * len(spans))
    zetaPri = np.zeros((3 * len(chords) * len(spans)))
    #zetaPri[0::3] = -1.0
    nu = np.zeros_like(zetaPri)
    nu[0::3] = 0.5
    kk = 0
    for c in chords:
        for s in spans:
            zeta[3 * kk] = c
            zeta[3 * kk + 1] = s
            kk = kk + 1
        # end for s
    # end for c
    kk = 0
    for c in chordsW:
        for s in spans:
            zetaW[3 * kk] = c
            zetaW[3 * kk + 1] = s
            kk = kk + 1
        # end for s
    # end for c

    # generate model
    E, F, G, C, D = genSSuvlm(gam, gamW, gamPri, zeta, zetaW, zetaPri, nu, m,
                              n, mW, delS, True)

    # convert inputs from general kinematics to aerofoil DoFs
    T = np.zeros((9 * (m + 1) * (n + 1), 5))
    for i in range(m + 1):
        for j in range(n + 1):
            q = i * (n + 1) + j
            # alpha, alphaPrime
            T[3 * (m + 1) * (n + 1) + 3 * q + 2,
              0] = -(zeta[3 * q] + 0.25 / m - e)
            T[3 * q + 2, 1] = -(zeta[3 * q] + 0.25 / m - e)
            # plunge
            T[3 * q + 2, 2] = -1
            # beta, betaPrime
            if zeta[3 * q] + 0.25 / m > f:
                T[3 * (m + 1) * (n + 1) + 3 * q + 2,
                  3] = -(zeta[3 * q] + 0.25 / m - f)
                T[3 * q + 2, 4] = -(zeta[3 * q] + 0.25 / m - f)

    G_s = np.dot(G, T)
    D_s = np.dot(D, T)

    # get coefficients as output
    T_coeff = np.zeros((3, 3 * (m + 1) * (n + 1)))
    T_coeff[0, 0::3] = 1.0  #drag
    T_coeff[1, 2::3] = 1.0  #lift
    for i in range(m + 1):
        for j in range(n + 1):
            q = i * (n + 1) + j
            # moment = r*L, +ve nose-up, at quarter chord
            T_coeff[2, 3 * q + 2] = -(zeta[3 * q] + 0.25 / m - 0.25)

    C_coeff = np.dot(T_coeff, C)
    D_coeff = np.dot(T_coeff, D)
    D_s_coeff = np.dot(T_coeff, D_s)

    if writeToMat == True:
        fileName = Settings.OutputDir + 'TESTaerofoil_m' + str(m) + 'mW' + str(
            mW) + 'delS' + str(delS)
        if e != 0.25:
            fileName += 'e' + str(e)
        if f != 0.75:
            fileName += 'f' + str(f)
        savemat(
            fileName, {
                'E': E,
                'F': F,
                'G': G,
                'C': C,
                'D': D,
                'm': m,
                'mW': mW,
                'delS': delS,
                'G_s': G_s,
                'D_s': D_s,
                'C_coeff': C_coeff,
                'D_coeff': D_coeff,
                'D_s_coeff': D_s_coeff,
                'T_coeff': T_coeff
            }, True)
    # end if

    return E, F, G, C, D, delS
Example #50
def genLinearRectWing(AR,
                      m,
                      mW,
                      n,
                      e=0.25,
                      f=0.75,
                      writeToMat=False,
                      imageMeth=False):
    """@brief Generate linear model of rectangular wing.
    @param AR Aspect ration
    @param m Chordwise panels.
    @param mW Chordwise panels in wake.
    @param n Spanwise panels.
    @param delS Non-dim time step.
    @param e location of pitch axis aft of LE [0,1], default 0.25.
    @param f location of flap hinge aft of LE [0,1], default 0.75.
    @param writeToMat write to file in Settings.OutputDir.
    @param imageMeth Use image method across xz-plane.
    """

    # infer delS from body discretization
    delS = 2.0 / m

    # initialise states and inputs
    gam = np.zeros((m * n))
    gamW = np.zeros((mW * n))
    gamPri = np.zeros((m * n))
    chords = np.linspace(0.0, 1.0, m + 1, True)
    chordsW = np.linspace(1.0, 1.0 + mW * delS / 2.0, mW + 1, True)
    if imageMeth:
        spans = np.linspace(0.0, AR / 2.0, n + 1, True)
    else:
        spans = np.linspace(-AR / 2.0, AR / 2.0, n + 1, True)
    # end if
    zeta = np.zeros(3 * len(chords) * len(spans))
    zetaW = np.zeros(3 * len(chordsW) * len(spans))
    zetaPri = np.zeros((3 * len(chords) * len(spans)))
    zetaPri[0::3] = -0.5
    nu = np.zeros_like(zetaPri)
    kk = 0
    for c in chords:
        for s in spans:
            zeta[3 * kk] = c
            zeta[3 * kk + 1] = s
            kk = kk + 1
        # end for s
    # end for c
    kk = 0
    for c in chordsW:
        for s in spans:
            zetaW[3 * kk] = c
            zetaW[3 * kk + 1] = s
            kk = kk + 1
        # end for s
    # end for c

    # generate model
    E, F, G, C, D = genSSuvlm(gam, gamW, gamPri, zeta, zetaW, zetaPri, nu, m,
                              n, mW, delS, imageMeth)

    # convert inputs from general kinematics to aerofoil DoFs
    T = np.zeros((9 * (m + 1) * (n + 1), 5))
    for i in range(m + 1):
        for j in range(n + 1):
            q = i * (n + 1) + j
            # alpha, alphaPrime
            T[3 * (m + 1) * (n + 1) + 3 * q + 2,
              0] = -(zeta[3 * q] + 0.25 / m - e)
            T[3 * q + 2, 1] = -(zeta[3 * q] + 0.25 / m - e)
            # plunge
            T[3 * q + 2, 2] = -1
            # beta, betaPrime
            if zeta[3 * q] + 0.25 / m > f:
                T[3 * (m + 1) * (n + 1) + 3 * q + 2,
                  3] = -(zeta[3 * q] + 0.25 / m - f)
                T[3 * q + 2, 4] = -(zeta[3 * q] + 0.25 / m - f)

    G_s = np.dot(G, T)
    D_s = np.dot(D, T)

    # get coefficients as output
    T_coeff = np.zeros((3, 3 * (m + 1) * (n + 1)))
    T_coeff[0, 0::3] = 1.0  #drag
    T_coeff[1, 2::3] = 1.0  #lift
    for i in range(m + 1):
        for j in range(n + 1):
            q = i * (n + 1) + j
            # moment = r*L, +ve nose-up, at quarter chord
            T_coeff[2, 3 * q + 2] = -(zeta[3 * q] + 0.25 / m - 0.25)

    C_coeff = np.dot(T_coeff, C)
    D_coeff = np.dot(T_coeff, D)
    D_s_coeff = np.dot(T_coeff, D_s)

    # spanwise lift distribution as output
    T_span = np.zeros((n + 1, 3 * (m + 1) * (n + 1)))
    for jj in range(n + 1):
        T_span[jj, 3 * jj + 2::3 * (n + 1)] = 1.0

    if writeToMat == True:
        fileName = Settings.OutputDir + 'rectWingAR' + str(AR) + '_m' + str(
            m) + 'mW' + str(mW) + 'n' + str(n) + 'delS' + str(delS)
        if e != 0.25:
            fileName += 'e' + str(e)
        if f != 0.75:
            fileName += 'f' + str(f)
        if imageMeth != False:
            fileName += 'half'
        savemat(
            fileName, {
                'E': E,
                'F': F,
                'G': G,
                'C': C,
                'D': D,
                'm': m,
                'mW': mW,
                'delS': delS,
                'G_s': G_s,
                'D_s': D_s,
                'C_coeff': C_coeff,
                'D_coeff': D_coeff,
                'D_s_coeff': D_s_coeff,
                'T_coeff': T_coeff,
                'T_span': T_span,
                'AR': AR,
                'n': n,
                'zeta': zeta
            }, True)
    # end if

    return E, F, G, C, D
Example #51
def test_save_unicode_field(tmpdir):
    filename = os.path.join(str(tmpdir), 'test.mat')
    test_dict = {u'a': {u'b': 1, u'c': 'test_str'}}
    savemat(filename, test_dict)
Example #52
def _rt_check_case(name, expected, format):
    mat_stream = BytesIO()
    savemat(mat_stream, expected, format=format)
    mat_stream.seek(0)
    _load_check_case(name, [mat_stream], expected)
Example #53
def Solve_Py(XBINPUT, XBOPTS, VMOPTS, VMINPUT, AELAOPTS):
    """Nonlinear static solver using Python to solve aeroelastic
    equation. Assembly of structural matrices is carried out with 
    Fortran subroutines. Aerodynamics solved using PyAero.UVLM."""

    assert XBOPTS.Solution.value == 112, ('NonlinearStatic requested' +
                                          ' with wrong solution code')

    # Initialize beam.
    XBINPUT, XBOPTS, NumNodes_tot, XBELEM, PosIni, PsiIni, XBNODE, NumDof \
                = BeamInit.Static(XBINPUT,XBOPTS)
    # Set initial conditions as undef config.
    PosDefor = PosIni.copy(order='F')
    PsiDefor = PsiIni.copy(order='F')
    if XBOPTS.PrintInfo.value == True:
        sys.stdout.write('Solve nonlinear static case in Python ... \n')
    # Initialise structural eqn tensors.
    KglobalFull = np.zeros((NumDof.value, NumDof.value), ct.c_double, 'F')
    ks = ct.c_int()
    FglobalFull = np.zeros((NumDof.value, NumDof.value), ct.c_double, 'F')
    fs = ct.c_int()
    DeltaS = np.zeros(NumDof.value, ct.c_double, 'F')
    Qglobal = np.zeros(NumDof.value, ct.c_double, 'F')
    x = np.zeros(NumDof.value, ct.c_double, 'F')
    dxdt = np.zeros(NumDof.value, ct.c_double, 'F')
    # Beam Load Step tensors
    Force = np.zeros((XBINPUT.NumNodesTot, 6), ct.c_double, 'F')
    iForceStep = np.zeros((NumNodes_tot.value, 6), ct.c_double, 'F')
    iForceStep_Dof = np.zeros(NumDof.value, ct.c_double, 'F')

    # Initialze Aero.
    Section = InitSection(VMOPTS, VMINPUT, AELAOPTS.ElasticAxis)
    # Declare memory for Aero grid and velocities.
    Zeta = np.zeros((Section.shape[0], PosDefor.shape[0], 3), ct.c_double, 'C')
    ZetaDot = np.zeros((Section.shape[0], PosDefor.shape[0], 3), ct.c_double,
                       'C')
    # Additional Aero solver variables.
    AeroForces = np.zeros((VMOPTS.M.value + 1, VMOPTS.N.value + 1, 3),
                          ct.c_double, 'C')
    Gamma = np.zeros((VMOPTS.M.value, VMOPTS.N.value), ct.c_double, 'C')
    # Init external velocities.
    Uext = InitSteadyExternalVels(VMOPTS, VMINPUT)
    # Create zero triads for motion of reference frame.
    VelA_A = np.zeros((3))
    OmegaA_A = np.zeros((3))
    # Create zero vectors for structural vars not used in static analysis.
    PosDotDef = np.zeros_like(PosDefor, ct.c_double, 'F')
    PsiDotDef = np.zeros_like(PsiDefor, ct.c_double, 'F')

    # Define tecplot stuff.
    FileName = Settings.OutputDir + Settings.OutputFileRoot + 'AeroGrid.dat'
    Variables = ['X', 'Y', 'Z', 'Gamma']
    FileObject = PostProcess.WriteAeroTecHeader(FileName, 'Default', Variables)

    # Start Load Loop.
    for iLoadStep in range(XBOPTS.MaxIterations.value):
        # Reset convergence parameters and loads.
        Iter = 0
        ResLog10 = 0.0
        Force[:, :] = 0.0
        AeroForces[:, :, :] = 0.0
        oldPos = PosDefor.copy(order='F')
        oldPsi = PsiDefor.copy(order='F')

        # Calculate aero loads.
        if hasattr(XBINPUT, 'ForcedVel'):
            CoincidentGrid(PosDefor,
                           PsiDefor,
                           Section,
                           XBINPUT.ForcedVel[0, :3],
                           XBINPUT.ForcedVel[0, 3:],
                           PosDotDef,
                           PsiDotDef,
                           XBINPUT,
                           Zeta,
                           ZetaDot,
                           ctrlSurf=VMINPUT.ctrlSurf)
        else:
            CoincidentGrid(PosDefor,
                           PsiDefor,
                           Section,
                           VelA_A,
                           OmegaA_A,
                           PosDotDef,
                           PsiDotDef,
                           XBINPUT,
                           Zeta,
                           ZetaDot,
                           ctrlSurf=VMINPUT.ctrlSurf)

        if hasattr(XBINPUT, 'ForcedVel'):
            ZetaStar, GammaStar = InitSteadyWake(VMOPTS, VMINPUT, Zeta,
                                                 XBINPUT.ForcedVel[0, :3])
        else:
            ZetaStar, GammaStar = InitSteadyWake(VMOPTS, VMINPUT, Zeta)

        # Define AICs here for debugging - Rob 16/08/2016
        AIC = np.zeros(
            (VMOPTS.M.value * VMOPTS.N.value, VMOPTS.M.value * VMOPTS.N.value),
            ct.c_double, 'C')
        BIC = np.zeros(
            (VMOPTS.M.value * VMOPTS.N.value, VMOPTS.M.value * VMOPTS.N.value),
            ct.c_double, 'C')

        # Solve for AeroForces.
        UVLMLib.Cpp_Solver_VLM(Zeta, ZetaDot, Uext, ZetaStar, VMOPTS,
                               AeroForces, Gamma, GammaStar, AIC, BIC)

        AeroForces[:, :, :] = AELAOPTS.AirDensity * AeroForces[:, :, :]

        # Write solution to tecplot file.
        PostProcess.WriteUVLMtoTec(FileObject,
                                   Zeta,
                                   ZetaStar,
                                   Gamma,
                                   GammaStar,
                                   iLoadStep,
                                   XBOPTS.NumLoadSteps.value,
                                   iLoadStep * 1.0,
                                   Text=True)

        # Map AeroForces to beam.
        CoincidentGridForce(XBINPUT, PsiDefor, Section, AeroForces, Force)

        # Add gravity loads.
        AddGravityLoads(Force, XBINPUT, XBELEM, AELAOPTS, PsiDefor, VMINPUT.c)

        # Apply factor corresponding to force step.
        if iLoadStep < XBOPTS.NumLoadSteps.value:
            iForceStep = (Force + XBINPUT.ForceStatic)*float( (iLoadStep+1) ) / \
                                                XBOPTS.NumLoadSteps.value
        else:
            # continue at full loading until equilibrium
            iForceStep = Force + XBINPUT.ForceStatic

        if XBOPTS.PrintInfo.value == True:
            sys.stdout.write('  iLoad: %-10d\n' % (iLoadStep + 1))
            sys.stdout.write('   SubIter DeltaF     DeltaX     ResLog10\n')

        # Start Newton Iteration.
        while ((ResLog10 > np.log10(XBOPTS.MinDelta.value))
               & (Iter < XBOPTS.MaxIterations.value)):

            Iter += 1
            if XBOPTS.PrintInfo.value == True:
                sys.stdout.write('   %-7d ' % (Iter))

            # Set structural eqn tensors to zero
            KglobalFull[:, :] = 0.0
            ks = ct.c_int()
            FglobalFull[:, :] = 0.0
            fs = ct.c_int()
            Qglobal[:] = 0.0

            # Assemble matrices for static problem
            BeamLib.Cbeam3_Asbly_Static(XBINPUT, NumNodes_tot, XBELEM, XBNODE,
                                        PosIni, PsiIni, PosDefor, PsiDefor,
                                        iForceStep, NumDof, ks, KglobalFull,
                                        fs, FglobalFull, Qglobal, XBOPTS)

            # Get state vector from current deformation.
            PosDot = np.zeros((NumNodes_tot.value, 3), ct.c_double, 'F')
            PsiDot = np.zeros((XBINPUT.NumElems, Settings.MaxElNod, 3),
                              ct.c_double, 'F')

            BeamLib.Cbeam_Solv_Disp2State(NumNodes_tot, NumDof, XBINPUT,
                                          XBNODE, PosDefor, PsiDefor, PosDot,
                                          PsiDot, x, dxdt)

            # Get forces on unconstrained nodes.
            BeamLib.f_fem_m2v(
                ct.byref(NumNodes_tot), ct.byref(ct.c_int(6)),
                iForceStep.ctypes.data_as(ct.POINTER(ct.c_double)),
                ct.byref(NumDof),
                iForceStep_Dof.ctypes.data_as(ct.POINTER(ct.c_double)),
                XBNODE.Vdof.ctypes.data_as(ct.POINTER(ct.c_int)))

            # Calculate \Delta RHS.
            Qglobal = Qglobal - np.dot(FglobalFull, iForceStep_Dof)

            # Calculate \Delta State Vector.
            DeltaS = -np.dot(np.linalg.inv(KglobalFull), Qglobal)

            if XBOPTS.PrintInfo.value == True:
                sys.stdout.write('%-10.4e %-10.4e ' %
                                 (max(abs(Qglobal)), max(abs(DeltaS))))

            # Update Solution.
            BeamLib.Cbeam3_Solv_Update_Static(XBINPUT, NumNodes_tot, XBELEM,
                                              XBNODE, NumDof, DeltaS, PosIni,
                                              PsiIni, PosDefor, PsiDefor)

            # Record residual at first iteration.
            if (Iter == 1):
                Res0_Qglobal = max(abs(Qglobal)) + 1.e-16
                Res0_DeltaX = max(abs(DeltaS)) + 1.e-16

            # Update residual and compute log10
            Res_Qglobal = max(abs(Qglobal)) + 1.e-16
            Res_DeltaX = max(abs(DeltaS)) + 1.e-16
            ResLog10 = max([
                np.log10(Res_Qglobal / Res0_Qglobal),
                np.log10(Res_DeltaX / Res0_DeltaX)
            ])

            if XBOPTS.PrintInfo.value == True:
                sys.stdout.write('%8.4f\n' % (ResLog10))

            # Stop the solution.
            if (ResLog10 > 10.):
                sys.stderr.write(' STOP\n')
                sys.stderr.write(' The max residual is %e\n' % (ResLog10))
                exit(1)
            elif Res_DeltaX < 1.e-14:
                break
        # END Newton iteration

        # After incremental loading continue until equilibrium reached
        if iLoadStep >= XBOPTS.NumLoadSteps.value:
            Pos_error = PosDefor - oldPos
            Psi_error = PsiDefor - oldPsi
            if( (np.linalg.norm(Pos_error)<=XBOPTS.MinDelta) & \
                (np.linalg.norm(Psi_error)<=XBOPTS.MinDelta) ):
                break
    # END Load step loop

    if XBOPTS.PrintInfo.value == True:
        sys.stdout.write(' ... done\n')

    # Write deformed configuration to file.
    ofile = Settings.OutputDir + Settings.OutputFileRoot + '_SOL112_def.dat'
    if XBOPTS.PrintInfo.value == True:
        sys.stdout.write('Writing file %s ... ' % (ofile))
    fp = open(ofile, 'w')
    fp.write('TITLE="Non-linear static solution: deformed geometry"\n')
    fp.write('VARIABLES="iElem" "iNode" "Px" "Py" "Pz" "Rx" "Ry" "Rz"\n')
    fp.close()
    if XBOPTS.PrintInfo.value == True:
        sys.stdout.write('done\n')
    WriteMode = 'a'

    BeamIO.OutputElems(XBINPUT.NumElems, NumNodes_tot.value, XBELEM, PosDefor,
                       PsiDefor, ofile, WriteMode)

    # Print deformed configuration.
    if XBOPTS.PrintInfo.value == True:
        sys.stdout.write('--------------------------------------\n')
        sys.stdout.write('NONLINEAR STATIC SOLUTION\n')
        sys.stdout.write('%10s %10s %10s\n' % ('X', 'Y', 'Z'))
        for inodi in range(NumNodes_tot.value):
            sys.stdout.write(' ')
            for inodj in range(3):
                sys.stdout.write('%12.5e' % (PosDefor[inodi, inodj]))
            sys.stdout.write('\n')
        sys.stdout.write('--------------------------------------\n')

    # Close Tecplot ascii FileObject.
    PostProcess.CloseAeroTecFile(FileObject)

    if True:
        # print and save deformed wing total force coefficients (may include gravity
        # and other applied static loads). Coefficients in wind-axes.
        cF = np.zeros((3))
        for i in range(VMOPTS.M.value + 1):
            for j in range(VMOPTS.N.value + 1):
                cF += AeroForces[i, j, :]

        Calpha = Psi2TransMat(np.array([VMINPUT.alpha, 0.0, 0.0]))
        cF = np.dot(Calpha, cF)
        cF = cF / (0.5 * AELAOPTS.AirDensity * np.linalg.norm(VMINPUT.U_infty)
                   **2.0 * VMINPUT.c * XBINPUT.BeamLength)
        print("Reference condition total force coefficients: {}".format(cF))
        fileName = Settings.OutputDir + Settings.OutputFileRoot + 'refCf'
        savemat(fileName, {'cF': cF})

    # Return solution
    return PosDefor, PsiDefor, Zeta, ZetaStar, Gamma, GammaStar, iForceStep, NumNodes_tot, NumDof, XBELEM, XBNODE, PosIni, PsiIni, Uext
Example #54
    plt.show()

# save res to mat-file
savemat('turn_result.mat', {
    'X': X,
    'Y': Y,
    'psi': psi,
    'r': r,
    'vx': vx,
    'vy': vy,
    'delta': delta,
    'ddelta': ddelta,
    'alphaf': alphaf,
    'alphar': alphar,
    'kappaf': kappaf,
    'kappar': kappar,
    'omegaf': omegaf,
    'omegar': omegar,
    'Fyf': Fyf,
    'Fyr': Fyr,
    'Fxf': Fxf,
    'Fxr': Fxr,
    'Twf': Twf,
    'Twr': Twr,
    'FX': FX,
    'FY': FY,
    'MZ': MZ,
    't': t
},
        appendmat=False)
Example #55
def test_single_object():
    stream = BytesIO()
    savemat(stream, {'A': np.array(1, dtype=object)})
Example #56
File: VLM.py  Project: cndaqiang/SHARPy
        if f != 0.75:
            fileName += 'f' + str(f)
        if imageMeth != False:
            fileName += 'half'
        savemat(
            fileName, {
                'E': E,
                'F': F,
                'G': G,
                'C': C,
                'D': D,
                'm': m,
                'mW': mW,
                'delS': delS,
                'G_s': G_s,
                'D_s': D_s,
                'C_coeff': C_coeff,
                'D_coeff': D_coeff,
                'D_s_coeff': D_s_coeff,
                'T_coeff': T_coeff,
                'T_span': T_span,
                'AR': span / chord,
                'n': n,
                'zeta': zeta
            }, True)

    if runLinear == True:
        nT = 4001  # number of time steps
        u = np.zeros((nT, G_s.shape[1]))  # inputs
Example #57
    def setUp(self):

        self.prior = np.array([1, 0, 0])
        self.transmat = np.matrix([[0, 1, 0], [0, 0, 1], [0, 0, 1]])

        self.mu = np.zeros((2, 3, 2))
        self.mu[:, :, 0] = np.array([[1, 2, 3], [1, 2, 3]])
        self.mu[:, :, 1] = np.array([[4, 5, 6], [4, 5, 6]])

        self.Sigma = np.zeros((2, 2, 3, 2))
        for i in range(3):
            self.Sigma[:, :, i, 0] = np.diag(np.ones((2, )) * 0.01)
            self.Sigma[:, :, i, 1] = np.diag(np.ones((2, )) * 0.01)

        self.mixmat = np.array([[.5, .5], [.5, .5], [.5, .5]])

        try:
            with open('MhmmEM2DTestGaussInit.cache', 'rb') as f:
                cache = load(f)

            self.obs = cache['obs']
            self.prior0 = cache['prior0']
            self.transmat0 = cache['transmat0']
            self.mu0 = cache['mu0']
            self.Sigma0 = cache['Sigma0']
            self.mixmat0 = cache['mixmat0']

        except:

            self.obs, hidden = mhmm_sample(T=4,
                                           numex=100,
                                           initial_prob=self.prior,
                                           transmat=self.transmat,
                                           mu=self.mu,
                                           Sigma=self.Sigma,
                                           mixmat=self.mixmat)

            self.prior0, _ = mk_stochastic(np.random.rand(3))
            self.transmat0, _ = mk_stochastic(np.random.rand(3, 3))

            O = self.obs.shape[0]
            M = 2
            Q = 3

            mu0, Sigma0, weights0 = mixgauss_init(Q * M,
                                                  self.obs,
                                                  cov_type='diag')

            self.mu0 = np.transpose(np.reshape(mu0, (O, M, Q)), (0, 2, 1))
            self.Sigma0 = np.transpose(np.reshape(Sigma0, (O, O, M, Q)),
                                       (0, 1, 3, 2))

            self.mixmat0, _ = mk_stochastic(np.random.rand(Q, M))

            cache = {
                'obs': self.obs,
                'prior0': self.prior0,
                'transmat0': self.transmat0,
                'mu0': self.mu0,
                'Sigma0': self.Sigma0,
                'mixmat0': self.mixmat0
            }

            with open('MhmmEM2DTestGaussInit.cache', 'wb') as f:
                dump(cache, f)
            savemat('MhmmEM2DTestGaussInit.mat', cache)