Example #1
    def test_write_csv(self):

        test_file = DakotaFile(file_type='csv')

        values = np.array([0, 1, 2, 3, 4, 5])

        data_arrays = {}
        data_arrays['values'] = xr.DataArray(values)

        test_file.uncertain['vars'] = xr.Dataset(data_arrays,
                                                 attrs={'type': 'normal'})

        test_file.write_csv('test.csv')

        with open('test.csv') as csv_file:

            csv_reader = csv.reader(csv_file, delimiter=',')
            row_count = 0

            for row in csv_reader:

                self.assertEqual(int(row[0]), row_count)
                row_count = row_count + 1

        os.remove('test.csv')
Example #2
    def test_check_uncertainty_file(self):

        # Write a file first
        test_netcdf = DakotaFile()

        # Add some settings
        test_netcdf.add_settings(settings)

        # Add some variables
        test_netcdf.uncertain['test'] = xr.Dataset(
            {
                'means': means,
                'std_deviations': sds
            }, attrs={'type': 'normal'})
        test_netcdf.uncertain['test2'] = xr.Dataset(
            {
                'lower_bounds': lower_bounds,
                'upper_bounds': upper_bounds
            },
            attrs={'type': 'uniform'})
        test_netcdf.uncertain['test3'] = xr.Dataset(
            {
                'betas': betas,
                'initial_point': initial_points
            },
            attrs={'type': 'exponential'})

        test_netcdf.check()
Example #3
    def test_scan_csv_fails(self):

        with open('test.csv', 'w+') as test_file:
            test_file.write(csv_scan_incorrect)

        test_file = DakotaFile(file_type='csv')

        with self.assertRaises(CSVError):
            test_file.read('test.csv')

        os.remove('test.csv')
Example #4
    def test_mc_file_without_settings(self):

        # Write a file first
        test_netcdf = DakotaFile()

        # Add some variables
        test_netcdf.uncertain['test'] = xr.Dataset(
            {
                'means': means,
                'std_deviations': sds
            }, attrs={'type': 'normal'})

        with self.assertRaises(FileConsistencyError):
            test_netcdf.check()
Example #5
    def test_scan_csv(self):

        with open('test.csv', 'w+') as test_file:
            test_file.write(csv_scan_correct)

        test_file = DakotaFile(file_type='csv')
        test_file.read('test.csv')

        self.assertEqual(test_file.uncertain['vars']['lower_bounds'].data[1],
                         3)
        self.assertEqual(test_file.uncertain['vars']['upper_bounds'].data[2],
                         837)
        self.assertEqual(test_file.uncertain['vars']['partitions'].data[3], 47)

        os.remove('test.csv')
Example #6
    def test_mc_csv(self):

        with open('test.csv', 'w+') as test_file:
            test_file.write(csv_mc_correct)

        test_file = DakotaFile(file_type='csv')
        test_file.read('test.csv')

        self.assertEqual(test_file.settings.attrs['samples'], 4000)
        self.assertEqual(test_file.settings.attrs['seed'], 23825)
        self.assertEqual(test_file.uncertain['vars']['means'].data[1], 3)
        self.assertEqual(test_file.uncertain['vars']['std_deviations'].data[2],
                         0.74)

        os.remove('test.csv')
Example #7
    def test_fail_write_csv(self):

        test_file = DakotaFile(file_type='csv')

        dataset1 = xr.Dataset({'values': np.array([1])})
        dataset2 = xr.Dataset({'values': np.array([2])})
        dataset3 = xr.Dataset({'values': np.array([3])})

        test_file.add_variable_from_dataset('vars_0', 'normal', dataset1)
        test_file.add_variable_from_dataset('vars_1', 'normal', dataset2)
        test_file.add_variable_from_dataset('vars_3', 'normal', dataset3)

        with self.assertRaises(CSVError):
            test_file.write_csv('test.csv')
Example #8
    def encode(self, params, input_file, file_type):

        # Read existing data file (need shapes)
        user_file = DakotaFile(file_type=file_type)
        user_file.read(input_file)

        # Format data in params into correct shapes and add values entries to file
        self.parse_data(user_file, params)

        # Write new data file with varied data in the target directory
        user_file.write(input_file)
Example #9
    def test_write_a_netcdf(self):

        test_netcdf = DakotaFile()

        # Add some settings
        test_netcdf.add_settings(settings)

        test_netcdf.uncertain['test'] = xr.Dataset(
            {
                'means': means,
                'std_deviations': sds
            }, attrs={'type': 'normal'})

        test_netcdf.write('test.nc')

        self.assertTrue(os.path.isfile('test.nc'))
Example #10
    def test_write_var_as_dataset(self):

        test_netcdf = DakotaFile()

        # Add some settings
        test_netcdf.add_settings(settings)

        # A uniform uncertain variable
        name = 'test_uniform'
        var_type = 'uniform'
        dataset = xr.Dataset({
            'lower_bounds': lower_bounds,
            'upper_bounds': upper_bounds
        })

        test_netcdf.add_variable_from_dataset(name, var_type, dataset)

        test_netcdf.write('test3.nc')
        self.assertTrue(os.path.isfile('test3.nc'))
        os.remove('test3.nc')
Example #11
    def test_add_var_from_dict(self):

        test_netcdf = DakotaFile()

        # A normal uncertain variable
        name = 'test_normal'
        var_type = 'normal'
        variable_dict = {'means': means.data, 'std_deviations': sds.data}

        test_netcdf.add_variable_from_dict(name, var_type, variable_dict)

        with self.assertRaises(VariableError):
            test_netcdf.add_variable_from_dict(name, var_type, variable_dict)
Example #12
def make_dakota(args):

    # Create template DAKOTA input file
    dakota = DakotaClass()

    # Read user provided file
    user_file = DakotaFile(file_type=args.type)
    user_file.read(args.input)

    # Set Dakota entries from arguments
    dakota.dakota.set_attribute('evaluation_concurrency', args.concurrency)
    dakota.dakota.set_attribute('analysis_drivers', "'" + args.driver + "'")

    # Update DAKOTA input file with uncertain variables
    user_file.write_dakota_input(dakota)

    # Write input file
    dakota.write_input_file(args.output)
Example #13
    def test_check_scan_file(self):

        # Write a file first
        test_netcdf = DakotaFile()

        # Add some variables
        test_netcdf.uncertain['test'] = xr.Dataset(
            {
                'lower_bounds': lower_bounds,
                'upper_bounds': upper_bounds,
                'partitions': partitions
            },
            attrs={'type': 'scan'})

        test_netcdf.uncertain['test2'] = xr.Dataset(
            {
                'lower_bounds': lower_bounds2,
                'upper_bounds': upper_bounds2,
                'partitions': partitions2
            },
            attrs={'type': 'scan'})

        test_netcdf.check()
Example #14
# This file creates an example netcdf file in order to demonstrate the tools for reading and writing
# uncertain variable data.

import numpy as np
import xarray as xr
from dakota_file import DakotaFile

# First create an instance of the DakotaFile class!
my_netcdf = DakotaFile()

# Before doing anything else we need to configure how DAKOTA will run. This means passing
# a dictionary of settings. At present the following settings are needed:
# sample_type            - This can be 'random' for pure MC or 'lhs' for Latin Hypercube Sampling
# samples                - The number of samples to generate
# seed                   - The random number seed. Must be at least 1

settings = {'sample_type': 'lhs', 'samples': 4, 'seed': 3947}

my_netcdf.add_settings(settings)

# We need to add some uncertain variables...

# ------------------------------------------------------------------------
# WRITE EXAMPLE 1
# ------------------------------------------------------------------------

# The simplest way is to build a dictionary of needed variables and call
# the add_variable_from_dict function.

# We'll use a normal uncertain variable as an example. This requires two pieces
# of input data: means and standard deviations.
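
# A minimal sketch of that call, with arbitrary illustrative values for the
# means and standard deviations, using the add_variable_from_dict signature
# shown in the other examples in this collection:

name = 'test_normal'
var_type = 'normal'

means = np.array([1.0, 2.0])
std_deviations = np.array([0.1, 0.2])

variable_dict = {'means': means, 'std_deviations': std_deviations}

my_netcdf.add_variable_from_dict(name, var_type, variable_dict)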
Example #15
helpstr = "Name of the input file to use"
parser.add_argument("-i", "--input", default='DAKOTA.nc', help=helpstr)

helpstr = "Type of input file. Currently supports netcdf (n) and csv (c)"
parser.add_argument("-t", "--type", default='netcdf', help=helpstr)

helpstr = "Name of the output file to create"
parser.add_argument("-o", "--output", default='easyvvuq_main.py', help=helpstr)

helpstr = "Number of CPUs to use"
parser.add_argument("-c", "--cpu", default=1, help=helpstr)

args = parser.parse_args()

# Read user provided file
user_file = DakotaFile(file_type=args.type)
user_file.read(args.input)

# Get just file name (not path)
shortname = args.input.split('/')[-1].strip()

# Sample type setting is used to specify the kind of run
if 'sample_type' in user_file.settings.attrs:
    sample_type = user_file.settings.attrs['sample_type'].strip().lower()
else:
    raise Exception("No sample type set. ABORTING!")

all_vars = {}
out_vars = {}

variations = "{"
Example #16
        raise Exception

    file_name = sys.argv[3]
    file_type = sys.argv[4]

    # Get DAKOTA parameters
    params, results = di.read_parameters_file()

    # Get Iteration number
    iteration = params.eval_num

    # Read original user input file
    # If it is a netCDF, we will append the values drawn from each distribution to the original datasets
    # If it is a CSV, we will simply make a new CSV with a single column of varied data values

    user_file = DakotaFile(file_type=file_type)
    user_file.read(file_name)

    # Loop over uncertain variables and reconstruct data arrays
    for key in user_file.uncertain.keys():

        # Get the dataset for this variable
        dataset = user_file.get_variable_as_dataset(key)

        # Get the type of this variable
        var_type = dataset.attrs['type']

        # Get one of the required entries
        var_name = allowed_variable_types[var_type]['required'][0]

        var = dataset[var_name]
Example #17
# This file creates an example netCDF file to demonstrate the tools
# for reading and writing parameter scan data.

from exceptions import *
from dakota_file import DakotaFile
import numpy as np
import xarray as xr
import main
import os

# First create an instance of the DakotaFile class!
my_netcdf = DakotaFile()

# Unlike the Monte Carlo sampling case we do not need any run settings,
# so the add_settings function should not be called

# We need to add some parameter scan variables

# ------------------------------------------------------------------------
# WRITE EXAMPLE 1
# ------------------------------------------------------------------------

# The simplest way is to build a dictionary of needed variables and call
# the add_variable_from_dict function.

# Performing a parameter scan requires three entries: lower bounds, upper bounds
# and the number of partitions.

# Give our variable a name!
name = 'test_scan1'
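
# A minimal sketch of the next steps, using the same illustrative bounds and
# partitions that appear in the companion scan example later in this collection:
var_type = 'scan'

lower_bounds = np.array([1.0, 3.0])
upper_bounds = np.array([2.0, 4.0])
partitions = np.array([1, 1])

variable_dict = {'lower_bounds': lower_bounds,
                 'upper_bounds': upper_bounds,
                 'partitions': partitions}

my_netcdf.add_variable_from_dict(name, var_type, variable_dict)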
Example #18
import numpy as np
import xarray as xr
import exceptions
from dakota_file import DakotaFile

# Create a new instance of the DakotaFile class
my_netcdf = DakotaFile()

filename = 'DAKOTA.nc'
my_netcdf.read(filename)

# We can read one of the variables in the netcdf file back into a dictionary of data
# We shall read test_normal2 which was originally written as a dataset

variable_dict = my_netcdf.get_variable_as_dict('test_normal2')

if 'values' in variable_dict.keys():

    values = variable_dict['values']
    results = values.sum()

    file_out = open('DAKOTA_OUTPUT.dat', 'w')
    file_out.write('Sum of values is: ')
    file_out.write(str(results))
    file_out.close()

else:

    print('ERROR: values entry missing from variable dict!')
    raise Exception
Example #19
    def test_main_uncertain(self):

        # Create a DAKOTA netcdf file
        test_netcdf = DakotaFile()

        # Add some test settings
        settings = {'samples': 50}
        test_netcdf.add_settings(settings)

        # Some Coordinates
        times = np.array([1, 2, 3, 4])
        times = xr.DataArray(times, dims=['time'])

        positions = np.array([1, 2, 3])
        positions = xr.DataArray(positions, dims=['position'])

        x = np.array([0.2, 0.3, 0.4, 0.5, 0.6])
        y = np.array([2.5, 3.5, 4.5])
        z = np.array([5.2, 5.3])

        x = xr.DataArray(x, dims=['x'])
        y = xr.DataArray(y, dims=['y'])
        z = xr.DataArray(z, dims=['z'])

        # Some Data

        # Scalar data
        scalar_mean = np.array([5])
        scalar_sd = np.array([0.2])

        # 1D data
        probs = np.random.rand(4)
        total = np.array([8, 9, 10, 11])
        selected = np.array([4, 5, 6, 7])
        num = np.array([5, 7, 7, 9])

        probs = xr.DataArray(probs)
        total = xr.DataArray(total)
        selected = xr.DataArray(selected)
        num = xr.DataArray(num)

        # 2D data
        means = np.random.rand(4, 3)
        means = xr.DataArray(means,
                             coords=[times, positions],
                             dims=['time', 'position'])

        sds = np.random.rand(4, 3)
        sds = xr.DataArray(sds,
                           coords=[times, positions],
                           dims=['time', 'position'])

        # 3D data
        data1 = np.random.rand(5, 3, 2) + 2.0
        data1 = xr.DataArray(data1, coords=[x, y, z], dims=['x', 'y', 'z'])

        data2 = np.random.rand(5, 3, 2) + 4.0
        data2 = xr.DataArray(data2, coords=[x, y, z], dims=['x', 'y', 'z'])

        # Test all uncertain variable types

        # Normal Uncertain Data
        test_netcdf.uncertain['normal'] = xr.Dataset(
            {
                'means': means,
                'std_deviations': sds
            }, attrs={'type': 'normal'})

        # Lognormal Uncertain Data
        test_netcdf.uncertain['lognormal'] = xr.Dataset(
            {
                'means': means,
                'std_deviations': sds
            },
            attrs={'type': 'lognormal'})

        # Uniform Uncertain Data
        test_netcdf.uncertain['uniform'] = xr.Dataset(
            {
                'lower_bounds': data1,
                'upper_bounds': data2
            },
            attrs={'type': 'uniform'})

        # Log Uniform Uncertain Data
        test_netcdf.uncertain['loguniform'] = xr.Dataset(
            {
                'lower_bounds': data1,
                'upper_bounds': data2
            },
            attrs={'type': 'loguniform'})

        # Triangular Uncertain Data
        test_netcdf.uncertain['triangular'] = xr.Dataset(
            {
                'modes': data1,
                'lower_bounds': data1,
                'upper_bounds': data2
            },
            attrs={'type': 'triangular'})

        # Exponential Uncertain Data
        test_netcdf.uncertain['exponential'] = xr.Dataset(
            {'betas': data1}, attrs={'type': 'exponential'})

        # Beta Uncertain Data
        test_netcdf.uncertain['beta'] = xr.Dataset(
            {
                'alphas': data1,
                'betas': data2,
                'lower_bounds': data1,
                'upper_bounds': data2
            },
            attrs={'type': 'beta'})

        # Gamma/Gumbel/Frechet/Weibull Uncertain Data
        test_netcdf.uncertain['gamma'] = xr.Dataset(
            {
                'alphas': data1,
                'betas': data2
            }, attrs={'type': 'gamma'})
        test_netcdf.uncertain['gumbel'] = xr.Dataset(
            {
                'alphas': data1,
                'betas': data2
            }, attrs={'type': 'gumbel'})
        test_netcdf.uncertain['frechet'] = xr.Dataset(
            {
                'alphas': data1,
                'betas': data2
            }, attrs={'type': 'frechet'})
        test_netcdf.uncertain['weibull'] = xr.Dataset(
            {
                'alphas': data1,
                'betas': data2
            }, attrs={'type': 'weibull'})

        # Poisson Uncertain Data
        test_netcdf.uncertain['poisson'] = xr.Dataset(
            {'lambdas': means}, attrs={'type': 'poisson'})

        # Binomial Uncertain Data
        test_netcdf.uncertain['binomial'] = xr.Dataset(
            {
                'probability_per_trial': probs,
                'num_trials': total
            },
            attrs={'type': 'binomial'})

        # Negative Binomial Uncertain Data
        # WARNING - This test sporadically fails with the DAKOTA self checker complaining of inconsistent bounds.
        # It's unclear what causes this or how to fix it, so this test is commented out to prevent sporadic test failures.

        #        test_netcdf.uncertain['negative_binomial'] = xr.Dataset( {'probability_per_trial':probs, 'num_trials':total },
        #                                                                 attrs={'type':'negative_binomial'} )

        # Geometric Uncertain Data
        test_netcdf.uncertain['geometric'] = xr.Dataset(
            {'probability_per_trial': means}, attrs={'type': 'geometric'})

        # Hypergeometric Uncertain Data
        test_netcdf.uncertain['hypergeometric'] = xr.Dataset(
            {
                'total_population': total,
                'selected_population': selected,
                'num_drawn': num
            },
            attrs={'type': 'hypergeometric'})

        # Check that multiple instances of the same variable type are OK
        test_netcdf.uncertain['uniform2'] = xr.Dataset(
            {
                'lower_bounds': data1,
                'upper_bounds': data2
            },
            attrs={'type': 'uniform'})

        # Add some 0D data to check this is OK.
        test_netcdf.uncertain['normal2'] = xr.Dataset(
            {
                'means': scalar_mean,
                'std_deviations': scalar_sd
            },
            attrs={'type': 'normal'})

        # Write the Netcdf file
        test_netcdf.write('test.nc')

        # Create fake arguments
        args = fake_args()
        args.driver = 'test.py'
        args.input = 'test.nc'

        # Create a Dakota input file from the netcdf file
        main.make_dakota(args)

        # Check the file exists
        self.assertTrue(os.path.isfile('DAKOTA.in'))

        # Create files_for_dakota directory if necessary
        delete = False
        if not os.path.exists('files_for_dakota'):
            os.system('mkdir files_for_dakota')
            delete = True

        # Check that DAKOTA recognises the input file
        os.system('dakota --check DAKOTA.in > DAKOTA.tmp')

        # Get rid of directory
        if delete:
            os.system('rmdir files_for_dakota')

        with open('DAKOTA.tmp', 'r') as content_file:
            content = content_file.read()

        self.assertTrue('Input check completed successfully' in content)

        # Delete the temporary files
        os.remove('DAKOTA.in')
        os.remove('test.nc')
        os.remove('DAKOTA.tmp')

        if os.path.exists('dakota.rst'):
            os.remove('dakota.rst')
Example #20
    def test_check_inconsistent_file(self):

        # Write a file first
        test_netcdf = DakotaFile()

        # Add some settings
        test_netcdf.add_settings(settings)

        # Add some variables
        test_netcdf.uncertain['test'] = xr.Dataset(
            {
                'means': means,
                'std_deviations': sds
            }, attrs={'type': 'normal'})
        test_netcdf.uncertain['test2'] = xr.Dataset(
            {
                'lower_bounds': lower_bounds,
                'upper_bounds': upper_bounds
            },
            attrs={'type': 'uniform'})
        test_netcdf.uncertain['test3'] = xr.Dataset(
            {
                'betas': betas,
                'initial_point': initial_points
            },
            attrs={'type': 'exponential'})

        test_netcdf.uncertain['test4'] = xr.Dataset(
            {
                'lower_bounds': lower_bounds,
                'upper_bounds': upper_bounds,
                'partitions': partitions
            },
            attrs={'type': 'scan'})

        test_netcdf.uncertain['test5'] = xr.Dataset(
            {
                'lower_bounds': lower_bounds2,
                'upper_bounds': upper_bounds2,
                'partitions': partitions2
            },
            attrs={'type': 'scan'})

        with self.assertRaises(FileConsistencyError):
            test_netcdf.check()
Example #21
    def test_add_var_from_dict(self):

        test_netcdf = DakotaFile()

        # Add some settings
        test_netcdf.add_settings(settings)

        # A normal uncertain variable
        name = 'test_normal'
        var_type = 'normal'
        variable_dict = {'means': means.data, 'std_deviations': sds.data}

        test_netcdf.add_variable_from_dict(name, var_type, variable_dict)

        # An exponential uncertain variable
        name = 'test_exponential'
        var_type = 'exponential'
        variable_dict = {
            'betas': betas.data,
            'initial_point': initial_points.data
        }

        test_netcdf.add_variable_from_dict(name, var_type, variable_dict)

        # A lognormal uncertain variable
        name = 'test_lognormal'
        var_type = 'lognormal'
        variable_dict = {
            'means': lognormal_means.data,
            'std_deviations': lognormal_sds.data,
            'lower_bounds': lower_bounds.data,
            'upper_bounds': upper_bounds.data
        }

        test_netcdf.add_variable_from_dict(name, var_type, variable_dict)

        test_netcdf.write('test2.nc')
        self.assertTrue(os.path.isfile('test2.nc'))

        os.remove('test2.nc')
Example #22
    def test_read_var_as_dict(self):

        test_netcdf = DakotaFile()

        # Add some settings
        test_netcdf.add_settings(settings)

        # A normal uncertain variable
        name = 'test_normal'
        var_type = 'normal'
        variable_dict = {'means': means.data, 'std_deviations': sds.data}

        test_netcdf.add_variable_from_dict(name, var_type, variable_dict)

        # An exponential uncertain variable
        name = 'test_exponential'
        var_type = 'exponential'
        variable_dict = {
            'betas': betas.data,
            'initial_point': initial_points.data
        }

        test_netcdf.add_variable_from_dict(name, var_type, variable_dict)

        # A lognormal uncertain variable
        name = 'test_lognormal'
        var_type = 'lognormal'
        variable_dict = {
            'means': lognormal_means.data,
            'std_deviations': lognormal_sds.data,
            'lower_bounds': lower_bounds.data,
            'upper_bounds': upper_bounds.data
        }

        test_netcdf.add_variable_from_dict(name, var_type, variable_dict)
        test_netcdf.write('test2.nc')

        test_netcdf = DakotaFile()
        test_netcdf.read('test2.nc')

        name = 'test_normal'
        variable_dict = test_netcdf.get_variable_as_dict(name)

        self.assertEqual(np.sum(variable_dict['means'] - means), 0.0)
        self.assertEqual(np.sum(variable_dict['std_deviations'] - sds), 0.0)

        name = 'test_exponential'
        variable_dict = test_netcdf.get_variable_as_dict(name)

        self.assertEqual(np.sum(variable_dict['betas'] - betas), 0.0)
        self.assertEqual(
            np.sum(variable_dict['initial_point'] - initial_points), 0.0)

        name = 'test_lognormal'
        variable_dict = test_netcdf.get_variable_as_dict(name)

        self.assertEqual(np.sum(variable_dict['means'] - lognormal_means), 0.0)
        self.assertEqual(np.sum(variable_dict['lower_bounds'] - lower_bounds),
                         0.0)
        self.assertEqual(np.sum(variable_dict['upper_bounds'] - upper_bounds),
                         0.0)

        os.remove('test2.nc')
Example #23
    def test_read_a_netcdf(self):

        # Write a file first
        test_netcdf = DakotaFile()

        # Add some settings
        test_netcdf.add_settings(settings)

        # Add a variable
        test_netcdf.uncertain['test'] = xr.Dataset(
            {
                'means': means,
                'std_deviations': sds
            }, attrs={'type': 'normal'})

        # Write the file
        test_netcdf.write('test.nc')

        # Read the file into a new instance
        test_netcdf = DakotaFile()
        test_netcdf.read('test.nc')

        self.assertTrue('test' in test_netcdf.uncertain)
        self.assertEqual(test_netcdf.uncertain['test'].attrs['type'], 'normal')
        self.assertEqual(
            np.sum(test_netcdf.uncertain['test'].data_vars['means'] - means),
            0.0)
        self.assertEqual(
            np.sum(test_netcdf.uncertain['test'].data_vars['std_deviations'] -
                   sds), 0.0)
Example #24
import numpy as np
import xarray as xr
import exceptions
from dakota_file import DakotaFile

my_netcdf = DakotaFile()

filename = 'DAKOTA.nc'
my_netcdf.read(filename)

variable_dict1 = my_netcdf.get_variable_as_dict('test_scan1')
variable_dict2 = my_netcdf.get_variable_as_dict('test_scan2')
variable_dict3 = my_netcdf.get_variable_as_dict('test_scan3')

file_out = open('DAKOTA_OUTPUT.dat', 'w')

file_out.write('test_scan1:\n')

values = variable_dict1['values']
file_out.write(str(values[0]) + ' ' + str(values[1]) + '\n')

values = variable_dict2['values']
file_out.write(str(values) + '\n')

values = variable_dict3['values']
file_out.write(str(values) + '\n')

file_out.close()
Example #25
from exceptions import *
from dakota_file import DakotaFile
import numpy as np
import xarray as xr
import main
import os

my_netcdf = DakotaFile()

name = 'test_scan1'
var_type = 'scan'

# Make a dictionary of the needed data
lower_bounds = np.array([1.0, 3.0])
upper_bounds = np.array([2.0, 4.0])
partitions = np.array([1, 1])

variable_dict = {'lower_bounds': lower_bounds,
                 'upper_bounds': upper_bounds,
                 'partitions': partitions}

my_netcdf.add_variable_from_dict(name, var_type, variable_dict)

name = 'test_scan2'
var_type = 'scan'

lower_bounds = 0.0
upper_bounds = 1.0
partitions = 1

dataset = xr.Dataset({'lower_bounds': lower_bounds,
                      'upper_bounds': upper_bounds,
                      'partitions': partitions})

my_netcdf.add_variable_from_dataset(name, var_type, dataset)
Example #26
    def test_main_parameter_scan(self):

        # Create a DAKOTA netcdf file
        test_netcdf = DakotaFile()

        # Some Coordinates
        times = np.array([1, 2, 3, 4])
        times = xr.DataArray(times, dims=['time'])

        positions = np.array([1, 2, 3])
        positions = xr.DataArray(positions, dims=['position'])

        x = np.array([0.2, 0.3, 0.4, 0.5, 0.6])
        y = np.array([2.5, 3.5, 4.5])
        z = np.array([5.2, 5.3])

        x = xr.DataArray(x, dims=['x'])
        y = xr.DataArray(y, dims=['y'])
        z = xr.DataArray(z, dims=['z'])

        # Some Data

        # Scalar data
        lower_bounds0 = 0.0
        upper_bounds0 = 1.0
        partitions0 = 5

        # 2D data
        lower_bounds1 = np.random.rand(4, 3)
        lower_bounds1 = xr.DataArray(lower_bounds1,
                                     coords=[times, positions],
                                     dims=['time', 'position'])

        upper_bounds1 = np.random.rand(4, 3) + 1.0
        upper_bounds1 = xr.DataArray(upper_bounds1,
                                     coords=[times, positions],
                                     dims=['time', 'position'])

        partitions1 = np.random.rand(4, 3)
        partitions1 = xr.DataArray(partitions1,
                                   coords=[times, positions],
                                   dims=['time', 'position'])

        # 3D data
        lower_bounds2 = np.random.rand(5, 3, 2)
        lower_bounds2 = xr.DataArray(lower_bounds2,
                                     coords=[x, y, z],
                                     dims=['x', 'y', 'z'])

        upper_bounds2 = np.random.rand(5, 3, 2) + 1.0
        upper_bounds2 = xr.DataArray(upper_bounds2,
                                     coords=[x, y, z],
                                     dims=['x', 'y', 'z'])

        partitions2 = np.random.rand(5, 3, 2) + 1.0
        partitions2 = xr.DataArray(partitions2,
                                   coords=[x, y, z],
                                   dims=['x', 'y', 'z'])

        # Add some parameter scan data

        test_netcdf.uncertain['scan0'] = xr.Dataset(
            {
                'lower_bounds': lower_bounds0,
                'upper_bounds': upper_bounds0,
                'partitions': partitions0
            },
            attrs={'type': 'scan'})

        test_netcdf.uncertain['scan1'] = xr.Dataset(
            {
                'lower_bounds': lower_bounds1,
                'upper_bounds': upper_bounds1,
                'partitions': partitions1
            },
            attrs={'type': 'scan'})

        test_netcdf.uncertain['scan2'] = xr.Dataset(
            {
                'lower_bounds': lower_bounds2,
                'upper_bounds': upper_bounds2,
                'partitions': partitions2
            },
            attrs={'type': 'scan'})

        # Add a correlated scan variable
        partitions3 = np.full((5, 3, 2), 4)
        partitions3 = xr.DataArray(partitions3,
                                   coords=[x, y, z],
                                   dims=['x', 'y', 'z'])

        test_netcdf.uncertain['scan3'] = xr.Dataset(
            {
                'lower_bounds': lower_bounds2,
                'upper_bounds': upper_bounds2,
                'partitions': partitions3
            },
            attrs={'type': 'scan_correlated'})

        # Write the Netcdf file
        test_netcdf.write('test.nc')

        # Create fake arguments
        args = fake_args()
        args.driver = 'test.py'
        args.input = 'test.nc'

        # Create a Dakota input file from the netcdf file
        main.make_dakota(args)

        # Check the file exists
        self.assertTrue(os.path.isfile('DAKOTA.in'))

        # Create files_for_dakota directory if necessary
        delete = False
        if not os.path.exists('files_for_dakota'):
            os.system('mkdir files_for_dakota')
            delete = True

        # Check that DAKOTA recognises the input file
        os.system('dakota --check DAKOTA.in > DAKOTA.tmp')

        # Get rid of directory
        if delete:
            os.system('rmdir files_for_dakota')

        with open('DAKOTA.tmp', 'r') as content_file:
            content = content_file.read()

        self.assertTrue('Input check completed successfully' in content)

        # Delete the temporary files
        os.remove('DAKOTA.in')
        os.remove('test.nc')
        os.remove('DAKOTA.tmp')

        if os.path.exists('dakota.rst'):
            os.remove('dakota.rst')
Example #27
    def test_read_var_as_dataset(self):

        # Write some data
        test_netcdf = DakotaFile()

        # Add some settings
        test_netcdf.add_settings(settings)

        # A uniform uncertain variable
        name = 'test_uniform'
        var_type = 'uniform'
        dataset = xr.Dataset({
            'lower_bounds': lower_bounds,
            'upper_bounds': upper_bounds
        })

        test_netcdf.add_variable_from_dataset(name, var_type, dataset)

        test_netcdf.write('test3.nc')

        # Read the data
        test_netcdf = DakotaFile()
        test_netcdf.read('test3.nc')

        # A uniform uncertain variable
        name = 'test_uniform'
        dataset = test_netcdf.get_variable_as_dataset(name)

        self.assertEqual(np.sum(dataset['lower_bounds'] - lower_bounds), 0.0)
        self.assertEqual(np.sum(dataset['upper_bounds'] - upper_bounds), 0.0)

        os.remove('test3.nc')