Example #1
0
    def test_load_balanced_doe(self):
        """Run a load-balanced full-factorial DOE and verify the results.

        Every recorded case must satisfy y = 2 * x, and the cases
        gathered across all ranks must total num_levels.
        """
        prob = Problem(impl=impl)
        model = prob.root = Group()
        model.add('indep_var', IndepVarComp('x', val=1.0))
        model.add('const', IndepVarComp('c', val=2.0))
        model.add('mult', Mult())

        model.connect('indep_var.x', 'mult.x')
        model.connect('const.c', 'mult.c')

        levels = 25
        prob.driver = FullFactorialDriver(num_levels=levels,
                                          num_par_doe=self.N_PROCS,
                                          load_balance=True)
        prob.driver.add_desvar('indep_var.x', lower=1.0, upper=float(levels))
        prob.driver.add_objective('mult.y')
        prob.driver.add_recorder(InMemoryRecorder())

        prob.setup(check=False)
        prob.run()

        recorded = prob.driver.recorders[0].iters
        for case in recorded:
            unknowns = case['unknowns']
            self.assertEqual(unknowns['indep_var.x'] * 2.0,
                             unknowns['mult.y'])

        count = len(recorded)
        if MPI:
            # Cases are spread across ranks; only the global total is fixed.
            self.assertEqual(sum(prob.comm.allgather(count)), levels)
        else:
            self.assertEqual(count, levels)
Example #2
0
    def test_doe(self):
        """Run a uniform-sampling DOE and verify the results.

        Every recorded case must satisfy dut.y == 3 * indep_var.x, and the
        cases gathered across all ranks must total num_samples.
        """
        problem = Problem(impl=impl)
        root = problem.root = Group()
        root.add('indep_var', IndepVarComp('x', val=7.0))
        root.add('const', IndepVarComp('c', val=3.0))
        root.add('dut', DUT())

        root.connect('indep_var.x', 'dut.x')
        root.connect('const.c', 'dut.c')

        num_samples = 10
        problem.driver = UniformDriver(num_samples=num_samples,
                                       num_par_doe=self.N_PROCS)
        # Use 'lower'/'upper' for consistency with the other DOE tests;
        # 'low'/'high' are the deprecated spellings of these arguments.
        problem.driver.add_desvar('indep_var.x', lower=4410.0, upper=4450.0)
        problem.driver.add_objective('dut.y')

        problem.driver.add_recorder(InMemoryRecorder())

        problem.setup(check=False)
        problem.run()

        for data in problem.driver.recorders[0].iters:
            self.assertEqual(data['unknowns']['indep_var.x'] * 3.0,
                             data['unknowns']['dut.y'])

        num_cases = len(problem.driver.recorders[0].iters)
        if MPI:
            # Cases are spread across ranks; only the global total is fixed.
            lens = problem.comm.allgather(num_cases)
            self.assertEqual(sum(lens), num_samples)
        else:
            self.assertEqual(num_cases, num_samples)
Example #3
0
    def test_case_driver(self):
        """CaseDriver must run exactly the supplied cases, in order,
        with mult.y == c * x holding for every recorded case.
        """
        prob = Problem()
        model = prob.root = Group()
        model.add('indep_var', IndepVarComp('x', val=1.0))
        model.add('const', IndepVarComp('c', val=2.0))
        model.add('mult', ExecComp4Test("y=c*x"))

        model.connect('indep_var.x', 'mult.x')
        model.connect('const.c', 'mult.c')

        case_list = [
            [('indep_var.x', 3.0), ('const.c', 1.5)],
            [('indep_var.x', 4.0), ('const.c', 2.)],
            [('indep_var.x', 5.5), ('const.c', 3.0)],
        ]

        prob.driver = CaseDriver(case_list)
        prob.driver.add_desvar('indep_var.x')
        prob.driver.add_desvar('const.c')
        prob.driver.add_recorder(InMemoryRecorder())

        prob.setup(check=False)
        prob.run()

        recorded = prob.driver.recorders[0].iters
        for idx, case in enumerate(recorded):
            unknowns = dict(case['unknowns'])
            case['unknowns'] = unknowns
            # Internal consistency: y really is c * x ...
            self.assertEqual(unknowns['indep_var.x'] * unknowns['const.c'],
                             unknowns['mult.y'])
            # ... and matches the product of the values requested in the case.
            expected_x = case_list[idx][0][1]
            expected_c = case_list[idx][1][1]
            self.assertEqual(expected_x * expected_c, unknowns['mult.y'])

        self.assertEqual(len(recorded), 3)
Example #4
0
    def test_load_balanced_doe_crit_fail(self):
        """A critical failure during a load-balanced DOE.

        Under MPI the driver terminates early, so fewer than num_levels
        cases should complete; serially, the critical error propagates
        out of run().
        """
        prob = Problem(impl=impl)
        model = prob.root = Group()
        model.add('indep_var', IndepVarComp('x', val=1.0))
        model.add('const', IndepVarComp('c', val=2.0))

        # The failing component lives on rank 1 under MPI, rank 0 serially.
        fail_rank = 1 if MPI else 0

        if self.comm.rank == fail_rank:
            model.add('mult', ExecComp4Test("y=c*x", fails=[3], critical=True))
        else:
            model.add('mult', ExecComp4Test("y=c*x"))

        model.connect('indep_var.x', 'mult.x')
        model.connect('const.c', 'mult.c')

        levels = 25
        prob.driver = FullFactorialDriver(num_levels=levels,
                                          num_par_doe=self.N_PROCS,
                                          load_balance=True)
        prob.driver.add_desvar('indep_var.x', lower=1.0, upper=float(levels))
        prob.driver.add_objective('mult.y')
        prob.driver.add_recorder(InMemoryRecorder())

        prob.setup(check=False)
        if MPI:
            prob.run()
        else:
            try:
                prob.run()
            except Exception as err:
                self.assertEqual(str(err), "OMG, a critical error!")
            else:
                self.fail("expected exception")

        recorded = prob.driver.recorders[0].iters
        for case in recorded:
            self.assertEqual(case['unknowns']['indep_var.x'] * 2.0,
                             case['unknowns']['mult.y'])

        count = len(recorded)
        if MPI:
            # In load balanced mode we can't predict how many cases actually
            # ran before termination; just verify the run stopped short of
            # the full factorial.
            total = sum(prob.comm.allgather(count))
            self.assertTrue(
                total < levels,
                "Cases run (%d) should be less than total cases (%d)" %
                (total, levels))
        else:
            self.assertEqual(count, 3)
Example #5
0
    def test_doe_fail_critical(self):
        """A critical failure during a (non load-balanced) parallel DOE.

        Every rank should see an exception: the failing rank gets the
        original error, the other ranks get the cross-process notice.
        Cases recorded before the failure must still satisfy y = 2 * x.
        """
        problem = Problem(impl=impl)
        root = problem.root = Group()
        root.add('indep_var', IndepVarComp('x', val=1.0))
        root.add('const', IndepVarComp('c', val=2.0))

        if MPI:
            fail_rank = 1  # raise exception from this rank
        else:
            fail_rank = 0

        if self.comm.rank == fail_rank:
            root.add('mult', ExecComp4Test("y=c*x", fails=[3], critical=True))
        else:
            root.add('mult', ExecComp4Test("y=c*x"))

        root.connect('indep_var.x', 'mult.x')
        root.connect('const.c', 'mult.c')

        num_levels = 25
        problem.driver = FullFactorialDriver(num_levels=num_levels,
                                             num_par_doe=self.N_PROCS)
        problem.driver.add_desvar('indep_var.x',
                                  lower=1.0,
                                  upper=float(num_levels))
        problem.driver.add_objective('mult.y')

        problem.driver.add_recorder(InMemoryRecorder())

        problem.setup(check=False)

        try:
            problem.run()
        except Exception as err:
            with MultiProcFailCheck(self.comm):
                if self.comm.rank == fail_rank:
                    self.assertEqual(str(err), "OMG, a critical error!")
                else:
                    self.assertEqual(
                        str(err),
                        "an exception was raised by another MPI process.")
        else:
            # A critical failure is expected on every rank; without this
            # guard the test would pass silently if run() never raised.
            self.fail("expected exception")

        for data in problem.driver.recorders[0].iters:
            self.assertEqual(data['unknowns']['indep_var.x'] * 2.0,
                             data['unknowns']['mult.y'])

        num_cases = len(problem.driver.recorders[0].iters)
        if MPI:
            lens = problem.comm.allgather(num_cases)
            self.assertEqual(sum(lens), 12)
        else:
            self.assertEqual(num_cases, 3)
Example #6
0
    def test_doe_fail_analysis_error(self):
        """Non-critical failures must not stop the DOE.

        All num_levels cases still run; the failing rank records exactly
        two unsuccessful cases, and every case satisfies y = 2 * x.
        """
        prob = Problem(impl=impl)
        model = prob.root = Group()
        model.add('indep_var', IndepVarComp('x', val=1.0))
        model.add('const', IndepVarComp('c', val=2.0))

        fail_rank = 1  # raise exception from this rank
        if self.comm.rank == fail_rank:
            model.add('mult', ExecComp4Test("y=c*x", fails=[3, 4]))
        else:
            model.add('mult', ExecComp4Test("y=c*x"))

        model.connect('indep_var.x', 'mult.x')
        model.connect('const.c', 'mult.c')

        levels = 25
        prob.driver = FullFactorialDriver(num_levels=levels,
                                          num_par_doe=self.N_PROCS)
        prob.driver.add_desvar('indep_var.x', lower=1.0, upper=float(levels))
        prob.driver.add_objective('mult.y')
        prob.driver.add_recorder(InMemoryRecorder())

        prob.setup(check=False)
        prob.run()

        recorded = prob.driver.recorders[0].iters
        for case in recorded:
            self.assertEqual(case['unknowns']['indep_var.x'] * 2.0,
                             case['unknowns']['mult.y'])

        count = len(recorded)
        if MPI:
            self.assertEqual(sum(prob.comm.allgather(count)), 25)
        else:
            self.assertEqual(count, 25)

        failures = sum(1 for case in recorded if not case['success'])
        expected_fails = 2 if self.comm.rank == fail_rank else 0
        self.assertEqual(failures, expected_fails)
Example #7
0
def run_list(driver, inputs):
    """Evaluate ``inputs`` and return their outputs as a numpy array.

    Depending on driver flags, outputs come from the analytical motor design
    model, from a previously written parameters.csv log, or from actually
    running the driver's simulation cases.  Unless outputs were read from
    parameters.csv, a fresh parameters.csv log is written.

    Raises an Exception if a requested output name is missing or has no
    value, or if fewer outputs than inputs were produced.
    """
    if driver.TestMotor:    # test the motor design instead of an external model
        outputs = [motorDesign.motor(i, len(driver.outputNames))
                   for i in inputs]
    elif driver.UseParametersCSV:
        # Read log file of inputs and outputs from a previous run.
        outputs = []
        with open('parameters.csv', 'r') as f:
            f.readline()  # skip past the line specifying the names of inputs and outputs
            for line in f:
                # The last len(outputNames) columns of each line are outputs.
                outputs.append([float(num) for num in
                                line.split(',')[-len(driver.outputNames):]])
        # NOTE: the explicit f.close() that used to follow was redundant --
        # the 'with' block already closes the file.
    else:
        recorder = InMemoryRecorder()
        recorder.startup(driver.root)
        driver.recorders.append(recorder)
        # list() so each case is a real list (zip is lazy on Python 3);
        # 'row' also avoids shadowing the builtin 'input'.
        driver.runlist = [list(zip(driver.inputNames, row)) for row in inputs]

        if UseParallel:
            driver.sequential = False

        if UseCluster:
            print('Using remote cluster.')
            driver.sequential = False
            # This is necessary more often than it should be.
            driver.ignore_egg_requirements = True
            # Force use of only cluster hosts by adding this requirement.
            driver.extra_resources = dict(allocator='PCCCluster')

        # NOTE(review): super(driver.__class__, ...) recurses if driver is
        # ever subclassed further -- intentional here, left as-is.
        super(driver.__class__, driver).run(driver)

        outputs = []
        missing = object()  # sentinel: distinguishes "absent" from a None value
        for c in recorder.iters:
            unknowns = c['unknowns']
            output = []
            for name in driver.outputNames:
                unknown = unknowns.get(name, missing)
                if unknown is missing:
                    raise Exception('No outputs from simulator matching requested output \'{0}\'. Available outputs: {1}'.format(name, unknowns.keys()))
                if unknown is None:
                    # FIXME upgrade OpenMDAO, and the testbench should throw AnalysisException
                    # BUG FIX: this message was previously raised without
                    # .format(), leaving the '{0}' placeholder unfilled.
                    raise Exception('No value from simulator matching requested output \'{0}\'. Perhaps the testbench failed'.format(name))
                if not isinstance(unknown, float):
                    # FIXME should really be a float. TODO fix post_processing_class.py update_metrics_in_report_json in all models everywhere...
                    # warnings.warn('Unknown \'{0}\' produced from a TestBench is not a float'.format(name))
                    unknown = float(unknown)
                output.append(unknown)
            outputs.append(output)
        driver.recorders._recorders.remove(recorder)

    if not driver.UseParametersCSV:
        # Write log file of inputs and outputs.
        with open('parameters.csv', 'w') as f:
            f.write(','.join(driver.inputNames)+','+','.join(driver.outputNames)+'\n')
            for x, row in enumerate(inputs):
                f.write(','.join(str(y) for y in row))
                if x < len(outputs):
                    f.write(','+','.join(str(y) for y in outputs[x]))
                f.write('\n')

    if len(outputs) != len(inputs):
        raise Exception('Simulator returned only {0} results of a requested {1}. See parameters.csv for details.'.format(len(outputs), len(inputs)))

    return asarray(outputs)
Example #8
0
def run_list(problem, driver, inputs):
    """Evaluate ``inputs`` with the driver and return outputs as a numpy array.

    Depending on driver flags, outputs come from the analytical motor design
    model, from a previously written parameters.csv log, or from setting up
    and running the problem's simulation cases.  Unless outputs were read
    from parameters.csv, a fresh parameters.csv log is written.

    Raises an Exception if a requested output name is missing or has no
    value, or if fewer outputs than inputs were produced.
    """
    if driver.TestMotor:  # test the motor design instead of an external model
        outputs = [motorDesign.motor(i, len(driver.outputNames))
                   for i in inputs]
    elif driver.UseParametersCSV:
        # Read log file of inputs and outputs from a previous run.
        outputs = []
        with open('parameters.csv', 'r') as f:
            f.readline()  # skip past the line specifying the names of inputs and outputs
            for line in f:
                # The last len(outputNames) columns of each line are outputs.
                outputs.append([float(num) for num in
                                line.split(',')[-len(driver.outputNames):]])
        # NOTE: the explicit f.close() that used to follow was redundant --
        # the 'with' block already closes the file.
    else:
        recorder = InMemoryRecorder()
        recorder.startup(driver.root)
        driver.recorders.append(recorder)
        problem.setup()
        # list() so each case is a real list (zip is lazy on Python 3);
        # 'row' also avoids shadowing the builtin 'input'.
        driver.runlist = [list(zip(driver.inputNames, row)) for row in inputs]

        if UseParallel:
            driver.sequential = False

        if UseCluster:
            print('Using remote cluster.')
            driver.sequential = False
            # This is necessary more often than it should be.
            driver.ignore_egg_requirements = True
            # Force use of only cluster hosts by adding this requirement.
            driver.extra_resources = dict(allocator='PCCCluster')

        # NOTE(review): super(driver.__class__, ...) recurses if driver is
        # ever subclassed further -- intentional here, left as-is.
        super(driver.__class__, driver).run(problem)

        outputs = []
        missing = object()  # sentinel: distinguishes "absent" from a None value
        for c in recorder.iters:
            unknowns = c['unknowns']
            output = []
            for name in driver.outputNames:
                # Map "subproblem.output" names to their real promoted paths.
                path = name.split(".")
                if len(path) > 1 and path[0] in driver.subproblem_output_meta:
                    real_name = driver.subproblem_output_meta[path[0]][path[1]]
                    real_path = "{}.{}".format(path[0], real_name)
                else:
                    real_path = name
                unknown = unknowns.get(real_path, missing)
                if unknown is missing:
                    raise Exception(
                        'No outputs from simulator matching requested output \'{0}\'. Available outputs: {1}'
                        .format(real_path, unknowns.keys()))
                if unknown is None:
                    # FIXME upgrade OpenMDAO, and the testbench should throw AnalysisException
                    # BUG FIX: this message was previously raised without
                    # .format(), leaving the '{0}' placeholder unfilled.
                    raise Exception(
                        'No value from simulator matching requested output \'{0}\'. Perhaps the testbench failed'
                        .format(real_path))
                if not isinstance(unknown, float):
                    # FIXME should really be a float. TODO fix post_processing_class.py update_metrics_in_report_json in all models everywhere...
                    # warnings.warn('Unknown \'{0}\' produced from a TestBench is not a float'.format(name))
                    unknown = float(unknown)
                output.append(unknown)
            outputs.append(output)
        driver.recorders._recorders.remove(recorder)

    if not driver.UseParametersCSV:
        # Write log file of inputs and outputs.
        with open('parameters.csv', 'w') as f:
            f.write(','.join(driver.inputNames) + ',' +
                    ','.join(driver.outputNames) + '\n')
            for x, row in enumerate(inputs):
                f.write(','.join(str(y) for y in row))
                if x < len(outputs):
                    f.write(',' + ','.join(str(y) for y in outputs[x]))
                f.write('\n')

    if len(outputs) != len(inputs):
        raise Exception(
            'Simulator returned only {0} results of a requested {1}. See parameters.csv for details.'
            .format(len(outputs), len(inputs)))

    return asarray(outputs)
Example #9
0
 def setUp(self):
     """Create a fresh InMemoryRecorder before each test."""
     self.recorder = InMemoryRecorder()
Example #10
0
    def test_load_balanced_doe_soft_fail(self):
        """Non-critical failures during a load-balanced DOE.

        The DOE runs to completion (all num_levels cases are recorded on
        the master rank), and the number of recorded failures matches how
        many cases the failing rank actually received.
        """

        problem = Problem(impl=impl)
        root = problem.root = Group()
        root.add('indep_var', IndepVarComp('x', val=1.0))
        root.add('const', IndepVarComp('c', val=2.0))

        if MPI:
            fail_rank = 1  # raise exception from this rank
        else:
            fail_rank = 0

        # The fail-rank's component fails (non-critically) on the local case
        # indices in fail_idxs; all other ranks never fail.
        fail_idxs = [3, 4, 5]
        if self.comm.rank == fail_rank:
            root.add('mult', ExecComp4Test("y=c*x", fails=fail_idxs))
        else:
            root.add('mult', ExecComp4Test("y=c*x"))

        root.connect('indep_var.x', 'mult.x')
        root.connect('const.c', 'mult.c')

        num_levels = 25
        problem.driver = FullFactorialDriver(num_levels=num_levels,
                                             num_par_doe=self.N_PROCS,
                                             load_balance=True)
        problem.driver.add_desvar('indep_var.x',
                                  lower=1.0,
                                  upper=float(num_levels))
        problem.driver.add_objective('mult.y')

        problem.driver.add_recorder(InMemoryRecorder())

        problem.setup(check=False)
        problem.run()

        # Every recorded case must satisfy y = 2 * x.
        for data in problem.driver.recorders[0].iters:
            self.assertEqual(data['unknowns']['indep_var.x'] * 2.0,
                             data['unknowns']['mult.y'])

        num_cases = len(problem.driver.recorders[0].iters)

        # All cases are collected on rank 0 (see FIXME below); other ranks
        # record nothing.
        if MPI and self.comm.rank > 0:
            self.assertEqual(num_cases, 0)
        else:
            self.assertEqual(num_cases, num_levels)

        nfails = 0
        cases_in_fail_rank = 0
        for data in problem.driver.recorders[0].iters:
            if not data['success']:
                nfails += 1
            # 'mult.case_rank' records which rank executed the case.
            if data['unknowns']['mult.case_rank'] == fail_rank:
                cases_in_fail_rank += 1

        if self.comm.rank == 0:
            # FIXME: for now, all cases get sent back to the master process (0),
            # even when recorders are parallel.

            # there's a chance that the fail rank didn't get enough
            # cases to actually fail 3 times, so we need to check
            # how many cases it actually got.

            # Failures occur at local indices 3, 4, 5, so the expected
            # failure count steps up with each case beyond the 4th that
            # the fail rank received.
            if cases_in_fail_rank > 5:
                self.assertEqual(nfails, 3)
            elif cases_in_fail_rank > 4:
                self.assertEqual(nfails, 2)
            elif cases_in_fail_rank > 3:
                self.assertEqual(nfails, 1)
            else:
                self.assertEqual(nfails, 0)
        else:
            self.assertEqual(nfails, 0)