def omdaoGroup(record):
    """Run several OpenMDAO-wrapped FAST v8 cases concurrently in a ParallelFDGroup.

    Builds one config dictionary per case (varying only the integration
    timestep DT), hands the collection to FASTv8_AeroElasticSolver, and
    executes the resulting OpenMDAO problem.

    Parameters
    ----------
    record : bool
        If True, attach an SqliteRecorder ('omdaoparallel.sqlite') to the
        driver so case data is persisted (requires sqlite support).
    """
    # print() form is valid on both Python 2 (as a parenthesized expression)
    # and Python 3; the old statement form was a Py3 syntax error.
    print('Executing multiple instances of OpenMDAO-defined FAST components in parallel')
    print('')

    # Parallel execution entails additional dependencies--be sure they are installed

    # If example = 3, be sure to run callv8Wrapper using RunParallel script file
    # or using equivalent mpi commands, otherwise cases will run in serial.

    # Imports (kept function-local so the rest of the module works without them)
    from runFAST_v8 import runFAST_v8
    from openmdao.api import Group, Problem, Component, IndepVarComp
    from openmdao.api import ParallelGroup, ParallelFDGroup
    from openmdao.core.mpi_wrap import MPI
    if MPI:
        # Under MPI, use the PETSc data-passing implementation.
        from openmdao.core.petsc_impl import PetscImpl as impl
    else:
        from openmdao.core.basic_impl import BasicImpl as impl
    from FASTv8_aeroelasticsolver import FASTv8_Workflow, FASTv8_AeroElasticSolver

    # Initial OpenMDAO problem setup for parallel group
    top = Problem(impl=impl, root=ParallelFDGroup(1))  # use for parallel
    root = top.root

    # Setup input config dictionary of dictionaries.
    caseids = ['omdaoParallelCase1', 'omdaoParallelCase2', 'omdaoParallelCase3', 'omdaoParallelCase4']
    # Multiple timestep sizes to test--currently limited to even multiples of TMax.
    DT = [0.01, 0.02, 0.025, 0.03]
    cfg_master = {}  # master config dictionary (dictionary of dictionaries)

    # zip() keeps caseids and DT in lockstep and adapts if the lists change length
    # (the old range(4) loop silently assumed both had exactly 4 entries).
    for caseid, dt in zip(caseids, DT):
        # Per-case configuration; fst_exe/fst_dir/fst_file are the same for all cases.
        cfg = {}
        cfg['DT'] = dt
        cfg['TMax'] = 60
        cfg['fst_exe'] = "../../../FAST_v8/bin/FAST_glin64"
        cfg['fst_dir'] = "TemplateTest/"
        cfg['fst_file'] = "Test18.fst"

        # Put dictionary into master dictionary, keyed by caseid
        cfg_master[caseid] = cfg

    # Add parallel group to omdao problem, pass in master config file
    root.add('FASTcases', FASTv8_AeroElasticSolver(cfg_master, caseids))

    # Set up recorder if desired (requires sqlite)
    if record:
        from openmdao.api import SqliteRecorder
        recorder = SqliteRecorder('omdaoparallel.sqlite')
        top.driver.add_recorder(recorder)

    top.setup()
    top.run()

    top.cleanup()  # Good practice, especially when using recorder
Beispiel #2
0
def setup_1comp_model(par_fds, size, mult, add, delay):
    """Build, set up, and run a one-component OpenMDAO model.

    An IndepVarComp 'P1' feeds a ScalableComp 'C1'; the root is a plain
    Group when par_fds == 1 (serial) and a ParallelFDGroup otherwise.
    Returns the executed Problem.
    """
    prob = Problem(impl=impl)
    # Serial root for a single FD process, parallel-FD root otherwise.
    prob.root = Group() if par_fds == 1 else ParallelFDGroup(par_fds)

    model = prob.root
    model.add('P1', IndepVarComp('x', np.ones(size)))
    model.add('C1', ScalableComp(size, mult, add, delay))
    model.connect('P1.x', 'C1.x')

    driver = prob.driver
    driver.add_desvar('P1.x')
    driver.add_objective('C1.y')

    prob.setup(check=False)
    prob.run()
    return prob
Beispiel #3
0
def set_top(optimize_flag):
    """Create the top-level OpenMDAO Problem with a SciPy SLSQP driver.

    When optimize_flag is truthy the root is a ParallelFDGroup(5) so
    finite differences run in parallel; otherwise a plain Group is used.
    Forced finite differencing with step size 1e-3 is enabled on the root.
    Returns the configured (but not yet set-up) Problem.
    """
    par_fd = 5

    root = ParallelFDGroup(par_fd) if optimize_flag else Group()
    top = Problem(impl=impl, root=root)

    # Driver: SciPy SLSQP with a loose-ish convergence tolerance.
    driver = ScipyOptimizer()
    driver.options['optimizer'] = 'SLSQP'
    driver.options['tol'] = 1.0e-5
    top.driver = driver
    # Alternative driver (disabled): pyOptSparseDriver with IPOPT, e.g.
    #   top.driver = pyOptSparseDriver()
    #   top.driver.options['optimizer'] = 'IPOPT'
    #   top.driver.opt_settings['mu_strategy'] = 'adaptive'
    #   top.driver.opt_settings['linear_solver'] = 'ma27'
    #   top.driver.opt_settings['max_iter'] = 40
    #   top.driver.opt_settings['tol'] = 1.e-5

    # Force finite-difference derivatives on the whole model.
    fd_opts = top.root.fd_options
    fd_opts['force_fd'] = True
    fd_opts['step_size'] = 1.e-3
    return top
Beispiel #4
0
                    help='The excel input file.')
# Second CLI option: the worksheet name inside the Excel input file.
parser.add_argument('-s',
                    '--string',
                    dest='insheet',
                    type=str,
                    required=True,
                    metavar='EXCEL_SHEET',
                    help='The excel sheet as a string.')

# Parse the command line and unpack the two required arguments.
args = parser.parse_args()
infile = args.infile
insheet = args.insheet

# Initial OpenMDAO problem setup for parallel group.
# NOTE(review): 'impl' and 'ParallelFDGroup' must be imported earlier in
# this file (MPI-aware impl selection) -- not visible in this chunk.
top = Problem(impl=impl, root=ParallelFDGroup(1))
root = top.root

# ===================== Input Section =====================

# Working directories for the simulation run, wind files, and post-processing.
rundir = '02_Run'
winddir = '04_Wind'
postdir = '03_Postpro'

# Initialize general dlc (design load case) config dictionary.
dlc_cfg = {}  # dictionary describing what we do
dlc_cfg['SettingsFromExcel'] = True
dlc_cfg['Standard'] = ''  # IEC, DIBT, DNVGL
dlc_cfg['DLC'] = ''
dlc_cfg['WindModel'] = ''
dlc_cfg['WindFileFormat'] = ''  # Either Bladed (.wnd) or Turbsim format (.bts)
Beispiel #5
0
def configure(nsec, dry_run=False, FPM=False, par_fd=1):
    """Assemble the DTU 10MW blade planform + structure OpenMDAO problem.

    Builds splined planform and structure components, a lofted blade
    surface, and a BECAS-based stiffness analysis, wires two UD-thickness
    design variables to a blade-mass objective, and runs ``setup()``.

    Parameters
    ----------
    nsec : int
        Unused in this body (section counts are hard-coded to 50/4 below);
        kept for interface compatibility.  # NOTE(review): confirm with callers
    dry_run : bool
        Passed through to the CS2DtoBECAS and BECASWrapper configs.
    FPM : bool
        Sets the 'hawc2_FPM' flag in the BECASWrapper config.
    par_fd : int
        Number of parallel finite-difference processes for the root group.

    Returns
    -------
    Problem
        The set-up (but not yet run) OpenMDAO problem.
    """
    p = Problem(impl=impl, root=ParallelFDGroup(par_fd))

    p.root.add('blade_length_c',
               IndepVarComp('blade_length', 86.366),
               promotes=['*'])

    # Read the blade planform and redistribute it onto the aero/structural grids.
    pf = read_blade_planform('data/DTU_10MW_RWT_blade_axis_prebend.dat')
    nsec_ae = 50
    nsec_st = 4
    s_ae = np.linspace(0, 1, nsec_ae)
    s_st = np.linspace(0, 1, nsec_st)
    pf = redistribute_planform(pf, s=s_ae)

    spl = p.root.add('pf_splines', SplinedBladePlanform(pf), promotes=['*'])
    spl.configure()
    redist = p.root.add('pf_st',
                        PGLRedistributedPlanform('_st', nsec_ae, s_st),
                        promotes=['*'])

    # Lofted blade surface: blend four base airfoils along the span.
    cfg = {}
    cfg['redistribute_flag'] = False
    cfg['blend_var'] = np.array([0.241, 0.301, 0.36, 1.0])
    afs = []
    for f in [
            'data/ffaw3241.dat', 'data/ffaw3301.dat', 'data/ffaw3360.dat',
            'data/cylinder.dat'
    ]:
        afs.append(np.loadtxt(f))
    cfg['base_airfoils'] = afs
    surf = p.root.add('blade_surf',
                      PGLLoftedBladeSurface(cfg,
                                            size_in=nsec_st,
                                            size_out=(200, nsec_st, 3),
                                            suffix='_st'),
                      promotes=['*'])

    # Read the blade structure ...
    st3d = read_bladestructure('data/DTU10MW')
    # ... and interpolate onto the structural grid.
    st3dn = interpolate_bladestructure(st3d, s_st)

    spl = p.root.add('st_splines',
                     SplinedBladeStructure(st3dn),
                     promotes=['*'])
    # spl.add_spline('DP04', np.linspace(0, 1, 4), spline_type='bezier')
    spl.add_spline('r04uniaxT', np.linspace(0, 1, 2), spline_type='bezier')
    spl.add_spline('r08uniaxT', np.linspace(0, 1, 2), spline_type='bezier')
    # spl.add_spline('w02biaxT', np.linspace(0, 1, 4), spline_type='bezier')
    spl.configure()

    # Inputs to CS2DtoBECAS and BECASWrapper.
    config = {}
    cfg = {}
    cfg['dry_run'] = dry_run
    cfg['path_shellexpander'] = '/Users/frza/git/BECAS_stable/shellexpander/shellexpander'
    cfg['dominant_elsets'] = ['REGION04', 'REGION08']
    cfg['max_layers'] = 0
    config['CS2DtoBECAS'] = cfg
    cfg = {}
    cfg['path_becas'] = '/Users/frza/git/BECAS_stable/BECAS/src/matlab'
    cfg['hawc2_FPM'] = FPM
    cfg['dry_run'] = dry_run
    cfg['analysis_mode'] = 'stiffness'
    config['BECASWrapper'] = cfg

    p.root.add('stiffness',
               BECASBeamStructure(p.root, config, st3dn, (200, nsec_st, 3)),
               promotes=['*'])

    # Design variables: uniax thickness splines in regions 04 and 08;
    # objective: blade mass. Derivatives via forced finite differences.
    p.root.add('dv0', IndepVarComp('ud_T', np.zeros(2)))
    p.driver.add_desvar('dv0.ud_T')
    p.root.connect('dv0.ud_T', 'r04uniaxT_C')
    p.root.connect('dv0.ud_T', 'r08uniaxT_C')
    p.driver.add_objective('blade_mass')
    p.root.fd_options['force_fd'] = True
    p.root.fd_options['step_size'] = 1.e-3

    p.setup()
    # .items() instead of .iteritems(): identical iteration on Python 2,
    # and .iteritems() does not exist on Python 3.
    for k, v in pf.items():
        # Membership test on the dict directly (no .keys() call needed).
        if k in p.root.pf_splines.params:
            p.root.pf_splines.params[k] = v

    # p['hub_radius'] = 2.8
    # p['blade_x'] = d.pf['x'] * 86.366
    # p['blade_z'] = d.pf['y'] * 86.366
    # p['blade_y'] = d.pf['z'] * 86.366
    return p