Example 1
def yaml2df(filename, names=None):
    '''
    Read a yaml file containing summary stats into a dataframe.

    Parameters:
    -----------
    filename: str
        Name of the yaml file to load.
    names: list, optional
        Dataset names used as dataframe keys; auto-generated if not given.
    '''
    if names is None:
        names = []

    data_dict = Processing.load_yaml(filename, package=0)

    # Walk the first key at each level to determine the nesting depth
    level = data_dict
    li = 0  # level index
    while isinstance(level, dict):
        li += 1
        level = level[list(level.keys())[0]]

    data_list = []
    if li == 3:
        for key in data_dict.keys():
            data_list.append(data_dict[key])
        if not names:
            names = ['dataset_' + str(i) for i in range(len(data_list))]

    elif li == 2:
        data_list.append(data_dict)
        names = []
    else:
        raise TypeError(
            '{} is improperly structured for yaml2df.'.format(filename))

    df = dict2df(data_list, names=names)

    return df
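
A minimal usage sketch for yaml2df. The imports below reflect the older pCrunch layout and are assumptions, and the stats file name is purely illustrative.

# Assumed imports for the snippet above (older pCrunch API); verify against your install
from pCrunch import Processing
from pCrunch.pdTools import dict2df

# Hypothetical summary-stats yaml written by FAST_Processing.batch_processing
stats_df = yaml2df('dataset1_SummaryStats.yaml')
print(stats_df.head())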
Example 2
    def generate_rotperf_fast(self,
                              openfast_path,
                              FAST_runDirectory=None,
                              run_BeamDyn=False,
                              debug_level=1,
                              run_type='multi'):
        '''
        Use OpenFAST to generate Cp surface data. This will be slow, especially
        when using BeamDyn, but may be necessary if cc-blade is not sufficient.

        Parameters:
        -----------
        openfast_path: str
            Path to the OpenFAST executable.
        FAST_runDirectory: str
            Directory to run the OpenFAST simulations in.
        run_BeamDyn: bool
            Flag to run BeamDyn (not yet implemented).
        debug_level: int
            0 - no outputs, 1 - simple outputs, 2 - all outputs.
        run_type: str
            'serial' - run in serial, 'multi' - run using python multiprocessing tools,
            'mpi' - run using mpi tools.
        '''

        # Load additional WEIS tools
        from weis.aeroelasticse import runFAST_pywrapper, CaseGen_General
        from weis.aeroelasticse.Util import FileTools
        # Load pCrunch tools
        from pCrunch import pdTools, Processing

        # setup values for surface
        v0 = self.v_rated + 2
        TSR_initial = np.arange(3, 15, 1)
        pitch_initial = np.arange(-1, 25, 1)
        rotspeed_initial = TSR_initial * v0 / self.rotor_radius * RadSec2rpm  # rpms

        # Specify Case Inputs
        case_inputs = {}

        # ------- Setup OpenFAST inputs --------
        case_inputs[('Fst', 'TMax')] = {'vals': [330], 'group': 0}
        case_inputs[('Fst', 'CompInflow')] = {'vals': [1], 'group': 0}
        case_inputs[('Fst', 'CompAero')] = {'vals': [2], 'group': 0}
        case_inputs[('Fst', 'CompServo')] = {'vals': [1], 'group': 0}
        case_inputs[('Fst', 'CompHydro')] = {'vals': [0], 'group': 0}
        if run_BeamDyn:
            case_inputs[('Fst', 'CompElast')] = {'vals': [2], 'group': 0}
        else:
            case_inputs[('Fst', 'CompElast')] = {'vals': [1], 'group': 0}
        case_inputs[('Fst', 'OutFileFmt')] = {'vals': [2], 'group': 0}

        # AeroDyn15
        case_inputs[('AeroDyn15', 'WakeMod')] = {'vals': [1], 'group': 0}
        case_inputs[('AeroDyn15', 'AfAeroMod')] = {'vals': [1], 'group': 0}
        case_inputs[('AeroDyn15', 'TwrPotent')] = {'vals': [0], 'group': 0}

        # ElastoDyn
        case_inputs[('ElastoDyn', 'FlapDOF1')] = {'vals': ['True'], 'group': 0}
        case_inputs[('ElastoDyn', 'FlapDOF2')] = {'vals': ['True'], 'group': 0}
        case_inputs[('ElastoDyn', 'EdgeDOF')] = {'vals': ['True'], 'group': 0}
        case_inputs[('ElastoDyn', 'TeetDOF')] = {'vals': ['False'], 'group': 0}
        case_inputs[('ElastoDyn', 'DrTrDOF')] = {'vals': ['False'], 'group': 0}
        case_inputs[('ElastoDyn', 'GenDOF')] = {'vals': ['False'], 'group': 0}
        case_inputs[('ElastoDyn', 'YawDOF')] = {'vals': ['False'], 'group': 0}
        case_inputs[('ElastoDyn', 'TwFADOF1')] = {
            'vals': ['False'],
            'group': 0
        }
        case_inputs[('ElastoDyn', 'TwFADOF2')] = {
            'vals': ['False'],
            'group': 0
        }
        case_inputs[('ElastoDyn', 'TwSSDOF1')] = {
            'vals': ['False'],
            'group': 0
        }
        case_inputs[('ElastoDyn', 'TwSSDOF2')] = {
            'vals': ['False'],
            'group': 0
        }
        case_inputs[('ElastoDyn', 'PtfmSgDOF')] = {
            'vals': ['False'],
            'group': 0
        }
        case_inputs[('ElastoDyn', 'PtfmSwDOF')] = {
            'vals': ['False'],
            'group': 0
        }
        case_inputs[('ElastoDyn', 'PtfmHvDOF')] = {
            'vals': ['False'],
            'group': 0
        }
        case_inputs[('ElastoDyn', 'PtfmRDOF')] = {
            'vals': ['False'],
            'group': 0
        }
        case_inputs[('ElastoDyn', 'PtfmPDOF')] = {
            'vals': ['False'],
            'group': 0
        }
        case_inputs[('ElastoDyn', 'PtfmYDOF')] = {
            'vals': ['False'],
            'group': 0
        }

        # BeamDyn
        # NEEDED: BeamDyn case inputs (run_BeamDyn is not yet supported)

        # InflowWind
        case_inputs[('InflowWind', 'WindType')] = {'vals': [1], 'group': 0}
        case_inputs[('InflowWind', 'HWindSpeed')] = {'vals': [v0], 'group': 0}
        case_inputs[('InflowWind', 'PLexp')] = {'vals': [0], 'group': 0}

        # ServoDyn
        case_inputs[('ServoDyn', 'PCMode')] = {'vals': [0], 'group': 0}
        case_inputs[('ServoDyn', 'VSContrl')] = {'vals': [0], 'group': 0}
        case_inputs[('ServoDyn', 'HSSBrMode')] = {'vals': [0], 'group': 0}
        case_inputs[('ServoDyn', 'YCMode')] = {'vals': [0], 'group': 0}

        # ------- Setup sweep values inputs --------
        case_inputs[('ElastoDyn', 'BlPitch1')] = {
            'vals': list(pitch_initial),
            'group': 1
        }
        case_inputs[('ElastoDyn', 'BlPitch2')] = {
            'vals': list(pitch_initial),
            'group': 1
        }
        case_inputs[('ElastoDyn', 'BlPitch3')] = {
            'vals': list(pitch_initial),
            'group': 1
        }
        case_inputs[('ElastoDyn', 'RotSpeed')] = {
            'vals': list(rotspeed_initial),
            'group': 2
        }
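
        # Note: CaseGen_General holds 'group': 0 entries fixed across all cases,
        # varies entries that share a nonzero group together, and combines
        # different nonzero groups as a full factorial, so this sweep produces
        # len(pitch_initial) * len(rotspeed_initial) cases, one per
        # (pitch, TSR) grid point of the Cp surface.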

        # FAST details
        fastBatch = runFAST_pywrapper.runFAST_pywrapper_batch(
            FAST_ver='OpenFAST', dev_branch=True)
        fastBatch.FAST_exe = openfast_path  # Path to executable
        fastBatch.FAST_InputFile = self.fast.FAST_InputFile
        fastBatch.FAST_directory = self.fast.FAST_directory
        if not FAST_runDirectory:
            FAST_runDirectory = os.path.join(os.getcwd(), 'RotPerf_OpenFAST')
        fastBatch.FAST_runDirectory = FAST_runDirectory
        fastBatch.debug_level = debug_level

        # Generate cases
        case_name_base = self.TurbineName + '_rotperf'
        case_list, case_name_list = CaseGen_General.CaseGen_General(
            case_inputs,
            dir_matrix=fastBatch.FAST_runDirectory,
            namebase=case_name_base)
        fastBatch.case_list = case_list
        fastBatch.case_name_list = case_name_list

        # Make sure proper outputs exist
        var_out = [
            # ElastoDyn (this is probably overkill on the outputs)
            "BldPitch1",
            "BldPitch2",
            "BldPitch3",
            "Azimuth",
            "RotSpeed",
            "GenSpeed",
            "NacYaw",
            "OoPDefl1",
            "IPDefl1",
            "TwstDefl1",
            "OoPDefl2",
            "IPDefl2",
            "TwstDefl2",
            "OoPDefl3",
            "IPDefl3",
            "TwstDefl3",
            "RootFxc1",
            "RootFyc1",
            "RootFzc1",
            "RootMxc1",
            "RootMyc1",
            "RootMzc1",
            "RootFxc2",
            "RootFyc2",
            "RootFzc2",
            "RootMxc2",
            "RootMyc2",
            "RootMzc2",
            "RootFxc3",
            "RootFyc3",
            "RootFzc3",
            "RootMxc3",
            "RootMyc3",
            "RootMzc3",
            "Spn1MLxb1",
            "Spn1MLyb1",
            "Spn1MLzb1",
            "Spn1MLxb2",
            "Spn1MLyb2",
            "Spn1MLzb2",
            "Spn1MLxb3",
            "Spn1MLyb3",
            "Spn1MLzb3",
            "RotThrust",
            "LSSGagFya",
            "LSSGagFza",
            "RotTorq",
            "LSSGagMya",
            "LSSGagMza",
            # ServoDyn
            "GenPwr",
            "GenTq",
            # AeroDyn15
            "RtArea",
            "RtVAvgxh",
            "B1N3Clrnc",
            "B2N3Clrnc",
            "B3N3Clrnc",
            "RtAeroCp",
            'RtAeroCq',
            'RtAeroCt',
            'RtTSR',  # NECESSARY
            # InflowWind
            "Wind1VelX",
        ]
        channels = {var: True for var in var_out}
        fastBatch.channels = channels

        # Run OpenFAST
        if run_type.lower() == 'multi':
            fastBatch.run_multi()
        elif run_type.lower() == 'mpi':
            fastBatch.run_mpi()
        elif run_type.lower() == 'serial':
            fastBatch.run_serial()
        else:
            raise ValueError("run_type must be 'serial', 'multi', or 'mpi'.")

        # ========== Post Processing ==========
        # Save statistics
        fp = Processing.FAST_Processing()

        # Find all outfiles
        fname_case_matrix = os.path.join(FAST_runDirectory, 'case_matrix.yaml')
        case_matrix = FileTools.load_yaml(fname_case_matrix, package=1)
        cm = pd.DataFrame(case_matrix)
        # Parse the case matrix and build the outfile names
        case_names = cm['Case_Name']
        outfiles = []
        for name in case_names:
            outfiles.append(os.path.join(FAST_runDirectory, name + '.outb'))

        # Set some processing parameters
        fp.OpenFAST_outfile_list = outfiles
        fp.namebase = case_name_base
        fp.t0 = 270  # discard the first 270 s of each 330 s simulation as transient
        fp.parallel_analysis = True
        fp.results_dir = os.path.join(FAST_runDirectory, 'stats')
        fp.verbose = True
        # Writing stats to file is disabled here; enable these for debugging
        fp.save_LoadRanking = False
        fp.save_SummaryStats = False

        print('Processing openfast data on {} cores.'.format(
            fp.parallel_cores))

        # Load and save statistics and load rankings
        stats, load_rankings = fp.batch_processing()

        # Get means over the last 60 seconds (t0 = 270 s) of each 330 s simulation
        CP = stats[0]['RtAeroCp']['mean']
        CT = stats[0]['RtAeroCt']['mean']
        CQ = stats[0]['RtAeroCq']['mean']

        # Reshape Cp, Ct and Cq
        Cp = np.transpose(
            np.reshape(CP, (len(pitch_initial), len(TSR_initial))))
        Ct = np.transpose(
            np.reshape(CT, (len(pitch_initial), len(TSR_initial))))
        Cq = np.transpose(
            np.reshape(CQ, (len(pitch_initial), len(TSR_initial))))

        # Store necessary metrics for analysis
        self.pitch_initial_rad = pitch_initial * deg2rad
        self.TSR_initial = TSR_initial
        self.Cp_table = Cp
        self.Ct_table = Ct
        self.Cq_table = Cq
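
The reshape above assumes the flat list of per-case means is ordered with rotor speed (TSR) varying fastest within each pitch value; transposing then gives tables indexed as [TSR, pitch]. A small, self-contained numpy sketch of that bookkeeping with toy grid sizes (illustrative only):

import numpy as np

# Toy grid sizes standing in for pitch_initial and TSR_initial
n_pitch, n_tsr = 4, 3

# Fake per-case means, ordered as the reshape assumes:
# all TSR values for pitch 0, then all TSR values for pitch 1, ...
cp_flat = np.arange(n_pitch * n_tsr, dtype=float)

# Recover the surface as Cp[TSR index, pitch index], matching self.Cp_table
Cp = np.transpose(np.reshape(cp_flat, (n_pitch, n_tsr)))
assert Cp.shape == (n_tsr, n_pitch)
assert Cp[2, 1] == cp_flat[1 * n_tsr + 2]  # TSR index 2 at pitch index 1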
Example 3
fastBatch.channels = channels

# Execute
case_list, case_name_list = iec.execute(case_inputs=case_inputs)
fastBatch.case_list = case_list
fastBatch.case_name_list = case_name_list
fastBatch.debug_level = debug_level
if multi:
    fastBatch.run_multi()
else:
    fastBatch.run_serial()

# Save statistics
if save_stats:
    from pCrunch import pdTools, Processing
    fp = Processing.FAST_Processing()

    # Find all outfiles
    outfiles = []
    for file in os.listdir(run_dir):
        if file.endswith(('.outb', '.out')):
            outfiles.append(os.path.join(run_dir, file))

    # Set some processing parameters
    fp.OpenFAST_outfile_list = outfiles
    fp.namebase = case_name_base
Example 4
case_matrix = FileTools.load_yaml(fname_case_matrix, package=1)
cm = pd.DataFrame(case_matrix)

# Define DLCs
DLCs = [1.1, 1.3]
# Pare down case matrix for desired runs 
cm2 = pd.concat([cm[cm[('IEC', 'DLC')] == dlc].iloc[1:15] for dlc in DLCs]).reset_index()
# Pull desired outfile names from case matrix dataframe
outfiles = []
for dlc in DLCs:
    case_names = cm2[cm2[('IEC', 'DLC')] == dlc]['Case_Name']
    outnames = list(outfile_base + case_names + '.outb')
    outfiles.append(outnames)

# Initialize processing classes
fp = Processing.FAST_Processing()
fa = Analysis.Loads_Analysis()

# Set some processing parameters
fp.OpenFAST_outfile_list = outfiles
fp.dataset_names = ['DLC1.1', 'DLC1.3']
fp.t0 = 30
fp.parallel_analysis = True
fp.results_dir = 'temp_results'
fp.save_LoadRanking = True
fp.save_SummaryStats = True
fp.verbose = True

# Load and save statistics and load rankings
stats, load_rankings = fp.batch_processing()
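
The pare-down above relies on pandas boolean indexing over a tuple-labeled column of the case matrix. A minimal, self-contained sketch of the same pattern on a toy dataframe; the case names, DLC values, and outfile_base path here are purely illustrative:

import pandas as pd

# Toy stand-in for the case-matrix dataframe; the real one is built from
# case_matrix.yaml and labels columns with ('module', 'variable') tuples.
cm_demo = pd.DataFrame({'Case_Name': ['c00', 'c01', 'c02', 'c03']})
cm_demo[('IEC', 'DLC')] = [1.1, 1.1, 1.3, 1.3]

DLCs = [1.1, 1.3]
# Boolean-index on the tuple column to pare the matrix down per DLC
cm2_demo = pd.concat(
    [cm_demo[cm_demo[('IEC', 'DLC')] == dlc] for dlc in DLCs]).reset_index()

# Build one list of outfile names per DLC, mirroring the loop above
outfile_base = 'runs/'  # illustrative path only
outfiles_demo = []
for dlc in DLCs:
    case_names = cm2_demo[cm2_demo[('IEC', 'DLC')] == dlc]['Case_Name']
    outfiles_demo.append(list(outfile_base + case_names + '.outb'))
print(outfiles_demo)
# [['runs/c00.outb', 'runs/c01.outb'], ['runs/c02.outb', 'runs/c03.outb']]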
Example 5
    def postFAST_steady(self):
        """
        Post process results to get steady state information for all initial conditions at each wind speed
        Save as ss_ops.yaml for 
        """

        # Plot steady states vs wind speed
        PLOT = 0

        # Define input files paths
        output_dir = self.FAST_steadyDirectory

        # Find all outfiles
        outfiles = []
        for file in os.listdir(output_dir):
            if file.endswith('.outb'):
                outfiles.append(os.path.join(output_dir, file))
            # elif file.endswith('.out') and not file.endswith('.MD.out'):
            #     outfiles.append(os.path.join(output_dir,file))

        # Initialize processing classes
        fp = Processing.FAST_Processing()

        # Set some processing parameters
        fp.OpenFAST_outfile_list = outfiles
        fp.t0 = self.TMax - 400  # make sure this is less than simulation time
        fp.parallel_analysis = self.parallel
        fp.parallel_cores = self.cores
        fp.results_dir = os.path.join(output_dir, 'stats')
        fp.verbose = True
        fp.save_LoadRanking = True
        fp.save_SummaryStats = True

        # Load and save statistics and load rankings
        if self.overwrite or not os.path.exists(
                os.path.join(output_dir, 'ss_ops.yaml')):
            stats, _ = fp.batch_processing()

            if isinstance(stats, list):
                stats = stats[0]

            windSortInd = np.argsort(stats['Wind1VelX']['mean'])

            #            FAST output name,  FAST IC name
            ssChannels = [
                ['Wind1VelX', 'Wind1VelX'],
                ['OoPDefl1', 'OoPDefl'],
                ['IPDefl1', 'IPDefl'],
                ['BldPitch1', 'BlPitch1'],
                ['RotSpeed', 'RotSpeed'],
                ['TTDspFA', 'TTDspFA'],
                ['TTDspSS', 'TTDspSS'],
                ['PtfmSurge', 'PtfmSurge'],
                ['PtfmSway', 'PtfmSway'],
                ['PtfmHeave', 'PtfmHeave'],
                ['PtfmRoll', 'PtfmRoll'],
                ['PtfmYaw', 'PtfmYaw'],
                ['PtfmPitch', 'PtfmPitch'],
            ]

            ssChanData = {}
            for iChan in ssChannels:
                try:
                    ssChanData[iChan[1]] = np.array(
                        stats[iChan[0]]['mean'])[windSortInd].tolist()
                except KeyError:
                    print('Warning: ' + iChan[0] + ' is not in OutList')

            if PLOT:
                fig1 = plt.figure()
                ax1 = fig1.add_subplot(211)
                ax2 = fig1.add_subplot(212)

                ax1.plot(ssChanData['Wind1VelX'], ssChanData['BlPitch1'])
                ax2.plot(ssChanData['Wind1VelX'], ssChanData['RotSpeed'])

                fig2 = plt.figure()
                ax1 = fig2.add_subplot(411)
                ax2 = fig2.add_subplot(412)
                ax3 = fig2.add_subplot(413)
                ax4 = fig2.add_subplot(414)

                ax1.plot(ssChanData['Wind1VelX'], ssChanData['OoPDefl'])
                ax2.plot(ssChanData['Wind1VelX'], ssChanData['IPDefl'])
                ax3.plot(ssChanData['Wind1VelX'], ssChanData['TTDspFA'])
                ax4.plot(ssChanData['Wind1VelX'], ssChanData['TTDspSS'])

                fig3 = plt.figure()
                ax1 = fig3.add_subplot(611)
                ax2 = fig3.add_subplot(612)
                ax3 = fig3.add_subplot(613)
                ax4 = fig3.add_subplot(614)
                ax5 = fig3.add_subplot(615)
                ax6 = fig3.add_subplot(616)

                ax1.plot(ssChanData['Wind1VelX'], ssChanData['PtfmSurge'])
                ax2.plot(ssChanData['Wind1VelX'], ssChanData['PtfmSway'])
                ax3.plot(ssChanData['Wind1VelX'], ssChanData['PtfmHeave'])
                ax4.plot(ssChanData['Wind1VelX'], ssChanData['PtfmRoll'])
                ax5.plot(ssChanData['Wind1VelX'], ssChanData['PtfmPitch'])
                ax6.plot(ssChanData['Wind1VelX'], ssChanData['PtfmYaw'])

                plt.show()

            # output steady states to yaml
            save_yaml(output_dir, 'ss_ops.yaml', ssChanData)
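
For reference, ss_ops.yaml is a flat mapping from FAST initial-condition names (Wind1VelX, BlPitch1, RotSpeed, ...) to lists of wind-speed-sorted steady-state means, so it can be read back with a plain yaml loader, assuming save_yaml writes standard YAML. A minimal sketch; the file path is illustrative:

import yaml

# Read back the steady-state operating points written by postFAST_steady
with open('ss_ops.yaml') as f:  # path is illustrative
    ss_ops = yaml.safe_load(f)

wind = ss_ops['Wind1VelX']   # sorted wind speeds
pitch = ss_ops['BlPitch1']   # steady blade pitch at each wind speed
for u, p in zip(wind, pitch):
    print('{:5.1f} m/s -> {:6.2f} deg pitch'.format(u, p))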
Example 6
        ]
        outFileThere = [
            os.path.exists(outFileName) for outFileName in outFileNames
        ]

        # Run simulations if they're not all there or if you want to overwrite
        if not all(outFileThere) or overwrite:
            if cores > 1:
                fastBatch.run_multi(cores)
            else:
                fastBatch.run_serial()

        # Set up pCrunch
        # Initialize processing classes
        fp = Processing.FAST_Processing()

        # Set some processing parameters
        fp.OpenFAST_outfile_list = outFileNames
        fp.t0 = 100  # DZ: I'd like this to be 60 or 100, but there are errors there
        fp.parallel_analysis = True
        fp.parallel_cores = cores
        fp.results_dir = os.path.join(runDir, 'stats')
        fp.verbose = True
        fp.save_LoadRanking = True
        fp.save_SummaryStats = True

        # Load and save statistics and load rankings
        if not os.path.exists(
                os.path.join(fp.results_dir,
                             'dataset1_LoadRanking.yaml')) or reCrunch:
            stats, load_rankings = fp.batch_processing()
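
Once batch_processing has run, the cached results under fp.results_dir can be inspected without re-crunching. A minimal sketch that reads the load-ranking file checked for above, assuming pCrunch saved it as standard YAML:

import os
import yaml

results_dir = os.path.join(runDir, 'stats')  # same location as fp.results_dir above
with open(os.path.join(results_dir, 'dataset1_LoadRanking.yaml')) as f:
    load_ranking = yaml.safe_load(f)

# Inspect which channels/statistics were ranked
print(list(load_ranking.keys()))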