Example no. 1

import json
import os
from glob import glob

# NOTE: log_msg is assumed to be provided by the whale workflow utilities
# imported in the later examples
def collect_surface_motion(runDir, bldg_data, surfaceMoDir=''):

    # default to a surface_motions folder under the run directory
    if surfaceMoDir == '':
        surfaceMoDir = f"{runDir}/surface_motions/"

    for bldg in bldg_data:
        log_msg(bldg)

        bldg_id = bldg['id']

        if bldg_id is not None:

            mPaths = glob(f"{runDir}/{bldg_id}/workdir.*/EVENT.json")

            surfMoTmpDir = f"{surfaceMoDir}/{bldg_id}/"
            os.makedirs(surfMoTmpDir, exist_ok=True)

            for p in mPaths:
                newEVENT = {}

                # load the event file and keep only the fields we need
                with open(p, 'r') as f:
                    EVENT_in_All = json.load(f)

                    event = EVENT_in_All['Events'][0]
                    newEVENT['name'] = event['event_id'].replace('x', '-')
                    newEVENT['location'] = event['location']
                    newEVENT['dT'] = event['dT']

                    # first (x) component of the ground motion
                    newEVENT['data_x'] = event['timeSeries'][0]['data']
                    newEVENT['PGA_x'] = max(newEVENT['data_x'])

                    # a second time series indicates two-way shaking
                    if len(event['timeSeries']) > 1:
                        newEVENT['data_y'] = event['timeSeries'][1]['data']
                        newEVENT['PGA_y'] = max(newEVENT['data_y'])

                    # save the simplified event record
                    with open(f"{surfMoTmpDir}/EVENT-{newEVENT['name']}.json",
                              "w") as outfile:
                        json.dump(newEVENT, outfile)


    return surfaceMoDir
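
A minimal usage sketch for collect_surface_motion, assuming a run directory produced by the regional workflows below and a building list in the format written by WF.create_building_files(); every path here is a hypothetical placeholder:

# hypothetical driver for collect_surface_motion (placeholder paths)
run_dir = '/tmp/rWHALE_run'

with open(f"{run_dir}/buildings.json", 'r') as f:  # assumed building list
    bldg_data = json.load(f)

surface_dir = collect_surface_motion(run_dir, bldg_data)
print(f"surface motions collected under {surface_dir}")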
Example no. 2

import json
import os

# assumed imports for this and the following examples: in the WHALE codebase
# the workflow module and logging helpers are typically imported along
# these lines
import whale.main as whale
from whale.main import log_msg, log_div


def main(run_type, input_file, app_registry):

    # initialize the log file
    with open(input_file, 'r') as f:
        inputs = json.load(f)
    runDir = inputs['runDir']

    whale.log_file = runDir + '/log.txt'
    with open(whale.log_file, 'w') as f:
        f.write('femUQ workflow\n')

    # echo the inputs
    log_msg(log_div)
    log_msg('Started running the workflow script')
    log_msg(log_div)

    WF = whale.Workflow(
        run_type,
        input_file,
        app_registry,
        app_type_list=['Event', 'Modeling', 'EDP', 'Simulation', 'UQ'])

    # initialize the working directory
    WF.init_simdir()

    # prepare the input files for the simulation
    WF.create_RV_files(app_sequence=['Event', 'Modeling', 'EDP', 'Simulation'])

    # create the workflow driver file
    WF.create_driver_file(
        app_sequence=['Event', 'Modeling', 'EDP', 'Simulation'])

    # run uq engine to simulate response
    WF.simulate_response()

Example no. 3

def main(run_type, input_file, app_registry, working_dir, app_dir, log_file):

    # initialize the log file
    with open(input_file, 'r') as f:
        inputs = json.load(f)

    # an explicit working_dir overrides the runDir from the input file
    if working_dir is not None:
        runDir = working_dir
    else:
        runDir = inputs['runDir']

    whale.log_file = runDir + '/log.txt'
    with open(whale.log_file, 'w') as f:
        f.write('sWHALE workflow\n')

    whale.print_system_info()

    # echo the inputs
    log_msg(log_div)
    log_msg('Started running the workflow script')
    log_msg(log_div)

    # If there is an external EDP file provided, change the run_type to
    # loss_only (see the chained .get() sketch after this function)
    try:
        if inputs['DamageAndLoss']['ResponseModel']['ResponseDescription'][
                'EDPDataFile'] is not None:
            run_type = 'loss_only'
    except (KeyError, TypeError):
        pass

    WF = whale.Workflow(
        run_type,
        input_file,
        app_registry,
        app_type_list=['Event', 'Modeling', 'EDP', 'Simulation', 'UQ', 'DL'],
        working_dir=working_dir,
        app_dir=app_dir)

    if WF.run_type != 'loss_only':

        # initialize the working directory
        WF.init_simdir()

        # prepare the input files for the simulation
        WF.create_RV_files(
            app_sequence=['Event', 'Modeling', 'EDP', 'Simulation'])

        # create the workflow driver file
        WF.create_driver_file(
            app_sequence=['Event', 'Modeling', 'EDP', 'Simulation'])

        # run uq engine to simulate response
        WF.simulate_response()

    if WF.run_type != 'set_up':

        # run dl engine to estimate losses
        WF.estimate_losses(input_file=input_file)
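
The nested lookup guarded by try/except above can also be written with chained dict.get() calls; a behavior-equivalent sketch, assuming the intermediate values are dicts:

# equivalent lookup without the try/except
edp_file = (inputs.get('DamageAndLoss', {})
                  .get('ResponseModel', {})
                  .get('ResponseDescription', {})
                  .get('EDPDataFile'))
if edp_file is not None:
    run_type = 'loss_only'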

Example no. 4

def main(run_type, input_file, app_registry):

    # echo the inputs
    log_msg(log_div)
    log_msg('Started running the workflow script')
    log_msg(log_div)

    WF = whale.Workflow(
        run_type,
        input_file,
        app_registry,
        app_type_list=['Event', 'Modeling', 'EDP', 'Simulation', 'UQ'])

    # prepare the input files for the simulation
    WF.create_RV_files(app_sequence=['Event', 'Modeling', 'EDP', 'Simulation'])

    # create the workflow driver file
    WF.create_driver_file(
        app_sequence=['Event', 'Modeling', 'EDP', 'Simulation'])

    # run uq engine to simulate response
    WF.simulate_response()
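
These main() functions are typically driven from a command line. A minimal sketch of such an entry point for the three-argument signature above; the flag names and defaults are illustrative assumptions, not the tool's actual CLI:

import argparse

if __name__ == '__main__':
    # hypothetical CLI wrapper for the simplest main() signature
    parser = argparse.ArgumentParser()
    parser.add_argument('--runType', default='run')
    parser.add_argument('--inputFile')
    parser.add_argument('--appRegistry')
    args = parser.parse_args()

    main(args.runType, args.inputFile, args.appRegistry)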
Example no. 5

def main(run_type, input_file, app_registry,
         force_cleanup, bldg_id_filter, reference_dir,
         working_dir, app_dir, log_file):

    # initialize the log file
    with open(input_file, 'r') as f:
        inputs = json.load(f)
    if working_dir is not None:
        runDir = working_dir
    else:
        runDir = inputs['runDir']

    if not os.path.exists(runDir):
        os.mkdir(runDir)
    if log_file == 'log.txt':
        whale.log_file = runDir + '/log.txt'
    else:
        whale.log_file = log_file
    with open(whale.log_file, 'w') as f:
        f.write('RDT workflow\n')

    whale.print_system_info()

    # echo the inputs
    log_msg(log_div)
    log_msg('Started running the workflow script')
    log_msg(log_div)
    if force_cleanup:
        log_msg('Forced cleanup turned on.')

    WF = whale.Workflow(
        run_type, input_file, app_registry,
        app_type_list=['Building', 'RegionalEvent', 'RegionalMapping',
                       'Event', 'Modeling', 'EDP', 'Simulation', 'UQ', 'DL'],
        reference_dir=reference_dir,
        working_dir=working_dir,
        app_dir=app_dir,
        units=inputs.get('units', None),
        outputs=inputs.get('outputs', None))

    if bldg_id_filter is not None:
        log_msg(
            f'Overriding simulation scope; running buildings {bldg_id_filter}')

        # If a Min or Max attribute is used when calling the script, we need to
        # update the min and max values in the input file.
        WF.workflow_apps['Building'].pref["filter"] = bldg_id_filter

    # initialize the working directory
    WF.init_workdir()

    # prepare the basic inputs for individual buildings
    building_file = WF.create_building_files()
    WF.perform_regional_mapping(building_file)

    # TODO: not elegant code, fix later
    with open(WF.building_file_path, 'r') as f:
        bldg_data = json.load(f)

    for bldg in bldg_data:
        log_msg(bldg)

        # initialize the simulation directory
        WF.init_simdir(bldg['id'], bldg['file'])

        # prepare the input files for the simulation
        WF.create_RV_files(
            app_sequence=['Event', 'Modeling', 'EDP', 'Simulation'],
            BIM_file=bldg['file'], bldg_id=bldg['id'])

        # create the workflow driver file
        WF.create_driver_file(
            app_sequence=['Building', 'Event', 'Modeling', 'EDP', 'Simulation'],
            bldg_id=bldg['id'])

        # run uq engine to simulate response
        WF.simulate_response(BIM_file=bldg['file'], bldg_id=bldg['id'])

        # run dl engine to estimate losses
        WF.estimate_losses(BIM_file=bldg['file'], bldg_id=bldg['id'])

        if force_cleanup:
            # clean up intermediate files from the simulation
            WF.cleanup_simdir(bldg['id'])

    # aggregate results
    WF.aggregate_results(bldg_data=bldg_data)

    if force_cleanup:
        # clean up intermediate files from the working directory
        WF.cleanup_workdir()
Example no. 6

def main(run_type, input_file, app_registry, force_cleanup, bldg_id_filter,
         reference_dir, working_dir, app_dir, log_file):

    # save the reference dir in the input file
    with open(input_file, 'r') as f:
        inputs = json.load(f)

    if not os.path.exists(working_dir):
        os.mkdir(working_dir)

    # initialize the log file
    if log_file == 'log.txt':
        whale.log_file = working_dir + '/log.txt'
    else:
        whale.log_file = log_file
    with open(whale.log_file, 'w') as f:
        f.write('rWHALE workflow\n')

    whale.print_system_info()

    # echo the inputs
    log_msg(log_div)
    log_msg('Started running the workflow script')
    log_msg(log_div)

    if force_cleanup:
        log_msg('Forced cleanup turned on.')

    #
    # parse regionalEventAppData, create new input file
    # for the rWHALE workflow
    #

    randomVariables = inputs.get("randomVariables", [])

    inputApplications = inputs["Applications"]
    regionalApplication = inputApplications["RegionalEvent"]
    appData = regionalApplication["ApplicationData"]
    regionalData = inputs["RegionalEvent"]
    regionalData["eventFile"] = appData["inputEventFilePath"] + "/" + appData[
        "inputEventFile"]
    regionalData["eventFilePath"] = appData["inputEventFilePath"]

    siteFilter = appData["filter"]

    siteResponseInput = {
        "units": inputs["units"],
        "outputs": {
            "EDP": True,
            "DM": False,
            "BIM": False,
            "DV": False,
            "every_realization": False
        },
        "RegionalEvent": regionalData,
        "randomVariables": randomVariables,
        "Applications": {
            "RegionalMapping": inputApplications["RegionalMapping"],
            "UQ": inputApplications["UQ"],
            "Building": {
                "Application": "CSV_to_BIM",
                "ApplicationData": {
                    "buildingSourceFile":
                        appData["soilGridParametersFilePath"] + "/" +
                        appData["soilGridParametersFile"],
                    "filter": siteFilter
                }
            },
            "EDP": {
                "Application": "DummyEDP",
                "ApplicationData": {}
            },
            "Events": [{
                "EventClassification": "Earthquake",
                "Application": "RegionalSiteResponse",
                "ApplicationData": {
                    "pathEventData": "inputMotions",
                    "mainScript": appData["siteResponseScript"],
                    "modelPath": appData["siteResponseScriptPath"],
                    "ndm": 3
                }
            }]
        }
    }

    siteResponseInputFile = 'tmpSiteResponseInput.json'

    with open(siteResponseInputFile, 'w') as json_file:
        json.dump(siteResponseInput, json_file, indent=2)

    WF = whale.Workflow(
        run_type,
        siteResponseInputFile,
        app_registry,
        app_type_list=['Building', 'RegionalMapping', 'Event', 'EDP', 'UQ'],
        reference_dir=reference_dir,
        working_dir=working_dir,
        app_dir=app_dir)

    if bldg_id_filter is not None:
        log_msg(
            f'Overriding simulation scope; running buildings {bldg_id_filter}')

        # If a Min or Max attribute is used when calling the script, we need to
        # update the min and max values in the input file.
        WF.workflow_apps['Building'].pref["filter"] = bldg_id_filter

    # initialize the working directory
    WF.init_workdir()

    # perform the event simulation (if needed)
    if 'RegionalEvent' in WF.workflow_apps:
        WF.perform_regional_event()

    # prepare the basic inputs for individual buildings
    building_file = WF.create_building_files()
    WF.perform_regional_mapping(building_file)

    # TODO: not elegant code, fix later
    with open(building_file, 'r') as f:
        bldg_data = json.load(f)

    for bldg in bldg_data:
        log_msg(bldg)

        # initialize the simulation directory
        WF.init_simdir(bldg['id'], bldg['file'])

        # prepare the input files for the simulation
        WF.create_RV_files(app_sequence=['Event', 'EDP'],
                           BIM_file=bldg['file'],
                           bldg_id=bldg['id'])

        # create the workflow driver file
        WF.create_driver_file(app_sequence=['Building', 'Event', 'EDP'],
                              bldg_id=bldg['id'])

        # run uq engine to simulate response
        WF.simulate_response(BIM_file=bldg['file'], bldg_id=bldg['id'])

        # loss estimation is intentionally skipped in this site-response
        # workflow; the EDP results are the final product here
        #WF.estimate_losses(
        #    BIM_file = bldg['file'], bldg_id = bldg['id'],
        #    copy_resources=True)

        if force_cleanup:
            # clean up intermediate files from the simulation
            WF.cleanup_simdir(bldg['id'])

    # aggregate results
    WF.aggregate_results(bldg_data=bldg_data)

    if force_cleanup:
        # clean up intermediate files from the working directory
        WF.cleanup_workdir()
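
For reference, the generated tmpSiteResponseInput.json mirrors the dictionary assembled above; a sketch written as a Python literal, where every concrete value is a hypothetical placeholder and only the structure follows the code:

# hypothetical instance of the generated site-response input (placeholders)
site_response_input_example = {
    "units": {"force": "kN", "length": "m", "time": "sec"},
    "outputs": {"EDP": True, "DM": False, "BIM": False,
                "DV": False, "every_realization": False},
    "RegionalEvent": {
        "eventFile": "inputMotions/EventGrid.csv",  # placeholder
        "eventFilePath": "inputMotions"             # placeholder
    },
    "randomVariables": [],
    "Applications": {
        "RegionalMapping": {},  # copied verbatim from the original input
        "UQ": {},               # copied verbatim from the original input
        "Building": {
            "Application": "CSV_to_BIM",
            "ApplicationData": {
                "buildingSourceFile": "soil/soilGridParameters.csv",  # placeholder
                "filter": "1-10"  # placeholder
            }
        },
        "EDP": {"Application": "DummyEDP", "ApplicationData": {}},
        "Events": [{
            "EventClassification": "Earthquake",
            "Application": "RegionalSiteResponse",
            "ApplicationData": {
                "pathEventData": "inputMotions",
                "mainScript": "siteResponse.py",  # placeholder
                "modelPath": "model/",            # placeholder
                "ndm": 3
            }
        }]
    }
}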
Example no. 7

def main(run_type, input_file, app_registry, force_cleanup, bldg_id_filter,
         reference_dir, working_dir, app_dir, log_file):

    # save the reference dir in the input file
    with open(input_file, 'r') as f:
        inputs = json.load(f)

    # TODO: if the ref dir is needed, do NOT save it to the input file; store it
    # somewhere else, in a file that is not shared among processes
    #inputs['refDir'] = reference_dir
    #with open(input_file, 'w') as f:
    #    json.dump(inputs, f, indent=2)

    # TODO: remove the commented section below, I only kept it for now to make
    # sure it is not needed

    #if working_dir is not None:
    #    runDir = working_dir
    #else:
    #    runDir = inputs['runDir']

    if not os.path.exists(working_dir):
        os.mkdir(working_dir)

    # initialize the log file
    if log_file == 'log.txt':
        whale.log_file = working_dir + '/log.txt'
    else:
        whale.log_file = log_file
    with open(whale.log_file, 'w') as f:
        f.write('rWHALE workflow\n')

    whale.print_system_info()

    # echo the inputs
    log_msg(log_div)
    log_msg('Started running the workflow script')
    log_msg(log_div)

    if force_cleanup:
        log_msg('Forced cleanup turned on.')

    WF = whale.Workflow(
        run_type, input_file, app_registry,
        app_type_list=['Building', 'RegionalEvent', 'RegionalMapping',
                       'Event', 'Modeling', 'EDP', 'Simulation', 'UQ', 'DL'],
        reference_dir=reference_dir,
        working_dir=working_dir,
        app_dir=app_dir)

    if bldg_id_filter is not None:
        log_msg(
            f'Overriding simulation scope; running buildings {bldg_id_filter}')

        # If a Min or Max attribute is used when calling the script, we need to
        # update the min and max values in the input file.
        WF.workflow_apps['Building'].pref["filter"] = bldg_id_filter

    # initialize the working directory
    WF.init_workdir()

    # perform the event simulation (if needed)
    if 'RegionalEvent' in WF.workflow_apps:
        WF.perform_regional_event()

    # prepare the basic inputs for individual buildings
    building_file = WF.create_building_files()
    WF.perform_regional_mapping(building_file)

    # TODO: not elegant code, fix later
    with open(building_file, 'r') as f:
        bldg_data = json.load(f)

    for bldg in bldg_data:
        log_msg(bldg)

        # initialize the simulation directory
        WF.init_simdir(bldg['id'], bldg['file'])

        # prepare the input files for the simulation
        WF.create_RV_files(
            app_sequence=['Event', 'Modeling', 'EDP', 'Simulation'],
            BIM_file=bldg['file'],
            bldg_id=bldg['id'])

        # create the workflow driver file
        WF.create_driver_file(
            app_sequence=['Building', 'Event', 'Modeling', 'EDP', 'Simulation'],
            bldg_id=bldg['id'])

        # run uq engine to simulate response
        WF.simulate_response(BIM_file=bldg['file'], bldg_id=bldg['id'])

        # run dl engine to estimate losses
        WF.estimate_losses(BIM_file=bldg['file'],
                           bldg_id=bldg['id'],
                           copy_resources=True)

        if force_cleanup:
            # clean up intermediate files from the simulation
            WF.cleanup_simdir(bldg['id'])

    # aggregate results
    WF.aggregate_results(bldg_data=bldg_data)

    if force_cleanup:
        # clean up intermediate files from the working directory
        WF.cleanup_workdir()
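
Similarly, a sketch of a command-line wrapper for the regional signature shared by Examples no. 5 to 7; flag names, defaults, and the filter format are illustrative assumptions, not the actual rWHALE CLI:

import argparse

if __name__ == '__main__':
    # hypothetical CLI wrapper for the regional main() signature
    parser = argparse.ArgumentParser()
    parser.add_argument('--runType', default='run')
    parser.add_argument('--inputFile')
    parser.add_argument('--appRegistry')
    parser.add_argument('--forceCleanup', action='store_true')
    parser.add_argument('--buildingFilter', default=None)
    parser.add_argument('--referenceDir', default=None)
    parser.add_argument('--workingDir', default=None)
    parser.add_argument('--appDir', default=None)
    parser.add_argument('--logFile', default='log.txt')
    args = parser.parse_args()

    main(args.runType, args.inputFile, args.appRegistry,
         args.forceCleanup, args.buildingFilter, args.referenceDir,
         args.workingDir, args.appDir, args.logFile)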