Example #1
def extract():
    copy_lads()

    print("Extracting")
    datasets = [
        {
            "src": "/data/scenarios/climate_v1.zip",
            "dest": str(NISMOD_SCENARIOS_PATH.joinpath("climate/")),
        },
        {
            "src": "/data/scenarios/population_v1.zip",
            "dest": str(NISMOD_SCENARIOS_PATH.joinpath("population/")),
        },
        {
            "src": "/data/scenarios/prices_v2.zip",
            "dest": str(NISMOD_SCENARIOS_PATH.joinpath("prices/")),
        },
        {
            "src": "/data/scenarios/socio-economic-1.0.1.zip",
            "dest": str(NISMOD_SOCIO_ECONOMIC_PATH),
        },
        {
            "src": "/data/scenarios/ev_transport_trips_v0.1.zip",
            "dest": str(NISMOD_SCENARIOS_PATH.joinpath("ev_transport_trips/")),
        },
        # {
        #     "src": "/data/energy_supply/energy_supply_data_v0.9.10.zip",
        #     "dest": str(NISMOD_DATA_PATH.joinpath("energy_supply/")),
        # },
        {
            "src": "/data/energy_demand/v0.9.12_full.zip",
            "dest": str(NISMOD_DATA_PATH.joinpath("energy_demand/")),
        },
        {
            "src": "/data/energy_demand/config_data_v1.0.zip",
            "dest":
            str(NISMOD_DATA_PATH.joinpath("energy_demand/config_data/")),
        },
    ]
    for data in datasets:
        print("Extracting - " + data["src"] + " - to - " + data["dest"])
        unpack_archive(data["src"], data["dest"], "zip")

    link_files(
        Path.joinpath(NISMOD_SOCIO_ECONOMIC_PATH, "socio-economic-1.0.1/"),
        NISMOD_SOCIO_ECONOMIC_PATH,
    )

    print("Installing energy_demand")
    run_process("cd " + str(NISMOD_PATH) +
                " && ./provision/install_energy_demand.sh " + str(NISMOD_PATH))
    print("energy_demand setup")
    run_process("cd " + str(NISMOD_PATH) + " && energy_demand setup -f " +
                str(NISMOD_PATH) + "/models/energy_demand/wrapperconfig.ini")
Example #2
def extract():
    copy_lads()

    print("Extracting")
    datasets = [
        {
            "src": "/data/scenarios/climate_v1.zip",
            "dest": str(NISMOD_SCENARIOS_PATH.joinpath("climate/")),
        },
        {
            "src": "/data/scenarios/population_v1.zip",
            "dest": str(NISMOD_SCENARIOS_PATH.joinpath("population/")),
        },
        {
            "src": "/data/scenarios/prices_v2.zip",
            "dest": str(NISMOD_SCENARIOS_PATH.joinpath("prices/")),
        },
        {
            "src": "/data/scenarios/socio-economic-1.0.1.zip",
            "dest": str(NISMOD_SOCIO_ECONOMIC_PATH),
        },
        {
            "src": "/data/scenarios/ev_transport_trips_v0.1.zip",
            "dest": str(NISMOD_SCENARIOS_PATH.joinpath("ev_transport_trips/")),
        },
        {
            "src": "/data/et_module/et_module_v0.5.zip",
            "dest": str(NISMOD_DATA_PATH.joinpath("et_module/")),
        },
    ]
    for data in datasets:
        print("Extracting - " + data["src"] + " - to - " + data["dest"])
        unpack_archive(data["src"], data["dest"], "zip")

    link_files(
        Path.joinpath(NISMOD_SOCIO_ECONOMIC_PATH, "socio-economic-1.0.1/"),
        NISMOD_SOCIO_ECONOMIC_PATH,
    )

    print("Installing ET Module")
    run_process("cd " + str(NISMOD_PATH) +
                " && ./provision/install_et_module.sh " + str(NISMOD_PATH))
Example #3
def extract():
    copy_lads()

    print("Extracting")
    TRANSPORT_PATH = NISMOD_DATA_PATH.joinpath("transport/")
    datasets = [
        {
            "src": "/data/scenarios/climate_v1.zip",
            "dest": str(NISMOD_SCENARIOS_PATH.joinpath("climate/")),
        },
        {
            "src": "/data/scenarios/population_v1.zip",
            "dest": str(NISMOD_SCENARIOS_PATH.joinpath("population/")),
        },
        {
            "src": "/data/scenarios/prices_v2.zip",
            "dest": str(NISMOD_SCENARIOS_PATH.joinpath("prices/")),
        },
        {
            "src": "/data/scenarios/socio-economic-1.0.1.zip",
            "dest": str(NISMOD_SOCIO_ECONOMIC_PATH),
        },
        {
            "src": "/data/scenarios/ev_transport_trips_v0.1.zip",
            "dest": str(NISMOD_SCENARIOS_PATH.joinpath("ev_transport_trips/")),
        },
        {
            "src": "/data/energy_supply/energy_supply_data_v0.9.10.zip",
            "dest": str(NISMOD_DATA_PATH.joinpath("energy_supply/")),
        },
        {
            "src": "/data/energy_demand/v0.9.12_full.zip",
            "dest": str(NISMOD_DATA_PATH.joinpath("energy_demand/")),
        },
        {
            "src": "/data/energy_demand/config_data_v1.0.zip",
            "dest": str(NISMOD_DATA_PATH.joinpath("energy_demand/config_data/")),
        },
        {
            "src": "/data/transport/TR_data_full_for_release_v2.3.0.zip",
            "dest": str(TRANSPORT_PATH),
        },
        {
            "src": "/data/transport/transport_testdata_2.3.0.zip",
            "dest": str(TRANSPORT_PATH),
        },
        {
            "src": "/data/transport/transport-rail_v1.0.0.zip",
            "dest": str(TRANSPORT_PATH),
        },
        {
            "src": "/data/et_module/et_module_v0.5.zip",
            "dest": str(NISMOD_DATA_PATH.joinpath("et_module/")),
        },
    ]
    for data in datasets:
        print("Extracting - " + data["src"] + " - to - " + data["dest"])
        unpack_archive(data["src"], data["dest"], "zip")

    link_files(
        Path.joinpath(NISMOD_SOCIO_ECONOMIC_PATH, "socio-economic-1.0.1/"),
        NISMOD_SOCIO_ECONOMIC_PATH,
    )
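Each of these extract() examples ends by calling link_files(src, dest) to expose the unpacked socio-economic release at the path the model expects. The helper itself is not included; a minimal sketch, assuming the two-argument form simply symlinks every entry of the source directory into the destination (the real helper may behave differently, and note that the later active-fires examples use a different link_files that takes a destination and a list of paths and returns a count):

from pathlib import Path

# Hypothetical two-argument link_files, as called in the extract() examples:
# symlink each entry of src_dir into dest_dir, creating dest_dir if needed.
def link_files(src_dir, dest_dir):
    src_dir, dest_dir = Path(src_dir), Path(dest_dir)
    dest_dir.mkdir(parents=True, exist_ok=True)
    for entry in src_dir.iterdir():
        target = dest_dir / entry.name
        if not target.exists():
            target.symlink_to(entry)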
Example #4
def extract():
    copy_lads()

    print("Extracting")
    TRANSPORT_PATH = NISMOD_DATA_PATH.joinpath("transport/")
    datasets = [
        {
            "src": "/data/scenarios/climate_v1.zip",
            "dest": str(NISMOD_SCENARIOS_PATH.joinpath("climate/")),
        },
        {
            "src": "/data/scenarios/population_v1.zip",
            "dest": str(NISMOD_SCENARIOS_PATH.joinpath("population/")),
        },
        {
            "src": "/data/scenarios/prices_v2.zip",
            "dest": str(NISMOD_SCENARIOS_PATH.joinpath("prices/")),
        },
        {
            "src": "/data/scenarios/socio-economic-1.0.1.zip",
            "dest": str(NISMOD_SOCIO_ECONOMIC_PATH),
        },
        {
            "src": "/data/scenarios/ev_transport_trips_v0.1.zip",
            "dest": str(NISMOD_SCENARIOS_PATH.joinpath("ev_transport_trips/")),
        },
        # {
        #     "src": "/data/energy_supply/energy_supply_data_v0.9.10.zip",
        #     "dest": str(NISMOD_DATA_PATH.joinpath("energy_supply/")),
        # },
        # {
        #     "src": "/data/energy_demand/v0.9.12_full.zip",
        #     "dest": str(NISMOD_DATA_PATH.joinpath("energy_demand/")),
        # },
        # {
        #     "src": "/data/energy_demand/config_data_v1.0.zip",
        #     "dest": str(NISMOD_DATA_PATH.joinpath("energy_demand/config_data/")),
        # },
        {
            "src": "/data/transport/TR_data_full_for_release_v2.3.0.zip",
            "dest": str(TRANSPORT_PATH),
        },
        {
            "src": "/data/transport/transport_testdata_2.3.0.zip",
            "dest": str(TRANSPORT_PATH),
        },
        {
            "src": "/data/transport/transport-rail_v1.0.0.zip",
            "dest": str(TRANSPORT_PATH),
        },
    ]
    for data in datasets:
        print("Extracting - " + data["src"] + " - to - " + data["dest"])
        unpack_archive(data["src"], data["dest"], "zip")

    link_files(
        Path.joinpath(NISMOD_SOCIO_ECONOMIC_PATH, "socio-economic-1.0.1/"),
        NISMOD_SOCIO_ECONOMIC_PATH,
    )

    print("Moving TR_data_full")
    TR_DATA_FULL_PATH = TRANSPORT_PATH.joinpath("TR_data_full/")
    rmtree(str(TR_DATA_FULL_PATH), ignore_errors=True)
    move(
        str(TRANSPORT_PATH.joinpath("TR_data_full_for_release_v2.3.0/")),
        str(TR_DATA_FULL_PATH),
    )

    print("Moving TR GB data")
    TR_GB_PATH = TRANSPORT_PATH.joinpath("gb/")
    TR_GB_DATA_PATH = TR_GB_PATH.joinpath("data")
    rmtree(str(TR_GB_PATH), ignore_errors=True)
    TR_GB_PATH.joinpath("config").parent.mkdir(parents=True, exist_ok=True)
    move(str(TR_DATA_FULL_PATH.joinpath("full/data")), str(TR_GB_DATA_PATH))

    print("Moving Southampton data")
    SOUTHAMPTON_PATH = TRANSPORT_PATH.joinpath("southampton/")
    rmtree(str(SOUTHAMPTON_PATH), ignore_errors=True)
    SOUTHAMPTON_PATH.joinpath("config").parent.mkdir(parents=True,
                                                     exist_ok=True)
    move(
        str(TRANSPORT_PATH.joinpath("transport_testdata_2.3.0/")),
        str(SOUTHAMPTON_PATH.joinpath("data/")),
    )
    print("southampton Data directory looks like")
    for dc in SOUTHAMPTON_PATH.joinpath("data/").iterdir():
        print(str(dc))
    for dc in SOUTHAMPTON_PATH.joinpath("data/csvfiles/").iterdir():
        print(str(dc))

    print("Gzipping passengerRoutes")
    run_process(
        "gzip " +
        str(SOUTHAMPTON_PATH.joinpath("data/routes/passengerRoutes.dat")))
    print("Gzipping freightRoutes")
    run_process(
        "gzip " +
        str(SOUTHAMPTON_PATH.joinpath("data/routes/freightRoutes.dat")))

    print("Converting Engine fractions")
    convert_transport_engine_fractions.main(
        str(TR_GB_DATA_PATH.joinpath("csvfiles/engineTypeFractions.csv")),
        str(NISMOD_SCENARIOS_PATH.joinpath("engine_type_fractions.csv")),
    )
    convert_transport_engine_fractions.main(
        str(TR_GB_DATA_PATH.joinpath("csvfiles/engineTypeFractionsEW.csv")),
        str(NISMOD_SCENARIOS_PATH.joinpath("engine_type_fractions_ew.csv")),
    )
    convert_transport_engine_fractions.main(
        str(TR_GB_DATA_PATH.joinpath("csvfiles/engineTypeFractionsMVE.csv")),
        str(NISMOD_SCENARIOS_PATH.joinpath("engine_type_fractions_mve.csv")),
    )

    print("Moving rail data")
    rail_data = [
        "dimensions/",
        "initial_conditions/",
        "interventions/",
        "parameters/",
        "scenarios/",
    ]
    TRANSPORT_RAIL_PATH = TRANSPORT_PATH.joinpath("transport-rail_v1.0.0/")
    for rd in rail_data:
        src_path = str(TRANSPORT_RAIL_PATH.joinpath(rd))
        dest_path = str(NISMOD_DATA_PATH.joinpath(rd))
        print("Moving - " + src_path + " - to - " + dest_path)
        move(src_path, dest_path)
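The gzipping step earlier in this example shells out with run_process("gzip " + path). A pure-Python equivalent using the standard gzip and shutil modules is sketched below, reproducing the command-line tool's in-place behaviour of removing the original file; this is an alternative, not the code the example actually runs:

import gzip
import os
import shutil

# Pure-Python alternative to shelling out to "gzip <file>": write <file>.gz
# and remove the original, as the command-line tool does by default.
def gzip_file(path):
    with open(path, 'rb') as src, gzip.open(path + '.gz', 'wb') as dst:
        shutil.copyfileobj(src, dst)
    os.remove(path)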
Example #5
def afire_submitter(args):
    '''
    This routine encapsulates a single unit of work, multiple instances of which are submitted to
    the multiprocessing queue. It takes as input whatever is required to complete the work unit,
    and returns the return codes and captured output logging from the external process.
    '''

    # This try block wraps all code in this worker function, to capture any exceptions.
    try:

        granule_dict = args['granule_dict']
        afire_home = args['afire_home']
        afire_options = args['afire_options']

        granule_id = granule_dict['granule_id']
        run_dir = granule_dict['run_dir']
        cmd = granule_dict['cmd']
        work_dir = afire_options['work_dir']
        env_vars = {}

        rc_exe = 0
        rc_problem = 0
        exe_out = "Finished the Active Fires granule {}".format(granule_id)

        LOG.debug("granule_id = {}".format(granule_id))
        LOG.debug("run_dir = {}".format(run_dir))
        LOG.debug("cmd = {}".format(cmd))
        LOG.debug("work_dir = {}".format(work_dir))
        LOG.debug("env_vars = {}".format(env_vars))

        current_dir = os.getcwd()

        LOG.info("Processing granule_id {}...".format(granule_id))

        # Create the run dir for this input file
        log_idx = 0
        while True:
            run_dir = pjoin(
                work_dir, "{}_run_{}".format(granule_dict['run_dir'], log_idx))
            if not exists(run_dir):
                os.makedirs(run_dir)
                break
            else:
                log_idx += 1

        os.chdir(run_dir)

        # Download and stage the required ancillary data for this input file
        LOG.info("\tStaging the required ancillary data for granule_id {}...".
                 format(granule_id))
        failed_ancillary = False
        try:
            rc_ancil, rc_ancil_dict, lwm_file = get_lwm(
                afire_options, granule_dict)
            failed_ancillary = True if rc_ancil != 0 else False
        except Exception as err:
            failed_ancillary = True
            LOG.warn('\tProblem generating LWM for granule_id {}'.format(
                granule_id))
            LOG.error(err)
            LOG.debug(traceback.format_exc())

        # Run the active fire binary
        if afire_options['ancillary_only']:

            LOG.info(
                '''\tAncillary only, skipping Active Fire execution for granule_id {}'''
                .format(granule_id))
            if failed_ancillary:
                LOG.warn(
                    '\tAncillary granulation failed for granule_id {}'.format(
                        granule_id))
                rc_problem = 1

            os.chdir(current_dir)

        elif failed_ancillary:

            LOG.warn('\tAncillary granulation failed for granule_id {}'.format(
                granule_id))
            os.chdir(current_dir)
            rc_problem = 1

        else:
            # Link the required files and directories into the work directory...
            paths_to_link = [
                pjoin(afire_home, 'vendor', afire_options['vfire_exe']),
                lwm_file,
            ] + [
                granule_dict[key]['file']
                for key in afire_options['input_prefixes']
            ]
            number_linked = link_files(run_dir, paths_to_link)
            LOG.debug("\tWe are linking {} files to the run dir:".format(
                number_linked))
            for linked_files in paths_to_link:
                LOG.debug("\t{}".format(linked_files))

            # Construct a dictionary of error conditions which should be logged.
            error_keys = [
                'FAILURE', 'failure', 'FAILED', 'failed', 'FAIL', 'fail',
                'ERROR', 'error', 'ERR', 'err', 'ABORTING', 'aborting',
                'ABORT', 'abort'
            ]
            error_dict = {
                x: {
                    'pattern': x,
                    'count_only': False,
                    'count': 0,
                    'max_count': None,
                    'log_str': ''
                }
                for x in error_keys
            }
            error_dict['error_keys'] = error_keys

            start_time = time.time()

            rc_exe, exe_out = execute_binary_captured_inject_io(
                run_dir,
                cmd,
                error_dict,
                log_execution=False,
                log_stdout=False,
                log_stderr=False,
                **env_vars)

            end_time = time.time()

            afire_time = execution_time(start_time, end_time)
            LOG.debug("\tafire execution of {} took {:9.6f} seconds".format(
                granule_id, afire_time['delta']))
            LOG.info(
                "\tafire execution of {} took {} days, {} hours, {} minutes, {:8.6f} seconds"
                .format(granule_id, afire_time['days'], afire_time['hours'],
                        afire_time['minutes'], afire_time['seconds']))

            LOG.debug("\tGranule ID: {}, rc_exe = {}".format(
                granule_id, rc_exe))

            os.chdir(current_dir)

            # Write the afire output to a log file, and parse it to determine the output
            creation_dt = datetime.utcnow()
            timestamp = creation_dt.isoformat()
            logname = "{}_{}.log".format(run_dir, timestamp)
            log_dir = dirname(run_dir)
            logpath = pjoin(log_dir, logname)
            logfile_obj = open(logpath, 'w')
            for line in exe_out.splitlines():
                logfile_obj.write(str(line) + "\n")
            logfile_obj.close()

            # Update the various file global attributes
            try:

                old_output_file = pjoin(run_dir, granule_dict['AFEDR']['file'])
                creation_dt = granule_dict['creation_dt']

                # Check whether the target AF text file exists, and remove it.
                output_txt_file = '{}.txt'.format(splitext(old_output_file)[0])
                if exists(output_txt_file):
                    LOG.debug('{} exists, removing.'.format(output_txt_file))
                    os.remove(output_txt_file)

                #
                # Update the attributes, moving to the end
                #
                if afire_options['i_band']:

                    # Update the I-band attributes, and write the fire data to a text file.
                    h5_file_obj = h5py.File(old_output_file, "a")
                    h5_file_obj.attrs.create(
                        'date_created', np.string_(creation_dt.isoformat()))
                    h5_file_obj.attrs.create('granule_id',
                                             np.string_(granule_id))
                    history_string = 'CSPP Active Fires version: {}'.format(
                        afire_options['version'])
                    h5_file_obj.attrs.create('history',
                                             np.string_(history_string))
                    h5_file_obj.attrs.create(
                        'Metadata_Link', np.string_(basename(old_output_file)))
                    h5_file_obj.attrs.create(
                        'id', np.string_(getURID(creation_dt)['URID']))

                    # Extract desired data from the NetCDF4 file, for output to the text file
                    nfire = h5_file_obj.attrs['FirePix'][0]
                    if int(nfire) > 0:
                        fire_datasets = [
                            'FP_latitude', 'FP_longitude', 'FP_T4',
                            'FP_confidence', 'FP_power'
                        ]
                        fire_data = []
                        for dset in fire_datasets:
                            fire_data.append(h5_file_obj['/' + dset][:])

                    h5_file_obj.close()

                    # Check if there are any fire pixels, and write the associated fire data to
                    # a text file...

                    LOG.info("\tGranule {} has {} fire pixels".format(
                        granule_id, nfire))

                    if int(nfire) > 0:
                        Along_scan_pixel_dim = 0.375
                        Along_track_pixel_dim = 0.375
                        fire_pixel_res = [
                            Along_scan_pixel_dim, Along_track_pixel_dim
                        ]

                        format_str = '''{0:13.8f}, {1:13.8f}, {2:13.8f}, {5:6.3f}, {6:6.3f},''' \
                            ''' {3:4d}, {4:13.8f}'''

                        txt_file_header = \
                            '''# Active Fires I-band EDR\n''' \
                            '''#\n''' \
                            '''# source: {}\n''' \
                            '''# version: {}\n''' \
                            '''#\n''' \
                            '''# column 1: latitude of fire pixel (degrees)\n''' \
                            '''# column 2: longitude of fire pixel (degrees)\n''' \
                            '''# column 3: I04 brightness temperature of fire pixel (K)\n''' \
                            '''# column 4: Along-scan fire pixel resolution (km)\n''' \
                            '''# column 5: Along-track fire pixel resolution (km)\n''' \
                            '''# column 6: detection confidence ([7,8,9]->[lo,med,hi])\n''' \
                            '''# column 7: fire radiative power (MW)\n''' \
                            '''#\n# number of fire pixels: {}\n''' \
                            '''#'''.format(basename(old_output_file), history_string, nfire)

                        nasa_file = output_txt_file.replace('dev', 'dev_nasa')
                        if exists(nasa_file):
                            LOG.debug('{} exists, removing.'.format(nasa_file))
                            os.remove(nasa_file)

                        LOG.info("\tWriting output text file {}".format(
                            output_txt_file))
                        txt_file_obj = open(output_txt_file, 'x')

                        try:
                            txt_file_obj.write(txt_file_header + "\n")

                            for FP_latitude, FP_longitude, FP_R13, FP_confidence, FP_power in zip(
                                    *fire_data):
                                fire_vars = [
                                    FP_latitude, FP_longitude, FP_R13,
                                    FP_confidence, FP_power
                                ]
                                line = format_str.format(*(fire_vars +
                                                           fire_pixel_res))
                                txt_file_obj.write(line + "\n")

                            txt_file_obj.close()
                        except Exception:
                            txt_file_obj.close()
                            rc_problem = 1
                            LOG.warning(
                                "\tProblem writing Active fire text file: {}".
                                format(output_txt_file))
                            LOG.warn(traceback.format_exc())
                else:

                    # Update the M-band attributes, and write the fire data to a text file.

                    nc_file_obj = Dataset(old_output_file,
                                          "a",
                                          format="NETCDF4")
                    setattr(nc_file_obj, 'date_created',
                            creation_dt.isoformat())
                    setattr(nc_file_obj, 'granule_id', granule_id)
                    history_string = 'CSPP Active Fires version: {}'.format(
                        afire_options['version'])
                    setattr(nc_file_obj, 'history', history_string)
                    setattr(nc_file_obj, 'Metadata_Link',
                            basename(old_output_file))
                    setattr(nc_file_obj, 'id', getURID(creation_dt)['URID'])

                    # Extract desired data from the NetCDF4 file, for output to the text file
                    nfire = len(nc_file_obj['Fire Pixels'].dimensions['nfire'])
                    if int(nfire) > 0:
                        fire_datasets = [
                            'FP_latitude', 'FP_longitude', 'FP_T13',
                            'FP_confidence', 'FP_power'
                        ]
                        fire_data = []
                        for dset in fire_datasets:
                            fire_data.append(
                                nc_file_obj['Fire Pixels'].variables[dset][:])
                    nc_file_obj.close()

                    # Check if there are any fire pixels, and write the associated fire data to
                    # a text file...

                    LOG.info("\tGranule {} has {} fire pixels".format(
                        granule_id, nfire))

                    if int(nfire) > 0:
                        Along_scan_pixel_dim = 0.75
                        Along_track_pixel_dim = 0.75
                        fire_pixel_res = [
                            Along_scan_pixel_dim, Along_track_pixel_dim
                        ]

                        format_str = '''{0:13.8f}, {1:13.8f}, {2:13.8f}, {5:6.3f}, {6:6.3f},''' \
                            ''' {3:4d}, {4:13.8f}'''

                        txt_file_header = \
                            '''# Active Fires M-band EDR\n''' \
                            '''#\n''' \
                            '''# source: {}\n''' \
                            '''# version: {}\n''' \
                            '''#\n''' \
                            '''# column 1: latitude of fire pixel (degrees)\n''' \
                            '''# column 2: longitude of fire pixel (degrees)\n''' \
                            '''# column 3: M13 brightness temperature of fire pixel (K)\n''' \
                            '''# column 4: Along-scan fire pixel resolution (km)\n''' \
                            '''# column 5: Along-track fire pixel resolution (km)\n''' \
                            '''# column 6: detection confidence (%)\n''' \
                            '''# column 7: fire radiative power (MW)\n''' \
                            '''#\n# number of fire pixels: {}\n''' \
                            '''#'''.format(basename(old_output_file), history_string, nfire)

                        LOG.info("\tWriting output text file {}".format(
                            output_txt_file))
                        txt_file_obj = open(output_txt_file, 'x')

                        try:
                            txt_file_obj.write(txt_file_header + "\n")

                            for FP_latitude, FP_longitude, FP_T13, FP_confidence, FP_power in zip(
                                    *fire_data):
                                fire_vars = [
                                    FP_latitude, FP_longitude, FP_T13,
                                    FP_confidence, FP_power
                                ]
                                line = format_str.format(*(fire_vars +
                                                           fire_pixel_res))
                                txt_file_obj.write(line + "\n")

                            txt_file_obj.close()
                        except Exception:
                            txt_file_obj.close()
                            rc_problem = 1
                            LOG.warning(
                                "\tProblem writing Active fire text file: {}".
                                format(output_txt_file))
                            LOG.warn(traceback.format_exc())

            except Exception:
                rc_problem = 1
                LOG.warning(
                    "\tProblem setting attributes in output file {}".format(
                        old_output_file))
                LOG.debug(traceback.format_exc())

            # Move output files to the work directory
            LOG.debug("\tMoving output files from {} to {}".format(
                run_dir, work_dir))
            af_prefix = 'AFIMG' if afire_options['i_band'] else 'AFMOD'
            # FIXME: NOAA should fix NC output for I-band!
            af_suffix = 'nc' if afire_options['i_band'] else 'nc'
            outfiles = glob(pjoin(run_dir, '{}*.{}'.format(af_prefix, af_suffix))) \
                     + glob(pjoin(run_dir, '{}*.txt'.format(af_prefix)))

            for outfile in outfiles:
                try:
                    shutil.move(outfile, work_dir)
                except Exception:
                    rc_problem = 1
                    LOG.warning(
                        "\tProblem moving output {} from {} to {}".format(
                            outfile, run_dir, work_dir))
                    LOG.debug(traceback.format_exc())

        # If no problems, remove the run dir
        if (rc_exe == 0) and (rc_problem == 0) and afire_options['docleanup']:
            cleanup([run_dir])

    except Exception:
        LOG.warn("\tGeneral warning for {}".format(granule_id))
        LOG.debug(traceback.format_exc())
        os.chdir(current_dir)
        #raise

    return [granule_id, rc_exe, rc_problem, exe_out]
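afire_submitter is written as a self-contained worker: it takes a single args dict and always returns [granule_id, rc_exe, rc_problem, exe_out], so it can be mapped over a multiprocessing pool. A minimal dispatch sketch in the style of the pool code in the next example; the wrapper name run_afire_granules and the assumption that the caller builds the granule dicts are illustrative, not taken from the source:

import multiprocessing

# Illustrative dispatch of afire_submitter workers over a pool, mirroring the
# map_async pattern used in the next example. The caller supplies the granule
# dicts plus afire_home and afire_options.
def run_afire_granules(granule_dicts, afire_home, afire_options):
    afire_tasks = [{'granule_dict': gd,
                    'afire_home': afire_home,
                    'afire_options': afire_options} for gd in granule_dicts]
    cpus = afire_options['num_cpu'] or multiprocessing.cpu_count()
    pool = multiprocessing.Pool(cpus)
    results = pool.map_async(afire_submitter, afire_tasks).get(9999999)
    return results  # each entry: [granule_id, rc_exe, rc_problem, exe_out]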
Example #6
def unaggregate_inputs(afire_home, agg_input_files, afire_options):
    '''
    Create a dir for the unaggregated files in the work dir, and use nagg to unaggregate the
    aggregated input files.
    '''

    unagg_inputs_dir = os.path.join(afire_options['work_dir'],
                                    'unaggregated_inputs')
    unagg_inputs_dir = create_dir(unagg_inputs_dir)

    # Construct a list of task dicts...
    nagg_tasks = []
    for agg_input_file in agg_input_files:
        args = {
            'afire_home': afire_home,
            'agg_input_file': agg_input_file,
            'unagg_inputs_dir': unagg_inputs_dir,
            'afire_options': afire_options
        }
        nagg_tasks.append(args)

    # Link the nagg executable into the unaggregated inputs dir...
    paths_to_link = [os.path.join(afire_home, 'vendor/nagg')]
    number_linked = link_files(unagg_inputs_dir, paths_to_link)
    LOG.debug(
        "\tWe are linking {} files to the run dir:".format(number_linked))
    for linked_files in paths_to_link:
        LOG.debug("\t{}".format(linked_files))

    # Setup the processing pool
    cpu_count = multiprocessing.cpu_count()
    LOG.debug('There are {} available CPUs'.format(cpu_count))

    requested_cpu_count = afire_options['num_cpu']

    if requested_cpu_count is not None:
        LOG.debug('We have requested {} {}'.format(
            requested_cpu_count,
            "CPU" if requested_cpu_count == 1 else "CPUs"))

        if requested_cpu_count > cpu_count:
            LOG.warn(
                '{} requested CPUs is greater than available, using {}'.format(
                    requested_cpu_count, cpu_count))
            cpus_to_use = cpu_count
        else:
            cpus_to_use = requested_cpu_count
    else:
        cpus_to_use = cpu_count

    LOG.debug('We are using {}/{} available CPUs'.format(
        cpus_to_use, cpu_count))
    pool = multiprocessing.Pool(cpus_to_use)

    # Submit the Active Fire tasks to the processing pool
    timeout = 9999999
    result_list = []

    start_time = time.time()

    LOG.info("Submitting {} nagg {} to the pool...".format(
        len(nagg_tasks), "task" if len(nagg_tasks) == 1 else "tasks"))
    result_list = pool.map_async(nagg_submitter, nagg_tasks).get(timeout)

    end_time = time.time()

    total_afire_time = execution_time(start_time, end_time)
    LOG.debug("Unaggregation took {:9.6f} seconds".format(
        total_afire_time['delta']))
    LOG.info(
        "Unaggregation took {} days, {} hours, {} minutes, {:8.6f} seconds".
        format(total_afire_time['days'], total_afire_time['hours'],
               total_afire_time['minutes'], total_afire_time['seconds']))

    # Loop through each of the Active Fire results and collect error information
    for result in result_list:
        agg_input_file, nagg_rc, problem_rc, exe_out = result
        LOG.debug(
            ">>> agg_input_file {}: nagg_rc = {}, problem_rc = {}".format(
                agg_input_file, nagg_rc, problem_rc))

    return unagg_inputs_dir
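Both of the last two examples time the pool with an execution_time(start_time, end_time) helper and read the keys 'delta', 'days', 'hours', 'minutes' and 'seconds' from its result. The helper is not shown; a minimal sketch consistent with that usage (an assumption, not the original implementation):

# Hypothetical execution_time helper, inferred from how its result is used:
# return the raw elapsed seconds plus a days/hours/minutes/seconds breakdown.
def execution_time(start_time, end_time):
    delta = end_time - start_time
    days, remainder = divmod(delta, 86400)
    hours, remainder = divmod(remainder, 3600)
    minutes, seconds = divmod(remainder, 60)
    return {'delta': delta,
            'days': int(days),
            'hours': int(hours),
            'minutes': int(minutes),
            'seconds': seconds}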