Example #1
def main(start_date=-1, end_date=-1, name=''):
    config = get_op_config()
    years = config[GlobalModel.years]
    base_folder = config[GlobalModel.base_folder]
    release_loc_folder = config[GlobalModel.loc_folder]
    output_file = join(config[GlobalModel.output_folder],
                       F"{name}_{config[GlobalModel.output_file]}")
    lat_files = config[GlobalModel.lat_files]
    lon_files = config[GlobalModel.lon_files]
    dt = config[GlobalModel.dt]
    kh = 1  # constant value for the synthetic Kh diffusion fields created below
    repeat_release = config[GlobalModel.repeat_release]
    if start_date == -1:
        start_date = config[GlobalModel.start_date]
    if end_date == -1:
        end_date = config[GlobalModel.end_date]
    run_time = timedelta(seconds=(end_date - start_date).total_seconds())

    file_names = read_files(base_folder,
                            years,
                            wind=False,
                            start_date=start_date,
                            end_date=end_date)
    if len(file_names) == 0:
        print("ERROR: We couldn't read any file!")
        return 0

    print("Reading initial positions.....")
    # Concatenate the release positions from all latitude/longitude files
    lat0 = np.concatenate([
        np.genfromtxt(join(release_loc_folder, x), delimiter='')
        for x in lat_files
    ], axis=0)
    lon0 = np.concatenate([
        np.genfromtxt(join(release_loc_folder, x), delimiter='')
        for x in lon_files
    ], axis=0)

    # Alternative variable names:
    # variables = {'U': 'U_combined', 'V': 'V_combined'}
    variables = {'U': 'surf_u', 'V': 'surf_v'}

    # Alternative dimension names:
    # dimensions = {'lat': 'lat', 'lon': 'lon', ...}
    dimensions = {'lat': 'latitude', 'lon': 'longitude', 'time': 'time'}

    print("Reading data.....")
    # Adding the currents field
    chunk_sizes = [False, 'auto', 128, 256, 512, 1024, 2048]
    for chunk_size in chunk_sizes:
        # Square chunks for numeric sizes; 'auto' and False are passed through unchanged
        cs = (chunk_size, chunk_size) if chunk_size not in ['auto', False] else chunk_size
        winds_currents_fieldset = FieldSet.from_netcdf(file_names,
                                                       variables,
                                                       dimensions,
                                                       allow_time_extrapolation=True,
                                                       field_chunksize=cs)

        # -------  Adding constants for periodic halo
        winds_currents_fieldset.add_constant(
            'halo_west', winds_currents_fieldset.U.grid.lon[0])
        winds_currents_fieldset.add_constant(
            'halo_east', winds_currents_fieldset.U.grid.lon[-1])
        winds_currents_fieldset.add_periodic_halo(zonal=True)  # create a zonal halo

        # -------  Making synthetic diffusion coefficient
        U_grid = winds_currents_fieldset.U.grid
        lat = U_grid.lat
        lon = U_grid.lon
        # Getting proportional size per degree

        print("Making Kh.....")
        kh_mer = Field('Kh_meridional',
                       kh * np.ones((len(lat), len(lon)), dtype=np.float32),
                       lon=lon,
                       lat=lat,
                       allow_time_extrapolation=True,
                       fieldtype='Kh_meridional',
                       mesh='spherical')
        kh_zonal = Field('Kh_zonal',
                         kh * np.ones((len(lat), len(lon)), dtype=np.float32),
                         lon=lon,
                         lat=lat,
                         allow_time_extrapolation=True,
                         fieldtype='Kh_zonal',
                         mesh='spherical')

        winds_currents_fieldset.add_field(kh_mer, 'Kh_meridional')
        winds_currents_fieldset.add_field(kh_zonal, 'Kh_zonal')

        print("Setting up everything.....")
        if repeat_release:
            pset = ParticleSet(fieldset=winds_currents_fieldset,
                               pclass=JITParticle,
                               lon=lon0,
                               lat=lat0,
                               repeatdt=repeat_release)
        else:
            pset = ParticleSet(fieldset=winds_currents_fieldset,
                               pclass=JITParticle,
                               lon=lon0,
                               lat=lat0)

        print(F"Running with {pset.size} number of particles", flush=True)
        out_parc_file = pset.ParticleFile(
            name=output_file, outputdt=config[GlobalModel.output_freq])
        t = time.time()
        # Alternative kernel combinations previously tried:
        # pset.execute(AdvectionRK4 + pset.Kernel(periodicBC),
        # pset.execute(AdvectionRK4 + pset.Kernel(EricSolution),
        # pset.execute(AdvectionRK4 + pset.Kernel(RandomWalkSphere),
        print(F"Running for {run_time} hour")
        pset.execute(AdvectionRK4,
                     runtime=run_time,
                     dt=dt,
                     output_file=out_parc_file)

        print(
            F"####### Done time={time.time()-t} ChunkSize: {chunk_size} ####### "
        )
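
A note on the periodic halo: the halo_west / halo_east constants added above are only consumed by a periodic-boundary kernel such as the periodicBC referenced in the commented-out execute variants. Below is a minimal sketch of such a kernel, following the usual Parcels periodic-halo pattern; its exact body is an assumption and is not taken from this example.

def periodicBC(particle, fieldset, time):
    # Wrap particles that drift past the zonal halo back into the domain
    if particle.lon < fieldset.halo_west:
        particle.lon += fieldset.halo_east - fieldset.halo_west
    elif particle.lon > fieldset.halo_east:
        particle.lon -= fieldset.halo_east - fieldset.halo_west

It would be chained to the advection kernel as AdvectionRK4 + pset.Kernel(periodicBC), as the commented-out lines suggest.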
Example #2
def get_reached_countries(maincountry, search_type):
    # NOTE: the original 'def' line is missing from this snippet; this name and
    # signature are assumed from how the variables are used below.
    countries = []
    tons = []
    percs = []
    try:
        if search_type == "from":
            for country in maincountry['from']['from']:
                countries.append(country['name'])
                tons.append(country['tons'])
                percs.append(country['perc'])
    except Exception as e:
        print(F"Failed to extract 'from' countries: {e}")
    return [c.lower() for c in countries], tons, percs


if __name__ == "__main__":
    config = get_op_config()

    # Read geojson
    web_folder = config[GlobalModel.output_folder_web]
    geojson_file = join(web_folder, "countries.json")
    stats_file = join(web_folder, "ReachedTablesData.json")
    geo_data = geopandas.read_file(geojson_file)

    with open(stats_file) as f:
        stats = json.load(f)
        # Iterate over each country
        for country, stat in stats.items():
            # Find the corresponding geo data for this country
            geo_country = geo_data[geo_data["name"].str.lower() == country]
            # Obtain all the countries that receive litter from it
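            # Hypothetical continuation (not in the original snippet): the helper above
            # would presumably be called here to collect the receiving countries, e.g.
            #   reached, tons, percs = get_reached_countries(stat, "from")
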
def createBinaryFileSingle():
    """
    Creates binary and text files corresponding to the desired 'reduced' number of particles
    :return:
    """
    all_reduce_particles_by = [2, 3, 4, 6]
    min_number_particles = 20
    BEACHED = False  # Indicate if we are testing the beached particles

    def myfmt(r):  # 'Round to 2 decimals'
        return float(F"{r:.2f}")

    vecfmt = np.vectorize(myfmt)

    config = get_op_config()

    # ------- Home ---------
    input_folder = config[GlobalModel.output_folder]
    input_file = config[GlobalModel.output_file]
    output_folder = config[GlobalModel.output_folder_web]

    countries_file_name = config[GlobalModel.countries_file]
    # Reading the json file with the names and geometries of the countries
    df_country_list = pd.read_csv(countries_file_name, index_col=0)

    # Reading the output from Ocean Parcels
    nc_file = Dataset(join(input_folder, input_file), "r", format="NETCDF4")

    print("----- Attributes ----")
    for name in nc_file.ncattrs():
        print(name, "=", getattr(nc_file, name))
    # plt.imshow(nc_file['beached'])
    # Print variables
    print("----- Variables ----")
    all_vars = nc_file.variables
    for name in all_vars.keys():
        print(name)

    glob_num_particles = nc_file.dimensions['traj'].size

    lat = all_vars['lat']
    lon = all_vars['lon']
    if BEACHED:
        beached = all_vars['beached']
        # beached_count = all_vars['beached_count']

    # Iterate over the options to reduce the number of particles
    for reduce_particles_global in all_reduce_particles_by:
        final_output_folder = F"{output_folder}/{reduce_particles_global}"
        if not os.path.exists(final_output_folder):
            os.makedirs(final_output_folder)

        cur_idx = 0

        tot_assigned_particles = 0
        countries = {}
        # Iterate over each country
        for cur_country_name in df_country_list.index:
            print(F'-------- {cur_country_name} ----------')
            # Particle indices assigned to this country (stored as a stringified list)
            particles_for_country_str = df_country_list.loc[cur_country_name][
                'idx_country'].replace(']', '').replace('[', '').split(',')
            particles_for_country = [int(x) for x in particles_for_country_str]

            tot_particles = len(particles_for_country)
            tot_assigned_particles += tot_particles
            reduce_particles_by_country = reduce_particles_global
            # If there are not enough particles then we need to reduce the 'separation' of particles
            while (((tot_particles / reduce_particles_by_country) <
                    min_number_particles)
                   and (reduce_particles_by_country > 1)):
                reduce_particles_by_country -= 1

            red_particles_for_country = particles_for_country[::reduce_particles_by_country]

            # Append the particles
            cur_lat_all_part = lat[red_particles_for_country].filled()
            cur_lon_all_part = lon[red_particles_for_country].filled()
            if BEACHED:
                cur_beached_all_part = beached[
                    red_particles_for_country].filled() == 4
                countries[cur_country_name] = {
                    'lat_lon': [
                        vecfmt(cur_lat_all_part).tolist(),
                        vecfmt(cur_lon_all_part).tolist()
                    ],
                    'beached': [cur_beached_all_part.tolist()],
                    'oceans': [
                        x for x in df_country_list.loc[cur_country_name]
                        ['oceans'].split(';')
                    ],
                    'continent':
                    df_country_list.loc[cur_country_name]['continent']
                }
            else:
                countries[cur_country_name] = {
                    'lat_lon': [
                        vecfmt(cur_lat_all_part).tolist(),
                        vecfmt(cur_lon_all_part).tolist()
                    ],
                    'oceans': [
                        x for x in df_country_list.loc[cur_country_name]
                        ['oceans'].split(';')
                    ],
                    'continent':
                    df_country_list.loc[cur_country_name]['continent']
                }

            cur_idx += 3  # Hardcoded because of the way the country list is built

        # ------------- Writing the binary file form the countries object --------------
        txt = ''
        bindata = b''
        for c_country in countries.keys():
            # name, continent, num_particles, num_timesteps
            txt += F"{c_country}, {countries[c_country]['continent']}, {len(countries[c_country]['lat_lon'][0])}, {len(countries[c_country]['lat_lon'][0][0])}\n"
            bindata += (np.array(countries[c_country]['lat_lon'][0]) *
                        100).astype(np.int16).tobytes()
            bindata += (np.array(countries[c_country]['lat_lon'][1]) *
                        100).astype(np.int16).tobytes()

        print(F" Saving binary file {final_ouput_folder}.....")
        header_output_file = F"{final_ouput_folder}/{input_file.replace('.nc','')}.txt"
        binary_file = F"{final_ouput_folder}/{input_file.replace('.nc','')}.bin"
        zip_output_file = F"{final_ouput_folder}/{input_file.replace('.nc','')}.zip"
        # -------- Writing header file ---------------
        with open(header_output_file, 'w') as f:
            f.write(txt)
        # -------- Writing binary file ---------------
        with open(binary_file, 'wb') as f:
            f.write(bindata)

        # -------- Writing zip file (required because the website reads zip files) ---------------
        print(F" Saving zip file ..... {zip_output_file}")
        with zipfile.ZipFile(zip_output_file, 'w') as zip_file:
            zip_file.write(binary_file)
        print(
            F"Original particles: {glob_num_particles}, assigned: {tot_assigned_particles}"
        )

    nc_file.close()
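
For reference, the binary file written above stores, for each country in header order, all latitudes followed by all longitudes as int16 values scaled by 100, with shape (num_particles, num_timesteps). Below is a minimal sketch of how a consumer could read the tracks back from the .txt/.bin pair; the function name is an assumption, and country names containing commas are not handled.

import numpy as np

def read_country_tracks(header_path, bin_path):
    # Parse "name, continent, num_particles, num_timesteps" lines and slice the
    # matching int16 blocks (latitudes first, then longitudes) out of the binary file.
    tracks = {}
    with open(header_path) as hf, open(bin_path, 'rb') as bf:
        for line in hf:
            name, continent, n_part, n_time = [x.strip() for x in line.split(',')]
            n_part, n_time = int(n_part), int(n_time)
            count = n_part * n_time
            lat = np.frombuffer(bf.read(2 * count), dtype=np.int16).reshape(n_part, n_time) / 100
            lon = np.frombuffer(bf.read(2 * count), dtype=np.int16).reshape(n_part, n_time) / 100
            tracks[name] = {'continent': continent, 'lat': lat, 'lon': lon}
    return tracks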