Example #1
def main():
    tokens = import_data(token_file)
    eids = import_data(eids_file)

    endpoints = []
    zipcodes = []

    for eid in eids:
        endpoint = get_endpoints(eid, tokens[0])
        endpoints.append(endpoint)
        print(endpoint)

        response = get_response(endpoint)
        export_data(path, eid + '.json', response)

        for r in response['attendees']:
            profile = r['profile']
            if not profile:
                continue

            addresses = profile.get('addresses')
            if addresses and addresses.get('bill') and addresses['bill'].get('postal_code'):
                zipcode = str(addresses['bill']['postal_code'])

                # Skip duplicates
                if zipcode not in zipcodes:
                    zipcodes.append(zipcode)

    # Join once at the end so the last entry never carries a trailing comma
    locations = 'locations = [\n\n' + ',\n'.join(zipcodes) + '\n]'

    export_data(path, 'zipcodes.dat', zipcodes)
    export_data(path, 'locations.js', locations)
    export_data('../../public/js/project/', 'locations.js', locations)
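
The helper functions used above (get_endpoints, get_response, export_data) are not part of this snippet. A minimal sketch of what they might look like, assuming a JSON HTTP API; the URL template and signatures are hypothetical:

import json
import os

import requests


def get_endpoints(eid, token):
    # Hypothetical URL template; the real endpoint is not shown above
    return 'https://api.example.com/v3/events/%s/attendees/?token=%s' % (eid, token)


def get_response(endpoint):
    # Fetch the endpoint and decode its JSON body
    return requests.get(endpoint).json()


def export_data(path, filename, data):
    # Write data under path; serialize non-string payloads as JSON
    with open(os.path.join(path, filename), 'w') as f:
        f.write(data if isinstance(data, str) else json.dumps(data))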
Example #2
# libraries
import numpy as np
from netCDF4 import Dataset, num2date

# my functions
from data_processing import import_data
from averaging_stats import monthly_average
from decorrelation_scale import decor_scale
from lsf import least_square_fit
from save_netcdf_fields import add_global_atrributes, save_netcdf_decor_scale

# set time and space variables
nt, nlon, nlat = 8400, 360, 133

# call data:
swh, time, lat, lon = import_data("WW3_swh", data_path)

# Use monthly average function to partition data and time into monthly segments
swh_month_dict = monthly_average(np.array(time), swh)

# Initialize monthly partitioned swh and time:
swh_monthly_time = np.ma.array(swh_month_dict["time"])
swh_monthly_data = np.ma.array(swh_month_dict["data"])

# Compute decorrelation time scales

# set variables:
ntime = swh_monthly_data.shape[0]
decor = np.zeros((ntime, nlat, nlon))

# Loop over time
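The snippet is truncated at the time loop. A minimal sketch of what the loop might contain, assuming decor_scale takes one grid point's monthly series plus its time axis and returns a scalar time scale (the signature is an assumption, not shown here):

# Hypothetical loop body: decor_scale's call signature is assumed
for itime in range(ntime):
    for ilat in range(nlat):
        for ilon in range(nlon):
            decor[itime, ilat, ilon] = decor_scale(
                swh_monthly_data[itime][:, ilat, ilon],
                swh_monthly_time[itime])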
Example #3
# libraries
import numpy as np
from netCDF4 import Dataset, num2date

# my functions
from data_processing import import_data
from averaging_stats import monthly_average
from decorrelation_scale import decor_scale
from lsf import least_square_fit
from save_netcdf_fields import add_global_atrributes, save_netcdf_decor_scale

# set time and space variables
nt, nlon, nlat = 8400, 360, 133

# Call data:
wsp, time, lat, lon = import_data("WW3_wsp", data_path)

# Use monthly average function to partition data and time into monthly segments
wsp_month_dict = monthly_average(np.array(time), wsp)

# Initialize monthly partitioned wsp and time:
wsp_monthly_time = np.ma.array(wsp_month_dict["time"])
wsp_monthly_data = np.ma.array(wsp_month_dict["data"])

# Compute decorrelation time scales

# set variables:
ntime = wsp_monthly_data.shape[0]
decor = np.zeros((ntime, nlat, nlon))

# Loop over time
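Once the loop fills decor, the result is presumably written out with the imported save_netcdf_decor_scale; a minimal sketch, with the argument list and output path assumed (the *_decor_time_scale.nc name pattern follows Example #4 below):

# Hypothetical save step: save_netcdf_decor_scale's argument list is assumed
save_netcdf_decor_scale(decor, lon, lat, wsp_monthly_time,
                        output_path, 'WW3_wsp_decor_time_scale.nc')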
Example #4
import numpy as np
from netCDF4 import Dataset, num2date
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cmocean.cm as cmo
import matplotlib.patches as mpatches

# my functions
from data_processing import import_data
from averaging_stats import clima_mean, stat_moments_temporal
from lsf import weighted_least_square_fit, LSF_parameters
from regional_clima_figs import regional_clima, regional_clima_plot
import cartopy_figs as cart

# call IFREMER SWH and CCMP2 WSP processed data:
swh, time_s, lat_s, lon_s = import_data("IFREMER_swh", data_path_i)
wsp, time_w, lat_w, lon_w = import_data("CCMP2_wsp", data_path_c)

# Call decorrelation time scales
###### SWH ######
nc_swh = Dataset(data_path_decor + "IFREMER_swh_decor_time_scale.nc", "r")
decor_swh = nc_swh.variables["decor_scale"][:]
time_decor_swh = num2date(nc_swh.variables["time"][:],
                          nc_swh.variables["time"].units)
###### WSP ######
nc_wsp = Dataset(data_path_decor + "CCMP2_wsp_decor_time_scale.nc", "r")
decor_wsp = nc_wsp.variables["decor_scale"][:]
time_decor_wsp = num2date(nc_wsp.variables["time"][:],
                          nc_wsp.variables["time"].units)

# Compute WSP statistical moments seasonally
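The snippet ends before the moment computation. A minimal sketch, assuming stat_moments_temporal takes a time axis and a data cube and returns the moments keyed by name (interface assumed, not shown here):

# Hypothetical call: stat_moments_temporal's interface is assumed
wsp_moments = stat_moments_temporal(np.ma.array(time_w), wsp)
wsp_mean = wsp_moments['mean']
wsp_var = wsp_moments['var']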
Example #5
# libraries
import numpy as np
from netCDF4 import Dataset, num2date

# my functions
from data_processing import import_data
from averaging_stats import monthly_average
from decorrelation_scale import decor_scale
from lsf import least_square_fit
from save_netcdf_fields import save_netcdf_decor_scale

# set time and space variables
nt, nlon, nlat = 8400, 360, 133

# Call data:
wsp, time, lat, lon = import_data("CCMP2_wsp", data_path)

# Use monthly average function to partition data and time into monthly segments
wsp_month_dict = monthly_average(np.array(time), wsp)

# Initialize monthly partitioned wsp and time:
wsp_monthly_time = np.ma.array(wsp_month_dict["time"])
wsp_monthly_data = np.ma.array(wsp_month_dict["data"])

# Compute decorrelation time scales

# set variables:
ntime = wsp_monthly_data.shape[0]
decor = np.zeros((ntime, nlat, nlon))

# Loop over time
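least_square_fit is imported above but unused in the shown portion; presumably each monthly series is detrended before its decorrelation scale is computed. A sketch under that assumption (both call signatures assumed):

# Hypothetical detrend step for one grid point before computing the scale
series = wsp_monthly_data[0][:, 0, 0]
trend = least_square_fit(wsp_monthly_time[0], series)
decor[0, 0, 0] = decor_scale(series - trend, wsp_monthly_time[0])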
Example #6
# Import Libraries
import numpy as np
from netCDF4 import Dataset, num2date

# Import functions
from data_processing import import_data
from averaging_stats import clima_mean
from lsf import weighted_least_square_fit, LSF_parameters, uncertainty_phase_amp
from save_netcdf_fields import save_netcdf_lsf_parameters

# Set dimensions for data of space and time
nt, nlon, nlat = 12, 360, 133

# Call data:
wsp, time, lat, lon = import_data("WW3_wsp", data_path_ww)
swh, time, lat, lon = import_data("WW3_swh", data_path_ws)

# Calculate the monthly climatological statistics (mean, std, N) from 1993 to 2015
#### SWH ####
swh_clima_dict = clima_mean(date_time=np.ma.array(time), data=swh)
swh_clima_mean = np.ma.array(swh_clima_dict["mean"])
swh_clima_std = np.ma.array(swh_clima_dict["std"])
swh_clima_n = np.ma.array(swh_clima_dict["N"])
#### WSP ####
wsp_clima_dict = clima_mean(date_time=np.ma.array(time), data=wsp)
wsp_clima_mean = np.ma.array(wsp_clima_dict["mean"])
wsp_clima_std = np.ma.array(wsp_clima_dict["std"])
wsp_clima_n = np.ma.array(wsp_clima_dict["N"])

# call monthly decorrelation scale
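The monthly decorrelation scales are presumably read back from netCDF the same way Example #4 does; a minimal sketch, with data_path_decor and the file names assumed:

###### SWH ######
nc_swh = Dataset(data_path_decor + "WW3_swh_decor_time_scale.nc", "r")
decor_swh = nc_swh.variables["decor_scale"][:]
time_decor_swh = num2date(nc_swh.variables["time"][:],
                          nc_swh.variables["time"].units)
###### WSP ######
nc_wsp = Dataset(data_path_decor + "WW3_wsp_decor_time_scale.nc", "r")
decor_wsp = nc_wsp.variables["decor_scale"][:]
time_decor_wsp = num2date(nc_wsp.variables["time"][:],
                          nc_wsp.variables["time"].units)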
Example #7
# libraries
import numpy as np
from netCDF4 import Dataset, num2date

# my functions
from data_processing import import_data
from averaging_stats import monthly_average
from lsf import least_square_fit
from decorrelation_scale import decor_scale
from save_netcdf_fields import add_global_atrributes, save_netcdf_decor_scale

# set time and space variables
nt, nlon, nlat = 8400, 360, 133

# call data:
swh, time, lat, lon = import_data("IFREMER_swh", data_path)

# Use monthly average function to partition data and time into monthly segments:
swh_month_dict = monthly_average(np.array(time), swh)

# Initialize monthly partitioned swh and time:
swh_monthly_time = np.ma.array(swh_month_dict["time"])
swh_monthly_data = np.ma.array(swh_month_dict["data"])

# Compute decorrelation time scales
# set variables
ntime = swh_monthly_data.shape[0]
decor = np.zeros((ntime, nlat, nlon))

# Loop over time
for itime in range(0, ntime):
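    # The snippet is truncated here; a hypothetical body follows, with
    # decor_scale's call signature assumed (not shown in the original)
    for ilat in range(nlat):
        for ilon in range(nlon):
            decor[itime, ilat, ilon] = decor_scale(
                swh_monthly_data[itime][:, ilat, ilon],
                swh_monthly_time[itime])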
Example #8
import csv

import numpy as np

import data_processing as dp  # assumed: provides the dp.import_data used below


# Pure-Python recipe equivalent to itertools.combinations_with_replacement
# (from the itertools docs)
def combinations_with_replacement(iterable, r):
    pool = tuple(iterable)
    n = len(pool)
    if not n and r:
        return
    indices = [0] * r
    yield tuple(pool[i] for i in indices)
    while True:
        # Advance the rightmost index that still has room to grow
        for i in reversed(range(r)):
            if indices[i] != n - 1:
                break
        else:
            return
        indices[i:] = [indices[i] + 1] * (r - i)
        yield tuple(pool[i] for i in indices)



if __name__ == '__main__':

    # Read in data
    for i in range(0, 1):
        file_train = "train"
        data0 = np.array(dp.import_data(file_train))
        data0 = data0.astype(int)

        file_test = "test"
        test_data0 = np.array(dp.import_data(file_test))
        test_data0 = test_data0.astype(int)

        out_best = csv.writer(open("results/best_results.csv", "w", newline=""))
        best_result = 0.
        best_val = []
        ival = list(range(1, 11))
        print(ival)
        c = [1, 2, 6, 8, 10]
        for n_est in range(23, 24):
            maxd = n_est % 50 + 1
        #for c in combinations_with_replacement(ival, 10):
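
The snippet cuts off inside the hyperparameter loop. Given the n_est and maxd names, each setting presumably trains a tree ensemble and keeps the best score; a minimal sketch, assuming scikit-learn's RandomForestClassifier and labels in column 0 of the data (both assumptions, not shown above):

from sklearn.ensemble import RandomForestClassifier

# Hypothetical search body: model family and label layout are assumed
for n_est in range(23, 24):
    maxd = n_est % 50 + 1
    model = RandomForestClassifier(n_estimators=n_est, max_depth=maxd)
    model.fit(data0[:, 1:], data0[:, 0])
    score = model.score(test_data0[:, 1:], test_data0[:, 0])
    if score > best_result:
        best_result = score
        best_val = [n_est, maxd]
        out_best.writerow([n_est, maxd, score])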