Example #1
def erg_orb_l3_t89(event_point):
    epoch = np.empty((0, )).astype(int)  # astype(int) is required for the time arithmetic to work correctly
    pos_eq_t89 = np.empty((0, 2))
    pos_iono_north_t89 = np.empty((0, 2))
    pos_iono_south_t89 = np.empty((0, 2))
    pos_lmc_t89 = np.empty((0, 9))
    pos_lstar_t89 = np.empty((0, 9))
    pos_I_t89 = np.empty((0, 9))
    pos_blocal_t89 = np.empty((0, ))
    pos_beq_t89 = np.empty((0, ))
    for i in date_finder(event_point)[0]:
        path = common_path + "ergsc/satellite/erg/orb/l3/t89/" + i.strftime(
            '%Y') + "/" + i.strftime('%m') + "/"
        file = path + "erg_orb_l3_t89_" + i.strftime('%Y%m%d') + "_v02.cdf"
        if pathlib.Path(file).exists():
            data = cdflib.CDF(file)

            epoch = np.concatenate((epoch, data['epoch']))
            pos_eq_t89 = np.concatenate((pos_eq_t89, data['pos_eq_t89']))
            pos_iono_north_t89 = np.concatenate(
                (pos_iono_north_t89, data['pos_iono_north_t89']))
            pos_iono_south_t89 = np.concatenate(
                (pos_iono_south_t89, data['pos_iono_south_t89']))
            pos_lmc_t89 = np.concatenate((pos_lmc_t89, data['pos_lmc_t89']))
            pos_lstar_t89 = np.concatenate(
                (pos_lstar_t89, data['pos_lstar_t89']))
            pos_I_t89 = np.concatenate((pos_I_t89, data['pos_I_t89']))
            pos_blocal_t89 = np.concatenate(
                (pos_blocal_t89, data['pos_blocal_t89']))
            pos_beq_t89 = np.concatenate((pos_beq_t89, data['pos_beq_t89']))
        else:
            print("File not found: " + file)
    # pos_lstar_t89[pos_lstar_t89 < 0] = np.nan  # optional: mask unphysical negative L* values
    return epoch, pos_eq_t89, pos_iono_north_t89, pos_iono_south_t89, pos_lmc_t89, pos_lstar_t89, pos_I_t89, pos_blocal_t89, pos_beq_t89
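A minimal usage sketch for the loader above, assuming date_finder and common_path are defined at module level as in the original project (the event date is a hypothetical placeholder):

import datetime as dt

event_point = dt.datetime(2017, 4, 1)  # hypothetical event marker; date_finder() selects the days to load
(epoch, pos_eq, pos_iono_n, pos_iono_s, pos_lmc,
 pos_lstar, pos_I, pos_blocal, pos_beq) = erg_orb_l3_t89(event_point)
print(epoch.shape)  # one concatenated array per variable, covering all days with existing files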
Example #2
def pgp(dateRange,
        source='/home/james/Documents/MPHYS_ARCHIVE/PGP',
        gsm=False):
    dates = [f"20{i[0]:02d}{i[1]:02d}" for i in dateRange]
    files = sorted(glob.glob(source + '/*.cdf'))
    filesRef = [s.split('/')[-1].split('_')[-2][:-2] for s in files]
    files = dict(zip(filesRef, files))

    df = pd.DataFrame(columns=['x', 'y', 'z'])
    with Timer('Timing PGP'):
        bar = progress.Bar('Loading PGP', max=len(dates))
        for d in dates:
            logging.info(files[d])
            pgp = cdflib.CDF(files[d])
            timetags = pgp.varget('Epoch__CL_JP_PGP')
            pos = pgp.varget('sc_r_xyz_gse__CL_JP_PGP')
            if gsm:
                conv = pgp.varget('gse_gsm__CL_JP_PGP')
                pos[:, 1] *= np.cos(np.radians(-conv))
                pos[:, 2] *= np.cos(np.radians(-conv))
            time = cdflib.cdfepoch.unixtime(timetags)
            time = [dt.datetime.utcfromtimestamp(t) for t in time]
            locations_month = pd.DataFrame(np.column_stack(
                [time, pos[:, 0], pos[:, 1], pos[:, 2]]),
                                           columns=['time', 'x', 'y', 'z'])
            locations_month.x = locations_month.x.astype(float)
            locations_month.z = locations_month.z.astype(float)
            locations_month.y = locations_month.y.astype(float)
            locations_month = locations_month.set_index('time')
            df = pd.concat([df, locations_month])  # DataFrame.append was removed in pandas 2.0
            bar.next()
        bar.finish()
    return df
Example #3
    def load_cdf_data(self):
        """
        load the data from the cdf file
        :return:
        """
        cdf_file = cdflib.CDF(self.file_path)
        cdf_info = cdf_file.cdf_info()
        variables = cdf_info['zVariables']

        if dict(self.variable_name_dict):
            new_dict = {vn: vn for vn in variables}
            self.variable_name_dict = pybasic.dict_set_default(
                self.variable_name_dict, **new_dict)

        for var_name, cdf_var_name in self.variable_name_dict.items():
            if var_name == 'CDF_EPOCH':
                epochs = cdf_file.varget(variable=cdf_var_name)
                epochs = cdflib.cdfepoch.unixtime(epochs)
                dts = [
                    datetime.timedelta(seconds=epoch) +
                    datetime.datetime(1970, 1, 1, 0, 0, 0) for epoch in epochs
                ]
                self.variables['SC_DATETIME'] = np.array(dts).reshape(
                    (len(dts), 1))
                continue
            var = cdf_file.varget(variable=cdf_var_name)
            var = np.array(var)
            vshape = var.shape
            if len(vshape) == 1:
                var = var.reshape(vshape[0], 1)
            self.variables[var_name] = var
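A side note on the epoch handling above: the unixtime-plus-timedelta route is equivalent to cdflib's own converter. A minimal sketch, assuming a reasonably recent cdflib (the file and variable names are placeholders):

import cdflib

cdf_file = cdflib.CDF('some_file.cdf')     # placeholder path
epochs = cdf_file.varget('Epoch')          # placeholder variable name
dts = cdflib.cdfepoch.to_datetime(epochs)  # one-call conversion to datetimes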
Example #4
def importar_lpw(year, month, day, ti, tf):
    date_orbit = dt.date(int(year), int(month), int(day))
    year = date_orbit.strftime("%Y")
    month = date_orbit.strftime("%m")
    day = date_orbit.strftime("%d")

    if gethostname() == "magneto2":
        path = "../../../../media/gabybosc/datos/LPW/"
    elif gethostname() == "gabybosc":
        path = "../../datos/LPW/"
    else:
        path = "../../../datos/LPW/"

    lpw = cdf.CDF(path + f"mvn_lpw_l2_lpnt_{year}{month}{day}_v03_r02.cdf")

    t_unix = lpw.varget("time_unix")
    e_density = lpw.varget("data")[:, 3]

    t = unix_to_decimal(t_unix)

    inicio = donde(t, ti)
    fin = donde(t, tf)

    t_cut = t[inicio:fin]
    e_density_cut = e_density[inicio:fin]

    return lpw, t_cut, e_density_cut
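A usage sketch for these MAVEN loaders (importar_lpw and the importar_* functions below share the same signature); the date and the ti/tf window are hypothetical:

lpw, t_cut, e_density_cut = importar_lpw(2016, 3, 16, 17.85, 18.4)  # ti, tf in decimal hours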
Example #5
def ReadCDF(fname, Verbose=True):
    '''
    Read a CDF file's contents.
    '''

    if not os.path.isfile(fname):
        print('File not found')
        return None, None

    #open the file
    f = cdflib.CDF(fname)

    #get the list of zVariables
    var = f.cdf_info()['zVariables']

    #create output dicts
    data = {}
    attr = {}
    for v in var:
        data[v] = f.varget(v)
        attr[v] = f.varattsget(v)

    #drop the reference; older cdflib versions also expose close(), newer ones manage the file handle internally
    del f

    return data, attr
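Usage is a single call; a sketch with a placeholder filename:

data, attr = ReadCDF('some_file.cdf')  # placeholder path
if data is not None:
    print(list(data.keys()))  # zVariable names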
Example #6
    def __init__(self, cdfPath, parseFilenameParams=True):
        self.cdf = cdflib.CDF(cdfPath)
        for var in allVars.keys():
            # Just load all the scalar values, they're small enough, and most of them are
            # important
            if typeDict[var] == 'Val':
                setattr(self, var, self.cdf.varget(var).item())
        atts = self.cdf.globalattsget()
        if len(atts) > 0:
            for k in atts:
                setattr(self, k, str(atts[k]))

        cdfFilename = os.path.basename(cdfPath)
        if cdfFilename != 'radyn_out.cdf':
            if parseFilenameParams and cdfFilename.startswith('radyn_out.'):
                p = cdfFilename[cdfFilename.find('.') + 1:]
                params = p.split('_')
                self.filenameParams = params
                if len(params) != 6:
                    raise ValueError(
                        'FilenameParams should contain 6 underscore separated terms.\n'
                        'See FCHROMA simulation documentation for examples.\n'
                        'If you don\'t want to parse these then call with parseFilenameParams=False'
                    )
                self.startingModelAtmosphere = params[0]
                self.beamSpectralIndex = float(params[1][1:])
                self.totalBeamEnergy = float(params[2])
                self.beamPlulseType = params[3]
                self.cutoffEnergy = params[4]
                self.beamType = params[5]
Example #7
def importar_swia(year, month, day, ti, tf):
    date_orbit = dt.date(int(year), int(month), int(day))
    year = date_orbit.strftime("%Y")
    month = date_orbit.strftime("%m")
    day = date_orbit.strftime("%d")

    # if gethostname() == "magneto2":
    #     path = f"../../../../media/gabybosc/datos/SWIA/"
    # elif gethostname() == "gabybosc":
    #     path = "../../datos/SWIA/"
    # else:
    path = f"../../../datos/SWIA/"

    swia = cdf.CDF(path +
                   f"mvn_swi_l2_onboardsvymom_{year}{month}{day}_v01_r01.cdf")

    t_unix = swia.varget("time_unix")
    density = swia.varget("density")  # cm⁻³
    temperature = swia.varget("temperature_mso")  # eV
    vel_mso_xyz = swia.varget("velocity_mso")  # km/s

    t_swia = unix_to_decimal(t_unix)
    inicio = donde(t_swia, ti)
    fin = donde(t_swia, tf)

    t_cut = t_swia[inicio:fin]
    density_cut = density[inicio:fin]
    temperature_cut = temperature[inicio:fin]
    vel_mso_cut = vel_mso_xyz[inicio:fin]  # km/s

    return swia, t_cut, density_cut, temperature_cut, vel_mso_cut
Example #8
def process(root, metadata, subjects, info):
    commands, total = [], 0
    for camera_ID, camera in enumerate(metadata.camera_ids):
        base_filename = metadata.get_base_filename(info['subject'],
                                                   info['action'],
                                                   info['subaction'], camera)
        CDF_path = root / info['subject'] / 'D2_Positions' / '{:}.cdf'.format(
            base_filename)
        CDF_path = CDF_path.resolve()
        cdf_file = cdflib.CDF(str(CDF_path))
        Poses_2d = cdf_file.varget("Pose")
        Poses_2d = Poses_2d.reshape(Poses_2d.shape[1], 32, 2)
        video_path = root / info['subject'] / 'Videos' / '{:}.mp4'.format(
            base_filename)
        frame_xdir = root / info['subject'] / 'Frames' / '{:}'.format(
            base_filename)
        video_path, frame_xdir = video_path.resolve(), frame_xdir.resolve()
        frame_xdir.mkdir(parents=True, exist_ok=True)
        #command = "ffmpeg -nostats -loglevel 0 -i {:} -qscale:v 3 {:}/image-%06d.png".format(video_path, frame_xdir)
        command = "ffmpeg -i {:} -q:v 1 -f image2 -start_number 0 {:}/image-%06d.png".format(
            str(video_path).replace(' ', '\ '),
            str(frame_xdir).replace(' ', '\ '))
        camera_info = metadata.loadCamera(subjects[info['subject']],
                                          camera_ID + 1)
        info['pose-{:}'.format(camera)] = Poses_2d
        commands.append(command)
        total += Poses_2d.shape[0]
    return commands, total
Example #9
    def __init__(self, filename,
                 varformat='*',  # Regular expressions
                 var_types=['data', 'support_data'],
                 center_measurement=False,
                 raise_errors=False,
                 regnames=None,
                 datetime=True,
                 **kwargs):
        self._raise_errors = raise_errors
        self._filename = filename
        self._varformat = varformat
        self._var_types = var_types
        self._datetime = datetime
        self._center_measurement = center_measurement

        # Registration names map from file params to kamodo-compatible names
        if regnames is None:
            regnames = {}
        self._regnames = regnames

        self._cdf_file = cdflib.CDF(self._filename)
        self._cdf_info = self._cdf_file.cdf_info()
        self.data = {}
        self.meta = {}
        self._dependencies = {}

        self._variable_names = (self._cdf_info['rVariables']
                                + self._cdf_info['zVariables'])

        self.load_variables()
Example #10
def load_data(bpath, subjects, actions, dim=3):
    """Loads 2d or 3d ground truth from disk, and puts it in an easy-to-access dictionary

    Args
      bpath: String. Path where to load the data from
      subjects: List of integers. Subjects whose data will be loaded
      actions: List of strings. The actions to load
      dim: Integer={2,3}. Load 2 or 3-dimensional data
    Returns:
      data: Dictionary with keys k=(subject, action, seqname)
        values v=(nx(32*2) matrix of 2d ground truth)
        There will be 2 entries per subject/action if loading 3d data
        There will be 8 entries per subject/action if loading 2d data
    """

    if dim not in [2, 3]:
        raise ValueError('dim must be 2 or 3')

    data = {}

    for subj in subjects:
        for action in actions:

            # print('Reading subject {0}, action {1}'.format(subj, action))

            dpath = os.path.join(bpath, 'S{0}'.format(subj),
                                 'MyPoseFeatures/D{0}_Positions'.format(dim),
                                 '{0}*.cdf'.format(action))
            # print(dpath)

            fnames = glob.glob(dpath)

            loaded_seqs = 0
            for fname in fnames:
                seqname = os.path.basename(fname)

                # This rule makes sure SittingDown is not loaded when Sitting is requested
                if action == "Sitting" and seqname.startswith("SittingDown"):
                    continue

                # This rule makes sure that WalkDog and WalkTogether are not loaded when
                # Walking is requested.
                if seqname.startswith(action):
                    # print(fname)
                    loaded_seqs = loaded_seqs + 1

                    cdf_file = cdflib.CDF(fname)
                    poses = cdf_file.varget("Pose").squeeze()
                    cdf_file.close()

                    data[(subj, action, seqname)] = poses

            if dim == 2:
                assert loaded_seqs == 8, "Expecting 8 sequences, found {0} instead".format(
                    loaded_seqs)
            else:
                assert loaded_seqs == 2, "Expecting 2 sequences, found {0} instead".format(
                    loaded_seqs)

    return data
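A usage sketch matching the docstring above (the base path and the seqname key are hypothetical; actual keys come from the CDF filenames on disk):

data = load_data('/path/to/h36m', subjects=[1, 5], actions=['Walking'], dim=2)
poses = data[(1, 'Walking', 'Walking 1.54138969.cdf')]  # n x (32*2) matrix of 2d ground truth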
Example #11
def from_cdflib(files, varname, start_date, end_date):

    global cdf_vars
    global file_vars

    if isinstance(files, str):
        files = [files]
    tstart = datetime_to_list(start_date)
    tend = datetime_to_list(end_date)

    # Extract metadata
    cdf_vars = {}
    for file in files:
        file_vars = {}
        cdf = cdflib.CDF(file)

        try:
            data = cdflib_readvar(cdf, varname, tstart, tend)
        finally:
            cdf.close()

    return data
Example #12
def importar_swea(year, month, day, ti, tf):
    date_orbit = dt.date(int(year), int(month), int(day))
    year = date_orbit.strftime("%Y")
    month = date_orbit.strftime("%m")
    day = date_orbit.strftime("%d")

    if gethostname() == "magneto2":
        path = "../../../../media/gabybosc/datos/SWEA/"
    elif gethostname() == "gabybosc":
        path = "../../datos/SWEA/"
    else:
        path = "../../../datos/SWEA/"

    swea = cdf.CDF(path +
                   f"mvn_swe_l2_svyspec_{year}{month}{day}_v04_r01.cdf")

    flux_all = swea.varget("diff_en_fluxes")
    energia = swea.varget("energy")
    t_unix = swea.varget("time_unix")

    t = unix_to_decimal(t_unix)

    inicio = donde(t, ti)
    fin = donde(t, tf)

    t_cut = t[inicio:fin]

    flux = flux_all[inicio:fin]
    flux_plot = np.transpose(flux)[::-1]

    return swea, t_cut, energia, flux_plot
Example #13
def importar_static(year, month, day, ti, tf):
    date_orbit = dt.date(int(year), int(month), int(day))
    year = date_orbit.strftime("%Y")
    month = date_orbit.strftime("%m")
    day = date_orbit.strftime("%d")

    if gethostname() == "magneto2":
        path = "../../../../media/gabybosc/datos/STATIC/"
    elif gethostname() == "gabybosc":
        path = "../../datos/STATIC/"
    else:
        path = "../../../datos/STATIC/"

    static = cdf.CDF(path +
                     f"mvn_sta_l2_c6-32e64m_{year}{month}{day}_v02_r01.cdf")

    t_unix = static.varget("time_unix")
    mass = static.varget("mass_arr")
    energy = static.varget("energy")

    t = unix_to_decimal(t_unix)

    inicio = donde(t, ti)
    fin = donde(t, tf)

    t_cut = t[inicio:fin]
    mass_cut = mass[inicio:fin]
    energy_cut = energy[inicio:fin]

    return static, t_cut, mass_cut, energy_cut
Example #14
    def load_omni_cdf(self):
        f_cdf = cdflib.CDF(self.file_path)
        f_info = f_cdf.cdf_info()
        variables = {}
        self.metadata['var_attrs'] = {}
        for var_name, var_name_cdf in cdf_variable_name_dict.items():
            var = f_cdf.varget(var_name_cdf)
            var_attr = f_cdf.varattsget(var_name_cdf)
            fillval = var_attr['FILLVAL']
            var = np.where(var == fillval, np.nan, var)
            variables[var_name] = np.reshape(var, (var.size, 1))
            self.metadata['var_attrs'][var_name] = var_attr

        dts_str = cdflib.cdfepoch.encode(variables['EPOCH'].flatten())
        dts = np.empty_like(variables['EPOCH'], dtype=datetime.datetime)
        for ind, dt_str in enumerate(dts_str):
            dts[ind, 0] = datetime.datetime.strptime(dt_str + '000',
                                                     '%Y-%m-%dT%H:%M:%S.%f')
        variables['DATETIME'] = dts
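        # the x axis is shared between the GSE and GSM frames, so B_x carries over unchanged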
        variables['B_x_GSM'] = variables['B_x_GSE']

        self.variables = variables
        self.metadata.update(f_cdf.globalattsget(expand=False))
        self.done = True
Example #15
def solo_rpw_hfr(filepath):
    rpw_l2_hfr = cdflib.CDF(filepath)

    times = rpw_l2_hfr.varget('EPOCH')

    freqs = rpw_l2_hfr.varget('FREQUENCY')

    # Indicates the THR sensor configuration (V1=1, V2=2, V3=3, V1-V2=4, V2-V3=5,
    # V3-V1=6, B_MF=7, HF_V1-V2=9, HF_V2-V3=10, HF_V3-V1=11)
    sensor = rpw_l2_hfr.varget('SENSOR_CONFIG')
    freq_uniq = np.unique(
        rpw_l2_hfr.varget('FREQUENCY'))  # frequency channels list
    sample_time = rpw_l2_hfr.varget('SAMPLE_TIME')

    agc1 = rpw_l2_hfr.varget('AGC1')
    agc2 = rpw_l2_hfr.varget('AGC2')

    flux_density1 = rpw_l2_hfr.varget('FLUX_DENSITY1')
    flux_density2 = rpw_l2_hfr.varget('FLUX_DENSITY2')

    rpw_l2_hfr.close()

    # For CH1 extract times, freqs and data points
    slices1 = []
    times1 = []
    freq1 = []
    for cfreq in freq_uniq:
        search = np.argwhere((freqs == cfreq) & (sensor[:, 0] == 9)
                             & (agc1 != 0))
        if search.size > 0:
            slices1.append(agc1[search])
            times1.append(times[search])
            freq1.append(cfreq)

    # For CH2 extract times, freqs and data points
    slices2 = []
    times2 = []
    freq2 = []
    for cfreq in freq_uniq:
        search = np.argwhere((freqs == cfreq) & (sensor[:, 1] == 9)
                             & (agc2 != 0))
        if search.size > 0:
            slices2.append(agc2[search])
            times2.append(times[search])
            freq2.append(cfreq)

    # Somewhat arbitrary, but pick a time near the middle of the frequency sweep
    tt1 = np.hstack(times1)[:, 160]
    tt2 = np.hstack(times2)[:, 50]

    spec1 = np.hstack(slices1)
    spec2 = np.hstack(slices2)

    return tt1, freq1, spec1, tt2, freq2, spec2
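A usage sketch with a placeholder Solar Orbiter RPW L2 HFR filename:

tt1, freq1, spec1, tt2, freq2, spec2 = solo_rpw_hfr('solo_L2_rpw-hfr-surv_20200620_V01.cdf')  # placeholder file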
Example #16
def test_read():
    fname = 'helios.cdf'
    if not os.path.exists(fname):
        import urllib.request
        urllib.request.urlretrieve(
            'http://helios-data.ssl.berkeley.edu/data/E1_experiment/New_proton_corefit_data_2017/cdf/helios1/1974/h1_1974_346_corefit.cdf',
            fname)
    cdf = cdflib.CDF(fname)
Example #17
    def fetch(self):
        """ Fetch data from remote and hold the files """
        for loc, fname, url in zip(self.files["locations"],
                                   self.files["fnames"], self.files["urls"]):
            if not os.path.exists(loc):
                os.makedirs(loc)
            floc = loc + fname
            if not os.path.exists(floc):
                if self.verbose:
                    print(" URL -", url)
                response = requests.get(url, stream=True)
                if response.status_code == 200:
                    with open(floc, "wb") as f:
                        shutil.copyfileobj(response.raw, f)
                    # open the CDF only after the download has been fully written
                    self.files["file_objects"].append(cdflib.CDF(floc))
            else:
                if self.verbose:
                    print(" Loading from - ", floc)
                self.files["file_objects"].append(cdflib.CDF(floc))
        return self
Example #18
def test_read():
    fname = 'helios.cdf'
    url = ("http://helios-data.ssl.berkeley.edu/data/"
           "E1_experiment/New_proton_corefit_data_2017/"
           "cdf/helios1/1974/h1_1974_346_corefit.cdf")
    if not os.path.exists(fname):
        urllib.request.urlretrieve(url, fname)
    cdflib.CDF(fname)
Example #19
def example2():
    """
    Plot difference between modelled and observed field strength using Swarm A
    data in August 2018 from a cdf-file.

    """
    import cdflib

    model = load_CHAOS_matfile(FILEPATH_CHAOS)
    print(model)

    cdf_file = cdflib.CDF(
        'data/SW_OPER_MAGA_LR_1B_'
        '20180801T000000_20180801T235959'
        '_PT15S.cdf')
    # print(cdf_file.cdf_info())  # print cdf info/contents

    radius = cdf_file.varget('Radius') / 1000  # km
    theta = 90. - cdf_file.varget('Latitude')  # colat deg
    phi = cdf_file.varget('Longitude')  # deg
    time = cdf_file.varget('Timestamp')  # milliseconds since year 0 (CDF_EPOCH)
    time = time / (1e3 * 3600 * 24) - 730485  # time in modified Julian date 2000
    F_swarm = cdf_file.varget('F')
    cdf_file.close()

    theta_gsm, phi_gsm = transform_points(theta,
                                          phi,
                                          time=time,
                                          reference='gsm')
    index_day = np.logical_and(phi_gsm < 90, phi_gsm > -90)
    index_night = np.logical_not(index_day)

    # complete forward computation: pre-built not customizable (see ex. 1)
    B_radius, B_theta, B_phi = model(time, radius, theta, phi)

    # compute field strength and plot together with data
    F = np.sqrt(B_radius**2 + B_theta**2 + B_phi**2)

    print('RMSE of F: {:.5f} nT'.format(np.std(F - F_swarm)))

    plt.scatter(theta_gsm[index_day],
                F_swarm[index_day] - F[index_day],
                s=0.5,
                c='r',
                label='dayside')
    plt.scatter(theta_gsm[index_night],
                F_swarm[index_night] - F[index_night],
                s=0.5,
                c='b',
                label='nightside')
    plt.xlabel('dipole colatitude ($^\\circ$)')
    plt.ylabel('$\\mathrm{d} F$ (nT)')
    plt.legend(loc=2)
    plt.show()
Example #20
    def __init__(
        self,
        times=(),
        objCad=3600,
        cdfPath="/home/diegodp/Documents/PhD/Paper_3/SolO_SDO_EUI/unsafe/soloData/"
    ):
        """ 
        Variables within the SWEAP CDF:
            [
                'Epoch', 
                'Half_interval', 
                'SCET', 
                'Info', 
                'validity', 
                'N', 
                'V_SRF', 
                'V_RTN', 
                'P_SRF', 
                'P_RTN', 
                'TxTyTz_SRF', 
                'TxTyTz_RTN', 
                'T'
            ] 
        """
        _swe_df = None
        # files must be downloaded beforehand; there is no automatic fetch here
        for index, file in enumerate(sorted(glob(f"{cdfPath}*.cdf"))):
            cdf = cdflib.CDF(file)
            time = cdfEpoch.to_datetime(cdf["Epoch"])
            _df = pd.DataFrame({}, index=time)

            for i in ("V_RTN", "N", "T", "validity"):
                if i == "V_RTN":  # Allow for multidimensional
                    for n, arg in zip(
                        (0, 1, 2), ("_R", "_T", "_N")):  # R is radial velocity
                        _df[f"V{arg}"] = cdf[i][:, n]

                else:
                    _df[i] = cdf[i]

            # Join the dataframes only after the first instance
            if index == 0:
                _swe_df = _df
            else:
                _swe_df = pd.concat([_swe_df, _df])  # DataFrame.append was removed in pandas 2.0

        # Mask values outside of times to nan
        mask = (_swe_df.index > times[0]) & (_swe_df.index <= times[1])
        _swe_df = _swe_df.loc[mask]
        # NAN and interpolate values where validity < 3
        _swe_df[_swe_df["validity"] < 3] = np.nan
        del _swe_df["validity"]
        _swe_df = _swe_df.resample(f"{objCad}s").mean()
        self.df = _swe_df
Example #21
def load_position(fname):
    with Timer('loading position data'):
        cl_locations = cdflib.CDF(fname)
        timetags = cl_locations.varget('Epoch__CL_JP_PGP')
        pos = cl_locations.varget('sc_r_xyz_gse__CL_JP_PGP')
        time = cdflib.cdfepoch.unixtime(timetags)
        time = [dt.datetime.utcfromtimestamp(t) for t in time]
        locations = pd.DataFrame(np.column_stack(
            [time, pos[:, 0], pos[:, 1], pos[:, 2]]),
                                 columns=['time', 'x', 'y', 'z'])
    return locations
Example #22
def load_moments(fname):
    with Timer('Load cluster moments'):
        cl_moments = cdflib.CDF(fname)
        timetags = cl_moments.varget(
            'time_tags__C1_CP_CIS-HIA_ONBOARD_MOMENTS')
        time = cdflib.cdfepoch.unixtime(timetags)
        time = [dt.datetime.utcfromtimestamp(t) for t in time]
        temp = cl_moments.varget('temperature__C1_CP_CIS-HIA_ONBOARD_MOMENTS')
        moments = pd.DataFrame(np.column_stack([time, temp]),
                               columns=['time', 'temp'])
    return moments
Example #23
def _load_cdf(file_path):
    '''
    A function to handle loading cdflib, and printing a nice error if things
    go wrong.
    '''
    try:
        cdf = cdflib.CDF(str(file_path))
    except Exception as err:
        print('Error whilst trying to load {}\n'.format(file_path))
        raise err
    return cdf
Example #24
def cdfpeek(cdf_loc):
    """
    List all the variables present in a CDF file, along with their size.

    Parameters
    ----------
    cdf_loc : string
        Local location of the cdf file.
    """
    import cdflib
    cdf = cdflib.CDF(cdf_loc)
    print(cdf)
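For example (placeholder path; prints the file summary):

cdfpeek('/path/to/file.cdf')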
Example #25
    def _read_cdf(self, dt_start: datetime.datetime, dt_end: datetime.datetime,
                  version_dict: dict, column_dicts: List[dict],
                  cdf_type: str) -> Tuple[np.ndarray, np.ndarray]:
        logger = logging.getLogger(__name__)

        try:
            if cdf_type == "nasa_cdf":
                cdf_file = cdflib.CDF(self.file_path)
                epochs = cdf_file.varget(version_dict["time_column"]["key"])

                # special case when cdf files that have epoch = 0 entries
                if np.sum(epochs == 0) > 0:
                    null_filter = (epochs != 0)
                    epochs = epochs[null_filter]
                else:
                    null_filter = None

                dt_r = cdflib.epochs.CDFepoch.unixtime(epochs, to_np=True)
                dk_r = []

                for column in column_dicts:
                    key = column["key"]

                    if isinstance(key, str):
                        indices = column.get("indices", None)

                        if indices is not None:
                            indices = np.array(indices)
                            dk_r.append(
                                np.array(cdf_file.varget(key)[:, indices]))
                        else:
                            dk_r.append(np.array(cdf_file.varget(key)))
                    elif isinstance(key, list):
                        dk_r.append(
                            np.stack(arrays=[
                                np.array(cdf_file.varget(k)) for k in key
                            ],
                                     axis=1))
                    else:
                        raise ValueError(
                            "cdf key must be a string or a list thereof")

                if null_filter is not None:
                    for i in range(len(dk_r)):
                        dk_r[i] = dk_r[i][null_filter]
            else:
                raise ValueError("CDF type \"%s\" is not supported", cdf_type)

            return dt_r, dk_r
        except Exception as e:
            logger.exception("failed to read file \"%s\" (%s)", self.file_path,
                             e)
            raise
Example #26
def importar_swia(year, month, day, ti, tf):
    date_orbit = dt.date(int(year), int(month), int(day))
    year = date_orbit.strftime("%Y")
    month = date_orbit.strftime("%m")
    day = date_orbit.strftime("%d")

    if gethostname() == "magneto2":
        path = "../../../../media/gabybosc/datos/SWIA/"
    elif gethostname() == "gabybosc":
        path = "../../datos/SWIA/"
    else:
        path = "../../../datos/SWIA/"

    if os.path.isfile(
            path + f"mvn_swi_l2_coarsearc3d_{year}{month}{day}_v01_r01.cdf"):
        # if SWICA does not exist, use the onboard moments
        swia = cdf.CDF(
            path +
            f"mvn_swi_l2_coarsearc3d_{year}{month}{day}_v01_r01_orig.cdf")
    else:
        swia = cdf.CDF(
            path + f"mvn_swi_l2_onboardsvymom_{year}{month}{day}_v01_r01.cdf")

    t_unix = swia.varget("time_unix")
    density = swia.varget("density")  # cm⁻³
    # I think that in the PDS data, SWICA does not have these precomputed (they are precisely the moments)
    temperature = swia.varget("temperature_mso")  # eV
    vel_mso_xyz = swia.varget("velocity_mso")  # km/s

    t_swia = unix_to_decimal(t_unix)
    inicio = donde(t_swia, ti)
    fin = donde(t_swia, tf)

    t_cut = t_swia[inicio:fin]
    density_cut = density[inicio:fin]
    temperature_cut = temperature[inicio:fin]
    vel_mso_cut = vel_mso_xyz[inicio:fin]  # km/s

    return swia, t_cut, density_cut, temperature_cut, vel_mso_cut
Example #27
    def __init__(self, cdfPath, varList, parseFilenameParams=True):
        if type(varList) is str:
            if varList == '*':
                varList = allVars.keys()
            else:
                varList = [varList]

        assert (all(type(x) is str for x in varList))
        for var in varList:
            if var not in allVars:
                raise ValueError(
                    'Non-existent Radyn CDF variable "%s" requested from load_vars'
                    % var)

        cdf = cdflib.CDF(cdfPath)
        notLoadedVars = []
        for var in allVars.keys():
            # Just load all the scalar values, they're small enough, and most of them are
            # important
            if typeDict[var] == 'Val':
                setattr(self, var, cdf.varget(var).item())
            else:
                if var in varList:
                    setattr(self, var, cdf.varget(var))
                else:
                    notLoadedVars.append(var)
        atts = cdf.globalattsget()
        if len(atts) > 0:
            for k in atts:
                setattr(self, k, str(atts[k]))
        self.notLoadedVars = notLoadedVars
        cdf.close()

        cdfFilename = os.path.basename(cdfPath)
        if cdfFilename != 'radyn_out.cdf':
            if parseFilenameParams and cdfFilename.startswith('radyn_out.'):
                p = cdfFilename[cdfFilename.find('.') + 1:]
                params = p.split('_')
                self.filenameParams = params
                if len(params) != 6:
                    raise ValueError(
                        'FilenameParams should contain 6 underscore separated terms.\n'
                        'See FCHROMA simulation documentation for examples.\n'
                        'If you don\'t want to parse these then call with parseFilenameParams=False'
                    )
                self.startingModelAtmosphere = params[0]
                self.beamSpectralIndex = float(params[1][1:])
                self.totalBeamEnergy = float(params[2])
                self.beamPlulseType = params[3]
                self.cutoffEnergy = params[4]
                self.beamType = params[5]
Example #28
def read_cdf(file, **name_pairings):
    """ Read data from a cdf file.
    
    Read a cdf file and construct a dictionary based on
    name_pairings. This may open and read a file as a side-effect.
    
    Parameters
    ----------
    file : str or cdfread
        The file to read the data from. 
        Accepts both a string filepath to the file and an opened cdfread file.
    **name_pairings
        Pairs of names used to link a name in the output with its
        corresponding name in the cdf file.
        
    Returns
    -------
    dict
        The keys are the keywords used in name_pairings,
        while the values are the variables loaded from
        the cdf file with the name of the corresponding argument.
        
    Examples
    --------
    We can directly ask for variables:
    
    >>> from pyfac.utils import *  # doctest: +SKIP
    >>> read_cdf('tempdata.cdf') # doctest: +SKIP
    {}
    >>> read_cdf('tempdata.cdf', time='Timestamp') # doctest: +SKIP,+ELLIPSIS
    {'time': array([...])}
    
    We can also make use of predefined options:
    
    >>> options = {'time': 'Timestamp', 'B_base': 'B_NEC'}
    >>> read_cdf('tempdata.cdf', **options) # doctest: +SKIP,+ELLIPSIS
    {'time': array([...]), 'B_base': array([[...]])}
    
    Note
    ----
    These examples are skipped by doctest due to the dependency
    on the existence of a 'tempdata.cdf' file.
    """
    if not isinstance(file, cdflib.cdfread.CDF):
        file = cdflib.CDF(file)
    return {
        name: file.varget(cdf_name)
        for name, cdf_name in name_pairings.items()
    }
Example #29
def omni(dateRange,
         source='/home/james/Documents/MPHYS_ARCHIVE/OMNI',
         gsm=False):
    dates = [f"20{i[0]:02d}{i[1]:02d}" for i in dateRange]
    files = sorted(glob.glob(source + '/*.cdf'))
    filesRef = [s.split('/')[-1].split('_')[-2][:-2] for s in files]
    files = dict(zip(filesRef, files))

    df = pd.DataFrame(columns=['bx', 'by', 'bz'])
    with Timer('Timing OMNI'):
        bar = progress.Bar('Loading OMNI', max=len(dates))
        for d in dates:
            logging.info(files[d])
            omni = cdflib.CDF(files[d])
            timetags = omni.varget('Epoch')
            imf_x = omni.varget('BX_GSE')
            if gsm:
                imf_z = omni.varget('BZ_GSM')
                imf_y = omni.varget('BY_GSM')
            else:
                imf_z = omni.varget('BZ_GSE')
                imf_y = omni.varget('BY_GSE')
            time = cdflib.cdfepoch.unixtime(timetags)
            time = [dt.datetime.utcfromtimestamp(t) for t in time]
            imf_month = pd.DataFrame(np.column_stack(
                [time, imf_x, imf_y, imf_z]),
                                     columns=['time', 'bx', 'by', 'bz'])
            # imf_month.bx = imf_month.bx.mask(
            #     imf_month.bx > 1000).interpolate().astype('float')
            # imf_month.by = imf_month.by.mask(
            #     imf_month.by > 1000).interpolate().astype('float')
            # imf_month.bz = imf_month.bz.mask(
            #     imf_month.bz > 1000).interpolate().astype('float')

            imf_month.bx = imf_month.bx.mask(
                imf_month.bx > 1000).astype('float')
            imf_month.by = imf_month.by.mask(
                imf_month.by > 1000).astype('float')
            imf_month.bz = imf_month.bz.mask(
                imf_month.bz > 1000).astype('float')

            imf_month.dropna(inplace=True)
            imf_month = imf_month.set_index('time')
            imf_month = imf_month.resample("5min").mean()
            df = pd.concat([df, imf_month])  # DataFrame.append was removed in pandas 2.0
            bar.next()
        bar.finish()
    return df
Example #30
def load_IMF(fname):
    with Timer('Loading IMF data'):
        cdf_imf = cdflib.CDF(fname)
        timetags = cdf_imf.varget('Epoch')
        imf_z = cdf_imf.varget('BZ_GSE')
        imf_x = cdf_imf.varget('BX_GSE')
        imf_y = cdf_imf.varget('BY_GSE')
        time = cdflib.cdfepoch.unixtime(timetags)
        time = [dt.datetime.utcfromtimestamp(t) for t in time]
        imf = pd.DataFrame(np.column_stack([time, imf_x, imf_y, imf_z]),
                           columns=['time', 'bx', 'by', 'bz'])
        imf.time = pd.to_datetime(imf.time)
        imf = imf.set_index('time')
        # replace the fill-value sentinel (the column maximum) with NaN, then interpolate
        imf.bx = imf.bx.replace(imf.bx.max(), np.nan).interpolate()
        imf.by = imf.by.replace(imf.by.max(), np.nan).interpolate()
        imf.bz = imf.bz.replace(imf.bz.max(), np.nan).interpolate()
    return imf