def getTOFxPH():
    # Assumes module-level configuration: craft, RBlevel, species, PH, date,
    # root, check; plus fnmatch.filter imported as `filter`, os.{stat, listdir,
    # makedirs, remove}, os.path.join, requests, BeautifulSoup, urlretrieve,
    # a reporthook helper, and spacepy.pycdf imported as `cdf`.
    url = ('http://rbspice' + craft.lower() + '.ftecs.com/Level_' + RBlevel +
           '/TOFxPH' + species + PH + '/' + date.strftime('%Y') + '/')
    destination = join(root, craft, 'TOFxPH' + species, 'L' + RBlevel + '-' + PH)
    if not check:
        # Offline mode: load a previously downloaded file if one exists.
        file = filter(listdir(destination), '*' + date.strftime('%Y%m%d') + '*')
        if not file:
            print('No TOFxPH file')
            return
        print('Loading TOFxPH...')
        return cdf.CDF(join(destination, file[0]))
    try:
        stat(destination)
    except OSError:  # was a bare except; only a missing directory is expected
        makedirs(destination, exist_ok=True)
    request = requests.get(url)
    if request.status_code == 404:
        print('Year does not exist for TOFxPH' + species + PH + ' ' + craft)
        return
    soup = BeautifulSoup(request.text, 'html.parser')
    files = [node.get('href') for node in soup.find_all('a')
             if node.get('href').endswith('.cdf')]
    fileList = filter(files, '*' + date.strftime('%Y%m%d') + '*')
    if not fileList:
        print('Month or day does not exist for TOFxPH' + species + PH + ' ' + craft)
        return
    file = 'http://rbspice' + craft.lower() + '.ftecs.com' + fileList[-1]
    fname = file[file.rfind('/') + 1:]
    fnameNoVer = fname[:fname.rfind('v') + 1]
    prevFile = filter(listdir(destination), fnameNoVer + '*')
    if prevFile:
        # Compare version strings parsed from '..._vX.Y.Z.cdf' file names.
        ver = fname[fname.rfind('v') + 1:fname.rfind('.')]
        prevVer = prevFile[0][prevFile[0].rfind('v') + 1:prevFile[0].rfind('.')]
    else:
        ver = 1
        prevVer = 0
    if ver == prevVer:
        print('Loading TOFxPH...')
        return cdf.CDF(join(destination, fname))
    if prevVer != 0:
        print('Updating TOFxPH...')
    else:
        print('Downloading TOFxPH...')
    newCDF = cdf.CDF(urlretrieve(file, join(destination, fname), reporthook)[0])
    if prevVer != 0:
        remove(join(destination, prevFile[0]))
    return newCDF
def getHOPE():
    # Same module-level assumptions as getTOFxPH, plus Hlevel and Hproduct.
    url = ('https://rbsp-ect.lanl.gov/data_pub/rbsp' + craft.lower() +
           '/hope/level' + Hlevel)
    if Hlevel == '3':
        url = url + '/' + Hproduct + '/'
    url = url + '/' + date.strftime('%Y') + '/'
    Hpro = 'PA' if Hproduct == 'pitchangle' else 'MOM'
    destination = join(root, craft, 'HOPE', 'L' + Hlevel + Hpro)
    if not check:
        file = filter(listdir(destination), '*' + date.strftime('%Y%m%d') + '*')
        if not file:
            print('No HOPE file')
            return
        print('Loading HOPE...')
        return cdf.CDF(join(destination, file[0]))
    try:
        stat(destination)
    except OSError:  # was a bare except
        makedirs(destination, exist_ok=True)
    page = requests.get(url).text
    soup = BeautifulSoup(page, 'html.parser')
    files = [node.get('href') for node in soup.find_all('a')
             if node.get('href').endswith('.cdf')]
    fileList = filter(files, '*' + date.strftime('%Y%m%d') + '*')
    if not fileList:
        print('Date does not exist for HOPE ' + craft)
        return
    file = url + fileList[-1]
    fname = file[file.rfind('/') + 1:]
    fnameNoVer = fname[:fname.rfind('v') + 1]
    prevFile = filter(listdir(destination), fnameNoVer + '*')
    if prevFile:
        ver = fname[fname.rfind('v') + 1:fname.rfind('.')]
        prevVer = prevFile[0][prevFile[0].rfind('v') + 1:prevFile[0].rfind('.')]
    else:
        ver = 1
        prevVer = 0
    if ver == prevVer:
        print('Loading HOPE...')
        return cdf.CDF(join(destination, fname))
    if prevVer != 0:
        print('Updating HOPE...')
    else:
        print('Downloading HOPE...')
    newCDF = cdf.CDF(urlretrieve(file, join(destination, fname), reporthook)[0])
    if prevVer != 0:
        remove(join(destination, prevFile[0]))
    return newCDF
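def _demo_rbsp_download():
    # Usage sketch, not from the original source: both downloaders read their
    # configuration from module-level globals, so a driver sets them first.
    # All values and paths below are hypothetical.
    global craft, RBlevel, species, PH, Hlevel, Hproduct, date, root, check
    import datetime
    craft, RBlevel, species, PH = 'A', '3', 'H', 'PAP'
    Hlevel, Hproduct = '3', 'pitchangle'
    date = datetime.date(2015, 3, 17)
    root, check = '/data/rbsp', True  # check=True allows downloading
    hope = getHOPE()
    if hope is not None:
        print(list(hope))  # list the variables in the returned CDF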
def _convert_sequence(subject, sequence, group):
    print('Converting %s...' % group.name)
    views = list(_views(subject, sequence))
    n_video_frames = None
    for view in views:
        # print(subject, sequence, view)
        try:
            video_path = _video_path(subject, sequence, view)
            with imageio.get_reader(video_path) as reader:
                n = len(reader)
            n_video_frames = n if n_video_frames is None else \
                min(n_video_frames, n)
        except Exception:
            print('Failed to read video file. Skipping...')
            return False
    n = n_video_frames
    with pycdf.CDF(_p3_path(subject, sequence)) as cdf:
        p3 = cdf['Pose'][0]
    n_frames = p3.shape[0]
    n = min(n_frames, n)
    p3 = p3.reshape(n_frames, -1, 3)[:n, _filter_indices]
    p3d = group.create_dataset('p3', p3.shape, dtype=np.int32)
    p3d[...] = p3
    group.attrs['len'] = n
    # p3 = normalize(np.array(p3d, dtype=np.float32), subject)
    # theta = group.create_dataset('theta', (n,), dtype=np.float32)
    # rel_poses = group.create_dataset(
    #     'rel_p3', (n, skeleton.n_joints, 3), dtype=np.float32)
    # for i, abs_pose in enumerate(p3):
    #     rel_poses[i], theta[i] = abs_to_rel(abs_pose)
    views_group = group.create_group('views')
    for view in views:
        view_group = views_group.create_group(view)
        with pycdf.CDF(_p2_path(subject, sequence, view)) as cdf:
            p2 = cdf['Pose'][0]
        assert p2.shape[0] == n_frames
        p2 = p2.reshape(n_frames, -1, 2)[:n, _filter_indices]
        p2d = view_group.create_dataset('p2', p2.shape, dtype=np.int32)
        p2d[...] = p2
        with pycdf.CDF(_p3_path(subject, sequence, view)) as cdf:
            p3 = cdf['Pose'][0]
        assert p3.shape[0] == n_frames
        p3 = p3.reshape(n_frames, -1, 3)[:n, _filter_indices]
        p3d = view_group.create_dataset('p3', p3.shape, dtype=np.int32)
        p3d[...] = p3
    return True
def read_imp_CDF(filename):
    """Read in an IMP CDF file.

    Read gridded interpolated magnetic perturbations (IMPs) from a
    specially formatted CDF file.

    TODO:
    """
    cdf = pycdf.CDF(filename)
    Epoch = cdf['Epoch'][:]
    Latitude = cdf['Latitude'][:]
    Longitude = cdf['Longitude'][:]
    Radius = cdf['Radius'][:]
    X = cdf['X'][:]
    Y = cdf['Y'][:]
    Z = cdf['Z'][:]
    Label = cdf['Label'][:]
    ObsLat = cdf['ObsLat'][:]
    ObsLon = cdf['ObsLon'][:]
    ObsRad = cdf['ObsRad'][:]
    ObsX = cdf['ObsX'][:]
    ObsY = cdf['ObsY'][:]
    ObsZ = cdf['ObsZ'][:]
    ObsFit = cdf['ObsFit'][:]
    ObsName = cdf['ObsName'][:]
    cdf.close()  # added: the original leaked the open file handle
    return (Epoch, (Latitude, Longitude, Radius), X, Y, Z, Label,
            (ObsLat, ObsLon, ObsRad), ObsX, ObsY, ObsZ, ObsFit, ObsName)
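def _demo_read_imp():
    # Usage sketch, not from the original source: the file name is hypothetical.
    # Shows how to unpack the 12-element return tuple of read_imp_CDF.
    (Epoch, (lat, lon, rad), X, Y, Z, Label,
     (obs_lat, obs_lon, obs_rad), ObsX, ObsY, ObsZ, ObsFit, ObsName) = \
        read_imp_CDF('imp_20150317.cdf')
    print(len(Epoch), X.shape)  # one grid of perturbations per epoch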
def _load_coeff_mma_single_set(path, cdf_reader, variable, is_internal):
    with pycdf.CDF(path) as cdf:
        data = cdf_reader(cdf)
    return SparseSHCoefficientsTimeDependent(
        data["nm"], data[variable], data["t"], is_internal=is_internal
    )
def construct_interpolants(cursor, fpi_prd1, obs, mode, descriptor,
                           year, month, day):
    mquery = cursor.execute('SELECT ver, filename FROM mec_data WHERE '
                            'obs = "%s" AND mode = "%s" AND descriptor = "%s" '
                            'AND year = %s AND month = %s AND day = %s;'
                            % (obs, mode, descriptor, year, month, day))
    Re = 6378.14
    mresults = mquery.fetchall()
    for mr in mresults:
        MEC_file = fpi_prd1 + mr[1]
        MEC = pycdf.CDF(MEC_file)
        mt = mdates.date2num(MEC['Epoch'])
        mr_gsm = np.asarray(MEC['%s_mec_r_gsm' % obs]) / Re
        MEC.close()
        orbit_extent = np.max(np.abs(mr_gsm))
        # NOTE: mr_gsm is already in units of Re, so comparing against
        # 50.0 * Re (a value in km) is always true; a bare 50.0 was likely
        # intended. Kept as written to preserve behavior.
        if orbit_extent < 50.0 * Re:
            x_gsm_spline = interp.splrep(mt, mr_gsm[:, 0])
            y_gsm_spline = interp.splrep(mt, mr_gsm[:, 1])
            z_gsm_spline = interp.splrep(mt, mr_gsm[:, 2])
            return x_gsm_spline, y_gsm_spline, z_gsm_spline
    # If no MEC file works:
    return False, False, False
def data_from_CDF(date, myfile):
    """
    data_from_CDF outputs dynamic spectra data from a PSP CDF datafile.

    Inputs:
        date: datetime.datetime class. Date of the observation.
        myfile: fnames class defined in psp_dataprep. Location of all directories.
    Output:
        data: 2D numpy matrix of dynamic spectra
        epoch: 1D numpy array of the date and times of each datapoint
        freqs: 1D numpy array of the frequency channels
    """
    print(myfile.path_data)
    cdf = pycdf.CDF(myfile.path_data)
    data = np.array(cdf.get(myfile.dataname))
    epoch = np.array(cdf.get(myfile.epochname))
    freqs = np.array(cdf.get(myfile.freqname))
    freqs = freqs[0, :]  # all rows repeat the same channel list
    cdf.close()
    return data, epoch, freqs
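def _demo_data_from_CDF():
    # Usage sketch, not from the original source: `fnames` is the class from
    # psp_dataprep mentioned in the docstring; the file path and variable
    # names set here are hypothetical placeholders.
    import datetime
    f = fnames()  # hypothetical construction; see psp_dataprep for the real one
    f.path_data = 'psp_fld_l2_rfs_hfr_20190409_v02.cdf'
    f.dataname = 'psp_fld_l2_rfs_hfr_auto_averages_ch0_V1V2'
    f.epochname = 'epoch_hfr_auto_averages_ch0_V1V2'
    f.freqname = 'frequency_hfr_auto_averages_ch0_V1V2'
    data, epoch, freqs = data_from_CDF(datetime.datetime(2019, 4, 9), f)
    print(data.shape, freqs.shape)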
def convert_cdfs_to_dataframe(filelist, varlist, time_var_str):
    """
    Routine to convert CDFs to a dictionary of arrays, keyed by the strings
    in varlist.

    Input:
        filelist: the filenames/paths of the cdf files you want to read in
                  and convert to arrays
        varlist: the variable names you want to read in from the cdf files
        time_var_str: the string variable for the time you want from the file,
                      also converted to datetime
    Output:
        dictionary: the dictionary, with keys that lead to their appropriate
                    arrays from the cdf files
    """
    from spacepy import pycdf
    from delorean import Delorean

    # create empty numpy arrays
    ll = len(varlist)
    varsdata = [np.zeros(1) for i in range(ll + 1)]

    # read data from cdf files and append the arrays
    for i in filelist:
        # print('reading file ' + i)
        d = pycdf.CDF(i)
        # CHECK TIME STAMPS: find out what beginning index to use, then
        # save the last time stamp to compare with the next file.
        ctr = 0
        if i == filelist[0]:
            last = pycdf.VarCopy(d[time_var_str])[-1]
        else:
            # Skip records that overlap the previous file (within 10 ms).
            while (Delorean(pycdf.VarCopy(d[time_var_str])[ctr],
                            timezone='UTC').epoch -
                   Delorean(last, timezone='UTC').epoch < .01):
                ctr += 1
            last = pycdf.VarCopy(d[time_var_str])[-1]
        for j in varlist:
            idx = varlist.index(j)
            if j != 'scDistance':
                varsdata[idx] = np.append(varsdata[idx],
                                          pycdf.VarCopy(d[j])[ctr:])
            else:
                # scDistance is sampled at 1/25 the cadence of the other vars.
                varsdata[idx] = np.append(varsdata[idx],
                                          pycdf.VarCopy(d[j])[int(ctr / 25):])
    print('Done reading data')

    # Create an epoch array from time_var_str:
    # (s)econds (s)ince (epoch) == ssepoch
    idxe = varlist.index(time_var_str)
    ldata = len(varsdata[0])
    ssepoch = np.zeros(ldata)
    for i in range(1, ldata):
        ssepoch[i] = Delorean(varsdata[idxe][i], timezone="UTC").epoch

    # drop the first zero before creating the data frame
    dictionary = {}
    dictionary['time'] = ssepoch[1:]
    for j in varlist:
        if j == time_var_str:
            dictionary['datetime'] = varsdata[varlist.index(j)][1:]
        else:
            dictionary[j] = varsdata[varlist.index(j)][1:]
    return dictionary
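def _demo_convert_cdfs():
    # Usage sketch, not from the original source: the file name and variable
    # names are hypothetical. Despite its name, the function returns a dict,
    # which pandas can turn into a DataFrame directly.
    import pandas as pd
    d = convert_cdfs_to_dataframe(['wind_swe_20150317_v01.cdf'],
                                  ['Epoch', 'Proton_V_nonlin'], 'Epoch')
    df = pd.DataFrame(d)  # 'time' holds seconds since the Unix epoch
    print(df.head())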
def write(self, resource):
    '''Write CDF to file'''
    from spacepy import pycdf
    nullValue = 99999.0
    cdf = pycdf.CDF(resource, '')  # master='' creates a new, empty CDF
    cdf.compress(pycdf.const.GZIP_COMPRESSION, 9)
    self.__writeAttrs(cdf)
    comps = self.getReportedOrientation()
    for i in range(len(comps)):  # was xrange (Python 2)
        field = "GeomagneticFieldElement%d" % (i + 1)
        data = [(nullValue if np.isnan(value) else value)
                for value in self.get(comps[i]).tolist()]
        cdf.new(field, data=data, type=pycdf.const.CDF_DOUBLE)
        cdf[field].attrs['FIELDNAME'] = "Geomagnetic Field Element %d" % (i + 1)
        cdf[field].attrs['VALIDMIN'] = -79999.0
        cdf[field].attrs['VALIDMAX'] = 79999.0
        if comps[i] in ('X', 'Y', 'Z', 'F', 'H', 'G'):
            units = 'nT'
        elif comps[i] in ('D', 'I'):
            units = 'Minutes of arc'
        else:
            units = ""
        cdf[field].attrs['UNITS'] = units
        cdf[field].attrs['FILLVAL'] = nullValue
        cdf[field].attrs['StartDate'] = self.get(self.datetime_index)[0].isoformat()
        cdf[field].attrs['StartDateEpoch'] = self.get(self.datetime_index)[0]
        cdf[field].attrs['SampPer'] = self.getSamplingRate()
        cdf[field].attrs['ElemRec'] = comps[i]
        cdf[field].attrs['OrigFreq'] = 99999.0
def __init__(self, ssmcdffn):
    self.cdffn = ssmcdffn           # CDF file
    self.cdf = pycdf.CDF(ssmcdffn)  # Open CDF
    # Make a name for if we have anything to write
    # leaf = str(self.cdf.attrs['Logical_file_id']).replace('ssm', 'ssm_modified') + '.cdf'
    # self.modified_cdffn = os.path.join(,leaf)

    # Get UTC second of day
    self.ut = np.array([datetime2sod(dt) for dt in
                        self.cdf['Epoch'][:].flatten().tolist()]).flatten()
    self.lat = self.cdf['SC_APEX_LAT'][:].flatten()
    self.lon = self.cdf['SC_APEX_LON'][:].flatten()
    self.glat = self.cdf['SC_GEOCENTRIC_LAT'][:].flatten()
    self.glon = self.cdf['SC_GEOCENTRIC_LON'][:].flatten()
    self.R = self.cdf['SC_GEOCENTRIC_R'][:].flatten() * 1000.
    self.mlt = self.cdf['SC_APEX_MLT'][:].flatten()
    self.oi = self.cdf['ORBIT_INDEX'][:].flatten()
    self.n_orbits = int(np.max(np.abs(self.oi)))
    self.dBd1 = self.cdf['DELTA_B_APX'][:, 0].flatten()
    self.dBd2 = self.cdf['DELTA_B_APX'][:, 1].flatten()
    self.dBd3 = self.cdf['DELTA_B_APX'][:, 2].flatten()
    # SSM coordinates: x - down, y - along, z - across-right
    self.dB_along = self.cdf['DELTA_B_SC'][:, 1].flatten()
    self.dB_across = -1 * self.cdf['DELTA_B_SC'][:, 2].flatten()  # across-left
    self.dB_up = -1 * self.cdf['DELTA_B_SC'][:, 0].flatten()
def from_cdf(files, variable, cache=False, clobber=False, name=''):
    """
    Read variable data from a CDF file.

    Parameters
    ==========
    files : str, list
        Name of the CDF file(s) to be read.
    variable : str
        Name of the variable to be read.

    Returns
    =======
    var : mrarray
        A mrarray object.
    """
    global cdf_vars
    global file_vars

    if isinstance(files, str):
        files = [files]

    # Read variables from files
    cdf_vars = {}
    for file in files:
        file_vars = {}
        with pycdf.CDF(file) as f:
            var = __from_cdf_read_var(f, variable)

    # Cache
    if cache:
        var.cache(clobber=clobber)

    return var
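def _demo_from_cdf():
    # Usage sketch, not from the original source: the MMS-style file and
    # variable names are hypothetical; any CDF file/variable pair works.
    b = from_cdf('mms1_fgm_srvy_l2_20170309_v5.87.0.cdf',
                 'mms1_fgm_b_gsm_srvy_l2')
    print(b.shape)  # mrarray behaves like a numpy array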
def get_node_features_for_one(file, rotInd_array):
    cdf = pycdf.CDF(file)
    cdf = downsize(cdf['Pose'][0])
    total_timesteps = len(cdf)
    num_pieces = total_timesteps // NUM_TIMESTEPS  # integer division (was `/`, a TypeError under Python 3)
    joint_array = np.zeros((num_pieces, NUM_TIMESTEPS, NUM_JOINTS, 3))
    for piece in range(num_pieces):
        for t in range(NUM_TIMESTEPS):
            for joint_id in range(NUM_JOINTS):
                assert len(rotInd_array[joint_id]) in (0, 3)
                for i in range(len(rotInd_array[joint_id])):
                    rotInd = rotInd_array[joint_id][i]
                    joint_array[piece][t][joint_id][i] = \
                        cdf[piece * NUM_TIMESTEPS + t][rotInd - 1]
                joint_array[piece][t][joint_id] = exp_map(
                    joint_array[piece][t][joint_id])
    # joint_array[piece][time][joint_id] gives the list of three rotations (z, x, y)
    # now create the node features
    everything = extract(joint_array, 0, 32)
    left_arm = extract(joint_array, 16, 24)
    right_arm = extract(joint_array, 24, 32)
    left_leg = extract(joint_array, 6, 11)
    right_leg = extract(joint_array, 1, 6)
    spine = get_spine(joint_array)
    # Each of these[piece][time] gives a list of features
    return left_arm, right_arm, left_leg, right_leg, spine, everything
def solo_rpw_hfr(filepath):
    rpw_l2_hfr = cdflib.CDF(filepath)
    # Equivalent read via spacepy.pycdf, kept for reference:
    # l2_cdf_file = pycdf.CDF(filepath)
    # times = l2_cdf_file['Epoch'][:]
    times = rpw_l2_hfr.varget('EPOCH')
    freqs = rpw_l2_hfr.varget('FREQUENCY')
    # Indicates the THR sensor configuration (V1=1, V2=2, V3=3, V1-V2=4,
    # V2-V3=5, V3-V1=6, B_MF=7, HF_V1-V2=9, HF_V2-V3=10, HF_V3-V1=11)
    sensor = rpw_l2_hfr.varget('SENSOR_CONFIG')
    freq_uniq = np.unique(rpw_l2_hfr.varget('FREQUENCY'))  # frequency channel list
    sample_time = rpw_l2_hfr.varget('SAMPLE_TIME')
    agc1 = rpw_l2_hfr.varget('AGC1')
    agc2 = rpw_l2_hfr.varget('AGC2')
    flux_density1 = rpw_l2_hfr.varget('FLUX_DENSITY1')
    flux_density2 = rpw_l2_hfr.varget('FLUX_DENSITY2')
    rpw_l2_hfr.close()

    # For CH1, extract times, freqs and data points
    slices1 = []
    times1 = []
    freq1 = []
    for cfreq in freq_uniq:
        search = np.argwhere((freqs == cfreq) & (sensor[:, 0] == 9) & (agc1 != 0))
        if search.size > 0:
            slices1.append(agc1[search])
            times1.append(times[search])
            freq1.append(cfreq)

    # For CH2, extract times, freqs and data points (was mislabelled CH1)
    slices2 = []
    times2 = []
    freq2 = []
    for cfreq in freq_uniq:
        search = np.argwhere((freqs == cfreq) & (sensor[:, 1] == 9) & (agc2 != 0))
        if search.size > 0:
            slices2.append(agc2[search])
            times2.append(times[search])
            freq2.append(cfreq)

    # Kinda arbitrary, but pick a time near the middle of the frequency sweep
    tt1 = np.hstack(times1)[:, 160]
    tt2 = np.hstack(times2)[:, 50]
    spec1 = np.hstack(slices1)
    spec2 = np.hstack(slices2)

    return tt1, freq1, spec1, tt2, freq2, spec2
def _load_coeff_mma_multi_set(path, cdf_reader, variable, is_internal):
    with pycdf.CDF(path) as cdf:
        data = cdf_reader(cdf)
    return CombinedSHCoefficients(*[
        SparseSHCoefficientsTimeDependent(
            item["nm"], item[variable], item["t"], is_internal=is_internal
        ) for item in data
    ])
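def _demo_load_mma():
    # Usage sketch, not from the original source: `read_swarm_mma_2c` is a
    # hypothetical reader returning dicts with "nm", "t" and the requested
    # coefficient variable; the Swarm-style path is also hypothetical. The
    # single-set loader takes one such dict; the multi-set loader combines
    # a list of them into one CombinedSHCoefficients object.
    coeff = _load_coeff_mma_multi_set(
        'SW_OPER_MMA_SHA_2C.cdf', read_swarm_mma_2c, 'gh', is_internal=True)
    print(coeff)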
_cdf_lock = threading.Lock()  # shared module-level lock; the original created
                              # a new Lock per call, which synchronizes nothing


def load_cdf(cdf_file, product):
    # get_depend/convert_time are helpers defined elsewhere in the module.
    with _cdf_lock:
        with pycdf.CDF(cdf_file) as f:  # context manager added; the original never closed the file
            if product in f:
                data_var = f[product]
                time_var = f.raw_var(get_depend(data_var))
                time = convert_time(time_var)
                return SpwcVariable(time=time, data=data_var[:], meta={},
                                    columns=[], y=None)
    return None
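def _demo_load_cdf():
    # Usage sketch, not from the original source: the file name and product
    # variable are hypothetical.
    v = load_cdf('mms1_fgm_srvy_l2_20170309_v5.87.0.cdf',
                 'mms1_fgm_b_gsm_srvy_l2')
    if v is not None:
        print(v.time[:5], v.data[:5])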
def _select_mgf():
    global MGNT, MGF_EPOCH
    mgf_file = Tk()
    mgf_file.withdraw()  # hide the root window
    mgf_path = askopenfilename(initialdir=os.getcwd(),
                               title='Select MGF file',
                               filetypes=[('CDF Files', '*.cdf')])  # open file dialog
    mgf_name = os.path.split(mgf_path)[-1][:-4]
    mgf_data = cdf.CDF(mgf_path)
    MGNT = mgf_data['magt_8sec'][:]        # field magnitude variable
    MGF_EPOCH = mgf_data['epoch_8sec'][:]  # epoch variable
def main():
    print("This is test code of SpacePy modules.")  # Python 2 print statements converted
    if not args or len(args) != 1:
        return 0
    CDF = args[0]
    cdf = pycdf.CDF(CDF)
    print("Information CDF")
    ReadCDF(cdf)
    print("Global Attributes")
    OutGA(cdf)
def make_cdf(name, compress=False):
    if os.path.exists(name):
        os.unlink(name)
    cd = pycdf.CDF(name, '')  # master='' creates a new, empty CDF
    add_varaibles(cd)         # [sic] helper defined elsewhere
    add_attributes(cd)
    print_cdf(cd)
    if compress:
        cd.compress(pycdf.const.GZIP_COMPRESSION)
    cd.close()
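def _demo_make_cdf():
    # Usage sketch, not from the original source: writes a throwaway file,
    # then reopens it read-only to confirm the contents round-trip.
    make_cdf('demo.cdf', compress=True)
    with pycdf.CDF('demo.cdf') as cd:
        print(list(cd))  # variable names written by add_varaibles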
def pose_loader(root, subject_list, action_list):
    subject_pose = {}
    for num in subject_list:
        action_pose = {}
        pose_path = root + '/S%d/MyPoseFeatures/D2_Positions' % num
        for folder in os.listdir(pose_path):
            if folder.split()[0] in action_list or \
                    folder.split('.')[0] in action_list:
                file_path = os.path.join(pose_path, folder)
                action_pose[folder[:-4]] = pycdf.CDF(file_path)[0][...][0]
        subject_pose['S%d' % num] = action_pose
    return subject_pose
def get_raw_angles(self, actor, action, sub_action=0):
    """
    :param actor: subject identifier, e.g. 'S1'
    :param action: action name
    :param sub_action: sub-action index (default 0)
    :return: array of raw joint angles, one row per frame
    """
    cdf_file = self.get_cdf_file('RawAngles', actor, action, sub_action)
    cdf = pycdf.CDF(cdf_file)
    angles3d = np.squeeze(cdf['Pose'])
    return angles3d
def get_cdf(self, dt, cadence):
    remotefn = (self.ftpdir + '/' + self.cadence_subdir[cadence] + '/' +
                self.filename_gen[cadence](dt))
    remote_path, fn = '/'.join(remotefn.split('/')[:-1]), remotefn.split('/')[-1]
    localfn = os.path.join(self.localdir, fn)
    if not os.path.exists(localfn) or self.force_download:
        # Former FTP download path, kept for reference:
        # ftp = ftplib.FTP_TLS(self.ftpserv)
        # print('Connecting to OMNIWeb FTP server %s' % (self.ftpserv))
        # ftp.connect()
        # ftp.login()
        # ftp.prot_p()  # switch to secure data connection
        # ftp.cwd(remote_path)  # change directory
        # print('Downloading file %s' % (remote_path + '/' + fn))
        # with open(localfn, 'wb') as f:
        #     ftp.retrbinary('RETR ' + fn, f.write)
        # print("Saved as %s" % (localfn))
        # ftp.quit()
        url = 'https://' + self.ftpserv + remotefn
        print(url)
        head = requests.head(url, allow_redirects=True)
        headers = head.headers
        content_type = headers.get('content-type')
        if content_type is not None:
            if 'html' in content_type.lower():
                raise RuntimeError('Expected {} to be a file, but '.format(url) +
                                   'content_type is html. Headers were:\n' +
                                   '{}'.format(headers))
        response = requests.get(url, allow_redirects=True)
        if self.cdf_or_txt == 'txt':
            try:
                datastr = str(response.content, 'utf-8')  # Py 3
            except TypeError:
                datastr = str(response.content)  # Py 2
            with open(localfn, 'w') as f:
                f.write(datastr)
        elif self.cdf_or_txt == 'cdf':
            with open(localfn, 'wb') as f:
                f.write(response.content)
    if self.cdf_or_txt == 'txt':
        return omni_txt_cdf_mimic(localfn, cadence)
    elif self.cdf_or_txt == 'cdf':
        return pycdf.CDF(localfn)
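def _demo_omni_get_cdf():
    # Usage sketch, not from the original source: assumes a downloader class
    # holding the get_cdf method above, here called OmniDownloader with an
    # 'hourly' cadence configured; both names are hypothetical.
    import datetime
    dl = OmniDownloader(cdf_or_txt='cdf')
    omni = dl.get_cdf(datetime.datetime(2015, 3, 17), 'hourly')
    print(list(omni))  # variables in the downloaded OMNI CDF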
def build_pdf_from_cdf(file_cdf):
    """
    Builds the PDF file name from a CDF file.

    :param file_cdf: input CDF file path
    :return: PDF file path
    """
    cdf = pycdf.CDF(file_cdf)
    file_pdf = '{}.pdf'.format(
        os.path.join(os.path.dirname(file_cdf.replace('/data/', '/ql/B-W/')),
                     cdf.attrs['Parents'][0].split('.')[0]))
    cdf.close()
    return file_pdf
def get_3d(self, actor, action, sub_action=0):
    """
    :param actor: subject identifier, e.g. 'S1'
    :param action: action name
    :param sub_action: sub-action index (default 0)
    :return: (n_frames, 32, 3) array of 3D joint positions
    """
    cdf_file = self.get_cdf_file('D3_Positions', actor, action, sub_action)
    cdf = pycdf.CDF(cdf_file)
    joints3d = np.squeeze(cdf['Pose']).reshape((-1, 32, 3))
    return joints3d
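def _demo_h36m_poses(dataset):
    # Usage sketch, not from the original source: `dataset` is assumed to be
    # an instance of the class providing get_3d/get_raw_angles above; the
    # actor and action values are hypothetical.
    joints = dataset.get_3d('S1', 'Walking', sub_action=0)  # (n_frames, 32, 3)
    angles = dataset.get_raw_angles('S1', 'Walking')
    print(joints.shape, angles.shape)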
def convert_cdfs_to_dataframe(filelist, varlist, nameoftimecolumn,
                              nameofvectorcolumn):
    '''
    Import spacepy and delorean for CDFs and datetimes.
    Remember (os.environ["CDF_LIB"] = "~/") before importing pycdf.
    '''
    os.environ["CDF_LIB"] = "~/"
    from spacepy import pycdf
    from delorean import Delorean

    # create empty numpy arrays
    ll = len(varlist)
    varsdata = [np.zeros(1) for i in range(ll + 1)]

    # read data from cdf files and append the arrays
    for i in filelist:
        d = pycdf.CDF(i)
        for j in varlist:
            idx = varlist.index(j)
            varsdata[idx] = np.append(varsdata[idx], pycdf.VarCopy(d[j]))

    # Create an epoch array from the time column:
    # (s)econds (s)ince (epoch) == ssepoch
    idxe = varlist.index(nameoftimecolumn)
    ldata = len(varsdata[0])
    ssepoch = np.zeros(ldata)
    vector1 = np.zeros(ldata - 1)
    vector2 = np.zeros(ldata - 1)
    vector3 = np.zeros(ldata - 1)
    for i in range(1, ldata):
        ssepoch[i] = Delorean(varsdata[idxe][i], timezone="UTC").epoch

    # drop the first zero before creating the data frame
    dictionary = {}
    dictionary['epoch'] = ssepoch[1:]
    for j in varlist:
        if j == nameoftimecolumn:
            dictionary['datetime'] = varsdata[varlist.index(j)][1:]
        elif j == nameofvectorcolumn:
            # Split the flattened N x 3 vector variable into its components.
            # The original indexed with a stale loop variable `i` here; this
            # loop restores the presumed intent.
            flat = varsdata[varlist.index(j)][1:]
            for i in range(1, ldata):
                vector1[i - 1] = flat[(i - 1) * 3]
                vector2[i - 1] = flat[(i - 1) * 3 + 1]
                vector3[i - 1] = flat[(i - 1) * 3 + 2]
        else:
            dictionary[j] = varsdata[varlist.index(j)][1:]
    dictionary['vector1'] = vector1
    dictionary['vector2'] = vector2
    dictionary['vector3'] = vector3

    # Make the dataframe and replace all missing values with NaNs
    d = pd.DataFrame(dictionary)
    d.replace(to_replace=-1e30, value=np.NaN, inplace=True)
    return d
def __init__(self, N=int(1e5)):
    self.N = N

    # Initializing data
    data_path = "/home/" + usrname + "/Documents/Master/Swarm_Data"
    cdfA_path = data_path + "/Sat_A/SW_OPER_EFIA_LP_1B_20131221T000000_20131221T235959_0501.CDF/SW_OPER_EFIA_LP_1B_20131221T000000_20131221T235959_0501_MDR_EFI_LP.cdf"
    cdfB_path = data_path + "/Sat_B/SW_OPER_EFIB_LP_1B_20131221T000000_20131221T235959_0501.CDF/SW_OPER_EFIB_LP_1B_20131221T000000_20131221T235959_0501_MDR_EFI_LP.cdf"
    cdfC_path = data_path + "/Sat_C/SW_OPER_EFIC_LP_1B_20131221T000000_20131221T235959_0501.CDF/SW_OPER_EFIC_LP_1B_20131221T000000_20131221T235959_0501_MDR_EFI_LP.cdf"

    self.cdfA = pycdf.CDF(cdfA_path)
    self.cdfB = pycdf.CDF(cdfB_path)
    self.cdfC = pycdf.CDF(cdfC_path)

    # Retrieving data from CDF files
    self.NeA = self.cdfA["Ne"][:N]
    self.NeB = self.cdfB["Ne"][:N]
    self.NeC = self.cdfC["Ne"][:N]

    self.longA = self.cdfA["Longitude"][:N]
    self.longB = self.cdfB["Longitude"][:N]
    self.longC = self.cdfC["Longitude"][:N]

    self.latA = self.cdfA["Latitude"][:N]
    self.latB = self.cdfB["Latitude"][:N]
    self.latC = self.cdfC["Latitude"][:N]

    self.radA = self.cdfA["Radius"][:N]
    self.radB = self.cdfB["Radius"][:N]
    self.radC = self.cdfC["Radius"][:N]

    # Setting time to seconds after midnight
    self.seconds = self.stamp_to_sec(self.cdfA["Timestamp"][:N])
    self.stamps = self.cdfA["Timestamp"][:N]
    self.fs = 2

    self.BA_shift = self.timeshift_latitude(self.latB, self.latA)
    self.BC_shift = self.timeshift_latitude(self.latB, self.latC)

    self.solved = False
def getJade(self, jade_folder):
    timeStart = '2017-03-09T00:00:01.500'
    timeEnd = '2017-03-10T00:00:02.531'
    dataFolder = pathlib.Path('../data/jad')
    DOY, ISO, datFiles = getFiles(timeStart, timeEnd, '.DAT', dataFolder,
                                  'JAD_L30_LRS_ION_ANY_CNT')
    jadeIon = JadeData(datFiles, timeStart, timeEnd)
    jadeIon.getIonData()
    cdf_file = pycdf.CDF(r'..\crossings\test.cdf', '')  # master='' creates a new CDF
    for date in jadeIon.dataDict.keys():
        jade_data = jadeIon.dataDict[date]
        cdf_file['JADE DATA'] = jade_data['DATA_ARRAY']
        print(jade_data['DATA_ARRAY'])
    cdf_file.close()  # added: pycdf only guarantees the data is flushed on close
def printcdf(filename):
    '''
    Remember to use 'os.environ["CDF_LIB"] = library_directory' before import!
    For future notice: cdf_library = "cdf37_1-dist", and the best place for
    this library seems to be in "~/". pycdf apparently does not find this
    library from any other directories.
    '''
    os.environ["CDF_LIB"] = "~/"
    from spacepy import pycdf
    data = pycdf.CDF(filename)
    print(data)
def process_view_3D(out_dir, subject, action, subaction):
    subj_dir = path.join('extracted', subject)
    base_filename = metadata.get_base_filename(subject, action, subaction)

    # Load joint position annotations
    with pycdf.CDF(path.join(subj_dir, 'Poses_D3_Positions',
                             base_filename + '.cdf')) as cdf:
        poses_3d = np.array(cdf['Pose'])
        poses_3d = poses_3d.reshape(poses_3d.shape[1], 32, 3)

    with pycdf.CDF(path.join(subj_dir, 'TOF', base_filename + '.cdf')) as cdf:
        tof_range = np.array(cdf['RangeFrames'])
        tof_range = tof_range.reshape(tof_range.shape[3], 144, 176)
        tof_indicator = np.array(cdf['Indicator'])
        tof_indicator = tof_indicator.reshape(tof_indicator.shape[1])
        tof_index = np.array(cdf['Index'])
        tof_index = tof_index.reshape(tof_index.shape[1])
        tof_intensity = np.array(cdf['IntensityFrames'])
        tof_intensity = tof_intensity.reshape(tof_intensity.shape[3], 144, 176)

    # extract frame indices for pose
    frame_indices_pose = np.argwhere(tof_indicator == 1)
    frame_indices_pose = frame_indices_pose.reshape(frame_indices_pose.shape[0])
    frame_indices_tof = tof_index[frame_indices_pose] - 1
    frame_indices_tof = frame_indices_tof.astype(int)
    frames = frame_indices_pose + 1

    return {
        'pose/3d': poses_3d[frame_indices_pose],
        'tof/range': tof_range[frame_indices_tof],
        'tof/intensity': tof_intensity[frame_indices_tof],
        'frame': frames,
        'subject': np.full(frames.shape, int(included_subjects[subject])),
        'action': np.full(frames.shape, int(action)),
        'subaction': np.full(frames.shape, int(subaction)),
    }
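def _demo_process_view_3D():
    # Usage sketch, not from the original source: the subject/action values
    # are hypothetical and must exist under 'extracted/' with the layout the
    # function expects.
    out = process_view_3D('out', 'S1', 2, 1)
    print(out['pose/3d'].shape, out['tof/range'].shape)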
def __init__(self, cdfPath, i=0):
    with pycdf.CDF(cdfPath) as root:
        var = root
        altitude = var[self.var_altitude][...] / 1000
        cameraPosGCRS = var[self.var_cameraPos][i]
        photoTime = var[self.var_photoTime][i]

        # For three channels (RGB), each channel is stored as a separate
        # variable: img_red, img_green, img_blue.
        # For grayscale, the single variable is called 'img'.
        try:
            fillval = var[self.var_img].attrs['FILLVAL']
            img = np.atleast_3d(var[self.var_img][i])
            img = _convertImgDtype(img, fillval)
        except KeyError:  # was a bare except; no 'img' variable means RGB
            fillval = var[self.var_img_red].attrs['FILLVAL']
            img_red = _convertImgDtype(var[self.var_img_red][i], fillval)
            img_green = _convertImgDtype(var[self.var_img_green][i], fillval)
            img_blue = _convertImgDtype(var[self.var_img_blue][i], fillval)
            img = ma.dstack((img_red, img_green, img_blue))

        latsCenter = var[self.var_latsCenter][i]
        lonsCenter = var[self.var_lonsCenter][i]
        lats = var[var[self.var_latsCenter].attrs['bounds']][i]
        lons = var[var[self.var_lonsCenter].attrs['bounds']][i]

        # TODO read in MLat/MLT as well if available
        self._latsCenter = ma.masked_invalid(latsCenter)
        self._lonsCenter = ma.masked_invalid(lonsCenter)
        self._lats = ma.masked_invalid(lats)
        self._lons = ma.masked_invalid(lons)
        self._elevation = ma.masked_invalid(90 - var[self.var_zenithAngle][i])

        metadata = root.attrs
        assert var[self.var_altitude].attrs['UNITS'] == 'meters'
        assert var[self.var_cameraPos].attrs['UNITS'] == 'kilometers'

        identifier = os.path.splitext(os.path.basename(cdfPath))[0]
        BaseMapping.__init__(self, altitude, cameraPosGCRS, photoTime,
                             identifier, metadata=metadata)
        ArrayImageMixin.__init__(self, img)
def readfile():
    MFIdata = pycdf.CDF(MFI_path)
    FPEdata = pycdf.CDF(FPE_path)
    MFI = {
        "EPOCH": MFIdata["Epoch"][:],    # timestamps
        "BX": smooth(MFIdata["BX"][:]),  # magnetic field components
        "BY": smooth(MFIdata["BY"][:]),
        "BZ": smooth(MFIdata["BZ"][:]),
        "BT": smooth(MFIdata["BT"][:])   # total field magnitude
    }
    FPE = {
        "EPOCH": FPEdata["Epoch"][:],          # timestamps
        "DEN": smooth(FPEdata["DEN"][:]),      # plasma density
        "ENDEN": smooth(FPEdata["ENDEN"][:]),  # energy density
        "TEMP": smooth(FPEdata["T"][:]),       # temperature
        "GSEX": FPEdata["GSEX"][:],            # GSE position components
        "GSEY": FPEdata["GSEY"][:],
        "GSEZ": FPEdata["GSEZ"][:],
        "VX": smooth(FPEdata["VX"][:]),        # velocity components
        "VY": smooth(FPEdata["VY"][:])
    }
    print(FPE)
    print(MFI)
    return MFI, FPE
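def _demo_readfile():
    # Usage sketch, not from the original source: readfile() reads the
    # module-level MFI_path and FPE_path globals, so set them first
    # (the file names here are hypothetical).
    global MFI_path, FPE_path
    import matplotlib.pyplot as plt
    MFI_path, FPE_path = 'mfi_data.cdf', 'fpe_data.cdf'
    MFI, FPE = readfile()
    plt.plot(MFI["EPOCH"], MFI["BT"])  # total field magnitude vs. time
    plt.ylabel("BT [nT]")
    plt.show()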