def test_client_fetch_wrong_type(mock_fetch):
    query = a.Time("2011/01/01", "2011/01/02") & a.Instrument("goes")
    qr = Fido.search(query)
    with pytest.raises(TypeError):
        Fido.fetch(qr)
def test_no_wait_fetch():
    qr = Fido.search(a.Instrument('EVE'),
                     a.Time("2016/10/01", "2016/10/02"),
                     a.Level(0))
    res = Fido.fetch(qr, wait=False)
    assert isinstance(res, DownloadResponse)
    assert isinstance(res.wait(), list)
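# A minimal usage sketch of the non-blocking fetch behaviour asserted above,
# outside of a test; the EVE query is illustrative.
from sunpy.net import Fido, attrs as a

qr = Fido.search(a.Instrument('EVE'),
                 a.Time("2016/10/01", "2016/10/02"),
                 a.Level(0))
# Start the downloads without blocking; per the test above, this returns a
# DownloadResponse rather than a completed list of files.
res = Fido.fetch(qr, wait=False)

# ... do other work here while the files download ...

# Block until the downloads finish; wait() returns the list of filenames.
files = res.wait()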
def test_save_path():
    with tempfile.TemporaryDirectory() as target_dir:
        qr = Fido.search(a.Instrument('EVE'),
                         a.Time("2016/10/01", "2016/10/02"),
                         a.Level(0))
        files = Fido.fetch(qr, path=os.path.join(target_dir, "{instrument}" + os.path.sep + "{level}"))
        for f in files:
            assert target_dir in f
            assert "eve{}0".format(os.path.sep) in f
def test_fido(mock_fetch):
    qr = Fido.search(a.Time("2012/10/4", "2012/10/6"),
                     a.Instrument('noaa-indices'))
    assert isinstance(qr, UnifiedResponse)
    response = Fido.fetch(qr)
    assert len(response) == qr._numfile
def test_fido(query):
    qr = Fido.search(query)
    client = qr.get_response(0).client
    assert isinstance(qr, UnifiedResponse)
    assert isinstance(client, eve.EVEClient)
    response = Fido.fetch(qr)
    assert len(response) == qr._numfile
def test_save_path(tmpdir):
    qr = Fido.search(a.Instrument('EVE'),
                     a.Time("2016/10/01", "2016/10/02"),
                     a.Level(0))
    # Test when path is str
    files = Fido.fetch(qr, path=str(tmpdir / "{instrument}" / "{level}"))
    for f in files:
        assert str(tmpdir) in f
        assert "eve{}0".format(os.path.sep) in f
def test_save_path_pathlib(tmpdir):
    qr = Fido.search(a.Instrument('EVE'),
                     a.Time("2016/10/01", "2016/10/02"),
                     a.Level(0))
    # Test when path is pathlib.Path
    target_dir = tmpdir.mkdir("down")
    path = pathlib.Path(target_dir, "{instrument}", "{level}")
    files = Fido.fetch(qr, path=path)
    for f in files:
        assert target_dir.strpath in f
        assert "eve{}0".format(os.path.sep) in f
def test_save_path_pathlib():
    pathlib = pytest.importorskip('pathlib')
    qr = Fido.search(a.Instrument('EVE'),
                     a.Time("2016/10/01", "2016/10/02"),
                     a.Level(0))
    # Test when path is pathlib.Path
    with tempfile.TemporaryDirectory() as target_dir:
        path = pathlib.Path(target_dir, "{instrument}", "{level}")
        files = Fido.fetch(qr, path=path)
        for f in files:
            assert target_dir in f
            assert "eve{}0".format(os.path.sep) in f
def test_fido(mock_wait, mock_search, mock_enqueue):
    qr1 = Fido.search(Time('2012/10/4', '2012/10/6'),
                      Instrument('noaa-indices'))
    Fido.fetch(qr1, path="/some/path/{file}")
    # Here we assert that the `fetch` function has called the parfive
    # Downloader.enqueue_file method with the correct arguments. Everything
    # that happens after this point should either be tested in the
    # GenericClient tests or in parfive itself.
    mock_enqueue.assert_called_once_with(
        ("ftp://ftp.swpc.noaa.gov/pub/weekly/RecentIndices.txt",
         "/some/path/RecentIndices.txt"))
def test_no_time_error():
    query = (a.Instrument('EVE'), a.Level(0))
    with pytest.raises(ValueError) as excinfo:
        Fido.search(*query)
    assert all(str(a) in str(excinfo.value) for a in query)

    query1 = (a.Instrument('EVE') & a.Level(0))
    query2 = (a.Time("2012/1/1", "2012/1/2") & a.Instrument("AIA"))
    with pytest.raises(ValueError) as excinfo:
        Fido.search(query1 | query2)
    assert all(str(a) in str(excinfo.value) for a in query1.attrs)
    assert all(str(a) not in str(excinfo.value) for a in query2.attrs)
def test_vso_errors_with_second_client(mock_download_all):
    query = a.Time("2011/01/01", "2011/01/02") & (a.Instrument("goes") | a.Instrument("EIT"))
    qr = Fido.search(query)
    res = Fido.fetch(qr)
    assert len(res.errors) == 1
    assert len(res) != qr.file_num
    # Assert that all the XRSClient records are in the output.
    for resp in qr.responses:
        if isinstance(resp, XRSClient):
            assert len(resp) == len(res)
def test_unified_response():
    start = parse_time("2012/1/1")
    end = parse_time("2012/1/2")
    qr = Fido.search(a.Instrument('EVE'), a.Level(0), a.Time(start, end))
    assert qr.file_num == 2
    strings = ['eve', 'SDO',
               start.strftime(TIMEFORMAT), end.strftime(TIMEFORMAT)]
    assert all(s in qr._repr_html_() for s in strings)
def test_unifiedresponse_slicing_reverse():
    results = Fido.search(
        a.Time("2012/1/1", "2012/1/5"), a.Instrument("lyra"))
    assert isinstance(results[::-1], UnifiedResponse)
    assert len(results[::-1]) == len(results)
    assert isinstance(results[0, ::-1], UnifiedResponse)
    assert results[0, ::-1]._list[0] == results._list[0][::-1]
def test_fido_indexing(queries):
    query1, query2 = queries
    # Work around for an issue where the filter was not catching this case.
    assume(query1.attrs[1].start != query2.attrs[1].start)

    res = Fido.search(query1 | query2)
    assert len(res) == 2
    assert len(res[0]) == 1
    assert len(res[1]) == 1

    aa = res[0, 0]
    assert isinstance(aa, UnifiedResponse)
    assert len(aa) == 1
    assert len(aa.get_response(0)) == 1

    aa = res[:, 0]
    assert isinstance(aa, UnifiedResponse)
    assert len(aa) == 2
    assert len(aa.get_response(0)) == 1

    aa = res[0, :]
    assert isinstance(aa, UnifiedResponse)
    assert len(aa) == 1

    with pytest.raises(IndexError):
        res[0, 0, 0]
    with pytest.raises(IndexError):
        res["saldkal"]
    with pytest.raises(IndexError):
        res[1.0132]
def test_levels(time):
    """
    Test the correct handling of level 0 / 1.

    The default should be level 1 from VSO; level 0 comes from EVEClient.
    """
    eve_a = a.Instrument('EVE')
    qr = Fido.search(time, eve_a)
    client = qr.get_response(0).client
    assert isinstance(client, VSOClient)

    qr = Fido.search(time, eve_a, a.Level(0))
    client = qr.get_response(0).client
    assert isinstance(client, eve.EVEClient)

    qr = Fido.search(time, eve_a, a.Level(0) | a.Level(1))
    # Use `resp` rather than `a` as the loop variable so the attrs module
    # is not shadowed.
    clients = {type(resp.client) for resp in qr.responses}
    assert clients.symmetric_difference({VSOClient, eve.EVEClient}) == set()
def test_responses():
    results = Fido.search(
        a.Time("2012/1/1", "2012/1/5"), a.Instrument("lyra"))
    for i, resp in enumerate(results.responses):
        assert isinstance(resp, QueryResponse)
    assert i + 1 == len(results)
def test_repr():
    results = Fido.search(
        a.Time("2012/1/1", "2012/1/5"), a.Instrument("lyra"))
    rep = repr(results)
    rep = rep.split('\n')
    # 7 header lines, the results table and two blank lines at the end.
    assert len(rep) == 7 + len(list(results.responses)[0]) + 2
def test_add_entries_from_fido_search_result_JSOC_client(database):
    assert len(database) == 0
    search_result = Fido.search(
        net_attrs.jsoc.Time('2014-01-01T00:00:00', '2014-01-01T01:00:00'),
        net_attrs.jsoc.Series('hmi.m_45s'),
        net_attrs.jsoc.Notify("*****@*****.**")
    )
    with pytest.raises(ValueError):
        database.add_from_fido_search_result(search_result)
def test_multiple_match():
    """
    Using the builtin clients a multiple match is not possible, so we
    create a dummy class.
    """
    new_registry = copy.deepcopy(Fido.registry)
    Fido.registry = new_registry

    class DummyClient():
        @classmethod
        def _can_handle_query(cls, *query):
            return True

    Fido.registry.update({DummyClient: DummyClient._can_handle_query})

    with pytest.raises(MultipleMatchError):
        Fido.search(a.Time("2016/10/1", "2016/10/2"), a.Instrument('lyra'))

    Fido.registry = CLIENTS
def test_fido_iter(queries):
    query1, query2 = queries
    # Work around for an issue where the filter was not catching this case.
    assume(query1.attrs[1].start != query2.attrs[1].start)
    res = Fido.search(query1 | query2)
    for resp in res:
        assert isinstance(resp, QueryResponse)
def test_repr(query):
    res = Fido.search(query)
    for rep_meth in (res.__repr__, res.__str__, res._repr_html_):
        if len(res) == 1:
            assert "Provider" in rep_meth()
            assert "Providers" not in rep_meth()
        else:
            assert "Provider" not in rep_meth()
            assert "Providers" in rep_meth()
def test_entries_from_fido_search_result_JSOC():
    search_result = Fido.search(
        net_attrs.jsoc.Time('2014-01-01T00:00:00', '2014-01-01T01:00:00'),
        net_attrs.jsoc.Series('hmi.m_45s'),
        net_attrs.jsoc.Notify("*****@*****.**")
    )
    with pytest.raises(ValueError):
        # Using list() here is important because the
        # entries_from_fido_search_result function uses yield;
        # list() consumes the generator, which runs the function body.
        list(entries_from_fido_search_result(search_result))
def fido_search_result():
    # A search query with responses from all instruments
    # No JSOC query
    return Fido.search(
        net_attrs.Time("2012/1/1", "2012/1/2"),
        net_attrs.Instrument('lyra') | net_attrs.Instrument('eve') |
        net_attrs.Instrument('XRS') | net_attrs.Instrument('noaa-indices') |
        net_attrs.Instrument('noaa-predict') |
        (net_attrs.Instrument('norh') & net_attrs.Wavelength(17*units.GHz)) |
        net_attrs.Instrument('rhessi') |
        (net_attrs.Instrument('EVE') & net_attrs.Level(0))
    )
def test_retry(mock_retry):
    """
    Test that you can use Fido.fetch to retry failed downloads.
    """
    res = Results()
    res.data.append("/this/worked.fits")

    err1 = FailedDownload("This is not a filename", "http://not.url/test", None)
    err2 = FailedDownload("This is not a filename2", "http://not.url/test2", None)
    res.errors.append(err1)
    res.errors.append(err2)
    mock_retry.return_value._errors += [err2]

    res2 = Fido.fetch(res, Results(["/this/also/worked.fits"]))
    assert res2 is not res

    # Assert that the result of retry ends up in the returned Results() object
    assert res2.data == ["/this/worked.fits", "/tmp/test",
                         "/this/also/worked.fits", "/tmp/test"]
    assert res2.errors == [err2, err2]
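# A minimal sketch of the retry pattern this test exercises, assuming a real
# query ``qr`` whose first attempt partially failed: re-passing the
# `parfive.Results` object to ``Fido.fetch`` retries only the failed files.
files = Fido.fetch(qr)
attempts = 0
while files.errors and attempts < 3:  # cap retries in a real script
    files = Fido.fetch(files)
    attempts += 1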
def test_downloader_type_error():
    with pytest.raises(TypeError):
        Fido.fetch([], downloader=Results())
def test_no_match():
    with pytest.raises(DrmsQueryError):
        Fido.search(a.Time("2016/10/01", "2016/10/02"),
                    a.jsoc.Series("bob"),
                    a.Sample(10 * u.s))
def test_client_fetch_wrong_type(mock_fetch):
    query = a.Time("2011/01/01", "2011/01/02") & a.Instrument.goes
    qr = Fido.search(query)
    with pytest.raises(TypeError):
        Fido.fetch(qr)
def test_unifiedresponse_slicing():
    results = Fido.search(a.Time("2012/1/1", "2012/1/5"), a.Instrument.lyra)
    assert isinstance(results[0:2], UnifiedResponse)
    assert isinstance(results[0], QueryResponseTable)
def test_fetch():
    qr = Fido.search(a.Instrument.eve,
                     a.Time("2016/10/01", "2016/10/02"),
                     a.Level.zero)
    res = Fido.fetch(qr)
    assert isinstance(res, Results)
def test_fido_indexing(queries):
    query1, query2 = queries
    # Work around for an issue where the filter was not catching this case.
    assume(query1.attrs[1].start != query2.attrs[1].start)

    res = Fido.search(query1 | query2)
    assert len(res) == 2

    assert isinstance(res[1:], UnifiedResponse)
    assert len(res[1:]) == 1
    assert isinstance(res[0:1], UnifiedResponse)
    assert len(res[0:1]) == 1
    assert isinstance(res[1:, 0], UnifiedResponse)
    assert len(res[1:, 0]) == 1
    assert isinstance(res[0:1, 0], UnifiedResponse)
    assert len(res[0:1, 0]) == 1

    assert isinstance(res[0][0], QueryResponseRow)
    assert isinstance(res[1][0], QueryResponseRow)
    assert isinstance(res[1, 0:1], QueryResponseTable)

    aa = res[0, 0]
    assert isinstance(aa, QueryResponseRow)

    aa = res[0, 'Instrument']
    assert isinstance(aa, QueryResponseColumn)

    aa = res[:, 'Instrument']
    assert isinstance(aa, UnifiedResponse)
    for table in aa:
        assert len(table.columns) == 1

    aa = res[0, ('Instrument', )]
    assert isinstance(aa, QueryResponseTable)
    for table in aa:
        assert len(table.columns) == 1

    aa = res[:, 0]
    assert isinstance(aa, UnifiedResponse)
    assert len(aa) == 2
    assert len(aa[0]) == 1

    aa = res[0, :]
    assert isinstance(aa, QueryResponseTable)

    aa = res[0, 1:]
    assert isinstance(aa, QueryResponseTable)

    if len(res.keys()) == len(res):
        aa = res[res.keys()[0], 1:]
        assert isinstance(aa, QueryResponseTable)

        aa = res[res.keys()[0], 'Instrument']
        assert isinstance(aa, QueryResponseColumn)

    with pytest.raises(IndexError):
        res[0, 0, 0]
    with pytest.raises(IndexError):
        res["saldkal"]
    with pytest.raises(IndexError):
        res[1.0132]

    if isinstance(res, UnifiedResponse):
        assert len(res) != 1
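# A plain-usage summary of the indexing rules asserted above; the two-part
# query is hypothetical and stands in for any search matching two clients.
res = Fido.search(query1 | query2)
table = res[0]                 # every record from the first client: QueryResponseTable
row = res[0, 0]                # a single record: QueryResponseRow
column = res[0, 'Instrument']  # a single column: QueryResponseColumn
per_client = res[:, 0]         # first record from each client: UnifiedResponse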
plt.rcParams['figure.figsize'] = (16, 8)

######################################################################
# Let's download an EUV image from both AIA and EUVI A, when the
# two spacecraft were separated by approximately 120 degrees.

euvi = (a.vso.Source('STEREO_A') &
        a.Instrument("EUVI") &
        a.Time('2011-11-01', '2011-11-01T00:10:00'))

aia = (a.Instrument.aia &
       a.Sample(24 * u.hour) &
       a.Time('2011-11-01', '2011-11-02'))

wave = a.Wavelength(19.5 * u.nm, 19.5 * u.nm)

res = Fido.search(wave, aia | euvi)
files = Fido.fetch(res)

######################################################################
# Create a map for each image, after making sure to sort by the
# appropriate name attribute (i.e., "AIA" and "EUVI") so that the
# order is reliable.

map_list = sunpy.map.Map(files)
map_list.sort(key=lambda m: m.detector)
map_aia, map_euvi = map_list

# We downsample these maps to reduce memory consumption, but you can
# comment this out.
out_shape = (512, 512)
map_aia = map_aia.resample(out_shape * u.pix)
###############################################################################
# In order to download the required data, we will use
# `Fido <sunpy.net.fido_factory.UnifiedDownloaderFactory>`, a downloader client.
# Using the `Fido.search` method, we need to define two variables:
# a timerange (`~sunpy.net.attrs.Time`) and
# an instrument (`~sunpy.net.attrs.Instrument`).

timerange = attrs.Time('1996/05/24 11:00', '1996/05/24 12:00')
instrument = attrs.Instrument('C3')

###############################################################################
# These must now be passed to ``Fido.search``, which will query the
# online services:

result = Fido.search(timerange, instrument)

# ``result`` contains the return from the online search.
# In this case, we have found 4 files that correspond to our search parameters.
print(result)

###############################################################################
# The next step is to download the search results with `Fido.fetch`.
# We pass in the ``result``, and ``downloaded_files`` will contain
# a list of the location of each of the downloaded files.

downloaded_files = Fido.fetch(result)
print(downloaded_files)

###############################################################################
# Finally we can pass the first file we downloaded into `sunpy.map.Map`
def download_omni_text(input_datetime):
    t_start = input_datetime - datetime.timedelta(1)
    t_end = input_datetime + datetime.timedelta(1) + datetime.timedelta(minutes=10)
    t_start_day = input_datetime
    t_end_day = input_datetime + datetime.timedelta(minutes=1439)

    # --------------------------------------------------------#
    # OMNI Data - includes solar wind, and geomag params       #
    # --------------------------------------------------------#
    # Get OMNI data
    omniInt = omnireader.omni_interval(t_start, t_end, '5min', cdf_or_txt='txt')
    # print(omniInt.cdfs[0].vars)  # prints all the variables available on omni

    epochs = omniInt['Epoch']  # time array for omni 5min data
    By, Bz, AE, SymH = (omniInt['BY_GSM'], omniInt['BZ_GSM'],
                        omniInt['AE_INDEX'], omniInt['SYM_H'])
    vsw, psw = omniInt['flow_speed'], omniInt['Pressure']

    borovsky_reader = omnireader.borovsky(omniInt)
    borovsky = borovsky_reader()

    # newell_reader = omnireader.newell(omniInt)
    # newell = newell_reader()

    def NewellCF_calc(v, bz, by):
        # v expected in km/s
        # b's expected in nT
        NCF = np.zeros_like(v)
        NCF.fill(np.nan)
        bt = np.sqrt(by**2 + bz**2)
        bztemp = bz
        bztemp[bz == 0] = .001
        # Calculate clock angle (theta_c = t_c)
        tc = np.arctan2(by, bztemp)
        neg_tc = bt * np.cos(tc) * bz < 0
        tc[neg_tc] = tc[neg_tc] + np.pi
        sintc = np.abs(np.sin(tc / 2.))
        NCF = (v**1.33333) * (sintc**2.66667) * (bt**0.66667)
        return NCF

    newell = NewellCF_calc(vsw, Bz, By)

    proton_flux_10MeV, proton_flux_30MeV, proton_flux_60MeV = (
        omniInt['PR-FLX_10'], omniInt['PR-FLX_30'], omniInt['PR-FLX_60'])

    # Calculate clock angle
    clock_angle = np.degrees(np.arctan2(By, Bz))
    clock_angle[clock_angle < 0] = clock_angle[clock_angle < 0] + 360.
    print('Got 5 minutes data')

    omniInt_1hr = omnireader.omni_interval(t_start, t_end, 'hourly',
                                           cdf_or_txt='txt')
    epochs_1hr = omniInt_1hr['Epoch']  # datetime timestamps
    F107, KP = omniInt_1hr['F10_INDEX'], omniInt_1hr['KP']
    print('Got hour data')

    # --------------------------------------------------------#
    # GOES X-ray data - Channel 1-8A, defines flare class      #
    # --------------------------------------------------------#
    results = Fido.search(a.Time(t_start, t_end), a.Instrument('XRS'))
    files = Fido.fetch(results)
    goes = TimeSeries(files, concatenate=True)
    goes_l = goes.data['xrsb']
    print('Got GOES data')

    # --------------------------------------------------------#
    # Resample data to 1min to match GNSS CHAIN network        #
    # --------------------------------------------------------#
    # Resample OMNI solar wind data
    By_data = pd.Series(By, index=epochs).resample('1T').pad().truncate(t_start_day, t_end_day)
    Bz_data = pd.Series(Bz, index=epochs).resample('1T').pad().truncate(t_start_day, t_end_day)
    AE_data = pd.Series(AE, index=epochs).resample('1T').pad().truncate(t_start_day, t_end_day)
    SymH_data = pd.Series(SymH, index=epochs).resample('1T').pad().truncate(t_start_day, t_end_day)
    vsw_data = pd.Series(vsw, index=epochs).resample('1T').pad().truncate(t_start_day, t_end_day)
    psw_data = pd.Series(psw, index=epochs).resample('1T').pad().truncate(t_start_day, t_end_day)
    borovsky_data = pd.Series(borovsky, index=epochs).resample('1T').pad().truncate(t_start_day, t_end_day)
    newell_data = pd.Series(newell, index=epochs).resample('1T').pad().truncate(t_start_day, t_end_day)
    proton_10_data = pd.Series(proton_flux_10MeV, index=epochs).resample('1T').pad().truncate(t_start_day, t_end_day)
    proton_30_data = pd.Series(proton_flux_30MeV, index=epochs).resample('1T').pad().truncate(t_start_day, t_end_day)
    proton_60_data = pd.Series(proton_flux_60MeV, index=epochs).resample('1T').pad().truncate(t_start_day, t_end_day)
    clock_angle_data = pd.Series(clock_angle, index=epochs).resample('1T').pad().truncate(t_start_day, t_end_day)
    F107data = pd.Series(F107, index=epochs_1hr).resample('1T').pad().truncate(t_start_day, t_end_day)
    KPdata = pd.Series(KP, index=epochs_1hr).resample('1T').pad().truncate(t_start_day, t_end_day)

    # Function to find data at previous time intervals
    def roll_back(data, minutes=1):
        ts = t_start_day - datetime.timedelta(minutes=minutes)
        te = t_end_day - datetime.timedelta(minutes=minutes)
        data = pd.Series(data, index=epochs).resample('1T').pad()
        new_data = data.truncate(ts, te)
        rolled_data = pd.Series(np.array(new_data), index=By_data.index)
        return rolled_data

    # Calculate rolled-back timeseries - 15 and 30 minutes previous
    By_15 = roll_back(By, minutes=15)
    By_30 = roll_back(By, minutes=30)
    Bz_15 = roll_back(Bz, minutes=15)
    Bz_30 = roll_back(Bz, minutes=30)
    AE_15 = roll_back(AE, minutes=15)
    AE_30 = roll_back(AE, minutes=30)
    SymH_15 = roll_back(SymH, minutes=15)
    SymH_30 = roll_back(SymH, minutes=30)
    vsw_15 = roll_back(vsw, minutes=15)
    vsw_30 = roll_back(vsw, minutes=30)
    psw_15 = roll_back(psw, minutes=15)
    psw_30 = roll_back(psw, minutes=30)
    borovsky_15 = roll_back(borovsky, minutes=15)
    borovsky_30 = roll_back(borovsky, minutes=30)
    newell_15 = roll_back(newell, minutes=15)
    newell_30 = roll_back(newell, minutes=30)
    clock_angle_15 = roll_back(clock_angle, minutes=15)
    clock_angle_30 = roll_back(clock_angle, minutes=30)

    # Resample GOES X-ray flux
    goes_data = goes_l.resample('1T').mean().truncate(t_start_day, t_end_day)

    # Put everything in a dataframe and save
    dataframe = pd.DataFrame()
    dataframe['Bz - 0min [nT]'] = Bz_data
    dataframe['Bz - 15min [nT]'] = Bz_15
    dataframe['Bz - 30min [nT]'] = Bz_30
    dataframe['By - 0min [nT]'] = By_data
    dataframe['By - 15min [nT]'] = By_15
    dataframe['By - 30min [nT]'] = By_30
    dataframe['Vsw - 0min [km/s]'] = vsw_data
    dataframe['Vsw - 15min [km/s]'] = vsw_15
    dataframe['Vsw - 30min [km/s]'] = vsw_30
    dataframe['Psw - 0min [nPa]'] = psw_data
    dataframe['Psw - 15min [nPa]'] = psw_15
    dataframe['Psw - 30min [nPa]'] = psw_30
    dataframe['AE - 0min [nT]'] = AE_data
    dataframe['AE - 15min [nT]'] = AE_15
    dataframe['AE - 30min [nT]'] = AE_30
    dataframe['SymH - 0min [nT]'] = SymH_data
    dataframe['SymH - 15min [nT]'] = SymH_15
    dataframe['SymH - 30min [nT]'] = SymH_30
    dataframe['Clock Angle - 0min [deg]'] = clock_angle_data
    dataframe['Clock Angle - 15min [deg]'] = clock_angle_15
    dataframe['Clock Angle - 30min [deg]'] = clock_angle_30
    dataframe['Newell CF - 0min [m/s^(4/3) T^(2/3)]'] = newell_data
    dataframe['Newell CF - 15min [m/s^(4/3) T^(2/3)]'] = newell_15
    dataframe['Newell CF - 30min [m/s^(4/3) T^(2/3)]'] = newell_30
    dataframe['Borovsky CF - 0min [nT km/s]'] = borovsky_data
    dataframe['Borovsky CF - 15min [nT km/s]'] = borovsky_15
    dataframe['Borovsky CF - 30min [nT km/s]'] = borovsky_30
    dataframe['Kp [dimensionless]'] = KPdata
    dataframe['F107 [sfu=10^-22 W/m^2/hz]'] = F107data
    dataframe['Proton 10MeV'] = proton_10_data
    dataframe['Proton 30MeV'] = proton_30_data
    dataframe['Proton 60MeV'] = proton_60_data
    dataframe['GOES X-ray Wm^-2'] = goes_data

    dataframe_nan = dataframe.replace(9999.99, np.nan)  # replace 9999.99 with NaNs

    filepath = '/Users/ryanmcgranaghan/Documents/Conferences/ISSI_2018/ISSI_geospaceParticles/solar_data/'
    filename = (filepath + 'solardata' + input_datetime.strftime('%Y') +
                '_' + input_datetime.strftime('%j') + '.csv')
    print('output solardata file location = {}'.format(filename))
    dataframe_nan.to_csv(filename, index_label='Datetime')
def test_fido_onewave_level1b(start, end, wave, expected_num_files):
    result = Fido.search(a.Time(start, end),
                         a.Instrument('suvi'),
                         a.Wavelength(wave * u.Angstrom),
                         a.Level('1b'))
    assert result.file_num == expected_num_files
# Script to grab and update MDI and HMI data
# See sunpy notes at https://sunpy.readthedocs.io/en/latest/guide/acquiring_data/jsoc.html

# Import libraries
import sunpy
import sunpy.io
import datetime
from sunpy.net import Fido, attrs as a
import drms
import os

# Specify any directories
hmidat = os.path.expanduser('~/data/hmi.Synoptic_Mr.polfil/')

# Start the client
c = drms.Client()

# Generate a search
# CL - sanitize the use of my email address and directories here...
today = datetime.datetime.now().replace(microsecond=0, second=0, minute=0, hour=0)
res = Fido.search(a.Time('2019-06-01', today),
                  a.jsoc.Series('hmi.Synoptic_Mr_polfil_720s'))

# Once the query is made and trimmed down...
download = Fido.fetch(res, path=hmidat + '{file}.fits')
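# The "trimmed down" step mentioned above would typically be a slice of the
# search results before fetching; the bounds here are purely illustrative.
download = Fido.fetch(res[0, :5], path=hmidat + '{file}.fits')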
def test_srs_tar_unpack():
    qr = Fido.search(
        a.Instrument("soon") & a.Time("2015/01/01", "2015/01/01T23:59:29"))
    res = Fido.fetch(qr)
    assert len(res) == 1
    assert res.data[0].endswith("20150101SRS.txt")
def test_online_fido(query):
    unifiedresp = Fido.search(query)
    check_response(query, unifiedresp)
def test_unclosedSocket_warning():
    with pytest.warns(None):
        attrs_time = a.Time('2005/01/01 00:10', '2005/01/01 00:15')
        result = Fido.search(attrs_time, a.Instrument.eit)
        Fido.fetch(result)
ts_goes = sunpy.timeseries.TimeSeries(sunpy.data.sample.GOES_XRS_TIMESERIES,
                                      source='XRS')
ts_lyra = sunpy.timeseries.TimeSeries(sunpy.data.sample.LYRA_LEVEL3_TIMESERIES,
                                      source='LYRA')
ts_noaa_ind = sunpy.timeseries.TimeSeries(
    sunpy.data.sample.NOAAINDICES_TIMESERIES, source='NOAAIndices')
ts_noaa_pre = sunpy.timeseries.TimeSeries(
    sunpy.data.sample.NOAAPREDICT_TIMESERIES, source='NOAAPredictIndices')
ts_norh = sunpy.timeseries.TimeSeries(sunpy.data.sample.NORH_TIMESERIES,
                                      source='NoRH')
ts_rhessi = sunpy.timeseries.TimeSeries(sunpy.data.sample.RHESSI_TIMESERIES,
                                        source='RHESSI')
ts_gbm = sunpy.timeseries.TimeSeries(sunpy.data.sample.GBM_TIMESERIES,
                                     source='GBMSummary')
# Note: for some FITS files a source can be determined implicitly, however it
# is good practice to declare it explicitly when possible.

##############################################################################
# You can create a list of TimeSeries objects by using multiple files. First,
# however, we shall download these files using `Fido`.
goes = Fido.search(a.Time("2012/06/01", "2012/06/04"), a.Instrument("XRS"))
goes_files = Fido.fetch(goes)

# Using these new files you get a list:
lis_goes_ts = sunpy.timeseries.TimeSeries(goes_files[:2], source='XRS')
lis_goes_ts = sunpy.timeseries.TimeSeries(goes_files, source='XRS')

# Using the concatenate=True kwarg you can merge the files into one TimeSeries:
combined_goes_ts = sunpy.timeseries.TimeSeries(goes_files, source='XRS',
                                               concatenate=True)
combined_goes_ts.peek()
# Note: at the moment we only accept TimeSeries of a single class being
# created together with the factory. The issue is that several source
# filetypes don't contain metadata that enables us to reliably determine the
# source implicitly, and at the moment the source is given as a single keyword
# argument for simplicity. But you can merge different TimeSeries classes
# using concatenate.
# Debate: are we OK with one source at a time?
import astropy.time
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.visualization import ImageNormalize, SqrtStretch

import sunpy.map
from sunpy.net import Fido
from sunpy.net import attrs as a

#####################################################
# First, query a full frame AIA image.
t0 = astropy.time.Time('2012-09-24T14:56:03', scale='utc', format='isot')
q = Fido.search(
    a.Instrument.aia,
    a.Physobs.intensity,
    a.Wavelength(171 * u.angstrom),
    a.Time(t0, t0 + 13 * u.s),
)
m = sunpy.map.Map(Fido.fetch(q))

#####################################################
# Next, we will create a submap from this image. We will
# crop the field of view to active region NOAA 11575.
m_cutout = m.submap(
    SkyCoord(-500 * u.arcsec, -275 * u.arcsec, frame=m.coordinate_frame),
    top_right=SkyCoord(150 * u.arcsec, 375 * u.arcsec,
                       frame=m.coordinate_frame),
)
m_cutout.peek()
def test_fido_mock(mock_get_observing_summary_dbase_file,
                   mock_parse_observing_summary_dbase_file,
                   mock_get_base_url):
    qr = Fido.search(a.Time('2003-11-01', '2003-11-03'),
                     a.Instrument('rhessi'))
    assert isinstance(qr, UnifiedResponse)
    assert qr._numfile == 3
def test_path():
    results = Fido.search(a.Time("2012/1/1", "2012/1/5"), a.Instrument.lyra)
    Fido.fetch(results, path="notapath/{file}")
# The first step is to download some data. We are going to get an image from
# early 2011 when the STEREO spacecraft were roughly 90 deg separated from the
# Earth.
stereo = (a.vso.Source('STEREO_B') &
          a.Instrument('EUVI') &
          a.Time('2011-01-01', '2011-01-01T00:10:00'))

aia = (a.Instrument('AIA') &
       a.vso.Sample(24 * u.hour) &
       a.Time('2011-01-01', '2011-01-02'))

wave = a.Wavelength(30 * u.nm, 31 * u.nm)

res = Fido.search(wave, aia | stereo)

###############################################################################
# The results from the VSO query:
print(res)

###############################################################################
# Download the files:
files = Fido.fetch(res)
print(files)

###############################################################################
# Create a dictionary with the two maps, cropped down to full disk.
maps = {m.detector: m.submap(SkyCoord([-1100, 1100] * u.arcsec,
                                      [-1100, 1100] * u.arcsec,
                                      frame=m.coordinate_frame))
        for m in sunpy.map.Map(files)}
def test_unifiedresponse_slicing_reverse():
    results = Fido.search(a.Time("2012/1/1", "2012/1/5"), a.Instrument.lyra)
    assert isinstance(results[::-1], UnifiedResponse)
    assert len(results[::-1]) == len(results[::1])
    assert isinstance(results[0, ::-1], QueryResponseTable)
    assert all(results[0][::-1] == results[0, ::-1])
def get_HMI_data(user_date, user_notify='*****@*****.**', user_dir=None,
                 max_conn=1, download=False, show_files=False):
    """
    Locate the nearest (relative to the user input date) HMI 720s Level 1
    Stokes data series and Level 2 inversion results. If the data is already
    present in the user-specified data directory, it is loaded from there.

    TODO:
    1. If download=False the code finds the nearest files in time, which may
       not be inside the time search window. Create a keyword that determines
       the width of the search time window and then excludes any files, even
       local ones, outside it. The downside to this is the reduced ability
       to load any files.

    Parameters
    ----------
    user_date: `astropy.time` object.
    user_notify: Notification email. This must be registered with JSOC.
    """
    # Calculate a 1s bounding time around the input date user_date.
    # FIDO finds all series where at least one observation was present in the
    # time interval.
    time0 = astropy.time.Time(user_date.gps - 1., format='gps', scale='tai')
    time1 = astropy.time.Time(user_date.gps + 1., format='gps', scale='tai')
    a_time = attrs.Time(time0, time1)
    print('Time window used for the search: ', a_time)

    # Set the notification email. This must be registered with JSOC.
    a_notify = attrs.jsoc.Notify(user_notify)

    # Set the default data directory if no user directory is specified.
    if user_dir is None:
        # Set working directory.
        user_dir = os.getcwd() + '/Data/SDO/'
        print('User directory pointing to SDO data is not included.')
        print('Setting the default directory to: ' + user_dir)

    # Check if the data directory exists and create one if it doesn't.
    if not os.path.exists(user_dir):
        print('Data directory created: ', user_dir)
        os.makedirs(user_dir)

    ### Get the 720s HMI Stokes image series ###
    a_series = attrs.jsoc.Series('hmi.S_720s')
    if download:
        results_stokes = Fido.search(a_time, a_series, a_notify)
        down_files = Fido.fetch(results_stokes, path=user_dir, max_conn=1)
        # Sort the input filenames
        all_fnames_stokes = natsort.natsorted(down_files)
    else:
        all_fnames_stokes = parse_folder(dir_path=user_dir, inst='hmi',
                                         series='S_720s', ext='fits',
                                         show=show_files)
        if len(all_fnames_stokes) > 1:
            tstamps = [i.split('.')[2] for i in all_fnames_stokes]
            tstamps = [
                sunpy.time.parse_time('_'.join(i.split('_')[0:2]))
                for i in tstamps
            ]
            tstamps_diff = [np.abs(i.gps - user_date.gps) for i in tstamps]

            # Search for the closest timestamp
            tstamps_diff = np.asarray(tstamps_diff)
            tstamps_ix, = np.where(tstamps_diff == tstamps_diff.min())
            all_fnames_stokes = np.asarray(all_fnames_stokes)[tstamps_ix]
        print(f'No download requested. '
              f'Nearest {len(all_fnames_stokes)} Stokes files found: ')
        print(all_fnames_stokes)

    ### Get the HMI Milne-Eddington magnetic field inversion series ###
    a_series = attrs.jsoc.Series('hmi.ME_720s_fd10')
    if download:
        results_magvec = Fido.search(a_time, a_series, a_notify)
        down_files = Fido.fetch(results_magvec, path=user_dir, max_conn=1)
        # Sort the input names
        all_fnames_magvec = natsort.natsorted(down_files)
    else:
        all_fnames_magvec = parse_folder(dir_path=user_dir, inst='hmi',
                                         series='ME_720s_fd10', ext='fits',
                                         show=show_files)
        if len(all_fnames_magvec) > 1:
            tstamps = [i.split('.')[2] for i in all_fnames_magvec]
            tstamps = [
                sunpy.time.parse_time('_'.join(i.split('_')[0:2]))
                for i in tstamps
            ]
            tstamps_diff = [np.abs(i.gps - user_date.gps) for i in tstamps]
        else:
            print('No files found close to the date requested')
            return

        # Search for the closest timestamp
        tstamps_diff = np.asarray(tstamps_diff)
        tstamps_ix, = np.where(tstamps_diff == tstamps_diff.min())
        all_fnames_magvec = np.asarray(all_fnames_magvec)[tstamps_ix]
        print(f'No download requested. '
              f'Nearest inversion {len(all_fnames_magvec)} files found: ')
        print(all_fnames_magvec)

    ## Create data array ##
    # Use sunpy.map.Map to read HMI files since it provides the correct
    # observer frame of reference.
    level1_data = []
    for i, fname in enumerate(all_fnames_stokes):
        level1_data.append(sunpy.map.Map(fname).data)
    level1_data = np.asarray(level1_data)
    level1_data = level1_data.reshape(4, 6, level1_data.shape[1],
                                      level1_data.shape[2])
    print(f'Created data cube with dimensions: {level1_data.shape}')

    ## Create the WCS object ##
    # Expand the coordinate axes to include wavelength and Stokes dimensions.
    l0 = 6173.345 * 1.e-10  # m Central wavelength for FeI line
    dl = 0.0688 * 1.e-10  # m

    # Generate WCS for the data cube using the celestial WCS information from
    # the first Stokes map.
    wcs_header = sunpy.map.Map(all_fnames_stokes[0]).wcs.to_header()
    wcs_header["WCSAXES"] = 4

    # Add wavelength axis.
    wcs_header["CRPIX3"] = 3.5
    wcs_header["CDELT3"] = dl
    wcs_header["CUNIT3"] = 'm'
    wcs_header["CTYPE3"] = "WAVE"
    wcs_header["CRVAL3"] = l0

    # Add Stokes axis.
    wcs_header["CRPIX4"] = 0
    wcs_header["CDELT4"] = 1
    wcs_header["CUNIT4"] = ''
    wcs_header["CTYPE4"] = "STOKES"
    wcs_header["CRVAL4"] = 0

    level1_wcs = astropy.wcs.WCS(wcs_header)

    # Create MagVectorCube from HMI inversions
    mag_params = ['field', 'inclination', 'azimuth']
    level2_data = []
    # Load 2D maps into level2_data in the order determined by entries in
    # mag_params
    use_fnames = []
    for mag_param in mag_params:
        for i, fname in enumerate(all_fnames_magvec):
            data_id = fname.split('.')[-2]
            if data_id == mag_param:
                use_fnames.append(fname)
                with astropy.io.fits.open(fname) as hdulist:
                    level2_data.append(hdulist[1].data)
    level2_data = np.asarray(level2_data)
    print(f'Created data cube with dimensions: {level2_data.shape}')
    print('Filenames used: ')
    for fname in use_fnames:
        print(fname)

    # Expand the WCS coordinates to include the magnetic field parameters.
    # Generate WCS for the data cube using the celestial WCS information from
    # the first Stokes map.
    wcs_header = sunpy.map.Map(all_fnames_stokes[0]).wcs.to_header()
    wcs_header["WCSAXES"] = 3

    # Add magnetic field parameter axis.
    wcs_header["CRPIX3"] = 0
    wcs_header["CDELT3"] = 1
    wcs_header["CUNIT3"] = ''
    wcs_header["CTYPE3"] = "Parameter"
    wcs_header["CRVAL3"] = 0

    level2_wcs = astropy.wcs.WCS(wcs_header)

    return level1_data, level1_wcs, level2_data, level2_wcs
def test_fido_onewave_level1b(start, end, wave, expected_num_files):
    goes_sat = a.goes.SatelliteNumber.sixteen
    result = Fido.search(a.Time(start, end),
                         a.Instrument.suvi,
                         goes_sat,
                         a.Wavelength(wave * u.Angstrom),
                         a.Level('1b'))
    assert result.file_num == expected_num_files
from sunpy.net import Fido, attrs as a, vso
import datetime
from astropy import units as u

# Get UTC time rounded down to the previous hour
time_now = datetime.datetime.utcnow().replace(microsecond=0, second=0, minute=0)

# Time range to search for available FITS files
search_time = a.Time(
    (time_now - datetime.timedelta(hours=10)).strftime('%Y/%m/%d %H:%M'),
    time_now.strftime('%Y/%m/%d %H:%M'))

search_time2 = a.Time('2014-01-01 00:00', '2014-01-02 00:00')

Fido.search(search_time2, a.Instrument('hmi'), a.vso.Sample(1 * u.hour))
hmi_search = Fido.search(search_time, a.Instrument('hmi'),
                         a.vso.Sample(1 * u.hour))

from astropy.time import Time
from sunpy.net.attr import AttrAnd, AttrOr

ta = Time(time_now, format='datetime', scale='tai')
ta1 = Time(time_now - datetime.timedelta(days=2), format='datetime', scale='tai')
ta2 = Time(time_now - datetime.timedelta(days=1), format='datetime', scale='tai')
##############################################################################
# Let's select a date (yyyy-mm-dd) for which we will be downloading files.
day = parse_time("2017-01-25")

##############################################################################
# We will select the entire day as our timerange.
start_time = day
end_time = day + TimeDelta(23*u.hour + 59*u.minute + 59*u.second)

##############################################################################
# Send the search query.
results = Fido.search(a.Time(start_time, end_time),
                      a.Instrument('HMI') & a.vso.Physobs("LOS_magnetic_field"),
                      a.vso.Sample(60 * u.second))

##############################################################################
# We will only download the first file for the day. For that we use Fido
# indexing on the search results, which will return the first file for the
# day.
result = results[0, 0]

##############################################################################
# Download the file. The `fetch` method returns a list of filenames. As we
# used indexing to get the first file of the day, the list contains one
# filename.
file_name = Fido.fetch(result)
# In this example we are going to make a lot of side by side figures, so
# let's change the default figure size.
plt.rcParams['figure.figsize'] = (16, 8)

######################################################################
# We are going to download one AIA and one HMI magnetogram image.
time = (a.Sample(24 * u.hour) &
        a.Time('2010-08-19', '2010-08-19T00:10:00', '2010-08-19') &
        a.vso.Extent(0, 0, 0, 0, "FULLDISK"))
aia = a.Instrument.aia & a.Wavelength(17 * u.nm, 18 * u.nm)
hmi = a.Instrument.hmi & a.Physobs.los_magnetic_field

res = Fido.search(time, aia | hmi)
files = Fido.fetch(res[:, 0])

######################################################################
# We create a map for each image and resample each one just to
# reduce the computation time.
map_aia, map_hmi = [m.resample((1024, 1024)*u.pix)
                    for m in sunpy.map.Map(sorted(files))]
# Why do we have to do this?
map_hmi.plot_settings['cmap'] = "hmimag"
map_hmi.plot_settings['norm'] = plt.Normalize(-2000, 2000)

######################################################################
# Plot both images side by side.
def test_mixed_retry_error():
    with pytest.raises(TypeError):
        Fido.fetch([], Results())
def test_fido():
    atr = a.Time('2010/10/01', '2010/10/02')
    res = Fido.search(atr, a.Instrument('waves'))
    assert isinstance(res[0].client, WAVESClient)
    assert len(res[0]) == 4
end = datetime(2018, 5, 1, 0, 0, 0)  # currently generating 8 years of data
time_interval = timedelta(minutes=60)
# Avoid download chunks greater than 1 month in order to not download too
# much at once.
download_chunk = timedelta(days=10)

# Break the download into pieces and download each chunk.
current_time = start
while (current_time < end):
    if (end - current_time > download_chunk):
        next_time = current_time + download_chunk
    else:
        next_time = end

    response = Fido.search(attrs.jsoc.Time(current_time, next_time),
                           attrs.jsoc.Notify('*****@*****.**'),
                           attrs.jsoc.Series('hmi.Sharp_720s'),
                           attrs.jsoc.Segment('bitmap'),
                           attrs.Sample(time_interval.total_seconds() * u.s))
    response
    res = Fido.fetch(response, path=sharp_dir + '/{file}.fits')
    current_time = next_time

# .verify('fix') produces many warnings which will lag the jupyter notebook
warnings.simplefilter("ignore")

# Extract relevant keywords in the given order
keywords = [
    'HARPNUM', 'T_REC', 'NAXIS1', 'NAXIS2', 'CDELT1', 'CDELT2',
    'IMCRPIX1', 'IMCRPIX2', 'LAT_FWT', 'LON_FWT', 'NPIX'
]  # Keywords in order to be saved
A simple example showing how to download data from the VSO with Fido.
"""

###############################################################################
# Fido is the primary interface to search for and download data and
# will search the VSO when appropriate. First import it and the search
# attributes.

import astropy.units as u

from sunpy.net import Fido, attrs as a

###############################################################################
# We could ask for all SOHO/EIT data between 00:10 and 00:15 on
# 2005 January 1st.

attrs_time = a.Time('2005/01/01 00:10', '2005/01/01 00:15')
result = Fido.search(attrs_time, a.Instrument('eit'))

###############################################################################
# Let's inspect the result.

print(result)

###############################################################################
# Now let's download this query. If we don't provide a path, it will download
# the file into the sunpy data directory.

downloaded_files = Fido.fetch(result)

###############################################################################
# You can check where the file was downloaded to.
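###############################################################################
# ``Fido.fetch`` also accepts a ``path`` template if you want the files
# somewhere other than the sunpy data directory: ``{file}`` expands to each
# remote filename, and the directory used here is only an example.

downloaded_files = Fido.fetch(result, path="./eit_data/{file}")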
def test_fido(time, instrument):
    qr = Fido.search(a.Time('2012/10/4', '2012/10/6'), Instrument('XRS'))
    assert isinstance(qr, UnifiedResponse)
    response = Fido.fetch(qr)
    assert len(response) == qr._numfile
import astropy.units as u
import matplotlib.pyplot as plt

import sunpy.map
from sunpy.net import Fido, attrs as a

###############################################################################
# Now we will download some data with `sunpy.net.Fido`.
# A `Fido.search` requires us to specify a `~sunpy.net.attr.Time`,
# `~sunpy.net.attr.Sample`, `~sunpy.net.attr.Instrument`
# and the `~sunpy.net.attr.vso.Physobs`.
# We set a time range from ``2018/11/04 12:00:00`` to ``2018/11/04 12:10:00``
# for HMI ``LOS_magnetic_field`` with the images spaced every 720 seconds.

result = Fido.search(a.Time('2018/11/04 12:00:00', '2018/11/04 12:10:00'),
                     a.Instrument('hmi'),
                     a.Sample(720*u.s),
                     a.vso.Physobs('LOS_magnetic_field'))

###############################################################################
# Now we can see what results we obtained from our search.
# Notice we have two files. One is the full disk image we plan to display
# and the other is a synoptic version of said image.

print(result)

###############################################################################
# Once we are happy with the results obtained from the search,
# we can download the data with `Fido.fetch`.
# In this case we only want one file, so we can index the result.
# A `Fido` result can be from several clients, so we have to index the first
# client and then index the first result.
def test_fido_waverange_level1b(start, end, wave1, wave2, expected_num_files):
    """Check that a wavelength range query returns the expected number of files."""
    result = Fido.search(a.Time(start, end),
                         a.Instrument('suvi'),
                         a.Wavelength(wave1 * u.Angstrom, wave2 * u.Angstrom),
                         a.Level('1b'))
    assert result.file_num == expected_num_files
def test_fido(time, instrument):
    qr = Fido.search(time, instrument)
    assert isinstance(qr, UnifiedResponse)
    response = Fido.fetch(qr)
    assert len(response) == qr._numfile