Example no. 1
import datetime as dt

import numpy as np
import pandas as pd
from sunpy.time import TimeRange


def DataUpdate():
    # Load the existing dataset and resume from the last recorded day.
    Data = pd.read_csv("dataset13-v3.csv", sep=';', engine='python')
    last = Data.iloc[-1]
    # Zero-pad month and day so the fixed-width slice below ('YYYY/MM/DD')
    # is valid for every date.
    tstart = f'{int(last["Year"])}/{int(last["Month"]):02d}/{int(last["Day"]):02d} 23:59:59.000'
    tend = dt.datetime.now()
    print(tend)
    if tend.day != int(tstart[8:10]):
        time_range = TimeRange(tstart, tend)
        n_days = int(round(time_range.days.value))
        days_array = np.array(
            [str(i.start).split('T')[0] for i in time_range.split(n_days)])

        for m in time_range.split(n_days):
            time_range1 = TimeRange(m.start, m.end)

            print(str(m.start)[:10] + ' / ' + str(tend), end='')
            print('\r', end='')
            try:
                key1_1, value1_1, key1_2, value1_2 = Sunflare_data_finder(
                    time_range1, n_days, days_array, client)
                key2_1, value2_1, key2_2, value2_2 = Coronal_Holes_data_finder(
                    time_range1, n_days, days_array, client)
                key3_1, value3_1, key3_2, value3_2, key3_3, value3_3 = Sunspot_Finder(
                    time_range1, n_days, client)
                key5_1, value5_1, key5_2, value5_2, key5_3, value5_3 = CME_Finder(
                    time_range1, n_days, client)
            except Exception:
                # One blind retry: the remote queries occasionally fail on
                # transient errors. Retry all four finders so every key/value
                # pair is defined before the row is assembled below.
                key1_1, value1_1, key1_2, value1_2 = Sunflare_data_finder(
                    time_range1, n_days, days_array, client)
                key2_1, value2_1, key2_2, value2_2 = Coronal_Holes_data_finder(
                    time_range1, n_days, days_array, client)
                key3_1, value3_1, key3_2, value3_2, key3_3, value3_3 = Sunspot_Finder(
                    time_range1, n_days, client)
                key5_1, value5_1, key5_2, value5_2, key5_3, value5_3 = CME_Finder(
                    time_range1, n_days, client)

            key4_1, value4_1, key4_2, value4_2, key4_3, value4_3, key4_4, value4_4 = Geostorm_Finder(
                m.end, days_array)
            data = Dict_Generator(
                [key4_1, key1_1, key1_2, key2_1, key2_2, key3_1, key3_2,
                 key3_3, key4_2, key4_3, key4_4, key5_1, key5_2, key5_3],
                [value4_1, value1_1, value1_2, value2_1, value2_2, value3_1,
                 value3_2, value3_3, value4_2, value4_3, value4_4, value5_1,
                 value5_2, value5_3])
            # DataFrame.append was removed in pandas 2.0; pd.concat is the
            # supported equivalent for appending one row.
            Data = pd.concat([Data, pd.DataFrame([data])],
                             ignore_index=True, sort=False)
        Data.to_csv('dataset13-v3.csv', sep=';', index=False)
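All three examples center on sunpy's TimeRange.split, which divides a time range into n equally sized, consecutive sub-ranges. A minimal, self-contained sketch of the per-day pattern used above:

from sunpy.time import TimeRange

tr = TimeRange('2021/01/01', '2021/01/04')  # a three-day range
n_days = int(round(tr.days.value))

# split(n) returns a list of n equal, consecutive TimeRange objects
for sub in tr.split(n_days):
    print(sub.start.iso, '->', sub.end.iso)  # one line per day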
Example no. 2
    # Read one block of samples from the filterbank file and build a
    # normalised Stokes I dynamic spectrum from it.
    readTimestamp = 0
    dataBlock_all = filReader.readBlock(readTimestamp, samplesPerBlock)

    stokesI = spp.Filterbank.FilterbankBlock(dataBlock_all,
                                             dataBlock_all.header)
    stokesI = stokesI.normalise()

    # Time span of the block: native 5.12 us sampling, downsampled by 512.
    time_len = stokesI.shape[1]
    time_res = 5.12e-6 * 512
    trange = TimeRange(tstart, time_len * time_res * u.second)

    sbs = np.arange(51, 461)  # subbands used in the observation
    obs_mode = 3
    no_sbs = len(sbs)
    ylims, xlims = get_data_lims(sbs, obs_mode, no_sbs, trange)

    # Split the data, and the matching time range, into 10 chunks and plot
    # each chunk separately.
    nsplit = 10
    df_chunk = data_chunker(stokesI.data, nsplit)
    dts = trange.split(nsplit)
    plot_names = 'all_data_plots/Uranus_StokesI_normalised_'

    for i, chunk in enumerate(df_chunk):
        print(plot_names + str(i))
        ys = chunk.sum(axis=1)[::-1]  # frequency profile (flipped)
        xs = chunk.sum(axis=0)  # time profile
        _, xlims = get_data_lims(sbs, obs_mode, no_sbs, dts[i])
        plot_data(chunk.T, xs, ys, xlims, ylims, xlabel, ylabel,
                  plot_names + str(i), plot_title)
        plt.close()
        print('...next')
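data_chunker is a helper from the surrounding project and is not shown here. Assuming it simply splits the dynamic spectrum into nsplit roughly equal pieces along its time axis, mirroring the nsplit sub-ranges from trange.split(nsplit), a hypothetical equivalent is:

import numpy as np

def data_chunker(data, nsplit, time_axis=1):
    # Hypothetical stand-in for the project's helper: split the array
    # into nsplit roughly equal pieces along the time axis, so each
    # piece lines up with one sub-range from trange.split(nsplit).
    return np.array_split(data, nsplit, axis=time_axis)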
Example no. 3
# Only the first 78 subbands are usable, because of the way the subbands
# were set up for the Uranus observations.
rawdata.data = rawdata.data[:, :no_sbs]
# off-beam
rawoffdata = LofarRaw(fname=off_fname,
                      sbs=sbs,
                      obs_mode=obs_mode,
                      frange=frange)
rawoffdata.data = rawoffdata.data[:, :no_sbs]
# Stokes V
sV_data = LofarRaw(fname=sV, sbs=sbs, obs_mode=obs_mode, frange=frange)
sV_data.data = sV_data.data[:, :no_sbs]

df_chunk = ued.data_chunker(rawdata.data, nsplit)
off_chunk = ued.data_chunker(rawoffdata.data, nsplit)
sV_chunks = ued.data_chunker(sV_data.data, nsplit)
tchunks = trange.split(nsplit)

strings = ["StokesI", "StokesI_OFF", "StokesV"]
#strings = ["StokesV"]
for i, df in enumerate([df_chunk, off_chunk, sV_chunks]):
    #for i,df in enumerate([sV_chunks]):
    total_f_sum = []
    print("Analysing {}".format(strings[i]))
    for n, df_split in enumerate(df):
        print("Analysing Chunk #{}".format(n + 1))
        ylims, xlims = ued.get_data_lims(sbs, obs_mode, no_sbs, tchunks[n])
        print("Removing RFI")
        df_norfi = ued.resample_dataset(
            df_split, f=1220
        )  #resampled to 100 ms resolution to mask rfi shorter than this
        rfi_mask = edf.basic_filter(df_norfi, 4.)
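edf.basic_filter is also project code that is not reproduced here; from its call signature (data, 4.) it appears to build a simple threshold mask. A hypothetical sketch of such a sigma-threshold RFI mask:

import numpy as np

def basic_filter(data, nsigma):
    # Hypothetical sketch: flag every sample that deviates from the mean
    # by more than nsigma standard deviations as RFI.
    mu, sigma = np.mean(data), np.std(data)
    return np.abs(data - mu) > nsigma * sigma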