Example #1
# loop over the query results and prep each image
ifile = 1
for index, row in query_result.iterrows():
    # row = query_result.iloc[100,]

    # Create file path to .fits
    raw_data_file = os.path.join(App.RAW_DATA_HOME, row.fname_raw)
    # prep the image
    print('')
    print('---------------------------------------------------------')
    print(f'Working on file {ifile} of {len(query_result)}')
    print('---------------------------------------------------------')
    print(f'  query row index:  {index}')
    print(f'  database data_id:  {row.data_id}')
    print('  Raw File:  ' + raw_data_file)
    subdir, fname, los = prep.prep_euv_image(
        raw_data_file, App.PROCESSED_DATA_HOME, write=write, idl_session=idl_session, deconvolve=deconvolve)

    # return a relative filename for the hdf
    hdf_rel_path = os.path.join(subdir, fname)
    hdf_full_path = os.path.join(App.PROCESSED_DATA_HOME, subdir, fname)

    # update DB to reflect the new filename
    print('  Committing processed path to database: ' + hdf_rel_path)
    db_session = update_image_val(db_session, row, "fname_hdf", hdf_rel_path)

    ifile = ifile + 1

# close the IDL session
if idl_session is not None:
    idl_session.end()

db_session.close()
Example #2
    # now cause an exception to stop the script
    raise KeyboardInterrupt

# build a list of bad images (IDs can be set manually, as done here, or built from a query; see the sketch below).
bad_images = [126544]
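# Or, as a sketch, build the list from an earlier query instead of typing IDs by hand
# (assumes a DataFrame named `query_result`, e.g. returned by query_euv_images, which is not
# defined in this snippet):
# bad_images = query_result.image_id.tolist()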

# set the flag that you'll apply to these images (assume that you are flagging similar types)
flag = -3
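
# `columns` is used in the confirmation printouts below but is not defined in this snippet;
# a plausible placeholder (using column names that appear elsewhere in these examples) is:
columns = ['image_id', 'date_obs', 'instrument', 'flag']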

# Confirm that you actually want to do this
query_full = pd.read_sql(
    db_session.query(EUV_Images).filter(
        EUV_Images.image_id.in_(tuple(bad_images))).statement, db_session.bind)

print(f'\n### Will apply a new flag value of {flag} to the following images:')
print_full_dataframe(query_full[columns])

if input(f"\n  Do you want to continue? [y/n]") == "y":
    # loop over image ids one by one since this is how update_image_val works
    for image_id in bad_images:
        query_out = pd.read_sql(
            db_session.query(EUV_Images).filter(
                EUV_Images.image_id == image_id).statement, db_session.bind)
        print(
            f'  Flagging image: {query_out[columns].to_string(header=False, index=False)}'
        )
        update_image_val(db_session, query_out, 'flag', flag)

else:
    print('  Aborted!')
Example #3
# loop through the bad-image lists and plot each image for manual review
for inst_index, instrument in enumerate(inst_list):
    if len(bad_image_lists[inst_index]) > 0:
        # query images
        query_pd = pd.read_sql(
            db_session.query(db_class.EUV_Images, db_class.Data_Files).filter(
                db_class.EUV_Images.data_id.in_(bad_image_lists[inst_index]),
                db_class.Data_Files.data_id ==
                db_class.EUV_Images.data_id).order_by(
                    db_class.EUV_Images.date_obs).statement, db_session.bind)
        # remove duplicate columns
        query_pd = query_pd.loc[:, ~query_pd.columns.duplicated()]

        n_images = len(bad_image_lists[inst_index])
        for im_num, row in query_pd.iterrows():
            full_path = os.path.join(hdf_data_dir, row.fname_hdf)
            print("Plotting", instrument, im_num + 1, "of", n_images, "-",
                  row.date_obs)
            bad_im = psi_d_types.read_los_image(full_path)
            EasyPlot.PlotImage(bad_im, nfig=0)
            plt.waitforbuttonpress()
            plt.close(0)

# loop through the bad-image lists and change the flag in the database
for inst_index, instrument in enumerate(inst_list):
    # query images
    query_pd = pd.read_sql(
        db_session.query(db_class.EUV_Images, db_class.Data_Files).filter(
            db_class.EUV_Images.data_id.in_(bad_image_lists[inst_index]),
            db_class.Data_Files.data_id ==
            db_class.EUV_Images.data_id).order_by(
                db_class.EUV_Images.date_obs).statement, db_session.bind)
    # remove duplicate columns
    query_pd = query_pd.loc[:, ~query_pd.columns.duplicated()]
    for index, row in query_pd.iterrows():
        db_session = db_funs.update_image_val(db_session, row, 'flag', -1)

db_session.close()
Example #4

# query the database for EUV images
test_pd = query_euv_images(db_session=db_session,
                           wavelength=wavelength,
                           instrument=instrument)
print(test_pd)

# update_image_val function:
# currently requires a pandas Series (e.g. a single row of the query_euv_images return) to
# index into the DB. This helps ensure there is no confusion about which row of the
# DB is to be updated.

# generate hdf file using some function like:
image_to_convert = test_pd.iloc[0]
hd_fname = pdseries_tohdf(image_to_convert)
#hd_fname = "2014/04/13/sta_euvi_20140413T183530_195.hdf5"
# update database with file location
db_session = update_image_val(db_session=db_session,
                              raw_series=test_pd.iloc[0],
                              col_name="fname_hdf",
                              new_val=hd_fname)

# also flag this image as 1 - 'verified good' (made-up example)
image_flag = 1
# update database with the new flag value
db_session = update_image_val(db_session=db_session,
                              raw_series=test_pd.iloc[0],
                              col_name="flag",
                              new_val=image_flag)

# remove_euv_image function:
# removes the files and then the corresponding DB row
# this works, but it is commented out because it will only work once
#exit_status, db_session = remove_euv_image(db_session=db_session, raw_series=test_pd.iloc[0], raw_dir=raw_data_dir, hdf_dir=hdf_data_dir)
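
# A commented-out sketch of guarding that call with a confirmation prompt, in the spirit of
# Example #2 (assumes the same db_session, raw_data_dir, and hdf_data_dir as above):
# if input("  Really remove this image and its files? [y/n] ") == "y":
#     exit_status, db_session = remove_euv_image(db_session=db_session, raw_series=test_pd.iloc[0],
#                                                raw_dir=raw_data_dir, hdf_dir=hdf_data_dir)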
Example #5
# first image (AIA)
query_min = datetime.datetime(2020, 7, 30, 15, 0, 0)
query_max = datetime.datetime(2020, 7, 30, 17, 0, 0)
instrument = [
    "AIA",
]

image_pd = db_funs.query_euv_images(db_session,
                                    time_min=query_min,
                                    time_max=query_max,
                                    instrument=instrument)

delete_path = os.path.join(hdf_data_dir, image_pd.fname_hdf.iloc[0])
os.remove(delete_path)

db_session = db_funs.update_image_val(db_session, image_pd.iloc[0],
                                      "fname_hdf", "")

# other two images (EUVI-A)
query_min = datetime.datetime(2020, 7, 30, 13, 0, 0)
query_max = datetime.datetime(2020, 7, 30, 17, 0, 0)
instrument = [
    "EUVI-A",
]

image_pd = db_funs.query_euv_images(db_session,
                                    time_min=query_min,
                                    time_max=query_max,
                                    instrument=instrument)

for index, row in image_pd.iterrows():
    delete_path = os.path.join(hdf_data_dir, row.fname_hdf)
    os.remove(delete_path)
    db_session = db_funs.update_image_val(db_session, row, "fname_hdf", "")