def group_by_filter(f_ext, object, filters=['I', 'R', 'V', 'B'], target_dir='Sorted', parent_dir=''):
    """
    Group per-image source csv files by photometric filter.

    Preconditions: each csv filename must end with its filter-type letter.

    Parameters
    ----------
    f_ext : str
        Directory containing the per-image csv files.
    object : str
        Name of the observed object; used to name the output files.
    filters : list of str, optional
        Filter-type letters to search for (default ['I', 'R', 'V', 'B']).
    target_dir : str, optional
        Name of the output directory to create (default 'Sorted').
    parent_dir : str, optional
        Directory under which target_dir is created (default '').

    Returns
    -------
    str
        Path of the directory the grouped csv files were written to.

    Postconditions: creates one csv file per filter type (named
    <object><filter>Filt.csv) combining the per-image csv files, with
    columns DataNum (matched by RA/Dec) and SourceFile.
    """
    mk_fldr(target_dir, parent_dir)
    write_location = os.path.join(parent_dir, target_dir)

    # Build the glob pattern with os.path.join instead of a hard-coded
    # backslash ('\*'), which only acts as a path separator on Windows.
    pattern = os.path.join(f_ext, '*{}.csv')

    # Combine all matching files into one table per filter.
    for filt in filters:  # 'filt' avoids shadowing the builtin 'filter'
        big_file = f_group(pattern.format(filt))

        # outputs table of located object's info in .csv format
        big_file.write(os.path.join(write_location, object + filt + 'Filt.csv'))

    return write_location
# Example #2
def avg_flux(location, target_dir='Averaged', parent_dir='', identColumn='DataNum', flux_name='flux', srcs=0):
    """
    Average the per-source measurements in each csv file and write the results.

    For every file matched by *location*, groups rows by their source
    identifier, averages position, flux, and shape columns for each source,
    and writes one '<name>(Avg).csv' table into the target directory.

    Parameters
    ----------
    location : str
        Glob pattern selecting the csv files to process.
    target_dir : str, optional
        Name of the output directory to create (default 'Averaged').
    parent_dir : str, optional
        Directory under which target_dir is created (default '').
    identColumn : str, optional
        Name of the column holding the per-source identifier (default 'DataNum').
    flux_name : str, optional
        Name of the flux column to average (default 'flux').
    srcs : int, optional
        Minimum number of matched rows (exclusive) a source must have to be
        kept (default 0, i.e. at least one row).

    Returns
    -------
    str
        Path of the directory the averaged csv files were written to.
    """
    mk_fldr(target_dir, parent_dir)
    write_location = os.path.join(parent_dir, target_dir)

    files = glob.glob(location)
    # Columns of the averaged output table.
    column_names = [identColumn, 'NumSources', 'AvgRA', 'AvgDec', 'AvgFlux', 'FluxErr', 'InstruMag', 'MaxPeak', 'a_Avg', 'b_Avg', 'thetaAvg']
    print(files)
    for file_path in files:
        print(file_path)
        # Fresh empty table for this file's averaged data.
        new_table = Table(names=column_names)

        # File name minus extension, reused for the output name.
        first_part, _ = os.path.splitext(os.path.basename(file_path))

        data = Table.read(file_path)

        # Iterate over every identifier actually present. The previous
        # while-loop counted up from 1 and stopped at the first gap in
        # the identifier sequence, silently dropping all later sources.
        for num in sorted(set(data[identColumn])):
            # Rows belonging to the current source.
            matches = (data[identColumn] == num)
            subset = data[matches]
            num_in_avg = len(subset)

            # Keep the source only if enough rows contributed.
            if num_in_avg > srcs:
                flux_avg = sum(subset[flux_name]) / num_in_avg
                max_peak = max(subset['peak'])
                ra_avg = sum(subset['RA']) / num_in_avg
                dec_avg = sum(subset['Dec']) / num_in_avg
                # Fractional errors: square, add, root, divide, then scale
                # back by the average flux. Uses flux_name here too — the
                # old code hard-coded 'flux', breaking custom flux columns.
                frac_errs = sum((subset['FluxErr'] / subset[flux_name]) ** 2)
                flux_err = (sqrt(frac_errs) / num_in_avg) * flux_avg
                inst_mag = -2.5 * log10(flux_avg)

                a_avg = sum(subset['a']) / num_in_avg
                b_avg = sum(subset['b']) / num_in_avg
                theta_avg = sum(subset['theta']) / num_in_avg

                # Append the averaged row for this source.
                row = [num, num_in_avg, ra_avg, dec_avg, flux_avg, flux_err, inst_mag, max_peak, a_avg, b_avg, theta_avg]
                new_table.add_row(row)

        # Write this file's averaged table out to disk.
        new_table.write(os.path.join(write_location, first_part + '(Avg).csv'))

    return write_location