def write_master_list(parent_dir, version):
    """Writes a master data lis in the parent directory for batch mode.

    Args:
        parent_dir (string): path of the parent directory
        version (string): the version of the software (for naming output file)

    """

    dir_list_path = parent_dir + '/dir_list.dat'
    subdir_list = [_[0] for _ in uf.read_file(dir_list_path)][1:]

    master_data = []
    for i, sub_dir in enumerate(subdir_list):
        data = uf.read_file(parent_dir + '/' + sub_dir + '/ls_data/ls_data.dat')
        # keep the header row from the first file only; skip it in later files
        master_data.extend(data if i == 0 else data[1:])

    # print master_data
    uf.save_data_array(master_data,
                       parent_dir + '/master_list_v%s.dat' % version)
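
# Minimal usage sketch (hypothetical batch directory; the version string '5'
# matches the one set in main() further down):
write_master_list('./test_data', '5')
# -> writes ./test_data/master_list_v5.dat from every <sub_dir>/ls_data/ls_data.dat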
Code example #2
    def __run_test(self, filename, output_directory):
        """
        Runs recorded data through the algorithm offline: parses the file and feeds it
        line by line to the algorithm, which reacts as if the data were being collected
        in real time. Used in testing mode, since it can be run without hardware to see
        how the algorithm behaves.
        Args:
            filename: name of recorded data to run through, defaults to latest file
            output_directory: relative path to output_directory folder
        Returns:
            None, but stores the logs of the test in a file in output_directory
        Example:
            > self.__run_test('data_to_load', '../Output_data')
        """

        # Read old data
        print(
            '\n\033[1mUsing test mode, will apply algorithm to data from file {}\033[0m\n'
            .format(filename))
        data = read_file(output_directory + filename)

        self.all_data = self.initialize_all_data()

        # some functions depend on sampling period, therefore extract correct
        # period and place into algorithm data so that it can be passed through
        average_cycle_time = numpy.mean(numpy.diff(data['time']))
        for algorithm in self.order:
            algorithm['period'] = average_cycle_time

        switch = 'switch'
        for i in range(len(data)):
            algo = self.algo_name

            # Build the current row from the recorded values, dropping the stored
            # position and algorithm columns, since those are what the test regenerates
            row_no_pos_algo = list(data[i])[:-2]
            current_values = convert_list_dict(row_no_pos_algo +
                                               [algo, self.position])

            try:
                switch = self.__run_algorithm(switch, current_values)
            except AlgorithmFinished:
                print('\n\033[1mAlgorithm finished, stopping now\033[0m\n')
                break

        # Loaded filenames end in ' Org'; strip that and store the logs as ' Tst'
        store(filename[:-4] + ' Tst', self.all_data)
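
# The sampling-period step above is just the mean spacing of the recorded
# timestamps; a toy check with synthetic values (not project data):
import numpy as np
time = np.array([0.00, 0.02, 0.04, 0.07, 0.09])  # toy timestamps in seconds
print(np.mean(np.diff(time)))  # 0.0225 -> the average cycle time given to each algorithm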
Code example #3

def load_ls(ls_path, px_size=1.):
    """Loads a linescan file

    Args:
        ls_path (str): path of the average linescan file to be loaded
        px_size (float): pixel size in microns

    Returns:
        x (numpy array): the positions (in microns)
        i (numpy array): the intensities

    """

    ls_data = uf.read_file(ls_path)
    x = np.array([float(_[0]) for _ in ls_data]) * px_size
    i = np.array([float(_[1]) for _ in ls_data])
    return x, i
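
# Quick usage sketch (the path and pixel size are illustrative only):
x, i = load_ls('./ls_data/ls_data.dat', px_size=0.1)
print(x[:5])  # positions in microns
print(i[:5])  # corresponding intensities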
Code example #4
    def __run_test(self, filename, output_directory):
        """
        Runs the interface with a fake robot and fake encoders.

        Args:
            filename: name of the file containing previous data to run the interface on
            output_directory: name of the directory containing the data file

        """

        # Read old data
        print(
            '\n\033[1mUsing test mode, will apply algorithm to data from file {}\033[0m\n'
            .format(filename))
        data = read_file(output_directory + filename)

        self.all_data = self.initialize_all_data()

        # some functions depend on sampling period, therefore extract correct
        # period and place into algorithm data so that it can be passed through
        average_cycle_time = numpy.mean(numpy.diff(data['time']))
        for algorithm in self.order:
            algorithm['period'] = average_cycle_time

        switch = 'switch'
        for i in range(len(data)):

            algo = self.algo_name

            # Build the current row from the recorded values, dropping the stored
            # position and algorithm columns, since those are what the test regenerates
            row_no_pos_algo = list(data[i])[:-2]
            current_values = convert_list_dict(row_no_pos_algo +
                                               [algo, self.position])

            try:
                switch = self.__run_algorithm(switch, current_values)
            except AlgorithmFinished:
                print('\n\033[1mAlgorithm finished, stopping now\033[0m\n')
                break

        # Loaded filenames end in ' Org'; strip that and store the logs as ' Tst'
        store(filename[:-4] + ' Tst.txt', self.all_data)
Code example #5
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import find_peaks
from sys import path
path.insert(0, '..')
from graph_functions import *
from utility_functions import read_file

fig, ax = plt.subplots(1)
ax = format_graph(ax)

files = ['Quarter Period', 'Angular Velocity', 'Max Angle']
files = ['Quarter Period']  # overrides the list above to run a single file

for filename in files:
    data = read_file('../Output_data/' + filename)

    be = data['be']
    t = data['time']
    algorithm = data['algo']
    change_indexes = shade_background_based_on_algorithm(t,
                                                         algorithm,
                                                         plot=False)

    t = t[change_indexes[0]:]
    be = be[change_indexes[0]:]

    t = t[be > 0]
    be = be[be > 0]

    max_indexes = find_peaks(be)[0]
    be = be[max_indexes]
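
# For reference, a self-contained sketch of how scipy.signal.find_peaks picks
# out local maxima like the ones used above (synthetic signal, not project data):
import numpy as np
from scipy.signal import find_peaks

t_demo = np.linspace(0, 10, 500)
sig = np.sin(2 * np.pi * 0.5 * t_demo)  # toy oscillation with a 2 s period
peak_idx = find_peaks(sig)[0]           # indices of the local maxima
print(t_demo[peak_idx])                 # peak times, roughly every 2 s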
Code example #6
fig, ax = plt.subplots(1, 1, figsize=(13, 8))

ax = format_graph(ax)

information = OrderedDict()

for offset in sorted(files_offsets.keys()):
    print('Offset: {:.2f}'.format(offset))
    information[offset] = {
        'gradients': [],
        'periods': [],
    }

    for i, filename in enumerate(files_offsets[offset]):
        if filename != '':
            data = read_file(output_data_directory + filename)
            time = data['time']
            be = data['be']
            algorithm = data['algo']

            for pos in range(2):
                # find indexes where algorithm changes
                algorithm_change_indexes = np.append(
                    np.where(algorithm[:-1] != algorithm[1:])[0],
                    np.array(len(algorithm) - 1))

                # only take data after has switched to quarter period
                time = time[algorithm_change_indexes[1]:]
                be = be[algorithm_change_indexes[1]:]
                window = 21
Code example #7
def plot_maxima_curve(filename, plot=True):
    angles = read_file(filename)
    time = angles['time']
    be = angles['be']
    algo = angles['algo']

    algo_change_indexes = shade_background_based_on_algorithm(time,
                                                              algo,
                                                              plot=False)

    # only take values from when algorithm is switched to increasing
    time = time[algo_change_indexes[1]:]
    be = be[algo_change_indexes[1]:]

    # filter out a spurious spike that goes unrealistically high
    time = time[be < 30]
    be = be[be < 30]
    # likewise drop readings that fall below the useful range
    time = time[be > 1]
    be = be[be > 1]

    # locate the maxima after smoothing with a moving average
    window_size = 5
    avg_be = np.array(moving_average(be, window_size=window_size))
    angle_max_index = (np.diff(np.sign(np.diff(avg_be))) <
                       0).nonzero()[0] + 1 + (window_size - 1) // 2

    # take times and values at maximas
    time = time[angle_max_index]
    be = be[angle_max_index]

    # Some specific filtering for the rotational datafile
    if filename == '../Output_data/Rotational No Masses 400secs':
        filtered_time, filtered_be = [], []
        for time_, be_ in zip(time, be):
            # print time_, be_
            if time_ <= 165 or time_ >= 185:
                filtered_time.append(time_)
                filtered_be.append(be_)
            elif 165 < time_ < 185:
                if be_ > 8.4:
                    filtered_time.append(time_)
                    filtered_be.append(be_)
            else:
                print "Doesn't fit category", time_, be_
        time, be = filtered_time, filtered_be

    # Remove as many weird spikes as possible
    filtered_time, filtered_be = [], []
    for time_, be_ in zip(time, be):
        if time_ <= 260 or 400 <= time_ <= 600 or time_ >= 700:
            filtered_time.append(time_)
            filtered_be.append(be_)
        elif 260 < time_ < 400:
            if be_ > 6.8:
                filtered_time.append(time_)
                filtered_be.append(be_)
        elif 600 < time_ < 700:
            if be_ > 17.5:
                filtered_time.append(time_)
                filtered_be.append(be_)
        else:
            print "Doesn't fit category", time_, be_

    # convert to numpy arrays; the indexing/arithmetic below fails on plain lists
    time, be = np.array(filtered_time), np.array(filtered_be)
    # centre them so they both start the plot at zero
    print('Time offset for file {}: {}s'.format(filename, time[0]))
    time -= time[0]

    label = get_name(filename)[:-1]
    if label == 'Rotational No Masses 400secs':
        label = 'Quarter Period'
    if plot:
        # plot against each other
        plt.plot(time, be, label=label)
    return time, be
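
# The sign-of-differences trick in plot_maxima_curve is compact; a standalone
# sketch with toy values shows exactly what it computes:
import numpy as np

y = np.array([0., 1., 3., 2., 1., 2., 4., 3.])
# the sign of the slope flips from +1 to -1 exactly at each local maximum
max_idx = (np.diff(np.sign(np.diff(y))) < 0).nonzero()[0] + 1
print(max_idx)  # [2 6] -> the peaks y[2] == 3.0 and y[6] == 4.0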
Code example #8
import matplotlib.pyplot as plt
import numpy as np
from sys import path
path.insert(0, '..')  # make the parent-directory modules importable first
from graph_functions import *
from utility_functions import read_file

output_data = '../Output_data/'
filenames = ['Box Masses', 'Box No Masses']

for filename in filenames:
    data = read_file(output_data + filename)

    t = data['time']
    be = data['be']

    plt.plot(t, be, label=filename)

plt.legend(loc='best')
plt.show()
Code example #9
files_to_compare = [latest_filename]
fits = ['linear']

fit_dict = {
    'linear': linear_fit
}

# setup figure
fig, ax = plt.subplots(1, 1, figsize=(13, 8))
ax = format_graph(ax)

for i, _file in enumerate(files_to_compare):
    # collect the data
    angles = read_file(output_data_directory + _file)

    # Extract data
    t = angles['time']
    be = angles['be']
    algorithm = angles['algo']

    # find indexes where the algorithm changes
    algo_change_indexes = algorithm_change_indexes(algorithm)
    # [0] is first algorithm (start), [1] is second (increasing)
    increase_index = algo_change_indexes[1]

    # only take time from switch point, change time relative to this point
    t = t[increase_index:] - t[increase_index]
    # only take encoder values from this point
    be = be[increase_index:]
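
# Presumably the fit_dict entry is then handed to scipy.optimize.curve_fit; a
# hedged sketch of that step on synthetic data (the linear_fit signature is an
# assumption based on code example #15):
import numpy as np
from scipy.optimize import curve_fit

def linear_fit(n, m, c):  # assumed signature: gradient m, intercept c
    return m * n + c

t_demo = np.linspace(0, 10, 50)                                        # toy time axis
be_demo = 2.0 * t_demo + 1.0 + np.random.normal(0, 0.1, t_demo.size)  # toy encoder values
popt, pcov = curve_fit(linear_fit, t_demo, be_demo)
print('gradient {:.2f}, intercept {:.2f}'.format(*popt))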
Code example #10
from sys import path
path.append("..")

import numpy as np
from scipy.signal import find_peaks
from scipy.optimize import curve_fit
from matplotlib import pyplot as plt
from utility_functions import read_file
from graph_functions import format_graph

#parametric = read_file("../Output_data/Parametric No Masses 800secs")['be']
#rotational = read_file("../Output_data/Rotational No Masses 400secs")['be']

parametric = read_file("../Output_data/26-03-2019 18:17:02 Para 600 secs Org")
rotational = read_file("../Output_data/26-03-2019 18:30:45 Rot 600 secs Org")

para_dt = np.mean(np.diff(parametric['time']))
rot_dt = np.mean(np.diff(rotational['time']))

index_para = find_peaks(parametric['be'])[0]
index_rot = find_peaks(rotational['be'])[0]

peaks_para = abs(parametric['be'][index_para])
peaks_rot = abs(rotational['be'][index_rot])

times_para = parametric['time'][index_para]
times_rot = abs(rotational['time'][index_rot])

##### George's magical filter.
be = peaks_para
t = times_para
Code example #11
def get_name(location):
    # Reconstructed function head (the snippet begins mid-function): walk
    # backwards through the path, collecting characters until the last '/'.
    result = ''
    for i in range(1, len(location) + 1):
        if location[-i] == "/":
            break

        result = location[-i] + result

    return result


files_to_compare = get_files(output_data_directory)

fig, ax = plt.subplots(1, 1, figsize=(8, 6))
ax = format_graph(ax)
ax.set_facecolor('#eeeeee')

for each_file in files_to_compare:
    angles = read_file(each_file)
    #angles = convert_read_numpy(angles) # Not sure if this should be here, it seems it was removed since my last pull.
    time = angles['time']
    be = angles['be']
    angle_max_index = (np.diff(np.sign(np.diff(be))) < 0).nonzero()[0] + 1

    be = be[time > 12.06]
    time = time[time > 12.06]

    print(each_file)
    if each_file == '../Output_data/Decrease Parametric':
        label = 'Parametric Decrease'
        be -= 1.2
    elif each_file == '../Output_data/decreasingdamping':
        label = 'Natural Damping'
    # true_max = time[angle_max_index][5]
Code example #12

def main():
    """__main__ function"""

    version = '5'

    #set up root for asking questions
    # root = Tk() #moved this up to the imports
    root.withdraw()

    #chooses analysis mode
    mode = askinteger(
        title="Analysis Mode Selection",
        prompt=
        "Please enter:\n1 for pairwise analysis or\n2 for batch analysis",
        minvalue=1,
        maxvalue=2)

    if mode == 1:

        ch1_path = askopenfilename(
            title='Select an average linescan file for channel 1',
            filetypes=[("dat", "*.dat")],
            initialdir='.',
            initialfile="")

        ch2_path = askopenfilename(
            title='Select an average linescan file for channel 2',
            filetypes=[("dat", "*.dat")],
            initialdir='/'.join(ch1_path.split('/')[:-1]),
            initialfile=ch1_path.split('/')[-1])

        px_size = askfloat(title='Pixel Size',
                           prompt='Please enter your pixel size')
        ch_actin = askinteger(title='Actin Channel',
                              prompt='Please enter the actin channel',
                              minvalue=1,
                              maxvalue=2)
        sigma_actin = askfloat(
            title='Actin Sigma',
            prompt=
            'Please enter the sigma value\nfor the PSF for the actin channel\n(in microns)'
        )

        analyze_ls_pair(ch1_path, ch2_path, px_size, ch_actin, sigma_actin,
                        version)

    if mode == 2:

        parent_dir = askdirectory(
            title=
            'Select the parent directory (be sure it contains dir_list.dat!)',
            initialdir=os.path.split(os.path.realpath(__file__))[0])
        # parent_dir = './test_data'
        dir_list = uf.get_dict_list(uf.read_file(parent_dir + '/dir_list.dat'))

        for line in dir_list:

            sub_dir = line['sub_dir']
            px_size = float(line['px_size'])
            category = line['category']
            ch_actin = int(line['ch_actin'])
            sigma_actin = float(line['sigma_actin'])
            data_dir = parent_dir + '/' + sub_dir

            print(data_dir)

            analyze_dir(data_dir, px_size, category, ch_actin, sigma_actin,
                        version)

        write_master_list(parent_dir, version)
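
# For batch mode, a hypothetical dir_list.dat consistent with the keys read
# above (uf.get_dict_list evidently maps a header row onto each data row; the
# delimiter and the values below are assumptions):
#
#   sub_dir    px_size  category  ch_actin  sigma_actin
#   embryo_01  0.1      control   1         0.25
#   embryo_02  0.1      mutant    2         0.25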
Code example #13
        clear_file(FLAGGED_SNR_FILE)
        clear_file(GOOD_FIT_FILE)
        clear_file(ORIGINAL_FILE)
        clear_file(UNFLAGGED_FILE)

        append_row_to_csv(FLAGGED_ABSORPTION_FILE, fields)
        append_row_to_csv(FLAGGED_BAD_FIT_FILE, fields)
        append_row_to_csv(FLAGGED_SNR_FILE, fields_snr)
        append_row_to_csv(GOOD_FIT_FILE, fields)
        append_row_to_csv(ORIGINAL_FILE, fields)
        append_row_to_csv(UNFLAGGED_FILE, fields)

    clear_file(LOG_FILE)
    clear_file(LOG_NO_LOW_SNR_FILE)

redshift_value_list, snr_value_list, spectra_list = read_file(CONFIG_FILE)

indices, spectra_indices, processed_spectra_file_names, powerlaw_final_b_values, powerlaw_final_c_values = [], [], [], [], []
flagged_indices, flagged_spectra_indices, flagged_spectra_file_names = [], [], []
flagged_snr_indices, flagged_snr_spectra_indices, flagged_snr_spectra_file_names, flagged_snr_in_ehvo_values = [], [], [], []

for spectra_index in range(STARTS_FROM, ENDS_AT + 1):
    z = round(redshift_value_list[spectra_index - 1], 5)
    snr = round(snr_value_list[spectra_index - 1], 5)
    current_spectrum_file_name = spectra_list[spectra_index - 1]

    print(str(spectra_index) + ": " + current_spectrum_file_name)
    print_to_file(
        str(spectra_index) + ": " + current_spectrum_file_name, LOG_FILE)

    current_spectra_data = np.loadtxt(SPEC_DIREC + current_spectrum_file_name)
Code example #14
File: DR16_redshift_plots.py  Project: paolaUWB/DR16Q
# infoDR16_all = sys.argv[1] if len(sys.argv) > 1 else "DR16_sorted_norm.csv"
# infoDR16_snr = sys.argv[1] if len(sys.argv) > 1 else os.getcwd() + "/OUTPUT_FILES/NORMALIZATION/parent_sample.csv" ### ALL CASES WITH SNR > 10
infoDR16_parent = sys.argv[1] if len(
    sys.argv) > 1 else "DR16_parent_sample.csv"
#infoDR16_BAL = sys.argv[1] if len(sys.argv) > 1 else "DR16_BAL.csv"
#infoDR16_EHVO = sys.argv[1] if len(sys.argv) > 1 else "DR16_EHVO.csv"

#spectra_list, zem, calc_snr_list = read_list_spectra(infoDR16_snr, ["NORM SPECTRA FILE NAME", "REDSHIFT", "CALCULATED SNR"])
# zem_orig, snr, spectra_name = read_file(infoDR16)
zem = np.zeros(specnum1 - 1)  ## Redshift
zem_EHVO = []  ## EHVO Redshift
zem_BAL = []  ## BAL Redshift

# zem, snr, spectra_list = read_file(infoDR16_snr)
# zem_all, snr_all, spectra_list_all = read_file(infoDR16_all)
zem, snr, spectra_list = read_file(infoDR16_parent)
#zem_BAL, snr_BAL, spectra_list_BAL = read_file(infoDR16_BAL)
#zem_EHVO, snr_EHVO, spectra_list_EHVO = read_file(infoDR16_EHVO)

## Read EHVO
# for m in range(0,specnum3):
#     ee=infoEHVO[m]
#     columns3=ee.split()

#     QUASARNAME_EHVO.append(str(columns3[0]))
#     PLATE_inEHVO.append(int(columns3[1]))
#     MJD_inEHVO.append(int(columns3[2]))
#     FIBERID_inEHVO.append(int(columns3[3]))

#     vmax_inEHVO.append(int(columns3[4]))
#     vmin_inEHVO.append(int(columns3[5]))
Code example #15
def fit_linear(n, m, c):
    # Reconstructed function head (the snippet begins mid-function); the
    # name is assumed by analogy with fit_parametric below.
    # Linear.
    return m * n + c


def fit_parametric(n, m, c, q, k):
    # Linear then exponential.
    # y(n<q) = mn+c
    # y(n>=q) = (mq+c) e^k(n-q)
    lin = lambda n: m * n + c
    exp = lambda n: (m * q + c) * np.exp(k * (n - q))
    ans = np.piecewise(n, [n < q, n >= q], [lin, exp])
    # print(ans)  # debug output; noisy when called repeatedly by curve_fit
    return ans  # np.piecewise returns an array; float(ans) would fail for array input
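
# A hedged sketch of fitting the piecewise model with curve_fit on synthetic
# data (the initial guesses for m, c, q, k are illustrative; plot_maxima_curve
# below accepts such a fit_func and p0):
import numpy as np
from scipy.optimize import curve_fit

n_demo = np.linspace(0.0, 100.0, 200)
y_demo = fit_parametric(n_demo, 0.1, 1.0, 50.0, 0.02)  # linear then exponential
popt, pcov = curve_fit(fit_parametric, n_demo, y_demo, p0=[0.1, 1.0, 50.0, 0.01])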


para_angles = read_file(files_to_compare[1])['be']
rot_angles = read_file(files_to_compare[0])['be']


def plot_maxima_curve(filename, plot=True, fit_func=None, p0=[]):
    angles = read_file(filename)
    time = angles['time']
    be = angles['be']
    algo = angles['algo']

    algo_change_indexes = shade_background_based_on_algorithm(time,
                                                              algo,
                                                              plot=False)

    # only take values from when algorithm is switched to increasing
    time = time[algo_change_indexes[1]:]