Beispiel #1
0
def parse(datafilename, patterns):
    """Load a CTX data file and return the per-sample sum of selected count rates.

    Parameters
    ----------
    datafilename : str
        Path to the .ctx file to load.
    patterns : iterable of str
        Coincidence-pattern strings (e.g. '**', 'MN') passed to
        parse_coincidence_pattern for each raw count-rate record.

    Returns
    -------
    numpy.ndarray
        1-D array with one entry per count-rate record: the sum over the
        requested patterns (axis=1 of the samples-by-patterns table).
    """
    # Open the CTX file for reading.
    ctxfile = ctx(datafilename)

    # Pull out every count-rate record in the file.
    all_counts = list(ctxfile.stream('count_rates'))

    # Build a (samples x patterns) table of the count rates we care about.
    # NOTE: a nested comprehension is used instead of np.array(map(...)) —
    # on Python 3 map() returns an iterator, which np.array would wrap as a
    # useless 0-d object array. The comprehension works on both Py2 and Py3.
    interesting_counts_table = np.array(
        [[parse_coincidence_pattern(pattern, data) for pattern in patterns]
         for data in all_counts])

    # Sum across patterns for each sample; as a numpy array we can also
    # integrate, average, etc. downstream.
    # (Debug prints and the blocking raw_input() pause were removed — they
    # made this function unusable in non-interactive pipelines.)
    fit_data = np.sum(interesting_counts_table, axis=1)
    return fit_data
Beispiel #2
0
def parse(datafilename, patterns):
    """Load a CTX data file and return the per-sample sum of selected count rates.

    Parameters
    ----------
    datafilename : str
        Path to the .ctx file to load.
    patterns : iterable of str
        Coincidence-pattern strings (e.g. '**', 'MN') passed to
        parse_coincidence_pattern for each raw count-rate record.

    Returns
    -------
    numpy.ndarray
        1-D array with one entry per count-rate record: the sum over the
        requested patterns (axis=1 of the samples-by-patterns table).
    """
    # Open the CTX file for reading.
    ctxfile = ctx(datafilename)

    # Pull out every count-rate record in the file.
    all_counts = list(ctxfile.stream('count_rates'))

    # Build a (samples x patterns) table of the count rates we care about.
    # NOTE: a nested comprehension is used instead of np.array(map(...)) —
    # on Python 3 map() returns an iterator, which np.array would wrap as a
    # useless 0-d object array. The comprehension works on both Py2 and Py3.
    interesting_counts_table = np.array(
        [[parse_coincidence_pattern(pattern, data) for pattern in patterns]
         for data in all_counts])

    # Sum across patterns for each sample; as a numpy array we can also
    # integrate, average, etc. downstream.
    # (Debug prints and the blocking raw_input() pause were removed — they
    # made this function unusable in non-interactive pipelines.)
    fit_data = np.sum(interesting_counts_table, axis=1)
    return fit_data
Beispiel #3
0
# Demo script: open the most recent CTX file and print some of its contents.
# NOTE(review): `all_files`, `ctx`, and `pattern_parser` are assumed to be
# defined earlier in the file (outside this view) — confirm before reuse.
filename=all_files[-1]

# Load up a file for reading
my_file=ctx(filename)

# What do we know about it already?  (relies on the ctx object's __str__/__repr__)
print my_file


###############################
# Simple reading of data
###############################
# Read out the count-rate records one by one
for data in my_file.stream('count_rates'):
    print 'Raw data:', data
    # '**' is a wildcard pattern — presumably the total of all twofold
    # coincidences; verify against pattern_parser's documentation.
    twofolds = pattern_parser.parse_coincidence_pattern('**', data)
    print 'Total twofolds (**):', twofolds

# Read out the position records
for data in my_file.stream('position'):
    print 'Position:', data


###############################
# A different approach --- good for dips
###############################

# Define a lambda function "get_mn", which computes the "MN" count rate from
# some raw data. This could equally be a named ("deffed") function.
get_mn=lambda data: pattern_parser.parse_coincidence_pattern('MN', data)