# Example no. 1 (0 votes) — scraped-snippet separator
def adjacent_spread(mode='backward'):
    """Group the PCA coordinates adjacent to each syllable and spread them.

    Walks consecutive row pairs of the PCA table; for each current row the
    coordinates of its neighbour (previous row for 'backward', following
    row for 'forward') are collected under the current row's target label,
    and every group is reduced with spread.spread().

    NOTE(review): relies on module-level helpers `spca` and `spread` that
    are not visible in this chunk.
    """
    pca_data = spca.get_pca()
    out_dct = {label: [] for label in set(pca_data['target'])}
    total = len(pca_data)
    for j in range(1, total):
        i = j - 1
        print('%s of %s' % (j, total))
        row = list(pca_data.loc[j])
        syl = row[-1]
        if mode == 'backward':
            out_dct[syl].append(list(pca_data.loc[i])[:-1])
        if mode == 'forward':
            try:
                out_dct[syl].append(list(pca_data.loc[i + 1])[:-1])
            except BaseException:
                pass
    return {key: spread.spread(value) for key, value in out_dct.items()}
# Example no. 2 (0 votes) — scraped-snippet separator
# curve1 + curve2 + curve3 + line1
# plt.show()
## function to create pairs of iterable elevations
def pairwise(iterable):
    """s -> (s0,s1), (s1,s2), (s2,s3), ...

    Yield overlapping consecutive pairs of *iterable* (the classic
    itertools recipe).  Uses the builtin ``zip`` instead of
    ``itertools.izip``: ``izip`` was removed in Python 3, and in
    Python 2 ``zip`` is a drop-in (eager) replacement for the uses here.
    """
    a, b = itertools.tee(iterable)
    next(b, None)  # advance the second iterator by one element
    return zip(a, b)


# Check-dam height in metres.
check_dam_height = 0.70
# Number of 5 cm stage intervals spanning the dam height.
# NOTE(review): float division here, int() truncation below — confirm the
# intended interval count survives floating-point rounding.
no_of_stage_interval = check_dam_height / .05
# Series of stage values from 0 m up to the dam height.
# NOTE(review): `spread` and `df` are module-level names defined elsewhere
# in the file; the semantics of mode=3 are not visible in this chunk.
dz = list((spread(0.00, check_dam_height, int(no_of_stage_interval),
                  mode=3)))  # dz = stage
# Accumulator for the per-stage results.
results_1 = []
print(len(dz), len(df.Y1))
# For every 5 cm stage value:
for z in dz:
    # reset the accumulated water area for this stage
    water_area_1 = 0
    # walk consecutive elevation pairs (see pairwise() above)
    for y1, y2 in pairwise(df.Y1):
        # elevation step for each of the ten 10 cm strips between y1 and y2
        delev = (y2 - y1) / 10
        # running elevation, starting at the first point of the pair
        elev = y1
        # NOTE(review): the body of this loop is truncated at the example
        # boundary below; the remainder is not visible in this chunk.
# Example no. 3 (0 votes) — scraped-snippet separator
            # NOTE(review): this fragment is the tail of a function whose
            # `def` line lies outside this chunk (apparently the
            # `calcvolume(...)` invoked below) — read with that caveat.
            # elevation step for each of the ten 10 cm strips
            delev = (y2 - y1) / 10
            elev = y1
            for b in range(1, 11, 1):
                elev += delev
                # accumulate submerged cross-section area where stage z
                # is above the strip elevation
                if z > elev:
                    water_area += (0.1 * (z-elev))
        # cross-section area times strip width -> volume slice for stage z
        calc_vol = water_area * dy
        results.append(calc_vol)
    # one volume column per profile order in the shared `output` DataFrame
    output[('Volume_%s' % order)] = results
# input parameters
check_dam_no = 607
check_dam_height = 2
# number of 5 cm stage intervals spanning the dam height
no_of_stage_interval = check_dam_height/.05

# create a list of stage values with 5 cm interval
# NOTE(review): `spread` is a module-level helper defined elsewhere;
# mode=3 semantics are not visible in this chunk.
dz = list((spread(0.00, check_dam_height, int(no_of_stage_interval), mode=3)))

# surveyed cross-section data
input_file = '/media/kiruba/New Volume/r/r_dir/stream_profile/new_code/created_profile_607.csv'
df = pd.read_csv(input_file, header=0)
row = 17   # row of Y values
# Python 2 print statement — this fragment predates Python 3.
print df

# NOTE(review): wrapping range() in a list yields a one-element outer list;
# pandas interprets it as a single index level here, but confirm the intent
# was not simply `range(1, 42, 1)`.
index = [range(1, 42, 1)]  # no of stage intervals
columns = ['stage_m']
data = np.array(dz)
output = pd.DataFrame(data, index=index, columns=columns)  # dataframe with stage values

# walk consecutive X positions; `df.ix` is long-deprecated pandas indexing
for l1, l2 in pairwise(df.ix[row]):
    if l2 > 0:
        calcvolume(profile=df["Y_%s" % int(l1)], order=l1, dy=int(l2-l1))
# Example no. 4 (0 votes) — scraped-snippet separator
    def __init__(self,opt):
        """Initialise the CSV-line parser from the `opt` configuration dict.

        Reads colour/output settings, caches the configured CSV file
        buffers, optionally opens a MySQL connection with interactively
        prompted credentials, then maps every line via the `spread` helper.

        NOTE(review): Python 2 code (`raw_input`, `urlparse`); the names
        `stdout_colors`, `spread`, `urlparse`, `os`, `getpass` and the
        enclosing class are defined outside this chunk.
        """
        self.func_me_color=opt['colors']['func_me_color']
        self.soc=stdout_colors(colors_active=opt['colors']['colors_active'],
                                output_caller=opt['colors']['output_caller'],
                                caller_color=opt['colors']['func_me_color'])
        # trace method entry in the configured colour
        self.soc.me_him(['ENTER:',__name__],self.func_me_color)
        self.opt=opt  # full options value/index dict
        self.opt['data']={}  # line/header field/function parsed values (used for product-data import)
        self.field="" # used in map functions: field of func-mapped value
        self.value=[] # used in map functions: value of func-mapped field
        self.files=self.opt['files'] # csv files from jar configuration
        self.site=self.opt['site_name'] # site url schema://domain/dir
        self.options={} # form field options
        self.__options__={} # fresh set of options for each line

        self.order=self.opt['order']
        self._optv=self.opt['options'] # options param
        self.final_funcs=self.opt['final_funcs']
        # headers/lines/parsed for the "main" file; the mangled (__) dicts
        # keep the same data per configured file key
        self.headers={}
        self.lines={}
        self.parsed={}
        self.__headers={}
        self.__lines={}
        self.__parsed={}
        for file in self.files.keys():
            if file == "files":
                for f in self.files[file].keys():
                    if self.files[file][f]=="main":
                        self.headers = self.files[f].buffer[0] # file headers list
                        self.lines = self.files[f].buffer # file line list
                        self.parsed = self.files[f].bufferp # file list of dicts

                    self.__headers[f]=self.files[f].buffer[0]
                    self.__lines[f] = self.files[f].buffer # file line list
                    self.__parsed[f] = self.files[f].bufferp # file list of dicts

        self.set_defaults()

        try:
            try:
                # Optional MySQL flavour: prompt for credentials, falling
                # back to guesses derived from the site name and $USER.
                if int(self.opt['flavors']['pcreamysql']) == 1:
                    import pcreamysql
                    h=urlparse.urlparse(self.opt['site_name'])[1].split("www.")[1]
                    u=os.environ["USER"]
                    db_host_=raw_input("\nDB Host("+h+"): ")
                    if not db_host_:
                        db_host_ = h
                    db_user_=raw_input("\nDB User("+u+"): ")
                    if not db_user_:
                        db_user_ = u
                    db_pass_=getpass.getpass() # NOTE(review): the original line was masked by the scraper ("******"); `db_db_` used below was presumably read via a raw_input("\nDB: ") prompt here — confirm against the upstream project.
                    self.opt['dbc'] = pcreamysql.pcreamysql(db_host_,db_user_,db_pass_,db_db_)
                else:
                    self.opt['dbc'] = None
            except (KeyboardInterrupt):
                print("\nCTRL-C PRESSED....DOING NOTHING HERE")
        except (EOFError):
            print ("\n\nBYPASS MYSQL LOGIN.")
            self.opt['environ']['mysql_login_bypass']=1

        # send options/mappings/configurations to be used in functions
        self.fun=spread(self.opt)
        self.loop_lines()

        # trace method exit
        self.soc.me_him(['EXIT:',__name__],self.func_me_color)
# Example no. 5 (0 votes) — scraped-snippet separator
def tutor_compare(n_for_previous_ent=2):
    """Assemble per-syllable tutor/pupil comparison rows and write them to CSV.

    For every nest in the module-level `meta_nest_dict`, compares each pupil
    against the nest's tutor on every non-introductory tutor syllable:
    directional entropies, spectral/PCA distances, spread measures,
    divergence metrics and SDKL scores.  Missing measurements are recorded
    as empty strings rather than raising.

    Parameters
    ----------
    n_for_previous_ent : int
        Forwarded to `previous_ent.batch_pe(n=...)`.

    Returns
    -------
    list of lists
        One row per (nest, pupil, syllable); the same rows are written to
        ./output/nest_learning.csv.

    NOTE(review): relies on module-level names (spectral_pca, pc,
    branch_point_differences, previous_ent, acoustic_transition_entropy,
    meta_nest_dict, prevalence_dict, category_dict, spread, distance,
    sdkl_mk, csv) defined elsewhere in the file.
    """
    pca_data = spectral_pca.get_medians()
    token_pca_data = spectral_pca.tokens_by_type_5D()
    forwards_ent_data = pc.depickle('forwards_ent_data')
    backwards_ent_data = pc.depickle('backwards_ent_data')
    fEP = pc.depickle('fEP')
    bEP = pc.depickle('bEP')
    divergence_data = branch_point_differences(2, 'euclidean')[1]
    dkl_data = branch_point_differences(2, 'dkl')[1]
    log_data = branch_point_differences(2, 'log')[1]
    previous_ent_data = previous_ent.batch_pe(n=n_for_previous_ent)
    tutor_previous_spread_dict = acoustic_transition_entropy.acoustic_spread()
    tutor_next_spread_dict = acoustic_transition_entropy.acoustic_spread(
        mode='forward')
    out_dict = {}
    for nest, birds_list in meta_nest_dict.items():
        nest_dict = {}
        # convention: the last entry of the birds list is the tutor
        pupil_IDs = birds_list[:-1]
        tutor_ID = birds_list[-1]
        tutor_syllables = pca_data[tutor_ID].keys()
        for pupil_ID in pupil_IDs:
            pupil_dict = {}
            pupil_syllables = pca_data[pupil_ID].keys()
            # skip the introductory note 'i'
            for syllable in [
                    syllable for syllable in tutor_syllables if syllable != 'i'
            ]:
                try:
                    prevalence = prevalence_dict[tutor_ID + '_' + syllable]
                    category = category_dict[pupil_ID + '_' + syllable]
                except Exception:  # narrowed from a bare except
                    prevalence = ''
                    category = ''
                direction_dict = {
                    'forwards': {
                        'tutor': '',
                        'pupil': ''
                    },
                    'backwards': {
                        'tutor': '',
                        'pupil': ''
                    },
                    'fEP': {
                        'tutor': '',
                        'pupil': ''
                    },
                    'bEP': {
                        'tutor': '',
                        'pupil': ''
                    }
                }
                # FIX: initialise so a syllable with no matching tutor row
                # cannot raise NameError (or leak the previous syllable's
                # spectral features) at the append loop below.
                tutor_spectral_data = []
                for direction, direction_data in zip(
                    ['forwards', 'backwards', 'fEP', 'bEP'],
                    [forwards_ent_data, backwards_ent_data, fEP, bEP]):
                    for row in direction_data:
                        if row[0] == pupil_ID and row[1] == syllable:
                            direction_dict[direction]['pupil'] = row[2]
                        if row[0] == tutor_ID and row[1] == syllable:
                            direction_dict[direction]['tutor'] = row[2]
                            # last six columns hold the spectral features
                            tutor_spectral_data = row[-6:]
                spectral_distance = ''
                divergence = ''
                dkl_value = ''
                log_value = ''
                SDKL1 = ''
                SDKL2 = ''
                tutor_previous_spread = ''
                tutor_next_spread = ''
                # FIX: pre-initialise so the row assembly below cannot hit
                # NameError when both entropy lookups fail.
                tutor_previous_ent = ''
                pupil_previous_ent = ''
                try:
                    tutor_spread = spread.spread(
                        token_pca_data[tutor_ID + '_' + syllable])
                except Exception:
                    tutor_spread = ''
                try:
                    pupil_spread = spread.spread(
                        token_pca_data[pupil_ID + '_' + syllable])
                except Exception:
                    pupil_spread = ''
                try:
                    cloud_distance = spread.sim(
                        token_pca_data[tutor_ID + '_' + syllable],
                        token_pca_data[pupil_ID + '_' + syllable])
                except Exception:
                    cloud_distance = ''
                try:
                    tutor_pca = pca_data[tutor_ID][syllable]
                    pupil_pca = pca_data[pupil_ID][syllable]
                    spectral_distance = distance.euclidean(
                        tuple(tutor_pca), tuple(pupil_pca))
                except Exception:
                    pass
                try:
                    tutor_previous_spread = tutor_previous_spread_dict[
                        tutor_ID + '_' + syllable]
                    tutor_next_spread = tutor_next_spread_dict[tutor_ID + '_' +
                                                               syllable]
                except Exception:
                    tutor_previous_spread = ''
                    tutor_next_spread = ''
                if category == 'Retained':
                    try:
                        tutor_fp = 'C:/Users/SakataWoolleyLab/Desktop/BFfromLogan/' + nest + '/' + tutor_ID + '/'
                        pupil_fp = 'C:/Users/SakataWoolleyLab/Desktop/BFfromLogan/' + nest + '/' + pupil_ID + '/'
                        SDKL_output_list = sdkl_mk.main_program(
                            tutor_fp, tutor_fp + 'syllables/' + syllable + '/',
                            pupil_fp + 'syllables/' + syllable + '/', 1, 1)
                        SDKL1 = SDKL_output_list[5]
                        SDKL2 = SDKL_output_list[6]
                    except Exception:
                        pass
                    # FIX: str() so numeric SDKL values cannot raise
                    # TypeError when concatenated for display.
                    print('SDKL1: ' + str(SDKL1) + '; SDKL2: ' + str(SDKL2))
                    try:
                        divergence = divergence_data[nest][pupil_ID][tuple(
                            syllable)]['divergence']
                        dkl_value = dkl_data[nest][pupil_ID][tuple(
                            syllable)]['divergence']
                        log_value = log_data[nest][pupil_ID][tuple(
                            syllable)]['divergence']
                    except Exception:
                        pass
                try:
                    tutor_previous_ent = previous_ent_data[tutor_ID][syllable]
                except Exception:
                    pass
                try:
                    pupil_previous_ent = previous_ent_data[pupil_ID][syllable]
                except Exception:
                    pass
                pupil_dict[syllable] = [
                    prevalence, category, direction_dict['forwards']['tutor'],
                    direction_dict['forwards']['pupil'],
                    direction_dict['backwards']['tutor'],
                    direction_dict['backwards']['pupil'],
                    direction_dict['fEP']['tutor'],
                    direction_dict['fEP']['pupil'],
                    direction_dict['bEP']['tutor'],
                    direction_dict['bEP']['pupil'], spectral_distance,
                    tutor_previous_spread, tutor_next_spread, tutor_spread,
                    pupil_spread, cloud_distance, divergence, dkl_value,
                    log_value, tutor_previous_ent, pupil_previous_ent, SDKL1,
                    SDKL2
                ]
                for feature in tutor_spectral_data:
                    pupil_dict[syllable].append(feature)
            nest_dict[pupil_ID] = pupil_dict
        out_dict[nest] = nest_dict
    # flatten the nested nest -> bird -> syllable dict into CSV-ready rows
    matrix_version = []
    for nest, nestdict in out_dict.items():
        for bird, birddict in nestdict.items():
            for syl, syllist in birddict.items():
                matrix_version.append([nest, bird, syl] + syllist)
    with open("./output/nest_learning.csv", 'w') as output_file:
        writer = csv.writer(output_file)
        writer.writerow([
            'Nest',
            'BirdID',
            'Syllable',
            'Prevalence',
            'Category',
            'TutorForwardsEntropy',
            'PupilForwardsEntropy',
            'TutorBackwardsEntropy',
            'PupilBackwardsEntropy',
            'TutorfEP',
            'PupilfEP',
            'TutorbEP',
            'PupilbEP',
            'SpectralDistance',
            'TutorPreviousSpread',
            'TutorNextSpread',
            'TutorSpread',
            'PupilSpread',
            'CloudDistance',
            'EuclideanDistance',
            'DKL',
            'LogDistance',
            'TutorPreviousEnt',
            'PupilPreviousEnt',
            'SDKL1',
            'SDKL2',
            'MeanFreq',
            'SpecDense',
            'Duration',
            'LoudEnt',
            'SpecTempEnt',
            'meanLoud',
        ])
        for row in matrix_version:
            writer.writerow(row)
    return matrix_version