Example #1
    def advance_in_time(self):
        """ Update avulsion model one time step. """

        ### future work: SLRR can be a vector to change rates ###

        # determine if there is an avulsion & find new path if so
        ### need to change this to look for shoreline after coupling ###
        ### (instead of looking for sea level)
        (self._riv_i, self._riv_j), self._avulsion_type, self._loc = avulse.find_avulsion(
             self._riv_i, self._riv_j, self._n,
             self._super_ratio, self._SL, self._ch_depth,
             self._short_path, self._splay_type, self._splay_dep, dx=self._dx,
             dy=self._dy)

        if self._saveavulsions and self._avulsion_type > 0:
            new_info = (self._avulsion_type, self._time / _SECONDS_PER_YEAR, self._loc)
            self._avulsion_info = np.vstack([self._avulsion_info, new_info])

        #assert(self._riv_i[-1] != 0)

        # save timestep and avulsion location if there was one
        #if len(loc) != 0:
        #    self._avulsions = self._avulsions + [(self._time/_SECONDS_PER_DAY),
        #                loc[-1], avulsion_type, length_old,
        #                length_new_sum, self._SL)]
        
        # raise first two rows by inlet rise rate (subsidence)
        self._n[:2, :] += self._IRR

        # change elevations according to sea level rise (SLRR)
        ### needs to be changed to subtracting elevation once coupled ###
        SLR.elev_change(self._SL, self._n, self._riv_i,
                        self._riv_j, self._ch_depth)

        # smooth river course elevations using linear diffusion equation
        diffuse.smooth_rc(self._dx, self._dy, self._nu, self._dt,
                          self._riv_i, self._riv_j, self._n)

        # Floodplain sedimentation
        FP.dep_blanket(self._SL, self._blanket_rate, self._n,
                       self._riv_i, self._riv_j, self._ch_depth)

        # Wetland sedimentation
        ### no wetlands in first version of coupling to CEM ###
        FP.wetlands(self._SL, self._WL_Z, self._WL_dist * self._dy,
                    self._n, self._riv_i, self._riv_j, self._x, self._y)

        # calculate sediment flux
        self._sed_flux = flux.calc_qs(self._nu, self._riv_i,
                                      self._riv_j, self._n,
                                      self._dx, self._dy, self._dt)

        self._profile = self._n[self._riv_i, self._riv_j]

        # Update sea level
        self._SL += self._SLRR
        self._time += self._dt
Example #2
def main():
    director.init(width=Settings.WINDOW_WIDTH,
                  height=Settings.WINDOW_HEIGHT,
                  caption=Settings.WINDOW_TITLE)
    FP.do_fp_setup()

    # TODO Ideally, the scenes would be queued here for clarity.
    # But since director's scene stack doesn't work, the next scene is called
    # explicitly in the builder controller, which really shouldn't be that
    # coupled and shouldn't sit high enough to know about handling the next scene.
    deck_builder_scene = BuilderController.get_scene()
    director.run(deck_builder_scene)
Example #3
def find_avulsion(riv_i, riv_j, n, super_ratio, current_SL, ch_depth,
                  short_path, splay_type, splay_dep, dx=1., dy=1.):
    new = riv_i, riv_j
    old = riv_i, riv_j
    avulsion_type = 0

    for a in xrange(1, len(riv_i)):
        if channel_is_superelevated(n, (riv_i[a], riv_j[a]), ch_depth,
                                    super_ratio):

            # if superelevation greater than trigger ratio, determine
            # length of new steepest descent path

            new = steep_desc.find_course(n, riv_i[:a], riv_j[:a],
                                         sea_level=current_SL)

            # if using the shortest path as an avulsion criterion, then
            # the lengths of the previous and newly calculated paths will
            # be compared
            if short_path == 1:
                new_length = find_path_length(new, dx=dx, dy=dy)
                old_length = find_path_length(old, dx=dx, dy=dy)

                if new_length < old_length:
                    # if new river course < length of old
                    # river course, then an avulsion will occur
                    avulsion_type = 1

                    new, avulsion_type = avulse_to_new_path(n,
                                             (riv_i[a - 1:], riv_j[a - 1:]),
                                             (new[0][a - 1:], new[1][a - 1:]),
                                             current_SL, ch_depth, avulsion_type,
                                             dx=dx, dy=dy)

                    new = (np.append(riv_i[:a - 1], new[0]),
                           np.append(riv_j[:a - 1], new[1]))

                    break

                elif splay_type > 0:
                    avulsion_type = 3
                    FP.dep_splay(n, (new[0][a], new[1][a]), (riv_i, riv_j),
                                 splay_dep, splay_type=splay_type)
            # if shortest path is not an avulsion criterion, then the new
            # steepest descent path will become the new course regardless
            # of new course length relative to the old course

    return new, avulsion_type, a
Example #4
    def test_data_access(self):
        x = FP.location_name('Ann Arbor')
        self.assertIsInstance(x, dict)
        self.assertEqual(sorted(x.keys()), ['lat', 'lng'])
        self.assertEqual(sorted(x.values()), [-83.7430378, 42.2808256])
        self.assertIsInstance(list(x.keys()), list)
        self.assertIsInstance(list(x.values()), list)
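For context, a minimal FP.location_name stub consistent with what the test above asserts (hypothetical; a real implementation would presumably geocode the place name through some API):

def location_name(place):
    # Hypothetical stub: map a known place name to its coordinates.
    known = {'Ann Arbor': {'lat': 42.2808256, 'lng': -83.7430378}}
    return known[place]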
Example #5
def CollectTranslatedStrings():
    """Collects all the translations for all the strings specified by kStringIds.

    Returns a list of tuples of (string_id, language, translated string). The
    list is sorted by language codes.
    """
    kGeneratedResourcesPath = os.path.join(path_utils.ScriptDir(), '..', '..',
                                           '..',
                                           'app/google_chrome_strings.grd')
    kTranslationDirectory = os.path.join(path_utils.ScriptDir(), '..', '..',
                                         '..', 'app', 'resources')
    kTranslationFiles = glob.glob(
        os.path.join(kTranslationDirectory, 'google_chrome_strings*.xtb'))

    # Get the strings out of generated_resources.grd.
    dom = minidom.parse(kGeneratedResourcesPath)
    # message_nodes is a list of message dom nodes corresponding to the string
    # ids we care about.  We want to make sure that this list is in the same
    # order as kStringIds so we can associate them together.
    message_nodes = []
    all_message_nodes = dom.getElementsByTagName('message')
    for string_id in kStringIds:
        message_nodes.append([
            x for x in all_message_nodes if x.getAttribute('name') == string_id
        ][0])
    message_texts = [
        node.firstChild.nodeValue.strip() for node in message_nodes
    ]

    # The fingerprint of the string is the message ID in the translation files
    # (xtb files).
    translation_ids = [str(FP.FingerPrint(text)) for text in message_texts]

    # Manually put _EN_US in the list of translated strings because it doesn't
    # have a .xtb file.
    translated_strings = []
    for string_id, message_text in zip(kStringIds, message_texts):
        translated_strings.append(
            TranslationStruct(string_id + '_EN_US', 'EN_US', message_text))

    # Gather the translated strings from the .xtb files.  If an .xtb file doesn't
    # have the string we want, use the en-US string.
    for xtb_filename in kTranslationFiles:
        dom = minidom.parse(xtb_filename)
        language = dom.documentElement.getAttribute('lang')
        language = language.replace('-', '_').upper()
        translation_nodes = {}
        for translation_node in dom.getElementsByTagName('translation'):
            translation_id = translation_node.getAttribute('id')
            if translation_id in translation_ids:
                translation_nodes[translation_id] = (
                    translation_node.firstChild.nodeValue.strip())
        for i, string_id in enumerate(kStringIds):
            translated_string = translation_nodes.get(translation_ids[i],
                                                      message_texts[i])
            translated_strings.append(
                TranslationStruct(string_id + '_' + language, language,
                                  translated_string))

    translated_strings.sort()
    return translated_strings
Example #6
def get_fingerprint_for_xtb(message_tag):
  """Obtains the fingerprint meant for xtb files from a message tag."""
  string_to_hash = message_tag.text or ''
  string_phs = message_tag.findall('ph')
  for string_ph in string_phs:
    string_to_hash = string_to_hash + string_ph.get('name').upper() + (string_ph.tail or '')
  string_to_hash = string_to_hash.strip().encode('utf-8')
  string_to_hash = clean_triple_quoted_string(string_to_hash)
  return FP.FingerPrint(string_to_hash) & 0x7fffffffffffffffL
Example #7
def UpdateBraveIds(grd_file):
    messages = xml.etree.ElementTree.parse(grd_file).getroot().find(
        'release').find('messages')
    for message_tag in messages.findall('message'):
        brave_string = message_tag.text
        if brave_string is None:
            continue
        brave_string_phs = message_tag.findall('ph')
        for brave_string_ph in brave_string_phs:
            brave_string = brave_string + brave_string_ph.get(
                'name').upper() + (brave_string_ph.tail or '')
        brave_string = brave_string.strip().encode('utf-8')
        if brave_company in brave_string:
            # Calculate Brave string id
            brave_string_fp = FP.FingerPrint(
                brave_string) & 0x7fffffffffffffffL
            print(str(brave_string_fp) + ' - ' + brave_string)
            chrome_string = brave_string.replace(brave_company, google_company)
            # Calculate Chrome string id
            # TODO: it gets an incorrect id here; need to figure out why.
            # For now it is replaced manually.
            chrome_string_fp = FP.FingerPrint(
                chrome_string) & 0x7fffffffffffffffL
            print(str(chrome_string_fp) + ' - ' + chrome_string)
            if chrome_string_fp not in brave_ids:
                brave_ids[chrome_string_fp] = brave_string_fp
            print('\n')
        elif brave_brand_string in brave_string:
            # Calculate Brave string id
            brave_string_fp = FP.FingerPrint(
                brave_string) & 0x7fffffffffffffffL
            print(str(brave_string_fp) + ' - ' + brave_string)
            for chrome_brand_string in chrome_brand_strings:
                chrome_string = brave_string.replace(brave_brand_string,
                                                     chrome_brand_string)
                # Calculate Chrome string id
                chrome_string_fp = FP.FingerPrint(
                    chrome_string) & 0x7fffffffffffffffL
                print(str(chrome_string_fp) + ' - ' + chrome_string)
                if chrome_string_fp not in brave_ids:
                    brave_ids[chrome_string_fp] = brave_string_fp
            print('\n')
Example #8
def get_fingerprint_for_xtb(message_tag):
    """Obtains the fingerprint meant for xtb files from a message tag."""
    string_to_hash = message_tag.text
    string_phs = message_tag.findall('ph')
    for string_ph in string_phs:
        string_to_hash = (string_to_hash + string_ph.get('name').upper() +
                          (string_ph.tail or ''))
    string_to_hash = (string_to_hash or '').strip().encode('utf-8')
    string_to_hash = clean_triple_quoted_string(string_to_hash)
    fp = FP.FingerPrint(string_to_hash.decode('utf-8'))
    meaning = message_tag.get('meaning')
    if meaning:
        # combine the fingerprints of message and meaning
        fp2 = FP.FingerPrint(meaning)
        if fp < 0:
            fp = fp2 + (fp << 1) + 1
        else:
            fp = fp2 + (fp << 1)
    # To avoid negative ids we strip the high-order bit
    return str(fp & 0x7fffffffffffffffL)
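A minimal usage sketch for the function above (the .grd filename is an assumption; the tag layout follows Example #7):

import xml.etree.ElementTree

root = xml.etree.ElementTree.parse('generated_resources.grd').getroot()
for message_tag in root.iter('message'):
    print(message_tag.get('name') + ' - ' + get_fingerprint_for_xtb(message_tag))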
Example #9
        avulsions = avulsions + [(k*dt/86400, loc[-1], avulsion_type, 
                                    length_old, length_new_sum, current_SL)]
    
    # raise first two rows by inlet rise rate (subsidence)
    n[0][:] = n[0][:] + (IRR)
    n[1][:] = n[1][:] + (IRR)

    # change elevations according to sea level rise (SLRR)
    n, rc_flag = SLR.elev_change(imax, jmax, current_SL, n, riv_x, riv_y,
                                 ch_depth, dx, dy)

    # smooth river course elevations using linear diffusion equation
    n, dn_rc = diffuse.smooth_rc(dx, dy, nu, dt, riv_x, riv_y, n, nslope)

    # Floodplain sedimentation
    n, dn_fp = FP.dep_blanket(dy, dx, imax, jmax, current_SL, blanket_rate,
                              n, riv_x, riv_y, ch_depth)
    
    # Wetland sedimentation
    n, dn_fp = FP.wetlands(dx, dy, imax, jmax, current_SL, WL_Z, WL_dist, n,
                           riv_x, riv_y, x, y, dn_fp)

    # here we will calculate flux (?)

    # create a river profile array
    profile = prof.make_profile(dx, dy, n, riv_x, riv_y, profile)

    # save files
    if savefiles == 1:
        if k >= save_after:
            if k % savespacing == 0:
                np.savetxt('elev_grid/elev_' + str(k*dt/86400 - (save_after)) +
Example #10
if __name__=='__main__':
	sClusterFile = sys.argv[1]
	sFPListFile = sys.argv[2]

	clusterList = clusterParser(sClusterFile)
	with open(sFPListFile, 'r' ) as f:
		FPList = f.readlines()

	FPList = [fp[:-1] for fp in FPList]

	for i in range(len(clusterList)):
		cluster = clusterList[i]

		if len(cluster) == 1:
			continue

		
		setFP = FP.loadFPtoSet(FPList[cluster[0]])
		index = 1

		print i, len(cluster), len(setFP),

		while index < len(cluster):
			setFP = setFP.intersection(FP.loadFPtoSet(FPList[cluster[index]]))
			index += 1	

		print len(setFP)
	
	
Example #11
    def advance_in_time(self):
        """ Update avulsion model one time step. """
        # if (self._time / _SECONDS_PER_YEAR) > 2000:
        #     self._SLRR = 0.01 / _SECONDS_PER_YEAR * self._dt

        self._riv_i, self._riv_j, self._course_update = steep_desc.update_course(
            self._n, self._riv_i, self._riv_j, self._ch_depth, self._slope,
            sea_level=self._SL, dx=self._dx, dy=self._dy)

        self._n = avulsion_utils.fix_elevations(self._n, self._riv_i, self._riv_j,
            self._ch_depth, self._SL, self._slope, self._dx, self._max_rand, self._SLRR)

        """ Save every time the course changes? """
        if self._saveupdates and self._course_update > 0:
            with open('output_data/river_info.out','a') as file:
                file.write("%.5f %i \n" % ((self._time / _SECONDS_PER_YEAR),
                    self._course_update))

        """ determine if there is an avulsion & find new path if so """
        (self._riv_i, self._riv_j), self._avulsion_type, self._loc, self._avulse_length, \
         self._path_diff, self._splay_deposit = avulse.find_avulsion(self._riv_i,
            self._riv_j, self._n, self._super_ratio, self._SL, self._ch_depth,
            self._short_path, self._splay_type, self._slope,
            self._splay_deposit, self._nu, self._dt, dx=self._dx, dy=self._dy)

        """ Save avulsion record. """
        if self._saveavulsions and self._avulsion_type > 0:
            with open('output_data/river_info.out','a') as file:
                file.write("%.5f %i %i %.5f %.5f\n" % ((self._time / _SECONDS_PER_YEAR),
                    self._avulsion_type, self._loc, self._avulse_length, self._path_diff))

        """ Save crevasse splay deposits. """        
        if self._saveavulsions and (self._splay_deposit.sum() > 0):
            np.savetxt('output_data/splay_deposit.out', self._splay_deposit, '%.8f')

        # need to fill old river channels if coupled to CEM
        if (self._avulsion_type == 1) or (self._avulsion_type == 2):
            self._n = avulsion_utils.fix_elevations(self._n, self._riv_i, self._riv_j,
                self._ch_depth, self._SL, self._slope, self._dx, self._max_rand, self._SLRR)

        #assert(self._riv_i[-1] != 0)

        """ change elevations according to sea level rise (SLRR)
        (if not coupled -- this occurs in coupling script otherwise) """
        # SLR.elev_change(self._SL, self._n, self._riv_i,
        #                 self._riv_j, self._ch_depth, self._SLRR)

        """ smooth river course elevations using linear diffusion equation """
        self._dn_rc = diffuse.smooth_rc(self._dx, self._dy, self._nu, self._dt, self._ch_depth,
                          self._riv_i, self._riv_j, self._n, self._SL, self._slope)

        """ Floodplain sedimentation (use one or the other) """
        #-------------------------------------------------------
        ### Deposit blanket across entire subaerial domain: ###
        # FP.dep_blanket(self._SL, self._blanket_rate, self._n,
        #                self._riv_i, self._riv_j, self._ch_depth)

        ### Deposit fines adjacent to river channel: ###
        FP.dep_fines(self._n, self._riv_i, self._riv_j, self._dn_rc, self._frac_fines,
                     self._SL)
        #-------------------------------------------------------

        """ Wetland sedimentation """
        ### no wetlands in first version of coupling to CEM ###
        # FP.wetlands(self._SL, self._WL_Z, self._WL_dist * self._dy,
        #             self._n, self._riv_i, self._riv_j, self._x, self._y)

        """ Subsidence """
        subside.linear_subsidence(self._n, self._riv_i, self._riv_j, self._ch_depth,
                                  self._SubRate, self._SubStart, self._SL)

        """ calculate sediment flux at the river mouth """
        self._sed_flux = flux.calc_qs(self._nu, self._riv_i, self._riv_j,
                                      self._n, self._SL, self._ch_depth,
                                      self._dx, self._dy, self._dt, self._slope)

        self._profile = self._n[self._riv_i, self._riv_j]

        # Update time
        self._time += self._dt
        # update sea level
        self._SL += self._SLRR
Example #12
import matplotlib.pyplot as plt
from numpy import *
from FP import *    


path='/home/ben/MasterProject/Code/FP_solver/Runs/'
dirs_NR = ['MS_NR_AH09', # AH09 loss cone term  with X_pinhole = 0.5
           'MS_NR_AH09_E09']
dirs_RR = ['MS_RR_AH09',
           'MS_RR_AH09_E09']
labels = ['AH09',
          'AH09 with E09']
colors = ['r','b','g','k']
FP_NR = [FP(path + dirs_NR[i], labels[i], 4) for i in xrange(len(dirs_NR))]
FP_RR = [FP(path + dirs_RR[i], labels[i], 4) for i in xrange(len(dirs_RR))]
for i in xrange(len(FP_RR)):
    FP_RR[i].linestyles = ['--','--','--','--']
    FP_RR[i].labels = ['','','','']
    FP_NR[i].FP_RR = FP_RR[i]
#############################################
def Plot_Q(FP_NR, FP_RR, colors, i=0):
    """Plot Q against Q_x for a matched pair of runs.

    Arguments:
    - `FP_NR`: NR run, plotted as a solid line
    - `FP_RR`: matching RR run, plotted as a dashed line
    - `colors`: list of line colors; `i` selects which one to use
    """
    plt.figure()
    if FP_NR.nM == 1:
        plt.loglog(FP_NR.Q_x, FP_NR.Q, label=FP_NR.label, color=colors[i])
        plt.loglog(FP_RR.Q_x, FP_RR.Q, '--', color=colors[i])
Example #13
def SyncTransifexToTranslations():
    # load all strings and calculate their translation ids (ignore memory
    # consumption at this point to speed up the whole process)
    brave_strings = {}
    e = xml.etree.ElementTree.parse(base_strings_file).getroot()
    for string_tag in e.findall('string'):
        string_name = string_tag.get('name')
        string_value = string_tag.text    
        if not string_name:
            sys.exit('String name is empty')
        if not string_value:
            sys.exit("String value is empty")
        # calculate translation id
        string_fp = FP.FingerPrint(string_value) & 0x7fffffffffffffffL
        if string_name in brave_strings:
            sys.exit('String name "' + string_name + '" is duplicated')
        brave_strings[string_name] = string_fp

    # go through all .xtb files in translations_folder
    replacingNumber = 1
    addingNumber = 1
    for (dirpath, dirnames, filenames) in walk(translations_folder):
        for filename in filenames:
            if filename.endswith('.xtb'):
                translations_tree = xml.etree.ElementTree.parse(translations_folder + '/' + filename)
                translations = translations_tree.getroot()
                # get language id
                lang_id = translations.get('lang').replace('-', '_')
                if not lang_id:
                    sys.exit('Language id not found for ' + filename)
                # if not lang_id == 'uk':
                #   continue
                # there are some differences in language codes, so correct them
                if lang_id == 'ca':
                    lang_id = 'ca_ES'
                elif lang_id == 'bg':
                    lang_id = 'bg_BG'
                elif lang_id == 'iw':
                    lang_id = 'he'
                elif lang_id == 'cs':
                    lang_id = 'cs_CZ'            
                print('Processing language "' + lang_id + '"...')
                # find appropriate xml file in transifex folder
                xml_file_name = transifex_folder + '/stringsxml_' + lang_id + '.xml'
                if os.path.isfile(xml_file_name):
                    # go through all strings in a file name
                    strings = xml.etree.ElementTree.parse(xml_file_name).getroot()
                    translations_file_was_changed = False
                    for string_tag in strings.findall('string'):
                        string_name = string_tag.get('name')
                        string_value = string_tag.text
                        if string_name in brave_strings:
                            # we have its translation id, so let's look for it in the .xtb file
                            translation_id_found = False                        
                            for translation_tag in translations.findall('translation'):
                                translation_id = translation_tag.get('id')
                                translation_text = translation_tag.text
                                # we found id, so replace it
                                if translation_id == str(brave_strings[string_name]):
                                    if not translation_text == string_value:
                                        print(str(replacingNumber) + ' replacing "' + translation_text + '" with "' + string_value + '"')
                                        replacingNumber += 1
                                        translation_tag.text = string_value
                                        translations_file_was_changed = True
                                    translation_id_found = True
                                    break
                            # could not find translation id, so append it to the end
                            if not translation_id_found:
                                print(str(addingNumber) + ' adding "' + string_name + '" with "' + string_value + '"')
                                addingNumber += 1
                                new_translation_tag = xml.etree.ElementTree.Element('translation')
                                new_translation_tag.set('id', str(brave_strings[string_name]))
                                new_translation_tag.text = string_value
                                new_translation_tag.tail = '\n'
                                translations.append(new_translation_tag)
                                translations_file_was_changed = True
                        else:
                            sys.exit('String name "' + string_name + '" not found in base strings')
                    # write changes
                    if translations_file_was_changed:
                        translations_file_name = translations_folder + '/' + filename
                        translations_tree.write(translations_file_name, encoding="utf-8", xml_declaration=False)
                        # we need to prepend the headers
                        with open(translations_file_name, 'r+') as f:
                            # load all content into memory to make it faster
                            # (size is less than 1 MB, so it should not be a problem)
                            content = f.read()
                            f.seek(0, 0)
                            f.write('<?xml version="1.0" ?>\n<!DOCTYPE translationbundle>\n' + content)
                else:
                    sys.exit('Language xml file not found ' + xml_file_name)
        break
    print('Sync transifex to translations finished successfully')
Example #14
# Gives translation ids for messages.
#
# Messages with placeholders should go like that:
# original message = "I'll buy a <ph name="WAVELENGTH">%d<ex>200</ex></ph> nm laser at <ph name="STORE_NAME">%s<ex>the grocery store</ex></ph>."
# message to get id = "I'll buy a WAVELENGTH nm laser at STORE_NAME."
#
# Messages with line breaks should go like that:
# original message = "She gathered
#wood, charcoal, and
#a sledge hammer."
# message to get id = "She gathered\nwood, charcoal, and\na sledge hammer."

import FP

oldString = "old string"
newString = "new string"
fp = FP.FingerPrint(newString)
fp2 = FP.FingerPrint(oldString)
file_ = open('strings.txt', 'w')
file_.write(str(fp2 & 0x7fffffffffffffffL) + ' - ' + oldString + '\r\n')
file_.write(str(fp & 0x7fffffffffffffffL) + ' - ' + newString + '\r\n')
file_.close()
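A minimal sketch of the placeholder flattening described in the comment above (the helper name is hypothetical; it assumes an ElementTree <message> tag with <ph> children, as in the other examples):

import xml.etree.ElementTree

def message_to_id_text(message_tag):
    # Replace each <ph> element with its upper-cased name attribute,
    # keeping the text before and after the placeholder.
    text = message_tag.text or ''
    for ph in message_tag.findall('ph'):
        text += ph.get('name').upper() + (ph.tail or '')
    return text.strip()

tag = xml.etree.ElementTree.fromstring(
    '<message>I will buy a <ph name="WAVELENGTH">%d<ex>200</ex></ph> nm laser.</message>')
print(message_to_id_text(tag))  # I will buy a WAVELENGTH nm laser.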
Example #15
def find_avulsion(dx, dy, imax, jmax, riv_x, riv_y, n, super_ratio, current_SL,
                  ch_depth, short_path, dn_fp, splay_type, splay_dep):

    loc = []
    SEL = np.zeros(len(riv_x))
    SER = np.zeros(len(riv_x))
    avulsion_type = 0
    length_new_sum = 0    
    length_old = 0

    for a in range(1, len(riv_x)):

        ch_Z = n[riv_x[a]/dx][riv_y[a]/dy] + ch_depth   # bankfull elev.
        LHS = n[riv_x[a]/dx][(riv_y[a]/dy)-1]
        RHS = n[riv_x[a]/dx][(riv_y[a]/dy)+1]

        # normalized superelevation ratio on left side
        SEL[a] = ((ch_Z - LHS) / ch_depth)

        # normalized superelevation ratio on right side
        SER[a] = ((ch_Z - RHS) / ch_depth)

        if SEL[a] >= super_ratio or SER[a] >= super_ratio:

            # if superelevation greater than trigger ratio, determine
            # length of new steepest descent path
            new_riv_x = riv_x[:a-1]
            new_riv_y = riv_y[:a-1]

            new_riv_x, new_riv_y = steep_desc.find_new_course(
                dx, dy, imax, jmax, n, new_riv_x, new_riv_y, current_SL)

            # if using the shortest path as an avulsion criterion, then
            # the lengths of the previous and newly calculated paths will
            # be compared
            if short_path == 1:
                
                # duplicates arrays so that length can be compared below
                test_new_x = new_riv_x[a:]
                test_new_y = new_riv_y[a:]
                test_old_x = riv_x[a:]
                test_old_y = riv_y[a:]
                length_new = []

                for c in range(len(test_new_x) - 1):
                    x_step = test_new_x[c+1]/dx - test_new_x[c]/dx
                    y_step = test_new_y[c+1]/dy - test_new_y[c]/dy

                    # straight steps count 1; diagonal steps count sqrt(2)
                    if x_step == 0 and abs(y_step) == 1:
                        length_new.append(1)
                    elif x_step == 1 and y_step == 0:
                        length_new.append(1)
                    elif x_step == 1 and abs(y_step) == 1:
                        length_new.append(math.sqrt(2))
                
                for b in range(len(test_old_x) - 1):
                    x_step = test_old_x[b+1]/dx - test_old_x[b]/dx
                    y_step = test_old_y[b+1]/dy - test_old_y[b]/dy

                    if x_step == 0 and abs(y_step) == 1:
                        length_old += 1
                    elif x_step == 1 and y_step == 0:
                        length_old += 1
                    elif x_step == 1 and abs(y_step) == 1:
                        length_old += math.sqrt(2)

                # if new river course < length of old
                # river course, then an avulsion will occur
                length_new_sum = sum(length_new)
                if length_new_sum < length_old:

                    loc = [a]          # avulsion location
                    avulsion_type = 1  # sets avulsion to be regional; may be
                                       # updated again below (if local)

                    # maybe this should be len(test_old_x)-1?
                    for d in range(1, len(test_old_x)):
                        
                        x_diff = new_riv_x[-1] - riv_x[a+d]
                        y_diff = new_riv_y[-1] - riv_y[a+d]
                        
                        if x_diff == 0 and y_diff == 0:

                            avulsion_type = 2   # local avulsion

                            riv_x = new_riv_x + riv_x[a+d+1:]
                            riv_y = new_riv_y + riv_y[a+d+1:]
                            # the above doesn't change the river mouth location
                            # unless it's a regional avulsion

                            break
                    
                    if avulsion_type == 1:

                        riv_x = new_riv_x
                        riv_y = new_riv_y

                        n = downcut.cut_new(dx, dy, riv_x, riv_y, n, length_new,
                                            current_SL, a, ch_depth)

                    return (riv_x, riv_y, loc, SEL, SER, n, dn_fp, avulsion_type,
                            length_new_sum, length_old)

                elif splay_type > 0:
                    n, dn_fp = FP.dep_splay(dy, dx, imax, jmax,
                                            riv_x, riv_y, new_riv_x, new_riv_y,
                                            ch_depth, n, a, dn_fp, splay_type,
                                            splay_dep)

            # if shortest path is not an avulsion criterion, then the new
            # steepest descent path will become the new course regardless
            # of new course length relative to the old course
            if short_path == 0:

                riv_x = new_riv_x
                riv_y = new_riv_y
                loc = [a]

    return (riv_x, riv_y, loc, SEL, SER, n, dn_fp, avulsion_type, length_new_sum,
            length_old)
Example #16
def SyncTranslationsToTransifex():
    # load all strings and calculate their translation ids (ignore memory
    # consumption at this point to speed up the whole process)
    brave_strings = {}
    e = xml.etree.ElementTree.parse(base_strings_file).getroot()
    for string_tag in e.findall('string'):
        string_name = string_tag.get('name')
        string_value = string_tag.text    
        if not string_name:
            sys.exit('String name is empty')
        if not string_value:
            sys.exit("String value is empty")
        # calculate translation id
        string_fp = FP.FingerPrint(string_value) & 0x7fffffffffffffffL
        if string_name in brave_strings:
            sys.exit('String name "' + string_name + '" is duplicated')
        brave_strings[string_name] = string_fp

    # go through all .xtb files in translations_folder
    replacingNumber = 1
    for (dirpath, dirnames, filenames) in walk(translations_folder):
        for filename in filenames:
            if filename.endswith('.xtb'):
                translations = xml.etree.ElementTree.parse(translations_folder + '/' + filename).getroot()
                # get language id
                lang_id = translations.get('lang').replace('-', '_')
                if not lang_id:
                    sys.exit('Language id not found for ' + filename)
                # if not lang_id == 'uk':
                #   continue
                # there are some differences in language codes, so correct them
                if lang_id == 'ca':
                    lang_id = 'ca_ES'
                elif lang_id == 'bg':
                    lang_id = 'bg_BG'
                elif lang_id == 'iw':
                    lang_id = 'he'
                elif lang_id == 'cs':
                    lang_id = 'cs_CZ'            
                print('Processing language "' + lang_id + '"...')
                # find appropriate xml file in transifex folder
                xml_file_name = transifex_folder + '/stringsxml_' + lang_id + '.xml'
                if os.path.isfile(xml_file_name):
                    # go through all strings in a file name
                    strings_tree = xml.etree.ElementTree.parse(xml_file_name)
                    strings_file_was_changed = False
                    strings = strings_tree.getroot()
                    for string_tag in strings.findall('string'):
                        string_name = string_tag.get('name')
                        string_value = string_tag.text
                        if string_name in brave_strings:
                            # we have its translation id, so let's look for it in the .xtb file
                            translation_id_found = False                        
                            for translation_tag in translations.findall('translation'):
                                translation_id = translation_tag.get('id')
                                translation_text = translation_tag.text
                                # we found id, so replace it
                                if translation_id == str(brave_strings[string_name]):
                                    if not translation_text == string_value:
                                        print(str(replacingNumber) + ' replacing "' + string_value + '" with "' + translation_text + '"')
                                        replacingNumber += 1
                                        string_tag.text = translation_text
                                        strings_file_was_changed = True
                                    translation_id_found = True
                                    break
                            # could not find translation id, so append it to the end
                            if not translation_id_found:
                                sys.exit('Translation id "' + str(brave_strings[string_name]) + '" for "' + string_name + '" not found')
                        else:
                            sys.exit('String name "' + string_name + '" not found in base strings')
                    if strings_file_was_changed:
                        strings_tree.write(xml_file_name, encoding="utf-8", xml_declaration=False)
                else:
                    sys.exit('Language xml file not found ' + xml_file_name)
        break
    print('Sync translations to transifex finished successfully')
Example #17
    fCluster.close()
    return clusterList


if __name__ == '__main__':
    sClusterFile = sys.argv[1]
    sFPListFile = sys.argv[2]

    clusterList = clusterParser(sClusterFile)
    with open(sFPListFile, 'r') as f:
        FPList = f.readlines()

    FPList = [fp[:-1] for fp in FPList]

    for i in range(len(clusterList)):
        cluster = clusterList[i]

        if len(cluster) == 1:
            continue

        setFP = FP.loadFPtoSet(FPList[cluster[0]])
        index = 1

        print i, len(cluster), len(setFP),

        while index < len(cluster):
            setFP = setFP.intersection(FP.loadFPtoSet(FPList[cluster[index]]))
            index += 1

        print len(setFP)
Example #18
def hello_world():
    db = cx_Oracle.connect('****', '****', tns)

    hm_fp = tool.HashMap()
    cr_fp = db.cursor()
    sql_fp= "select cf.financial_product_id||','||cf.term as \"financial_product_id\" , cf.financial_product_nme from  CURRENT_FP cf  " \
                                                                                  "union " \
                                                                                  "select cf.financial_product_id||','||cf.term as \"financial_product_id\", cf.financial_product_nme from  LAST_FP cf ";
    cr_fp.execute(sql_fp)
    rs_fp = cr_fp.fetchall()
    for x in rs_fp:
        fp = FP_BASIC.FP_BASIC(str(x[0]), str(x[1]).decode("utf-8"))
        hm_fp.put(str(x[0]), fp)
    cr_fp.close()

    cr_fp_old = db.cursor()
    sql_fp_old = "select cf.financial_product_id, cf.financial_product_nme,cf.valid_from_dte,cf.valid_to_dte,cf.Minimum_Financing_Amt,cf.Maximum_Financing_Amt,cf.minimum_lease_trm,cf.maximun_lease_trm ," \
          " cf.MAXIMUM_FINANCING_PCT,cf.ACTUAL_RTE,cf.CUSTOMER_RTE,cf.SUBSIDY_RTE,cf.term from  CURRENT_FP cf  " \
          " minus " \
          "  select  lf.financial_product_id, lf.financial_product_nme,lf.valid_from_dte,lf.valid_to_dte,lf.Minimum_Financing_Amt,lf.Maximum_Financing_Amt,lf.minimum_lease_trm,lf.maximun_lease_trm, "\
            " lf.MAXIMUM_FINANCING_PCT,lf.ACTUAL_RTE,lf.CUSTOMER_RTE,lf.SUBSIDY_RTE,lf.term  from  LAST_FP  lf  ";
    hm_fp_old = tool.HashMap()
    cr_fp_old.execute(sql_fp_old)
    rs_fp_old = cr_fp_old.fetchall()
    key_list = []
    for x in rs_fp_old:
        fp_old = FP.FP(str(x[0]), str(x[1]).decode("utf-8"), str(x[2]), str(x[3]), str(x[4]), str(x[5]),
                       str(x[6]), str(x[7]), str(x[8]), str(x[9]), str(x[10]), str(x[11]), str(x[12]))
        hm_fp_old.put(str(x[0]) + ',' + str(x[12]), fp_old)
        key_list.append(str(x[0]) + ',' + str(x[12]))
    cr_fp_old.close()



    sql_fp_new = "select  lf.financial_product_id, lf.financial_product_nme,lf.valid_from_dte,lf.valid_to_dte,lf.Minimum_Financing_Amt,lf.Maximum_Financing_Amt,lf.minimum_lease_trm,lf.maximun_lease_trm,  " \
              " lf.MAXIMUM_FINANCING_PCT,lf.ACTUAL_RTE,lf.CUSTOMER_RTE,lf.SUBSIDY_RTE,lf.term  from  LAST_FP  lf " \
" minus  " \
" select cf.financial_product_id, cf.financial_product_nme,cf.valid_from_dte,cf.valid_to_dte,cf.Minimum_Financing_Amt,cf.Maximum_Financing_Amt,cf.minimum_lease_trm,cf.maximun_lease_trm ," \
" cf.MAXIMUM_FINANCING_PCT,cf.ACTUAL_RTE,cf.CUSTOMER_RTE,cf.SUBSIDY_RTE,cf.term " \
" from  CURRENT_FP cf " ;

    hm_fp_new = tool.HashMap()
    cr_fp_new = db.cursor()
    cr_fp_new.execute(sql_fp_new)
    rs_fp_new = cr_fp_new.fetchall()
    for x in rs_fp_new:
        fp_new = FP.FP(str(x[0]), str(x[1]).decode("utf-8"), str(x[2]), str(x[3]), str(x[4]), str(x[5]),
                       str(x[6]), str(x[7]), str(x[8]), str(x[9]), str(x[10]), str(x[11]), str(x[12]))
        hm_fp_new.put(str(x[0]) + ',' + str(x[12]), fp_new)
        key_list.append(str(x[0]) + ',' + str(x[12]))
    cr_fp_new.close()

    sql_fp_asset_old = "select cf.financial_product_id||','||cf.term  ,cfas.financial_product_nme,cfas.makemodel_group_nme,cfas.asset_model_dsc "\
    " from current_fp cf left join current_fp_asset_series cfas on cf.financial_product_id = cfas.financial_product_id  " \
                    " minus  " \
                    " select lf.financial_product_id||','||lf.term  ,lfas.financial_product_nme,lfas.makemodel_group_nme,lfas.asset_model_dsc from last_fp lf " \
                       " left join last_fp_asset_series lfas on lf.financial_product_id = lfas.financial_product_id ";

    hm_fp_asset_old = tool.HashMap()
    cr_fp_asset_old = db.cursor()
    cr_fp_asset_old.execute(sql_fp_asset_old)
    rs_fp_asset_old = cr_fp_asset_old.fetchall()
    for x in rs_fp_asset_old:
        fp_asset_old_list = hm_fp_asset_old.get(str(x[0]))
        if fp_asset_old_list is None:
            fp_asset_old_list = []
        fp_asset_old = FP_ASSET.FP_ASSET(str(x[0]), str(x[1]).decode("utf-8"), str(x[2]).decode("utf-8"))
        fp_asset_old_list.append(fp_asset_old)
        hm_fp_asset_old.put(str(x[0]), fp_asset_old_list)
        key_list.append(str(x[0]))
    cr_fp_asset_old.close()

    sql_fp_asset_new = "select lf.financial_product_id||','||lf.term ,lfas.financial_product_nme,lfas.makemodel_group_nme,lfas.asset_model_dsc " \
                       " from last_fp lf left join last_fp_asset_series lfas on lf.financial_product_id = lfas.financial_product_id  " \
                       " minus   select cf.financial_product_id||','||cf.term ,cfas.financial_product_nme,cfas.makemodel_group_nme,cfas.asset_model_dsc  " \
                       " from current_fp cf left join current_fp_asset_series cfas on cf.financial_product_id = cfas.financial_product_id "
    hm_fp_asset_new = tool.HashMap()
    cr_fp_asset_new = db.cursor()
    cr_fp_asset_new.execute(sql_fp_asset_new)
    rs_fp_asset_new = cr_fp_asset_new.fetchall()
    for x in rs_fp_asset_new:
        fp_asset_new_list = hm_fp_asset_new.get(str(x[0]))
        if fp_asset_new_list is None:
            fp_asset_new_list = []
        fp_asset_new = FP_ASSET.FP_ASSET(str(x[0]), str(x[1]).decode("utf-8"), str(x[2]).decode("utf-8"))
        fp_asset_new_list.append(fp_asset_new)
        key_list.append(str(x[0]))
        hm_fp_asset_new.put(str(x[0]), fp_asset_new_list)
    cr_fp_asset_new.close()

    sql_fp_dealer_old = "select cf.financial_product_id||','||cf.term ,cfd.dealer_name from current_fp cf " \
                        "left join current_fp_dealer cfd on cf.financial_product_id = cfd.financial_product_id " \
                        " minus " \
                        " select lf.financial_product_id||','||lf.term  ,lfd.dealer_name from last_fp lf left join last_fp_dealer lfd " \
                        "on lf.financial_product_id = lfd.financial_product_id ";

    hm_fp_dealer_old = tool.HashMap()
    cr_fp_dealer_old = db.cursor()
    cr_fp_dealer_old.execute(sql_fp_dealer_old)
    rs_fp_dealer_old = cr_fp_dealer_old.fetchall()
    for x in rs_fp_dealer_old:
        fp_dealer_old_list = hm_fp_dealer_old.get(str(x[0]))
        if fp_dealer_old_list is None:
            fp_dealer_old_list = []
        fp_dealer_old = FP_DEALER.FP_DEALER(str(x[0]), str(x[1]).decode("utf-8"))
        fp_dealer_old_list.append(fp_dealer_old)
        hm_fp_dealer_old.put(str(x[0]), fp_dealer_old_list)
        key_list.append(str(x[0]))
    cr_fp_dealer_old.close()

    sql_fp_dealer_new = "select lf.financial_product_id||','||lf.term  ,lfd.dealer_name from last_fp lf left join last_fp_dealer lfd  on lf.financial_product_id = lfd.financial_product_id  " \
                        " minus " \
                        "select cf.financial_product_id||','||cf.term  ,cfd.dealer_name from current_fp cf left join current_fp_dealer cfd on cf.financial_product_id = cfd.financial_product_id ";

    hm_fp_dealer_new = tool.HashMap()
    cr_fp_dealer_new = db.cursor()
    cr_fp_dealer_new.execute(sql_fp_dealer_new)
    rs_fp_dealer_new = cr_fp_dealer_new.fetchall()
    for x in rs_fp_dealer_new:
        fp_dealer_new_list = hm_fp_dealer_new.get(str(x[0]))
        if fp_dealer_new_list is None:
            fp_dealer_new_list = []
        fp_dealer_new = FP_DEALER.FP_DEALER(str(x[0]), str(x[1]).decode("utf-8"))
        fp_dealer_new_list.append(fp_dealer_new)
        key_list.append(str(x[0]))
        hm_fp_dealer_new.put(str(x[0]), fp_dealer_new_list)
    cr_fp_dealer_new.close()

    db.close()
    key_list = sorted(set(key_list))
    title = '金融产品变化明细'  # "Financial product change details"
    today = date.today().strftime("%Y-%m-%d")
    yesterday = (date.today() + timedelta(days=-1)).strftime("%Y-%m-%d")

    num = len(key_list)

    return render_template(
        'helloworld.html',
        title=title.decode("utf-8"),
        hm_fp=hm_fp,
        hm_fp_old=hm_fp_old,
        hm_fp_new=hm_fp_new,
        hm_fp_asset_old=hm_fp_asset_old,
        hm_fp_asset_new=hm_fp_asset_new,
        hm_fp_dealer_old=hm_fp_dealer_old,
        hm_fp_dealer_new=hm_fp_dealer_new,
        key_list=key_list,
        today=today,
        yesterday=yesterday,
        num=num
    )
Example #19
def find_avulsion(riv_i, riv_j, n, super_ratio, current_SL, ch_depth,
                  short_path, splay_type, slope, splay_depth, 
                  nu, dt, dx=1., dy=1.):
    new = riv_i, riv_j
    old = riv_i, riv_j
    avulsion_type = 0
    a = 0
    loc = 0
    avulse_length = 0
    new_length = 0
    new_course_length = 0
    avul_locs = np.zeros(0, dtype=np.int)
    path_slopes = np.zeros(0)
    crevasse_locs = np.zeros(3, dtype=np.int)
    path_diff = np.zeros(0)
    path_difference = 0

    old_length = find_riv_path_length(n, old, current_SL, ch_depth,
                                      slope, dx=dx, dy=dy)

    for a in xrange(1, len(riv_i)-1):
        if channel_is_superelevated(n, (riv_i[a], riv_j[a]),
                                    (riv_i[a-1], riv_j[a-1]),
                                    ch_depth, super_ratio, current_SL):

            # if superelevation greater than trigger ratio, determine
            # new steepest descent path
            new = steep_desc.find_course(n, riv_i, riv_j, a, ch_depth,
                                         sea_level=current_SL)

            if n[new[0][-1], new[1][-1]] < current_SL:
                new_length = find_riv_path_length(n, new, current_SL, ch_depth,
                                                  slope, dx=dx, dy=dy)
            else:
                new_length = find_path_length(n, new, current_SL, ch_depth,
                                              slope, dx=dx, dy=dy)

            if new_length < old_length:
                # calculate slope of new path

                if len(new[0][a:]) <= 1:
                    avulsed_length = find_path_length(n, (new[0][a-1:], new[1][a-1:]),
                                                      current_SL, ch_depth, slope,
                                                      dx=dx, dy=dy)
                    slope_new_path = ((n[new[0][-2], new[1][-2]] - n[new[0][-1], new[1][-1]])
                                  / avulsed_length)

                elif n[new[0][-1], new[1][-1]] < current_SL:
                    avulsed_length = find_riv_path_length(n, (new[0][a:], new[1][a:]),
                                                      current_SL, ch_depth,
                                                      slope, dx=dx, dy=dy)
                    slope_new_path = ((n[new[0][a], new[1][a]] - n[new[0][-1], new[1][-1]])
                                      / avulsed_length)

                else:
                    avulsed_length = find_path_length(n, (new[0][a:], new[1][a:]),
                                                      current_SL, ch_depth, slope,
                                                      dx=dx, dy=dy)
                    slope_new_path = ((n[new[0][a], new[1][a]] - n[new[0][-1], new[1][-1]])
                                      / avulsed_length)

                avul_locs = np.append(avul_locs, a)
                path_slopes = np.append(path_slopes, slope_new_path)
                path_diff = np.append(path_diff, (old_length - new_length))

            crevasse_locs = np.vstack((crevasse_locs, [new[0][a], new[1][a], a]))


    if (crevasse_locs.sum() > 0):
        crevasse_locs = np.delete(crevasse_locs, 0, 0)

    if avul_locs.size > 0:

        max_slope = np.argmax(path_slopes)
        loc = avul_locs[max_slope]
        path_difference = path_diff[max_slope]

        new = steep_desc.find_course(n, riv_i, riv_j, loc, ch_depth,
                                     sea_level=current_SL)

        avulsion_type = 1

        new, avulsion_type = avulse_to_new_path(n,
                                 (riv_i[loc - 1:], riv_j[loc - 1:]),
                                 (new[0][loc - 1:], new[1][loc - 1:]),
                                 current_SL, ch_depth, avulsion_type,
                                 slope, dx=dx, dy=dy)

        new = (np.append(riv_i[:loc - 1], new[0]),
               np.append(riv_j[:loc - 1], new[1]))

        avulse_length = find_riv_path_length(n, (riv_i[loc:], riv_j[loc:]),
                                             current_SL, ch_depth,
                                             slope, dx=dx, dy=dy)

        # fill up old channel... could be some fraction in the future
        # (determines whether channels are repellors or attractors)
        fill_abandoned_channel(loc, n, new, riv_i, riv_j, current_SL,
                               ch_depth, slope, dx)

        crevasse_locs = np.delete(crevasse_locs, max_slope, 0)

    else:
        new = riv_i, riv_j

    if (crevasse_locs.sum() > 0) and (splay_type > 0):

        n_before_splay = np.copy(n)

        # Don't think we need to worry about preserving old river elevations??
        # old_river_elevations = n[riv_i, riv_j]
        new_river_elevations = n[new[0], new[1]]

        for i in xrange(crevasse_locs.shape[0]):

            splay_dep = calc_crevasse_dep(dx, dy, nu, dt, ch_depth, riv_i, riv_j, n,
                                          current_SL, slope, crevasse_locs[i][2])

            if splay_dep > 0:
                FP.dep_splay(n, (crevasse_locs[i][0], crevasse_locs[i][1]),
                             splay_dep, splay_type=splay_type)

        # n[riv_i, riv_j] = old_river_elevations
        n[new[0], new[1]] = new_river_elevations
        n_splay = n - n_before_splay
        splay_depth += n_splay

    return (new, avulsion_type, loc, avulse_length, path_difference, splay_depth)