Example #1
    def _ccmp_near_cwind(self, stn, ccmp_lat, ccmp_lon, uwnd, vwnd):
        # find the CCMP cell that the cwind station falls into
        stn_lat = stn.latitude
        stn_lon = stn.longitude
        lat_indices = utils.find_index([stn_lat, stn_lat], 'lat')
        lon_indices = utils.find_index([stn_lon, stn_lon], 'lon')

        if (abs(ccmp_lat[lat_indices[0]] - stn_lat) <
                abs(ccmp_lat[lat_indices[1]] - stn_lat)):
            ccmp_lat_idx = lat_indices[0]
        else:
            ccmp_lat_idx = lat_indices[1]

        if (abs(ccmp_lon[lon_indices[0]] - stn_lon) <
                abs(ccmp_lon[lon_indices[1]] - stn_lon)):
            ccmp_lon_idx = lon_indices[0]
        else:
            ccmp_lon_idx = lon_indices[1]

        if (abs(ccmp_lat[ccmp_lat_idx] - stn_lat) > 0.25
                or abs(ccmp_lon[ccmp_lon_idx] - stn_lon) > 0.25):
            self.logger.error('Failed to get the WVC near the cwind station.')

        # calculate WVC's speed and direction
        ccmp_u_wspd = uwnd[ccmp_lat_idx][ccmp_lon_idx]
        ccmp_v_wspd = vwnd[ccmp_lat_idx][ccmp_lon_idx]
        ccmp_wspd = math.sqrt(ccmp_u_wspd**2 + ccmp_v_wspd**2)
        # Convert CCMP's Wind Vector Azimuth to
        # NDBC's Meteorological Wind Direction
        ccmp_wdir = math.degrees(math.atan2(ccmp_u_wspd, ccmp_v_wspd))
        ccmp_wdir_converted = (ccmp_wdir + 180) % 360

        return ccmp_wspd, ccmp_wdir_converted
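
A quick sanity check of the azimuth-to-meteorological conversion used at the end of this example (a standalone sketch, not part of the original class; the helper name is illustrative): a wind blowing toward the north has a vector azimuth of 0° and is reported meteorologically as coming from 180°, i.e. from the south.

import math

def vector_azimuth_to_met_direction(u, v):
    # Vector azimuth: the direction the wind blows toward, clockwise from north.
    # Meteorological direction: the direction the wind blows from.
    azimuth = math.degrees(math.atan2(u, v))
    return (azimuth + 180) % 360

# A wind toward the north (u=0, v=5) comes from the south (180 deg);
# a wind toward the east (u=5, v=0) comes from the west (270 deg).
print(round(vector_azimuth_to_met_direction(0, 5)))  # 180
print(round(vector_azimuth_to_met_direction(5, 0)))  # 270
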
Example #2
File: search.py Project: frugeles/geoapi
def search_insee(db, code_post, city):
    city = city.replace('CEDEX', '')
    city_pos_list = find_all_from_index(code_post, db.cities_post_index,
                                        db.cities['code_post'], string=True)
    cities = [db.cities[pos] for pos in city_pos_list]
    if len(cities) == 0:
        lo = find_index(code_post[:2] + '000', db.cities_post_index,
                        db.cities['code_post'], string=True)
        hi = find_index(code_post[:2] + '999', db.cities_post_index,
                        db.cities['code_post'], string=True)
        cities = [db.cities[db.cities_post_index[idx]] for idx
                  in range(lo, hi + 1)]
    names = [c['nom_commune'].decode('UTF-8') for c in cities]
    city, max_score = best_match(city, names)
    return cities[city]['code_insee'].decode('UTF-8') if city is not None \
        else None
Example #3
    def predict_helper(self, x, node, verbose):
        offset = node.depth * "     "
        if verbose:
            print(node.string())

        if node.is_leaf:
            return node.label

        idx = node.split_idx
        categorical = node.split_categorical
        thresholds = node.split_thresholds

        if categorical:
            categories = thresholds
            x_val = x[idx]
            child = categories.index(x_val)
            if verbose:
                print(offset + " Observed value: ", x_val)
            return self.predict_helper(x, node.children[child], verbose)
        else:
            x_val = x[idx]
            child = find_index(x_val, thresholds)
            if verbose:
                print(offset + " Observed value: ", x_val)
            return self.predict_helper(x, node.children[child], verbose)
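
The numerical branch above relies on a find_index(x_val, thresholds) helper, not shown on this page, that maps the observed value to the index of the child whose threshold interval contains it. A minimal sketch of one plausible implementation, assuming thresholds is sorted in ascending order (the real helper may treat the boundaries differently; it only needs to agree with how the splits were built):

import bisect

def find_index(value, thresholds):
    # For sorted thresholds [t0, t1, ..., t_{n-1}] this maps
    # value < t0 -> 0, t0 <= value < t1 -> 1, ..., value >= t_{n-1} -> n,
    # i.e. the index of the child whose interval contains the value.
    return bisect.bisect_right(thresholds, value)

# Thresholds [2.5, 7.0] split a numerical feature into three children.
print(find_index(1.0, [2.5, 7.0]))  # 0
print(find_index(3.0, [2.5, 7.0]))  # 1
print(find_index(9.0, [2.5, 7.0]))  # 2
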
Example #4
    def transload(self, confl_resol_method):
        conf_resolver = conflict_resolver(confl_resol_method)

        directories = ['/']
        while len(directories) > 0:
            curr_dir = directories.pop(0)
            src_dirs, src_files = self.get_child_dirs_files(self.source, curr_dir)

            directories += src_dirs

            # ensure that the directory exists on the destination server
            self.dest.mkdir(curr_dir)

            dst_dirs, dst_files = self.get_child_dirs_files(self.dest, curr_dir)
            dst_file_paths = [path for (path, file) in dst_files]

            for src_f_path, src_f in src_files:
                # check whether a directory with the same name exists on the destination
                if src_f_path in dst_dirs:
                    self.log(ERROR_MESSAGES['fold'] + src_f_path)
                else:
                    # check whether the file already exists on the destination
                    index = find_index(dst_file_paths, src_f_path)
                    # copy if the file does not exist or the conflict resolver allows replacing it
                    if index is None or conf_resolver.should_replace(dst_files[index][1], src_f):
                        self.copy_file(src_f_path)
                    else:
                        self.log(ERROR_MESSAGES['file'] + src_f_path)
Example #5
def extract_senses(content: Tag) -> List[str]:
    text = extract_text_from_parent_tag(content, 'Sentidos')
    if not text:
        p_list = content.find_all('p')
        languages_tag = content.find('b', string=re.compile(r'Idiomas:?\s?'))
        language_index = find_index(p_list, lambda p: p.b == languages_tag) - 1
        text = p_list[language_index].b.parent.text

    return [text.strip().capitalize() for text in text.split(',')]
Example #6
def extract_special_abilities(content: Tag) -> List[SpecialAbility]:
    p_list = content.find_all('p')
    tag = content.find('b', string=re.compile(r'Desafío:?\s?'))
    cr_index = find_index(p_list, lambda p: p.b == tag)
    tag = content.find('b', string=re.compile(r'Acciones:?\s?'))
    actions_index = find_index(p_list, lambda p: p.b == tag)
    if actions_index == cr_index + 1:
        return []

    special_abilities = p_list[cr_index + 1:]

    result = []
    caster = None
    for tag in special_abilities:
        ability = {}
        if len(tag.contents) == 1 and result:
            break
        if tag.i and tag.i.b:
            name = re.sub(r'\.|:$', '', tag.i.text.replace('\n', ' ').strip())
            desc = tag.text.replace(tag.i.text, '').replace('\n', ' ').strip()
            ability = {'name': name, 'description': desc}

            caster = re.search('Lanzamiento de conjuro', tag.i.text)
            if caster:
                ability['spells'] = []

            result.append(ability)

        elif tag.b:
            name = re.sub(r'\.|:$', '', tag.b.text.replace('\n', ' ').strip())
            desc = tag.text.replace(tag.b.text, '').replace('\n', ' ').strip()
            if caster:
                spell = {'name': name, 'description': desc}
                result[-1]['spells'].append(spell)
            else:
                ability = {'name': name, 'description': desc}
                result.append(ability)

        else:
            description = tag.text.replace('\n', ' ').strip()
            result[-1]['description'] += '\n' + description

    return result
Example #7
def main():
    seq = sys.stdin.readline().strip()

    num_coded = [1 if c == '(' else -1 for c in seq]

    part1 = sum(num_coded)
    print('part 1: ', part1)

    part2 = find_index(lambda x: x < 0, scanl(add, 0, num_coded))
    print('part 2: ', part2)
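
The solution above relies on two helpers not shown on this page, scanl and find_index. A minimal sketch of what they are assumed to do, a Haskell-style left scan and a predicate-based search for the first matching index:

from itertools import accumulate, chain
from operator import add

def scanl(func, start, iterable):
    # Left scan: yields start, func(start, x0), func(func(start, x0), x1), ...
    return accumulate(chain([start], iterable), func)

def find_index(predicate, iterable):
    # Index of the first element satisfying the predicate, or None if none does.
    for i, item in enumerate(iterable):
        if predicate(item):
            return i
    return None

# For the parenthesis string '()())' the running floor is 0, 1, 0, 1, 0, -1,
# so the first value below zero appears at index 5, which is also the 1-based
# position of the offending character because the scan starts with a leading 0.
print(find_index(lambda x: x < 0, scanl(add, 0, [1, -1, 1, -1, -1])))  # 5
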
Example #8
File: search.py Project: frugeles/geoapi
def search_number(db, street_id, locality_id, number, max_score):
    if locality_id:
        result_idx = find_index(locality_id, db.numbers_locality_index,
                                db.numbers['locality_id'])
        n_idx = db.numbers_locality_index[result_idx]
        return address.Result.from_plate(db, n_idx, max_score)
    elif number:
        n_idx = find(street_id, db.numbers['street_id'])
        lo = None
        hi = None
        while n_idx < db.numbers.size:
            n = db.numbers[n_idx]
            if n['street_id'] != street_id:
                break
            if n['number'] == number:
                return address.Result.from_plate(db, n_idx, max_score)
            if n['number'] < number:
                lo = n_idx
            elif hi is None:
                hi = n_idx
            n_idx += 1

        # exact number was not found => interpolate address position
        if lo is not None:  # lo may legitimately be index 0
            n = db.numbers[lo]
            lon, lat = reverse_geohash(n['geohash'])
            return address.Result.from_interpolated(db, number, street_id,
                                                    lon, lat)
        else:
            n = db.numbers[hi]
            lon, lat = reverse_geohash(n['geohash'])
            return address.Result.from_interpolated(db, number, street_id,
                                                    lon, lat)

    else:
        # middle of the street
        n_idx_hi = find(street_id, db.numbers['street_id'])
        n_idx_lo = n_idx_hi
        while n_idx_hi < db.numbers.size:
            n = db.numbers[n_idx_hi]
            if n['street_id'] != street_id:
                break
            n_idx_hi += 1
        n_idx = (n_idx_lo + n_idx_hi) // 2
        return address.Result.from_street(db, n_idx)
Example #9
def extract_languages(content: Tag) -> List[str]:
    languages_text = extract_text_from_parent_tag(content, 'Idiomas')
    if not languages_text:
        p_list = content.find_all('p')
        senses_tag = content.find('b', string=re.compile(r'Sentidos:?\s?'))
        language_index = find_index(p_list, lambda p: p.b == senses_tag) + 1
        languages_text = p_list[language_index].text

    cant_speak = re.search(', pero no puede hablar', languages_text)

    if cant_speak:
        special = languages_text.split(';')
        special_match = None
        if len(special) > 1:
            special_match = re.search('telepatía', special[1])
            languages_text = re.sub(special[0] + ';', '', languages_text)
        result = ['No puede hablar']
        languages_text = re.sub(cant_speak.group(0), '', languages_text)
        languages_text = re.sub('^Entiende', '', languages_text).strip()
        languages_text = re.sub('^entiende', '', languages_text).strip()
        result += [
            'Entiende ' + text.strip().lower()
            for text in re.split(',| y | e ', languages_text) if text != '-'
        ]
        if special_match:
            result.append(special[1].strip().capitalize())

    else:
        result = [
            text.strip().capitalize()
            for text in re.split(',| y | e ', languages_text) if text != '-'
        ]

    if result and result[-1] == 'Pero no lo habla':
        result[-2] = result[-2] + ', pero no lo habla'
        result.pop()

    return result
Example #10
def make_table_of_contents(menu_list):
    global count
    global FLAG
    tree_pos = None
    head = None
    head_desc = None
    topic_url = None
    menu_itm_txt = None

    if menu_list:
        for menu_item in menu_list:
            # TODO convert string parsing to regex
            # find the menu position (tree_pos)
            index_pos = find_index(menu_item, '(') + 1
            tree_pos = menu_item[index_pos:index_pos + 1]

            # find the head and head description
            menu_itm_txt = menu_item[index_pos:]
            index_pos = find_index(menu_itm_txt, '"') + 1
            end_pos = find_index(menu_itm_txt, '"', index_pos)
            head_desc = menu_itm_txt[index_pos: end_pos]
            head = head_desc.translate(None, ":,'")
            head = head.replace(" ", "_")

            # find topic_url
            index_pos = find_index(menu_itm_txt, '"', end_pos + 1)
            end_pos = find_index(menu_itm_txt, '"', end_pos + 1 + 1)
            menu_itm_txt = menu_itm_txt[index_pos:]
            index_pos = find_index(menu_itm_txt, '"')

            if index_pos > -1:
                end_pos = find_index(menu_itm_txt, '"', index_pos + 1)
                topic_url = menu_itm_txt[index_pos + 1:end_pos]
            else:
                menu_itm_txt = None
                topic_url = None

            count += 1

            build_dictionary(tree_pos, head, head_desc, topic_url)
Example #11
def extract_description(content: Tag) -> str:
    p_list = content.find_all('p')
    ac_tag = content.find('b', string=re.compile(r'Clase de Armadura:?\s?'))
    ac_index = find_index(p_list, lambda p: p.b == ac_tag)
    return '\n'.join([p.text.replace('\n', ' ') for p in p_list[1:ac_index]])
Example #12
def test(data_file, model_file, sample_text, newtext_length):
    training_data = utils.read_data(data_file)
    # print training_data
    encode, decode = utils.build_encode_decode_dictionary(training_data)
    words_size = len(encode)

    x = tf.placeholder("float", [None, num_input, 1])
    y = tf.placeholder("float", [None, words_size])

    pred = build_lstm(x, num_input, words_size)

    start_time = time.time()
    # Model evaluation
    correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    init = tf.global_variables_initializer()

    with tf.Session() as session:
        session.run(init)
        saver = tf.train.Saver()
        saver.restore(session, model_file)

        sample_text = sample_text.strip()
        words = sample_text.split(' ')
        try:
            symbols_in_keys = [
                encode[str(words[i])] for i in range(len(words))
            ]
            for i in range(newtext_length):
                keys = np.reshape(np.array(symbols_in_keys),
                                  [-1, num_input, 1])
                onehot_pred = session.run(pred, feed_dict={x: keys})
                onehot_pred_index = int(tf.argmax(onehot_pred, 1).eval())
                sample_text = "%s %s" % (sample_text,
                                         decode[onehot_pred_index])
                symbols_in_keys = symbols_in_keys[1:]
                symbols_in_keys.append(onehot_pred_index)
            print(sample_text)
            predictions = sample_text.split(" ")
            original_text = predictions[:3]
            predictions = predictions[3:]
            index = utils.find_index(training_data, original_text)
            if index == -1:
                print "No such original text find. Hence accuracy cannot be " \
                      "calculated."
            else:
                count_correct = []
                for i in range(len(predictions)):
                    if predictions[i] == training_data[index + i]:
                        count_correct.append(1)
                    else:
                        count_correct.append(0)
                print "Test Accuracy :", (count_correct.count(1) *
                                          1.0) / len(count_correct)
                plot_test_accuracy(count_correct)
        except KeyError:
            print("Word not in the encoded dictionary")

    print('Testing completed!')
    print('Testing time:', time.time() - start_time)