Code example #1
def _enumerate(reversible, reverse_index):
    if reverse_index is False:
        return __builtin__.enumerate(reversible)
    else:
        my_list = list(__builtin__.enumerate(reversed(reversible)))
        my_list.reverse()
        return my_list
Code example #2
def enumerate(reversible, reverse_index=False):
    '''
    Iterate over `(i, item)` pairs, where `i` is the index number of `item`.
    
    This is an extension of the builtin `enumerate`. It also allows getting a
    reverse index by specifying `reverse_index=True`, which causes `i` to count
    down to zero instead of up from zero, so the `i` of the last member will be
    zero.
    '''
    if reverse_index is False:
        return __builtin__.enumerate(reversible)
    else:
        my_list = list(__builtin__.enumerate(reversed(reversible)))
        my_list.reverse()
        return my_list
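A quick check of the reverse-index behavior the docstring describes, sketched with plain builtins (the expected pairings are shown as comments, not taken from the library's tests):

items = ['a', 'b', 'c']
forward = list(enumerate(items))                     # [(0, 'a'), (1, 'b'), (2, 'c')]
backward = [(len(items) - 1 - i, x) for i, x in enumerate(items)]
print(backward)                                      # [(2, 'a'), (1, 'b'), (0, 'c')] -- the last member gets 0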
Code example #3
def load_ustar_cut(siteid,
                   sitedir,
                   first_year,
                   last_year,
                   nee_perc_ustar_cut_template=NEE_PERC_USTAR_CUT):
    nee_perc_ustar_cut = nee_perc_ustar_cut_template.format(s=siteid,
                                                            sd=sitedir,
                                                            fy=first_year,
                                                            ly=last_year)
    log.debug("{s}: processing file: {f}".format(s=siteid,
                                                 f=nee_perc_ustar_cut))
    nee_perc_ustar_cut_lines = load_csv_lines(filename=nee_perc_ustar_cut)
    if (last_year - first_year) < 2:
        if not nee_perc_ustar_cut_lines:
            log.warning("{s}: too few years, {e} file not created: {f}".format(
                s=siteid, e='NEE CUT USTAR percentiles', f=nee_perc_ustar_cut))
        nee_perc_ustar_cut_values = {
            k: '-9999'
            for i, k in enumerate(PERCENTILES)
        }
        nee_perc_ustar_cut_values['50.00'] = nee_perc_ustar_cut_values['50']
    else:
        if not nee_perc_ustar_cut_lines:
            raise ONEFluxError("{s}: {e} file not found: {f}".format(
                s=siteid, e='NEE CUT USTAR percentiles', f=nee_perc_ustar_cut))

        if (len(nee_perc_ustar_cut_lines) == 3
                and not nee_perc_ustar_cut_lines[2].strip()
            ) or len(nee_perc_ustar_cut_lines) > 2:
            raise ONEFluxError(
                "{s}: NEE USTAR CUT file too many lines ({l}): {f}".format(
                    s=siteid,
                    l=len(nee_perc_ustar_cut_lines),
                    f=nee_perc_ustar_cut))
        elif not (nee_perc_ustar_cut_lines[0][0].startswith(PERCENTILES[0])
                  and nee_perc_ustar_cut_lines[0][-1].strip().endswith(
                      PERCENTILES[-1])):
            raise ONEFluxError(
                "{s}: NEE USTAR CUT bad headers ({h}): {f}".format(
                    s=siteid,
                    h=nee_perc_ustar_cut_lines[0],
                    f=nee_perc_ustar_cut))
        nee_perc_ustar_cut_values = {
            k: nee_perc_ustar_cut_lines[1][i].strip()
            for i, k in enumerate(PERCENTILES)
        }
        nee_perc_ustar_cut_values['50.00'] = nee_perc_ustar_cut_values['50']
    return nee_perc_ustar_cut_values
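The enumerate idiom at the heart of this loader pairs a fixed header list with a row of values by position. A standalone sketch with placeholder data (these percentile labels and values are stand-ins, not real ONEFlux output):

percentiles = ['1.25', '50', '98.75']                # stand-in for PERCENTILES
row = [' 0.1 ', ' 0.5 ', ' 0.9 ']                    # stand-in for a parsed CSV data line
values = {k: row[i].strip() for i, k in enumerate(percentiles)}
print(values['50'])                                  # '0.5'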
Code example #4
    def _fill_dates_with_zero(self, all_dates, question_list):
        dates_str_list = []
        # Collect all dates and remove duplicates
        no_duplication_dates = list(set(all_dates))
        # Get the max and the min date
        dates_max_min_list = [
            min(no_duplication_dates),
            max(no_duplication_dates)
        ] if no_duplication_dates != [] else []
        # Fill in the dates between the min and max date
        date_compare = int(
            (max(no_duplication_dates) - min(no_duplication_dates)
             ).days) if no_duplication_dates != [] else 0
        if date_compare > 1:
            for i in xrange(1, date_compare):
                # offset from the min date (dates_max_min_list[0])
                val = dates_max_min_list[0] + datetime.timedelta(days=i)
                dates_max_min_list.insert(i, val)

        # question_list items: [list of dates, counts per choice, choice text]
        # Append the count for a choice if its date is in the date list, else append zero
        for i, filtered in enumerate(question_list):
            list_choice = []
            for date in dates_max_min_list:
                item_list = 0
                if date in filtered[0]:
                    index = filtered[0].index(date)
                    item_list = filtered[1][index]
                list_choice.append(item_list)
            question_list[i] = list_choice
        # Convert each date to a "month day year" string
        for date in dates_max_min_list:
            dates_str_list.append(date.strftime('%b %d %Y'))
        return question_list, dates_str_list
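The gap-filling step above, seen in isolation with made-up dates (a sketch, not the view's real data): every day between the min and max is generated by offsetting the min date with a timedelta.

import datetime

dates = [datetime.date(2020, 1, 4), datetime.date(2020, 1, 1)]
lo, hi = min(dates), max(dates)
filled = [lo + datetime.timedelta(days=i) for i in range((hi - lo).days + 1)]
print(filled)                                        # Jan 1 through Jan 4, one entry per day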
Code example #5
    def validate_test_execution_result(test, result, email_input=default_inputs['email']):
        header_idx = {
            'email': -1,
            'contact_header': -1,
            'contact_body': -1,
            'logo': -1,
            'email_output': -1,
            'error': -1
        }
        test.assertEqual(len(result.headers), len(header_idx))
        for idx, header in enumerate(result.headers):
            header_idx[header] = idx
        for header in header_idx:
            if header_idx[header] == -1:
                test.fail('Header not found in response %s' % header)

        test.assertEqual(len(result.rows), 1)
        test.assertEqual(len(result.rows[0]), len(header_idx))

        test.assertEqual(result.rows[0][header_idx['email']], email_input)
        test.assertEqual(result.rows[0][header_idx['contact_header']], u'dexi.io')
        assert u'Norre Voldgade 24' in result.rows[0][header_idx['contact_body']]
        assert u'FILE' in result.rows[0][header_idx['logo']]
        test.assertEqual(email_input, result.rows[0][header_idx['email_output']])
        test.assertIsNone(result.rows[0][header_idx['error']])
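The header-lookup idiom used above, in isolation: enumerate turns a header row into a name-to-position map so row cells can be addressed by name (toy headers and row, not the test's real response):

headers = ['email', 'error']
row = ['a@b.c', None]
header_idx = {h: i for i, h in enumerate(headers)}   # name -> column position
print(row[header_idx['email']])                      # 'a@b.c'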
Code example #6
File: merger.py Project: JonMosenkis/Sefaria-Data
def collapse(comantator, column, datafile, mappingfile):
    """
    Collapse the refs into a dictionary, key: the collapsed column (segments in this case),
    value: list of refs (containing all the commentary Refs).
    Returns a list, but dict.update(list of pairs) can be used to get a dictionary.
    """
    mapp = old2koren(mappingfile)
    with open(datafile, 'rb') as tsvin:
        tsvin = csv.DictReader(tsvin, delimiter='\t')
        segments_dict = OrderedDict()
        prev = None
        for i, row in enumerate(tsvin):
            try:
                if not row[column]:
                    continue
                elif mapp[row[column]] == prev:
                    if [row[com] for com in comantator]:  # and [row[com] not in segment for com in comantator]:
                        segment.extend([convert_display(row[com]) for com in comantator])
                else:
                    segment = []
                    segment.extend([convert_display(row[com]) for com in comantator])
                segments_dict[mapp[row[column]]] = segment
                prev = mapp[row[column]]
            except KeyError:
                print 'on key ' + row[com] + ' there was a problem'
        print segments_dict
        return segments_dict
Code example #7
File: mvp.py Project: TheExGenesis/aligning_rs
def main():
    # Config
    mvpDilemmaPolicyPool = ['Cooperator', 'Defector', 'Random']
    mediatorParamPool = [mediatorParams(evalPairPayoff)]

    for mediatorParam in mediatorParamPool:
        mvpNetwork = baNetworkParams(N=1000, m=1)
        mvpAgent = simplestAgentParams(
            dilemma_policy_pool=mvpDilemmaPolicyPool)
        mvpGame = pdGameParams(alpha=2)
        config = experimentConfig(networkParams=mvpNetwork,
                                  agentParams=mvpAgent,
                                  gameParams=mvpGame,
                                  mediatorParams=mediatorParam)
        expName = 'test'
        episodes = runExperiment('test', config, 2)
        for i, ep in enumerate(episodes):
            ep['happenings'] = pd.DataFrame.from_dict(ep['happenings']).T
            (_coconut_partial(
                storeEpisode, {
                    1:
                    '{_coconut_format_0}/{_coconut_format_1}{_coconut_format_2}'
                    .format(_coconut_format_0=(storagePath),
                            _coconut_format_1=(expName),
                            _coconut_format_2=(i))
                }, 2))(ep)
Code example #8
File: prelude.py Project: 99plus2/copperhead
def enumerate(x):
    """
    Return a sequence containing (index, value) pairs, with values
    from the input sequence.

    """
    return list(__builtin__.enumerate(x))
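Usage matches the builtin except that the result is materialized as a list up front (illustrated here with a plain string input):

print(list(enumerate('abc')))                        # [(0, 'a'), (1, 'b'), (2, 'c')]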
Code example #9
def run_user_test(server_list, ezbox, client_list, vip_list):
    print "FUNCTION " + sys._getframe().f_code.co_name + " called"
    process_list = []
    port = '80'

    #service scheduling algorithm is SH without port
    for i in range(service_count):
        ezbox.add_service(vip_list[i], port, sched_alg_opt='')
    for server in server_list:
        ezbox.add_server(server.vip, port, server.ip, port)

    for index, client in enumerate(client_list):
        process_list.append(
            Process(target=client_execution,
                    args=(
                        client,
                        vip_list[index % service_count],
                    )))
    for p in process_list:
        p.start()

    time.sleep(1)
    # Disable server - director will remove server from IPVS
    print 'remove test.html'
    server_list[0].delete_test_html()
    time.sleep(60)
    print 're-add test.html'
    server_list[0].set_test_html()

    for p in process_list:
        p.join()
Code example #10
    def parse_semag(self, str, mass):
        reg_book = re.compile(u'ו?ב?(עשין|שם|לאוין|לאין)')
        split = re.split(reg_book, str.strip())
        str_list = filter(None, [item.strip() for item in split])
        resolveds = []
        derabanan_flag = False
        book = None
        reg_siman = u"סי'?|סימן"
        reg_vav = u'ו{}'.format(reg_siman)
        for i, word in enumerate(str_list):
            if derabanan_flag:
                derabanan_flag = False
                # resolved = self._tracker.resolve(book, [1])
                resolved = resolveExceptin(self._tracker, book, [1])
                resolveds.append(resolved)
                continue
            elif re.search(reg_book, word):
                try:
                    if word != u'שם':
                        derabanan = filter(None, [item.strip() for item in re.split(u'(מד"ס|מ?דרבנן)',str_list[i+1].strip())])
                except IndexError:
                    mass.write_shgia('error smg, no place in book notation')
                    return
                if word == u'עשין' and len(derabanan) > 1:
                    book = re.search(u'[א-ה]',derabanan[1])
                    book = self._table[book.group(0)]
                    derabanan_flag = True
                elif re.match(reg_book, word):
                    book = self._table[word]
                else:
                    mass.write_shgia("error smg, don't recognize book name")
                    return
            else:
                mitzva = re.split('\s', word)
                for m in mitzva:
                    if re.search(reg_vav, m) and not book:
                        # resolved = self._tracker.resolve(book, [None])
                        resolved = resolveExceptin(self._tracker, book, [None])
                        resolveds.append(resolved)

                    if re.match(u'ו?שם$', m):  # 'שם' with an optional leading vav
                        m = None
                    elif re.search(reg_siman, m):
                        continue
                    elif getGematriaVav(m, mass):
                        m = getGematriaVav(m, mass)
                    else:
                        m = None
                    # resolved = self._tracker.resolve(book, [m])
                    resolved = resolveExceptin(self._tracker, book, [m])
                    resolveds.append(resolved)
        if not resolveds:
            # resolved = self._tracker.resolve(book, [None])
            resolved = resolveExceptin(self._tracker, book, [None])

            resolveds.append(resolved)
        if len([item for item in resolveds if not isinstance(item, Ref)]) > 0:
            mass.write_shgia(u'error from ibid in Ref or table none problem')
        return resolveds
Code example #11
 def parse_semag(self, str, mass):
     # split = re.split('\s', str.strip())
     reg_book = re.compile(u'ו?(עשין|שם|לאוין)')
     split = re.split(reg_book, str.strip())
     # str_list = filter(None, split)
     str_list = filter(None, [item.strip() for item in split])
     resolveds = []
     # it = iter(str_list)
     derabanan_flag = False
     book = None
     for i, word in enumerate(str_list):
         if derabanan_flag:
             derabanan_flag = False
             resolved = self._tracker.resolve(book, [1])
             resolveds.append(resolved)
             continue
         elif re.search(reg_book, word):
             # book = word
             # if book == u'שם':
             #     book = None
             # elif book == u'לאוין':
             #     book = u'Sefer Mitzvot Gadol, Volume One'
             try:
                 if word != u'שם':
                     derabanan = filter(None, [item.strip() for item in re.split(u'(מד"ס|מ?דרבנן)',str_list[i+1].strip())])
             except IndexError:
                 # mass.ErrorFile.write('error smg, no place in book notation')
                 mass.error_flag = 'error smg, no place in book notation'
                 print 'error smg, no place in book notation'
                 return
             if word == u'עשין' and len(derabanan) > 1 and (derabanan[0] != u"סימן"):
                 book = re.search(u'[א-ה]',derabanan[1])
                 # print book.group(0)
                 book = self._table[book.group(0)]
                 derabanan_flag = True
             elif re.match(reg_book, word):
                 book = self._table[word]
             else:
                 mass.ErrorFile.write("error smg, don't recognize book name")
                 print "error smg, don't recognize book name", book
                 return
         else:
             mitzva = re.split('\s', word)
             for m in mitzva:
                 # if m == u'סימן':
                 #     continue
                 if m == u'שם':
                     m = None
                 elif getGematriaVav(m):
                     m = getGematriaVav(m)
                 else:
                     m = None
                 resolved = self._tracker.resolve(book, [m])
                 resolveds.append(resolved)
     if not resolveds:
         resolved = self._tracker.resolve(book, [None])
         resolveds.append(resolved)
     # print resolveds
     return resolveds
Code example #12
def fromCSV(fromcsv, newfile):
    f = codecs.open(newfile, 'w', encoding = 'utf-8')
    with open(fromcsv, 'r') as csvfile:
        file_reader = csv.DictReader(csvfile)
        for i, row in enumerate(file_reader):
            if not row:
                continue
            f.write(row[u'original'].strip() + u'\n')
Code example #13
    def user_init(self, setup_num):
        print "FUNCTION " + sys._getframe().f_code.co_name + " called"

        self.test_resources = ALVS_Players_Factory.generic_init(
            setup_num, service_count, server_count, client_count)

        for i, s in enumerate(self.test_resources['server_list']):
            s.vip = self.test_resources['vip_list'][i % service_count]
Code example #14
File: merger.py Project: JonMosenkis/Sefaria-Data
def make_dict(row_lst, comantators):
    """
    row_lst is a list of rows
    comantators is a list of comantators
    """
    letter_dict = OrderedDict()
    for i, row in enumerate(row_lst):
        letter_dict[i] = (row[u'page running counter'], [row[com] for com in comantators])
    return letter_dict
Code example #15
File: __init__.py Project: flaviojs/pyparse
    def starts_with(self, s):
        r""" TODO """
        if not isinstance(s, str):
            raise ValueError, (s, )  # expecting str
        for _i, _c in enumerate(s):
            if self[_i] != _c:
                return False  # does not match

        return True  # matches
Code example #16
def user_init(setup_num):
    print "FUNCTION " + sys._getframe().f_code.co_name + " called"

    dict = generic_init(setup_num, service_count, server_count, client_count)

    for i, s in enumerate(dict['server_list']):
        s.vip = dict['vip_list'][i % service_count]

    return convert_generic_init_to_user_format(dict)
Code example #17
 def obj2dict(self,passing):
     citations = []
     if passing == 1:
         dict = self.get_dict(0)
         citations.append(dict)
     elif passing == 2:
         for i, small_letter in enumerate(self._page_counter):
             dict = self.get_dict(i)
             citations.append(dict)
     return citations
Code example #18
def reverse_collapse(fromcsv, collapsed_file):
    f = codecs.open(u'{}.txt'.format(collapsed_file), 'w', encoding='utf-8')
    with open(fromcsv, 'r') as csvfile:
        file_reader = csv.DictReader(csvfile)
        prev = None
        for i, row in enumerate(file_reader):
            if prev != (row[u'original'].strip() + u'\n'):
                f.write(row[u'original'].strip() + u'\n')
            prev = (row[u'original'].strip() + u'\n')
    run1(u'{}'.format(collapsed_file), u'{}'.format(collapsed_file))
Code example #19
def _enumerate(iterable, reverse_index):
    if reverse_index is False:
        return __builtin__.enumerate(iterable)
    else:
        from python_toolbox import sequence_tools
        try:
            length = sequence_tools.get_length(iterable)
        except AttributeError:
            iterable = nifty_collections.LazyTuple(iterable)
            length = len(iterable)
        return itertools.izip(range(length - 1, -1, -1), iterable)
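Unlike the list-building variants in examples #1 and #2, this version stays lazy: it pairs a descending index range with the iterable. The same pairing, sketched with builtins (izip is Python 2's lazy zip):

items = ['a', 'b', 'c']
pairs = zip(range(len(items) - 1, -1, -1), items)
print(list(pairs))                                   # [(2, 'a'), (1, 'b'), (0, 'c')]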
Code example #20
 def sorted(iterable, cmp=None, key=None, reverse=False):
     original = list(iterable)
     if key:
         l2 = [(key(elt), index) for index, elt in builtins.enumerate(original)]
     else:
         l2 = original
     l2.sort(cmp)
     if reverse:
         l2.reverse()
     if key:
         return [original[index] for elt, index in l2]
     return l2
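This is the classic decorate-sort-undecorate pattern: enumerate attaches each element's original index so the values can be recovered after sorting on the key. A minimal sketch of the same idea:

words = ['bb', 'a', 'ccc']
decorated = [(len(w), i) for i, w in enumerate(words)]   # decorate with (key, index)
decorated.sort()
print([words[i] for _, i in decorated])                  # ['a', 'bb', 'ccc']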
Code example #21
File: merger.py Project: JonMosenkis/Sefaria-Data
def exctractRefs(datafile, col,filetype):
    refs = set()
    if filetype == u'tsv':
        with open(datafile, 'rb') as tsvin:
            tsvin = csv.DictReader(tsvin, delimiter='\t')
            for i, row in enumerate(tsvin):
                try:
                    ref = convert_display(row[col])
                    refs.add(ref)
                except:
                    continue
    if filetype == u'csv':
        with open(datafile, 'r') as csvfile:
            file_reader = csv.DictReader(csvfile)
            for i, row in enumerate(file_reader):
                try:
                    for ref in eval(row[col]):
                        refs.add(ref)
                except:
                    continue
    refs.discard('')  # drop the empty string if present (remove() would raise and return None)
    return refs
Code example #22
def load_ustar_vut(siteid,
                   sitedir,
                   year_range,
                   nee_perc_ustar_vut_template=NEE_PERC_USTAR_VUT):
    nee_perc_ustar_vut = nee_perc_ustar_vut_template.format(s=siteid,
                                                            sd=sitedir)
    log.debug("{s}: processing file: {f}".format(s=siteid,
                                                 f=nee_perc_ustar_vut))
    nee_perc_ustar_vut_lines = load_csv_lines(filename=nee_perc_ustar_vut)
    if not nee_perc_ustar_vut_lines:
        raise ONEFluxError("{s}: {e} file not found: {f}".format(
            s=siteid, e='NEE VUT USTAR percentiles', f=nee_perc_ustar_vut))

    nee_perc_ustar_vut_values = {i: {} for i in year_range}
    if not ((nee_perc_ustar_vut_lines[0][0].lower().startswith('timestamp') or\
             nee_perc_ustar_vut_lines[0][0].lower().startswith('isodate') or\
             nee_perc_ustar_vut_lines[0][0].lower().startswith('year')) and\
            nee_perc_ustar_vut_lines[0][-1].endswith(PERCENTILES[-1])):
        raise ONEFluxError("{s}: NEE USTAR VUT bad headers ({h}): {f}".format(
            s=siteid, h=nee_perc_ustar_vut_lines[0], f=nee_perc_ustar_vut))
    elif (int(nee_perc_ustar_vut_lines[1][0]) != year_range[0]) or (int(
            nee_perc_ustar_vut_lines[-1][0]) != year_range[-1]):
        raise ONEFluxError(
            "{s}: NEE USTAR VUT incompatible year range data=({d}), info=({i})"
            .format(s=siteid,
                    d="{f}-{l}".format(f=nee_perc_ustar_vut_lines[1][0],
                                       l=nee_perc_ustar_vut_lines[-1][0]),
                    i="{f}-{l}".format(f=year_range[0], l=year_range[-1])))
    for y, year in enumerate(year_range):
        nee_perc_ustar_vut_values[year] = {
            k: nee_perc_ustar_vut_lines[y + 1][i + 1].strip()
            for i, k in enumerate(PERCENTILES)
        }
        nee_perc_ustar_vut_values[year]['50.00'] = nee_perc_ustar_vut_values[
            year]['50']
    return nee_perc_ustar_vut_values
Code example #23
def ImageViewer():
	dir = MediaContainer()
	items = RSS.FeedFromURL(RSS_FEED).entries
	for i, item in enumerate(items):
		try:
			img = [x.href for x in item.links if x.rel == 'enclosure'][0]
		except:
			continue
		title = item.title
		updated = datetime.fromtimestamp(mktime(item.updated_parsed))
		counter = '%s/%s' % (i + 1, len(items))
		# Escape HTML entities
		summary = HTML.ElementFromString( item.get('summary', '') ).text_content().replace('\n', ' ')
		summary = '%s\n%s\n%s   %s' % (title, summary, counter, days_ago(updated))
		dir.Append(PhotoItem(img, title=title, summary=summary))
	return dir
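The `i + 1` in the counter is the usual correction for enumerate's zero-based indices; since Python 2.6, enumerate also takes a start argument (used in examples #34 and #39 below) that avoids the manual offset:

items = ['first.jpg', 'second.jpg']
for i, name in enumerate(items, 1):                  # start counting at 1
    print('%s/%s %s' % (i, len(items), name))        # 1/2 first.jpg, then 2/2 second.jpg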
Code example #24
 def _reformat_dict_static(self, choice_question_list, question_list,
                           dates_str_list, result):
     # Reformat the dict
     choice_dict = {}
     old_choice = 0
     for index, choice in enumerate(choice_question_list):
         if choice[1] == old_choice:
             choice_dict.update({choice[0]: question_list[index]})
         else:
             choice_dict = {choice[0]: question_list[index]}
         result[choice[1]].update({
             'choices': choice_dict,
             'dates': dates_str_list
         })
         old_choice = choice[1]
     return result
Code example #25
File: day11.py Project: AGiantSquid/advent_of_code
def get_safe_new_state(state, new_elevator, items_to_move):
    """Return new state after items have moved to new floor if valid."""
    new_testing_facility = tuple(
        ((existing_floor
          | items_to_move if i == new_elevator else existing_floor -
          items_to_move if i == state['elevator'] else existing_floor)
         for (i, existing_floor) in enumerate(state['testing_facility'])))

    both_floors_are_safe = floor_is_safe(
        new_testing_facility[state['elevator']]) and floor_is_safe(
            new_testing_facility[new_elevator])

    if both_floors_are_safe:
        return _coconut_tail_call(Map, {
            "elevator": new_elevator,
            "testing_facility": new_testing_facility
        })
Code example #26
def convert_type_to_unicode(object):
    '''Converts a generic string representable object to unicode.'''
    if builtins.hasattr(object, '__unicode__'):
        return object.__unicode__()
    elif builtins.hasattr(object, '__str__'):
        try:
            object = object.__str__()
        except builtins.UnicodeEncodeError:
            if builtins.isinstance(object, Iterable):
                for index, item in builtins.enumerate(object):
                    object[index] = convert_to_unicode(item)
                object = object.__str__()
            else:
                raise
        if builtins.isinstance(object, builtins.unicode):
            return object
        return builtins.unicode(object, ENCODING)
    return builtins.unicode(object)
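The except branch rewrites a mutable sequence in place, using the index from enumerate to assign back into each slot. The idiom in isolation (plain builtins, toy data):

items = ['a', 'b']
for index, item in enumerate(items):
    items[index] = item.upper()                      # assign back through the index
print(items)                                         # ['A', 'B']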
Code example #27
    def obtener_telefono_y_datos(self, encoding, lista_dato,
                                 posicion_primer_telefono, columna_id_externo):
        id_externo = None
        if len(lista_dato) > 1:
            item = []
            for i, valor in enumerate(lista_dato):
                if i == posicion_primer_telefono:
                    telefono = valor
                elif i == columna_id_externo:
                    id_externo = valor
                else:
                    item.append(valor.decode(encoding))
        else:
            telefono = lista_dato[0]
            item = ['']

        datos = json.dumps(item)
        return telefono, datos, id_externo
Code example #28
def write_errfile(filename):
    error = codecs.open(u'error_{}.txt'.format(filename), 'w', encoding = 'utf-8')
    with codecs.open(u'error_main.txt', 'r', 'utf-8') as fp:
        lines = fp.readlines()
        # it = iter(lines)
        e = enumerate(lines)
        for i, line in e:
            if re.search(u'error', line):
                j, k = i, i
                while not re.search(u'.txt', lines[j]):
                    j -= 1
                while (j < i):
                    error.write(lines[j])
                    j+=1
                while not re.search(u'.txt', lines[k]):
                    error.write(lines[k])
                    k+=1
                    e.next()
Code example #29
File: output.py Project: thaibault/boostNode
    def __repr__(cls):
        '''
            Invokes if this object should describe itself by a string.

            Examples:

            >>> repr(Logger()) # doctest: +ELLIPSIS
            'Object of "Logger" with logger "...

            >>> logger1 = Logger.get()
            >>> repr(Logger()) # doctest: +ELLIPSIS
            'Object of "Logger" with logger "...

            >>> logger1 = Logger.get()
            >>> logger2 = Logger.get('hans')
            >>> repr(Logger()) # doctest: +ELLIPSIS
            'Object of "Logger" with logger "... and ...
        '''
        handler_string = formatter_string = ''
        for index, logger in builtins.enumerate(cls.instances):
            start = ', "'
            end = '"'
            if index + 1 == builtins.len(cls.instances):
                start = ' and "'
                end = ''
            if index == 0:
                start = ''
            handler_string += start + builtins.repr(logger.handlers[0]) + end
            formatter_string += start + builtins.repr(
                logger.handlers[0].formatter
            ) + end
# # python3.5
# #         return (
# #             'Object of "{class_name}" with logger "{handler}", formatter '
# #             '"{formatter}" and buffer "{buffer}".'.format(
# #                 class_name=cls.__name__, handler=handler_string,
# #                 formatter=formatter_string,
# #                 buffer=builtins.str(cls.buffer)))
        return (
            'Object of "{class_name}" with logger "{handler}", formatter '
            '"{formatter}" and buffer "{buffer}".'.format(
                class_name=cls.__name__, handler=handler_string,
                formatter=formatter_string,
                buffer=convert_to_unicode(cls.buffer)))
Code example #30
    def _inferir_columnas(self, lineas, func_validadora):
        assert callable(func_validadora)

        matriz = []
        for linea in lineas:
            matriz.append([func_validadora(celda) for celda in linea])

        # https://stackoverflow.com/questions/4937491/\
        #    matrix-transpose-in-python
        matriz_transpuesta = zip(*matriz)
        resultado_validacion_por_columna = [
            all(lista) for lista in matriz_transpuesta
        ]

        return [
            index
            for index, value in enumerate(resultado_validacion_por_columna)
            if value
        ]
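The pattern here: validate every cell, transpose with zip(*matriz), collapse each column with all(), then keep the indices of fully valid columns via enumerate. A self-contained sketch with a hypothetical digit-only validator:

rows = [['1', 'x', '2'], ['3', 'y', '4']]
validated = [[cell.isdigit() for cell in row] for row in rows]
per_column = [all(col) for col in zip(*validated)]   # transpose, then reduce each column
print([i for i, ok in enumerate(per_column) if ok])  # [0, 2]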
Code example #31
File: merger.py Project: JonMosenkis/Sefaria-Data
def duplicate_rows_in_file(file2open, newfile):
    allrows = []
    with open(u'{}.csv'.format(newfile), 'w') as csv_file:
        writer = csv.DictWriter(csv_file, [u'txt file line', u'Perek running counter',u'page running counter',
                                u'Perek aprx', u'Page aprx', u'Rambam', u'Semag', u'Tur Shulchan Arukh', u'original', u'problem']) #fieldnames = obj_list[0].keys())
        writer.writeheader()
        with open(file2open, 'r') as csvfile:
            file_reader = csv.DictReader(csvfile)
            for i, row in enumerate(file_reader):
                rambam_len = times(u'Mishneh', u'Rambam', row)
                semag_len = times(u'Sefer Mitzvot Gadol', u'Semag', row)
                tur_len = times(u'Tur,',u'Tur Shulchan Arukh', row)
                sa_len = times(u'Shulchan Arukh,', u'Tur Shulchan Arukh', row)
                largest = max(rambam_len, semag_len, tur_len, sa_len)
                l = 1
                for x in range(largest):
                    writer.writerows([row])
                    l += 1
                    allrows.append(row)
    return allrows
Code example #32
File: merger.py Project: JonMosenkis/Sefaria-Data
def equal_sets(all_rows = False, oldworkfilename = False):
    #[u'daf',u'Ramabam	Semag',u'Shulchan Arukh', u'Tur'])
    with open(oldworkfilename, 'rb') as tsvin:
        tsvin = csv.DictReader(tsvin, delimiter='\t')
        # file_reader = csv.DictReader(csvfile)
        # get the set from the row of tsv
        for i, row in enumerate(tsvin):
            # row is a dictonary
            row.update((k, re.sub(u'_', u' ', row[k])) for k in row.keys())
            ram_them = row[u'Rambam_f'] # re.sub(u'_', u' ', row[u'Rambam_f'])
            ram_us = re.sub(u' (\d{1,3}):(\d{1,3})', ur'.\1.\2', all_rows[i-1][u'Rambam'])
            sa_them = row[u'Shulchan_Arukh_f']
            sa_us = all_rows[i-1][u'Tur Shulchan Arukh']
            print i, all_rows[i-1][u'original']
            print i, ram_them , ram_us
            # print i, sa_them , sa_us
            if ram_them not in ram_us:
                print ram_them not in ram_us
                # print i, all_rows[i - 1][u'original']
                print i, sa_them, sa_us
Code example #33
File: views.py Project: erickvh/agro-export-hdp
def validar(request):
    exportacionesmasivas = []
    masivos.productos = []
    cadena = "Errores:"
    ruta = request.POST.get('rutas')
    reader = csv.reader(open('entradasmasivas/' + ruta, 'r'))
    for index, row in enumerate(reader):
        if validar_entrada(row[0], row[1], row[2], row[3], row[4], row[5]):
            exportacionesmasivas.append([row[0], row[1], row[2], row[3], row[4], row[5], index])
            masivos.productos.append([row[0], row[1], row[2], row[3], row[4], row[5]])
        else:
            cadena = cadena + "\nerror en :" + "Fila:" + str(index) + "Fila no se mostrara para su insercion"

    context = {
        'exportaciones': exportacionesmasivas,
        'tamanio': len(exportacionesmasivas),
        'errores': cadena,
    }

    return render(request, 'visualizar_entradas_masivas.html', context)
Code example #34
File: S3LDIF2UCSusers.py Project: tcraxs/LDIFtoUCS
    def create_groups(self):
        """
        Create not-existing groups except those in settings.black_lists["groups"], add users to (also pre-existing)
        groups.

        :return: None
        """
        logger.info("Starting to add groups.")
        created = 0
        existed = 0
        failed = 0
        blacklisted = 0
        for counter, (dn, entry) in enumerate(sorted(self.groups.items()), 1):
            cn = entry["cn"][0]
            if cn in settings.black_lists["groups_create"]:
                logger.info("%04d/%04d omitting blacklisted group '%s'.", counter, len(self.users), dn)
                blacklisted += 1
                continue
            try:
                out = subprocess.check_output(["udm", "groups/group", "list", "--filter", "name=" + cn])
            except subprocess.CalledProcessError, cpe:
                logger.exception("Looking for existence of group '%s', error code: %d, output: '%s'", cn,
                                 cpe.returncode, cpe.output)
                continue
            if len(out.split("\n")) > 2:
                logger.info("%04d/%04d not adding existing group '%s'", counter, len(self.users), dn)
                existed += 1
                continue
            logger.info("%04d/%04d adding group '%s'", counter, len(self.users), cn)
            try:
                out = subprocess.check_output(["udm", "groups/group", "create",
                                               "--position=" + self.group_container,
                                               "--set", "name=" + cn,
                                               "--set", "description=added by S3LDIF2UCSusers"])
                logger.debug("out: '%s'", out)
                created += 1
            except subprocess.CalledProcessError, cpe:
                logger.exception("Creating group '%s', error code: %d, output: '%s'", cn, cpe.returncode,
                                 cpe.output)
                failed += 1
Code example #35
def generate_meteo(siteid,
                   sitedir,
                   first_year,
                   last_year,
                   version_data,
                   version_processing,
                   pipeline=None):
    log.debug("{s}: starting generation of AUXMETEO file".format(s=siteid))
    meteo_info = (METEO_INFO if pipeline is None else pipeline.meteo_info)
    prodfile_aux_template = (PRODFILE_AUX_TEMPLATE if pipeline is None else
                             pipeline.prodfile_aux_template)

    filename = meteo_info.format(s=siteid, sd=sitedir, r='hh')
    if not os.path.isfile(filename):
        raise ONEFluxError("{s}: meteo info file not found: {f}".format(
            s=siteid, f=filename))

    H_BEGIN, H_END = "var", "corr"
    VAR_D = {
        'Ta': 'TA',
        'TA': 'TA',
        'Pa': 'PA',
        'PA': 'PA',
        'VPD': 'VPD',
        'WS': 'WS',
        'Precip': 'P',
        'P': 'P',
        'Rg': 'SW_IN',
        'SW_IN': 'SW_IN',
        'LWin': 'LW_IN',
        'LW_IN': 'LW_IN',
        'LWin_calc': 'LW_IN_JSB',
        'LW_IN_calc': 'LW_IN_JSB',
    }

    lines = []
    with open(filename, 'r') as f:
        lines = f.readlines()

    c_var, c_slope, c_intercept, c_rmse, c_corr = 0, 1, 2, 3, 4
    first_line = None
    for i, line in enumerate(lines):
        l = line.strip().lower()
        if l.startswith(H_BEGIN) and l.endswith(H_END):
            first_line = i
            break

    if first_line is None:
        raise ONEFluxError(
            "{s}: first line of meteo info file not found: {f}".format(
                s=siteid, f=filename))
    if 'unit' in lines[first_line].lower():
        log.info("{s}: handling old format meteo info file: {f}".format(
            s=siteid, f=filename))
        c_slope, c_intercept, c_rmse, c_corr = 3, 4, 5, 6

    vars_l = ['TA', 'PA', 'VPD', 'WS', 'P', 'SW_IN', 'LW_IN', 'LW_IN_JSB']
    #pars_l = ['ERA_SLOPE', 'ERA_INTERCEPT', 'ERA_RMSE', 'ERA_CORRELATION']
    values = {i: None for i in vars_l}
    for line in lines[first_line + 1:first_line + 9]:
        l = line.strip().split(',')
        values[VAR_D[l[c_var]]] = [
            (float(l[c_slope].strip()) if
             (l[c_slope].strip() and l[c_slope].strip() != '-') else -9999),
            (float(l[c_intercept].strip()) if
             (l[c_intercept].strip()
              and l[c_intercept].strip() != '-') else -9999),
            (float(l[c_rmse].strip()) if
             (l[c_rmse].strip() and l[c_rmse].strip() != '-') else -9999),
            (float(l[c_corr].strip()) if
             (l[c_corr].strip() and l[c_corr].strip() != '-') else -9999),
        ]

    output_lines = [','.join(AUX_HEADER) + '\n']
    for i, var in enumerate(vars_l, start=1):
        if values[var] is None:
            raise ONEFluxError(
                "{s}: ERA variable '{v}' not found in: {f}".format(s=siteid,
                                                                   v=var,
                                                                   f=filename))
        slope = ("{v:.2f}".format(
            v=values[var][0]) if values[var][0] != -9999 else '-9999')
        intercept = ("{v:.2f}".format(
            v=values[var][1]) if values[var][1] != -9999 else '-9999')
        rmse = ("{v:.2f}".format(
            v=values[var][2]) if values[var][2] != -9999 else '-9999')
        corr = ("{v:.2f}".format(
            v=values[var][3]) if values[var][3] != -9999 else '-9999')
        output_lines.append("{i},{v},{p},{val},{t}\n".format(i=i,
                                                             v=var,
                                                             p='ERA_SLOPE',
                                                             val=slope,
                                                             t='-9999'))
        output_lines.append("{i},{v},{p},{val},{t}\n".format(i=i,
                                                             v=var,
                                                             p='ERA_INTERCEPT',
                                                             val=intercept,
                                                             t='-9999'))
        output_lines.append("{i},{v},{p},{val},{t}\n".format(i=i,
                                                             v=var,
                                                             p='ERA_RMSE',
                                                             val=rmse,
                                                             t='-9999'))
        output_lines.append("{i},{v},{p},{val},{t}\n".format(
            i=i, v=var, p='ERA_CORRELATION', val=corr, t='-9999'))

    output_filename = prodfile_aux_template.format(s=siteid,
                                                   sd=sitedir,
                                                   aux='AUXMETEO',
                                                   fy=first_year,
                                                   ly=last_year,
                                                   vd=version_data,
                                                   vp=version_processing)
    log.info("{s}: writing auxiliary METEO file: {f}".format(
        s=siteid, f=output_filename))
    with open(output_filename, 'w') as f:
        f.writelines(output_lines)
    log.debug("{s}: finished generation of AUXMETEO file: {f}".format(
        s=siteid, f=output_filename))

    return output_filename
Code example #36
    meas_cfs = np.array(meas_cfs)
    f0binned_meas = np.round(meas_cfs*nsamp/512.0)*512.0/nsamp
    f0s = f0binned_meas
    measured_freqs = sweeps.prepare_sweep(ri,f0binned_meas,offsets,nsamp=nsamp)
    print "loaded updated waveforms in", (time.time()-start),"seconds"

    sys.stdout.flush()
    time.sleep(1)


    df = data_file.DataFile(suffix=suffix)
    df.nc.mmw_atten_turns=mmw_atten_turns
    for k,atten in enumerate(attenlist):
        ri.set_dac_attenuator(atten)
        print "measuring at attenuation", atten
        df.log_hw_state(ri)
        if k != 0:
            orig_sweep_data = None
        sweep_data = sweeps.do_prepared_sweep(ri, nchan_per_step=atonce, reads_per_step=1, sweep_data=orig_sweep_data)
        df.add_sweep(sweep_data)
        meas_cfs = []
        idxs = []
        for m in range(len(f0s)):
            fr,s21,errors = sweep_data.select_by_freq(f0s[m])
            thiscf = f0s[m]*source_on_freq_scale
            s21 = s21*np.exp(2j*np.pi*delay*fr)
            res = fit_best_resonator(fr,s21,errors=errors) #Resonator(fr,s21,errors=errors)
            fmin = fr[np.abs(s21).argmin()]
Code example #37
File: output.py Project: thaibault/boostNode
    def __init__(self, *output, **codewords):
# #
        '''
            Writes something to the output buffer or prints to standard \
            output.

            Examples:

            >>> buffer = Buffer()

            >>> Print(native_queue.Queue(), buffer=buffer) # doctest: +ELLIPSIS
            Object of "Print" with "...

            >>> queue1 = native_queue.Queue()
            >>> queue2 = native_queue.Queue()
            >>> queue1.put('hans')
            >>> queue2.put('hans')
            >>> Print(
            ...     queue1, queue2, buffer=buffer, flush=True
            ... ) # doctest: +ELLIPSIS
            Object of "Print" with "...

            >>> Print.default_buffer = Buffer()
            >>> Print('hans', 'hans again') # doctest: +ELLIPSIS
            Object of "Print" with "Object of "Buffer" (mem... "hans hans again
            ".".

            >>> buffer = Buffer()
            >>> Print(
            ...     'hans,', 'peter', end=' and klaus', sep=' ', buffer=buffer
            ... ) # doctest: +ELLIPSIS
            Object of "Print" with "Object of "Buffer" (memory buffered...".".

            >>> buffer # doctest: +ELLIPSIS
            Object ... (memory buffered) with content "hans, peter and klaus".
        '''
        keywords = {
            'replace': self.__class__.replace,
            'start': self.__class__.start,
            'separator': self.__class__.separator,
            'end': self.__class__.end,
            'buffer': self.__class__.default_buffer,
            'flush': codewords.get('replace', False)}
        keywords.update(codewords)
        '''Redirect print output to this buffer.'''
        self.buffer = keywords['buffer']
        output = builtins.list(output)
        for index, out in builtins.enumerate(output):
            if builtins.isinstance(out, native_queue.Queue):
                result = ''
                while not out.empty():
                    if index != 0 and keywords['separator']:
# # python3.5
# #                         result += builtins.str(keywords['separator'])
# #                     result += out.get()
                        result += convert_to_unicode(
                            keywords['separator'])
                    result += convert_to_unicode(out.get())
# #
                output[index] = result
            elif index == 0:
# # python3.5                 output[index] = builtins.str(out)
                output[index] = convert_to_unicode(out)
            else:
# # python3.5
# #                 output[index] = '%s%s' % (builtins.str(
# #                     keywords['separator']
# #                 ), builtins.str(out))
                output[index] = '%s%s' % (convert_to_unicode(
                    keywords['separator']
                ), convert_to_unicode(out))
# #
        line_replace = '\033[1A\r\033[2K' if keywords['replace'] else ''
        output = [keywords['start'], line_replace] + output + [keywords['end']]
# # python3.5
# #         builtins.print(
# #             *output, sep='', end='', file=keywords['buffer'],
# #             flush=keywords['flush'])
        builtins.print(*filter(lambda content: convert_to_string(
            content
        ), output), sep='', end='', file=keywords['buffer'])
        if keywords['flush']:
            sys.stdout.flush()
Code example #38
        [
            open('./WindowsAPIhash-master/API_Hash_{_coconut_format_0}.txt'.
                 format(_coconut_format_0=(i + 1))) for i in range(5)
        ]))


def find_hash(h):
    for api in api_hashes:
        if h in api.lower():
            return api
    return None


funcs = filter(lambda x: x is not None, map(find_hash, hashes))

for idx, func in enumerate(funcs):
    print(
        '0x46d + {_coconut_format_0} | {_coconut_format_1}:\t{_coconut_format_2}'
        .format(_coconut_format_0=(hex(idx * 4)),
                _coconut_format_1=(hex(0x46d + idx * 4)),
                _coconut_format_2=(func)))

# 0x349
arg_1 = b'\x15\x44\xa8\xc0'  # 0xc0a84415
arg_2 = b'\x39\x05'  # 0x539
socket = bytearray(0x10)
socket[0] = 0
socket[1] = 2
# ax = (arg_2 << 8) | (arg_2 >> 8)
ax = arg_2[::-1]
socket[2] = arg_2[0]
Code example #39
File: S3LDIF2UCSusers.py Project: tcraxs/LDIFtoUCS
    def create_users(self):
        """
        Create not-existing users except those in settings.black_lists["users"].

        :return: None
        """
        created = 0
        existed = 0
        failed = 0
        blacklisted = 0
        passwords_txt = open("passwords.txt", "ab")
        passwords_txt.write('"username", "password"\n')

        logger.info("Starting to add users.")
        for counter, (dn, entry) in enumerate(sorted(self.users.items()), 1):
            if entry["uid"][0] in settings.black_lists["users"]:
                logger.info("%04d/%04d omitting blacklisted user '%s'.", counter, len(self.users), dn)
                blacklisted += 1
                continue
            if " " in entry["uid"][0]:
                logger.info("Not adding user with space in username: '******'", entry["uid"][0])
                failed += 1
                continue
            try:
                out = subprocess.check_output(["udm", "users/user", "list", "--filter", "username=" + entry["uid"][0]])
            except subprocess.CalledProcessError, cpe:
                logger.exception("Looking for existence of user '%s', error code: %d, output: '%s'", entry["uid"][0],
                                 cpe.returncode, cpe.output)
                continue
            if len(out.split("\n")) > 2:
                logger.info("%04d/%04d not adding existing user '%s'", counter, len(self.users), dn)
                existed += 1
                continue
            pw = self._mkpw()
            logger.info("%04d/%04d adding user '%s' with password '%s'", counter, len(self.users), dn, pw)
            try:
                firstname = " ".join(entry["cn"][0].split()[0:-1])
            except:
                firstname = ""
            try:
                lastname = entry["cn"][0].split()[-1]
            except:
                lastname = ""
            try:
                profilepath = entry["sambaProfilePath"][0]

            except:
                profilepath = ""

            try:
                out = subprocess.check_output(["udm", "users/user", "create",
                                               "--position=" + self.user_container,
                                               "--set", "username="******"uid"][0],
                                               "--set", "firstname=" + firstname,
                                               "--set", "lastname=" + lastname,
                                               "--set", "password="******"--set", "profilepath=" + profilepath,
                                               "--set", "sambahome=" + entry["sambaHomePath"][0],
                                               "--set", "unixhome=" + entry["homeDirectory"][0],
                                               "--set", "gecos=%s %s" % (firstname, lastname),
                                               "--set", "displayName=%s %s" % (firstname, lastname),
                                               "--set", "description=added by S3LDIF2UCSusers"])
                logger.debug("out: '%s'", out)
                created += 1
                passwords_txt.write('"%s", "%s"\n' % (entry["uid"][0], pw))
            except subprocess.CalledProcessError, cpe:
                logger.exception("Creating user '%s', error code: %d, output: '%s'", entry["uid"][0], cpe.returncode,
                                 cpe.output)
                failed += 1
Code example #40
File: rossbyloglib.py Project: nknezek/rossby_waves
def log_eigenvalues(logger,model,vals):
    found = '\nEigenvalues found :\n'
    for ind,val in enumerate(vals):
        found = found+'{0}: {1:.3e}\n'.format(ind,val)
    logger.info(found)
コード例 #52
0
    meas_cfs = np.array(meas_cfs)
    f0binned_meas = np.round(meas_cfs * nsamp / 512.0) * 512.0 / nsamp
    f0s = f0binned_meas
    measured_freqs = sweeps.prepare_sweep(ri,
                                          f0binned_meas,
                                          offsets,
                                          nsamp=nsamp)
    print "loaded updated waveforms in", (time.time() - start), "seconds"

    sys.stdout.flush()
    time.sleep(1)

    df = data_file.DataFile(suffix=suffix)
    df.nc.mmw_atten_turns = mmw_atten_turns
    for k, atten in enumerate(attenlist):
        ri.set_dac_attenuator(atten)
        print "measuring at attenuation", atten
        df.log_hw_state(ri)
        if k != 0:
            orig_sweep_data = None
        sweep_data = sweeps.do_prepared_sweep(ri,
                                              nchan_per_step=atonce,
                                              reads_per_step=1,
                                              sweep_data=orig_sweep_data)
        df.add_sweep(sweep_data)
        meas_cfs = []
        idxs = []
        for m in range(len(f0s)):
            fr, s21, errors = sweep_data.select_by_freq(f0s[m])
            thiscf = f0s[m]
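
The f0binned_meas line above snaps each measured centre frequency onto the sweep's frequency grid, i.e. to the nearest multiple of 512.0/nsamp. A tiny illustration with invented numbers:

import numpy as np

nsamp = 2 ** 16                  # invented sample count
step = 512.0 / nsamp             # grid spacing implied by the rounding
meas_cfs = np.array([101.3, 148.7])
f0binned = np.round(meas_cfs * nsamp / 512.0) * 512.0 / nsamp
print(f0binned)                  # -> [101.296875 148.703125]: nearest multiples of step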
Code example #53
# Membership operators
if 6576 in alist:
    print 'in!'
blist = alist + list1
alist += list2  # this works too; similar to append
print blist
print alist

# Repetition operator! I did not know about * before
print list1 * 2  # repeats the list twice
list1 *= 3
print list1

# As with strings, enumerate returns an enumeration object; you can inspect its type
t = enumerate(alist)
print type(t)
for i in t:
    print i
# sorted
numlist = [34, 5, 4, 6, 7, 6, 8, 34, 3, 634, 25]
newlist = sorted(numlist)  # note: sorted is a factory function; it does not modify the original
print newlist, sum(newlist)
print type(reversed(newlist))  # returns a reverse iterator
for i in reversed(newlist):
    print i
    
# list() and tuple() accept any iterable and build a new list or tuple via a shallow copy
print tuple(list1)  # returns a new tuple

# Important: lists have no operators or built-in member functions exclusively their own; they all come from sequences!
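
One caveat to add to the notes above: like reversed, the object enumerate returns is a one-shot iterator, so it is exhausted after a single pass:

alist2 = [1, 2, 3]
t2 = enumerate(alist2)
print list(t2)   # [(0, 1), (1, 2), (2, 3)]
print list(t2)   # [] -- the iterator is already exhausted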
Code example #54
File: cfgrules.py Project: lefterav/qualitative
 def get_features_simplesentence(self, simplesentence, parallelsentence):
     '''
     Count the CFG rules appearing in the parse
     '''
     try:
         parsestring = simplesentence.get_attribute("berkeley-tree")
     except:
         log.error("error reading berkeley tree")
         return {}
     cfg_rules = get_cfg_rules(parsestring)
     atts = {}
     
     ruledepth = {}
     labeldepth = {}
     labelleaves = {}
     fulldepth = 0
     
     for rule in cfg_rules:
         ruledepth.setdefault(rule, []).append(rule.depth)  # append mutates the list in place; assigning its None return value back (as before) broke the depth stats
         labeldepth.setdefault(rule.lhs, []).append(rule.depth)
         labelleaves.setdefault(rule.lhs, []).append(rule.length) 
         if rule.depth > fulldepth:
             fulldepth = rule.depth
         
     for label, depthvector in labeldepth.iteritems():
         try:
             atts["parse_{}_depth_max".format(label)] = max(depthvector)
             atts["parse_{}_height_max".format(label)] = fulldepth - max(depthvector)
             atts["parse_{}_depth_avg".format(label)] = average(depthvector)
             atts["parse_{}_height_avg".format(label)] = fulldepth - average(depthvector)
         except:
             atts["parse_{}_depth_max".format(label)] = 0
             atts["parse_{}_height_max".format(label)] = 0
             atts["parse_{}_depth_avg".format(label)] = 0
             atts["parse_{}_height_avg".format(label)] = 0
         
     for label, leavevector in labelleaves.iteritems():
         try:
             atts["parse_{}_leaves_max".format(label)] = max(leavevector)
             atts["parse_{}_leaves_avg".format(label)] = average(leavevector)
         except:
             atts["parse_{}_leaves_max".format(label)] = 0
             atts["parse_{}_leaves_avg".format(label)] = 0
     
     # check the position of verbs relative to the parent node
     for rule in cfg_rules:
         atts["cfg_{}".format(rule)] = atts.setdefault("cfg_{}".format(rule), 0) + 1
         
         for index, child in enumerate(rule.rhs, 1):
             if child.startswith("V"):
                 #position from the beginning
                 atts["cfgpos_{}-{}".format(rule.lhs, child)] = index
                 #position from the end
                 atts["cfgpos_end_{}-{}".format(rule.lhs, child)] = len(rule.rhs) - index + 1
                 
         try:
             atts["cfg_{}_depth_max".format(rule)] = max(ruledepth.setdefault(rule, []))
             atts["cfg_{}_depth_avg".format(rule)] = average(ruledepth.setdefault(rule, []))
             atts["cfg_{}_height_max".format(rule)] = fulldepth - max(ruledepth.setdefault(rule, []))
             atts["cfg_{}_height_avg".format(rule)] = fulldepth - average(ruledepth.setdefault(rule, []))
             
         except:
             atts["cfg_{}_depth_max".format(rule)] = 0
             atts["cfg_{}_depth_avg".format(rule)] = 0
             atts["cfg_{}_height_max".format(rule)] = fulldepth
             atts["cfg_{}_height_avg".format(rule)] = fulldepth
                         
     atts["cfg_fulldepth"] = fulldepth
         
     return atts    
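
Two idioms from this example are worth isolating: dict.setdefault(key, []).append(value) accumulates values per key in a single step (and, as the fix above notes, its None return value must not be assigned back), and enumerate(seq, 1) counts from 1 instead of 0. A small sketch with invented rules:

depths = {}
for rule, depth in [("NP->DT NN", 2), ("NP->DT NN", 4), ("VP->V NP", 1)]:
    # setdefault returns the (possibly freshly inserted) list, so append
    # mutates it in place; append itself always returns None
    depths.setdefault(rule, []).append(depth)
print depths  # {'NP->DT NN': [2, 4], 'VP->V NP': [1]} (dict order may vary)

for index, child in enumerate(["DT", "V", "NN"], 1):
    print index, child  # counts 1, 2, 3 -- handy for 1-based positions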
Code example #55
    def labelData(self):
        # Reset 'detected' to False and 'idxs' to [] to make sure we are not using information from a previous labelling
        self.labels['detected'] = False
        self.labels['idxs'] = []

        # The labelling process depends on the sensor type
        if self.msg_type_str == 'LaserScan':  # 2D LIDARS -------------------------------------
            # For 2D LIDARs the process is as follows: first, group the range data into clusters; then
            # associate one of the clusters with the calibration pattern by selecting the cluster which is closest to
            # the rviz interactive marker.

            clusters = []  # initialize cluster list to empty
            cluster_counter = 0  # init counter
            points = []  # init points

            # Compute cartesian coordinates
            xs, ys = atom_core.utilities.laser_scan_msg_to_xy(self.msg)

            # Clustering:
            first_iteration = True
            for idx, r in enumerate(self.msg.ranges):
                # Skip if either this point or the previous one has a range smaller than minimum_range_value
                if r < self.minimum_range_value or self.msg.ranges[
                        idx - 1] < self.minimum_range_value:
                    continue

                if first_iteration:  # if first iteration, create a new cluster
                    clusters.append(LaserScanCluster(cluster_counter, idx))
                    first_iteration = False
                else:  # check if new point belongs to current cluster, create new cluster if not
                    x = xs[clusters[-1].idxs[
                        -1]]  # x coordinate of last point of last cluster
                    y = ys[clusters[-1].idxs[
                        -1]]  # y coordinate of last point of last cluster
                    distance = math.sqrt((xs[idx] - x)**2 + (ys[idx] - y)**2)
                    if distance > self.threshold:  # if distance larger than threshold, create new cluster
                        cluster_counter += 1
                        clusters.append(LaserScanCluster(cluster_counter, idx))
                    else:  # same cluster, push this point into the same cluster
                        clusters[-1].pushIdx(idx)

            # Association stage: find out which cluster is closer to the marker
            x_marker, y_marker = self.marker.pose.position.x, self.marker.pose.position.y  # interactive marker pose
            idx_closest_cluster = 0
            min_dist = sys.maxint
            for cluster_idx, cluster in enumerate(
                    clusters):  # cycle all clusters
                for idx in cluster.idxs:  # cycle each point in the cluster
                    x, y = xs[idx], ys[idx]
                    dist = math.sqrt((x_marker - x)**2 + (y_marker - y)**2)
                    if dist < min_dist:
                        idx_closest_cluster = cluster_idx
                        min_dist = dist

            closest_cluster = clusters[idx_closest_cluster]

            # Find the coordinate of the middle point in the closest cluster and bring the marker to that point
            x_sum, y_sum = 0, 0
            for idx in closest_cluster.idxs:
                x_sum += xs[idx]
                y_sum += ys[idx]

            self.marker.pose.position.x = x_sum / float(
                len(closest_cluster.idxs))
            self.marker.pose.position.y = y_sum / float(
                len(closest_cluster.idxs))
            self.marker.pose.position.z = 0
            self.menu_handler.reApply(self.server)
            self.server.applyChanges()

            # Update the dictionary with the labels
            self.labels['detected'] = True

            percentage_points_to_remove = 0.0  # remove x% of data from each side
            number_of_idxs = len(clusters[idx_closest_cluster].idxs)
            idxs_to_remove = int(percentage_points_to_remove *
                                 float(number_of_idxs))
            clusters[idx_closest_cluster].idxs_filtered = clusters[
                idx_closest_cluster].idxs[idxs_to_remove:number_of_idxs -
                                          idxs_to_remove]

            self.labels['idxs'] = clusters[idx_closest_cluster].idxs_filtered

            # Create and publish point cloud message with the colored clusters (just for debugging)
            cmap = cm.prism(np.linspace(0, 1, len(clusters)))
            points = []
            z, a = 0, 255
            for cluster in clusters:
                for idx in cluster.idxs:
                    x, y = xs[idx], ys[idx]
                    r, g, b = int(cmap[cluster.cluster_count, 0] * 255.0), \
                              int(cmap[cluster.cluster_count, 1] * 255.0), \
                              int(cmap[cluster.cluster_count, 2] * 255.0)
                    rgb = struct.unpack('I', struct.pack('BBBB', b, g, r,
                                                         a))[0]
                    pt = [x, y, z, rgb]
                    points.append(pt)

            fields = [
                PointField('x', 0, PointField.FLOAT32, 1),
                PointField('y', 4, PointField.FLOAT32, 1),
                PointField('z', 8, PointField.FLOAT32, 1),
                PointField('rgba', 12, PointField.UINT32, 1)
            ]
            header = Header()
            header.frame_id = self.parent
            header.stamp = self.msg.header.stamp
            pc_msg = point_cloud2.create_cloud(header, fields, points)
            self.publisher_clusters.publish(pc_msg)

            # Create and publish point cloud message containing only the selected calibration pattern points
            points = []
            for idx in clusters[idx_closest_cluster].idxs_filtered:
                x_marker, y_marker, z_marker = xs[idx], ys[idx], 0
                r = int(0 * 255.0)
                g = int(0 * 255.0)
                b = int(1 * 255.0)
                a = 255
                rgb = struct.unpack('I', struct.pack('BBBB', b, g, r, a))[0]
                pt = [x_marker, y_marker, z_marker, rgb]
                points.append(pt)

            pc_msg = point_cloud2.create_cloud(header, fields, points)
            self.publisher_selected_points.publish(pc_msg)

        elif self.msg_type_str == 'Image':  # Cameras -------------------------------------------

            # Convert to opencv image and save image to disk
            image = self.bridge.imgmsg_to_cv2(self.msg, "bgr8")

            result = self.pattern.detect(image, equalize_histogram=True)
            if result['detected']:
                c = []

                if 'ids' in result:
                    # The charuco pattern also returns an ID for each keypoint.
                    # We can use this information for partial detections.
                    for idx, corner in enumerate(result['keypoints']):
                        c.append({
                            'x': float(corner[0][0]),
                            'y': float(corner[0][1]),
                            'id': result['ids'][idx]
                        })
                else:
                    for corner in result['keypoints']:
                        c.append({
                            'x': float(corner[0][0]),
                            'y': float(corner[0][1])
                        })

                x = int(round(c[0]['x']))
                y = int(round(c[0]['y']))
                cv2.line(image, (x, y), (x, y), (0, 255, 255), 20)

                # Update the dictionary with the labels
                self.labels['detected'] = True
                self.labels['idxs'] = c

            # For visual debugging
            self.pattern.drawKeypoints(image, result)

            msg_out = self.bridge.cv2_to_imgmsg(image, encoding="passthrough")
            msg_out.header.stamp = self.msg.header.stamp
            msg_out.header.frame_id = self.msg.header.frame_id
            self.publisher_labelled_image.publish(msg_out)

        elif self.msg_type_str == 'PointCloud2TIAGO':  # RGB-D pointcloud -------------------------------------------
            # TODO: this will have to be revised later on, see #44

            # print("Found point cloud!")

            tall = rospy.Time.now()

            # Get 3D coords
            t = rospy.Time.now()
            # points = pc2.read_points_list(self.msg, skip_nans=False, field_names=("x", "y", "z"))
            print('0. took ' + str((rospy.Time.now() - t).to_sec()))

            # Get the marker position
            x_marker, y_marker, z_marker = self.marker.pose.position.x, self.marker.pose.position.y, self.marker.pose.position.z  # interactive marker pose

            t = rospy.Time.now()
            # Project points
            print('x_marker=' + str(x_marker))
            print('y_marker=' + str(y_marker))
            print('z_marker=' + str(z_marker))
            seed_point = self.cam_model.project3dToPixel(
                (x_marker, y_marker, z_marker))
            print('seed_point = ' + str(seed_point))
            if np.isnan(
                    seed_point[0]
            ):  # something went wrong, reposition marker on initial position and return
                self.marker.pose.position.x = 0
                self.marker.pose.position.y = 0
                self.marker.pose.position.z = 4
                self.menu_handler.reApply(self.server)
                self.server.applyChanges()
                rospy.logwarn(
                    'Could not project pixel, putting marker in home position.'
                )
                return

            seed_point = (int(round(seed_point[0])), int(round(seed_point[1])))

            # Check if projection is inside the image
            x = seed_point[0]
            y = seed_point[1]
            if x < 0 or x >= self.cam_model.width or y < 0 or y >= self.cam_model.height:
                rospy.logwarn(
                    'Projection of point is outside of image. Not labelling point cloud.'
                )
                return

            print('1. took ' + str((rospy.Time.now() - t).to_sec()))

            t = rospy.Time.now()
            # Wait for depth image message
            imgmsg = rospy.wait_for_message(
                '/top_center_rgbd_camera/depth/image_rect', Image)

            print('2. took ' + str((rospy.Time.now() - t).to_sec()))

            t = rospy.Time.now()

            # img = self.bridge.imgmsg_to_cv2(imgmsg, desired_encoding="8UC1")
            img_raw = self.bridge.imgmsg_to_cv2(imgmsg,
                                                desired_encoding="passthrough")

            img = deepcopy(img_raw)
            img_float = img.astype(np.float32)

            h, w = img.shape
            # print('img type = ' + str(img.dtype))
            # print('img_float type = ' + str(img_float.dtype))
            # print('img_float shape = ' + str(img_float.shape))

            mask = np.zeros((h + 2, w + 2, 1), np.uint8)

            # mask[seed_point[1] - 2:seed_point[1] + 2, seed_point[0] - 2:seed_point[0] + 2] = 255

            # PCA + Consensus + FloodFill ------------------

            # get 10 points around the seed
            # seed = {'x': seed_point[0], 'y': seed_point[1]}
            # pts = []
            # pts.append({'x': seed['x'], 'y': seed['y'] - 10})  # top neighbor
            # pts.append({'x': seed['x'], 'y': seed['y'] + 10})  # bottom neighbor
            # pts.append({'x': seed['x'] - 1, 'y': seed['y']})  # left neighbor
            # pts.append({'x': seed['x'] + 1, 'y': seed['y']})  # right neighbor
            #
            # def fitPlaneLTSQ(XYZ):
            #     (rows, cols) = XYZ.shape
            #     G = np.ones((rows, 3))
            #     G[:, 0] = XYZ[:, 0]  # X
            #     G[:, 1] = XYZ[:, 1]  # Y
            #     Z = XYZ[:, 2]
            #     (a, b, c), resid, rank, s = np.linalg.lstsq(G, Z)
            #     normal = (a, b, -1)
            #     nn = np.linalg.norm(normal)
            #     normal = normal / nn
            #     return (c, normal)
            #
            # data = np.random.randn(100, 3) / 3
            # data[:, 2] /= 10
            # c, normal = fitPlaneLTSQ(data)

            # out flood fill ------------------
            # to_visit = [{'x': seed_point[0], 'y': seed_point[1]}]
            # # filled = []
            # threshold = 0.05
            # filled_img = np.zeros((h, w), dtype=np.bool)
            # visited_img = np.zeros((h, w), dtype=np.bool)
            #
            # def isInsideBox(p, min_x, max_x, min_y, max_y):
            #     if min_x <= p['x'] < max_x and min_y <= p['y'] < max_y:
            #         return True
            #     else:
            #         return False
            #
            # def getNotVisitedNeighbors(p, min_x, max_x, min_y, max_y, img):
            #     neighbors = []
            #     tmp_neighbors = []
            #     tmp_neighbors.append({'x': p['x'], 'y': p['y'] - 1})  # top neighbor
            #     tmp_neighbors.append({'x': p['x'], 'y': p['y'] + 1})  # bottom neighbor
            #     tmp_neighbors.append({'x': p['x'] - 1, 'y': p['y']})  # left neighbor
            #     tmp_neighbors.append({'x': p['x'] + 1, 'y': p['y']})  # right neighbor
            #
            #     for idx, n in enumerate(tmp_neighbors):
            #         if isInsideBox(n, min_x, max_x, min_y, max_y) and not img[n['y'], n['x']] == True:
            #             neighbors.append(n)
            #
            #     return neighbors
            #
            # cv2.namedWindow('Filled', cv2.WINDOW_NORMAL)
            # cv2.namedWindow('Visited', cv2.WINDOW_NORMAL)
            # cv2.namedWindow('To Visit', cv2.WINDOW_NORMAL)
            # while to_visit != []:
            #     p = to_visit[0]
            #     # print('Visiting ' + str(p))
            #     range_p = img_float[p['y'], p['x']]
            #     to_visit.pop(0)  # remove p from to_visit
            #     # filled.append(p)  # append p to filled
            #     filled_img[p['y'], p['x']] = True
            #     # print(filled)
            #
            #     # compute neighbors of this point
            #     neighbors = getNotVisitedNeighbors(p, 0, w, 0, h, visited_img)
            #
            #     # print('neighbors ' + str(neighbors))
            #
            #     for n in neighbors:  # test if should propagate to neighbors
            #         range_n = img_float[n['y'], n['x']]
            #         visited_img[n['y'], n['x']] = True
            #
            #         if abs(range_n - range_p) <= threshold:
            #             # if not n in to_visit:
            #             to_visit.append(n)
            #
            #     # Create the mask image
            # to_visit_img = np.zeros((h, w), dtype=np.bool)
            # for p in to_visit:
            #     to_visit_img[p['y'], p['x']] = True
            #
            #
            # # print('To_visit ' + str(to_visit))
            #
            # cv2.imshow('Filled', filled_img.astype(np.uint8) * 255)
            # cv2.imshow('Visited', visited_img.astype(np.uint8) * 255)
            # cv2.imshow('To Visit', to_visit_img.astype(np.uint8) * 255)
            # key = cv2.waitKey(5)

            # --------------------------------

            img_float2 = deepcopy(img_float)
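            # Flags breakdown: the low byte (8) is the pixel connectivity,
            # (128 << 8) is the value written into the mask for filled pixels,
            # FLOODFILL_MASK_ONLY leaves the image itself untouched, and
            # FLOODFILL_FIXED_RANGE compares each candidate pixel against the
            # seed value rather than against its neighbours.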
            cv2.floodFill(
                img_float2, mask, seed_point, 128, 80, 80, 8 | (128 << 8)
                | cv2.FLOODFILL_MASK_ONLY | cv2.FLOODFILL_FIXED_RANGE)

            # Switch coords of seed point
            # mask[seed_point[1]-2:seed_point[1]+2, seed_point[0]-2:seed_point[0]+2] = 255

            tmpmask = mask[1:h + 1, 1:w + 1]

            cv2.namedWindow('tmpmask', cv2.WINDOW_NORMAL)
            cv2.imshow('tmpmask', tmpmask)

            def onMouse(event, x, y, flags, param):
                print("x = " + str(x) + ' y = ' + str(y) + ' value = ' +
                      str(img_float2[y, x]))

            cv2.namedWindow('float', cv2.WINDOW_GUI_EXPANDED)
            cv2.setMouseCallback('float', onMouse, param=None)
            cv2.imshow('float', img_raw)
            key = cv2.waitKey(0)

            print('3. took ' + str((rospy.Time.now() - t).to_sec()))
            t = rospy.Time.now()

            # calculate moments of binary image
            M = cv2.moments(tmpmask)

            self.labels['detected'] = True
            print('4. took ' + str((rospy.Time.now() - t).to_sec()))
            t = rospy.Time.now()

            if M["m00"] != 0:
                # calculate x,y coordinate of center
                cX = int(M["m10"] / M["m00"])
                cY = int(M["m01"] / M["m00"])

                red = deepcopy(img)
                # bmask =  tmpmask.astype(np.bool)

                print(tmpmask.shape)
                tmpmask = np.reshape(tmpmask, (480, 640))

                print(img.shape)

                red[tmpmask != 0] = red[tmpmask != 0] + 10000

                img = cv2.merge((img, img, red))

                img[cY - 2:cY + 2, cX - 2:cX + 2, 1] = 30000

                img[seed_point[1] - 2:seed_point[1] + 2,
                    seed_point[0] - 2:seed_point[0] + 2, 0] = 30000

                # img[100:400, 20:150] = 255

                cv2.imshow("mask", img)
                cv2.waitKey(5)

                # msg_out = self.bridge.cv2_to_imgmsg(showcenter, encoding="passthrough")
                # msg_out.header.stamp = self.msg.header.stamp
                # msg_out.header.frame_id = self.msg.header.frame_id

                # self.publisher_labelled_depth_image.publish(msg_out)

                # coords = points[cY * 640 + cX]
                # print('coords' + str(coords))

                ray = self.cam_model.projectPixelTo3dRay((cX, cY))

                print('ray' + str(ray))
                print('img' + str(img_float.shape))

                print(type(cX))
                print(type(cY))
                print(type(ray))

                dist = float(img_float[cY, cX])  # numpy images are indexed (row, col), i.e. (y, x)
                print('dist = ' + str(dist))
                x = ray[0] * dist
                y = ray[1] * dist
                z = ray[2] * dist

                print('xyz = ' + str(x) + ' ' + str(y) + ' ' + str(z))

                # if not math.isnan(coords[0]):
                #     self.marker.pose.position.x = coords[0]
                #     self.marker.pose.position.y = coords[1]
                #     self.marker.pose.position.z = coords[2]
                #     self.menu_handler.reApply(self.server)
                #     self.server.applyChanges()

                if dist > 0.1:
                    # self.marker.pose.position.x = x
                    # self.marker.pose.position.y = y
                    # self.marker.pose.position.z = z
                    # self.menu_handler.reApply(self.server)
                    # self.server.applyChanges()
                    pass

            print('5. took ' + str((rospy.Time.now() - t).to_sec()))
            # idx = np.where(tmpmask == 100)
            # # Create tuple with (l, c)
            # pointcoords = list(zip(idx[0], idx[1]))
            #
            # points = pc2.read_points_list(self.msg, skip_nans=False, field_names=("x", "y", "z"))
            # tmppoints = []
            #
            # for coord in pointcoords:
            #     pointidx = (coord[0]) * 640 + (coord[1])
            #     tmppoints.append(points[pointidx])
            #
            # msg_out = createRosCloud(tmppoints, self.msg.header.stamp, self.msg.header.frame_id)
            #
            # self.publisher_selected_points.publish(msg_out)
            print('all. took ' + str((rospy.Time.now() - tall).to_sec()))

        elif self.msg_type_str == 'PointCloud2':  # 3D scan pointcloud (Andre Aguiar) ---------------------------------
            # Get the marker position (this comes from the sphere in rviz)
            x_marker, y_marker, z_marker = self.marker.pose.position.x, self.marker.pose.position.y, \
                                           self.marker.pose.position.z  # interactive marker pose

            # Extract 3D point from the LiDAR
            pc = ros_numpy.numpify(self.msg)
            points = np.zeros((pc.shape[0], 3))
            points[:, 0] = pc['x']
            points[:, 1] = pc['y']
            points[:, 2] = pc['z']

            # Extract the points close to the seed point from the entire PCL
            marker_point = np.array([[x_marker, y_marker, z_marker]])
            dist = scipy.spatial.distance.cdist(marker_point,
                                                points,
                                                metric='euclidean')
            pts = points[np.transpose(dist < self.tracker_threshold)[:, 0], :]
            idx = np.where(np.transpose(dist < self.tracker_threshold)[:,
                                                                       0])[0]

            # Tracker - update seed point with the average of cluster to use in the next
            # iteration
            seed_point = []
            if 0 < len(pts):
                x_sum, y_sum, z_sum = 0, 0, 0
                for i in range(0, len(pts)):
                    x_sum += pts[i, 0]
                    y_sum += pts[i, 1]
                    z_sum += pts[i, 2]
                seed_point.append(x_sum / len(pts))
                seed_point.append(y_sum / len(pts))
                seed_point.append(z_sum / len(pts))

            # RANSAC - eliminate the tracker outliers
            number_points = pts.shape[0]
            if number_points == 0:
                return []
            # RANSAC iterations
            for i in range(0, self.number_iterations):
                # Randomly select three points that cannot be coincident
                # nor collinear
                while True:
                    idx1 = random.randint(0, number_points - 1)
                    idx2 = random.randint(0, number_points - 1)
                    idx3 = random.randint(0, number_points - 1)
                    pt1, pt2, pt3 = pts[[idx1, idx2, idx3], :]
                    # Compute the norm of position vectors
                    ab = np.linalg.norm(pt2 - pt1)
                    bc = np.linalg.norm(pt3 - pt2)
                    ac = np.linalg.norm(pt3 - pt1)
                    # Check if the points are coincident
                    if idx2 == idx1 or idx3 == idx1 or idx3 == idx2:
                        continue
                    # Check if the points are collinear: the triangle inequality degenerates
                    # to equality (exact float comparison would be fragile, so use a tolerance)
                    if abs((ab + bc) - ac) < 1e-9:
                        continue

                    # If all the conditions are satisfied, we can end the loop
                    break

                # The A, B, C Hessian coefficients are given by the cross product of two vectors lying on the plane
                A, B, C = np.cross(pt2 - pt1, pt3 - pt1)
                # Hessian parameter D is computed using one point that lies on the plane
                D = -(A * pt1[0] + B * pt1[1] + C * pt1[2])
                # Compute the distance from all points to the plane
                # from https://www.geeksforgeeks.org/distance-between-a-point-and-a-plane-in-3-d/
                distances = abs(
                    (A * pts[:, 0] + B * pts[:, 1] + C * pts[:, 2] +
                     D)) / (math.sqrt(A * A + B * B + C * C))
                # Compute the number of inliers for this plane hypothesis.
                # Inliers are points whose distance to the plane is less than the ransac_threshold
                num_inliers = (distances < self.ransac_threshold).sum()
                # Store this as the best hypothesis if the number of inliers is larger than the previous max
                if num_inliers > self.n_inliers:
                    self.n_inliers = num_inliers
                    self.A = A
                    self.B = B
                    self.C = C
                    self.D = D

            # Extract the inliers
            distances = abs((self.A * pts[:, 0] + self.B * pts[:, 1] + self.C * pts[:, 2] + self.D)) / \
                        (math.sqrt(self.A * self.A + self.B * self.B + self.C * self.C))
            inliers = pts[np.where(distances < self.ransac_threshold)]
            # Create dictionary [pcl point index, distance to plane] to select the pcl indexes of the inliers
            idx_map = dict(zip(idx, distances))
            final_idx = []
            for key in idx_map:
                if idx_map[key] < self.ransac_threshold:
                    final_idx.append(key)
            # -------------------------------------- End of RANSAC ----------------------------------------- #

            # publish the points that belong to the cluster
            points = []
            for i in range(len(inliers)):
                r = int(1 * 255.0)
                g = int(1 * 255.0)
                b = int(1 * 255.0)
                a = 150
                rgb = struct.unpack('I', struct.pack('BBBB', b, g, r, a))[0]
                pt = [inliers[i, 0], inliers[i, 1], inliers[i, 2], rgb]
                points.append(pt)

            fields = [
                PointField('x', 0, PointField.FLOAT32, 1),
                PointField('y', 4, PointField.FLOAT32, 1),
                PointField('z', 8, PointField.FLOAT32, 1),
                PointField('rgba', 12, PointField.UINT32, 1)
            ]
            header = Header()
            header.frame_id = self.parent
            header.stamp = self.msg.header.stamp
            pc_msg = point_cloud2.create_cloud(header, fields, points)
            self.publisher_selected_points.publish(pc_msg)

            # Reset the number of inliers to have a fresh start on the next iteration
            self.n_inliers = 0

            # Update the dictionary with the labels (to be saved if the user selects the option)
            self.labels['detected'] = True
            self.labels['idxs'] = final_idx

            # Update the interactive marker pose
            self.marker.pose.position.x = seed_point[0]
            self.marker.pose.position.y = seed_point[1]
            self.marker.pose.position.z = seed_point[2]
            self.menu_handler.reApply(self.server)
            self.server.applyChanges()
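
The RANSAC stage above is self-contained enough to lift out. A minimal sketch of the same plane fit on a bare numpy array; the function name, iteration count and threshold are invented, and sampling three distinct indices up front replaces the coincidence checks:

import math
import random
import numpy as np

def ransac_plane(pts, iterations=100, threshold=0.05):
    # pts is an (N, 3) array with N >= 3; returns the Hessian coefficients
    # (A, B, C, D) of the plane supported by the largest number of inliers
    best_inliers, best_plane = 0, None
    for _ in range(iterations):
        idx1, idx2, idx3 = random.sample(range(pts.shape[0]), 3)
        pt1, pt2, pt3 = pts[idx1], pts[idx2], pts[idx3]
        A, B, C = np.cross(pt2 - pt1, pt3 - pt1)
        norm = math.sqrt(A * A + B * B + C * C)
        if norm == 0:  # the three points were collinear; resample
            continue
        D = -(A * pt1[0] + B * pt1[1] + C * pt1[2])
        distances = np.abs(A * pts[:, 0] + B * pts[:, 1] + C * pts[:, 2] + D) / norm
        num_inliers = (distances < threshold).sum()
        if num_inliers > best_inliers:
            best_inliers, best_plane = num_inliers, (A, B, C, D)
    return best_plane

Testing the norm of the cross product instead of comparing ab + bc with ac also sidesteps the exact float comparison the original loop relied on.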
Code example #56
 def parse_semag(self, txt, mass):  # parameter renamed from 'str' to avoid shadowing the builtin
     # split = re.split('\s', str.strip())
     reg_book = re.compile(u'ו?(עשין|שם|לאוין)')
     split = re.split(reg_book, txt.strip())
     # str_list = filter(None, split)
     str_list = filter(None, [item.strip() for item in split])
     resolveds = []
     # it = iter(str_list)
     derabanan_flag = False
     book = None
     for i, word in enumerate(str_list):
         if derabanan_flag:
             derabanan_flag = False
             resolved = self._tracker.resolve(book, [1])
             resolveds.append(resolved)
             continue
         elif re.search(reg_book, word):
             # book = word
             # if book == u'שם':
             #     book = None
             # elif book == u'לאוין':
             #     book = u'Sefer Mitzvot Gadol, Volume One'
             try:
                 if word != u'שם':
                     derabanan = filter(None, [
                         item.strip() for item in re.split(
                             u'(מד"ס|מ?דרבנן)', str_list[i + 1].strip())
                     ])
             except IndexError:
                 # mass.ErrorFile.write('error smg, no place in book notation')
                 mass.error_flag = 'error smg, no place in book notation'
                 print 'error smg, no place in book notation'
                 return
             if word == u'עשין' and len(derabanan) > 1 and (derabanan[0] !=
                                                            u"סימן"):
                 book = re.search(u'[א-ה]', derabanan[1])
                 # print book.group(0)
                 book = self._table[book.group(0)]
                 derabanan_flag = True
             elif re.match(reg_book, word):
                 book = self._table[word]
             else:
                 mass.ErrorFile.write(
                     "error smg, don't recognize book name")
                 print "error smg, don't recognize book name", book
                 return
         else:
             mitzva = re.split(r'\s', word)
             for m in mitzva:
                 # if m == u'סימן':
                 #     continue
                 if m == u'שם':
                     m = None
                 elif getGematriaVav(m):
                     m = getGematriaVav(m)
                 else:
                     m = None
                 resolved = self._tracker.resolve(book, [m])
                 resolveds.append(resolved)
     if not resolveds:
         resolved = self._tracker.resolve(book, [None])
         resolveds.append(resolved)
     # print resolveds
     return resolveds
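
The splitting trick parse_semag relies on is that re.split keeps the text matched by a capturing group in the result list, so the book keywords survive the split. An illustration with Latin stand-ins for the Hebrew keywords:

import re

reg = re.compile(u'(aseh|sham|lavin)')
split = re.split(reg, u'aseh 12 sham 34')
parts = filter(None, [item.strip() for item in split])
print parts  # [u'aseh', u'12', u'sham', u'34']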
Code example #57
def generate_and_push_new_documentation_page(
    temporary_documentation_folder,
    distribution_bundle_file,
    has_api_documentation,
    temporary_documentation_node_modules_directory
):
# #
    '''
        Renders a new index.html file and copies new assets to generate a new \
        documentation homepage.
    '''
    global BUILD_DOCUMENTATION_PAGE_COMMAND
    __logger__.info('Update documentation design.')
    if distribution_bundle_file:
        new_distribution_bundle_file = FileHandler(location='%s%s%s' % (
            temporary_documentation_folder.path, DOCUMENTATION_BUILD_PATH,
            DISTRIBUTION_BUNDLE_FILE_PATH))
        new_distribution_bundle_file.directory.make_directories()
        distribution_bundle_file.path = new_distribution_bundle_file
        new_distribution_bundle_directory = FileHandler(location='%s%s%s' % (
            temporary_documentation_folder.path, DOCUMENTATION_BUILD_PATH,
            DISTRIBUTION_BUNDLE_DIRECTORY_PATH))
        new_distribution_bundle_directory.make_directories()
        zipfile.ZipFile(distribution_bundle_file.path).extractall(
            new_distribution_bundle_directory.path)
    favicon = FileHandler(location='favicon.png')
    if favicon:
        favicon.copy(target='%s/source/image/favicon.ico' %
            temporary_documentation_folder.path)
    parameter = builtins.dict(builtins.map(lambda item: (
        String(item[0]).camel_case_to_delimited.content.upper(), item[1]
    ), SCOPE.get('documentationWebsite', {}).items()))
    if 'TAGLINE' not in parameter and 'description' in SCOPE:
        parameter['TAGLINE'] = SCOPE['description']
    if 'NAME' not in parameter and 'name' in SCOPE:
        parameter['NAME'] = SCOPE['name']
    __logger__.debug('Found parameter "%s".', json.dumps(parameter))
    api_documentation_path = None
    if has_api_documentation:
        api_documentation_path = '%s%s' % (
            API_DOCUMENTATION_PATH[1], API_DOCUMENTATION_PATH_SUFFIX)
        if not FileHandler(location='%s%s' % (
            FileHandler().path, api_documentation_path
        )).is_directory():
            api_documentation_path = API_DOCUMENTATION_PATH[1]
    parameter.update({
        'CONTENT': CONTENT,
        'CONTENT_FILE_PATH': None,
        'RENDER_CONTENT': False,
        'API_DOCUMENTATION_PATH': api_documentation_path,
        'DISTRIBUTION_BUNDLE_FILE_PATH': DISTRIBUTION_BUNDLE_FILE_PATH if (
            distribution_bundle_file and
            distribution_bundle_file.is_file()
        ) else None
    })
# # python3.5
# #     parameter = Dictionary(parameter).convert(
# #         value_wrapper=lambda key, value: value.replace(
# #             '!', '#%%%#'
# #         ) if builtins.isinstance(value, builtins.str) else value
# #     ).content
    parameter = Dictionary(parameter).convert(
        value_wrapper=lambda key, value: value.replace(
            '!', '#%%%#'
        ) if builtins.isinstance(value, builtins.unicode) else value
    ).content
# #
    if __logger__.isEnabledFor(logging.DEBUG):
        BUILD_DOCUMENTATION_PAGE_COMMAND = \
            BUILD_DOCUMENTATION_PAGE_COMMAND[:-1] + [
                '-debug'
            ] + BUILD_DOCUMENTATION_PAGE_COMMAND[-1:]
    serialized_parameter = json.dumps(parameter)
    parameter_file = FileHandler(location=make_secure_temporary_file('.json')[
        1])
    parameter_file.content = \
        BUILD_DOCUMENTATION_PAGE_PARAMETER_TEMPLATE.format(
            serializedParameter=serialized_parameter, **SCOPE)
    for index, command in builtins.enumerate(BUILD_DOCUMENTATION_PAGE_COMMAND):
        BUILD_DOCUMENTATION_PAGE_COMMAND[index] = \
            BUILD_DOCUMENTATION_PAGE_COMMAND[index].format(
                serializedParameter=serialized_parameter,
                parameterFilePath=parameter_file._path,
                **SCOPE)
    __logger__.debug('Use parameter "%s".', serialized_parameter)
    __logger__.info('Run "%s".', ' '.join(BUILD_DOCUMENTATION_PAGE_COMMAND))
    current_working_directory_backup = FileHandler()
    temporary_documentation_folder.change_working_directory()
    Platform.run(
        command=BUILD_DOCUMENTATION_PAGE_COMMAND[0],
        command_arguments=BUILD_DOCUMENTATION_PAGE_COMMAND[1:], error=False,
        log=True)
    current_working_directory_backup.change_working_directory()
    parameter_file.remove_file()
    for file in FileHandler():
        if not (file in (temporary_documentation_folder, FileHandler(
            location='.%s' % API_DOCUMENTATION_PATH[1]
        )) or is_file_ignored(file)):
            file.remove_deep()
    documentation_build_folder = FileHandler(location='%s%s' % (
        temporary_documentation_folder.path, DOCUMENTATION_BUILD_PATH
    ), must_exist=True)
    documentation_build_folder.iterate_directory(
        function=copy_repository_file, recursive=True,
        source=documentation_build_folder, target=FileHandler())
    if (Platform.run(
        "/usr/bin/env sudo umount '%s'" %
            temporary_documentation_node_modules_directory.path,
        native_shell=True, error=False, log=True
    )['return_code'] == 0):
        temporary_documentation_folder.remove_deep()
    Platform.run(
        (
            '/usr/bin/env git add --all',
            '/usr/bin/env git commit --message "%s" --all' %
                PROJECT_PAGE_COMMIT_MESSAGE,
            '/usr/bin/env git push',
            '/usr/bin/env git checkout master'
        ),
        native_shell=True,
        error=False,
        log=True
    )
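
The builtins.enumerate loop near the end templates an argv-style command list in place, index by index. A stripped-down sketch; the command and parameter names are invented:

command = ['node', 'build.js', '--params', '{parameterFilePath}']
values = {'parameterFilePath': '/tmp/parameter.json'}
for index, part in enumerate(command):
    # str.format leaves elements without placeholders unchanged
    command[index] = part.format(**values)
print(command)  # ['node', 'build.js', '--params', '/tmp/parameter.json']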
Code example #58
    def labelData(self):
        # Reset 'detected' to False and 'idxs' to [] to make sure we are not using information from a previous labelling
        self.labels['detected'] = False
        self.labels['idxs'] = []

        # The labelling process depends on the sensor type
        if self.msg_type_str == 'LaserScan':  # 2D LIDARS -------------------------------------
            # For 2D LIDARs the process is as follows: first, group the range data into clusters; then
            # associate one of the clusters with the calibration pattern by selecting the cluster which is closest to
            # the rviz interactive marker.

            clusters = []  # initialize cluster list to empty
            cluster_counter = 0  # init counter
            points = []  # init points

            # Compute cartesian coordinates
            xs, ys = interactive_calibration.utilities.laser_scan_msg_to_xy(self.msg)

            # Clustering:
            first_iteration = True
            for idx, r in enumerate(self.msg.ranges):
                # Skip if either this point or the previous one has a range smaller than minimum_range_value
                if r < self.minimum_range_value or self.msg.ranges[idx - 1] < self.minimum_range_value:
                    continue

                if first_iteration:  # if first iteration, create a new cluster
                    clusters.append(LaserScanCluster(cluster_counter, idx))
                    first_iteration = False
                else:  # check if new point belongs to current cluster, create new cluster if not
                    x = xs[clusters[-1].idxs[-1]]  # x coordinate of last point of last cluster
                    y = ys[clusters[-1].idxs[-1]]  # y coordinate of last point of last cluster
                    distance = math.sqrt((xs[idx] - x) ** 2 + (ys[idx] - y) ** 2)
                    if distance > self.threshold:  # if distance larger than threshold, create new cluster
                        cluster_counter += 1
                        clusters.append(LaserScanCluster(cluster_counter, idx))
                    else:  # same cluster, push this point into the same cluster
                        clusters[-1].pushIdx(idx)

            # Association stage: find out which cluster is closer to the marker
            x_marker, y_marker = self.marker.pose.position.x, self.marker.pose.position.y  # interactive marker pose
            idx_closest_cluster = 0
            min_dist = sys.maxint
            for cluster_idx, cluster in enumerate(clusters):  # cycle all clusters
                for idx in cluster.idxs:  # cycle each point in the cluster
                    x, y = xs[idx], ys[idx]
                    dist = math.sqrt((x_marker - x) ** 2 + (y_marker - y) ** 2)
                    if dist < min_dist:
                        idx_closest_cluster = cluster_idx
                        min_dist = dist

            closest_cluster = clusters[idx_closest_cluster]

            # Find the coordinate of the middle point in the closest cluster and bring the marker to that point
            x_sum, y_sum = 0, 0
            for idx in closest_cluster.idxs:
                x_sum += xs[idx]
                y_sum += ys[idx]

            self.marker.pose.position.x = x_sum / float(len(closest_cluster.idxs))
            self.marker.pose.position.y = y_sum / float(len(closest_cluster.idxs))
            self.marker.pose.position.z = 0
            self.menu_handler.reApply(self.server)
            self.server.applyChanges()

            # Update the dictionary with the labels
            self.labels['detected'] = True

            percentage_points_to_remove = 0.0  # remove x% of data from each side
            number_of_idxs = len(clusters[idx_closest_cluster].idxs)
            idxs_to_remove = int(percentage_points_to_remove * float(number_of_idxs))
            clusters[idx_closest_cluster].idxs_filtered = clusters[idx_closest_cluster].idxs[
                                                          idxs_to_remove:number_of_idxs - idxs_to_remove]

            self.labels['idxs'] = clusters[idx_closest_cluster].idxs_filtered

            # Create and publish point cloud message with the colored clusters (just for debugging)
            cmap = cm.prism(np.linspace(0, 1, len(clusters)))
            points = []
            z, a = 0, 255
            for cluster in clusters:
                for idx in cluster.idxs:
                    x, y = xs[idx], ys[idx]
                    r, g, b = int(cmap[cluster.cluster_count, 0] * 255.0), \
                              int(cmap[cluster.cluster_count, 1] * 255.0), \
                              int(cmap[cluster.cluster_count, 2] * 255.0)
                    rgb = struct.unpack('I', struct.pack('BBBB', b, g, r, a))[0]
                    pt = [x, y, z, rgb]
                    points.append(pt)

            fields = [PointField('x', 0, PointField.FLOAT32, 1), PointField('y', 4, PointField.FLOAT32, 1),
                      PointField('z', 8, PointField.FLOAT32, 1), PointField('rgba', 12, PointField.UINT32, 1)]
            header = Header()
            header.frame_id = self.parent
            header.stamp = self.msg.header.stamp
            pc_msg = point_cloud2.create_cloud(header, fields, points)
            self.publisher_clusters.publish(pc_msg)

            # Create and publish point cloud message containing only the selected calibration pattern points
            points = []
            for idx in clusters[idx_closest_cluster].idxs_filtered:
                x_marker, y_marker, z_marker = xs[idx], ys[idx], 0
                r = int(0 * 255.0)
                g = int(0 * 255.0)
                b = int(1 * 255.0)
                a = 255
                rgb = struct.unpack('I', struct.pack('BBBB', b, g, r, a))[0]
                pt = [x_marker, y_marker, z_marker, rgb]
                points.append(pt)

            pc_msg = point_cloud2.create_cloud(header, fields, points)
            self.publisher_selected_points.publish(pc_msg)

        elif self.msg_type_str == 'Image':  # Cameras -------------------------------------------

            # Convert to opencv image and save image to disk
            image = self.bridge.imgmsg_to_cv2(self.msg, "bgr8")

            result = self.pattern.detect(image)
            if result['detected']:
                c = []
                for corner in result['keypoints']:
                    c.append({'x': float(corner[0][0]), 'y': float(corner[0][1])})

                x = int(round(c[0]['x']))
                y = int(round(c[0]['y']))
                cv2.line(image, (x, y), (x, y), (0, 255, 255), 20)

                # Update the dictionary with the labels
                self.labels['detected'] = True
                self.labels['idxs'] = c

            # For visual debugging
            self.pattern.drawKeypoints(image, result)

            msg_out = self.bridge.cv2_to_imgmsg(image, encoding="passthrough")
            msg_out.header.stamp = self.msg.header.stamp
            msg_out.header.frame_id = self.msg.header.frame_id
            self.publisher_labelled_image.publish(msg_out)

        elif self.msg_type_str == 'PointCloud2':  # RGB-D pointcloud -------------------------------------------
            # print("Found point cloud!")

            # Get 3D coords
            points = pc2.read_points_list(self.msg, skip_nans=False, field_names=("x", "y", "z"))

            # Get the marker position
            x_marker, y_marker, z_marker = self.marker.pose.position.x, self.marker.pose.position.y, self.marker.pose.position.z  # interactive marker pose

            cam_model = PinholeCameraModel()

            # Wait for camera info message
            camera_info = rospy.wait_for_message('/top_center_rgbd_camera/depth/camera_info', CameraInfo)
            cam_model.fromCameraInfo(camera_info)

            # Project points
            seed_point = cam_model.project3dToPixel((x_marker, y_marker, z_marker))
            seed_point = (int(math.floor(seed_point[0])), int(math.floor(seed_point[1])))

            # Wait for depth image message
            imgmsg = rospy.wait_for_message('/top_center_rgbd_camera/depth/image_rect', Image)

            # img = self.bridge.imgmsg_to_cv2(imgmsg, desired_encoding="8UC1")
            img = self.bridge.imgmsg_to_cv2(imgmsg, desired_encoding="passthrough")

            img_float = img.astype(np.float32)
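            # presumably converting raw depth from millimetres to metres
            # (the usual encoding of ROS depth images)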
            img_float = img_float / 1000

            # print('img type = ' + str(img.dtype))
            # print('img_float type = ' + str(img_float.dtype))
            # print('img_float shape = ' + str(img_float.shape))
            h, w = img.shape

            mask = np.zeros((h + 2, w + 2, 1), np.uint8)

            # mask[seed_point[1] - 2:seed_point[1] + 2, seed_point[0] - 2:seed_point[0] + 2] = 255

            img_float2 = deepcopy(img_float)
            cv2.floodFill(img_float2, mask, seed_point, 128, 0.1, 0.1,
                          8 | (128 << 8) | cv2.FLOODFILL_MASK_ONLY) # | cv2.FLOODFILL_FIXED_RANGE)

            # Switch coords of seed point
            # mask[seed_point[1]-2:seed_point[1]+2, seed_point[0]-2:seed_point[0]+2] = 255

            tmpmask = mask[1:h + 1, 1:w + 1]

            # calculate moments of binary image
            M = cv2.moments(tmpmask)

            if M["m00"] != 0:
                # calculate x,y coordinate of center
                cX = int(M["m10"] / M["m00"])
                cY = int(M["m01"] / M["m00"])

                mask[cY-2:cY+2, cX-2:cX+2] = 255

                cv2.imshow("mask", mask)
                cv2.waitKey(20)

                # msg_out = self.bridge.cv2_to_imgmsg(showcenter, encoding="passthrough")
                # msg_out.header.stamp = self.msg.header.stamp
                # msg_out.header.frame_id = self.msg.header.frame_id

                # self.publisher_labelled_depth_image.publish(msg_out)

                coords = points[cY * 640 + cX]

                if not math.isnan(coords[0]):
                    self.marker.pose.position.x = coords[0]
                    self.marker.pose.position.y = coords[1]
                    self.marker.pose.position.z = coords[2]
                    self.menu_handler.reApply(self.server)
                    self.server.applyChanges()