import random
import time

# Project-local modules assumed to ship alongside this file.
import api
import config
import logger
import text_utils


def translate_locale(cmd, dic=cmd_translated):
    """Return the canonical command whose alias list contains `cmd`,
    or False when no alias matches."""
    l_cmd = text_utils.remove_extra_symbols(cmd, True)
    for command, possible_aliases in dic.items():
        if l_cmd in possible_aliases:
            return command
    return False
def find_answer(question):
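    """Pick the stored question closest to `question` by Levenshtein
    distance and return one of its answers (run through
    translate_irisscript); fall back to damn() when nothing matches."""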
    question = text_utils.remove_extra_symbols(question)
    used = set()
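    # Ids of questions already visited; keeps the recursive crawl below
    # from looping over mutually linked questions.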
    def crawl(question):
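        """Return the answers of the closest unvisited stored question,
        recursing into its 'same_as' variants; [] if no match is close
        enough (relative distance above 0.2)."""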
        mindist = float('Inf')
        query = None
        global questions
        for q in questions:
            if q['id'] in used:
                continue
            variations = [q['question']] + q['same_as']
            for el in variations:
                tmp = text_utils.levenstein_dist(el, question)
                if tmp < mindist:
                    mindist = tmp
                    query = q
        # Accept only a close match: edit distance at most 20% of the
        # question length (the +1 guards against division by zero).
        if float(mindist) / (len(unicode(question)) + 1) <= 0.2:
            used.add(query['id'])
            # Copy the stored list: `+=` on query['answers'] itself would
            # permanently append the crawled answers to the question.
            answers = list(query['answers'])
            if query.get('same_as'):
                for same_question in query['same_as']:
                    answers += crawl(same_question)
            return answers
        else:
            return []
    answers = crawl(question)
    if len(answers) == 0:
        return damn()
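    # Start from a random answer and fall through the rest until one
    # translates successfully.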
    rand_ans = random.randint(0, len(answers) - 1)
    for ans in answers[rand_ans:] + answers[0:rand_ans]:
        tmp = translate_irisscript(ans)
        if tmp is not None:
            return tmp
    return damn()
def read_learn_public(public_id, shell):
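    """Crawl the learn public's wall (VK) page by page and return a list of
    {'id', 'question', 'answers', 'same_as'} dicts, taking as answers the
    comments that gathered enough likes."""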
    amount = 10  # placeholder total; replaced by the first wall.get response
    cur = 0
    public = []
    while cur < amount:
        pars = {
            'owner_id': public_id,
            'count': 100,
            'offset': cur,
        }
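        # `lana` presumably selects which account/token api.query should use
        # (an assumption about the api module); it is set when learning from
        # the main public.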
        is_lana = 1 if config.BOT_LEARN_PUBLIC == config.MAIN_LEARN_PUBLIC else 0
        posts = api.query(u'wall.get', pars, lana=is_lana)
        if posts is None or posts.get('error') is not None:
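            # On a failed request, nudge the offset forward so the loop
            # cannot spin forever on the same page.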
            cur += 10
            continue
        posts = posts['response']
        amount = posts['count']  # the real total; replaces the placeholder
        posts = posts['items']
        if not posts:
            # An empty page would leave `cur` unchanged and loop forever.
            break
        for post in posts:
            post['text'] = text_utils.remove_extra_symbols(post['text'])
            if shell:
                print post['text']
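            # A post's text holds the question plus its variants, separated
            # by '//'; named q_variants here so it does not shadow the
            # module-level `questions` used by find_answer.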
            q_variants = [el.strip() for el in post['text'].split(u'//')]
            public.append({
                'id': post['id'],
                'question': q_variants[0],
                'answers': [],
                'same_as': q_variants[1:],
            })
            params = {
                'owner_id': post['owner_id'],
                'post_id': post['id'],
                'count': 100,
                'need_likes': 1,
            }
            comments = api.query(u'wall.getComments', params, lana=is_lana)
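            # Throttle to stay under the VK API rate limit; on an error,
            # wait a bit longer and retry once.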
            time.sleep(0.3)
            if comments is None or comments.get('error') is not None:
                time.sleep(0.5)
                comments = api.query(u'wall.getComments', params, lana=is_lana)
            if comments is None or comments.get('error') is not None:
                logger.log(u'Failed to fetch comments for the post with question: ' + post['text'])
                continue
            comments = comments['response']['items']
            for comm in comments:
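                # A comment becomes an answer if it has enough likes or was
                # liked by the account behind the token (user_likes == 1).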
                if comm['likes']['count'] >= config.LIKES_TO_BE_IN_ANSWERS or comm['likes']['user_likes'] == 1:
                    text_to_append = text_utils.remove_links(comm['text'])
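                    # Serialize each attachment as '<type><owner_id>_<id>'
                    # (VK's attachment reference format) after SPLIT_SIGN.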
                    if comm.get('attachments'):
                        for el in comm['attachments']:
                            att = el[el['type']]
                            text_to_append += (config.SPLIT_SIGN + unicode(el['type']) +
                                               unicode(att['owner_id']) + u'_' + unicode(att['id']))
                    public[-1]['answers'].append(text_to_append)


        cur += len(posts)
    return public