    def _get_time_line(self, t_user):
        try:
            cur = tweepy.Cursor(self.api.user_timeline, id=t_user.id)
            t_timeline = []
            log.info("appending user timeline")
            for t_el in cur.items():
                t_timeline.append(t_el)

            self._count_requests += 1
            log.debug("get timeline +1")

            # retrieving the user's tweets ("pearls") as hashable dicts
            timeline = [
                m_hash_dict({'text': element.text,
                             'retweets': element.retweet_count,
                             'initted': element.created_at})
                for element in t_timeline
            ]
            return timeline
        except Exception as e:
            if 'Rate limit exceeded' in str(e):
                log.info('rate limit exceeded, sleeping before retry...')
                time.sleep(60)
                return self._get_time_line(t_user)
            raise
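
    # A sketch of the retry pattern above factored into a reusable helper; the
    # method name and the fixed 60-second back-off are assumptions for
    # illustration, not part of the original class:
    def _retry_on_rate_limit(self, call, *args):
        try:
            return call(*args)
        except Exception as e:
            if 'Rate limit exceeded' not in str(e):
                raise
            log.info('rate limit exceeded, sleeping before retry...')
            time.sleep(60)
            return self._retry_on_rate_limit(call, *args)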
def __get_statistic_of_tweets(data):
    """
    input some [] with strings
    which return [] of {freq :<frequency_of_this_element_in_all_elements_in_input>, type:<type_name>, entity:<word>}
    //also in db - will can create collection entities, which include some interests
    """
    all_data = process_to_words(data)
    types = {mention: [], word: [], hash_tag: [], url: []}

    # separate elements and infer each string's type (mention, word, hash tag, url)
    for element in all_data:
        __imply_string_obj(element, types)

    #calculate all frequencies:
    result = []
    all_len = float(len(all_data))
    for el_type, arr in types.items():
        for el in arr:
            result.append(m_hash_dict({'entity': el,
                                       'freq': float(all_data.count(el)) / all_len,
                                       'type': el_type}))

    return {'statistic': list(set(result)),
            hash_tag: list(set(types[hash_tag])),
            mention: list(set(types[mention]))}
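
# Commented sketch of the result shape, assuming process_to_words tokenises the
# input and mention/word/hash_tag/url are string constants defined elsewhere:
#   __get_statistic_of_tweets(['hi #py', 'hi'])
#   -> {'statistic': [m_hash_dict({'entity': 'hi', 'freq': 2 / 3.0, 'type': word}),
#                     m_hash_dict({'entity': '#py', 'freq': 1 / 3.0, 'type': hash_tag})],
#       hash_tag: ['#py'],
#       mention: []}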
def _validate_dict_of_array(array):
    # wrap plain dict elements in m_hash_dict so they become hashable
    if len(array) and isinstance(array[0], dict) and not isinstance(array[0], m_hash_dict):
        return [m_hash_dict(el) for el in array]
    return array


def _prep_fields(fields, exclude):
    # drop fields whose names match the exclude predicate
    fields_ = [field for field in fields.items() if not exclude(field[0])]
    return m_hash_dict(dict(fields_))
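
# Commented usage sketch (field names are illustrative only):
#   _prep_fields({'_id': 1, 'name': 'x'}, lambda k: k.startswith('_'))
#   -> m_hash_dict({'name': 'x'})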
    @staticmethod
    def diff_str(element1, element2):
        return difference_element(
            difference_element.s_changed,
            m_hash_dict({difference_element.s_a_old: element1, difference_element.s_a_new: element2}),
        )
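    # Commented sketch: diff_str('a', 'b') yields a difference_element marked
    # s_changed whose payload maps s_a_old -> 'a' and s_a_new -> 'b'.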

    @staticmethod
    def __print_difference(m_difference):
        if not m_difference:
            print None
            return
        attrs = m_difference.__dict__
        for item in attrs.items():
            print item


class h_set(set):
    def __hash__(self):
        # order-independent: the hash is the sum of the element hashes
        return sum(hash(x) for x in self)
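
# Because __hash__ is a plain sum, equal h_sets hash identically regardless of
# construction order; a commented check:
#   assert hash(h_set([1, 2, 3])) == hash(h_set([3, 2, 1]))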


if __name__ == "__main__":
    print set(m_hash_dict({1: 1, 2: 2, 3: 4}))
#    diff = difference_factory.diff_arrays([[1, 1], 1, 1, 1, [1, 1]], [2, [2, 2], 2, [2, 2]])
#    print diff
    def serialise(self):
        # expose the instance's attributes as a hashable dict
        return m_hash_dict(self.__dict__)
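
# Commented sketch, assuming serialise() sits on a class whose state lives in
# plain instance attributes:
#   record.serialise() -> m_hash_dict of the attributes, hashable like the
#   other m_hash_dict records above, so it can be placed in a set or h_set.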