Example #1
File: karma.py  Project: pjurik2/pykarma
    def watch(self):
        rpc = new_rpc('Link Karma')

        # Poll the karma value over RPC and push it to the GUI only when it changes.
        link_karma = -1
        while True:
            new_link_karma = rpc.gui_karma_get()

            if link_karma != new_link_karma:
                link_karma = new_link_karma
                rpc.gui_karma_set(link_karma)
                print link_karma
            time.sleep(35)
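
All three watch() examples follow the same shape: a long-running polling loop that pushes results to the GUI over the RPC handle returned by new_rpc. Below is a minimal, self-contained sketch of the change-detection loop used in this example; read_value and publish_value are hypothetical stand-ins for the pykarma RPC calls, not part of the project.

    import time

    def watch_value(read_value, publish_value, interval=35):
        # Poll read_value() and forward the reading only when it changes.
        last = None
        while True:
            current = read_value()
            if current != last:
                last = current
                publish_value(current)
            time.sleep(interval)

    # Usage sketch (with dummy callables): watch_value(lambda: 42, lambda v: None, interval=5)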
Example #2
File: rss.py  Project: pjurik2/pykarma
    def watch(self, new_streams=None):
        self.configure()
        self.web = web.Web()

        # The RPC connection is optional; fall back to standalone mode if it fails.
        try:
            self.rpc = new_rpc(self.title)
        except:
            self.rpc = None
            print 'Warning: Running without RPC'

        if new_streams is None:
            new_streams = []

        streams = self.streams + new_streams

        # Cycle through the configured feeds forever, pausing a random interval
        # between checks.
        for url in itertools.cycle(streams):
            print url
            self.check_feed(url)
            time.sleep(random.randint(*self.wait_range))
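
The core of this RSS watcher is a round-robin over feed URLs with a randomized pause between requests. A standalone sketch of that loop, assuming a check_feed callable in place of the project's feed handler; the (30, 90) default is illustrative, since the project reads its wait range from configuration:

    import itertools
    import random
    import time

    def poll_feeds(feed_urls, check_feed, wait_range=(30, 90)):
        # Visit each feed in turn, forever, sleeping a random interval between
        # checks so requests are not issued on a fixed schedule.
        for url in itertools.cycle(feed_urls):
            check_feed(url)
            time.sleep(random.randint(*wait_range))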
Example #3
File: hn.py  Project: pjurik2/pykarma
    def watch(self):
        rpc = new_rpc('HN')
        w = web.Web()

        random.seed()
        tick = 0            # polling rounds since the last reset to the newest page
        next_url = None     # next listing page to fetch; None forces a reset
        urls = {}           # URLs already handled (used as a set for de-duplication)
        fail_count = 0      # consecutive failures, drives the exponential backoff
        datad = {}
        while True:
            if fail_count == 0:
                tick += 1
            else:
                next_url = None
            # Every fourth round (or after a failure) go back to the newest-posts
            # listing, choosing the endpoint that matches the USE_API mode.
            if tick == 4 or next_url is None:
                tick = 1
                if not USE_API:
                    next_url = 'https://news.ycombinator.com/newest'
                elif USE_API == 2:
                    next_url = 'http://hndroidapi.appspot.com/newest'
                else:
                    next_url = 'http://api.ihackernews.com/new'

            print next_url
            if not USE_API:
                posts = hn_posts(next_url)
            elif USE_API == 2:
                try:
                    r = urllib2.urlopen(next_url)
                    data = r.read()
                    ct = r.headers.getparam('charset')
                    data = encoding.decode(data, ct)
                except:
                    # Back off exponentially on network errors, capped at 10 minutes.
                    print 'network error'
                    time.sleep(min(2 ** fail_count, 600))
                    fail_count += 1
                    continue

                try:
                    datad = json.loads(data)
                except:
                    # Malformed JSON is treated like a network failure: back off and retry.
                    print 'parse error'
                    time.sleep(min(2 ** fail_count, 600))
                    fail_count += 1
                    continue
                posts = datad.get('items', [])
            else:
                try:
                    r = urllib2.urlopen(next_url)
                    data = r.read()
                    ct = r.headers.getparam('charset')
                    # Fall back to UTF-8 when the response declares no usable charset.
                    try:
                        data = data.decode(ct)
                    except:
                        data = data.decode('utf-8')
                except:
                    print 'network error'
                    time.sleep(min(2 ** fail_count, 600))
                    fail_count += 1
                    continue

                try:
                    datad = json.loads(data)
                except:
                    print 'parse error'
                    time.sleep(min(2 ** fail_count, 600))
                    fail_count += 1
                    continue
                posts = datad.get('items', [])

            
            # An empty result counts as a failure; otherwise reset the backoff counter.
            if len(posts) == 0:
                next_url = None
                time.sleep(min(2 ** fail_count, 600))
                fail_count += 1
                continue

            fail_count = 0

            # Work out the next page to fetch: follow the 'More' link when scraping,
            # or the nextId cursor when using the iHackerNews API.
            if not USE_API:
                if posts[-1].get('title', '') == 'More':
                    next_url = 'https://news.ycombinator.com' + posts[-1].get('url')
            elif USE_API == 2:
                next_url = None
            else:
                next_url = u'http://api.ihackernews.com/new/%s' % datad.get('nextId', '')
                
            # Rank the posts by score, highest first.
            points = [(int(post.get('points', 0)), i) for i, post in enumerate(posts)]
            points.sort()
            points = list(reversed(points))
            for post_id in points:
                post = posts[post_id[1]]

                if USE_API == 2:
                    # The hndroidapi score is a string such as '12 points'; parse out the number.
                    score = post.get('score', 0)
                    try:
                        post_points = int(score.split(' ', 1)[0])
                    except:
                        post_points = 0
                    post['points'] = post_points
                
                # Skip low-scoring posts (the threshold rises with each tick) and
                # any URL that has already been handled.
                if post.get('points', 0) < 3*tick:
                    continue
                if post.get('url', '') in urls:
                    continue
                else:
                    urls[post['url']] = None

                if post['url'].startswith('http://'):
                    title = w.title(post['url'])
                    subreddit = rpc.get_title_subreddit(title)
                    keywords = rpc.get_title_keywords(title)
                    url = post['url']
                    if reddit.url_output(title, subreddit, url, rpc):
                        stats = rpc.get_learned_stats(title, keywords)
                        rpc.gui_link_add('HN', title, url, subreddit, keywords, **stats)

            # Sleep roughly a minute between rounds, in ten short steps.
            sleep_sec = random.randint(50, 70)
            sleep_step = sleep_sec / 10.0
            for x in range(10):
                time.sleep(sleep_step)
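
Every failure branch in this example repeats the same bookkeeping: sleep min(2 ** fail_count, 600) seconds, increment fail_count, and retry. That is a capped exponential backoff (doubling waits, never more than 10 minutes). Below is a small sketch of the same pattern factored into a reusable helper; fetch_with_backoff and its arguments are illustrative, not part of pykarma.

    import time

    def fetch_with_backoff(fetch, max_sleep=600):
        # Call fetch() until it succeeds, doubling the sleep after each
        # failure but never waiting longer than max_sleep seconds.
        fail_count = 0
        while True:
            try:
                return fetch()
            except Exception:
                time.sleep(min(2 ** fail_count, max_sleep))
                fail_count += 1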