Code example #1
def get_pic_from_content(self, content):
    # Collect every link target on the page; printf is pprint.pprint aliased
    # via "from pprint import pprint as printf" (see examples #2 and #6).
    res = self._parse(xpath='//a/@href', content=content)
    #con = lambda x: x.find('detail') != -1
    #res = filter(con, res)
    printf(res)
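The printf in these snippets is not C's printf; later examples in this listing (for instance #2 and #6) import it as "from pprint import pprint as printf", so it is presumably the same alias here. A minimal sketch of that alias in isolation:

from pprint import pprint as printf

# pprint gives a readable, indented dump of nested data, which is why the
# snippets in this listing use it for link lists and query results.
printf({'links': ['http://example.com/a', 'http://example.com/b'], 'count': 2})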
Code example #2
    def dianzan(self, cnt=5, op='1', url_from_db=None):
        '''
        The addresses returned by the approach below are escaped.
        '''
        #patter = r'''<a href="([^>]*?)">赞'''
        #content = self.session.get(self.url).content
        #urls = re.findall(patter, content)

        try:
            if self.verify:
                return self.verify
        except AttributeError:  # self.verify may not be set
            pass

        feed_url = url_from_db or self.url
        #url = self._parse(feed_url, '/wml/card/@ontimer')  # Not sure why an extra redirect appears here after switching to a different QQ account
        #if url:
        #    feed_url = url[0].content

        #self.url = feed_url

        # Positive/negative keyword lists are stored as '#'-separated strings.
        pos = [_.strip() for _ in self.pos.split('#')]
        neg = [_.strip() for _ in self.neg.split('#')]
        print pos, neg
        is_filter = self.pos or self.neg
        try:
            cnt = int(self.cnt)
        except Exception as e:
            print str(e)
            import traceback, sys
            traceback.print_exc(file=sys.stdout)

        zan_success = False

        for i in xrange(cnt):
            print "feed_url:" + feed_url
            content = self.session.get(feed_url).content

            if is_filter:
                for zan_url, zan_content, zan_user in self.get_zan_datail(content):
                    print zan_content
                    decoded = zan_content.decode('utf-8')
                    # Like the feed only if it contains a positive keyword and no negative keyword.
                    if any(_ and _ in decoded for _ in pos) and not any(_ and _ in decoded for _ in neg):
                    #if zan_content.decode('utf-8') in pos and zan_content not in neg:
                        print zan_content, pos, neg
                        self.session.get(zan_url)
                        print '赞成功'
                        zan_success = True

            urls = self._parse(None, '//*/@href', content=content)
            #import json
            #return json.dumps( self.get_friend() )


            if not is_filter:
                for url in urls:
                    # A "like" link contains like_action and ends with the op code.
                    if url.content.find('like_action') != -1 and url.content[-1] == op:
                        if url.content in self.repeat_set:
                            continue
                        ret = self.session.get(url.content).content
                        if ret.find('成功') != -1:  # response says "success"
                            print '赞成功'
                            zan_success = True
                        self.repeat_set.add(url.content)

            # Follow the "更多好友动态>>" (more friend feeds) or "下页" (next page) link to the next page.
            urls = self._parse(None, '//*[text()="更多好友动态>>" or text()="下页"]/@href', content=content)
            for url in urls:
                #if url.content.find('feeds_friends') != -1 or url.content.find('dayval=1') != -1:
                feed_url = url.content

        if self.remember == 'on':
            # Remember login info: keep the web session permanent after the browser is closed
            # (session here is the framework's session object, not self.session).
            session.permanent = True
            session['qq'] = self.qq
        if zan_success:
            with kvdbwrap.KVDB() as kv:
                val = {
                    'qq': self.qq,
                    'url': self.url,
                    'cnt': self.cnt,
                    'feq': self.feq,
                    'inc': self.inc,
                    'frr': self.frr,
                    'pos': self.pos,
                    'neg': self.neg,
                }
                key = 'qq#%s' % self.qq
                from pprint import pprint as printf
                printf(val)
                kv.add(key, json.dumps(val))
        return 'success'
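To make the positive/negative keyword filter in dianzan easier to follow in isolation, here is a minimal, self-contained sketch of the same check. The function name and sample feeds are hypothetical; only the '#'-separated keyword lists and the any(...) conditions mirror the code above.

from pprint import pprint as printf

def should_like(text, pos_spec, neg_spec):
    """Return True if text contains a positive keyword and no negative keyword.

    pos_spec and neg_spec are '#'-separated keyword strings, as in dianzan().
    """
    pos = [kw.strip() for kw in pos_spec.split('#')]
    neg = [kw.strip() for kw in neg_spec.split('#')]
    has_pos = any(kw and kw in text for kw in pos)
    has_neg = any(kw and kw in text for kw in neg)
    return has_pos and not has_neg

# Hypothetical sample data, just to exercise the filter.
feeds = ['great photo from the trip', 'boring ad: buy now', 'trip day two']
printf([f for f in feeds if should_like(f, 'trip # photo', 'ad')])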
Code example #3
        # Bucket this rank into a group of width group_distance.
        rank_group = rank // group_distance
        while len(lst) <= rank_group:
            lst.append(0)
        lst[rank_group] += 1
    # Pad so every group up to max_rank exists, then convert counts to percentages.
    while len(lst) <= max_rank // group_distance:
        lst.append(0)
    for i in range(len(lst)):
        lst[i] /= len(rank_values)
        lst[i] *= 100

indexes = []
fromindex = 0
while fromindex <= max_rank:
    indexes.append(fromindex)
    fromindex += group_distance

from pprint import pprint as printf
# Plotting libraries used below; presumably imported at the top of the original file.
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
printf(list(groups.values()))
print(indexes)

df = pd.DataFrame(data=groups, index=indexes)

plt.rcParams['font.sans-serif'] = ['SimHei']  # so Chinese labels render correctly
plt.rcParams['axes.unicode_minus'] = False  # so the minus sign renders correctly

cmap = sns.cm.rocket_r
sns.heatmap(df, annot=True, cmap=cmap)
plt.tight_layout()
plt.title(fig_title)
plt.show()
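For reference, a minimal, self-contained sketch of the bucketing step that the fragment above performs: ranks are counted into buckets of width group_distance and the counts are converted to percentages. The function name and sample values are hypothetical; only the arithmetic mirrors the fragment.

from pprint import pprint as printf

def rank_percentages(rank_values, group_distance, max_rank):
    """Count ranks per bucket of width group_distance, then scale to percent."""
    lst = []
    for rank in rank_values:
        rank_group = rank // group_distance
        while len(lst) <= rank_group:
            lst.append(0)
        lst[rank_group] += 1
    # Pad so every bucket up to max_rank exists.
    while len(lst) <= max_rank // group_distance:
        lst.append(0)
    return [100 * count / len(rank_values) for count in lst]

# Hypothetical example: six ranks between 0 and 99, bucketed by tens.
printf(rank_percentages([3, 7, 12, 35, 35, 99], group_distance=10, max_rank=99))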
Code example #4
File: db_methods.py Project: Stellari/3gqq
def _test_add(self):
    cursor = self.db.cursor()
    cursor.execute('select * from task')
    printf(cursor.fetchall())  # pretty-print every row of the task table
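A stand-alone version of the same fetchall-and-pretty-print pattern, run against an in-memory SQLite database so it can be executed as-is. The task table layout here is made up for illustration and is not taken from db_methods.py.

import sqlite3
from pprint import pprint as printf

db = sqlite3.connect(':memory:')
cursor = db.cursor()
cursor.execute('create table task (id integer primary key, name text)')
cursor.executemany('insert into task (name) values (?)', [('backup',), ('sync',)])
db.commit()

cursor.execute('select * from task')
printf(cursor.fetchall())  # e.g. [(1, 'backup'), (2, 'sync')]
db.close()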
Code example #5
File: qq.py Project: qq40660/3gqq
def select_friend():
    printf(request.form.keys())  # pretty-print the submitted form field names
    return 'yes'
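A minimal sketch of how a handler like select_friend is typically wired up and exercised; the route path, app setup, and form data below are assumptions for illustration, not taken from qq.py.

from pprint import pprint as printf
from flask import Flask, request

app = Flask(__name__)

@app.route('/select_friend', methods=['POST'])  # route path is an assumption
def select_friend():
    printf(list(request.form.keys()))  # show which form fields were submitted
    return 'yes'

if __name__ == '__main__':
    # Exercise the handler with Flask's test client instead of a real browser.
    with app.test_client() as client:
        resp = client.post('/select_friend', data={'qq': '12345', 'name': 'demo'})
        print(resp.data)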
Code example #6
File: test_train.py Project: wxf0322/train_graph
"""
2019.11.15:
Test Train's dynamic-programming similarity computation.
"""
from ..data import *
from pprint import pprint as printf

graph1 = Graph()
graph1.loadGraph('source/西成客专线广成段20190410.json')
train1 = graph1.trainFromCheci('D1911', True)  # D1911 from the 20190410 diagram

graph2 = Graph()
graph2.loadGraph('source/西成客专线广成段20190105.json')
train2 = graph2.trainFromCheci('D1911', True)

re1, value1 = train1.globalDiff(train2)
printf(re1)
print(value1)

# Test two completely unrelated trains
print("totally different train test")
train3 = graph1.trainFromCheci('D1913', True)
re2, value2 = train1.globalDiff(train3)
printf(re2)
print(value2)

# Test trains from two different diagrams
print("trains from different graphs")
graph3 = Graph()
graph3.loadGraph('source/京沪线上局段20190410.json')
train4 = graph3.trainFromCheci('D707', True)