def parse_headers(self, use_cookies, raw):
    """ analyze headers from a file or a raw message
    :return: (url, dat)
    :rtype: tuple
    """
    if not raw:
        packet = helper.to_str(helper.read_file(self.fpth))
    else:
        packet = raw
    dat = {}
    # drop blank lines
    pks = [x for x in packet.split('\n') if x.strip()]
    # the request line looks like `GET /path HTTP/1.1`; take the path
    url = pks[0].split(' ')[1]
    for cnt in pks[1:]:
        # split on the first colon only, so values such as
        # `Host: example.com:8080` keep their own colons
        arr = cnt.split(':', 1)
        if len(arr) < 2:
            continue
        _k, v = arr[0].strip(), arr[1].strip()
        dat[_k] = v
    if use_cookies:
        try:
            self.fmt_cookies(dat.pop('Cookie'))
        except KeyError:
            pass
    self.headers = dat
    self.url = 'https://{}{}'.format(self.headers.get('Host'), url)
    return url, dat
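# A minimal, self-contained sketch of the same parsing idea, for reference:
# take the path from the request line, then split each header on the first
# colon only. Everything here, the sample packet included, is illustrative
# and not part of the project code above.
def _parse_raw_request(packet):
    lines = [x for x in packet.split('\n') if x.strip()]
    path = lines[0].split(' ')[1]
    headers = {}
    for line in lines[1:]:
        if ':' not in line:
            continue
        k, v = line.split(':', 1)
        headers[k.strip()] = v.strip()
    return path, headers

# usage sketch
sample = 'GET /search?q=1 HTTP/1.1\nHost: example.com\nAccept: */*\n'
print(_parse_raw_request(sample))
# -> ('/search?q=1', {'Host': 'example.com', 'Accept': '*/*'})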
def parse_charles(self, use_cookies):
    """ analyze plain info from a Charles packet
    :return: (url, dat)
    :rtype: tuple
    """
    packet = helper.to_str(helper.read_file(self.fpth))
    dat = {}
    pks = [x for x in packet.split('\n') if x.strip()]
    url = pks[0].split(' ')[1]
    for cnt in pks[1:]:
        # first colon only, so values keep their own colons
        arr = cnt.split(':', 1)
        if len(arr) < 2:
            continue
        _k, v = arr[0].strip(), arr[1].strip()
        dat[_k] = v
    if use_cookies and 'Cookie' in dat:
        self.fmt_cookies(dat.pop('Cookie'))
    self.headers = dat
    self.url = 'https://{}{}'.format(self.headers.get('Host'), url)
def rbig(fpth):
    # the earlier manual open()/read was immediately overwritten by this
    # call, so only the helper read is kept
    cnt = helper.read_file(fpth)
    print(cnt)
def get_index_name_by_tag(self, page_idx):
    """ reverse-look up the name-keyed tag that corresponds to an index value
    :return: the tag for ``page_idx``, or None
    :rtype: str
    """
    if not self.i2t:
        # lazily load the index-to-tag cache
        self.i2t = json.loads(helper.read_file('d4.i2t.json'))
    return self.i2t.get(page_idx)
def upload_pic(self):
    pic_url = 'https://picupload.weibo.com/interface/pic_upload.php'
    params = {
        'cb': 'https://weibo.com/aj/static/upimgback.html?_wv=5&callback=STK_ijax_{}'.format(
            helper.unixtime(True) * 100),
        'mime': 'image/jpeg',
        'data': 'base64',
        'url': 'weibo.com/u/6069778559',
        'markpos': '1',
        'logo': '1',
        'marks': '1',
        'app': 'miniblog',
        's': 'rdxt',
        'file_source': '1',
    }
    pic_pth = click.prompt('Enter a picture path, or drag one here').strip()
    if not pic_pth:
        log.error('a picture path must be given!!!')
        return
    if not confirm_do('upload picture'):
        return
    # the endpoint expects the image body base64-encoded in a form field
    data = {'b64_data': base64.b64encode(helper.read_file(pic_pth))}
    raw = self.web_sess.post(pic_url, params=params, data=data,
                             allow_redirects=False,
                             headers=HEADERS['pic_headers'])
    if raw.status_code == 302:
        # the upload result is carried in the redirect URL's query string
        loc = raw.headers['Location']
        return dict(parse_qsl(unquote_plus(loc)))
    log.error(raw.text)
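# How the 302 branch decodes its result, sketched standalone. parse_qsl and
# unquote_plus come from urllib.parse; the Location value below is made up
# purely to show the shape of the round trip. Note the original feeds the
# whole URL to parse_qsl, which folds the URL prefix into the first key;
# parsing only the query part via urlsplit keeps that key clean.
from urllib.parse import urlsplit, parse_qsl, unquote_plus

loc = 'https://weibo.com/aj/static/upimgback.html?_wv=5&pid=006abcdef&ret=1'
print(dict(parse_qsl(unquote_plus(urlsplit(loc).query))))
# -> {'_wv': '5', 'pid': '006abcdef', 'ret': '1'}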
def cl2num():
    # rows of interest: 562, 552, 558, 556, 559, 554
    y = [0, 1, 5, 8, 14, 17]
    cnt = helper.to_str(helper.read_file('bj.txt'))
    for i, l in enumerate(cnt.split('\n')):
        if i not in y:
            continue
        # split the row into non-empty whitespace-separated columns
        v = [x for x in l.split(' ') if x]
        s = '{}, highest: {}, lowest: {}, average: {}, head count: {}, score gap: {}'.format(
            v[2], v[7], v[9], v[11], v[6], int(v[11]) - int(v[12]))
        print(s)
def update_tag_pages_cache(self, tag):
    """ refresh the local cache of pages for the given tag
    :param tag: tag entry with a ``src`` key
    :type tag: dict
    :return:
    :rtype:
    """
    _k = tag['src']
    if _k in json.loads(helper.read_file('d4.t2i.json')):
        log.debug('{} already got'.format(_k))
        return
    pages = self.fetch_tags_pages_by_index(_k)
    self.t2i[_k] = pages
    helper.write_file(json.dumps(self.t2i).encode(), 'd4.t2i.json')
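# The guard-then-write pattern above, sketched with the stdlib only; the
# path, key, and value are placeholders, not the project's real cache layout.
import json
import os

def update_cache_entry(path, key, value):
    cache = {}
    if os.path.isfile(path):
        with open(path, encoding='utf-8') as fp:
            cache = json.load(fp)
    if key in cache:
        return  # already cached; skip the expensive fetch
    cache[key] = value
    with open(path, 'w', encoding='utf-8') as fp:
        json.dump(cache, fp, ensure_ascii=False)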
def parse_firefox(self):
    """ analyze plain info from a Firefox packet
    :return: (url, dat)
    :rtype: tuple
    """
    packet = helper.read_file(self.fpth, False)
    dat = {}
    pks = [x for x in packet.split('\n') if x]
    for cnt in pks:
        # split on the first `: ` only so colons inside values survive
        arr = cnt.split(': ', 1)
        if len(arr) < 2:
            continue
        dat[arr[0]] = arr[1]
    if 'Cookie' in dat:
        self.fmt_cookies(dat.pop('Cookie'))
    self.headers = dat
def load_cache(self):
    self.all_tags = json.loads(helper.read_file('d4.tags.json'))
    self.t2i = json.loads(helper.read_file('d4.t2i.json'))
    self.i2t = json.loads(helper.read_file('d4.i2t.json'))
def update_cache(self):
    self.txt = helper.to_str(helper.read_file('awesome.md'))
    self.md2dict()
def load_from_cache(name):
    if not helper.is_file_ok(name):
        return ''
    return helper.read_file(name)
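# A stdlib-only equivalent of the load_from_cache guard, assuming
# helper.is_file_ok means roughly "exists and is non-empty"; that reading
# of the helper is an assumption, not confirmed by the code above.
import os

def load_from_cache_stdlib(name):
    if not (os.path.isfile(name) and os.path.getsize(name) > 0):
        return ''
    with open(name, 'rb') as fp:
        return fp.read()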
def load_my_page_config(self):
    my_info = json.loads(helper.read_file(base.app_pth['personal']))
    self.my_info = my_info
    if not my_info:
        log.error('Log in to fetch your personal info first!!!')
        base.force_quit()
def load_chapters(self):
    if helper.is_file_ok(self.catalog_info['cache_file']):
        self.da2017 = json.loads(helper.to_str(
            helper.read_file(self.catalog_info['cache_file'])))
def load_cache(self):
    self.ones = json.loads(helper.read_file('one.1910.all.json'))
def use_cache_indexes(self):
    self.all_indexes = json.loads(helper.read_file('mz.idx.json').decode())
def use_cache(self):
    self.archives = json.loads(helper.read_file('mz.json').decode())
def search_by_name(self, name):
    flat = json.loads(helper.read_file('gitflat.awesome.json'))
    # lower-case both sides for a case-insensitive substring match
    needle = name.lower()
    return [x for x in flat if x.get('name', '').lower().find(needle) != -1]
def load_cache(self):
    self.soft = json.loads(helper.read_file('jobble.json'))
    self.soft_flatten = json.loads(helper.read_file('jobble.flat.json'))
    self.soft_count = 'categories: {}, total: {}'.format(
        len(self.soft), len(self.soft_flatten))
def who_am_i(self):
    person_info = json.loads(helper.read_file('personal.txt'))
    self.personal_info = person_info
    if not person_info:
        log.error('Log in to fetch your personal info first!!!')
        abc.force_quit()