def ana_song(weblink):
    '''Parse a NetEase-music song page and collect its metadata.

    weblink: song page url whose trailing ?id= value is the song id.
    Returns a dict with keys: artist, song_name, songmid, cover.
    '''
    ml = mylogger(logfile, get_funcname())
    html = op_simple(weblink, header)[0]
    # html = op_requests(url,verify=False).content
    bsObj = BeautifulSoup(html, "html.parser")
    # ml.debug(bsObj)
    # title = bsObj.find('title')
    # print(title)
    song_name = bsObj.find('em', {'class': 'f-ff2'})
    songname = modstr(song_name.text.strip())
    ml.info(songname)
    # first <p class='des s-fc4'> holds the artist link, the second the album
    aa = bsObj.findAll('p', {'class': 'des s-fc4'})
    artistname = modstr(aa[0].span.a.text)
    albumname = modstr(aa[1].a.text)  # logged only; not part of the returned dict
    ml.info(artistname)
    ml.info(albumname)
    cover = bsObj.find('div', {'class': 'u-cover u-cover-6 f-fl'})
    # NOTE(review): reads 'href' off an <img> tag — site-specific markup;
    # confirm it isn't supposed to be 'src'
    cover = cover.img.attrs['href']
    ml.info(cover)
    # song id = value of the trailing ?id= query parameter
    songmid = weblink.split('=')[-1]
    sDict = {
        'artist': artistname,
        'song_name': songname,
        'songmid': songmid,
        'cover': cover
    }
    ml.debug(sDict)
    return sDict
def winner(result, bang):
    '''Sort one ranking (bang) from the per-person score dict and append it to output.

    result: {name: {bang: score, ...}}; bang: ranking title, also the score key.
    Returns the formatted top list produced by buildtoplist().
    '''
    l = mylogger(logfile, logfilelevel, get_funcname())
    with open(output, 'a') as f:
        l.debug(bang)
        f.writelines('=' * 5 + bang + '=' * 5 + '\n')
        # invert to score -> name for sorting
        # NOTE: identical scores collide (later name wins) — inherited behavior
        scorenamedic = {}
        count = 0
        for i in result:
            try:
                scorenamedic[result[i][bang]] = i
            except KeyError:
                # was a bare `except:`; only a missing score key is expected here
                l.error(bang + ' ' + i + ' 有问题')
        winlist = {}
        # the early-bird ranking sorts ascending, every other one descending
        for i in sorted(scorenamedic, reverse=(bang != '平均早鸟榜')):
            if bang == '办公室达人榜':
                hm = str2hm(i)
                hm = hm.split(':')[0] + '小时' + hm.split(':')[1] + '分钟'
            elif bang == '平均早鸟榜':
                hm = str2hm(i)
            else:
                hm = i
            count += 1
            l.debug('%s %s %s' % (count, scorenamedic[i], hm))
            # f.writelines('%s %s %s \n' % (count , scorenamedic[i],hm))
            winlist[count] = (scorenamedic[i], hm)
        l.debug(winlist)
        toplist = buildtoplist(winlist)
        for t in toplist:
            f.writelines('%s %s %s \n' % (t[0], t[1], t[2]))
    return toplist
def get_fav():
    '''Fetch favourite-link mails over POP3-SSL and return {msg_index: entry}.

    Mails that contain a link are deleted from the server after parsing.
    Returns the favour dict produced by read_mail() per message.
    '''
    ml = mylogger(logfile, get_funcname())
    try:
        M = poplib.POP3_SSL(mailsvr)
    except TimeoutError as e:
        ml.error(e)
        ml.error('Retry')
        # BUG FIX: the original logged 'Retry' but never retried, leaving M
        # unbound and crashing with NameError on the next line — retry once.
        M = poplib.POP3_SSL(mailsvr)
    # M.set_debuglevel(2)
    ml.debug(M.getwelcome())
    # M.apop(user,key) # not supported
    M.user(user)
    M.pass_(key)
    MS = M.stat()
    ml.debug(MS)
    ff = {}
    num = len(M.list()[1])
    ml.info("You have %d messages." % num)
    # walk newest -> oldest so deletion does not shift pending indexes
    for i in range(int(num), 0, -1):
        resp, lines, octets = M.retr(i)
        msg_content = b'\r\n'.join(lines).decode('utf-8')
        msg = Parser().parsestr(msg_content)
        # ml.debug(msg)
        f = read_mail(msg)
        if 'link' in f.keys():
            ff[i] = f
            M.dele(i)
            ml.debug('Remove email')
        else:
            ff[i] = f
            ml.error('Empty link Email from: ' + f['email'])
    ml.debug('Favor list: ' + str(ff))
    M.quit()
    return ff  # favor list without title
def dl(albumlink, force=False):
    '''main function to download album

    albumlink: album page url; force=True re-downloads even if archived.
    Downloads the cover, each track, tags the files, then cleans up.
    '''
    ml = mylogger(logfile, get_funcname())
    adict = ana_cd(albumlink)
    coverlink = adict['cover']
    artist = adict['artist']
    year = adict['year']
    albumname = adict['albumname']
    albumdir = f'{artist} - {year} - {albumname}'
    if find_album(albumdir) and not force:
        ml.warning(f'Album alread archived')
    else:
        albumfulldir = create_folder(dldir, albumdir)
        cover = os.path.join(albumfulldir, albumdir + '.jpg')
        m_cover = os.path.join(albumfulldir, albumdir + '.png')
        if os.path.isfile(cover):
            ml.warning('---- Big Cover download already !')
        else:
            ml.info('Download big cover')
            myget.dl(coverlink, out=cover)
        if os.path.isfile(m_cover):
            ml.warning('---- Small cover ready !')
        else:
            shutil.copy(cover, m_cover)
            squaresize(m_cover)
        for tracknum in range(1, adict['number'] + 1):
            songid = adict[tracknum]['id']
            singer = modstr(adict[tracknum]['singer'])
            songname = modstr(adict[tracknum]['songname'])
            songfullname = f'{singer} - {songname}.mp3'
            mp3 = os.path.join(albumfulldir, songfullname)
            ml.info(f'{tracknum} {singer} - {songname}')
            if os.path.isfile(mp3):
                ml.warning('---- Track download already !')
            else:
                dlurl = None
                try:
                    dlurl = get_dlurl(songid)
                    myget.dl(dlurl, out=mp3)
                except TypeError:
                    ml.error('Not published Track')
                    continue
                except Exception as e:
                    ml.error(e)
                    ml.error("Content incomplete -> retry")
                    # BUG FIX: if get_dlurl itself raised, dlurl was unbound
                    # and the retry crashed with NameError — skip instead.
                    if dlurl is None:
                        continue
                    myget.dl(dlurl, out=mp3)
                else:
                    addtag(mp3, songname, albumname, artist, singer, m_cover,
                           year, tracknum)
                mywait(random.randint(1, 3))
        try:
            os.remove(m_cover)
            clean_f(albumfulldir, 'tmp')
            ml.info(f'Complete download {albumdir}')
        except FileNotFoundError:
            pass
def get_vkeyguid(songmid, q=1):
    '''Fetch the vkey plus a freshly generated guid for a QQ-music download.

    songmid: song identifier; q: quality index into the global `quality` table.
    Returns (vkey, guid).
    '''
    ml = mylogger(logfile, get_funcname())
    # pseudo-random device guid, same scheme the web player uses
    guid = int(random.random() * 2147483647) * int(
        time.time() * 1000) % 10000000000
    ml.debug(f'GUID:{guid}')
    url = 'http://c.y.qq.com/base/fcgi-bin/fcg_music_express_mobile3.fcg'
    qly, t = quality[q][0], quality[q][1]
    payload = {
        'loginUin': '0',
        'hostUin': '0',
        'format': 'json',
        'inCharset': 'utf8',
        'outCharset': 'utf-8',
        'notice': '0',
        'platform': 'yqq',
        'needNewCode': '0',
        'cid': '205361747',  # important
        'uin': '0',
        'songmid': str(songmid),
        'filename': qly + str(songmid) + t,
        'guid': str(guid)
    }
    resp = op_requests(url, header=ran_header(ref=ref), para=payload,
                       verify=False)
    body = resp.json()
    vkey = body['data']['items'][0]['vkey']
    ml.debug(f'vkey:{vkey}')
    return vkey, guid
def windraw(wlist, imout, title):
    '''Render a ranking table onto the template image and save it to imout.

    wlist: sequence of (position, name, value) rows; title: 4 header strings.
    '''
    l = mylogger(logfile, logfilelevel, get_funcname())
    im = Image.open(imageFile)
    draw = ImageDraw.Draw(im)
    x, y = (70, 50)  # top-left anchor of the table
    xstep = 100
    ystep = 40
    # centred main title
    draw.text((x + xstep * 2, y), title[0], color, font=font)
    y = y + ystep * 2
    # three column headers
    draw.text((x, y), title[1], color, font=font)
    draw.text((x + xstep * 2, y), title[2], color, font=font)
    draw.text((x + xstep * 4, y), title[3], color, font=font)
    y = y + ystep
    for idx in range(len(wlist)):
        rank = wlist[idx][0]
        name = wlist[idx][1]
        value = str(wlist[idx][2])
        l.debug(rank + ' ' + name + ' ' + str(value))
        draw.text((x, y), rank, color, font=font)
        draw.text((x + xstep * 2, y), name, color, font=font)
        draw.text((x + xstep * 4, y), value, color, font=font)
        y = y + ystep
    draw = ImageDraw.Draw(im)
    im.save(imout)
def tj(dakarecord):
    '''Parse the raw punch-card workbook into {name: {day: records}}.

    dakarecord: xlsx filename under workpath. Each day dict carries fractional
    hours plus the raw HH:MM:SS strings: on/rawon (<=12:00) and off/rawoff.
    '''
    l = mylogger(logfile, logfilelevel, get_funcname())
    wb = openpyxl.load_workbook(os.path.join(workpath, dakarecord))
    tjdic = {}
    sheet1 = wb['原始记录']
    for i in range(4, sheet1.max_row + 1):
        status = sheet1.cell(row=i, column=9).value  # punch status
        if re.search('打卡无效', status):
            l.debug('Row ' + str(i) + ' 打卡无效')
            continue
        name = sheet1.cell(row=i, column=1).value  # employee name
        if name not in tjdic:
            tjdic[name] = {}
        dk = sheet1.cell(row=i, column=8).value  # punch datetime 'YYYY-MM-DD HH:MM:SS'
        # BUG FIX: replace('0', '') removed *every* zero, turning day '10'
        # into '1' and '20' into '2' and merging them with days 1/2.
        # Only leading zeros should be stripped.
        day = dk.split(' ')[0].split('-')[-1].lstrip('0')
        l.debug(name + ' ' + day + ' ' + dk)
        if day not in tjdic[name]:
            tjdic[name][day] = {}
        ti = dk.split(' ')[-1]  # punch time HH:MM:SS
        h = int(ti.split(':')[0])
        m = int(ti.split(':')[1])
        hm = h + m / 60
        if hm > 12:  # afternoon punch counts as clock-out
            tjdic[name][day]['off'] = hm
            tjdic[name][day]['rawoff'] = ti
        else:  # morning punch counts as clock-in
            tjdic[name][day]['on'] = hm
            tjdic[name][day]['rawon'] = ti
    l.debug(tjdic)
    return tjdic
def dl_one(weburl, workfolder):
    '''Download one xiami track referenced by weburl into workfolder and tag it.'''
    l = mylogger(logfile, get_funcname())
    # song id is the last path component, minus any query string
    songid = str(weburl.split('/')[-1]).split('?')[0]
    l.debug(songid)
    os.chdir(workfolder)
    SongDic = get_loc_one(songid)
    songname = SongDic['artist'] + ' - ' + SongDic['song']
    l.info(songname)
    mp3 = songname + '.mp3'
    m_cover = songname + '.png'
    if os.path.isfile(mp3):
        l.warning("Track download already")
    else:
        myget.dl(SongDic['location'], out=mp3)
    if not os.path.isfile(m_cover):
        myget.dl(SongDic['cover'], out=m_cover)
    addtag(mp3, SongDic['song'], SongDic['album'], SongDic['artist'],
           SongDic['singer'], m_cover)
    os.remove(m_cover)
def get_loc_one(song_id):
    '''Query xiami's single-track XML widget and return a song dict.

    Returns {} when the track is not published; otherwise a dict with keys:
    location (decrypted download url), song, cover, artist, singer, album.
    '''
    l = mylogger(logfile, get_funcname())
    url = f'http://www.xiami.com/widget/xml-single/sid/{song_id}'
    page = op_simple(url)
    l.debug(page[1])
    bsObj = BeautifulSoup(page[0], "html.parser")  # ;print(bsObj)
    location = bsObj.find("location")
    # str(tag) renders '<location><![CDATA[...]]></location>'; the fixed
    # slices below strip tag + CDATA wrappers (offsets differ per tag name)
    location = str(location)[19:-14]
    if location == '':
        l.debug('Track not published')
        SongDic = {}
    else:
        l.debug('Raw Location: ' + location)
        # the widget obfuscates the url; decry() recovers the real one
        location = decry(location)
        song = bsObj.find("song_name")
        song = modstr(str(song)[20:-15])
        singer = bsObj.find("artist_name")
        singer = modstr(str(singer)[22:-17])
        album = bsObj.find("album_name")
        album = modstr(str(album)[21:-16])
        cover = bsObj.find('album_cover')
        cover = 'http:' + str(cover)[22:-17]
        # artist and singer are deliberately the same value here
        SongDic = {'location': location, 'song': song, 'cover': cover,
                   'artist': singer, 'singer': singer, 'album': album}
    l.debug(SongDic)
    return SongDic
def cal(tjdic):
    '''Aggregate punch records into per-person ranking scores.

    tjdic: {name: {day: {'on','rawon','off','rawoff'}}} as built by tj().
    Returns {name: {'夜归人榜': latest raw clock-out,
                    '平均早鸟榜': mean clock-in hour,
                    '办公室达人榜': total hours in office}}.
    '''
    l = mylogger(logfile, logfilelevel, get_funcname())
    result = {}
    for name in tjdic:
        result[name] = {}
        maxt = 0
        avgt = 0
        sumt = 0
        maxoff = ''
        days = 0
        for day in tjdic[name]:
            rec = tjdic[name][day]
            # BUG FIX: a day with only a clock-in or only a clock-out used to
            # raise KeyError here; skip incomplete days instead.
            if 'on' not in rec or 'off' not in rec:
                l.debug(name + ' ' + day + ' incomplete record, skipped')
                continue
            off = rec['off']
            rawoff = rec['rawoff']
            on = rec['on']
            sumt = sumt + off - on
            avgt = avgt + on
            days += 1
            if off > maxt:
                maxt = off
                maxoff = rawoff
        # guard against a person with no complete day at all
        avgt = avgt / days if days else 0
        l.debug(name + ' ' + str(maxt) + ' ' + str(avgt) + ' ' + str(sumt))
        result[name]['夜归人榜'] = maxoff
        result[name]['平均早鸟榜'] = avgt
        result[name]['办公室达人榜'] = sumt
    l.debug(result)
    return result
def addtag(fname, m_song, m_album, m_artist, m_singer, m_cover, m_year='',
           m_trackid='', m_cd=''):
    '''Add Tag for MP3.

    fname: mp3 path; the remaining arguments are the tag values. An empty
    m_cover skips the APIC frame. The file is saved with ID3 v2.3 framing.
    '''
    ml = mylogger(logfile, get_funcname())
    try:
        tags = ID3(fname)
    except ID3NoHeaderError:
        # BUG FIX: callers pass the track number as an int, and the original
        # 'str' + int concatenation raised TypeError on this log line.
        ml.dbg("Adding ID3 header on " + str(m_trackid))
        tags = ID3()
    tags["TIT2"] = TIT2(encoding=3, text=m_song)
    tags["TALB"] = TALB(encoding=3, text=m_album)
    tags["TPE2"] = TPE2(encoding=3, text=m_artist)  # album artist
    # tags["COMM"] = COMM(encoding=3, lang=u'eng', desc='desc', text=u'mutagen comment')
    tags["TPE1"] = TPE1(encoding=3, text=m_singer)  # singer
    # tags["TCOM"] = TCOM(encoding=3, text=u'mutagen Composer')
    # tags["TCON"] = TCON(encoding=3, text=u'mutagen Genre')
    tags["TDRC"] = TDRC(encoding=3, text=m_year)
    tags["TRCK"] = TRCK(encoding=3, text=str(m_trackid))
    tags["TPOS"] = TPOS(encoding=3, text=m_cd)
    if m_cover != '':
        with open(m_cover, 'rb') as c:
            cover = c.read()  # raw image bytes for the APIC frame
        tags["APIC"] = APIC(encoding=3,
                            mime=u'image/png',
                            type=3,
                            desc=u'Cover',
                            data=cover)
    tags.save(fname, v2_version=3)
def ana_song(weblink):
    '''Scrape a QQ-music song page; return {artist, song_name, songmid, cover}.'''
    ml = mylogger(logfile, get_funcname())
    # song id is the final path component, without its file extension
    songmid = weblink.split('/')[-1].split('.')[0]
    ml.debug(songmid)
    html = op_simple(weblink)[0]
    bsObj = BeautifulSoup(html, "html.parser")
    artist_name = bsObj.find('div', {'class': 'data__singer'}).attrs['title']
    ml.debug(artist_name)
    title_tag = bsObj.find('h1', {'class': 'data__name_txt'})
    song_name = modstr(title_tag.text.strip())
    ml.debug(song_name)
    photo = bsObj.find('img', {'class': 'data__photo'})
    cover = 'http:' + photo.attrs['src']
    ml.debug('Cover link: ' + cover)
    sDict = {
        'artist': artist_name,
        'song_name': song_name,
        'songmid': songmid,
        'cover': cover
    }
    ml.debug(sDict)
    return sDict
def decode_str(s):
    '''Decode a MIME-encoded mail header fragment into a readable string.'''
    ml = mylogger(logfile, get_funcname())
    raw, enc = decode_header(s)[0]
    if enc:
        ml.dbg('Header charset: ' + enc)
        raw = raw.decode(enc)
    ml.dbg(raw)
    return raw
def decode_str(s):
    '''Decode one MIME-encoded header fragment; returns a plain string.'''
    ml = mylogger(logfile, get_funcname())
    text, cs = decode_header(s)[0]
    if cs:
        ml.debug('Header charset: ' + cs)
        text = text.decode(cs)
    ml.debug(text)
    return text
def download_album(self, workfolder, album_detail):
    '''Download every track of album_detail into workfolder, with cover and tags.

    album_detail keys used: artist_name, album_name, year, coverlink,
    songid_list, song_detail_list (per-song singers/songname/cdserial/track).
    '''
    ml = mylogger(logfile, get_funcname())
    artist_name = album_detail['artist_name']
    album_name = album_detail['album_name']
    year = album_detail['year']
    albumdir = f'{artist_name} - {year} - {album_name}'
    albumfulldir = create_folder(workfolder, albumdir)
    try:
        coverlink = album_detail['coverlink']
        cover = os.path.join(albumfulldir, albumdir + '.jpg')
        m_cover = os.path.join(albumfulldir, albumdir + '.png')
        if os.path.isfile(cover):
            ml.warn('---- Big Cover download already !')
        else:
            ml.info('Download big cover')
            myget.dl(coverlink, out=cover)
        if os.path.isfile(m_cover):
            ml.warn('---- Small cover ready !')
        else:
            shutil.copy(cover, m_cover)
            squaresize(m_cover)
        songid_list = album_detail['songid_list']
        download_url_dict = self.get_song_download_url(songid_list)
        ml.dbg(download_url_dict)
        for s in songid_list:
            singers = modstr(album_detail['song_detail_list'][s]['singers'])
            songname = modstr(album_detail['song_detail_list'][s]['songname'])
            songfullname = f'{singers} - {songname}.mp3'
            mp3 = os.path.join(albumfulldir, songfullname)
            if os.path.isfile(mp3):
                ml.warn(f'---- {songname} download already !')
            else:
                cdserial = str(album_detail['song_detail_list'][s]['cdserial'])
                track = str(album_detail['song_detail_list'][s]['track'])
                ml.info(f'{cdserial}.{track} {singers} - {songname}')
                if dlurl := download_url_dict[s]:
                    try:
                        myget.dl(dlurl, out=mp3)
                        mywait(random.randint(1, 3))
                        addtag(mp3, songname, album_name, artist_name,
                               singers, m_cover, year, track, cdserial)
                    except AttributeError as e:
                        if "urllib.error" in str(e):
                            pass
                        else:
                            # BUG FIX: the original swallowed *every*
                            # AttributeError; re-raise the unrelated ones
                            raise
    except Exception as e:
        print(e)
        if "HTTP Error 404: Not Found" in str(e):
            ml.err("File Not Found")
        else:
            raise
    # the small cover may never have been created if the download failed early
    if os.path.isfile(m_cover):
        os.remove(m_cover)
    clean_f(albumfulldir, 'tmp')
    ml.info('Download Complete')
def f2json(text):
    '''Strip a JSONP wrapper ("jsonpNN(...);") from text and parse the payload.

    Returns the parsed object, or None when the input cannot be unwrapped.
    '''
    ml = mylogger(logfile, get_funcname())
    try:
        # raw string: '\d' is an invalid escape in a plain literal
        data = re.split(r'jsonp\d*', text)
        # drop the surrounding parentheses around the JSON body
        j = json.loads(data[1][1:-1])
    except (TypeError, IndexError, json.JSONDecodeError) as e:
        # original caught only TypeError; a missing wrapper (IndexError) or
        # malformed body (JSONDecodeError) should also yield None per contract
        ml.err(e)
        return None
    return j
def readtag(fname):
    '''Read Tag info from MP3: returns (singer, title).'''
    l = mylogger(logfile, get_funcname())
    tags = ID3(fname)
    singer = str(tags['TPE1'])
    title = str(tags["TIT2"])
    l.dbg(singer)
    l.dbg(title)
    return singer, title
def ana_mono(page):
    '''Scrape author and title from a MONO article page.'''
    ml = mylogger(logfile, get_funcname())
    soup = BeautifulSoup(op_simple(page, ran_header())[0], "html.parser")
    author = soup.find('span', {'class': 'title'}).text.strip()
    title = soup.find('h1', {'class': 'title'}).text.strip()
    p = {'author': author, 'title': title}
    ml.debug(p)
    return p
def read_mail(msg, indent=0):
    '''Main function to read mail message.

    At the top level (indent == 0) extracts From / Subject / Date headers,
    then walks the payload looking for the first http link in a text part.
    Returns a dict with (a subset of) the keys: email, tag, timestamp, link.
    '''
    ml = mylogger(logfile, get_funcname())
    f = {}  # mail,tag,date,link
    if indent == 0:
        for header in ['From', 'To', 'Subject', 'Date']:
            value = msg.get(header, '')
            if value:
                if header == 'From':
                    hdr, addr = parseaddr(value)
                    ml.dbg(f'Find FROM address {addr}')
                    f['email'] = addr
                elif header == 'Subject':
                    ml.dbg('Look for TAG')
                    value = decode_str(value)
                    # the first character of the subject encodes the category tag
                    tag = value[:1]
                    ml.dbg('Tag: ' + tag)
                    f['tag'] = tag.lower()
                elif header == 'Date':
                    ml.dbg('Look for DATE')
                    mdate = time.strftime('%Y-%m-%d %H:%M:%S',
                                          parsedate(value))
                    ml.dbg(mdate)
                    f['timestamp'] = mdate
                else:
                    ml.dbg('Header: ' + value)
    if (msg.is_multipart()):
        parts = msg.get_payload()
        for n, part in enumerate(parts):
            ml.dbg('%spart %s' % (' ' * indent, n))
            ml.dbg('%s-------' % (' ' * indent))
            # NOTE(review): the recursive result is discarded, so a link found
            # inside a sub-part never reaches the caller — confirm intended
            read_mail(part, indent + 1)
    else:
        content_type = msg.get_content_type()
        ml.dbg('Message body content type: ' + content_type)
        if content_type == 'text/plain' or content_type == 'text/html':
            content = msg.get_payload(decode=True)
            ml.dbg('Content is')
            ml.dbg(content)
            charset = guess_charset(msg)
            if charset:
                content = content.decode(charset)
            ml.dbg('Content after decode')
            # ml.dbg(content)
            # link = content.split('\r\n')[0]
            content = content.split('\r\n')
            ml.dbg(content)
            for h in content:
                if h[:4] == 'http':
                    f['link'] = h  # what if multiple http link?
        else:
            ml.dbg('%sAttachment: %s' % (' ' * indent, content_type))
    ml.dbg('Favor entry: ' + str(f))
    return f  # mail,tag,date,link
def main():
    '''CLI entry point: parse arguments and dispatch the chosen download mode.'''
    ml = mylogger(logfile, get_funcname())
    parser = argparse.ArgumentParser(description='Music download tool')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-s',
                       '--song',
                       help='Download single song',
                       action='store_true')
    group.add_argument('-c',
                       '--cds',
                       help='Download CD Link',
                       action='store_true')
    group.add_argument('-a', '--artist', help='Download all CD of artist')
    group.add_argument('-f', '--favorite', help='Download favorite list')
    group.add_argument('-t', '--top', help='Download top songs')
    group.add_argument('-x', action="store_true", help='Download xiami songs')
    args = parser.parse_args()
    if args.song:
        link = input('>>')
        ml.info('Begin download single song')
        # the third path component of the url tells the source site apart
        if re.findall('qq', link.split('/')[2]):
            ml.debug(f"Download from QQMusic {link}")
            qdl_song(link)
        elif re.findall('xiami', link.split('/')[2]):
            # NOTE(review): no xiami download call follows — looks unfinished
            ml.debug(f'Download from Xiami {link}')
        else:
            ml.error('Pls check link')
    elif args.cds:
        link = input('>>')
        ml.info('Begin download CDs')
        if re.findall('qq', link.split('/')[2]):
            ml.debug(f"Download from QQMusic {link}")
            qdl_album(link)
        elif re.findall('xiami', link.split('/')[2]):
            ml.debug(f'Download from Xiami {link}')
        else:
            ml.error('Pls check link')
    elif args.artist:
        ml.info('Begin download all CD of artist')
        link = args.artist
        ml.info(link)
    elif args.favorite:
        # NOTE(review): log text duplicated from the artist branch
        ml.info('Begin download all CD of artist')
        link = args.favorite
        ml.info(link)
    elif args.x:
        ml.info('Begin download from xiami')
        xm_json(dldir)
    else:
        # NOTE(review): args.top (-t) is never dispatched; it falls through here
        parser.print_help()
def get_dlurl(vkey, guid, songmid, q=1):
    '''Compose the final QQ-music stream url from a vkey/guid pair.

    q indexes the global `quality` table (prefix, extension, fromtag).
    '''
    ml = mylogger(logfile, get_funcname())
    qly = quality[q][0]
    t = quality[q][1]
    tag = quality[q][2]
    filename = qly + songmid + t
    url = f'http://dl.stream.qqmusic.qq.com/{filename}?vkey={vkey}&guid={guid}&uin=0&fromtag={tag}'
    ml.debug(url)
    return url
def move_mp3(topdir, musicure):
    '''Move MP3 from Topfolder to Musicure'''
    ml = mylogger(logfile, get_funcname())
    for entry in os.listdir(topdir):
        # only files whose name ends in mp3 (case-insensitive)
        if entry[-3:].lower() != 'mp3':
            continue
        ml.info('Move --> ' + entry)
        src = os.path.join(topdir, entry)
        dst = os.path.join(musicure, entry)
        ml.dbg(f'{src} --> {dst}')
        shutil.move(src, dst)
def move_cover(evadir, coverdir):
    '''Move Cover to CoverFolder'''
    ml = mylogger(logfile, get_funcname())
    for entry in os.listdir(evadir):
        # only files whose name ends in jpg (case-insensitive)
        if entry[-3:].lower() != 'jpg':
            continue
        ml.info('Move --> ' + entry)
        src = os.path.join(evadir, entry)
        dst = os.path.join(coverdir, entry)
        ml.info(f'{src} --> {dst}')
        shutil.move(src, dst)
def get_json(url, params, encSecKey):
    '''Get response of song download url'''
    ml = mylogger(logfile, get_funcname())
    payload = {
        "params": params,
        "encSecKey": encSecKey
    }
    response = requests.post(url, headers=ran_header(agentref, host, org),
                             data=payload)
    body = response.json()
    ml.dbg(body)
    return body['data']
def ana_mono(page):
    '''Analyze Mono web'''
    ml = mylogger(logfile, get_funcname())
    html = op_simple(page, ran_header())[0]
    parsed = BeautifulSoup(html, "html.parser")
    result = {
        'author': parsed.find('span', {'class': 'title'}).text.strip(),
        'title': parsed.find('h1', {'class': 'title'}).text.strip(),
    }
    ml.debug(result)
    return result
def ana_dy(page):
    '''Analyze Douyin web'''
    ml = mylogger(logfile, get_funcname())
    html = op_simple(page, ran_header())[0]
    parsed = BeautifulSoup(html, "html.parser")
    result = {
        'author': parsed.find('p', {'class': 'name nowrap'}).text.strip(),
        'title': parsed.find('h1', {'class': 'desc'}).text.strip(),
    }
    ml.info(result)
    return result
def read_mail(msg, indent=0):
    '''Recursively read a mail message into a favour-entry dict.

    At the top level extracts From / Subject / Date; then scans text parts
    for the first http link. Returns keys among: email, tag, timestamp, link.
    '''
    ml = mylogger(logfile, get_funcname())
    f = {}  # mail,tag,date,link
    if indent == 0:
        for header in ['From', 'To', 'Subject', 'Date']:
            value = msg.get(header, '')
            if value:
                if header == 'From':
                    ml.debug('Look for FROM address')
                    hdr, addr = parseaddr(value)
                    ml.debug(addr)
                    f['email'] = addr
                elif header == 'Subject':
                    ml.debug('Look for TAG')
                    value = decode_str(value)
                    # first character of the subject is the category tag
                    tag = value[:1]
                    ml.debug('Tag: ' + tag)
                    f['tag'] = tag
                elif header == 'Date':
                    ml.debug('Look for DATE')
                    mdate = time.strftime('%Y-%m-%d %H:%M:%S',
                                          parsedate(value))
                    ml.debug(mdate)
                    f['timestamp'] = mdate
                else:
                    ml.debug('Header: ' + value)
    if (msg.is_multipart()):
        parts = msg.get_payload()
        for n, part in enumerate(parts):
            ml.info('%spart %s' % (' ' * indent, n))
            ml.info('%s-------' % (' ' * indent))
            # NOTE(review): the recursive return value is discarded, so links
            # found inside sub-parts never reach the caller — confirm intended
            read_mail(part, indent + 1)
    else:
        content_type = msg.get_content_type()
        ml.debug('Message body content type: ' + content_type)
        if content_type == 'text/plain' or content_type == 'text/html':
            content = msg.get_payload(decode=True)
            ml.debug('Content is')
            ml.debug(content)
            charset = guess_charset(msg)
            if charset:
                content = content.decode(charset)
            ml.debug('Content after decode')
            ml.debug(content)
            # link = content.split('\r\n')[0]
            content = content.split('\r\n')
            ml.debug(content)
            for h in content:
                if h[:4] == 'http':
                    f['link'] = h  # what if multiple http link?
        else:
            ml.info('%sAttachment: %s' % (' ' * indent, content_type))
    ml.debug('Favor entry: ' + str(f))
    return f  # mail,tag,date,link
def ana_album(weblink):
    '''Scrape a QQ-music album page into an album dictionary.

    Returns a dict with album / artist / year / cover / fullname, one
    [songmid, songname, singer] list per integer track number, and TrackNum.
    '''
    ml = mylogger(logfile, get_funcname())
    html = op_simple(weblink, header=ran_header(ref=ref))[0]
    bsObj = BeautifulSoup(html, "html.parser")  # ;print(bsObj)
    album_name = bsObj.find('h1', {'class': 'data__name_txt'})
    album_name = modstr(album_name.text)
    ml.debug(album_name)
    artist_name = bsObj.find('a', {'class': 'js_singer data__singer_txt'})
    artist_name = modstr(artist_name.text)
    ml.debug(artist_name)
    # the release-date label text looks like '发行时间：YYYY-MM-DD';
    # chars [5:9] are the year
    year = bsObj.find(text=re.compile('^发行时间'))[5:9]
    ml.debug(year)
    cover = bsObj.find('img', {'id': 'albumImg'})
    cover = 'http:' + cover.attrs['src']
    ml.debug('Cover link: ' + cover)
    fullname = artist_name + ' - ' + year + ' - ' + album_name
    aDict = {
        'album': album_name,
        'artist': artist_name,
        'year': year,
        'cover': cover,
        'fullname': fullname
    }
    song = bsObj.findAll('div', {'class': 'songlist__number'})
    n = 0
    songtmp = []  # name duplicate check
    for i in song:
        n += 1
        tracknumber = i.text
        ml.debug('Find track ' + str(tracknumber))
        # the song info lives two siblings after the number cell
        tmp = i.next_sibling.next_sibling
        si = tmp.find('span', {'class': 'songlist__songname_txt'}).a
        # href ends '<songmid>.html'; strip the extension
        songmid = si.attrs['href'].split('/')[-1][:-5]
        songname = si.text
        # de-duplicate repeated song names by appending the track number
        if songname in songtmp:
            songname = songname + '_' + tracknumber
        songtmp.append(songname)
        ml.debug(songname)
        singers = tmp.parent.findAll('a', {'class': "singer_name"})
        if len(singers) > 1:
            # collaboration track: join all singer names
            s = list(map(lambda x: x.text, singers))
            singer = ','.join(s)
        else:
            singer = singers[0].text
        ml.debug(singer)
        si = [songmid, songname, singer]
        aDict[int(tracknumber)] = si
    aDict['TrackNum'] = n
    # ml.info(aDict)
    return aDict  # Album dictionary
def arch_cover(coverdir):
    '''Archive cover files into per-first-letter subfolders of coverdir.

    Files whose first-letter subfolder does not exist are left in place for
    manual checking.
    '''
    ml = mylogger(logfile, get_funcname())
    for c in os.listdir(coverdir):
        src = os.path.join(coverdir, c)
        # idiom fix: avoid `== False` / `== True` comparisons
        if not os.path.isdir(src):
            dd = os.path.join(coverdir, c[0])
            if os.path.isdir(dd):
                dst = os.path.join(dd, c)
                result = myfs.f_move(src, dst)
                ml.info(result)
def create(self):
    '''Create the inventory table (SN primary key, book, lib, cat columns).'''
    ml = mylogger(logfile, get_funcname())
    conn = sqlite3.connect(database)
    cursor = conn.cursor()
    cmd = ('create table inventory (SN varchar(20) primary key,'
           ' book varchar(20),lib varchar(20),cat varchar(20) )')
    ml.debug(cmd)
    cursor.execute(cmd)
    cursor.close()
    # BUG FIX: since Python 3.6 DDL statements are no longer implicitly
    # committed, so closing without commit() could roll the CREATE TABLE back
    conn.commit()
    conn.close()
def guess_charset(msg):
    '''Best-effort charset detection: message charset, else Content-Type header.

    Returns the charset string, or None when nothing can be determined.
    '''
    ml = mylogger(logfile, get_funcname())
    charset = msg.get_charset()
    if charset is None:
        content_type = msg.get('Content-Type', '').lower()
        ml.dbg(content_type)
        # take everything after the first 'charset=' marker, if present
        _, sep, tail = content_type.partition('charset=')
        if sep:
            charset = tail.strip()
            ml.dbg('Message body charset: ' + charset)
    return charset
def guess_charset(msg):
    '''Guess a message's charset from its object or its Content-Type header.'''
    ml = mylogger(logfile, get_funcname())
    charset = msg.get_charset()
    if charset is not None:
        return charset
    content_type = msg.get('Content-Type', '').lower()
    ml.debug(content_type)
    marker = 'charset='
    pos = content_type.find(marker)
    if pos >= 0:
        charset = content_type[pos + len(marker):].strip()
        ml.debug('Message body charset: ' + charset)
    return charset
def all(self):
    '''Log a banner and return a pretty table of every book in inventory.'''
    ml = mylogger(logfile, get_funcname())
    ml.info(">>>>>>>显示所有图书<<<<<<<")
    ml.info("=" * 26)
    conn = sqlite3.connect(database)
    cursor = conn.cursor()
    cursor.execute(
        'select lib as 图书馆,book as 书,SN,cat as 索书号 from inventory')
    table = from_db_cursor(cursor)
    cursor.close()
    conn.close()
    return table
def listbook(self):
    '''Log the distinct (book, cat) pairs found in the inventory table.'''
    ml = mylogger(logfile, get_funcname())
    ml.info(">>>>显示找到的图书列表<<<<")
    ml.info("=" * 26)
    conn = sqlite3.connect(database)
    cursor = conn.cursor()
    cursor.execute('select distinct book, cat from inventory')
    for row in cursor.fetchall():
        ml.info(row)
    cursor.close()
    conn.close()
def main():
    '''Load cached favourites (or fetch them from mail), resolve each link's
    source/author/title, and insert the entries into the notes database.'''
    ml = mylogger(logfile, get_funcname())
    if os.path.exists(ffile):
        # reuse the favour list cached by a previous run
        with open(ffile, 'r', encoding='utf-8') as f:
            ff = json.loads(f.read())
    else:
        ml.info('Query Email')
        ff = get_fav()
        with open(ffile, 'w', encoding='utf-8') as x:
            json.dump(ff, x, ensure_ascii=False, indent=2)
    # fl = {} # favor list
    db = NoteDataBase(dbfile)
    pprint(ff)
    # the JSON round-trip stringified the integer keys, hence str(x)
    for x in range(1, len(ff) + 1):
        f = ff[str(x)]  # email,tag,timestamp,link
        ml.dbg(f)
        if 'link' in f.keys():
            link = f['link']
            if link.split('/')[2] == 'mp.weixin.qq.com':
                p = ana_wx(link)
                if p:
                    f['source'] = '微信公众号'
                    f['author'] = p['author']
                    f['title'] = p['title']
                    ml.dbg(f)
                else:
                    # NOTE(review): the file handle shadows the entry dict f;
                    # harmless only because of the continue right after
                    with open(attention, 'a') as f:
                        f.write('Need to check: ' + link + '\n')
                    continue
            elif link.split('/')[2] == 'mmmono.com':
                if p := ana_mono(link):
                    f['source'] = 'MONO'
                    f['author'] = p['author']
                    f['title'] = p['title']
                    ml.dbg(f)
                else:
                    with open(attention, 'a') as f:
                        f.write('Need to check: ' + link + '\n')
                    continue
            elif link.split('/')[2] == 'v.douyin.com':
                pass
            else:
                ml.warn('Need to check source')
                # NOTE(review): bare `raise` outside an except block raises
                # RuntimeError — presumably intended as a hard abort; confirm
                raise
            # fl[i] = f
            db.insert_article(f)  # email,tag,timestamp,link,source,author,title
        else:
            ml.info('Empty link Email from: ' + f['email'])
            b = f['email']
            with open(attention, 'a') as f:
                f.write('Empty link Email from: ' + b + '\n')
def ana_wx(page):
    '''Extract author and title from a WeChat public-account article page.'''
    ml = mylogger(logfile, get_funcname())
    html = op_simple(page, ran_header())[0]
    # print(html)
    soup = BeautifulSoup(html, "html.parser")
    author_tag = soup.find(
        'span', {'class': 'rich_media_meta rich_media_meta_nickname'})
    title_tag = soup.find('h2', {'class': 'rich_media_title'})
    p = {
        'author': author_tag.a.text.strip(),
        'title': title_tag.text.strip(),
    }
    # p['link'] = page
    ml.debug(p)
    return p
def main():
    '''Fetch favourite mails, enrich each entry with source/author/title,
    then store the full list in the notes database.'''
    ml = mylogger(logfile, get_funcname())
    ml.debug('Query Email')
    ff = get_fav()
    fl = {}  # favor list
    num = len(ff) + 1
    # first pass: resolve source metadata for every entry with a link
    for i in range(1, num):
        f = ff[i]
        if 'link' in f.keys():
            link = f['link']
            if link.split('/')[2] == 'mp.weixin.qq.com':
                p = ana_wx(link)
                f['source'] = '微信公众号'
                f['author'] = p['author']
                f['title'] = p['title']
                ml.debug(f)
                # fl[i] = f
            elif link.split('/')[2] == 'mmmono.com':
                p = ana_mono(link)
                f['source'] = 'MONO'
                f['author'] = p['author']
                f['title'] = p['title']
                ml.debug(f)
                # fl[i] = f
            else:
                # unknown source: kept in the list without metadata
                ml.warning('Need to check source')
                # fl[i] = f
        else:
            ml.debug('Empty link Email from: ' + f['email'])
            # fl[i] = f
        fl[i] = f
    ml.debug('Full list: ' + str(fl))
    ml.debug('store in DB')
    # second pass: persist entries; linkless ones go to the attention file
    db = NoteDataBase(dbfile)
    num = len(fl) + 1
    for i in range(1, num):
        f = fl[i]
        if 'link' in f.keys():
            ml.debug(f)
            db.insert_article(f)
        else:
            ml.debug('Empty link Email from: ' + f['email'])
            b = f['email']
            # NOTE(review): the file handle shadows the entry dict f; safe
            # only because f is not used again this iteration
            with open(attention, 'a') as f:
                f.write('Empty link Email from: ' + b + '\n')