import glob
import os
import re
import time

# read_all, make_soup and sleep_seconds are module-level helpers/settings
# defined elsewhere in this project.


def download_job_details(skill):
    # skip skills whose detail pages have already been downloaded
    existing = glob.glob1('detail', skill + u'_*.html')
    if existing:
        print(u'{0} has been downloaded'.format(skill))
        return
    # walk the listing pages saved for this skill and fetch each job's detail page
    for f in glob.glob(skill + u'*.html'):
        hp = os.path.join(os.getcwdu(), f)
        print(f)
        html = read_all(hp)
        soup = make_soup(html)
        for li in soup.find('ul', 'hot_pos reset').find_all('li', recursive=False):
            save_job_detail_html(skill, li['data-jobid'])
            print(li['data-jobid'] + ' downloaded.')
            time.sleep(sleep_seconds)
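
# save_job_detail_html() is called above but not defined in this snippet. The
# sketch below is a guess at its shape: the URL pattern and the
# detail/<skill>_<job_id>.html naming are assumptions inferred from the
# glob.glob1() check in download_job_details(), not a confirmed lagou API.
import urllib2


def save_job_detail_html(skill, job_id):
    url = 'http://www.lagou.com/jobs/{0}.html'.format(job_id)  # assumed URL pattern
    html = urllib2.urlopen(url).read()
    path = os.path.join('detail', u'{0}_{1}.html'.format(skill, job_id))
    with open(path, 'wb') as f:
        f.write(html)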
def save_keywords(html_file):
    html = read_all(html_file)
    soup = make_soup(html)
    main_navs = soup.find('div', 'mainNavs')
    cat_tags = main_navs.find_all('div', 'menu_box')
    cats = {}
    for i, cat in enumerate(cat_tags):
        cats[i] = category_contents(cat)
    for i in cats:
        cat = cats[i]
        print(u'** {0} **'.format(cat['name']))
        for sub in cat['sub_cats']:
            print(sub[0])
            for skill, href in sub[1]:
                print('\t-' + skill + ' - ' + href)
    to_pickle(cats, 'cats.pkl')
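
# category_contents() and to_pickle() are used above but not shown here. Below
# is a minimal sketch of category_contents illustrating the structure that
# save_keywords() expects: {'name': ..., 'sub_cats': [(sub_name, [(skill, href),
# ...]), ...]}. The h2/dl/dt/dd selectors are assumptions about lagou's
# menu_box markup, not the verified page structure.
def category_contents(menu_box):
    name = menu_box.h2.text.strip()  # category title
    sub_cats = []
    for dl in menu_box.find_all('dl'):
        sub_name = dl.dt.text.strip()
        skills = [(a.text.strip(), a['href']) for a in dl.dd.find_all('a')]
        sub_cats.append((sub_name, skills))
    return {'name': name, 'sub_cats': sub_cats}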
from bs4 import BeautifulSoup


def make_soup_from_file(file_name):
    html = read_all('./html/' + file_name)
    return BeautifulSoup(html, 'lxml')
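
# read_all() comes from common.chinese (see the jieba benchmark below); its
# implementation is not shown in this section. A plausible stand-in, assuming
# the saved html files are utf-8: read the whole file and decode it to unicode.
import codecs


def read_all(path, encoding='utf-8'):
    with codecs.open(path, encoding=encoding) as f:
        return f.read()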
def read_job_from_html(skill, html_file):
    """Read job info from a downloaded html file.

    :param html_file: contains job info, but sometimes the contents are empty.
    """
    html = read_all(html_file)
    soup = make_soup(html)
    detail = soup.find('dl', 'job_detail')
    # in some rare cases, e.g. the job is already closed, the job info is missing
    if not detail:
        return None

    job = Job()
    job.job_id = int(soup.find('input', {'id': 'jobid'})['value'])
    job.skill_tag = skill

    log('*** JOB ***')
    title = detail.find('h1')
    log(title['title'])
    log(title.div.text)
    job.title = title['title']
    job.dept = title.div.text
    log('')

    request = detail.find('dd', 'job_request')
    main_features = []
    for s in request.stripped_strings:
        # note: lstrip/rstrip take a character set, not a literal prefix/suffix;
        # this works here only because the label characters do not occur at the
        # edges of the actual values
        f = s.strip().lstrip(u'职位诱惑 : ').lstrip(u'发布时间:').rstrip(u'发布')
        log(f)
        main_features.append(f)
    assert len(main_features) == 7
    job.salary = main_features[0]
    job.city = main_features[1]
    job.experience = main_features[2]
    job.education = main_features[3]
    job.full_time = main_features[4] == u'全职'
    job.benefits = main_features[5]
    job.published_date = get_published_date(main_features[6], created_on(html_file))
    log('')

    desc_html = []
    desc = detail.find('dd', 'job_bt').find_all('p')
    for bt in desc:
        desc_html.append(unicode(bt))
    job.desc = ''.join(desc_html)
    log(job.desc)

    log('\n*** COMPANY ***\n')
    company = Company()
    comp = soup.find('dl', 'job_company')
    url = comp.dt.a['href']
    # the company id is the numeric part of the company page url
    pat = re.compile(r'(?P<comp_id>\d+)')
    m = re.search(pat, url)
    log(url)
    company.comp_id = int(m.group('comp_id'))
    job.comp_id = company.comp_id
    log(comp.dt.a.img['src'])
    log(comp.dt.a.div.h2.text.split()[0])
    company.logo = comp.dt.a.img['src']
    company.name = comp.dt.a.div.h2.text.split()[0]
    log('')

    comp_features = comp.dd
    features = []
    for li in comp_features.ul.find_all('li'):
        for ls in li.stripped_strings:
            features.append(ls)
    log(''.join(features))
    # expected layout: [label, domain, label, size, label, url]
    if len(features) == 6:
        company.domain = features[1]
        company.size = features[3]
        company.url = features[5]
    else:
        print(u'features ex: ' + html_file)
    log('')

    stage_h = comp_features.h4
    stage_tags = stage_h.find_next_sibling('ul').find_all('li')
    stage = []
    for li in stage_tags:
        for ls in li.stripped_strings:
            stage.append(ls)
    log('\t'.join(stage))
    # the funding info comes in (label, value) pairs, e.g. (u'目前阶段', u'A轮')
    if len(stage) % 2 == 0:
        for i in xrange(0, len(stage), 2):
            if stage[i] == u'目前阶段':
                company.cur_stage = stage[i + 1]
            elif stage[i] == u'投资机构':
                company.investor = stage[i + 1]
    else:
        print(u'stages ex: ' + html_file)
    log('')

    # address
    if comp_features.div:
        log(comp_features.div.text)
        company.address = comp_features.div.text

    return job, company
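
# get_published_date() and created_on() are called above but not defined in
# this snippet. The sketch below shows one plausible contract, assuming lagou
# displays a clock time (e.g. u'09:54') for same-day posts and a full date
# otherwise, and that the crawl date can be taken from the downloaded file's
# mtime; both rules are guesses, not the project's confirmed behaviour.
import datetime


def created_on(html_file):
    # assumption: the file's modification time is when it was downloaded
    return datetime.date.fromtimestamp(os.path.getmtime(html_file))


def get_published_date(text, crawl_date):
    if ':' in text:
        # a bare clock time means "published today", i.e. on the crawl date
        return crawl_date
    return datetime.datetime.strptime(text, '%Y-%m-%d').date()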
# from __future__ import unicode_literals
# coding: utf-8
import os
import time

import jieba

from common.chinese import read_all

text = read_all(u'围城.txt')
print os.path.getsize(u'围城.txt')

# warm up: jieba.cut returns a generator, so consume it to force the
# dictionary to load before any timing starts
list(jieba.cut(u'热身一下'))

# print len(text)
# seg_list = jieba.cut(text, cut_all=False, HMM=True)
# print "全模式:", "/ ".join(seg_list)

n = 10

# time n passes of accurate mode (the default)
t1 = time.time()
for i in range(n):
    l = list(jieba.cut(text))
seconds = time.time() - t1
print(seconds)

# time n passes of full mode (cut_all=True)
t1 = time.time()
for i in range(n):
    l = list(jieba.cut(text, cut_all=True))
seconds = time.time() - t1
print(seconds)
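
# Optional follow-up (not in the original script): normalise the last timing
# into characters per second so runs on different corpora are comparable;
# assumes read_all returned a unicode string, so len(text) counts characters.
chars_per_sec = len(text) * n / seconds
print('%.0f chars/sec (full mode)' % chars_per_sec)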