Code example #1
# Imports used by this view. Project-specific helpers (Config, GasDataRecordForm,
# get_account_info_by_account_id and the get_current_* utilities) are defined
# elsewhere in the project.
import requests
from flask import request, flash, redirect, url_for
from flask_login import current_user


def data_submit_post():
    # Require a logged-in user; anonymous visitors are sent to the login page.
    if current_user.is_authenticated:
        account = get_account_info_by_account_id(current_user.account_id)
    else:
        return redirect(url_for('auth.login_view'))

    form = GasDataRecordForm()
    post_doc_url = 'http://' + Config.DB_OPS_URL + '/api/gas/document'

    # Build the document to store: copy the submitted form fields, then add
    # derived fields and metadata.
    doc_dict = dict(request.form)
    boiler_room, boiler_no = str(form.boiler_room_and_no.data).split('/', 1)
    doc_dict['boiler_room'] = boiler_room
    doc_dict['boiler_no'] = boiler_no
    doc_dict['employee_no'] = current_user.account_id
    doc_dict['employee_name'] = account['account_nickname']
    doc_dict['datetime'] = get_current_datetime()
    doc_dict['date'] = get_current_date()
    doc_dict['time'] = get_current_time()

    if form.validate_on_submit():
        # Forward the document to the DB-ops service and report the outcome.
        result = requests.post(post_doc_url, data=doc_dict)
        if result.status_code == 200:
            flash('数据提交成功', 'success')  # "Data submitted successfully"
        else:
            flash('发生了错误, 数据未成功提交', 'danger')  # "An error occurred; the data was not submitted"
    # Always return a response, including when form validation fails.
    return redirect(url_for('gas.gas_data_submit_view'))
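
For reference, below is a minimal sketch of what GasDataRecordForm could look like; the real form class lives elsewhere in the project, and the field type and choices shown here are assumptions. The only behavior the view above relies on is a boiler_room_and_no field whose value encodes "room/no" in a single string, plus Flask-WTF's validate_on_submit().

# Hypothetical sketch of the form used above (field type and choices are assumptions).
from flask_wtf import FlaskForm
from wtforms import SelectField, SubmitField
from wtforms.validators import DataRequired


class GasDataRecordForm(FlaskForm):
    # The selected value combines boiler room and boiler number as "room/no",
    # which the view splits on '/'.
    boiler_room_and_no = SelectField(
        'Boiler room / boiler no.',
        choices=[('RoomA/1', 'Room A - Boiler 1'), ('RoomA/2', 'Room A - Boiler 2')],
        validators=[DataRequired()],
    )
    submit = SubmitField('Submit')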
Code example #2
# Imports used by this view. Project-specific helpers (Config, PostForm,
# create_rec_hash and get_current_time) are defined elsewhere in the project.
import requests
from flask import flash, redirect


def post():
    form = PostForm()
    # Handle the POST request.
    if form.validate_on_submit():
        # URL of the database-connector endpoint that stores the post.
        insert_url = 'http://' + Config.DB_CONNECTOR_URL + '/insert-post/' + create_rec_hash()
        # Payload for the POST request.
        post_data = {
            'username': form.username.data,
            'post': form.comment.data,
            'postTime': get_current_time()
        }
        # Send the POST request and report the outcome to the user.
        result = requests.post(insert_url, data=post_data)
        if result.status_code == 200:
            flash('Comment post success!')
        else:
            flash('Comment post failed!')
    # Always return a response, including when form validation fails.
    return redirect('/')
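
One caveat with the request above: when the DB connector is unreachable, requests.post raises requests.exceptions.RequestException (for example on a connection error or timeout) rather than returning a non-200 status, so the status-code check alone does not cover that case. A small sketch of a wrapper that also handles those exceptions is shown below; the function name send_post and the 5-second timeout are assumptions.

# Sketch: wrap the request so connection errors and timeouts are reported
# as a failure instead of propagating as an unhandled exception.
import requests


def send_post(insert_url, post_data):
    try:
        result = requests.post(insert_url, data=post_data, timeout=5)
    except requests.exceptions.RequestException:
        return False
    return result.status_code == 200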
Code example #3
    def parse_specific_info(self, response):
        infoItem = JobInfoItem()
        # Job title.
        infoItem['job_name'] = response.xpath(
            '/html/body/div[5]/div[1]/div[1]/h1/text()').extract_first()

        for item in response.xpath('//div[@class="terminalpage clearfix"]'):
            # Page URL.
            infoItem['job_url'] = response.url

            # Position details.
            infoItem['salary'] = item.xpath(
                './/div[1]/ul[1]/li[1]/strong/text()').extract_first()
            infoItem['work_position'] = item.xpath(
                './/div[1]/ul[1]/li[2]/strong/a/text()').extract_first()
            infoItem['publish_date'] = item.xpath(
                './/div[1]/ul[1]/li[3]/strong/span/text()').extract_first()
            infoItem['job_nature'] = item.xpath(
                './/div[1]/ul[1]/li[4]/strong/text()').extract_first()
            infoItem['work_experience'] = item.xpath(
                './/div[1]/ul[1]/li[5]/strong/text()').extract_first()
            infoItem['education_degree'] = item.xpath(
                './/div[1]/ul[1]/li[6]/strong/text()').extract_first()
            infoItem['demand_number'] = item.xpath(
                './/div[1]/ul[1]/li[7]/strong/text()').extract_first()
            infoItem['job_category'] = item.xpath(
                './/div[1]/ul[1]/li[8]/strong/a/text()').extract_first()

            # Company information.
            infoItem['company_name'] = item.xpath(
                './/div[@class="company-box"]/p[@class="company-name-t"]/a/text()'
            ).extract_first()

            # Match each company detail row by its label text.
            for detail in item.xpath('.//div[@class="company-box"]/ul[1]/li'):
                subtitle = detail.xpath('.//span/text()').extract_first()
                if subtitle == '公司规模:':  # company scale
                    infoItem['company_scale'] = detail.xpath(
                        './/strong/text()').extract_first()
                elif subtitle == '公司性质:':  # company nature
                    infoItem['company_nature'] = detail.xpath(
                        './/strong/text()').extract_first()
                elif subtitle == '公司行业:':  # company industry
                    infoItem['company_industrial'] = detail.xpath(
                        './/strong/a/text()').extract_first()
                elif subtitle == '公司主页:':  # company homepage
                    infoItem['company_webpage'] = detail.xpath(
                        './/strong/a/@href').extract_first()
                elif subtitle == '公司地址:':  # company address
                    infoItem['company_address'] = detail.xpath(
                        './/strong/text()').extract_first()

            # Feedback rate scraped on the listing page and passed in via meta.
            infoItem['feedback_rate'] = response.meta['feedback_rate']
            infoItem['scrape_time'] = get_current_day() + "_" + get_current_time()

            yield infoItem
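
For completeness, here is a sketch of a JobInfoItem definition that declares every field assigned by the parser above; the project's actual items.py may of course differ.

# Sketch of the Scrapy item matching the fields used in parse_specific_info.
import scrapy


class JobInfoItem(scrapy.Item):
    job_name = scrapy.Field()
    job_url = scrapy.Field()
    salary = scrapy.Field()
    work_position = scrapy.Field()
    publish_date = scrapy.Field()
    job_nature = scrapy.Field()
    work_experience = scrapy.Field()
    education_degree = scrapy.Field()
    demand_number = scrapy.Field()
    job_category = scrapy.Field()
    company_name = scrapy.Field()
    company_scale = scrapy.Field()
    company_nature = scrapy.Field()
    company_industrial = scrapy.Field()
    company_webpage = scrapy.Field()
    company_address = scrapy.Field()
    feedback_rate = scrapy.Field()
    scrape_time = scrapy.Field()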