Example #1
def log_url(self, job_execution):
    job_instance = JobInstance.find_job_instance_by_id(job_execution.job_instance_id)
    project = Project.find_project_by_id(job_instance.project_id)
    for spider_service_instance in self.spider_service_instances:
        if spider_service_instance.server == job_execution.running_on:
            return spider_service_instance.log_url(project.project_name, job_instance.spider_name,
                                                   job_execution.service_job_execution_id)
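Note that if no registered service instance matches job_execution.running_on, the loop falls through and the method implicitly returns None, so callers should be prepared for a missing URL.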
Example #2
def download_items(project_id, job_exec_id):
    file_format = request.args.get('format')
    if file_format not in ('json', 'csv'):
        abort(404)

    job_execution = JobExecution.query.filter_by(project_id=project_id,
                                                 id=job_exec_id).first()

    job_instance = JobInstance.find_job_instance_by_id(
        job_execution.job_instance_id)
    project = Project.find_project_by_id(job_instance.project_id)

    # the items feed is JSON Lines: one JSON object per non-empty line
    res = requests.get(agent.items_url(job_execution))
    res.encoding = 'utf8'
    json_data = [json.loads(s) for s in filter(None, res.text.split('\n'))]

    filename = '{}-{}.{}'.format(project.project_name,
                                 job_instance.spider_name, file_format)
    filepath = os.path.join(app.static_folder, filename)
    if file_format == 'json':
        with open(filepath, 'w') as f:
            f.write(json.dumps(json_data))
    elif file_format == 'csv':
        with open(filepath, 'w') as f:
            csvwriter = csv.writer(f)
            for i, item in enumerate(json_data):
                if i == 0:
                    # write the header once, using the first item's keys
                    csvwriter.writerow(item.keys())
                csvwriter.writerow(item.values())

    return send_from_directory(app.static_folder, filename, as_attachment=True)
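The one-liner that parses the response is worth isolating: the items feed is JSON Lines, one object per non-empty line. A minimal, self-contained sketch (the two-item payload is made up for illustration):

import json

raw = '{"title": "a"}\n\n{"title": "b"}\n'  # hypothetical feed
items = [json.loads(line) for line in raw.splitlines() if line]
assert items == [{"title": "a"}, {"title": "b"}]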
Example #3
def log_url_slave(self, job_execution):
    """
    Fetch the log URL of one slave spider (one is enough).
    :param job_execution: a job_execution object
    :return: the log URL
    """
    job_instance = JobInstance.find_job_instance_by_id(job_execution.job_instance_id)
    project = Project.find_project_by_id(job_instance.project_id)
    # string describing the servers the master and slave spiders run on
    service_job_execution_id = job_execution.service_job_execution_id.split('>')
    # list of slave job execution ids
    slave_service_job_execution_id = service_job_execution_id[1].split(',')
    # addresses of the servers the spiders run on
    running_on = job_execution.running_on.split('>')
    slave_running_on = running_on[1].split(',')
    # look up the slave spider's name
    spider_name_slave_obj = SpiderInstance.query.filter_by(
        spider_name=job_instance.spider_name,
        project_id=job_instance.project_id).first()
    spider_name_slave = spider_name_slave_obj.spider_name_slave

    # fetch the log of the first slave whose server matches
    for spider_service_instance in self.spider_service_instances_slave:
        for job_execution_id, running_on_ in zip(slave_service_job_execution_id, slave_running_on):
            if spider_service_instance.server == running_on_:
                slave_log_url = spider_service_instance.log_url(
                    project.project_name, spider_name_slave,
                    job_execution_id)
                return slave_log_url
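The split('>') calls imply the encoding these fields use: the master part and the slave part are joined by '>', and the slave part is a comma-separated list. A minimal sketch with made-up server addresses:

running_on = 'http://master:6800>http://slave1:6800,http://slave2:6800'
master_running_on, slave_part = running_on.split('>')
slave_running_on = slave_part.split(',')
assert master_running_on == 'http://master:6800'
assert slave_running_on == ['http://slave1:6800', 'http://slave2:6800']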
Example #4
def cancel_spider(self, job_execution):
    job_instance = JobInstance.find_job_instance_by_id(job_execution.job_instance_id)
    project = Project.find_project_by_id(job_instance.project_id)
    # TODO multi service
    for spider_service_instance in self.spider_service_instances:
        if spider_service_instance.server == job_execution.running_on:
            if spider_service_instance.cancel_spider(project.project_name, job_execution.service_job_execution_id):
                job_execution.running_status = SpiderStatus.CANCELED
                db.session.commit()
            break
Example #5
def cancel_spider(self, job_execution):
    job_instance = JobInstance.find_job_instance_by_id(job_execution.job_instance_id)
    project = Project.find_project_by_id(job_instance.project_id)
    for spider_service_instance in self.spider_service_instances:
        if spider_service_instance.server == job_execution.running_on:
            if spider_service_instance.cancel_spider(project.project_name, job_execution.service_job_execution_id):
                job_execution.end_time = datetime.datetime.now()
                job_execution.running_status = SpiderStatus.CANCELED
                db.session.commit()
            break
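This variant differs from the previous example only in also stamping job_execution.end_time before committing the CANCELED status.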
Example #6
def run_spider_job(job_instance_id):
    '''
    run a spider from the scheduler
    :param job_instance_id: id of the JobInstance to start
    :return:
    '''
    try:
        job_instance = JobInstance.find_job_instance_by_id(job_instance_id)
        agent.start_spider(job_instance)
        app.logger.info('[run_spider_job][project:%s][spider_name:%s][job_instance_id:%s]' % (
            job_instance.project_id, job_instance.spider_name, job_instance.id))
    except Exception as e:
        app.logger.error('[run_spider_job] ' + str(e))
Example #7
def cancel_spider(self, job_execution):
    job_instance = JobInstance.find_job_instance_by_id(job_execution.job_instance_id)
    project = Project.find_project_by_id(job_instance.project_id)
    for spider_service_instance in self.spider_service_instances:
        if spider_service_instance.server == job_execution.running_on:
            if spider_service_instance.cancel_spider(project.project_name, job_execution.service_job_execution_id):
                job_execution.end_time = datetime.datetime.now()
                job_execution.running_status = SpiderStatus.CANCELED
                try:
                    db.session.commit()
                except:
                    db.session.rollback()
                    raise
            break
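Of the three cancel_spider variants shown here, this one is the most defensive: a failed commit rolls the session back so it stays usable, and the exception is re-raised for the caller to handle.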
Example #8
def run_spider_job(job_instance_id):
    """
    Start a spider via scrapyd.
    :param job_instance_id: id of the JobInstance to start
    :return:
    """
    try:
        job_instance = JobInstance.find_job_instance_by_id(job_instance_id)
        agent.start_spider(job_instance)
        app.logger.info('[APScheduler dispatched a spider job] [project: %s] [spider: %s]'
                        ' [job instance id: %s]'
                        % (job_instance.project_id, job_instance.spider_name, job_instance.id))
    except Exception as e:
        app.logger.error('[APScheduler failed to run a spider job; error] ' + str(e))
Example #9
def clear_jobexecution(job_execution):
    """
    clear_jobexecution
    Check whether the JobExecution still exists on the scrapyd servers
    and delete it if it no longer does.

    :param job_execution:
    :return:
    """
    job_instance = JobInstance.find_job_instance_by_id(
        job_execution.job_instance_id)
    project = Project.find_project_by_id(job_instance.project_id)
    if not check_job_existed(running_on=job_execution.running_on,
                             project_name=project.project_name,
                             spider_name=job_instance.spider_name,
                             job_id=job_execution.service_job_execution_id):
        db.session.delete(job_execution)
        db.session.commit()
Example #10
def job_update(project_id, job_id):
    project = Project.find_project_by_id(project_id)
    job_instance = JobInstance.find_job_instance_by_id(job_id)
    # app.logger.info("aaaaaaaaaaaa")
    # app.logger.info(job_instance)
    # if job_instance is None:
    #     print(job_instance)
    #     abort(404)
    job_instance.spider_name = request.form['spider_name']
    job_instance.project_id = project_id
    job_instance.spider_arguments = request.form['spider_arguments']
    job_instance.desc = request.form['desc']
    job_instance.tags = request.form.get('spider_tags', "")
    job_instance.priority = request.form.get('priority', 0)
    job_instance.run_type = request.form['run_type']
    # daemon chosen manually
    if request.form['daemon'] != 'auto':
        spider_args = []
        if request.form['spider_arguments']:
            spider_args = request.form['spider_arguments'].split(",")
        spider_args.append("daemon={}".format(request.form['daemon']))
        job_instance.spider_arguments = ','.join(spider_args)
    if job_instance.run_type == JobRunType.ONETIME:
        job_instance.enabled = -1
        db.session.add(job_instance)
        db.session.commit()
        agent.start_spider(job_instance)
    if job_instance.run_type == JobRunType.PERIODIC:
        job_instance.cron_minutes = request.form.get('cron_minutes') or '0'
        job_instance.cron_hour = request.form.get('cron_hour') or '*'
        job_instance.cron_day_of_month = request.form.get('cron_day_of_month') or '*'
        job_instance.cron_day_of_week = request.form.get('cron_day_of_week') or '*'
        job_instance.cron_month = request.form.get('cron_month') or '*'
        # set cron exp manually
        if request.form.get('cron_exp'):
            (job_instance.cron_minutes, job_instance.cron_hour,
             job_instance.cron_day_of_month, job_instance.cron_day_of_week,
             job_instance.cron_month) = request.form['cron_exp'].split(' ')
        db.session.add(job_instance)
        db.session.commit()
    return redirect(request.referrer, code=302)
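Note the unpack order job_update expects for cron_exp: minutes, hour, day_of_month, day_of_week, month. The last two fields are swapped relative to standard crontab order (minute hour dom month dow). A quick illustration with a made-up expression:

cron_exp = '30 2 * * *'  # hypothetical: 02:30 every day
minutes, hour, day_of_month, day_of_week, month = cron_exp.split(' ')
assert (minutes, hour) == ('30', '2')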
Example #11
def log_url_master(self, job_execution):
    """
    Fetch the log URL of the master spider.
    :param job_execution: a job_execution object
    :return: the log URL
    """
    job_instance = JobInstance.find_job_instance_by_id(job_execution.job_instance_id)
    project = Project.find_project_by_id(job_instance.project_id)
    # string describing the servers the master and slave spiders run on
    service_job_execution_id = job_execution.service_job_execution_id.split('>')
    # the master job execution id
    master_service_job_execution_id = service_job_execution_id[0]
    # addresses of the servers the spiders run on
    running_on = job_execution.running_on.split('>')
    master_running_on = running_on[0]
    # fetch the master spider's log
    for spider_service_instance in self.spider_service_instances_master:
        if spider_service_instance.server == master_running_on:
            master_log_url = spider_service_instance.log_url(
                project.project_name, job_instance.spider_name,
                master_service_job_execution_id)
            return master_log_url
Example #12
def job_get(project_id, job_id):
    job_instance = JobInstance.find_job_instance_by_id(job_id).to_dict()
    return json.dumps(job_instance)