Example #1
def add_task_html(task_id=None):
    try:
        web_policys = db.session.query(WebVulPolicy)
        web_schemes = db.session.query(TaskWebScheme)
        rule_types = db.session.query(WebVulFamily).filter(WebVulFamily.parent_id != 0)
        rep_models = db.session.query(ReportModel.model_id, ReportModel.model_name)
        if task_id:
            rule_family_ids = db.session.query(func.group_concat(TaskRuleFamilyRef.rule_family_id)).\
                filter(TaskRuleFamilyRef.task_id == task_id).first()
            task = db.session.query(Task).filter(Task.id == task_id).first()
            task_rep_model = db.session.query(TaskRepModelRef).filter(TaskRepModelRef.task_id==task_id).first()
            if task_rep_model:
                task_rep_model_id = task_rep_model.rep_model_id
            else:
                # fall back to the vendor's default report template
                task_rep_model_id = db.session.query(ReportModel).filter(
                    or_(ReportModel.company == '上海云盾信息技术有限公司',
                        ReportModel.model_name == '盾眼默认模板')).first().model_id
            return render_template('web_task_edit.html', task=task, policys=web_policys, schemes=web_schemes,
                                   rep_models=rep_models, task_rep_model_id=task_rep_model_id,
                                   rule_family_ids=rule_family_ids[0], level_one='task', level_two='add_task')

        return render_template('web_task_add.html', policys=web_policys, schemes=web_schemes, rule_types=rule_types,
                               rep_models=rep_models, level_one='task', level_two='add_task')
    except Exception as e:
        logger.error(e)
        abort(404)
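
A minimal sketch of how this view could be registered; the blueprint name and URL rules below are assumptions, not taken from the original project:

from flask import Blueprint

task_bp = Blueprint('task', __name__)  # hypothetical blueprint name

# one rule for the add form, one for editing an existing task
task_bp.add_url_rule('/task/add', view_func=add_task_html)
task_bp.add_url_rule('/task/add/<int:task_id>', view_func=add_task_html)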
Example #2
def spider_notify():
    try:
        data_str = request.data
        data = json.loads(data_str)
        spider_task_id = data.get('task_id')
        execute_id = data.get('execute_id')

        spider_job = db.session.query(SpiderJob).filter(
            SpiderJob.spider_task_id == spider_task_id).first()
        spider_job.spider_exe_id = execute_id
        spider_job.notify_times -= 1
        db.session.add(spider_job)
        db.session.commit()
        task_id = spider_job.task_id
        action = 'start'
        task = db.session.query(Task).filter(Task.id == task_id).first()
        # the task has not been started yet
        if task.state == 1:
            job = run_engine.apply_async(args=[task_id, action, execute_id],
                                         countdown=10)
            job_task_ref = ApJobsTaskRef(job.id, task_id, 'PENDING')
            db.session.add(job_task_ref)

            db.session.commit()
        return 'ok'
    except Exception as e:
        logger.error(e)
        return 'failed'
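
For reference, a sketch of the notification the spider service is expected to POST to this endpoint, inferred from the fields the handler reads; the URL and token are placeholders:

import json
import requests  # assumed available for this example

payload = {'task_id': 235, 'execute_id': 6247}
requests.post('http://scanner.example/api/spider_notify?token=<token>',
              data=json.dumps(payload))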
Example #3
    def gen_rep(self, update_task_id, domain=None):
        '''
        # ranking and distribute accumulate data across scan runs
        {"ranking": {vul_family_name: {"count": 200, "url_count": 150}},
         "distribute": {www.baidu.com: {"HIGH": 5, "MED": 3, "LOW": 6}}
        }
        # over_view is built from, and carries, only the current run's data
        { www.baidu.com: {
            vul_name: {"count": 15, "urls": [], "level": "MED", "vul_desc": "", "vul_solu": ""}}
        }
        '''
        if not self.rep_json:
            rep_dict = {'ranking': {},
                        'distribute': {}
                        }
        else:
            rep_dict = json.loads(self.rep_json)
        if not domain:
            task = db.session.query(Task).filter(Task.id == update_task_id).first()
            domain = get_task_domain(task.target)
        try:
            # ranking: vulnerability count and distinct-URL count per family
            ranking_res = db.session.query(WebVulList.family, func.count('1'), func.count(distinct(WebResult.url))).join(
                WebResult, WebResult.vul_id == WebVulList.vul_id).filter(WebResult.task_id == update_task_id).\
                group_by(WebVulList.family).all()
            for family_count in ranking_res:
                count = family_count[1]
                url_count = family_count[2]
                if rep_dict['ranking'].get(family_count[0]):
                    count = rep_dict['ranking'][family_count[0]]['count'] + count
                    url_count = rep_dict['ranking'][family_count[0]]['url_count'] + url_count
                rep_dict['ranking'][family_count[0]] = {"count": count, "url_count": url_count}
            # distribution: severity counts for this domain
            d_dis = {"HIGH": 0, "MED": 0, "LOW": 0}
            dis_res = db.session.query(WebResult.level, func.count(1)).filter(WebResult.task_id==update_task_id).\
                group_by(WebResult.level).all()
            for dis_count in dis_res:
                d_dis[dis_count[0]] = dis_count[1]
            rep_dict['distribute'][domain] = d_dis
            # overview: per-vulnerability details for this domain
            v_dict = {}
            # group_concat (MySQL) collects every affected URL per vulnerability;
            # plain concat would keep only one arbitrary value per group
            over_view_res = db.session.query(WebVulList.vul_name, func.count(1), WebVulList.level,
                                             func.group_concat(WebResult.url),
                                             WebVulList.desc, WebVulList.solu).join(
                WebResult, WebResult.vul_id == WebVulList.vul_id).filter(WebResult.task_id == update_task_id).\
                group_by(WebVulList.vul_name).all()
            for vul in over_view_res:
                v_dict[vul[0]] = {"count": vul[1], "level": vul[2], "urls": vul[3],  "vul_desc": vul[4], "vul_solu": vul[5]}
            over_view_dict = {domain: v_dict}
        except Exception as e:
            logger.error(e)
            over_view_dict = {}
Example #4
def del_job_db(job_id):
    try:
        db.session.query(ApJobsTaskRef).filter(
            ApJobsTaskRef.parent_id == job_id).delete()
        db.session.query(ApJobsTaskRef).filter(
            ApJobsTaskRef.job_id == job_id).delete()
        db.session.commit()
        return True
    except Exception as e:
        logger.error(e)
        return False
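
A variant sketch of del_job_db that also rolls back the session when the bulk delete fails, so a later request does not inherit a broken transaction; the rollback is my addition, not part of the original:

def del_job_db(job_id):
    try:
        # delete the children first, then the job's own reference rows
        db.session.query(ApJobsTaskRef).filter(
            ApJobsTaskRef.parent_id == job_id).delete()
        db.session.query(ApJobsTaskRef).filter(
            ApJobsTaskRef.job_id == job_id).delete()
        db.session.commit()
        return True
    except Exception as e:
        db.session.rollback()  # assumption: leave the session usable on failure
        logger.error(e)
        return False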
Example #5
def exceptions(error):
    """ Logs after every Exception. """
    ts = strftime('[%Y-%b-%d %H:%M]')
    tb = traceback.format_exc()
    err = f"{ts}" \
       f"{request.remote_addr} " \
       f"{request.method} " \
       f"{request.scheme} " \
       f"{request.full_path} " \
       f"{tb} "
    logger.error(err)
    return "Internal Server Error", 500
Example #6
def add_spider_task(start_urls, limit_time=1800, execute_delay=60, token=None):

    post_data = {
        'start_urls': start_urls,
        'type': 'spider',
        'limit_depth': 15,
        'limit_total': 1000,
        'limit_time': limit_time,
        'limit_image': 0,
        'limit_subdomain': 0,
        'limit_js': 1,
        'url_unique_mode': 'url-query',
        'notify_url': str(url_for('api.spider_notify', _external=True)) + "?token=" + token,
        'source_ip': '',
        'proxies': '',
        'crontab': '',
        'execute_at': '',
        'execute_delay': execute_delay,
    }
    body = urllib.parse.urlencode(post_data)
    spider_task_id = None

    try:
        res, content = http.request(SPIDER_URL + '/task/save',
                                    'POST',
                                    body=body,
                                    headers=header)
        con = json.loads(content)
        if res.get('status') == '200' and con.get('status') == 'ok':
            spider_task_id = con.get('data').get('task_id')
            msg = con.get('msg')
            return spider_task_id, msg
        else:
            msg = con.get('msg')
    except Exception as e:
        logger.error(e)
        msg = str(e)
    return spider_task_id, msg
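
A usage sketch with assumed values; SPIDER_URL, http, and header must already be configured in the module:

spider_task_id, msg = add_spider_task(
    start_urls='http://www.example.com/',  # hypothetical target
    limit_time=3600,
    token='my-api-token')                  # hypothetical token
if spider_task_id is None:
    logger.error('spider task creation failed: %s', msg)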
Example #7
def revoke_job(job_id):
    try:
        celery.control.revoke(job_id)
        job_child_list = db.session.query(ApJobsTaskRef).filter(
            ApJobsTaskRef.parent_id == job_id).all()
        for job_child in job_child_list:
            celery.control.revoke(job_child.job_id)
        return True
    except Exception as e:
        logger.error(e)
        return False
Example #8
def after_request(response):
    """ Logs after every request. """
    # This avoids duplicate log entries, since 500 responses are already
    # logged via the @app.errorhandler hook.
    if response.status_code != 500:
        ts = strftime('[%Y-%b-%d %H:%M]')
        err = f"{ts} " \
           f"{request.remote_addr} " \
           f"{request.method} " \
           f"{request.scheme} " \
           f"{request.full_path} " \
           f"{response.status} "
        logger.error(err)
    return response
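
Registration sketch, assuming a standard Flask app object; note the handler logs every request at ERROR level, so logger.info may be the better fit if the log should distinguish severities:

app.after_request(after_request)  # or decorate with @app.after_request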
Example #9
    def generate_auth_uuid(self):
        token = self.gen_auth_token()
        s_uuid = uuid.uuid1().hex  # 32-char UUID string without dashes
        token_mapping = TokenMapping(
            uuid=s_uuid,
            token=token,
        )
        db.session.add(token_mapping)
        try:
            db.session.commit()
        except Exception as e:
            db.session.rollback()
            logger.error(e)

        return s_uuid
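
A hypothetical reverse lookup for the mapping created above, based only on the TokenMapping fields used here:

def resolve_token(s_uuid):
    # return the stored token for a previously issued uuid, or None
    mapping = db.session.query(TokenMapping).filter(
        TokenMapping.uuid == s_uuid).first()
    return mapping.token if mapping else None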
Example #10
def del_spider_task(scan_task_id=None):

    spider_task = db.session.query(SpiderJob).filter(
        SpiderJob.task_id == scan_task_id).first()
    if spider_task:
        try:
            post_data = {'ids': spider_task.spider_task_id}
            body = urllib.urlencode(post_data)
            res, content = http.request(SPIDER_URL + '/task/delete',
                                        'POST',
                                        body=body,
                                        headers=header)
            con = json.loads(content)
            if res.get('status') == '200' and con.get('status') == 'ok':
                return True
        except Exception as e:
            logger.error(e)
        return False
Example #11
def run_engine(self, task_id, action, spider_flag, spider_exec_id=None):
    print('run_engine')

    self.update_state(state='STARTED')
    job_id = self.request.id
    job_task_ref = db.session.query(ApJobsTaskRef).filter(
        ApJobsTaskRef.job_id == job_id).first()
    job = self.AsyncResult(job_id)
    job_task_ref.job_status = 2
    job_task_ref.job_state = job.state
    job_task_ref.worker_name = self.request.hostname
    db.session.add(job_task_ref)
    db.session.commit()
    task = db.session.query(Task).filter(
        Task.id == job_task_ref.task_id).first()
    task.start_time = job_task_ref.run_time
    task.state = 2
    db.session.add(task)
    db.session.commit()
    domain, scheme, path, cookie = get_task_domain(task.target)
    start_url = '%s://%s%s' % (scheme, domain, path)
    # GET URL DATA FROM SPIDER_API (currently disabled)
    # if spider_exec_id:
    #     get_url_data(spider_exec_id, task_id, domain)
    # start the scan spider
    if spider_flag:
        custom_headers = {
            'Host': domain,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0',
            'Accept-Encoding': 'gzip,deflate,sdch',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
            'Connection': 'keep-alive',
            'Cache-Control': 'no-cache',
            'Referer': start_url,
            'Cookie': cookie if cookie else '',
        }
        from common.spider_models import ScanSpiderUrl
        try:
            db.session.query(ScanSpiderUrl).filter(
                ScanSpiderUrl.task_id == task_id).delete()
            db.session.commit()
            spider_timeout = SPIDER_LIMIT_TIME.get(task.web_scan_policy, 1800)
            # fast-scan policy (511) runs only the static crawler; otherwise parse dynamically
            dynamic_parse = task.web_scan_policy != 511
            spider = Spider(concurrent_num=2,
                            depth=50,
                            max_url_num=1000,
                            crawler_mode=1,
                            dynamic_parse=dynamic_parse,
                            spider_timeout=spider_timeout,
                            custom_headers=custom_headers,
                            login_dict=None,
                            scan_task_id=task_id)

            spider.feed_url(start_url)
            spider.start()
        except Exception as e:
            logger.error(e)
        try:
            # copy the urls to the table spider_url from scan_spider_url
            db.session.query(SpiderUrl).filter(
                SpiderUrl.task_id == task_id).delete()
            db.session.commit()
            urls = db.session.query(ScanSpiderUrl).filter(
                ScanSpiderUrl.task_id == task_id).all()
            if not urls:
                try:
                    spider_timeout = SPIDER_LIMIT_TIME.get(
                        task.web_scan_policy, 1800)
                    dynamic_parse = task.web_scan_policy != 511
                    spider = Spider(concurrent_num=2,
                                    depth=50,
                                    max_url_num=1000,
                                    crawler_mode=1,
                                    dynamic_parse=dynamic_parse,
                                    spider_timeout=spider_timeout,
                                    custom_headers=custom_headers,
                                    login_dict=None,
                                    scan_task_id=task_id)

                    spider.feed_url(start_url)
                    spider.start()
                except Exception as e:
                    logger.error(e)
                urls = db.session.query(ScanSpiderUrl).filter(
                    ScanSpiderUrl.task_id == task_id).all()
            for s_url in urls:
                url = s_url.url
                params = s_url.params
                refer = s_url.refer if s_url.refer else start_url
                if 'GET' == s_url.method.upper():
                    url_split = url.split('?', 1)
                    url = url_split[0]
                    if len(url_split) > 1:
                        params = url_split[1]
                spider_url = SpiderUrl(task_id,
                                       url,
                                       params=params,
                                       method=s_url.method,
                                       refer=refer)
                db.session.add(spider_url)
                db.session.commit()

            # copy the other_urls to the table spider_url_other from scan_spider_url_other
            db.session.query(SpiderUrlOther).filter(
                SpiderUrlOther.task_id == task_id).delete()
            db.session.commit()
            urls = db.session.query(
                ScanSpiderUrlOther.url, ScanSpiderUrlOther.refer,
                ScanSpiderUrlOther.type).filter(
                    ScanSpiderUrlOther.task_id == task_id).all()
            for s_url in urls:
                spider_url = SpiderUrlOther(task_id=task_id,
                                            url=s_url[0],
                                            refer=s_url[1],
                                            type=s_url[2])
                db.session.add(spider_url)
                db.session.commit()

        except Exception as e:
            logger.error(e)

    web_scan_engine = WebScanEngine(task_id, action)
    web_scan_engine.run()
    print('finish_run_engine')
    job_task_ref.job_state = job.status
    job_task_ref.job_status = 3
    job_task_ref.end_time = datetime.now()
    db.session.add(job_task_ref)
    db.session.commit()
    task = db.session.query(Task).filter(
        Task.id == job_task_ref.task_id).first()
    task.end_time = job_task_ref.end_time
    task.state = 3
    db.session.add(task)
    db.session.commit()
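
run_engine calls self.update_state and reads self.request, so it must be registered as a bound Celery task; a minimal sketch of the assumed declaration:

@celery.task(bind=True)  # bind=True passes the task instance in as `self`
def run_engine(self, task_id, action, spider_flag, spider_exec_id=None):
    ...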