Example #1
def index(request):
    SM = ScrapyManager("malspider", "full_domain", "http://0.0.0.0:6802")
    pending_jobs = 0
    running_jobs = 0
    finished_jobs = 0
    scrapyd_connected = 0

    crawl_jobs = SM.get_all_jobs_by_project()
    if crawl_jobs is not None:
        pending_jobs = len(crawl_jobs['pending'])
        running_jobs = len(crawl_jobs['running'])
        finished_jobs = len(crawl_jobs['finished'])
        scrapyd_connected = 1

    unique_alerts = ModelQuery.get_num_unique_alerts()
    alert_count_by_reason = ModelQuery.get_num_alerts_by_reason()

    elems_count = Element.objects.count()
    pages_count = Page.objects.count()
    template = loader.get_template('dashboard_index.html')
    all_orgs = ModelQuery.get_all_organizations()
    top_offenders = ModelQuery.get_top_offenders()
    # request.get_full_path() returns only the path and query string, so
    # urlparse() could never extract a hostname from it; ask for the Host
    # header instead and fall back to localhost.
    my_url = request.get_host().split(':')[0]
    if my_url == '':
        scrapyd_url = 'http://localhost:6802'
    else:
        scrapyd_url = 'http://' + my_url + ':6802'

    context = RequestContext(
        request, {
            'elems_count': elems_count,
            'pages_count': pages_count,
            'pending_jobs': pending_jobs,
            'running_jobs': running_jobs,
            'finished_jobs': finished_jobs,
            'unique_alerts': unique_alerts,
            'alert_count_by_reason': alert_count_by_reason,
            'all_orgs': all_orgs,
            'top_offenders': top_offenders,
            'scrapyd_connected': scrapyd_connected,
            'my_url': scrapyd_url
        })
    return HttpResponse(template.render(context))
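
Every snippet on this page builds a ScrapyManager("malspider", "full_domain", "http://0.0.0.0:6802"), but the class itself is never shown. The sketch below is a guess at such a wrapper, written over Scrapyd's documented JSON endpoints (listjobs.json, schedule.json, cancel.json); the real malspider implementation may differ. Note that Examples #1 through #7 call schedule_job(org_id, domain), while Examples #8 and #9 pass a single dict of spider arguments, so the two-argument form modeled here is only one of the signatures in use.

import requests

class ScrapyManager(object):
    def __init__(self, project, spider, scrapyd_url):
        self.project = project
        self.spider = spider
        self.scrapyd_url = scrapyd_url.rstrip('/')

    def get_all_jobs_by_project(self):
        # Scrapyd's listjobs.json returns {'pending': [...], 'running': [...],
        # 'finished': [...]}; return None when the daemon is unreachable.
        try:
            resp = requests.get(self.scrapyd_url + '/listjobs.json',
                                params={'project': self.project}, timeout=5)
            return resp.json()
        except requests.RequestException:
            return None

    def schedule_job(self, org_id, domain):
        # schedule.json answers {'status': 'ok', 'jobid': '...'} on success;
        # extra POST fields are handed to the spider as arguments.
        data = {'project': self.project, 'spider': self.spider,
                'org': org_id, 'domain': domain}
        try:
            resp = requests.post(self.scrapyd_url + '/schedule.json',
                                 data=data, timeout=5)
            return resp.json()
        except requests.RequestException:
            return {}

    def cancel_all_jobs(self):
        # cancel.json takes one job id at a time, so walk the queue.
        jobs = self.get_all_jobs_by_project() or {}
        for job in jobs.get('pending', []) + jobs.get('running', []):
            requests.post(self.scrapyd_url + '/cancel.json',
                          data={'project': self.project, 'job': job['id']},
                          timeout=5)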
Example #2
def daemon(request, jobid=None):
    SM = ScrapyManager("malspider", "full_domain", "http://0.0.0.0:6802")
    pending_jobs = []
    running_jobs = []
    finished_jobs = []
    scrapyd_connected = 0

    crawl_jobs = SM.get_all_jobs_by_project()
    if crawl_jobs is not None:
        pending_jobs = crawl_jobs['pending']
        running_jobs = crawl_jobs['running']
        finished_jobs = crawl_jobs['finished']
        scrapyd_connected = 1

    context = RequestContext(
        request, {
            'finished_jobs': finished_jobs,
            'pending_jobs': pending_jobs,
            'running_jobs': running_jobs,
            'status': scrapyd_connected,
            'jobid': jobid
        })
    template = loader.get_template("dashboard_daemon.html")
    return HttpResponse(template.render(context))
Example #3
def index(request, org_id=None):
    SM = ScrapyManager("malspider", "full_domain", "http://0.0.0.0:6802")
    pending_jobs = 0
    running_jobs = 0
    finished_jobs = 0
    scrapyd_connected = 0

    crawl_jobs = SM.get_all_jobs_by_project()
    if crawl_jobs is not None:
        pending_jobs = len(crawl_jobs['pending'])
        running_jobs = len(crawl_jobs['running'])
        finished_jobs = len(crawl_jobs['finished'])
        scrapyd_connected = 1

    unique_alerts = ModelQuery.get_num_unique_alerts()
    alert_count_by_reason = ModelQuery.get_num_alerts_by_reason()

    elems_count = Element.objects.count()
    pages_count = Page.objects.count()
    template = loader.get_template('dashboard_index.html')
    all_orgs = ModelQuery.get_all_organizations()
    top_offenders = ModelQuery.get_top_offenders()

    org_obj = ModelQuery.get_org_by_id(org_id)
    jobid = None
    scan_domain = None
    error = None
    if org_obj:
        output = SM.schedule_job(org_id, org_obj.domain)
        if 'jobid' in output:
            jobid = output['jobid']
            scan_domain = org_obj.domain
    elif org_obj is None and org_id is not None:
        error = "Error: Invalid Organization ID!"

    context = RequestContext(
        request, {
            'elems_count': elems_count,
            'pages_count': pages_count,
            'pending_jobs': pending_jobs,
            'running_jobs': running_jobs,
            'finished_jobs': finished_jobs,
            'unique_alerts': unique_alerts,
            'alert_count_by_reason': alert_count_by_reason,
            'all_orgs': all_orgs,
            'top_offenders': top_offenders,
            'scrapyd_connected': scrapyd_connected,
            'jobid': jobid,
            'scan_domain': scan_domain,
            'error': error
        })
    return HttpResponse(template.render(context))
Example #4
def daemon(request, jobid=None):
    SM = ScrapyManager("malspider", "full_domain", "http://0.0.0.0:6802")
    pending_jobs = []
    running_jobs = []
    finished_jobs = []
    scrapyd_connected = 0

    crawl_jobs = SM.get_all_jobs_by_project()
    if crawl_jobs is not None:
        pending_jobs = crawl_jobs['pending']
        running_jobs = crawl_jobs['running']
        finished_jobs = crawl_jobs['finished']
        scrapyd_connected = 1

    context = RequestContext(
        request, {
            'finished_jobs': finished_jobs,
            'pending_jobs': pending_jobs,
            'running_jobs': running_jobs,
            'status': scrapyd_connected,
            'jobid': jobid
        })
    template = loader.get_template("dashboard_daemon.html")
    return HttpResponse(template.render(context))
Example #5
def index(request):
    SM = ScrapyManager("malspider", "full_domain", "http://0.0.0.0:6802")
    pending_jobs = 0
    running_jobs = 0
    finished_jobs = 0
    scrapyd_connected = 0

    crawl_jobs = SM.get_all_jobs_by_project()
    if crawl_jobs is not None:
        pending_jobs = len(crawl_jobs['pending'])
        running_jobs = len(crawl_jobs['running'])
        finished_jobs = len(crawl_jobs['finished'])
        scrapyd_connected = 1

    unique_alerts = ModelQuery.get_num_unique_alerts()
    alert_count_by_reason = ModelQuery.get_num_alerts_by_reason()

    elems_count = Element.objects.count()
    pages_count = Page.objects.count()
    template = loader.get_template('dashboard_index.html')
    all_orgs = ModelQuery.get_all_organizations()
    top_offenders = ModelQuery.get_top_offenders()
    # request.get_full_path() returns only the path and query string, so
    # urlparse() could never extract a hostname from it; ask for the Host
    # header instead and fall back to localhost.
    my_url = request.get_host().split(':')[0]
    if my_url == '':
        scrapyd_url = 'http://localhost:6802'
    else:
        scrapyd_url = 'http://' + my_url + ':6802'

    context = RequestContext(
        request, {
            'elems_count': elems_count,
            'pages_count': pages_count,
            'pending_jobs': pending_jobs,
            'running_jobs': running_jobs,
            'finished_jobs': finished_jobs,
            'unique_alerts': unique_alerts,
            'alert_count_by_reason': alert_count_by_reason,
            'all_orgs': all_orgs,
            'top_offenders': top_offenders,
            'scrapyd_connected': scrapyd_connected,
            'my_url': scrapyd_url
        })
    return HttpResponse(template.render(context))
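
A note on the Django idiom used in all of the view examples: passing a RequestContext directly to Template.render() only works on Django versions before 1.10; from 1.10 on, render() expects a plain dict and raises a TypeError otherwise. A minimal modern equivalent of the pattern, assuming the same template name and context keys, is the render() shortcut:

from django.shortcuts import render

def index(request):
    # Build the same context dict as in the examples above; render() wraps
    # template loading and request-context handling in one call.
    context = {'pending_jobs': 0, 'running_jobs': 0,
               'finished_jobs': 0, 'scrapyd_connected': 0}
    return render(request, 'dashboard_index.html', context)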
Example #6
    def handle(self, *args, **options):
        SM = ScrapyManager("malspider", "full_domain", "http://0.0.0.0:6802")
        SM.cancel_all_jobs()
        orgs = Organization.objects.all()
        for org in orgs:
            print("Scheduling job for %s" % org.domain)
            SM.schedule_job(org.id, org.domain)
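
Examples #6, #8, and #9 are handle() methods of Django management commands rather than views. For orientation, such a command lives in a scaffold like the one below; the file path and help text are hypothetical, not taken from malspider:

from django.core.management.base import BaseCommand

# Assumed location: <app>/management/commands/schedule_crawls.py
class Command(BaseCommand):
    help = "Cancel outstanding crawls and queue one job per organization."

    def handle(self, *args, **options):
        pass  # body as in Example #6

With that layout it would be invoked as python manage.py schedule_crawls.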
Example #7
def index(request, org_id=None):
    SM = ScrapyManager("malspider", "full_domain", "http://0.0.0.0:6802")
    pending_jobs = 0
    running_jobs = 0
    finished_jobs = 0
    scrapyd_connected = 0

    crawl_jobs = SM.get_all_jobs_by_project()
    if crawl_jobs is not None:
        pending_jobs = len(crawl_jobs['pending'])
        running_jobs = len(crawl_jobs['running'])
        finished_jobs = len(crawl_jobs['finished'])
        scrapyd_connected = 1

    unique_alerts = ModelQuery.get_num_unique_alerts()
    alert_count_by_reason = ModelQuery.get_num_alerts_by_reason()

    elems_count = Element.objects.count()
    pages_count = Page.objects.count()
    template = loader.get_template('dashboard_index.html')
    all_orgs = ModelQuery.get_all_organizations()
    top_offenders = ModelQuery.get_top_offenders()

    org_obj = ModelQuery.get_org_by_id(org_id)
    jobid = None
    scan_domain = None
    error = None
    if org_obj:
        output = SM.schedule_job(org_id, org_obj.domain)
        if 'jobid' in output:
            jobid = output['jobid']
            scan_domain = org_obj.domain
    elif org_obj is None and org_id is not None:
        error = "Error: Invalid Organization ID!"

    context = RequestContext(
        request, {
            'elems_count': elems_count,
            'pages_count': pages_count,
            'pending_jobs': pending_jobs,
            'running_jobs': running_jobs,
            'finished_jobs': finished_jobs,
            'unique_alerts': unique_alerts,
            'alert_count_by_reason': alert_count_by_reason,
            'all_orgs': all_orgs,
            'top_offenders': top_offenders,
            'scrapyd_connected': scrapyd_connected,
            'jobid': jobid,
            'scan_domain': scan_domain,
            'error': error
        })
    return HttpResponse(template.render(context))
Example #8
    def handle(self, *args, **options):
        SM = ScrapyManager("malspider", "full_domain", "http://0.0.0.0:6802")
        SM.cancel_all_jobs()
        orgs = Organization.objects.all()
        for org in orgs:
            allowed_domains = ""
            start_urls = ""
            domain = org.domain
            
            if "www." in domain:
                allowed_domains = domain + "," + domain[4:]
                start_urls = 'http://' + domain + ',https://' + domain + ',http://' + domain[4:] + ',https://' + domain[4:]
            else:
                allowed_domains = domain + ",www." + domain
                start_urls = 'http://' + domain + ',http://www.' + domain + ',https://www.' + domain + ',https://' + domain

            crawl_params = {
                "org": org.id,
                "allowed_domains": allowed_domains,
                "start_urls": start_urls,
            }
            print("Adding %s to the queue..." % org.domain)
            SM.schedule_job(crawl_params)
Example #9
    def handle(self, *args, **options):
        SM = ScrapyManager("malspider", "full_domain", "http://0.0.0.0:6802")
        SM.cancel_all_jobs()
        orgs = Organization.objects.all()
        for org in orgs:
            allowed_domains = ""
            start_urls = ""
            domain = org.domain

            if "http://" in domain:
                domain = domain.replace("http://", "")
                if "www." in domain:
                    allowed_domains = domain + "," + domain[4:]
                    start_urls = 'http://' + domain + ',http://' + domain[4:]

                else:
                    allowed_domains = domain + ",www." + domain
                    start_urls = 'http://' + domain + ',http://www.' + domain

            elif "https://" in domain:
                domain = domain.replace("https://", "")
                if "www." in domain:
                    allowed_domains = domain + "," + domain[4:]
                    start_urls = 'https://' + domain + ',https://' + domain[4:]

                else:
                    allowed_domains = domain + ",www." + domain
                    start_urls = 'https://' + domain + ',https://www.' + domain

            else:
                if "www." in domain:
                    allowed_domains = domain + "," + domain[4:]
                    start_urls = 'http://' + domain + ',http://' + domain[4:]

                else:
                    allowed_domains = domain + ",www." + domain
                    start_urls = 'http://' + domain + ',http://www.' + domain

            crawl_params = {
                "org": org.id,
                "allowed_domains": allowed_domains,
                "start_urls": start_urls,
            }
            print "Adding ", org.domain, " to the queue..."
            SM.schedule_job(crawl_params)
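
The scheme and www branching in Examples #8 and #9 builds the same domain pair three times over. As a sketch, the same allowed_domains/start_urls strings (following Example #9's one-scheme-per-domain logic) can come from a single helper; crawl_targets is a hypothetical name, not part of malspider:

def crawl_targets(domain):
    # Strip an explicit scheme, remembering it for the start URLs.
    scheme = 'http://'
    if domain.startswith('https://'):
        scheme = 'https://'
        domain = domain[len('https://'):]
    elif domain.startswith('http://'):
        domain = domain[len('http://'):]

    # Pair every domain with its www/bare counterpart.
    if domain.startswith('www.'):
        variants = [domain, domain[len('www.'):]]
    else:
        variants = [domain, 'www.' + domain]

    return ','.join(variants), ','.join(scheme + v for v in variants)

The loop body then reduces to allowed_domains, start_urls = crawl_targets(org.domain) followed by the same crawl_params dict.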