Example #1
 def handle(self, *args, **options):
     if options['query']:
         if len(options['column']) == 0:
             results = FilterNode.objects.get(name=options['query']).results.values()
         else:
             results = FilterNode.objects.get(name=options['query']).results.values(*options['column'])
         dataset = Dataset(headers=results[0].keys())
         for result in results:
             dataset.append(result.values())
         print(dataset.export(options['format']))
     else:
         filters = FilterNode.objects.named()
         print("Available queries:")
         for filter in filters:
             print(filter.name)
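
Nearly every example below follows the same tablib pattern: build a Dataset, set headers, append rows, then export to a format string. A minimal standalone sketch for reference (the header and row values here are invented):

from tablib import Dataset

# headers first, one append() per row, export() takes a format name
data = Dataset(headers=['name', 'count'])
data.append(['alpha', 1])
data.append(['beta', 2])

print(data.export('csv'))  # text formats ('csv', 'json', ...) return str
# binary formats such as 'xls'/'xlsx' return bytes and must be written
# to a file opened in 'wb' mode, as the examples below do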
Example #2
File: cli.py Project: cadeef/bear-exporter
def display_table(
    records: Sequence[Any],
    headers: Sequence[str],
    attrs: Sequence[str],
    tablefmt: str = "fancy_grid",
) -> None:
    """

    Args:
      records: Sequence[Any]:
      headers: Sequence[str]:
      attrs: Sequence[str]:
      tablefmt: str:  (Default value = "fancy_grid")

    Returns:

    """
    if len(records) == 0:
        display_error("No results found")
    else:
        data = Dataset()
        data.headers = headers
        for record in records:
            data.append([getattr(record, a) for a in attrs])

        click.echo(data.export("cli", tablefmt=tablefmt))
Example #3
    def build_csv_from_data(self, form_data, response_data):
        id_to_label = self.get_header_maps(form_data)
        headers = ["id"]
        for c in id_to_label.values():
            headers.append(re.sub(r"[,\n\r]+", "", c))

        data = Dataset(headers=headers)
        for response_info in response_data:
            response = response_info.get("responses", {})
            row_id = response_info["id"]
            row = [row_id]
            for pk in response.keys():
                value = response.get(pk)
                if isinstance(value, str) or not value:
                    row.append(value)
                # attachment
                elif isinstance(value, list) and value[0].get("filename"):
                    links = " ".join(
                        [self.attachment_link(rec) for rec in value])
                    row.append(links)
                elif value.get("checked"):
                    row.append(", ".join(value.get("checked")))
                elif value.get("other_text"):
                    row.append(value.get("other_text"))
                else:
                    logger.error("Unhandled value type: %s (%s)." %
                                 (value, type(value)))
                    logger.error("Response data structure: %s" % (response))
                    row.append(None)
            data.append(row)
        return data.export("csv")
Example #4
def emergency(current_user):
    # prepare user info; fetch the user once instead of querying twice
    user = User.query.filter_by(id=current_user.id).first()
    user_data = user_schema.dump(user)
    user.set_critical_state()
    data = Dataset()
    data.headers = [
        'First Name', 'Last Name', 'Email', 'Address', 'State', 'Age',
        'Travel History', 'Telephone'
    ]
    data.append((user_data['first_name'], user_data['last_name'],
                 user_data['email'], user_data['address'], user_data['state'],
                 user_data['age'], user_data['travel_history'],
                 user_data['tel']))
    file_path = f'{os.getcwd()}/user_dat.xlsx'
    with open(file_path, 'wb') as file:
        file.write(data.export('xlsx'))
    # actually send the message once the file is written and closed
    result = EmergencyMail("Emergency Report!",
                           render_template('Emergency.html'),
                           file_path)
    if result:
        return jsonify({'Sent Email': True}), 200
    return jsonify({'Sent Email': False}), 500
Example #5
 def writeDataToExcel(self, filePath, sheetName):
     data = Dataset()
     data.json = json.dumps(self.data)
     with open(
             '{filePath}-{sheetName}.xls'.format(filePath=filePath,
                                                 sheetName=sheetName),
             'wb') as f:
         f.write(data.export('xls'))
Example #6
 def onExport(self):
     try:
         data = Dataset(*self._items, headers=self._titleLine)
         fileName = time.strftime('%m-%d-%H_%M_%S', time.localtime()) + '-performance.xls'
         with open(fileName, 'wb') as f:
             f.write(data.export('xls'))
             QMessageBox.information(self, 'Export succeeded!', 'Excel file name: ' + fileName)
     except Exception as err:
         QMessageBox.warning(self, 'Export error!', str(err))
Example #7
def clean_csv_headers(csv):
    """
    Remove commas, quotes, line breaks: anything that will screw
    up the translation from CSV -> database table. CSVKit, in particular,
    doesn't like header columns with these chars in them.
    """
    data = Dataset().load(csv, format="csv")
    headers = [re.sub("[,\"'\n]", "", h) for h in data.headers]

    new_data = Dataset(headers=headers)
    for row in data:
        new_data.append(row)
    return new_data.export("csv")
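
A quick illustration of what the function above does; the input string is hypothetical:

dirty = 'id,"amount, in USD"\n1,100\n'
print(clean_csv_headers(dirty))
# the offending header comes back as: id,amount in USD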
Example #8
    def test_file_to_dataset_incorrect(self):
        """If input file is not correctly decoded, returns an error."""
        dataset = Dataset()
        dataset.append_col(['row1', 'row2'])
        dataset.headers = ['col1']
        encoded_dataset = dataset.export('csv').encode('utf-16')

        bio = io.BytesIO(encoded_dataset)
        uploaded_file = self.create_uploaded_file(file=bio, )
        uploaded_dataset, error_msg = self.file_to_dataset(uploaded_file)

        self.assertIsNone(uploaded_dataset)
        self.assertIsNotNone(error_msg)
Example #9
    def handle(self, *args, **options):
        empty_related_projects = RelatedProject.objects\
            .select_related('project', 'related_project')\
            .filter(related_project__isnull=True, related_iati_id__exact='')
        empty_relations = RelatedProject.objects\
            .select_related('project', 'related_project')\
            .filter(relation__exact='')

        if options['delete']:
            empty_related_projects.delete()
            empty_relations.delete()
        else:
            problematic_relations = empty_related_projects.union(
                empty_relations).order_by('-project_id')
            dataset = Dataset()
            dataset.headers = (
                'project_id',
                'project_title',
                'project_date_end',
                'project_status',
                'program_title',
                'related_project_id',
                'related_project_title',
                'related_project_date_end',
                'related_project_status',
                'related_iati_id',
                'relation',
                'id',
            )
            for item in problematic_relations:
                project = item.project
                related_project = item.related_project
                program = project.get_program()
                dataset.append([
                    project.id,
                    project.title,
                    project.date_end_planned,
                    project.show_plain_status(),
                    program.title if program else None,
                    related_project.id if related_project else None,
                    related_project.title if related_project else None,
                    related_project.date_end_planned
                    if related_project else None,
                    related_project.show_plain_status()
                    if related_project else None,
                    item.related_iati_id,
                    item.iati_relation_unicode(),
                    item.id,
                ])
            print(dataset.export('csv'))
Example #10
def to_xls(root: Path, output_file: Path):
    data = Dataset()
    data.title = f"{root.name} CMS"
    data.headers = ['name', 'de', 'en', 'fr', 'it', 'uri']
    rows = to_dict_table(collect_all(root))

    for row in to_row_tuples(rows):
        data.append(row)

    if output_file is None:
        output_file = Path.cwd() / 'output.xls'

    with open(output_file, 'wb') as out:
        out.write(data.export('xls'))
Example #11
def csv_recover_password(modeladmin, request, queryset):
    headers = ('username', 'first_name', 'last_name', 'reset_password_link')
    data = TablibDataset(headers=headers)

    for user in queryset:
        data.append([
            user.username,
            user.first_name,
            user.last_name,
            get_reset_password_link(request, user)
        ])

    response = HttpResponse(data.export('csv'), 'text/csv')
    response['Content-Disposition'] = 'attachment; filename="users.csv"'
    return response
Example #12
def import_xls_data(resource, headers, data):
    xls_file_path = os.path.join(FIXTURE_DIR, 'data.xls')
    dataset = Dataset(*data, headers=headers)

    with open(xls_file_path, 'wb') as f:
        f.write(dataset.export('xls'))

    with open(xls_file_path, 'rb') as f:
        dataset = Dataset().load(f.read())
    os.remove(xls_file_path)

    try:
        return resource.import_data(dataset)
    except AttributeError:
        return resource.bulk_import_data(dataset)
Example #13
File: cli.py Project: cadeef/cade-task
def display_table(
    array: Sequence[str],
    headers: List[str],
    number_lines: bool = False,
    tablefmt: str = "fancy_grid",
) -> None:
    if len(array) == 0:
        display_error("No results found")
    else:
        data = Dataset()
        data.headers = headers
        for t in array:
            data.append([t])

        click.echo(
            data.export("cli", showindex=number_lines, tablefmt=tablefmt))
    def build_csv_from_data(self, form_data, response_data):
        id_to_label = self.get_header_maps(form_data)
        headers = ["id"]
        for c in id_to_label.values():
            headers.append(re.sub(r"[\,\n\r]+", "", c))

        data = Dataset(headers=headers)
        for response_info in response_data:
            response = response_info.get("responses", {})
            row_id = response_info["id"]
            row = [row_id]
            for pk in response.keys():
                value = response.get(pk)
                if isinstance(value, str) or not value:
                    row.append(value)
                # attachment
                elif isinstance(value, list) and value[0].get("filename"):
                    links = " ".join(
                        [self.attachment_link(rec) for rec in value])
                    row.append(links)
                elif value.get("checked"):
                    row.append(", ".join(value.get("checked")))
                elif value.get("other_text"):
                    row.append(value.get("other_text"))
                # this handles other_checked w/ blank response
                elif value.get("other_checked"):
                    row.append("(Other, blank)")
                # Screendoor dates come across like this:
                # {'day': '01', 'year': '2019', 'month': '01'}
                elif value.get("day") and value.get("year") \
                        and value.get("month"):
                    row.append("{year}-{month}-{day}".format(**value))
                # location, sometimes it's w/o state
                elif value.get("city") and value.get("country"):
                    state = value.get("state")
                    row.append("%s,%s%s" %
                               (value.get("city"), " %s, " %
                                state if state else " ", value.get("country")))
                else:
                    logger.error("Unhandled value type: %s (%s)." %
                                 (value, type(value)))
                    # logger.error("Response data structure: %s" % (
                    #     response
                    # ))
                    row.append(None)
            data.append(row)
        return data.export("csv")
Example #15
    def print_dict(self, data):
        """Print dataset generated by a command to the standard output"""
        dataset = Dataset()
        dataset.dict = data
        if dataset.height:
            if self.output == "table":
                click.echo(tabulate(dataset.dict, headers="keys"))
            else:
                # we will probably implement JSON output only in the long run
                # and get rid of the `tablib` dependency
                click.echo(dataset.export(self.output))

        click.echo(
            "\n({} emoji{})".format(dataset.height,
                                    "" if dataset.height == 1 else "s"),
            err=True,
        )
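
For reference, tablib's .dict property accepts a list of dicts whose keys become the headers, which is what print_dict relies on. A minimal sketch with invented data:

from tablib import Dataset

dataset = Dataset()
dataset.dict = [{'emoji': 'tada', 'count': 2}]  # keys become headers
print(dataset.height)          # 1
print(dataset.export('json'))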
Example #16
    def loadExcelWithEmployeesAndDoingPrediction(file):
        to_predict_list = []
        new_employees = []
        raw_data = file.read()
        imported_data = Dataset().load(raw_data, format='xlsx')
        employees = imported_data.export('json')
        employees_obj = json.loads(employees)

        for employee in employees_obj:

            if len(employee.keys()) > 0:

                to_predict_list.append(employee['satisfaction_level'])
                to_predict_list.append(employee['last_evaluation'])
                to_predict_list.append(employee['number_project'])
                to_predict_list.append(employee['average_montly_hours'])
                to_predict_list.append(employee['time_spend_company'])
                to_predict_list.append(employee['Work_accident'])
                to_predict_list.append(employee['promotion_last_5years'])
                to_predict_list.append(employee['salary'])

                if to_predict_list[0] is not None:

                    to_predict_list = list(map(float, to_predict_list))

                    result = PredictorHelper.predictionValue(
                        to_predict_list)[0]
                    if int(result) == 1:
                        employee[
                            'predictionText'] = 'The employee is going to leave the company'
                        employee['probabilidad'] = str(
                            PredictorHelper.predictionValue(to_predict_list)[1]
                            [1])
                    else:
                        employee[
                            'predictionText'] = 'The employee is not going to leave the company'
                        employee['probabilidad'] = str(
                            PredictorHelper.predictionValue(to_predict_list)[1]
                            [0])

                    new_employees.append(employee)

                to_predict_list = []

        return new_employees
Example #17
 def get_csv_from_url(self, sheet_url):
     """
     Return a CSV (text data) from a protected Google sheet URL.
     """
     sheet_id = extract_key_from_csv_url(sheet_url)
     values = self.get_sheet_values(sheet_id)
     headers = [re.sub("[:,\"'\n]", "", h) for h in values.pop(0)]
     logger.error("Sheet Headers: %s" % headers)
     # TODO: this should be shared across screendoor importer
     data = Dataset(headers=headers)
     n_headers = len(headers)
     for row in values:
         n_cols = len(row)
         if n_cols < n_headers:
             row += [""] * (n_headers - n_cols)
         data.append(row)
     csv_data = data.export("csv")
     return csv_data
Example #18
 def _prepare_xls_file(self):
     headers = ('id', 'Hospital name', 'Manufacturer name',
                'Hospital part #', 'Manufacturer part #', 'Serial number',
                'Lot number', 'Purchased date', 'Expiry date', 'Cost type',
                'Bulk discount percent', 'Bulk discount value',
                'Discount order', 'Discount start date',
                'Discount end date')
     data = [
         ('', 'UVMC', 'Medtronic', '', 'SESR01', 'PJN7204200', '',
          '2019-03-22', '', 'Unit cost', 15, 0, 1, '', ''),
         (self.item.id, 'UVMC', 'Medtronic', '80000', 'SESR01',
          'PJN7204267', '', '2019-03-28', '', 'Unit cost', 10, 0, 1, '',
          ''),
         ('', 'UVMC', 'Medtronic', '', 'SESR01', '', '31232123',
          '2019-03-22', '', 'System cost', 0, 20, 1, '', ''),
     ]
     dataset = Dataset(*data, headers=headers)
     with open(self.xls_file_path, 'wb') as f:
         f.write(dataset.export('xls'))
Example #19
    def test_file_to_dataset_correct(self):
        """If input file can be decoded returns correct dataset."""
        dataset = Dataset()
        dataset.append_col(['row1', 'row2'])
        dataset.headers = ['col1']
        encoded_dataset = dataset.export('csv').encode('utf-8')

        bio = io.BytesIO(encoded_dataset)
        uploaded_file = self.create_uploaded_file(file=bio, )
        uploaded_dataset, error_msg = self.file_to_dataset(uploaded_file)

        self.assertEqual(
            uploaded_dataset.dict,
            dataset.dict,
        )
        self.assertEqual(
            uploaded_dataset.headers,
            dataset.headers,
        )
        self.assertIsNone(error_msg)
Example #20
 def handle(self, *args, **options):
     the_date = options['date']
     dry_run = options['dry_run']
     writer = VerbosityAwareWriter(self.stdout, options['verbosity'])
     projects = Project.objects.filter(
         created_at__lt=the_date,
         title__exact='',
         publishingstatus__status=PublishingStatus.STATUS_UNPUBLISHED)
     if not dry_run:
         with transaction.atomic():
             updates = IndicatorPeriodData.objects.filter(
                 period__indicator__result__project__in=projects)
             writer.write(f"Deleting {updates.count()} period updates")
             updates.delete()
             iati_import = IatiActivityImport.objects.filter(
                 project__in=projects)
             writer.write(
                 f"Deleting {iati_import.count()} iati activity import")
             iati_import.delete()
             results = Result.objects.filter(project__in=projects)
             writer.write(f"Deleting {results.count()} results")
             results.delete()
             writer.write(f"Deleting {projects.count()} projects")
             projects.delete()
     else:
         if options['verbosity'] > 1:
             data = Dataset()
             data.headers = [
                 'project_id', 'project_title', 'is_published', 'created_at'
             ]
             for p in projects:
                 data.append(
                     [p.id, p.title,
                      p.is_published(), p.created_at])
             writer.write(data.export('csv'))
         writer.write(f'Found {projects.count()} projects to delete.')
Example #21
class Takeover(Module):
    """
    OneForAll多线程子域接管风险检查模块

    Example:
        python3 takeover.py --target www.example.com  --format csv run
        python3 takeover.py --target ./subdomains.txt --thread 10 run

    Note:
        参数format可选格式有'txt', 'rst', 'csv', 'tsv', 'json', 'yaml', 'html',
                          'jira', 'xls', 'xlsx', 'dbf', 'latex', 'ods'
        参数dpath为None默认使用OneForAll结果目录

    :param str target:  单个子域或者每行一个子域的文件路径(必需参数)
    :param int thread:  线程数(默认10)
    :param str format:  导出格式(默认xls)
    :param str dpath:   导出目录(默认None)
    """
    def __init__(self, target, thread=10, dpath=None, format='xls'):
        Module.__init__(self)
        self.subdomains = set()
        self.module = 'Check'
        self.source = 'Takeover'
        self.target = target
        self.thread = thread
        self.dpath = dpath
        self.format = format
        self.fingerprints = None
        self.domainq = Queue()
        self.cnames = list()
        self.results = Dataset()

    def save(self):
        logger.log('INFOR', 'Saving check results')
        if self.format == 'txt':
            data = str(self.results)
        else:
            data = self.results.export(self.format)
        fpath = self.dpath.joinpath(f'takeover.{self.format}')
        utils.save_data(fpath, data)

    def compare(self, subdomain, cname, responses):
        domain_resp = self.get('http://' + subdomain, check=False)
        cname_resp = self.get('http://' + cname, check=False)
        if domain_resp is None or cname_resp is None:
            return

        for resp in responses:
            if resp in domain_resp.text and resp in cname_resp.text:
                logger.log('ALERT', f'{subdomain} is at risk of subdomain takeover')
                self.results.append([subdomain, cname])
                break

    def check(self):
        while not self.domainq.empty():  # exit the thread once the domain queue is drained
            subdomain = self.domainq.get()  # fetch a domain from the queue
            cname = get_cname(subdomain)
            if cname is None:  # skip this domain instead of killing the worker thread
                continue
            maindomain = get_maindomain(cname)
            for fingerprint in self.fingerprints:
                cnames = fingerprint.get('cname')
                if maindomain not in cnames:
                    continue
                responses = fingerprint.get('response')
                self.compare(subdomain, cname, responses)

    def run(self):
        start = time.time()
        logger.log('INFOR', 'Checking subdomain takeover risk')
        self.format = utils.check_format(self.format)
        self.dpath = utils.check_dpath(self.dpath)
        self.subdomains = utils.get_domains(self.target)
        if self.subdomains:
            self.fingerprints = get_fingerprint()
            self.results.headers = ['subdomain', 'cname']
            for domain in self.subdomains:
                self.domainq.put(domain)
            threads = []
            for _ in range(self.thread):
                thread = Thread(target=self.check, daemon=True)
                thread.start()
                threads.append(thread)
            for thread in threads:
                thread.join()
            self.save()
        else:
            logger.log('FATAL', 'Failed to obtain domains')
        end = time.time()
        elapsed = round(end - start, 1)
        logger.log(
            'INFOR', f'{self.source} module took {elapsed} seconds, '
            f'found {len(self.results)} subdomains at risk of takeover')
Example #22
    def build_csv_from_data(self, form_data, project_id, form_id):
        id_to_label = self.get_header_maps(form_data)
        headers = ["id"]
        for c in id_to_label.values():
            headers.append(re.sub(r"[\,\n\r]+", "", c))

        # responder information, not included in the form fields, is
        # collected by screendoor. we want to extract this and append it
        # to the fields. also, don't explode on empty responses here.
        response_data = self.get_responses(project_id, form_id)
        first_response = next(response_data)
        if not first_response:
            logger.error("No records found for Screendoor import!")
            return

        first_responder = first_response.get("responder", {})
        if "email" in first_responder:
            headers.append("Responder email (ID: resp_email)")
        if "name" in first_responder:
            headers.append("Responder name (ID: resp_name)")

        data = Dataset(headers=headers)
        response_data = self.get_responses(project_id, form_id)
        for response_info in response_data:
            response = response_info.get("responses", {})
            row_id = response_info["id"]
            row = [row_id]
            for pk in response.keys():
                value = response.get(pk)
                if isinstance(value, str) or not value:
                    row.append(value)
                # attachment
                elif isinstance(value, list) and value[0].get("filename"):
                    links = " ".join(
                        [self.attachment_link(rec) for rec in value])
                    row.append(links)
                elif value.get("checked"):
                    row.append(", ".join(value.get("checked")))
                elif value.get("other_text"):
                    row.append(value.get("other_text"))
                # this handles other_checked w/ blank response
                elif value.get("other_checked"):
                    row.append("(Other, blank)")
                # Screendoor dates come across like this:
                # {'day': '01', 'year': '2019', 'month': '01'}
                elif value.get("day") and value.get("year") \
                        and value.get("month"):
                    row.append("{year}-{month}-{day}".format(**value))
                # location, sometimes it's w/o state
                elif value.get("city") and value.get("country"):
                    state = value.get("state")
                    row.append("%s,%s%s" %
                               (value.get("city"), " %s, " %
                                state if state else " ", value.get("country")))
                # time: {'am_pm': 'PM', 'hours': '05', 'minutes': '30'}
                elif value.get("am_pm"):
                    row.append("%s:%s %s" %
                               (value.get("hours", ""), value.get(
                                   "minutes", ""), value.get("am_pm", "")))
                else:
                    logger.error("Unhandled value type: %s (%s)." %
                                 (value, type(value)))
                    # logger.error("Response data structure: %s" % (
                    #     response
                    # ))
                    row.append(None)

            responder = response_info.get("responder", {})
            if "email" in first_responder:
                row.append(responder.get("email", ""))
            if "name" in first_responder:
                row.append(responder.get("name", ""))

            data.append(row)

        return data.export("csv")
Example #23
from tablib import Dataset
from datetime import date

d = Dataset()

d.append([date.today(), 'antani'])
d.append([date.today(), 'antani2'])
d.append([date.today(), 'antani3'])

d.headers = ['test', 'stringa']

with open('test.xlsx', 'wb') as f:
    f.write(d.export('xlsx'))
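
The format string passed to export() determines the bytes that get written, so it has to agree with the file extension: 'xlsx' bytes saved under a .xls name (or the reverse) give spreadsheet applications a file they may refuse to open.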
Example #24
class Takeover(Module):
    """
    OneForAll多线程子域接管风险检查模块

    Example:
        python3 takeover.py --target www.example.com  --format csv run
        python3 takeover.py --target ./subdomains.txt --thread 10 run

    Note:
        参数format可选格式有'txt', 'rst', 'csv', 'tsv', 'json', 'yaml', 'html',
                          'jira', 'xls', 'xlsx', 'dbf', 'latex', 'ods'
        参数path默认None使用OneForAll结果目录生成路径

    :param any target:  单个子域或者每行一个子域的文件路径(必需参数)
    :param int thread:  线程数(默认100)
    :param str format:  导出格式(默认csv)
    :param str path:    导出路径(默认None)
    """
    def __init__(self, target, thread=100, path=None, format='csv'):
        Module.__init__(self)
        self.subdomains = set()
        self.module = 'Check'
        self.source = 'Takeover'
        self.target = target
        self.thread = thread
        self.path = path
        self.format = format
        self.fingerprints = None
        self.subdomainq = Queue()
        self.cnames = list()
        self.results = Dataset()

    def save(self):
        logger.log('DEBUG', 'Saving check results')
        if self.format == 'txt':
            data = str(self.results)
        else:
            data = self.results.export(self.format)
        utils.save_data(self.path, data)

    def compare(self, subdomain, cname, responses):
        domain_resp = self.get('http://' + subdomain, check=False)
        cname_resp = self.get('http://' + cname, check=False)
        if domain_resp is None or cname_resp is None:
            return

        for resp in responses:
            if resp in domain_resp.text and resp in cname_resp.text:
                logger.log('ALERT', f'{subdomain} is at risk of subdomain takeover')
                self.results.append([subdomain, cname])
                break

    def worker(self, subdomain):
        cname = get_cname(subdomain)
        if cname is None:
            return
        maindomain = get_maindomain(cname)
        for fingerprint in self.fingerprints:
            cnames = fingerprint.get('cname')
            if maindomain not in cnames:
                continue
            responses = fingerprint.get('response')
            self.compare(subdomain, cname, responses)

    def check(self):
        while not self.subdomainq.empty():  # exit the thread once the domain queue is drained
            subdomain = self.subdomainq.get()  # fetch a domain from the queue
            self.worker(subdomain)
            self.subdomainq.task_done()

    def progress(self):
        # set up the progress bar
        bar = tqdm()
        bar.total = len(self.subdomains)
        bar.desc = 'Check Progress'
        bar.ncols = 80
        while True:
            done = bar.total - self.subdomainq.qsize()
            bar.n = done
            bar.update()
            if done == bar.total:  # exit once every queued subdomain has been checked
                break
        # bar.close()

    def run(self):
        start = time.time()
        logger.log('INFOR', f'Start running {self.source} module')
        self.subdomains = utils.get_domains(self.target)
        self.format = utils.check_format(self.format, len(self.subdomains))
        timestamp = utils.get_timestamp()
        name = f'takeover_check_result_{timestamp}'
        self.path = utils.check_path(self.path, name, self.format)
        if self.subdomains:
            logger.log('INFOR', 'Checking subdomain takeover risk')
            self.fingerprints = get_fingerprint()
            self.results.headers = ['subdomain', 'cname']
            # build the queue of subdomains to check
            for domain in self.subdomains:
                self.subdomainq.put(domain)
            # checker threads
            for _ in range(self.thread):
                check_thread = Thread(target=self.check, daemon=True)
                check_thread.start()
            # progress thread
            progress_thread = Thread(target=self.progress, daemon=True)
            progress_thread.start()

            self.subdomainq.join()
            self.save()
        else:
            logger.log('FATAL', 'Failed to obtain domains')
        end = time.time()
        elapse = round(end - start, 1)
        logger.log(
            'INFOR', f'{self.source} module took {elapse} seconds, '
            f'found {len(self.results)} subdomains at risk of takeover')
        logger.log('INFOR', f'Subdomain takeover check results: {self.path}')
        logger.log('INFOR', f'Finished running {self.source} module')
Example #25
class Takeover(Module):
    """
    OneForAll subdomain takeover module

    Example:
        python3 takeover.py --target www.example.com  --format csv run
        python3 takeover.py --targets ./subdomains.txt --thread 10 run

    Note:
        --format rst/csv/tsv/json/yaml/html/jira/xls/xlsx/dbf/latex/ods (result format)
        --path   Result directory (default directory is ./results)

    :param str  target:     One domain (target or targets must be provided)
    :param str  targets:    File path of one domain per line
    :param int thread:  number of threads (default 20)
    :param str format:  Result format (default csv)
    :param str path:    Result directory (default None)
    """
    def __init__(self,
                 target=None,
                 targets=None,
                 thread=20,
                 path=None,
                 format='csv'):
        Module.__init__(self)
        self.subdomains = set()
        self.module = 'Check'
        self.source = 'Takeover'
        self.target = target
        self.targets = targets
        self.thread = thread
        self.path = path
        self.format = format
        self.fingerprints = None
        self.subdomainq = Queue()
        self.cnames = list()
        self.results = Dataset()

    def save(self):
        logger.log('DEBUG', 'Saving results')
        if self.format == 'txt':
            data = str(self.results)
        else:
            data = self.results.export(self.format)
        utils.save_data(self.path, data)

    def compare(self, subdomain, cname, responses):
        domain_resp = self.get('http://' + subdomain, check=False)
        cname_resp = self.get('http://' + cname, check=False)
        if domain_resp is None or cname_resp is None:
            return

        for resp in responses:
            if resp in domain_resp.text and resp in cname_resp.text:
                logger.log('ALERT',
                           f'{subdomain} subdomain takeover threat found')
                self.results.append([subdomain, cname])
                break

    def worker(self, subdomain):
        cname = get_cname(subdomain)
        if cname is None:
            return
        main_domain = utils.get_main_domain(cname)
        for fingerprint in self.fingerprints:
            cnames = fingerprint.get('cname')
            if main_domain not in cnames:
                continue
            responses = fingerprint.get('response')
            self.compare(subdomain, cname, responses)

    def check(self):
        while not self.subdomainq.empty():  # exit the thread once the domain queue is drained
            subdomain = self.subdomainq.get()  # fetch a domain from the queue
            self.worker(subdomain)
            self.subdomainq.task_done()

    def progress(self):
        bar = tqdm()
        bar.total = len(self.subdomains)
        bar.desc = 'Check Progress'
        bar.ncols = 80
        while True:
            done = bar.total - self.subdomainq.qsize()
            bar.n = done
            bar.update()
            if done == bar.total:  # exit once every queued subdomain has been checked
                break

    def run(self):
        start = time.time()
        logger.log('INFOR', f'Start running {self.source} module')
        if isinstance(self.targets, set):
            self.subdomains = self.targets
        else:
            self.subdomains = utils.get_domains(self.target, self.targets)
        self.format = utils.check_format(self.format, len(self.subdomains))
        timestamp = utils.get_timestamp()
        name = f'takeover_check_result_{timestamp}'
        self.path = utils.check_path(self.path, name, self.format)
        if self.subdomains:
            logger.log('INFOR', f'Checking subdomain takeover')
            self.fingerprints = get_fingerprint()
            self.results.headers = ['subdomain', 'cname']
            # build the queue of subdomains to check
            for domain in self.subdomains:
                self.subdomainq.put(domain)
            # checker threads
            for _ in range(self.thread):
                check_thread = Thread(target=self.check, daemon=True)
                check_thread.start()
            # progress thread
            progress_thread = Thread(target=self.progress, daemon=True)
            progress_thread.start()

            self.subdomainq.join()
            self.save()
        else:
            logger.log('FATAL', 'Failed to obtain domains')
        end = time.time()
        elapse = round(end - start, 1)
        logger.log(
            'ALERT', f'{self.source} module took {elapse} seconds, '
            f'found {len(self.results)} subdomains at risk of takeover')
        logger.log('INFOR', f'Subdomain takeover results: {self.path}')
        logger.log('INFOR', f'Finished {self.source} module')
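
The three Takeover variants all lean on the same stdlib pattern: fill a queue.Queue, start daemon worker threads that drain it, then block until the work is done (thread.join() in Example #21, Queue.join() in Examples #24 and #25). A stripped-down sketch of that pattern, independent of OneForAll (the uppercase transform stands in for the real check):

from queue import Queue
from threading import Thread

q = Queue()
results = []

def worker():
    while not q.empty():
        item = q.get()
        results.append(item.upper())  # stand-in for the real takeover check
        q.task_done()

for domain in ['a.example.com', 'b.example.com']:
    q.put(domain)
for _ in range(4):
    Thread(target=worker, daemon=True).start()
q.join()  # returns once every queued item has been marked task_done()
print(results)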