Example 1
def export_all(format, path, datas):
    """
    将所有结果数据导出到一个文件

    :param str format: 导出文件格式
    :param str path: 导出文件路径
    :param list datas: 待导出的结果数据
    """
    format = check_format(format, len(datas))
    timestamp = get_timestamp()
    name = f'all_subdomain_result_{timestamp}'
    path = check_path(path, name, format)
    logger.log('INFOR', f'The subdomain results of all main domains: {path}')
    row_list = list()
    for row in datas:
        row.pop('header')
        row.pop('response')
        row.pop('module')
        row.pop('source')
        row.pop('elapsed')
        row.pop('count')
        keys = row.keys()
        values = row.values()
        if format in {'xls', 'xlsx'}:
            values = check_value(values)
        row_list.append(Record(keys, values))
    rows = RecordCollection(iter(row_list))
    content = rows.export(format)
    save_data(path, content)
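
Example 1 (like Examples 3 and 5 below) follows one pattern: drop the keys that should not be exported, wrap each remaining dict in a records.Record, gather the records into a RecordCollection, and let export() (which delegates to tablib) render the chosen format. A minimal, self-contained sketch of that pattern; the sample rows and the csv choice are illustrative assumptions, not part of the original code:

import records  # Record and RecordCollection live in the `records` package

# Hypothetical sample rows standing in for the caller's `datas` list.
datas = [
    {'subdomain': 'a.example.com', 'ip': '1.1.1.1', 'header': '...', 'response': '...'},
    {'subdomain': 'b.example.com', 'ip': '2.2.2.2', 'header': '...', 'response': '...'},
]

row_list = []
for row in datas:
    row.pop('header', None)    # drop the columns that should not be exported
    row.pop('response', None)
    row_list.append(records.Record(list(row.keys()), list(row.values())))

rows = records.RecordCollection(iter(row_list))
print(rows.export('csv'))      # export() is backed by tablib, so 'json', 'xls', 'xlsx', ... also work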
Example 2
    def query(self, sql, columns=None, **kwargs):

        headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.8",
            "Connection": "keep-alive",
            "Host": "192.168.0.159:8007",
            "Referer": "http://192.168.0.159:8007/clustering",
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) "
                          "AppleWebKit/537.36 (KHTML, like Gecko) "
                          "Chrome/61.0.3163.79 Safari/537.36"
        }

        self.params.update({"q": sql})

        rep = requests.get(self.db_url, params=self.params, headers=headers)
        content = rep.text.split('\n')

        rows_gen = (Record(list(data.keys()), list(data.values()))
                    for data in (json.loads(row) for row in content if row.strip()))

        results = RecordCollection(rows_gen)

        return results
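
The response handled above is expected to contain one JSON object per line, and each line becomes one Record. A small stand-alone sketch of that conversion, using made-up response content instead of a real HTTP call:

import json
from records import Record, RecordCollection

# Hypothetical body: one JSON document per line, as the query() above assumes.
content = '{"host": "db1", "load": 0.42}\n{"host": "db2", "load": 0.35}\n'.split('\n')

rows_gen = (Record(list(data.keys()), list(data.values()))
            for data in (json.loads(row) for row in content if row.strip()))
results = RecordCollection(rows_gen)

print(results.all(as_dict=True))   # [{'host': 'db1', 'load': 0.42}, {'host': 'db2', 'load': 0.35}]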
Example 3
def export_all_results(path, name, format, datas):
    path = check_path(path, name, format)
    logger.log('ALERT', f'The subdomain result for all main domains: {path}')
    row_list = list()
    for row in datas:
        if 'header' in row:
            row.pop('header')
        if 'response' in row:
            row.pop('response')
        keys = row.keys()
        values = row.values()
        if format in {'xls', 'xlsx'}:
            values = check_value(values)
        row_list.append(Record(keys, values))
    rows = RecordCollection(iter(row_list))
    content = rows.export(format)
    save_data(path, content)
Example 4
def get_ranking_of_user(
        name: str,
        record_collection: records.RecordCollection) -> Optional[int]:
    """Finds the ranking of a user in SELECT result"""
    for index, user_entry in enumerate(record_collection.all(as_dict=True)):
        if name == user_entry.get("name"):
            return index + 1
    return None
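
get_ranking_of_user walks the SELECT result in order and returns the 1-based position of the first row whose name column matches, or None. A hedged usage sketch; the database URL, table, and data are invented for illustration:

import records

db = records.Database('sqlite:///scores.db')        # hypothetical database
rows = db.query('SELECT name FROM users ORDER BY score DESC')

rank = get_ranking_of_user('alice', rows)           # 1 for the top row, None if absent
print(rank)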
Example 5
def export_all(format, datas):
    format = check_format(format, len(datas))
    dpath = check_dpath()
    timestamp = get_timestamp()
    fpath = dpath.joinpath(f'all_subdomain_{timestamp}.{format}')
    row_list = list()
    for row in datas:
        row.pop('header')
        row.pop('response')
        row.pop('module')
        row.pop('source')
        row.pop('elapsed')
        row.pop('count')
        keys = row.keys()
        values = row.values()
        if format in {'xls', 'xlsx'}:
            values = check_value(values)
        row_list.append(Record(keys, values))
    rows = RecordCollection(iter(row_list))
    content = rows.export(format)
    save_data(fpath, content)
Example 6
    def query(self, sql, columns=None, **kwargs):

        rows = self.conn.execute(sql)

        row_gen = (Record(columns, row) for row in rows)

        # Convert psycopg2 results to RecordCollection.
        results = RecordCollection(row_gen)
        # Fetch all results if desired:
        # if fetchall:
        #     results.all()

        return results
Example 7
def iquery(self, query, batches=100):
    cursor = self._conn.execute(text(query))

    columns = cursor.keys()
    history = []
    for i, row in enumerate(cursor, start=1):
        history.extend(
            list(RecordCollection(
                (Record(columns, _row) for _row in (row, )))))
        if i % batches == 0:
            yield history
            history.clear()
    if history:
        yield history
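
iquery streams the result set in chunks: it yields a list of Record objects every `batches` rows and once more for the remainder. A consumption sketch under the assumption that the method lives on some wrapper object `db`; note that the same `history` list is cleared after each yield, so a caller that wants to keep a batch beyond one loop iteration should copy it first:

for batch in db.iquery('SELECT id, name FROM users', batches=500):   # hypothetical query
    for record in batch:            # each item is a records.Record built from `columns`
        handle(record)              # hypothetical per-row handler
    # keep = list(batch) here if the rows must survive past this iteration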
Example 8
    def query(self, sql, columns=None, **kwargs):
        try:
            dsl = json.loads(sql)
            index_name = kwargs.pop("index_name", None)
            type_name = kwargs.pop("type_name", None)
            data_gen = (Record(line['_source'].keys(),
                               line['_source'].values())
                        for line in self.db.search(body=dsl,
                                                   index=index_name,
                                                   doc_type=type_name,
                                                   _source_include=columns)
                        ['hits']['hits'])
            result = RecordCollection(data_gen)
            return result
        except Exception as e:
            print(e)
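
This query() variant expects the sql argument to actually be an Elasticsearch query DSL document serialized as JSON, and it flattens each hit's _source into a Record. A hedged calling sketch; the client object, index name, and DSL are assumptions made up for illustration:

import json

dsl = json.dumps({"query": {"match_all": {}}, "size": 10})            # hypothetical DSL
rows = client.query(dsl, columns=['name', 'age'],
                    index_name='users', type_name='_doc')             # hypothetical client/index
for record in rows:
    print(record.as_dict())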