def help():
    """Parse xssfork command-line options (API variant) and run a scan task.

    Exits early with an error log when --level is out of [1, 10], or when
    neither/both of --url and --readfile are given.
    """
    encode.init_encode()
    apiparser = optparse.OptionParser()
    apiparser.add_option("-u", "--url",
                         help="需要测试的链接 eg:http://xssfork.codersec.net/xssdemo.php?id=23",
                         action="store")
    apiparser.add_option("-D", "--destination",
                         help="输出位置链接 eg:http://xssfork.codersec.net/output.php",
                         action="store")
    apiparser.add_option("-l", "--level", help="扫描等级 eg:1-10",
                         action="store", default=1, type=int)
    apiparser.add_option("-c", "--cookie",
                         help="cookie eg:sessid=284EA45D5C14B2A;flag=1",
                         action="store")
    apiparser.add_option("-d", "--data",
                         help="post请求数据 eg:username=admin&pass=admin888",
                         action="store")
    # NOTE(review): this help text is identical to --data's and looks
    # copy-pasted; confirm the intended description for --readfile.
    apiparser.add_option("-r", "--readfile",
                         help="post请求数据 eg:username=admin&pass=admin888",
                         action="store")
    apiparser.add_option("-t", "--temper",
                         help="编码脚本 混合模式或者单个模式 eg:Big ",
                         action="store")
    apiparser.add_option("-m", "--model", help="扫描模式 eg:light | heavy",
                         action="store", default='light')
    apiparser.add_option("--ua", action="store",
                         help="User-Agent eg:Mozilla/5.0 Gecko/20100101 Firefox/54.0",
                         default=DEFAULT_UA)
    apiparser.add_option("--list", help="列出所有编码脚本", action="store_true")
    apiparser.add_option("--id", action="store", help=optparse.SUPPRESS_HELP)
    apiparser.add_option("--api", help=optparse.SUPPRESS_HELP, action="store_true")
    try:
        (args, _) = apiparser.parse_args()
    except UnicodeDecodeError:
        # Python 2 fallback: non-ASCII argv needs the default encoding
        # switched to utf-8 before re-parsing (reload/setdefaultencoding
        # only exist on Python 2).
        reload(sys)
        sys.setdefaultencoding('utf8')
        (args, _) = apiparser.parse_args()
    is_set_url = bool(args.url)            # idiomatic, was `True if ... else False`
    is_set_readfile = bool(args.readfile)
    if args.level < 1 or args.level > 10:
        logger = log.get_logger()
        logger.setLevel(logging.ERROR)
        logger.error('Error reason. The range of level settings is only between 1-10')
        exit()
    if args.list:
        print_temper()
    elif is_set_url ^ is_set_readfile:
        # Exactly one of --url / --readfile must be set.
        task_schedule = TaskSchedule(args.url, args.destination, args.level,
                                     args.cookie, args.data, args.readfile,
                                     args.temper, args.model, args.ua,
                                     args.id, args.api)
        task_schedule.main()
    else:
        logger = log.get_logger()
        logger.setLevel(logging.DEBUG)
        logger.error(u'错误原因1:url,readfile至少设置一个')
        logger.error(u'错误原因2:url,readfile不能同时设置')
        logger.info(u'查看帮组:python {} -h'.format(__file__))
def task_status(key, task_id):
    """Return a JSON string describing the state of the task *task_id*.

    Status values come from module-level constants: -1 for an unknown task,
    TASK_NOT_START, TASK_DONE, TASK_WORKKING otherwise.
    """
    logger = log.get_logger()
    if task_id not in xssfork_process_map:
        logger.info("task ID {} isn't existed".format(task_id))
        # Fix: message used a curly apostrophe ("isn’t"), inconsistent with
        # every other message in this module.
        return dumps({"status": -1, "msg": "task isn't existed"})
    # Hoist the repeated dict lookup.
    process = xssfork_process_map[task_id]
    if process.engine_process() is None:
        logger.info("task ID {} isn't started".format(task_id))
        return dumps({"status": TASK_NOT_START, "msg": "task isn't started"})
    if process.engine_has_terminated():
        logger.info("task ID {} has been done".format(task_id))
        return dumps({"status": TASK_DONE, "msg": "task has been done"})
    # Fix: log message read "task ID {} has is working".
    logger.info("task ID {} is working".format(task_id))
    return dumps({"status": TASK_WORKKING, "msg": "task is working"})
def kill_task(key, task_id):
    """Kill a running scan task; returns a JSON status string.

    "status" is "success" only when the task exists, has started, and has
    not yet terminated.
    """
    logger = log.get_logger()
    if task_id not in xssfork_process_map:
        logger.info("task ID {} isn't existed".format(task_id))
        # Fix: message used a curly apostrophe ("isn’t"), inconsistent with
        # every other message in this module.
        return dumps({"status": "false", "msg": "task isn't existed"})
    # Hoist the repeated dict lookup.
    process = xssfork_process_map[task_id]
    if process.engine_process() is None:
        logger.info("task ID {} isn't started".format(task_id))
        return dumps({"status": "false", "msg": "task isn't started"})
    if process.engine_has_terminated():  # dropped redundant `is True`
        logger.info("task ID {} has been done".format(task_id))
        return dumps({"status": "false", "msg": "task has been done"})
    process.engine_kill()
    logger.info("task ID {} will be killed".format(task_id))
    return dumps({"status": "success", "msg": "task will be killed"})
def start_task(key, task_id):
    """Start a previously created scan task and report the outcome as JSON.

    Starting only succeeds when the task exists and its engine process has
    never been launched; finished or running tasks report "fail".
    """
    logger = log.get_logger()
    if task_id not in xssfork_process_map:
        logger.error("sorry, task ID {} isn't existed".format(task_id))
        return dumps({"status": "fail", "msg": "task isn't existed"})
    # Look the process up once instead of re-indexing the map per check.
    process = xssfork_process_map[task_id]
    if process.engine_process() is None:
        process.engine_start()
        logger.info("task ID {} will start scan".format(task_id))
        return dumps({"status": "success", "msg": "task will start"})
    if process.engine_has_terminated():
        logger.error("task ID {} has been done".format(task_id))
        return dumps({"status": "fail", "msg": "task has been done"})
    logger.warning("task ID {} is working".format(task_id))
    return dumps({"status": "fail", "msg": "task is working"})
def create_task(key):
    """Create a scan task from the posted JSON body.

    Validates each posted option against the module-level `params` set,
    persists the task, and registers its process in `xssfork_process_map`.

    Returns a JSON string: {'status': 'success', 'task_id': ...} on success,
    {'status': 'fail', 'msg': ...} otherwise.
    """
    logger = log.get_logger()
    if request.json is None:
        logger.error("sorry,you cann't post empty data")
        return dumps({'status': "fail", 'msg': "you cann't post empty data"})
    xssfork_task = XssforkTask()
    for option, value in request.json.items():
        if option not in params:
            logger.error("sorry,you cann't post {} param".format(option))
            return dumps({'status': 'fail',
                          'msg': "you cann't post {} param".format(option)})
        xssfork_task.set_option(option, value)
    try:
        xssfork_task.set_option('time', get_current_time())
        xssfork_task.save()
        xssfork_task_id = xssfork_task.find_lastest_id()
        xssfork_process_map[xssfork_task_id] = XssForkProcess(xssfork_task_id, xssfork_task)
        logger.info("task ID {} provided to scan_start()".format(xssfork_task_id))
        return dumps({'status': 'success', 'task_id': xssfork_task_id})
    except XssforkTaskError:
        # Fix: the original `print_exc(file=open(...))` leaked the file
        # handle; a context manager guarantees it is closed.
        with open(EXCEPTION_LOG_PATH, 'a') as exc_log:
            traceback.print_exc(file=exc_log)
        logger.error("sorry, create task fail, See more information in {}".format(EXCEPTION_LOG_PATH))
        return dumps({'status': 'fail',
                      'msg': 'create task fail, See more information in {}'.format(EXCEPTION_LOG_PATH)})
def help():
    """Parse server CLI options and launch the xssfork web service.

    --refresh must literally be the string "True" or "False"; anything
    else logs an error and exits.
    """
    encode.init_encode()
    apiparser = optparse.OptionParser()
    apiparser.add_option("-p", "--port", help="开启服务的端口", default=2333)
    apiparser.add_option("-a", "--adapter", help="适配器 eg gevent or eventlet",
                         default="gevent")
    apiparser.add_option(
        "-r",
        "--refresh",
        help="更新服务器key并将原有扫描记录清空",
        default="False",
    )
    (args, _) = apiparser.parse_args()
    if args.refresh not in ['True', 'False']:
        logger = log.get_logger()
        logger.setLevel(logging.DEBUG)
        logger.error(u'refresh只能设置为True或者False')
        logger.info(u'查看帮组:python {} -h'.format(__file__))
        exit()
    # Idiomatic boolean conversion, was `True if ... else False`.
    refresh = args.refresh == "True"
    # --port may arrive as a string from the command line; coerce here.
    web_service.server(int(args.port), args.adapter, refresh)
import os
import sys
import sqlalchemy
import datetime

# Make the project root importable before pulling in `common.*`.
_S2DB_ROOT = os.path.normpath(
    os.path.join(os.path.abspath(os.path.dirname(__file__)), ".."))
sys.path.append(_S2DB_ROOT)

from common.config import config
from common.log import get_logger

logger = get_logger(__name__)

# Single module-level engine/connection shared by the helpers below;
# connects eagerly at import time.
engine = sqlalchemy.create_engine(config["db"]["uri"], echo=False)
connection = engine.connect()

DB_VERSION = 0


def db_insert(query, params=None):
    """Execute a write statement.

    Args:
        query: SQL text, may contain named bind parameters.
        params: dict of bind parameter values. Fix: was a mutable
            default argument (`params={}`); `None` sentinel used instead.
    """
    query = sqlalchemy.sql.text(query)
    connection.execute(query, params if params is not None else {})


def db_select(query, params=None):
    """Execute a read query and return all rows as a list of dicts.

    Args:
        query: SQL text, may contain named bind parameters.
        params: dict of bind parameter values (fix: no longer a mutable
            default argument).
    """
    query = sqlalchemy.sql.text(query)
    cursor = connection.execute(query, params if params is not None else {})
    return [dict(row.items()) for row in cursor.fetchall()]
""" https://resources.rackspace.net/docs#section/Getting-started/Quick-start:-CLI-SDK-tools """ from typing import Any, List, Dict, Tuple, Optional from common import log from common.clients.identity import IdentityAccount, IdentitySession logger = log.get_logger(__name__) class Zamboni: def __init__(self, endpoint: str, identity_account: IdentityAccount): self.__endpoint = endpoint self.__identity_account = identity_account self.fields = [ "id", # This fixes a pagination bug in Zamboni "name", "location", "provider_account_id", "body.name", "body._rackspace", "body._metadata", "body.availableField", "body.value", "body.config.instanceUuid", "body.config.uuid", "body.guest", ]
# ============================================================================== """Wrappers around tensorlm.model that only need the path to the .txt to do the whole training.""" import os from time import time import tensorflow as tf from common.log import get_logger from common.util import restore_possible from common.trainlog import TrainState from common.lstm_util import get_num_params from dataset import Vocabulary, Dataset from model import GeneratingLSTM LOGGER = get_logger(__name__) # Prefix of the files stored by tf.train.Saver MODEL_FILE_PREFIX = "model" class BaseLM: """Wrappers around tensorlm.model that only needs the path to the .txt to do the training.""" def __init__(self, tf_session, level, train_text_path, max_vocab_size, neurons_per_layer, num_layers, batch_size, num_timesteps, save_dir): """Creates a new language model without training it. Args: tf_session (tf.Session): The session to run the TF Variable initializer in. level (str): The level for tokenizing the text - either "char" or "word".
# -*- coding: utf-8 -*- from __future__ import print_function from __future__ import division from __future__ import unicode_literals import sqlite3 from common.log import get_logger g_databases = {} # TODO: integration to tf-agent or tf-manager logs LOG = get_logger(__name__) def get_connection(path, timeout=0.5): global g_databases if path not in g_databases: conn = sqlite3.connect(path, timeout=timeout) g_databases[path] = conn return g_databases[path] def close_connection(path): global g_databases if path in g_databases: g_databases.pop(path).close() def create(conn, sql):
def help():
    """Parse xssfork command-line options (plain variant) and run a scan task.

    Exits early with an error log when --level is out of [1, 10], or when
    neither/both of --url and --readfile are given.
    """
    encode.init_encode()
    apiparser = optparse.OptionParser()
    apiparser.add_option(
        "-u", "--url",
        help="需要测试的链接 eg:http://xssfork.codersec.net/xssdemo.php?id=23",
        action="store")
    apiparser.add_option(
        "-D", "--destination",
        help="输出位置链接 eg:http://xssfork.codersec.net/output.php",
        action="store")
    apiparser.add_option("-l", "--level", help="扫描等级 eg:1-10",
                         action="store", default=1, type=int)
    apiparser.add_option("-c", "--cookie",
                         help="cookie eg:sessid=284EA45D5C14B2A;flag=1",
                         action="store")
    apiparser.add_option("-d", "--data",
                         help="post请求数据 eg:username=admin&pass=admin888",
                         action="store")
    # NOTE(review): this help text is identical to --data's and looks
    # copy-pasted; confirm the intended description for --readfile.
    apiparser.add_option("-r", "--readfile",
                         help="post请求数据 eg:username=admin&pass=admin888",
                         action="store")
    apiparser.add_option("-t", "--temper",
                         help="编码脚本 混合模式或者单个模式 eg:Big ",
                         action="store")
    apiparser.add_option("-m", "--model", help="扫描模式 eg:light | heavy",
                         action="store", default='light')
    apiparser.add_option("--list", help="列出所有编码脚本", action="store_true")
    (args, _) = apiparser.parse_args()
    is_set_url = bool(args.url)            # idiomatic, was `True if ... else False`
    is_set_readfile = bool(args.readfile)
    if args.level < 1 or args.level > 10:
        logger = log.get_logger()
        logger.setLevel(logging.ERROR)
        logger.error('错误原因 level设置的范围只能在1-10之间')
        exit()
    if args.list:
        print_temper()
    elif is_set_url ^ is_set_readfile:
        # Exactly one of --url / --readfile must be set.
        task_schedule = TaskSchedule(args.url, args.destination, args.level,
                                     args.cookie, args.data, args.readfile,
                                     args.temper, args.model)
        task_schedule.main()
    else:
        logger = log.get_logger()
        logger.setLevel(logging.DEBUG)
        logger.error('错误原因1:url,readfile至少设置一个')
        logger.error('错误原因2:url,readfile不能同时设置')
        logger.info('查看帮组:python {} -h'.format(__file__))
"""Computing node service""" from common import log from cns.api import app SERVICE = "cns" logger, log_fd = log.get_logger(SERVICE) def manage(): from flask.ext.script import Manager manager = Manager(app) manager.run()
from typing import Any, Dict, Union, Tuple
from uuid import uuid4

import awsgi
import marshmallow
import structlog
from flask import Flask, json, globals
from flask_dotenv import DotEnv
from flask_rebar import errors

from common import log, constants
from controllers import job, host  # noqa: F401
from schemas.error import ErrorResponseSchema
from server.rebar import rebar

logger = log.get_logger(__file__)


def create_app() -> Flask:
    """Build and configure the Flask application for the current stage."""
    app = Flask(__name__)
    dot_env = DotEnv()
    # NOTE(review): `Path` and `os` are used below but are not imported in
    # this module's visible import block — confirm they are brought into
    # scope elsewhere, otherwise this raises NameError at call time.
    env_path = Path(os.path.abspath(__file__)).parent.parent
    stage = constants.STAGE
    # Anything that isn't prod loads the dev env file.
    if stage != "prod":
        stage = "dev"
    logger.debug("Initializing for stage", stage=stage)
    env_file = os.path.join(env_path, f".env.{stage}")
    dot_env.init_app(app, env_file=env_file, verbose_mode=True)
    rebar.init_app(app)
    logger.debug("Routes configured", routes=app.url_map)
def test_log(self):
    """Logging via common.log must produce output in the configured log file."""
    import common.log as log
    test_logger = log.get_logger('test')
    test_logger.info('TEST!!!')
    with open(self.log_path, 'r') as handle:
        contents = handle.read()
    self.assertTrue(len(contents) > 1)
# -*- coding: utf-8 -*-
from .lstm import LSTMRecommender
from common.log import get_logger

__all__ = ['LSTMRecommender']

LOG = get_logger('tf-agent-server')


def create_model(model_name, config):
    """Instantiate the model class named *model_name* with *config*.

    Logs an error and returns None when the lookup (or the construction)
    raises KeyError, matching the original best-effort behavior.
    """
    try:
        model_cls = globals()[model_name]
        return model_cls(config)
    except KeyError:
        LOG.error('Not found model: {}'.format(model_name))
        return None