Example #1
    def __init__(self, os_properties):
        # Read OS properties
        self.os_properties = os_properties
        micropython.alloc_emergency_exception_buf(100)

        # Logger
        self.logger = Logger(self.os_properties['log_level'])
Example #2
    def execute(self):
        text = self.form.txtEdit.toPlainText()
        filewriter = FileWriter(self.form.userfile)
        filewriter.open()
        filewriter.writeLine(text)
        filewriter.close()
        logger = Logger()
        logger.log("saveText()")
Example #3
    def execute(self):
        reply = QMessageBox.question(self.form, 'Message',
                                     "Are you sure to clear text?",
                                     QMessageBox.Yes | QMessageBox.No,
                                     QMessageBox.No)

        if reply == QMessageBox.Yes:
            self.form.txtEdit.setText('')
            logger = Logger()
            logger.log("clearText()")
Example #4
    def execute(self):
        filereader = FileReader(self.form.userfile)
        filereader.open()
        text = ''
        while True:
            temp_str = filereader.readLine()
            if not temp_str:
                break
            text += temp_str
        self.form.txtEdit.setText(text)
        logger = Logger()
        logger.log("readTextFromFile()")
        filereader.close()
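
Examples #2-#4 only rely on a no-argument constructor and a log() method. The sketch below is a minimal, hypothetical Logger satisfying that interface, assuming timestamped append-to-file behaviour; it is not the project's actual lib.Logger.

import datetime

class Logger:
    """Minimal stand-in for the Logger used in Examples #2-#4 (illustrative only)."""

    def __init__(self, path="app.log"):
        # The default log file path is an assumption
        self.path = path

    def log(self, message):
        # Append a timestamped line to the log file
        stamp = datetime.datetime.now().isoformat(timespec="seconds")
        with open(self.path, "a") as handle:
            handle.write("{} {}\n".format(stamp, message))

# Usage mirroring the examples above
logger = Logger()
logger.log("saveText()")
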
Example #5
def dqn_run_experiments():
    for i in range(NUM_EXP):
        # Make environment
        env = rlcard.make('blackjack', config={'seed': i})
        eval_env = rlcard.make('blackjack', config={'seed': i})

        # Set the iteration counts and how frequently we evaluate/save the plot

        # The initial memory size
        memory_init_size = 100

        # Train the agent every X steps
        train_every = 1

        # The paths for saving the logs and learning curves
        log_dir = f"{DQN_RES_DIR}/{i}"

        # Set up the agents
        agent = DQNAgent('dqn',
                         action_num=env.action_num,
                         replay_memory_init_size=memory_init_size,
                         train_every=train_every,
                         state_shape=env.state_shape,
                         mlp_layers=[128, 256, 512],
                         debug=True)
        env.set_agents([agent])
        eval_env.set_agents([agent])


        # Init a Logger to plot the learning curve
        logger = Logger(log_dir, debug=True)

        for episode in range(DQN_TRAINING_DURATION):

            # Generate data from the environment
            trajectories, _ = env.run(is_training=True)

            # Feed transitions into agent memory, and train the agent
            for ts in trajectories[0]:
                agent.feed(ts)

            # Evaluate the performance. Play with random agents.
            if episode % EVALUATE_EVERY == 0:
                logger.log_performance(env.timestep, tournament(eval_env, EVALUATE_NUM_OF_HANDS)[0])

            # Close files in the logger
            # logger.close_files()

        # Plot the learning curve
        logger.plot(f"DQN_{i}")
    BaseAgent.plot_avg(DQN_RES_DIR, "DQN")
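
The snippet references several module-level constants (NUM_EXP, DQN_RES_DIR, DQN_TRAINING_DURATION, EVALUATE_EVERY, EVALUATE_NUM_OF_HANDS) defined elsewhere in the project; the values below are only a plausible sketch of what they might be, not the project's actual settings.

# Assumed experiment constants (illustrative values, not taken from the project)
NUM_EXP = 5                       # number of independent DQN runs
DQN_RES_DIR = "results/dqn"       # directory for logs and learning curves
DQN_TRAINING_DURATION = 10000     # training episodes per run
EVALUATE_EVERY = 100              # evaluate the agent every N episodes
EVALUATE_NUM_OF_HANDS = 1000      # hands played per evaluation tournament
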
Example #6
    def __init__(self):
        """
            Initializes and sets required default global variables by
            collecting metadata from information.yaml
        """
        self.ru_yaml = YAML()
        self.ru_yaml.width = 4096
        self.logger_object = Logger()
        self.webex_wrapper = WebexNotifierWrapper()
        self.logger = logging.getLogger('chatbot_logger')
        self.logger.info("Starting default initialization...")
        self.webex_auth_headers = {'content-type': 'application/json'}
        try:
            # Collecting metadata from information.yaml
            with open('information.yaml', 'r') as ifh:
                self.doc = yaml.safe_load(ifh)

                # Getting Webex Teams Room details
                self.webex_url = str(
                    self.doc["webex_teams_details"]["webex_url"])
                self.webex_room_id = str(
                    self.doc["webex_teams_details"]["webex_room_id"])

                # Getting Webex Teams Bot details
                self.auth_token = str(
                    self.doc["webex_teams_details"]["auth_token"])
                self.webex_bot_name = str(
                    self.doc["webex_teams_details"]["webex_bot_name"])

            # Setting Webex connectivity parameters
            self.webex_auth_headers[
                'authorization'] = 'Bearer ' + self.auth_token
            self.webex_auth_headers['Accept'] = 'application/json'
            # Removing '+ "&max=1"' for now from webex_teams_get_url bcoz of
            # the webex list messages API issue -- including max parameter
            # impacts the sort order of the message results when bot calls
            # list messages api
            self.webex_teams_get_url = self.webex_url + "?mentionedPeople=me" \
                                       + "&roomId=" + self.webex_room_id

            self.logger.info("Default initialization complete!!!")
        except Exception as e:
            self.logger.error("Error -- {}".format(e))
Example #7
	def __init__(self, name, ticker, period, live_mode, periods_needed=200):
		"""
		- name: string, the name of the bot
		- ticker: string, the ticker formatted like that: ASSET1/ASSET2
		- period: string, the period on which the loop will be set, and the resolution of the candles
		- live_mode: bool, should we launch the live loop and start trading live
		- periods_needed: int, the number of candles you will get every loop, optional
		"""
		self.live_mode = live_mode
		self.name = name
		self.ticker = ticker
		self.period_text = period
		self.periods_needed = periods_needed
		self.offset_seconds = 10
		if (not self.name in config.get_config()):
			print("❌ Cannot instantiate bot: no config entry")
			exit(1)
		self.config = config.get_config()[self.name]
		if (not "capitalAllowed" in self.config):
			print("❌ Cannot instantiate bot: no 'capitalAllowed' property")
			exit(1)
		try:
			self.logger = Logger(self.name, live_mode)
		except:
			print("❌ Cannot connect to the log DB, are you sure it's running?")
			raise
		if (self.live_mode):
			self.data = Data(self.name)
		else:
			self.data = Data(self.name + "-test")
		self.exchange = Exchange(self.logger, self.data, self.config['capitalAllowed'], live_mode, self.ticker, self.period_text)
		try:
			self.period = period_matching[period]
		except:
			print("Available periods: 1m, 3m, 5m, 15m, 30m, 1h, 2h, 3h, 4h, 1d, 1w")
			raise
		self.logger.log("ℹ️", f"Bot {self.name} started with a period of {period}")
		self.logger.log("ℹ️", f"Capital allowed: {self.config['capitalAllowed']}%")
		self.setup()
		if (self.live_mode):
			self.preloop()
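
The constructor looks up period_matching[period], which is defined outside the snippet. Given the periods listed in the error message, a plausible mapping from period strings to seconds might look like the sketch below; the actual table (and whether it maps to seconds at all) is an assumption.

# Assumed period-string -> seconds mapping (illustrative only)
period_matching = {
    "1m": 60, "3m": 180, "5m": 300, "15m": 900, "30m": 1800,
    "1h": 3600, "2h": 7200, "3h": 10800, "4h": 14400,
    "1d": 86400, "1w": 604800,
}
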
Example #8
    def __init__(self, env):
        """
        Parameters
        ----------
        env : str
            the environment that the current instance is running in
        """
        print("[ENDPOINTS] Initializing...")
        # initialize libraries
        self._env = env
        self._db = DB(self._env, self._workers)
        self._logger = Logger(self._db, self._env)
        self._crypto = Crypto()
        # initialize Flask
        self._app = Flask(__name__)
        self._app.json_encoder = CustomJSONEncoder
        self._api = Api(self._app)
        self._app.before_request(self.detectAuthorization)
        self._app.after_request(self.finishRequest)
        for url in self._endpoints:
            self.addResource(self._endpoints[url], url)
        print("[ENDPOINTS] Done.")
Example #9
    def __init__(self, env=gym.make('Blackjack-v0'), log_dir=None):
        self._env = env
        self.q = defaultdict(lambda: np.zeros(self._env.action_space.n))
        self.policy = None
        self.eval_policy = None
        self.log_dir = log_dir
        self.logger = Logger(self.log_dir, debug=False)
        # Adding run function to Gym env
        if isinstance(self._env, BlackjackEnv):
            def run(is_training=False):
                observation = self._env.reset()
                while True:
                    if (self.eval_policy is None) or \
                            (observation not in self.eval_policy):
                        action = np.random.choice(
                            np.arange(env.action_space.n))
                    else:
                        action = np.argmax(self.eval_policy[observation])
                    observation, reward, done, _ = self._env.step(action)
                    if done:
                        return _, np.asarray([int(reward)])

            self._env.run = run
            self._env.player_num = 1
Example #10
                        help='date to continue for',
                        default=MODEL_DATE)
    parser.add_argument('--paper-note-path',
                        default='../paper-notes/data/words')
    parser.add_argument('--model-epoch',
                        help='epoch to continue for',
                        default=MODEL_EPOCH,
                        type=int)
    args = parser.parse_args()

    # TRAINING
    LOG_NAME = '{}-{}'.format("otf-iam-paper", args.model_date)
    model_folder = os.path.join(Constants.MODELS_PATH, LOG_NAME)
    models_path = os.path.join(model_folder,
                               'model-{}'.format(args.model_epoch))
    logger = Logger()
    config = Configuration.load(model_folder, "algorithm")
    algorithm = HtrNet(config['algo_config'])
    dataset = PreparedDataset.PreparedDataset(config['dataset'], False,
                                              config['data_config'])

    algorithm.configure(batch_size=config['batch'],
                        learning_rate=config['learning_rate'],
                        sequence_length=dataset.max_length,
                        image_height=dataset.meta["height"],
                        image_width=dataset.meta["width"],
                        vocab_length=dataset.vocab_length,
                        channels=dataset.channels,
                        class_learning_rate=config.default(
                            'class_learning_rate', config['learning_rate']))
    executor = Executor(algorithm, True, config, logger=logger)
Example #11
def main(n, config, seasonality, log_dir, log_prefix):
    """
    Main loop that runs a simulation. This simulation can be configured by passing
    a configuration dictionary, and specifying where all logs will be written to.

    Parameters
    ----------
    n: int
        The Nth simulation.
    config: dict
        Configuration for the simulation. Should contain the following keys:
        - servers:      List of dictionaries, describing a server pool.
        - process:      Sequence of kinds of servers, describing how a process within
                        the simulation runs.
        - runtime:      Until when the simulation should run.
        - max_volume:   Maximum number of events.
    seasonality: Seasonality
        Seasonality object to use for the simulation. This defines the intervals
        between events.
    log_dir: string
        Path pointing to where all logs should be written.
    log_prefix: string
        Prefix of every log file.

    Returns
    -------
    str
        The base name of the log files written for this simulation.
    """
    # we need a new environment which we can run.
    environment = Environment()

    # we need a server pool
    servers = MultiServers()

    # iterate over all of the server configurations that
    # we received from the client
    for server in config['servers']:

        # append a new server pool to the multiserver system
        servers.append(
            Servers(environment,
                    size=server['size'],
                    capacity=server['capacity'],
                    kind=server['kind']))

    # we need a logger that will log all events that happen in the simulation
    name = "{0}_{1:04d}_{2}".format(log_prefix, n,
                                    datetime.now().strftime("%Y-%m-%d_%H-%M"))
    logger = Logger(name, directory=log_dir, show_stdout=False, usequeue=False)

    # we also need a logger for all error events that happen in the simulation
    error_logger = Logger(f"error-{name}",
                          directory=log_dir,
                          show_stdout=False)

    # Start QueueListener
    if hasattr(logger, "listener"):
        logger.listener.start()

    # Enter first line for correct .csv headers
    logger.log(
        'Time;Server;Message_type;CPU Usage;Memory Usage;Latency;Transaction_ID;To_Server;Message'
    )
    error_logger.log('Time;Server;Error type;Start-Stop')

    # we can use the logger for the simulation, so we know where all logs will be written
    environment.logger(logger)
    environment.logger(error_logger, type="error")

    # we need a new form of seasonality
    seasonality = Seasonality(seasonality, max_volume=config["max_volume"])

    # now, we can put the process in the simulation
    Processor(environment,
              servers,
              seasonality=seasonality,
              kinds=config['process'])

    # run the simulation with a certain runtime (runtime). this runtime is not equivalent
    # to the current time (measurements). this should be the seasonality of the system.
    # for example, day or week.
    environment.run(until=int(config['runtime']))

    # Stop QueueListener
    if hasattr(logger, "listener"):
        logger.listener.stop()

    return name
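
Putting the documented keys together, a call to main() might look like the sketch below; the server kinds, seasonality file and directories are placeholders, and the snippet assumes the surrounding module (Environment, Seasonality, Processor, Logger) is importable.

# Hypothetical invocation of main() with a config matching the documented keys
config = {
    "servers": [
        {"size": 10, "capacity": 10, "kind": "regular"},
        {"size": 5, "capacity": 20, "kind": "balance"},
        {"size": 5, "capacity": 20, "kind": "pay"},
    ],
    "process": ["regular", "balance", "pay"],
    "runtime": 1440,       # e.g. one simulated day in minutes (assumption)
    "max_volume": 100000,  # maximum number of events
}

# The seasonality argument is whatever the module's Seasonality class expects,
# e.g. a path to an intervals file (assumed here).
log_name = main(n=1, config=config, seasonality="data/seasonality.csv",
                log_dir="logs", log_prefix="sim")
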
Example #12
# ----------------------------------------------------------------------------------------------------------------------
# Initialize OS
# ----------------------------------------------------------------------------------------------------------------------
from lib.Kernel import Kernel
from lib.toolkit import load_properties, determine_preferred_wifi
# Start-up Kernel
kernel = Kernel(load_properties("conf/os.properties"))
log = kernel.logger

# ----------------------------------------------------------------------------------------------------------------------
# Logger
# ----------------------------------------------------------------------------------------------------------------------
import gc
from lib.Logger import Logger
log = Logger("DEBUG")

log.info("Hello!")
log.error("Critical Issue!!")
log.debug("Free memory: " + str(gc.mem_free()))

# ----------------------------------------------------------------------------------------------------------------------
# Update DuckDNS
# ----------------------------------------------------------------------------------------------------------------------
from lib.toolkit import update_duck_dns

# Update DuckDNS service
update_duck_dns("mydomain", "mytoken", "myip")

# ----------------------------------------------------------------------------------------------------------------------
#
Example #13
def main():
    banner()
    opts = CmdLineParser().cmd_parser()

    python_version = platform.python_version()
    if not python_version.startswith('3'):
        ColorFormatter.fatal('此脚本只能运行在 Python3 之中')
        sys.exit(1)

    if opts.show_config:
        space = "-"
        line_len_list = []
        line_1 = "| # ['database']"
        line_2 = "| DATABASE_USER = '******'".format(DATABASE_USER)
        line_3 = "| DATABASE_PASSWORD = '******'".format(DATABASE_PASSWORD)
        line_4 = "| DATABASE = '{}'".format(DATABASE)
        line_5 = "| TABLE_NAME = '{}'".format(TABLE_NAME)

        line_len_list.append(len(line_2))
        line_len_list.append(len(line_3))
        line_len_list.append(len(line_4))
        line_len_list.append(len(line_5))
        line_len_list.sort(reverse=True)
        longest_line = line_len_list[0]

        print(colored('Show database config', 'white'))

        print(colored(space * (longest_line + 2), 'white'))
        chajia = longest_line - len(line_1)
        print(colored("{}{} |".format(line_1, chajia * ' '), 'white'))
        chajia = longest_line - len(line_2)
        print(colored("{}{} |".format(line_2, chajia * ' '), 'white'))
        chajia = longest_line - len(line_3)
        print(colored("{}{} |".format(line_3, chajia * ' '), 'white'))
        chajia = longest_line - len(line_4)
        print(colored("{}{} |".format(line_4, chajia * ' '), 'white'))
        chajia = longest_line - len(line_5)
        print(colored("{}{} |".format(line_5, chajia * ' '), 'white'))
        print(colored(space * (longest_line + 2), 'white'))

        # Exit the program cleanly
        sys.exit(0)

    # control log level, default is 1
    log_level = str(opts.log_level)
    if log_level == '1':
        log_level = 'WARNING'
    elif log_level == '2':
        log_level = 'INFO'
    elif log_level == '3':
        log_level = 'DEBUG'
    else:
        log_level = 'DEBUG'

    # setting Logger object
    logger = Logger(log_level=log_level)
    # debug.log should contain all log messages; if log_level is not set to 3 on the command line, debug messages will not appear in debug.log
    debug_logger = logger.get_logger(logger_name='debug_logger')
    # error.log only contains error messages
    error_logger = logger.get_logger(logger_name='error_logger')

    # Initialize the database
    try:
        database = Database(host='127.0.0.1',
                            database=opts.database_name,
                            logger=logger)
    except pymysql.err.InternalError as e:
        ColorFormatter.error('Init database InternalError: {}'.format(e))
        debug_logger.error('Init database InternalError: {}'.format(e))
        error_logger.error('Init database InternalError: {}'.format(e))
        if 'Unknown database' in str(e):
            ColorFormatter.fatal(
                'Please open the lib/settings.py file and move to database section content, modify '
                'DATABASE parameter and try again, for example see below: ')
            error_string = """
----------------------------------------
| # ['database']                       |
| DATABASE_USER = '******' |
| DATABASE_PASSWORD = '******'  |
| DATABASE = 'testdb'                  |
| TABLE_NAME = 'test_table'            |
----------------------------------------
            """
            ColorFormatter.fatal(error_string)
        sys.exit(1)
    except Exception as e:
        ColorFormatter.error('Init database Unknown Error: {}'.format(e))
        debug_logger.error('Init database Unknown Error: {}'.format(e))
        error_logger.error('Init database Unknown Error: {}'.format(e))
        sys.exit(1)

    # Run the program
    try:
        if not len(sys.argv[1:]):
            ColorFormatter.fatal(
                "You failed to provide an option, redirecting to help menu")
            debug_logger.debug(
                'You failed to provide an option, redirecting to help menu')
            # Pause for 2 seconds before showing the help banner
            time.sleep(2)
            print()
            CmdLineParser().cmd_parser(get_help=True)

        else:
            if opts.drop_table:
                prompt_result = ColorFormatter.prompt(
                    'Are you sure to drop "{}" table?(y/N)'.format(
                        opts.drop_table),
                    opts='y/n')
                debug_logger.debug('type --drop-table option')
                if prompt_result == 'y':
                    # result = db.create_table('大爷', ['id', 'name', 'age'])
                    result = database.drop_table(opts.drop_table)
                    debug_logger.warning('input y')
                    if result:
                        ColorFormatter.info('Drop table {} successful'.format(
                            opts.drop_table))
                        debug_logger.warning('Drop table {} successful'.format(
                            opts.drop_table))
                    else:
                        ColorFormatter.error('Drop table {} failed'.format(
                            opts.drop_table))
                        error_logger.error('Drop table {} failed'.format(
                            opts.drop_table))
                        debug_logger.error('Drop table {} failed'.format(
                            opts.drop_table))

                # Exit the program normally after the drop table completes
                sys.exit(0)

            if opts.csv_to_database:
                csv_file_path = opts.csv_to_database

                if not os.path.isfile(csv_file_path):
                    # The csv file does not exist
                    debug_logger.warning(
                        'File "{}" not exists'.format(csv_file_path))
                    ColorFormatter.fatal(
                        'File "{}" not exists'.format(csv_file_path))
                    time.sleep(1)
                    ColorFormatter.fatal('Program exit')
                    debug_logger.warning('Program exit')
                    sys.exit(1)

                else:
                    if opts.fast:
                        start_time = time.time()

                        # Before using multiple cores, split the large file into 4 parts, plus one extra part for the remainder
                        ColorFormatter.info('启用多进程导入数据库')
                        ColorFormatter.info('开始分割原数据文件')

                        # Before splitting, delete all files under the split directory
                        cmd = 'rm -rf split && mkdir split'
                        debug_logger.debug('开始运行 {} 命令'.format(cmd))
                        p = subprocess.Popen(cmd,
                                             stderr=subprocess.PIPE,
                                             stdout=subprocess.PIPE,
                                             shell=True)
                        (stdout, stderr) = p.communicate()
                        return_code = p.returncode
                        if return_code != 0 or stderr:
                            ColorFormatter.error(
                                '删除split文件时出错, 出错信息为: {}'.format(
                                    stderr.strip()))
                            debug_logger.error(
                                '删除split文件时出错, 出错信息为: {}'.format(
                                    stderr.strip()))
                            error_logger.error(
                                '删除split文件时出错, 出错信息为: {}'.format(
                                    stderr.strip()))
                            # Abnormal exit
                            sys.exit(1)
                        else:
                            ColorFormatter.success('初始化split文件夹成功')
                            debug_logger.debug(
                                '初始化split文件夹成功, 返回码: {}, 输出为: {}'.format(
                                    return_code, stdout))
                            debug_logger.info('初始化split文件夹成功')

                        # Start splitting
                        split_result = split_file(csv_file_path, log_level)
                        if split_result:
                            ColorFormatter.success('原数据文件分割完成')
                            debug_logger.info('原数据文件分割完成')
                        else:
                            ColorFormatter.error('原数据文件分割失败')
                            ColorFormatter.fatal('异常, 退出程序')
                            debug_logger.error('原数据文件分割失败')
                            error_logger.error('原数据文件分割失败')
                            sys.exit(1)
                        # Splitting finished

                        # Multiprocessing: use all CPU cores together
                        table_name = TABLE_NAME
                        process_list = []

                        for i in range(1, os.cpu_count() + 1):
                            process = Process(target=handle_data_to_database,
                                              args=(
                                                  i,
                                                  database,
                                                  table_name,
                                                  debug_logger,
                                                  error_logger,
                                                  opts.skip_error,
                                              ))
                            process_list.append(process)
                            process.start()

                        # Count the total number of lines in the csv file
                        cmd = "wc -l split/split_file_* | grep total | awk '{print $1}'"
                        debug_logger.debug('开始运行 {} 命令'.format(cmd))
                        p = subprocess.Popen(cmd,
                                             stderr=subprocess.PIPE,
                                             stdout=subprocess.PIPE,
                                             shell=True)
                        (stdout, stderr) = p.communicate()
                        try:
                            total_lines = int(stdout.strip())
                        except:
                            total_lines = 0
                        return_code = p.returncode
                        if return_code != 0 or stderr:
                            ColorFormatter.error(
                                '计算文件 "{}" 总行数 时出错, 出错信息为: {}'.format(
                                    csv_file_path, stderr.strip()))
                            debug_logger.error(
                                '计算文件 "{}" 总行数 时出错, 出错信息为: {}'.format(
                                    csv_file_path, stderr.strip()))
                            error_logger.error(
                                '计算文件 "{}" 总行数 时出错, 出错信息为: {}'.format(
                                    csv_file_path, stderr.strip()))
                            ColorFormatter.fatal(
                                'Please contact author or look through logs/error.log file to examine error place'
                            )
                            # Abnormal exit
                            sys.exit(1)
                        else:
                            ColorFormatter.info(
                                '计算文件 "{}" 总行数结束, 一共 {} 行'.format(
                                    csv_file_path, str(total_lines)))
                            debug_logger.debug(
                                '计算文件 "{}" 总行数命令结果, 返回码: {}, 输出为: {}'.format(
                                    csv_file_path, return_code,
                                    str(total_lines)))
                            debug_logger.info(
                                '计算需要分割文件 "{}" 行数结束, 一共 {} 行'.format(
                                    csv_file_path, str(total_lines)))

                        # Progress bar object
                        progress_bar = ProgressBar(total_line=total_lines,
                                                   description='正在插入数据: ')
                        while True:
                            # Read the progress-count files every 2 seconds
                            time.sleep(2)
                            all_progress = 0
                            for suffix_name in range(1, os.cpu_count() + 1):
                                record_line_file = os.path.join(
                                    SPLIT_PATH, 'record_line_{}.txt'.format(
                                        str(suffix_name)))
                                try:
                                    with open(record_line_file, 'r') as r:
                                        per_line = r.readline()
                                    all_progress += int(per_line)
                                except:
                                    # The file may not have been written yet at the start
                                    pass

                            # Print the progress bar
                            progress_bar.handle_multiprocessing_progress(
                                current_progress=all_progress)

                            if all_progress >= total_lines:
                                # This check may be inaccurate and the loop could get stuck here
                                break

                        for _ in process_list:
                            _.join()

                        end_time = time.time()
                        print(
                            colored('总共用时: {}'.format(end_time - start_time),
                                    'white'))

                        # Remove the split folder
                        cmd = "rm -rf split"
                        debug_logger.debug('开始运行 {} 命令'.format(cmd))
                        p = subprocess.Popen(cmd,
                                             stderr=subprocess.PIPE,
                                             stdout=subprocess.PIPE,
                                             shell=True)
                        (stdout, stderr) = p.communicate()
                        return_code = p.returncode
                        if return_code != 0 or stderr:
                            ColorFormatter.error(
                                '清除 split 文件夹时出错, 出错信息为: {}'.format(
                                    stderr.strip()))
                            debug_logger.error(
                                '清除 split 文件夹时出错, 出错信息为: {}'.format(
                                    stderr.strip()))
                            error_logger.error(
                                '清除 split 文件夹时出错, 出错信息为: {}'.format(
                                    stderr.strip()))
                            ColorFormatter.fatal(
                                'Please contact author or look through logs/error.log file to examine error place'
                            )
                            # Abnormal exit
                            sys.exit(1)
                        else:
                            ColorFormatter.info('清除 split 文件夹成功')
                            debug_logger.debug('清除 split 文件夹成功')

                        ColorFormatter.success('插入数据完成')
                        debug_logger.info('insert data to database done')

                        # Show summary information
                        # Multiprocessing cannot report the success and failure counts; leave it for now

                    else:
                        # Single process, very slow
                        start_time = time.time()
                        table_name = TABLE_NAME

                        with open(csv_file_path, 'r') as r:
                            # The csv file here is comma-separated
                            line = csv.reader(r,
                                              delimiter=',',
                                              quoting=csv.QUOTE_NONE)
                            csv_file_path = '\ '.join(csv_file_path.split())
                            columns_str = ''

                            ColorFormatter.info(
                                '开始计算csv文件 {} 的行数'.format(csv_file_path))
                            debug_logger.info(
                                '开始计算csv文件 {} 的行数'.format(csv_file_path))
                            # Count the lines of the csv file
                            cmd = "wc -l %s | awk '{print $1}'" % csv_file_path
                            debug_logger.debug('开始运行 {} 命令'.format(cmd))
                            p = subprocess.Popen(cmd,
                                                 stderr=subprocess.PIPE,
                                                 stdout=subprocess.PIPE,
                                                 shell=True)
                            (stdout, stderr) = p.communicate()
                            return_code = p.returncode
                            if return_code != 0 or stderr:
                                ColorFormatter.error(
                                    '计算csv文件 "{}" 行数时出错, 出错信息为: {}'.format(
                                        csv_file_path, stderr.strip()))
                                debug_logger.error(
                                    '计算csv文件 "{}" 行数时出错, 出错信息为: {}'.format(
                                        csv_file_path, stderr.strip()))
                                error_logger.error(
                                    '计算csv文件 "{}" 行数时出错, 出错信息为: {}'.format(
                                        csv_file_path, stderr.strip()))
                                # Abnormal exit
                                sys.exit(1)
                            else:
                                ColorFormatter.info(
                                    '计算 "{}" 文件行数结束, 一共 "{}" 行'.format(
                                        csv_file_path, stdout.strip()))
                                debug_logger.debug(
                                    '计算 "{}" 文件行数命令结果, 返回码: {}, 输出为: {}'.
                                    format(csv_file_path, return_code,
                                           stdout.strip()))
                                csv_file_line = int(stdout.strip())
                                debug_logger.info(
                                    '计算 "{}" 文件行数结束, 一共 "{}" 行'.format(
                                        csv_file_path, csv_file_line))

                            # Progress bar object
                            progress_bar = ProgressBar(
                                total_line=csv_file_line,
                                description='正在插入数据: ')

                            count = 1
                            for all_cols in line:
                                # Skip empty lines
                                if all_cols:
                                    if count == 1:
                                        # First row: get all the column names, then create the database table
                                        columns_str = ", ".join(all_cols)

                                        debug_logger.debug(
                                            'create table {}'.format(
                                                table_name))
                                        create_result = database.create_table(
                                            table_name=table_name,
                                            table_column_list=all_cols)
                                        if create_result == 'True':
                                            debug_logger.info(
                                                'create table {} successful'.
                                                format(table_name))
                                            ColorFormatter.success(
                                                '创建表 "{}" 成功'.format(
                                                    table_name))
                                        elif 'already exists' in create_result or 'Duplicate column name' in create_result:
                                            debug_logger.warning(
                                                'table "{}" has been existed'.
                                                format(table_name))
                                            ColorFormatter.info(
                                                '表 "{}" 已经存在'.format(
                                                    table_name))
                                        else:
                                            debug_logger.error(
                                                'create table "{}" failed'.
                                                format(table_name))
                                            error_logger.error(
                                                'create table "{}" failed'.
                                                format(table_name))
                                            ColorFormatter.error(
                                                '创建表 "{}" 失败'.format(
                                                    table_name))

                                    else:
                                        # Not the first row
                                        # Strip columns containing \r\n
                                        all_cols = escape_file_string(all_cols)
                                        # Join each column's data into one row
                                        data_list = ', '.join([
                                            '"{}"'.format(_) for _ in all_cols
                                        ])
                                        database.insert_data(
                                            table_name=table_name,
                                            column_list=columns_str,
                                            data_list=data_list,
                                            skip_error=opts.skip_error)

                                    progress_bar.handle_progress()
                                    count += 1

                            # Commit the transaction to insert the data
                            debug_logger.debug('execute insert sql commit')
                            database.execute_commit()
                            debug_logger.info(
                                'execute insert sql commit successful')

                            # Print the total time taken to insert the data
                            end_time = time.time()
                            print(
                                colored(
                                    '总共用时: {:.2f}秒'.format(end_time -
                                                           start_time),
                                    'white'))

                        r.close()

                        ColorFormatter.success('插入数据完成')
                        debug_logger.info('insert data to database done')

                        # Show summary information
                        ColorFormatter.info(
                            '总共插入 "{}" 条数据, 成功 "{}" 条, 失败 "{}" 条'.format(
                                database.insert_total_count,
                                database.insert_success_count,
                                database.insert_failed_count))
                        debug_logger.info(
                            '总共插入 "{}" 条数据, 成功 "{}" 条, 失败 "{}" 条'.format(
                                database.insert_total_count,
                                database.insert_success_count,
                                database.insert_failed_count))

                # Exit cleanly
                sys.exit(0)

            if opts.txt_to_database:
                txt_files = opts.txt_to_database
                table_name = opts.table_name
                skip_error = opts.skip_error
                # Used to separate the columns of the txt file
                separator = opts.separator
                column_list = opts.column_list

                if not column_list:
                    ColorFormatter.error(
                        'Must with -c option if you want to use txt-to-database option'
                    )
                    ColorFormatter.fatal('sleep 2s redirect help page')
                    time.sleep(2)
                    print()
                    CmdLineParser().cmd_parser(get_help=True)

                if not table_name:
                    table_name = TABLE_NAME

                if separator == '\\t':
                    separator = '\t'
                insert_txt_to_database(txt_files=txt_files,
                                       separator=separator,
                                       database_obj=database,
                                       column_list=column_list,
                                       debug_logger=debug_logger,
                                       error_logger=error_logger,
                                       table_name=table_name,
                                       skip_error=skip_error)

                # Exit the program normally
                sys.exit(0)

            if opts.mysql_command:
                cmd = opts.mysql_command
                dict_result = database.execute_command(cmd)
                for i in dict_result:
                    print(i)

            if opts.clean_cache:
                ColorFormatter.warning('Clean log cache')
                debug_logger.warning('Clean log cache')
                try:
                    os.remove(os.path.join(LOG_PATH, 'error.log'))
                    os.remove(os.path.join(LOG_PATH, 'debug.log'))
                except:
                    pass
                ColorFormatter.success('Clean log cache successful')
                debug_logger.info('Clean log cache success')

                # Exit the program normally after cleaning the log cache
                sys.exit(0)

    except KeyboardInterrupt as e:
        ColorFormatter.fatal('user abort')
        debug_logger.error('user abort: {}'.format(e))
        error_logger.error('user abort: {}'.format(e))

    except Exception as e:
        ColorFormatter.error('Total program Unknown error: {}, type: {}'.format(
            e, type(e)))
        ColorFormatter.fatal('Please contact author')
        debug_logger.error('Total program Unknown error: {}, type: {}'.format(
            e, type(e)))
        error_logger.error('Total program Unknown error: {}, type: {}'.format(
            e, type(e)))
Example #14
from lib.Exchange import Exchange
from lib.Logger import Logger
import time

log = Logger("MACD")

log.balance(100, "USDT")

# exchange = Exchange(log, 10, "BTC/USDT", "1m")
# exchange.buy()
# exchange.sell()

# log.order('buy', 60123, 31)
# time.sleep(10)
# log.order('sell', 61003, 36)
# time.sleep(20)
# log.order('buy', 61129, 33)
# time.sleep(13)
# log.order('sell', 60890, 29)

# log.pnl(0.1, 20)
# time.sleep(10)
# log.pnl(-0.4, -6)
# time.sleep(20)
# log.pnl(1.4, 130)
Example #15
#!/usr/bin/env python
# author: samren
import logging
import traceback
import unittest
import HTMLTestRunner
import time
from lib.Logger import Logger
from testcases.cases_login_logout.admin_login_logout import Bugfree管理员登录退出

if __name__ == '__main__':
    logger = Logger('./log/logger.log', logging.INFO)
    logging.info("本次测试开始执行,以下是详细日志")
    try:
        suite = unittest.TestSuite()  # create a new suite (test suite)
        loader = unittest.TestLoader()  # create a loader to add test cases to the suite in a custom way
        suite.addTests(loader.loadTestsFromTestCase(
            Bugfree管理员登录退出))  # load all methods of the test class into the suite
        # unittest.TextTestRunner(verbosity=2).run(suite) # run the suite with unittest
        fp = open(
            'reports/report_bugfree_{0}.html'.format(
                time.strftime("%Y-%m-%d %H-%M-%S")), 'wb')
        runner = HTMLTestRunner.HTMLTestRunner(
            stream=fp, title='Bugfree的测试报告', description='Bugfree的所有测试用例执行细节')
        runner.run(suite)
        logging.info("测试顺利结束^_^ ")
    except Exception:
        """print_exc() 把异常输出到屏幕上,而format_exc() 把异常返回成字符串"""
        traceback.print_exc()
        logging.error(traceback.format_exc())
        logging.error("测试异常终止")
Example #16
    def __init__(self, config={}, globalConfig={}):
        self.config = Configuration(config)
        self.globalConfig = Configuration(globalConfig)
        self._parse_config()
        self.logger = Logger()
        self.config()
Example #17
    def simulation():
        """
        Function to install handlers on the /simulation path. This allows for
        requesting simulation data or starting a new simulation.

        Parameters
        ----------
        POST:

            servers: list
                List containing configurations for a server pool as dicts.
                { capacity: int, size: int, kind: string }
                For example, { size: 10, capacity: 10, kind: 'regular' }.

            process: list
                List specifying how a process should go (from server to server).
                This should contain a sequence of server kinds.
                For example, ["regular", "balance", "pay"].

            runtime: int
                Runtime of the simulation (defined by simpy package).

        Returns
        -------
        GET: dict
        POST: int
        """
        if request.method == "POST":

            # nonlocal use of the simulation count
            nonlocal simc

            # increment the simulation count
            simc += 1

            # we need a new environment which we can run.
            environment = Environment()

            # we need a server pool
            servers = MultiServers()

            # iterate over all of the server configurations that
            # we received from the client
            for kind in request.form['kinds'].split(','):

                # append a new server pool to the multiserver system
                servers.append(
                    Servers(environment,
                            size=int(request.form['size']),
                            capacity=int(request.form['capacity']),
                            kind=kind.strip()))

            # Get the current date and time to append to the logger file name
            log_timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M")

            # now that we have an output dir, we can construct our logger which
            # we can use for the simulation
            name = "{0}_{1:04d}_{2}".format(file_prefix, simc, log_timestamp)
            logger = Logger(name, directory=LOG_PATH)

            # we also need a logger for all error events that happen in the simulation
            error_logger = Logger(f"error-{name}", directory=LOG_PATH)

            # Enter first line for correct .csv headers
            logger.info(
                'Time;Server;Message_type;CPU Usage;Memory Usage;Latency;Transaction_ID;To_Server;Message'
            )
            error_logger.info('Time;Server;Error type;Start-Stop')

            # we can use the logger for the simulation, so we know where all logs will be written
            environment.logger(logger)
            environment.logger(error_logger, type="error")

            # we need a new form of seasonality
            seasonality = Seasonality(
                join(Seasonality_folder, Seasonality_file),
                max_volume=int(request.form['max_volume']))

            # now, we can put the process in the simulation
            Processor(environment,
                      servers,
                      seasonality=seasonality,
                      kinds=[
                          kind.strip()
                          for kind in request.form['process'].split(',')
                      ])

            # run the simulation with a certain runtime (runtime). this runtime is not equivalent
            # to the current time (measurements). this should be the seasonality of the system.
            # for example, day or week.
            environment.run(until=int(request.form['runtime']))

            # expose the id of the simulation
            return jsonify(simc)

        if request.method == "GET":

            logfile_id = None
            if 'id' in request.args:
                logfile_id = "{:04d}".format(int(request.args.get('id')))

            # Scan the logfile directory
            list_of_files = glob.glob(os.path.join(LOG_PATH, 'log_*.csv'))

            # Return only the filename to get no errors with old functions
            log_filenames = [
                os.path.basename(filename) for filename in list_of_files
            ]

            if log_filenames:

                logfile_ids = [f.split('_')[1] for f in log_filenames]
                name_id_dict = dict(zip(logfile_ids, log_filenames))

                if logfile_id in logfile_ids:
                    # Logfile associated to given ID was successfully found
                    return jsonify({
                        "data": name_id_dict[logfile_id],
                        "message": "success"
                    })

                else:
                    # No logfile associated to given ID was found
                    return jsonify(
                        {"message": "No logfile (.csv) with given ID exists."})
            else:
                # No logfiles found (/logs is empty)
                return jsonify({"message": "No logfiles were found in /logs."})
Example #18
# ----------------------------------------------------------------------------------------------------------------------
# Initialize OS
# ----------------------------------------------------------------------------------------------------------------------
from lib.Kernel import Kernel
from lib.toolkit import load_properties
# Start-up Kernel
kernel = Kernel(load_properties("conf/os.properties"))
log = kernel.logger

# ----------------------------------------------------------------------------------------------------------------------
# Logger
# ----------------------------------------------------------------------------------------------------------------------
import gc
from lib.Logger import Logger

log = Logger()

log.info("Hello!")
log.error("Critical Issue!!")
log.debug("Free memory: " + str(gc.free_mem()))

# ----------------------------------------------------------------------------------------------------------------------
# Update DuckDNS
# ----------------------------------------------------------------------------------------------------------------------
from lib.toolkit import update_duck_dns

# Update DuckDNS service
update_duck_dns("mydomain", "mytoken", "192.168.0.10")

# ----------------------------------------------------------------------------------------------------------------------
#
Example #19
from lib.EncryptUtil import EncryptUtil
pub_key_path = dir_sign + '/lib/pub.key'
pri_key_path = dir_sign + '/lib/pri.key'
enc = EncryptUtil(pub_key_path, pri_key_path)

from django.shortcuts import render
import json
from django.http import HttpResponse
from lib.Logger import Logger
import time
###### 1. Receive the POST request and decrypt it
######## 2. Extract custId, custNm, recevieUrl, generate a new traceNo and assemble the POST message returned to pjs
######## 3. POST to the corresponding pjs environment according to recevieUrl
######## 4. Assemble the XML message and send it to the pjs front-end machine (not implemented for now)

log_debug = Logger('all.log', level='debug')
log_error = Logger('error.log', level='error')

dict_t036 = {
    "orgCode": "105584099990002",
    "custId": "",
    "traceNo": "",
    "reqSn": '',
    "retcode": "00000",
    "errCode": "0000",
    "errMsg": "处理成功",
    "settleDay": "",
    "retMsg": "充值确认交易成功."
}

Example #20
def job51_shoot(username, password):
    """智联招聘简历投递"""
    print username + "   " + password
    #智能等待页面完成加载
    zl = Job51(username, password)
    #zl.login()
    zl.search_job()
    zl.custom_select_job()


if __name__ == "__main__":
    d1 = datetime.datetime.now()
    # Create the log file
    ZHAOPIN_SITE = {'zhilian': zhilian_shoot}
    now = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime(time.time()))
    logger = Logger("./log/test_shoot_%s.log" % now,
                    loglevel=logging.INFO).getlog()
    count = len(open('stu_data.txt', 'rU').readlines())
    user_dict = get_user_dict_from_file('stu_data.txt')

    for k, v in user_dict.items():
        print(k, v)

        try:
            #cjol_shoot(k, v)
            #zhilian_shoot(k, v)
            job51_shoot(k, v)
            logger.info(k + ' is success!!!')
        except Exception as e:
            traceback.print_exc()
            logger.error(k + ' is failed!!!')
        exit(0)
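
get_user_dict_from_file() is not shown in this snippet; a minimal version that pairs usernames and passwords from stu_data.txt might look like the sketch below (the one-credential-per-line, comma-separated format is an assumption).

# Hypothetical helper: read "username,password" lines into a dict
def get_user_dict_from_file(path):
    user_dict = {}
    with open(path, 'r') as handle:
        for line in handle:
            line = line.strip()
            if not line:
                continue
            username, password = line.split(',', 1)
            user_dict[username.strip()] = password.strip()
    return user_dict
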
Example #21
import time
import unittest

from lib.HTMLTestRunner import HTMLTestRunner
from lib.Logger import Logger
from testcase.admin_login_logout.admin_login_correction import AdminLoginCorrection
from lib.SendEmail import send_email

FROM_ADDR = u"*****@*****.**"
FROM_PSWD = u"qwer1234"  # 163设置的第三方授权码
TO_ADDR = u"*****@*****.**"


def suites():
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    suite.addTests(loader.loadTestsFromTestCase(AdminLoginCorrection))
    return suite


if __name__ == "__main__":
    logger = Logger().getlog()
    logger.info('start testcase...')
    report_path = 'result/test_result_%s.html' % time.strftime(
        "%Y-%m-%d %H-%M-%S")

    fp = open(report_path, 'wb')
    runner = HTMLTestRunner(stream=fp,
                            title=u"测试报告",
                            description=u"测试用例执行情况: ")
    runner.run(suites())
    fp.close()
    send_email(FROM_ADDR, FROM_PSWD, TO_ADDR, u"测试报告", report_path)

    logger.info('stop testcase...')
Example #22
from lib.Logger import Logger
import logging


def suites():
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    suite.addTests(loader.loadTestsFromTestCase(WeatherTest))
    #suite.addTests(loader.loadTestsFromTestCase(BugfreeImportFile))
    # suite.addTests(loader.loadTestsFromTestCase(ProductAdd))
    #suite.addTests(loader.loadTestsFromTestCase(LoginLogoutTest))
    return suite


if __name__ == "__main__":
    logger = Logger(loglevel=logging.ERROR).getlog()
    logger.info('日志开始')

    try:
        suite = suites()
        fp = open(
            './reports/results_%s.html' % time.strftime("%Y-%m-%d %H-%M-%S"),
            'wb')
        runner = HTMLTestRunner(stream=fp,
                                title=u'接口测试报告',
                                description=u"测试用例执行情况:")
        runner.run(suite)
    except Exception as e:
        raise e
    finally:
        fp.close()
Example #23
from testcases.weather import WeatherTest
from lib.Logger import Logger
import logging

def suites():
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    suite.addTests(loader.loadTestsFromTestCase(WeatherTest))
    #suite.addTests(loader.loadTestsFromTestCase(BugfreeImportFile))
    # suite.addTests(loader.loadTestsFromTestCase(ProductAdd))
    #suite.addTests(loader.loadTestsFromTestCase(LoginLogoutTest))
    return suite


if __name__ == "__main__":
    logger = Logger(loglevel=logging.INFO).getlog()
    logger.info(u'日志开始')

    try:
        suite = suites()
        fp = open('./reports/results_%s.html' % time.strftime("%Y-%m-%d %H-%M-%S"), 'wb')
        runner = HTMLTestRunner(
            stream=fp,
            title=u'接口测试报告',
            description=u"测试用例执行情况:")
        runner.run(suite)
    except Exception as e:
        raise e
    finally:
        fp.close()
    logging.info(u'日志结束')
Example #24
def split_file(file_path, log_level, split_count=os.cpu_count()):
    """
    Line endings may differ between operating systems, which can make the data "grow" after splitting.
    For example:
    if one row of data is: CHEN^M GUIDE,,,VSA,USER,NAME, Python treats ^M as a newline, so the data becomes two rows:
    1. CHEN
    2.  GUIDE,,,VSA,USER,NAME,

    Such rows can only be treated as bad data and imported manually via error.log, but this case should be rare, so do not worry about it.

    :param file_path: path of the file to split
    :param log_level: log level
    :param split_count: how many parts to split into
    :return: Boolean, success or failure
    """

    try:
        # setting Logger object
        logger = Logger(log_level=log_level)
        # debug.log should contain all log messages; if log_level is not set to 3 on the command line, debug messages will not appear in debug.log
        debug_logger = logger.get_logger(logger_name='debug_logger')
        # error.log only contains error messages
        error_logger = logger.get_logger(logger_name='error_logger')
        # normalize the file path
        file_path = '\ '.join(file_path.split())

        ColorFormatter.info('开始计算需要分割的文件 "{}" 的行数'.format(file_path))
        debug_logger.info('开始计算需要分割的文件 "{}" 的行数'.format(file_path))
        # Count the lines of the csv file
        cmd = "wc -l %s | awk '{print $1}'" % file_path
        debug_logger.debug('开始运行 {} 命令'.format(cmd))
        p = subprocess.Popen(cmd,
                             stderr=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             shell=True)
        (stdout, stderr) = p.communicate()
        return_code = p.returncode
        if return_code != 0:
            ColorFormatter.error('计算需要分割文件 "{}" 行数时出错, 出错信息为: {}'.format(
                file_path, stderr.strip()))
            debug_logger.error('计算需要分割文件 "{}" 行数时出错, 出错信息为: {}'.format(
                file_path, stderr.strip()))
            error_logger.error('计算需要分割文件 "{}" 行数时出错, 出错信息为: {}'.format(
                file_path, stderr.strip()))
            ColorFormatter.fatal(
                'Please contact author or look through logs/error.log file to examine error place'
            )
            # Abnormal exit
            sys.exit(1)
        else:
            ColorFormatter.info('计算需要分割文件 "{}" 行数结束, 一共 {} 行'.format(
                file_path, str(stdout.strip())))
            debug_logger.debug('计算需要分割文件 "{}" 行数命令结果, 返回码: {}, 输出为: {}'.format(
                file_path, return_code, stdout.strip()))
            file_line = int(stdout.strip())
            debug_logger.info('计算需要分割文件 "{}" 行数结束, 一共 {} 行'.format(
                file_path, str(file_line)))

        if file_line > 0:
            per_file_line = int(file_line / split_count)
        else:
            per_file_line = 1

        # Counter
        flag = 0
        # Split-file index
        name = 1
        # Buffer for the current chunk of data
        data_list = []

        ColorFormatter.info('开始分割文件')
        debug_logger.info('开始分割文件')

        file_path = ''.join(file_path.split('\\'))
        try:
            with open(file_path, 'r') as r:
                for line in r:
                    flag += 1
                    data_list.append(line)

                    # When the configured per-file line count is reached, write out one split file, until everything is split
                    if flag == per_file_line:
                        split_file = os.path.join(
                            SPLIT_PATH, 'split_file_{}.txt'.format(str(name)))
                        with open(split_file, 'w+') as f:
                            for data in data_list:
                                f.write(data)
                        ColorFormatter.info("文件{}: {} 分割完成".format(
                            name, split_file))
                        debug_logger.info("文件{}: {} 分割完成".format(
                            name, split_file))

                        # Re-initialize after each split
                        name += 1
                        flag = 0
                        data_list = []
        except Exception as e:
            ColorFormatter.error('分割文件 {} 时候出错, 出错信息为: {}'.format(name, e))
            ColorFormatter.fatal('Please contact author')
            debug_logger.error('分割文件 {} 时候出错, 出错信息为: {}'.format(name, e))
            error_logger.error('分割文件 {} 时候出错, 出错信息为: {}'.format(name, e))

        try:
            if name != split_count:
                # If they are not equal, there is leftover data
                last_split_file = os.path.join(
                    SPLIT_PATH, 'split_file_{}.txt'.format(str(name - 1)))
                # Handle the last batch
                with open(last_split_file, 'a+') as f_target:
                    for data in data_list:
                        f_target.write(data)
        except Exception as e:
            ColorFormatter.error('分割文件 合并最后一个文件时出错, 出错信息为: {}'.format(e))
            ColorFormatter.fatal('Please contact author')
            debug_logger.error('分割文件 合并最后一个文件时出错, 出错信息为: {}'.format(e))
            error_logger.error('分割文件 合并最后一个文件时出错, 出错信息为: {}'.format(e))

        ColorFormatter.success("文件 {} 分割完成".format(file_path))
        debug_logger.info("文件 {} 分割完成".format(file_path))
        return True

    except:
        return False