Example #1
File: poezio.py Project: krackou/poezio
def main():
    """
    Entry point
    """
    sys.stdout.write("\x1b]0;poezio\x07")
    sys.stdout.flush()
    import config
    config_path = config.check_create_config_dir()
    config.run_cmdline_args(config_path)
    config.create_global_config()
    config.check_create_log_dir()
    config.check_create_cache_dir()
    config.setup_logging()
    config.post_logging_setup()

    from config import options

    if options.check_config:
        config.check_config()
        sys.exit(0)

    import theming
    theming.update_themes_dir()

    import logger
    logger.create_logger()

    import roster
    roster.create_roster()

    import core

    log = logging.getLogger('')

    signal.signal(signal.SIGINT, signal.SIG_IGN) # ignore ctrl-c
    cocore = singleton.Singleton(core.Core)
    signal.signal(signal.SIGUSR1, cocore.sigusr_handler) # reload the config
    signal.signal(signal.SIGHUP, cocore.exit_from_signal)
    signal.signal(signal.SIGTERM, cocore.exit_from_signal)
    if options.debug:
        cocore.debug = True
    cocore.start()

    # Warning: asyncio must always be imported after the config. Otherwise
    # the asyncio logger will not follow our configuration and won't write
    # the tracebacks in the correct file, etc
    import asyncio
    loop = asyncio.get_event_loop()

    loop.add_reader(sys.stdin, cocore.on_input_readable)
    loop.add_signal_handler(signal.SIGWINCH, cocore.sigwinch_handler)
    cocore.xmpp.start()
    loop.run_forever()
    # We reach this point only when loop.stop() is called
    try:
        cocore.reset_curses()
    except Exception:
        pass
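
Both poezio forks above obtain the single Core object through singleton.Singleton(core.Core). As a rough sketch of what such a helper can look like (poezio's actual singleton module may differ), the pattern is a per-class instance cache:

_instances = {}

def Singleton(cls, *args, **kwargs):
    # Return the existing instance of cls, creating it on first use.
    if cls not in _instances:
        _instances[cls] = cls(*args, **kwargs)
    return _instances[cls]
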
Example #2
File: poezio.py Project: Perdu/poezio
def main():
    """
    Entry point
    """
    sys.stdout.write("\x1b]0;poezio\x07")
    sys.stdout.flush()
    import config
    config_path = config.check_create_config_dir()
    config.run_cmdline_args(config_path)
    config.create_global_config()
    config.check_create_log_dir()
    config.check_create_cache_dir()
    config.setup_logging()
    config.post_logging_setup()

    from config import options

    import theming
    theming.update_themes_dir()

    import logger
    logger.create_logger()

    import roster
    roster.create_roster()

    import core

    log = logging.getLogger('')

    signal.signal(signal.SIGINT, signal.SIG_IGN)  # ignore ctrl-c
    cocore = singleton.Singleton(core.Core)
    signal.signal(signal.SIGUSR1, cocore.sigusr_handler)  # reload the config
    signal.signal(signal.SIGHUP, cocore.exit_from_signal)
    signal.signal(signal.SIGTERM, cocore.exit_from_signal)
    if options.debug:
        cocore.debug = True
    cocore.start()

    # Warning: asyncio must always be imported after the config. Otherwise
    # the asyncio logger will not follow our configuration and won't write
    # the tracebacks in the correct file, etc
    import asyncio
    loop = asyncio.get_event_loop()

    loop.add_reader(sys.stdin, cocore.on_input_readable)
    loop.add_signal_handler(signal.SIGWINCH, cocore.sigwinch_handler)
    cocore.xmpp.start()
    loop.run_forever()
    # We reach this point only when loop.stop() is called
    try:
        cocore.reset_curses()
    except Exception:
        pass
Example #3
def main():
    """Main entry point"""
    log = logger.create_logger()
    dry_run = os.environ['dry_run']

    def str_to_bool(string):
        """Convert string to boolean"""
        return string == 'True'

    dry_run = str_to_bool(dry_run)

    log.info("Default VPC killer: Start")
    if dry_run:
        log.info("Dry run flag enabled - no delete operations will occur.")

    account_inventory = {}

    profiles = local.fetch_profiles()
    regions = ec2.describe_regions(profiles)

    # Account inventory assembly begins
    sts.fetch_account_ids(account_inventory, profiles)
    ec2.describe_default_vpcs(account_inventory, regions)
    whitelist.decorate(account_inventory, regions)

    ec2.network_interfaces(account_inventory)

    ec2.subnets(account_inventory, dry_run)
    ec2.internet_gateways(account_inventory, dry_run)
    ec2.process_vpcs(account_inventory, dry_run)
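
The str_to_bool helper above treats only the exact string 'True' as truthy. A slightly more forgiving variant (hypothetical, not part of the project) could normalize case and accept common spellings:

def str_to_bool(string):
    """Interpret common truthy spellings; everything else is False."""
    return string.strip().lower() in ('true', '1', 'yes')

assert str_to_bool('True') and str_to_bool('YES')
assert not str_to_bool('False')
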
Example #4
    def __init__(self, config, vocab):
        vocab_size = len(vocab.word)
        logger = create_logger(name="EMBED")
        super(Embedding, self).__init__()
        unk = vocab.word.unk
        pad = vocab.word.pad
        if hasattr(config, 'embedding_dir'):
            files = glob.glob(config.embedding_dir + "/*")
            flag = False
            for filename in files:
                if str(config.emb_dim) in filename:
                    logger.info(f"[-] Use the data from {filename}.")
                    flag = True
                    break
            if flag:
                cover = 0
                weight = torch.randn(vocab_size, config.emb_dim)
                with codecs.open(filename, 'r', encoding='utf-8') as file:
                    for line in file:
                        data = line.strip().split(' ')
                        word, emb = data[0], list(map(float, data[1:]))
                        token = vocab.word.w2t(word)
                        if token != unk:
                            cover += 1
                            weight[token] = torch.FloatTensor(emb)
                self.model = nn.Embedding.from_pretrained(weight)
                logger.info((
                    f"[-] Coverage: {cover}/{vocab_size} "
                    f"({cover / vocab_size * 100:.2f}%)."))
            else:
                logger.info("[-] Match file not found. Train from scratch.")
                self.model = nn.Embedding(vocab_size, config.emb_dim)
        else:
            self.model = nn.Embedding(vocab_size, config.emb_dim)
            logger.info("[-] Train from scratch.")
Example #5
    def __init__(self, logger):
        """
        Initialize: read config info and set up logging
        :param logger:
        """
        config = configparser.ConfigParser()
        config.read('config.cfg', encoding='utf-8')
        accessKeyId = config['common']['accessKeyId']
        accessSecret = config['common']['accessSecret']
        self.s_RegionId = config['source']['s_RegionId']
        self.s_InstanceId_list = config['source']['s_InstanceId']
        self.s_ImageName = config['source']['s_ImageName']
        self.s_Description = config['source']['s_Description']

        self.d_DestinationRegionId = config['destination'][
            'd_DestinationRegionId']
        self.d_DestinationImageName = config['destination'][
            'd_DestinationImageName']
        self.d_DestinationDescription = config['destination'][
            'd_DestinationDescription']
        self.ecshelper = client.AcsClient(accessKeyId, accessSecret,
                                          self.s_RegionId)

        logger = logger.LogHelper()
        logname = logger.create_dir()
        self.logoper = logger.create_logger(logname)
Example #6
    def get_logger(self):
        """
        creates a logger
        """
        command = ["python", sys.argv[0]]
        for x in sys.argv[1:]:
            if x.startswith('--'):
                assert '"' not in x and "'" not in x
                command.append(x)
            else:
                assert "'" not in x
                if re.match('^[a-zA-Z0-9_]+$', x):
                    command.append("%s" % x)
                else:
                    command.append("'%s'" % x)
        command = ' '.join(command)
        self.command = command + ' --exp_id "%s"' % self.exp_id

        # create a logger
        logger = create_logger(os.path.join(self.exp_dir, 'train.log'), rank=0)
        logger.info("============ Initialized logger ============")
        logger.info("\n".join("%s: %s" % (k, str(v))
                              for k, v in sorted(dict(vars(self)).items())))
        logger.info("The experiment will be stored in %s\n" % self.exp_dir)
        logger.info("Running command: %s" % command)
        logger.info("")
        return logger
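
The manual quoting in get_logger can also be done with the standard library; a sketch of the same command reconstruction using shlex.quote (an alternative, not what this project does):

import shlex
import sys

# shlex.quote escapes anything the shell would misinterpret,
# which replaces the assert-based checks above
command = ' '.join(shlex.quote(arg) for arg in ['python'] + sys.argv)
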
Example #7
def fetch_profiles():
    """Iterate through local AWS credentials file"""
    log = logger.create_logger()

    profiles = []

    aws_dir = os.environ['HOME'] + '/.aws/'
    credentials_file = os.environ['HOME'] + '/.aws/credentials'

    log.debug("Root dir contents: %s", os.listdir(os.environ['HOME']))
    log.debug("Mounted .aws dir contents: %s", os.listdir(aws_dir))

    if os.path.exists(credentials_file):
        if not os.path.getsize(credentials_file) > 0:
            log.error("Credentials file empty. Exiting.")
            sys.exit(1)
    else:
        log.error("Credentials file %s missing. Exiting.", credentials_file)
        sys.exit(1)

    log.debug("Credentials file identified as %s", credentials_file)

    with open(credentials_file) as file:
        for line in file:
            if line.startswith("["):
                profile = re.search(r"\[([A-Za-z0-9\-\_]+)\]", line).group()
                profile = profile[1:-1]

                profiles.append(profile)

    log.debug("Profiles: %s", profiles)

    return profiles
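
Since the AWS credentials file is INI-formatted, the profile names can also be read with configparser instead of a regex; a sketch under that assumption:

import configparser
import os

parser = configparser.ConfigParser()
parser.read(os.path.join(os.environ['HOME'], '.aws', 'credentials'))
profiles = parser.sections()  # one section header per profile
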
Example #8
	def __init__(self, username, connection, board, position):
		super(Player, self).__init__(board, position)

		self.username = username
		self.connection = connection
		self.logger = logger.create_logger(username)
		self.snowballs = 0
Example #9
    def interactive_with_server(self):
        '''Main interaction loop with the server'''
        # create logger
        logger = create_logger('ftpClient')

        # connect to the server
        self.client = self.connect()

        # read commands
        while 1:
            execution_handler = self.client.recv(1024).decode()
            operator = input(execution_handler).strip()
            if len(operator) < 1:
                continue

            cmd = operator.split()[0]
            if hasattr(self, cmd):
                # self.<cmd> exists, so just call it
                func = getattr(self, cmd)
                func(operator, execution_handler)

                if cmd == 'logout':
                    break

            else:
                print("Ftp200")
Example #10
    def __init__(self,
                 collect_gradients=False,
                 warmup=50,
                 ddpg_args=DDPGArgs(),
                 input_states=['joint_velocities', 'task_low_dim_state']):
        super(ReachTargetRLAgent,
              self).__init__(collect_gradients=collect_gradients,
                             warmup=warmup)
        # action should contain 1 extra value for gripper open close state

        input_dims = [
            0 if st not in STATE_DIM_MAP else STATE_DIM_MAP[st]
            for st in input_states
        ]
        input_dims = [sum(input_dims)]
        self.neural_network = DDPG(
            arguements=ddpg_args, input_dims=input_dims,
            n_actions=8)  # 1 DDPG Setup with Different Predictors.
        self.agent_name = "DDPG__AGENT"
        self.logger = logger.create_logger(self.agent_name)
        self.input_states = [st for st in input_states if st in STATE_DIM_MAP]
        self.logger.propagate = 0
        self.data_loader = None
        self.dataset = None
        self.print_every = 40
        self.curr_state = None
        self.logger.info("Agent Wired With Input States : %s",
                         ','.join(self.input_states))
Example #11
    def __init__(self, logger):
        """
        Initialize: read the configuration file
        """
        self.url = 'http://tool.chinaz.com/iframe.ashx?t=port'
        self.headers = {
            'Accept':
            'text/javascript, application/javascript, application/ecmascript, application/x-ecmascript, */*; q=0.01',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.8',
            'Connection': 'keep-alive',
            'Content-Length': '62',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'Host': 'tool.chinaz.com',
            'Origin': 'http://tool.chinaz.com',
            'Referer': 'http://tool.chinaz.com/port/',
            'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
            'X-Requested-With': 'XMLHttpRequest'
        }
        config = ConfigParser()
        config.read('info.cfg', encoding='utf-8')
        self.address_list = config['port_check_info']['address']
        self.port_list = config['port_check_info']['ports']
        # initialize logger
        logger = logger.LogHelper()
        logname = logger.create_dir()
        self.logoper = logger.create_logger(logname)
Example #12
    def __init__(self, logger, host='0.0.0.0', port=8080):

        config = ConfigParser()
        config.read('config.py', encoding='utf-8')
        # server listen address and port
        self.host = host
        self.port = port
        self.sToken = config['recmsg']['Token']
        self.sEncodingAESKey = config['recmsg']['EncodingAESKey']
        self.sCorpID = config['common']['corpid']

        # configuration of the app that receives forwarded messages
        self.agent_id = config['appconfig']['agentid']
        self.agent_secret = config['appconfig']['secret']
        self.userid = config['appconfig']['userid']
        self.partid = config['appconfig']['partid']
        self.send_msg_url = config['urlconfig']['send_msg_url']
        self.get_access_token_url = config['urlconfig']['get_access_token_url']

        # server logging configuration
        logger = logger.LogHelper()
        logname = logger.create_dir()
        self.logoper = logger.create_logger(logname)

        # fetch the access_token
        self.access_token = json.loads(
            requests.get(
                self.get_access_token_url.format(
                    self.sCorpID, self.agent_secret)).content)['access_token']
Example #13
File: main.py Project: sonynka/StarGAN
def main(config):
    # For fast training
    cudnn.benchmark = True

    # create logging folders
    if not os.path.exists(config.output_path):
        os.makedirs(config.output_path)
    subfolders = ['logs', 'samples', 'models', 'results']
    for subfolder in subfolders:
        subfolder_path = os.path.join(config.output_path, subfolder,
                                      config.output_name)
        if not os.path.exists(subfolder_path):
            os.makedirs(subfolder_path)

    print_logger = create_logger(
        os.path.join(
            config.output_path, 'logs', config.output_name,
            'train{}.log'.format(datetime.now().strftime("%Y%m%d-%H%M%S"))))
    print_logger.info('============ Initialized logger ============')
    print_logger.info('\n'.join(
        '%s: %s' % (k, str(v)) for k, v in sorted(dict(vars(config)).items())))

    # Data loader
    data_loaders = get_loaders(config.root, config.attrs, config.categories,
                               config.image_size, config.batch_size)

    # Solver
    solver = Solver(data_loaders, config)

    if config.mode == 'train':
        solver.train()
    elif config.mode == 'test':
        solver.test()
Example #14
def load(connection, download_dir=FEEDS_DOWNLOAD_PATH,
         output_dir=FEEDS_OUTPUT_PATH,
         logger=create_logger('loader')):
    """Function for loading data from feed files to database.

    :param connection: MySQLdb connection object.
    :param download_dir: Directory with downloaded files.
    :param output_dir: Directory for storing processed feeds as timestamped
    txt files.
    :param logger: Logger object for monitoring feed uploading.
    """
    for source in get_sources_info(connection):
        if source['adaptor'] != 'noadaptor':
            feed = Feed(connection, source['id'], download_dir, output_dir,
                        logger)
            if not feed.downloaded_md5():
                feed.logger.warning("No file to process for %s" % feed.name)
            elif feed.md5_changed():
                feed.make_dirs()
                feed.save_data()
                feed.upload()
                feed.update_md5()
                connection.commit()
                if feed.logger:
                    feed.logger.info("Finished uploading %s." % feed.name)
            else:
                if feed.logger:
                    feed.logger.info("%s already up-to-date." % feed.name)
Example #15
def describe_regions(profiles):
    """Find regions available to AWS profiles"""
    log = logger.create_logger()

    regions = []

    for profile in profiles:
        log.debug("Using profile %s to identify available regions ...",
                  profile)

        session = boto3.Session(profile_name=profile)
        ec2 = session.client('ec2')

        try:
            response = ec2.describe_regions()

        except ClientError as error:
            if error.response['Error']['Code'] == 'InvalidClientTokenId':
                log.warning(
                    """The keypair associated with profile %s
                    is not currently able to authenticate against AWS EC2.
                    Please investigate or remove and rerun.""", profile)
            else:
                log.warning("Unhandled exception occurred: %s", error)
            # response is undefined after a failed call, so skip this profile
            continue

        regions = [region['RegionName'] for region in response['Regions']]

    log.debug("Regions identified: %s", regions)

    return regions
Example #16
    def __init__(self, config):
        self.p_sent = config.p_sent
        self.p_seq_len = config.p_seq_len
        self.q_seq_len = config.q_seq_len
        self.c_seq_len = config.c_seq_len
        self.epochs = config.epochs
        self.batch_size = config.batch_size

        self.logger = create_logger(name="TRAIN")
        self.data_dir = Path(config.data_dir) / "save"
        self.exp_dir = Path(config.data_dir) / "exp" / config.exp
        self.device = \
            torch.device('cuda:{}'.format(config.device)) \
            if config.device >= 0 else torch.device('cpu')
        config.device = self.device

        self.metrics = ["ACC", "LOSS"]

        self.__cur_epoch = 0
        self.vocab = gen_vocab(
            data_dir=Path(config.data_dir),
            word_freq_path=self.data_dir / "vocab.pkl",
            special_tokens=config.special_tokens,
            size=config.vocab_size)

        self.model = Model(config, self.vocab)
Example #17
def initialize_exp(params):
    """
    Initialize experiment.
    """
    # initialization
    if getattr(params, 'seed', -1) >= 0:
        np.random.seed(params.seed)
        torch.manual_seed(params.seed)
        if params.cuda:
            torch.cuda.manual_seed(params.seed)

    # dump parameters
    params.exp_path = get_exp_path(
        params) if not params.exp_path else params.exp_path
    pickle.dump(params, open(os.path.join(params.exp_path, 'params.pkl'),
                             'wb'))

    # create logger
    logger = create_logger(os.path.join(params.exp_path, 'train.log'),
                           vb=params.verbose)
    logger.info('============ Initialized logger ============')
    logger.info('\n'.join('%s: %s' % (k, str(v))
                          for k, v in sorted(dict(vars(params)).items())))
    logger.info('The experiment will be stored in %s' % params.exp_path)
    return logger
Example #18
def network_interfaces(inventory):
    """Find network interfaces"""
    log = logger.create_logger()

    for account, attribute in inventory.items():
        for region in attribute['Regions']:
            for key, value in region.items():
                region_name = key
                vpc = value['DefaultVpc']

                if not value['Whitelist']:

                    try:
                        session = boto3.Session(
                            profile_name=attribute['ProfileName'],
                            region_name=region_name)

                        ec2_client = session.client('ec2')

                        response = ec2_client.describe_network_interfaces(
                            Filters=[{
                                'Name': 'vpc-id',
                                'Values': [
                                    vpc,
                                ]
                            }])

                        if not response['NetworkInterfaces']:
                            log.debug(
                                "Account ID %s associated with keypair name"
                                " %s: No network interfaces present in %s",
                                account, attribute['ProfileName'], vpc)

                        else:
                            log.warning(
                                "Account ID %s associated with keypair name"
                                " %s: Network interfaces found in %s. "
                                "No action will be taken on this VPC.",
                                account, attribute['ProfileName'], vpc)

                            # mark every region in this account so the later
                            # stages will not act on its VPCs
                            for region_entry in inventory[account]['Regions']:
                                for region_value in region_entry.values():
                                    region_value['NetworkInterfacesPresent'] = "True"
                            continue

                    except ClientError as error:
                        if error.response['Error'][
                                'Code'] == 'InvalidClientTokenId':
                            log.warning(
                                "The keypair associated with profile %s is not"
                                " currently able to authenticate against AWS"
                                " EC2. Please investigate, remove and rerun.",
                                attribute['ProfileName'])

                        else:
                            log.warning("Unhandled exception occurred: %s",
                                        error)
                            continue
Example #19
    def __init__(self, log_q, queue, event_handler, sim_event_handler):
        super().__init__()
        self.log = create_logger(log_q, controller=True)
        self.queue = queue
        self.eventH = event_handler
        self.eventH_sim = sim_event_handler
        self.log.info("Initialized the controller")

        self.controller = LaneDetector(self.log)
Example #20
def generate_callgraph():
    logger = create_logger('log.log')

    repo_path = os.path.join('ecosystem', '**', '*.py')
    stt = time.perf_counter()
    callgraph = dump_callgraph(repo_path, logger=logger)
    edt = time.perf_counter()
    print("Generate callgraph cost %.2f minutes." % ((edt - stt) / 60.0, ))
    return callgraph
Example #21
def main(config_path, output_file):
    warnings.filterwarnings("ignore", message="numpy.dtype size changed")
    warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
    config = Box.from_yaml(config_path.open())
    logger = create_logger(name="MAIN")
    logger.info(f'[-] Config loaded from {config_path}')

    vocab = gen_vocab(Path(config.data_dir),
                      Path(config.data_dir) / "save" / "vocab.pkl",
                      config.special_tokens, config.vocab_size)

    model = Model(config, vocab)

    if hasattr(config, "test_ckpt"):
        logger.info(f'[-] Test checkpoint: {config.test_ckpt}')
        model.load_state(config.test_ckpt)
    else:
        logger.info(f'[-] Test experiment: {config.exp}')
        model.load_best_state(Path(config.data_dir) / "exp" / config.exp)

    random.seed(config.random_seed)
    np.random.seed(config.random_seed)
    torch.manual_seed(config.random_seed)
    torch.cuda.manual_seed(config.random_seed)
    logger.info('[-] Random seed set to {}'.format(config.random_seed))

    logger.info(f'[*] Initialize data loader...')
    data_loader = create_data_loader(Path(config.data_dir) / "save" /
                                     "test.pkl",
                                     vocab,
                                     config.p_sent,
                                     config.p_seq_len,
                                     config.q_seq_len,
                                     config.c_seq_len,
                                     config.batch_size,
                                     debug=False)
    logger.info('[*] Start testing...')
    writer = csv.DictWriter(open(output_file, 'w'), fieldnames=['id', 'ans'])
    writer.writeheader()
    for batch in data_loader:
        batch['passage'] = batch['passage'].to(model._device)
        batch['question'] = batch['question'].to(model._device)
        batch['choices'] = batch['choices'].to(model._device)
        logits = model(batch['passage'], batch['question'], batch['choices'])
        _, predictions = torch.max(logits, dim=-1)
        predictions = predictions.tolist()
        for idx, (ID, pred) in enumerate(zip(batch['id'], predictions)):
            question = batch['question'][idx].tolist()
            if vocab.word.pad in question:
                question = question[:question.index(vocab.word.pad)]
            answer = batch['choices'][pred - 1][idx].tolist()
            if vocab.word.pad in answer:
                answer = answer[:answer.index(vocab.word.pad)]
            writer.writerow({'id': ID, 'ans': int(pred) + 1})
            print(f"Question: {''.join(vocab.word.t2s(question))}")
            print(f"Answer: {''.join(vocab.word.t2s(answer))}")
Example #22
    def _setup_logger(self):
        self.save_dir = os.path.join('./checkpoints', args.logdir)
        if not os.path.isdir(self.save_dir):
            os.makedirs(self.save_dir)
        log_path = os.path.join(
            self.save_dir,
            datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
        self.logger = L.create_logger(args.logdir, log_path)
        for arg in vars(args):
            self.logger.info("%-25s %-20s" % (arg, getattr(args, arg)))
Example #23
def internet_gateways(inventory, dry_run):
    """Find, detach, and delete internet gateways"""
    log = logger.create_logger()

    for _account, attribute in inventory.items():
        for region in attribute['Regions']:
            for key, value in region.items():
                region_name = key
                vpc = value['DefaultVpc']

                if not value['NetworkInterfacesPresent'] and not value[
                        'Whitelist']:

                    try:
                        session = boto3.Session(
                            profile_name=attribute['ProfileName'],
                            region_name=region_name)

                        ec2_client = session.client('ec2')

                        response = ec2_client.describe_internet_gateways(
                            Filters=[{
                                'Name': 'attachment.vpc-id',
                                'Values': [
                                    vpc,
                                ]
                            }])

                        for igw in response['InternetGateways']:
                            resource = igw['InternetGatewayId']

                            response = ec2_client.detach_internet_gateway(
                                DryRun=dry_run,
                                InternetGatewayId=resource,
                                VpcId=vpc)

                            log.debug(
                                "Attempting to delete %s from %s - dry-run: %s",
                                resource, vpc, dry_run)
                            response = ec2_client.delete_internet_gateway(
                                InternetGatewayId=resource, DryRun=dry_run)

                    except ClientError as error:
                        if error.response['Error'][
                                'Code'] == 'InvalidClientTokenId':
                            log.warning(
                                "The keypair associated with profile %s"
                                " is not currently able to authenticate against"
                                " AWS EC2. Please investigate or remove and rerun.",
                                attribute['ProfileName'])

                        else:
                            log.warning("Unhandled exception occurred: %s",
                                        error)
                            continue
Example #24
    def __init__(self, config, vocab):
        self._logger = create_logger(name="MODEL")
        self._device = config.device
        self._logger.info("[*] Creating model.")
        self._stats = None
        self._net = Net(config, vocab)
        self._net.to(device=self._device)
        optim = getattr(torch.optim, config.optim)
        self._optim = optim(
            filter(lambda p: p.requires_grad, self._net.parameters()),
            **config.optim_param)
Example #25
def create_app(config=None):
    app = Flask(__name__, instance_relative_config=True)

    config_name = os.getenv('FLASK_CONFIG', 'default')
    app.config.from_object(configs[config_name])

    if config:
        app.config.from_mapping(config)
    else:
        app.config.from_pyfile('config.py', silent=True)

    app.identifier = str(uuid.uuid4())
    app.created = datetime.utcnow()
    app.logger = create_logger(app.config)

    login_manager = LoginManager()
    login_manager.init_app(app)

    from easychatbot.database import db
    db.init_app(app)

    with app.app_context():

        import easychatbot.database.models
        db.create_all()

        if (app.env == 'development'
                and not easychatbot.database.models.User.query.count()):
            from easychatbot.database import mockdata
            mockdata.create_mockdata()

        from easychatbot.api import api
        from easychatbot.api.endpoints.root import ns as root_namespace
        from easychatbot.api.endpoints.users import ns as users_namespace
        from easychatbot.api.endpoints.chatbot import ns as chatbot_namespace
        from easychatbot.api.endpoints.qas import ns as qas_namespace
        from easychatbot.api.endpoints.engine import ns as engine_namespace
        from easychatbot.api.endpoints.statistics import ns as statistics_namespace

        blueprint = Blueprint('api', __name__, url_prefix='/api')
        api.init_app(blueprint)
        api.add_namespace(root_namespace)
        api.add_namespace(users_namespace)
        api.add_namespace(chatbot_namespace)
        api.add_namespace(qas_namespace)
        api.add_namespace(engine_namespace)
        api.add_namespace(statistics_namespace)
        app.register_blueprint(blueprint)

        from easychatbot import normalization
        normalization.init_normalization()

    return app
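
A typical way to run such a Flask application factory during development (assuming create_app is importable from the easychatbot package):

from easychatbot import create_app

app = create_app()
if __name__ == '__main__':
    app.run(debug=True)
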
Example #26
async def create_app(config: dict):
    app = web.Application()
    app['config'] = config
    aiohttp_jinja2.setup(app,
                         loader=jinja2.PackageLoader('empty', 'templates'))
    setup_routes(app)
    setup_middlewares(app)
    app['logger'] = create_logger(config)

    app.on_startup.append(on_start)
    app.on_cleanup.append(on_shutdown)
    return app
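
aiohttp's web.run_app accepts a coroutine that returns an Application, so the async factory above can be started directly; a minimal sketch with a placeholder config:

from aiohttp import web

config = {}  # placeholder; the real config comes from elsewhere
web.run_app(create_app(config), host='127.0.0.1', port=8080)
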
Example #27
def get_re_from_all_langs():
    logger = create_logger("logs/all_langs.log")
    task = "wiki"
    shuffle = False
    folder = None
    model = "BLEU"

    columns = [
        'dataset size (sent)', 'Source lang word TTR',
        'Source lang subword TTR', 'Target lang word TTR',
        'Target lang subword TTR', 'Source lang vocab size',
        'Source lang subword vocab size', 'Target lang vocab size',
        'Target lang subword vocab size', 'Source lang Average Sent. Length',
        'Target lang average sent. length', 'Source lang word Count',
        'Source lang subword Count', 'Target lang word Count',
        'Target lang subword Count', 'geographic', 'genetic', 'inventory',
        'syntactic', 'phonological', 'featural'
    ]

    # organized data
    org_data = read_data(task, shuffle, folder, selected_feats=None)

    # length of data points
    lens = len(org_data[model]["feats"])

    # languages
    langs = org_data[model]["langs"]

    # full index (train ids are derived from it below)
    index = langs.index

    # test index
    for i, (source_lang, target_lang) in enumerate(langs.values):
        test_ids = langs[(langs["Source"] == source_lang)
                         & ((langs["Target"]) == target_lang)].index
        train_ids = index.delete(test_ids)

        # splitter
        split_data = get_split_data(org_data,
                                    "specific_split",
                                    train_ids=[train_ids],
                                    test_ids=[test_ids])

        # run once
        re = get_result(split_data, "xgboost", get_ci=False, quantile=0.95)

        logger.info(
            f"Source Lang: {source_lang}, Target Lang: {target_lang}, rmse: {re['BLEU']['test_rmse'][0]}"
        )

        if (i + 1) % 100 == 0:
            logger.info(f"[{i+1}/{lens}] processed ")
Example #28
def checkForUpdatedConfig():
    from logger import create_logger
    logger = create_logger('config')
    logger.info('Running checkForUpdatedConfig() from config')

    with open('./config.json') as file:
        configs = json.load(file)

    with db(**dbConfig) as DB:
        has_changed = DB.has_changed(configs, 'config_history', 'config',
                                     'configchangedat')
    if has_changed:
        updateConfigs(configs)
Example #29
def download_feeds(connection, download_dir=FEEDS_DOWNLOAD_PATH,
                   output_dir=FEEDS_OUTPUT_PATH,
                   logger=create_logger('loader')):
    """Function for downloading feed files.

    :param connection: MySQLdb connection object.
    :param download_dir: Directory to download files.
    :param output_dir: Directory for storing processed feeds.
    :param logger: Logger object for monitoring downloading process.
    """
    for source in get_sources_info(connection):
        feed = Feed(connection, source['id'], logger=logger,
                    download_dir=download_dir, output_dir=output_dir)
        feed.download()
Example #30
def process_vpcs(inventory, dry_run):
    """Find and delete VPCs"""
    log = logger.create_logger()

    summary = []

    for account, attribute in inventory.items():

        account_summary = {'AccountId': account}
        vpcs_removed = []

        for region in attribute['Regions']:
            for key, value in region.items():
                region_name = key
                vpc = value['DefaultVpc']

                if not value['NetworkInterfacesPresent'] and not value[
                        'Whitelist']:

                    try:
                        session = boto3.Session(
                            profile_name=attribute['ProfileName'],
                            region_name=region_name)

                        ec2_client = session.client('ec2')

                        log.debug("Attempting to delete %s - dry-run: %s", vpc,
                                  dry_run)
                        ec2_client.delete_vpc(VpcId=vpc, DryRun=dry_run)

                    except ClientError as error:
                        if error.response['Error'][
                                'Code'] == 'InvalidClientTokenId':
                            log.warning(
                                "The keypair associated with profile %s"
                                " is not currently able to authenticate against"
                                " AWS EC2. Please investigate or remove and rerun.",
                                attribute['ProfileName'])

                        else:
                            log.warning("Unhandled exception occurred: %s",
                                        error)
                            vpcs_removed.append(vpc)
                            continue

                    vpcs_removed.append(vpc)

        account_summary['VpcsRemoved'] = vpcs_removed
        summary.append(account_summary)

    log.info("Summary: %s", summary)
Example #31
    def __init__(self, id):
        self.id = id
        self.Session = Session
        self.session = Session()
        self.process = self.session.query(models.Process).options(
            joinedload('queues')
            .joinedload('queues_tasks')
            .joinedload('task')).filter(
                models.Process.id == self.id).first()
        if not self.process:
            raise Exception(f"Can't find process with id ='{self.id}'")
        with open('./config.yaml', 'r') as config:
            settings = yaml.safe_load(config)
        self.logger = create_logger(
            'main',
            os.path.join(settings['logs']['test']['logpath'],
                         self.process.name,
                         dt.datetime.now().strftime("%Y%m%d_%H%M%S")))
        self.logger.info(f"Executor initiated for process with id = '{self.id}'")
        self.logger.debug(self.process)
        self.threads = []
        self.threads_with_errors = []
Example #32
    def __init__(self, timeout, loglevel, output_dir=FEEDS_OUTPUT_PATH,
                 json_path=os.path.join(JSON_PATH, 'loaderd.json')):
        self.timeout = timeout
        self.loglevelstr = loglevel
        self.loglevel = getattr(logging, loglevel)
        self.output_dir = output_dir
        self.context = daemon.DaemonContext()
        self.json_path = json_path
        self.pidpath = os.path.join(DAEMON_PATH, 'loaderd.pid')
        self.pidlock = lockfile.FileLock(self.pidpath)
        self.logger = create_logger(logger_name='loader')
        self.logger.handlers[0].setLevel(self.loglevel)
        self.context.files_preserve = [self.logger.handlers[0].stream]
        self.connection = None
Example #33
def main():

    # create logger
    cwd = os.getcwd()
    log_file_folder_path = os.path.join(cwd, log_files_folder)
    logger = create_logger(log_file_folder_path=log_file_folder_path,
                           log_file_name=log_file)

    # connect to db
    collection = connect_to_mongodb(mongodb_instance=instance,
                                    mongodb=db,
                                    mongodb_collection=db_collection,
                                    logger=logger)

    # get LIVE API results, record values to db
    get_api_results_for_n_days(days=days_to_request,
                               pickle_file=pickle_file,
                               base_url=base_url,
                               headers=headers,
                               cabin_class=cabin_class,
                               country=country,
                               currency=currency,
                               locale_lang=locale_lang,
                               city_from=city_from,
                               city_to=city_to,
                               country_from=country_from,
                               country_to=country_to,
                               outbound_date=outbound_date,
                               adults_count=adults_count,
                               max_retries=max_retries,
                               json_files_folder=json_files_folder,
                               json_file=json_file,
                               collection=collection,
                               logger=logger,
                               save_to_file=save_to_file,
                               live_api_mode=live_api_mode)

    # find flights with price < threshold
    find_flights_under_threshold_price(threshold=price_threshold,
                                       search_date=outbound_date,
                                       collection=collection,
                                       logger=logger)

    # clean up log files
    log_path_to_clean = os.path.join(cwd, log_files_folder)
    files_cleaner(path_to_clean=log_path_to_clean,
                  extension='log',
                  to_keep_number=log_files_to_keep,
                  logger=logger)
Example #34
    def __init__(self,
                 action_mode=DEFAULT_ACTION_MODE,
                 headless=True,
                 num_episodes=120,
                 episode_length=40,
                 dataset_root=''):
        super(ReachTargetSimulationEnv,
              self).__init__(action_mode=action_mode,
                             task=ReachTarget,
                             headless=headless,
                             dataset_root=dataset_root)
        self.num_episodes = num_episodes
        self.episode_length = episode_length
        self.logger = logger.create_logger(__class__.__name__)
        self.logger.propagate = 0
Example #35
    def __init__(self, learning_rate=0.01, batch_size=64, collect_gradients=False):
        super(TorchAgent, self).__init__(collect_gradients=collect_gradients)
        self.learning_rate = learning_rate
        # action should contain 1 extra value for gripper open close state
        self.neural_network = FullyConnectedPolicyEstimator(10, 8)
        self.optimizer = optim.SGD(self.neural_network.parameters(),
                                   lr=learning_rate, momentum=0.9)
        self.loss_function = nn.MSELoss()
        self.training_data = None
        self.logger = logger.create_logger(__class__.__name__)
        self.logger.propagate = 0
        self.input_state = 'joint_positions'
        self.output_action = 'joint_velocities'
        self.data_loader = None
        self.dataset = None
        self.batch_size = batch_size
Example #36
	def __init__(self):
		self.logger = logger.create_logger('Controller')
		self.load_config(CONFIG_FILE)
		self.board = GameBoard(self.width, self.height)
		self.running = False
		self.paused = False
		self.visibility = random.randint(self.minvisibility, self.maxvisibility)
		
		# Put some trees on the board
		num_trees = random.randint(self.mintrees*self.width*self.height,
		                           self.maxtrees*self.width*self.height)

		for i in xrange(num_trees):
			x = random.randint(0, self.width - 1)
			y = random.randint(0, self.height - 1)

			if not self.board.get_object((x, y)):
				self.board.add_object(Tree(self.board, (x, y)))
Example #37
        else:  # On weekdays
            if maintenant.hour < h_deb:
                sleeptime = maintenant.replace(hour=h_deb, minute=0) - maintenant
                sleeptime = sleeptime.seconds
            if maintenant.hour >= h_fin:
                sleeptime = maintenant - maintenant.replace(hour=h_deb, minute=0)
                sleeptime = sleeptime.seconds + 24*60*60
        print "************* SLEEP MODE *************"
        print "** "+str(sleeptime//60)+" MINUTES & "+str(sleeptime%60)+" SECONDS **"
        print " "
        sleeptime = 30
        myConnexion.send(str(sleeptime)+"#")
        return False


LOG=logger.create_logger()
LOG.info("##################################################")
LOG.info("#######         Lancement du serveur        ######") 
LOG.info("##################################################")

# Connect to the MySQL server
try:
    sql_con = mysql.connector.connect(**dbConfig)
    cursor = sql_con.cursor()
except mysql.connector.Error as err:
    if err.errno == errorDB.ER_ACCESS_DENIED_ERROR:
        LOG.error("access denied to the MySQL database")
    elif err.errno == errorDB.ER_BAD_DB_ERROR:
        LOG.error("the requested database does not exist on this server")
    else:
        LOG.error(str(err))
Example #38
    def __init__(self, logger, root, notebooks, folders):
        # Store the main variables
        # This is a reference to the global logger
        self.logger = logger
        # root stores the absolute path to the noteorganiser folder. It should
        # point to ~/.noteorganiser on a Unix type machine, and I don't know
        # where on Windows.
        self.root = root
        # level stores the current path in the root folder (still in absolute
        # path, though)
        self.level = root

        # notebooks is the list of notebooks files (ending with EXTENSION),
        # found in "level". Folders contains the list of non-empty, non-hidden
        # folders in this directory.
        self.notebooks = notebooks
        self.folders = folders

        # Reference towards the currently edited/previewed notebook
        self.current_notebook = ''
        # Stores the SHA sum for every notebook, in order to avoid re-analyzing
        # the entire file for each filtering TODO
        self.sha = {}


if __name__ == "__main__":
    from logger import create_logger
    LOGGER = create_logger()
    print(initialise(LOGGER))
Example #39
import sys
import json
from datetime import datetime

import config
import cw.datasources.datasource_mongodb
from cw.datasources.cache_controller import CacheController
from cw.models.html import Html
import logger
import importlib
import file_hash_creator


if __name__ == '__main__':
    request_file = sys.argv[1]
    request = json.load(open(request_file))
    print request

    request_id = sys.argv[2]

    start_at = datetime.now()

    logger = logger.create_logger(request['logger']['level'],
                                  request['logger']['handler_type'],
                                  request['logger']['option'])

    exporter_module = importlib.import_module(request['exporter']['name'])

    mongo = cw.datasources.datasource_mongodb.DataAccess(host=config.mongo['host'],
                                                         port=config.mongo['port'],
                                                         db_name=config.mongo['db_name'],
                                                         html_regex=request['datasource']['content_type_regex_to_datasource'],
                                                         prefix=request_id)
    exporter = exporter_module.Exporter(datasource=mongo, option=request['exporter']['option'], logger=logger)

    data_count = exporter.export()
    logger.info({'message': 'export %d data' % data_count})
    logger.info({'message': 'bye'})
Example #40
if __name__ == "__main__":
    from config import config
    from feeder.HistoricData import HistoricData as DataHandler
    from execution.SimulatedExecutionHandler import SimulatedExecutionHandler as ExecutionHandler
    from strategy.MovingAverageCrossover import MovingAverageCrossover as Strategy
    from portfolio.Portfolio import Portfolio
    from logger import create_logger
    import logging.handlers
    import queue

    print(config.dt_end_date, config.dt_start_date)
    event_queue = queue.Queue()
    heartbeat = 0
    initial_capital = 100000

    log_queue = queue.Queue()
    logger = create_logger(log_queue)
    logger.setLevel(logging.DEBUG)
    listener = logging.handlers.QueueListener(log_queue, logging.StreamHandler())
    listener.start()

    system = Engine(
        config=config,
        data_handler=DataHandler,
        execution_handler=ExecutionHandler,
        portfolio=Portfolio,
        strategy=Strategy,
        event_queue=event_queue,
    )
    system.run()
    listener.stop()
    print("Complete.")
Example #41
        """
        folder = os.path.join(self.data_folder, "index_constituents")

        # Check if the default folder exists
        if not os.path.exists(folder):
            os.makedirs(folder)

        for index in self.index_names:
            file_path = os.path.join(folder, index + ".xlsx")

            # Check if datafile exists
            if not os.path.exists(file_path):
                save_sp500_to_file(folder)

            self.logger.info("Retreiving the list of constituents for index {}.".format(index))
            df = pd.read_excel(file_path, parse_dates=True)
            self.symbol_list += list(df['ticker'].values)

        # Remove duplicate tickers from the list
        self.symbol_list = list(set(self.symbol_list))


if __name__ == '__main__':
    from logger import create_logger
    import logging
    import queue
    logger = create_logger(queue.Queue())
    logger.setLevel(logging.DEBUG)
    prepare_data = PrepareData()


Example #42
# -*- coding=utf-8 -*-
import base64
import json

from logger import create_logger
from trans import Trans, get_DES_key
from pyDes import *
from twisted.internet.protocol import Factory
from twisted.protocols.basic import LineReceiver
from twisted.internet import reactor

logger = create_logger()


class CardTrans(LineReceiver):

    def __init__(self):
        print 'dataReceiving'
        self.setRawMode()
        self.data = ''

    def connectionMade(self):
        logger.info(u'Connection established')

    def rawDataReceived(self, data):
        logger.info(u'Receiving data')
        self.data += data
        if self.data.endswith('\n'):
            self.data = self.data[:-1]
            self.handler_card_trans(self.data)
            self.data = ''