示例#1
0
def main(in_folder, out_folder, pipeline, num_threads, log_file=None):
    """Run the named pipeline over *in_folder*, writing into *out_folder*.

    Parameters
    ----------
    in_folder : str
        Existing directory containing the input data.
    out_folder : str
        Output directory; created if it does not exist yet.
    pipeline : str
        Key into ``pipeline_registry.all`` selecting the pipeline class.
    num_threads : int
        Worker count forwarded to the pipeline constructor.
    log_file : str, optional
        Log destination forwarded to ``set_logger``.
    """
    if not os.path.isdir(in_folder):
        # Bug fix: previously this only printed and fell through, so the
        # pipeline still ran against a nonexistent input folder. Abort.
        print("Input folder %s does not exist" % in_folder)
        return

    if not os.path.isdir(out_folder):
        print("Creating output folder %s" % out_folder)
        os.makedirs(out_folder)

    set_logger(log_file)

    p = pipeline_registry.all[pipeline](in_folder, out_folder, num_threads)
    p.run()
示例#2
0
def main(parser, appConfig):
    """Parse CLI options, install logging, validate the options and
    generate the Spring Boot web project."""
    import utils.logger as logger

    options, args = parser.parse_args(args=None, values=None)

    # Build the logger configuration and install it.
    log_cfg = util.DotDict(logging_config=options.log_config,
                           file=APPNAME + '.log',
                           name=options.logger)
    logger_dictConfig = logger.set_logger(log_cfg, options.log_path,
                                          options.log_level)

    validate_options_config(options)

    # Echo the effective settings: (display label, config attribute path).
    util.print_options_attrs2(options, [
        ("springboot", "version"),
        ("group", "groupId"),
        ("artifact", "artifactId"),
        ("name", "name"),
        ("ver", "artifact.version"),
        ("description", "description"),
        ("packagename", "packageName"),
        ("packaging", "packaging"),
        ("java", "java.version"),
        ("port", "server.port"),
        ("context_path", "server.servlet.context-path"),
    ])

    create_sb2web_project(appConfig, options)
def main(argv=None):
    """Entry point: create result folders, configure logging, import the
    requested model module and run training.

    Relies on module-level ``FLAGS``, ``RESULT_DIR``, ``CHECKPOINT_DIR``,
    ``logger`` and ``train``.
    """
    print("start of main")
    main_time = time.time()

    os.makedirs(RESULT_DIR)

    # logging: send everything to stdout and to the run's log file
    LOG_FILE = os.path.join(RESULT_DIR, "log.txt")
    logger.set_logger(level=FLAGS.get('log_level'),
                      stream=True,
                      fileh=True,
                      filename=LOG_FILE)

    # file handling
    logger.info("create folder for results: {}".format(RESULT_DIR))
    if FLAGS.checkpoint_step > 0:
        os.mkdir(CHECKPOINT_DIR)
        logger.info("create checkpoints folder: {}".format(CHECKPOINT_DIR))

    # dynamically import the module implementing the requested model
    try:
        model_path = 'models.' + FLAGS.get('model').lower()
        model_module = __import__(model_path, fromlist=[''])
    except ImportError as err:
        # Bug fix: .format() was called with no argument, which raised
        # "IndexError: Replacement index 0 out of range" and masked the
        # real error. Report the module we failed to import and chain
        # the original ImportError for debugging.
        raise ValueError("no such model exists: {}".format(model_path)) from err

    # parse all FLAGS
    FLAGS.parse_and_log()

    # start training
    train(model_module.Model)

    # print a summary before ending
    logger.newline(2)
    logger.info("total time used: {}".format(time.time() - main_time))
    logger.info("summary dir: " + RESULT_DIR)
    logger.newline()
    logger.info("~end of main~")
示例#4
0
    def setup_callables(self):
        """Build and return the list of Keras callbacks used for training.

        All metric-driven callbacks monitor ``val_dice_coef`` with
        ``mode='max'`` (higher is better).

        Returns:
            list: [checkpointer, csv_logger, tensorboard, early_stopping,
            lr_reducer].
        """
        monitor = "val_dice_coef"
        # Save only the best weights (judged by val_dice_coef) each epoch.
        # NOTE(review): the filename pattern embeds val_loss while the
        # save decision uses val_dice_coef — confirm that is intended.
        checkpointer = ModelCheckpoint(filepath=os.path.join(
            self.model_dir, 'weights.{epoch:02d}-{val_loss:.2f}.hdf5'),
                                       verbose=0,
                                       save_best_only=True,
                                       save_weights_only=True,
                                       monitor=monitor,
                                       mode='max')
        # Append per-epoch training history to a semicolon-separated CSV.
        csv_logger = CSVLogger(os.path.join(self.log_dir, 'log.csv'),
                               append=True,
                               separator=';')

        # Route warnings and info messages to a log file.
        set_logger(os.path.join(self.log_dir, 'train_val.log'))

        # TensorBoard summaries (graph only, no activation histograms).
        tensorboard = TensorBoard(log_dir=self.log_dir,
                                  write_graph=True,
                                  histogram_freq=0)

        # Stop training when val_dice_coef has not improved for 5 epochs
        # (the code uses patience=5, not 3 as an earlier comment claimed).
        early_stopping = EarlyStopping(monitor=monitor,
                                       patience=5,
                                       mode='max',
                                       verbose=0)
        # Shrink the learning rate when the monitored metric plateaus.
        lr_reducer = ReduceLROnPlateau(monitor=monitor,
                                       factor=0.05,
                                       cooldown=0,
                                       patience=5,
                                       verbose=0,
                                       mode='max')

        return [
            checkpointer, csv_logger, tensorboard, early_stopping, lr_reducer
        ]
示例#5
0
 def __init__(self, command_prefix: str, intents: discord.Intents,
              **kwargs):
     """Initialise the bot: logging, in-memory state, then external
     connections (Reddit, persisted data)."""
     super().__init__(command_prefix=command_prefix,
                      intents=intents,
                      **kwargs)
     # Logging and in-memory state.
     self.logger = set_logger()
     self.running = True
     self.verification_queue = {}
     self.event_queue = Queue()
     self.obj_cache = Cache()
     self.default_invite = (
         "https://discord.com/api/oauth2/authorize"
         "?client_id=767842408758771742&permissions=51200&scope=bot")
     # External connections come last so logging/state are ready first.
     self.reddit = self.create_reddit_connection()
     self.load_data()
示例#6
0
def main(parser, appConfig):
    """Parse options, set up logging and a Jinja2 template environment,
    then run ``generate`` for every ``*.yaml`` config file found in the
    input project directory.

    Parameters
    ----------
    parser : OptionParser
        Source of command-line options (must provide log/input options).
    appConfig : object
        Application configuration (unused here beyond pass-through).
    """
    import utils.logger as logger

    (options, args) = parser.parse_args(args=None, values=None)
    loggerConfig = {
        'logging_config': options.log_config,
        'file': APPNAME + '.log',
        'name': options.logger
    }
    logger_dictConfig = logger.set_logger(loggerConfig, options.log_path,
                                          options.log_level)

    # Template environment rooted at <input>/templates.
    tmplProjectDir = options.input

    templatesDir = os.path.join(tmplProjectDir, "templates")
    j2env = Environment(loader=FileSystemLoader(templatesDir))

    # Process every YAML config file in the project directory.
    num = 0
    for name in os.listdir(tmplProjectDir):
        _, extname = os.path.splitext(name)
        if extname == ".yaml":
            configYaml = os.path.join(tmplProjectDir, name)

            if not os.path.isdir(configYaml):
                # Fixes: the file handle was previously never closed on
                # error, and yaml.load() without an explicit Loader is
                # deprecated/unsafe. Plain config files need no arbitrary
                # object tags, so safe_load is the right loader.
                with open(configYaml) as fd:
                    dictcfg = yaml.safe_load(fd)

                num += 1
                util.info("[%d] processing config: %s" % (num, configYaml))
                generate(parser, dictcfg, templatesDir, j2env, options)

    util.info("success: total %d config file(s) processed." % num)
示例#7
0
import os
import sys
import time

import pandas as pd
import numpy as np
from matplotlib import pyplot as plt

sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from utils.folder import make_folder
from utils.logger import set_logger

# Module-level logger and directory layout for the trend-analysis script.
logger = set_logger("trend")
# NOTE(review): abspath('..') resolves against the *current working
# directory*, not this file's location — confirm the script is always
# launched from the expected directory.
base_path = os.path.dirname(os.path.abspath('..'))
data_path = os.path.join(base_path, 'data')
save_path = os.path.join(data_path, 'stock')
folder_path = os.path.join(save_path, 'folder')
# Daily candle data snapshot (Korean: "daily bars", dated 2019-03-23).
day_path = os.path.join(folder_path, '일봉_20190323')

invest_path = os.path.join(base_path, 'invest')
# Korean: "trending stocks" working directory.
trend_path = os.path.join(invest_path, '3.추세주')
result_path = os.path.join(trend_path, 'result')

# Ensure the output directories exist before any processing starts.
make_folder(invest_path, trend_path, result_path)


def load_day_data(files, code_list, standard_day):
    df_list = []

    for i, file in enumerate(files):
        try:
示例#8
0
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from utils.folder import make_folder
from utils.logger import set_logger


# Directory layout for the per-minute crypto price crawler.
# NOTE(review): abspath('..') depends on the current working directory.
base_path = os.path.dirname(os.path.abspath('..'))
data_path = os.path.join(base_path, 'data')
stock_path = os.path.join(data_path, 'stock')
folder_path = os.path.join(stock_path, 'folder')
# Korean: "per-minute trade prices, cryptocurrency".
minute_path = os.path.join(folder_path, '분별매매가격_가상화폐')

code_path = os.path.join(base_path, 'code')
crawler_path = os.path.join(code_path, 'crawler')

# Create every directory up front, then install the module logger.
make_folder(base_path, data_path, stock_path, folder_path, minute_path, code_path, crawler_path)
logger = set_logger('minute_logger')


def main(code, dates):
    for date in dates:
        tables = []

        for num in range(1, 43):
            page = "https://finance.naver.com/item/sise_time.nhn?code=" + str(code) + "&thistime=2019" + str(date) + "160000&page=" + str(num)
            html = urlopen(page)
            bs_object = BeautifulSoup(html, "html.parser")
            bs_object_table = bs_object.table
            tables.append(bs_object_table)
            sleep(0.1)

        # data frame
示例#9
0
# CRD coordinates (group / version / plural) read from default.cfg for
# each custom resource type handled by this watcher.
GROUP_VMI = config_raw.get('VirtualMachineImage', 'group')
PLURAL_VM_DISK = config_raw.get('VirtualMachineDisk', 'plural')
VERSION_VM_DISK = config_raw.get('VirtualMachineDisk', 'version')
GROUP_VM_DISK = config_raw.get('VirtualMachineDisk', 'group')
PLURAL_VM_SNAPSHOT = config_raw.get('VirtualMachineSnapshot', 'plural')
VERSION_VM_SNAPSHOT = config_raw.get('VirtualMachineSnapshot', 'version')
GROUP_VM_SNAPSHOT = config_raw.get('VirtualMachineSnapshot', 'group')
# NOTE(review): 'VirtualMahcineBlockDevUit' is misspelled, but it must
# match the section name actually present in default.cfg — do not "fix"
# this string without also renaming the config section.
PLURAL_BLOCK_DEV_UIT = config_raw.get('VirtualMahcineBlockDevUit', 'plural')
VERSION_BLOCK_DEV_UIT = config_raw.get('VirtualMahcineBlockDevUit', 'version')
GROUP_BLOCK_DEV_UIT = config_raw.get('VirtualMahcineBlockDevUit', 'group')

# Label selector identifying resources bound to this host.
LABEL = 'host=%s' % (socket.gethostname())

TIMEOUT = config_raw.get('WatcherTimeout', 'timeout')

logger = logger.set_logger(os.path.basename(__file__), '/var/log/virtctl.log')

'''
Handle support CMDs settings in default.cfg.
NOTE: if the key ends up with 'WithNameField' means that the CMD is using 'name' variable as index.
      The key ends up with 'WithDomainField' means that the CMD is using 'domain' variable as index.
      The key ends up with 'WithVolField' means that the CMD is using 'vol' variable as index.
'''
# Registries populated below by scanning config sections whose name
# contains 'SupportCmds'.
ALL_SUPPORT_CMDS = {}
ALL_SUPPORT_CMDS_WITH_NAME_FIELD = {}
ALL_SUPPORT_CMDS_WITH_DOMAIN_FIELD = {}
ALL_SUPPORT_CMDS_WITH_VOL_FIELD = {}
ALL_SUPPORT_CMDS_WITH_SNAPNAME_FIELD = {}

for k,v in config_raw._sections.items():
    if string.find(k, 'SupportCmds') != -1:
示例#10
0
        split = arg.find("=")
        setattr(args, arg[2:split], evol(arg[split+1:]))
    return args


if __name__ == "__main__":

    args = parse_args()

    # Make the run name unique per cluster job.
    if args.jobid is not None:
        args.run_name = args.run_name + '-' + args.jobid

    # Bug fix: the bare `except:` here also swallowed KeyboardInterrupt
    # and SystemExit. Only a missing attribute should trigger the
    # setattr fallback.
    try:
        args.shortname = args.run_name
    except AttributeError:
        setattr(args, "shortname", args.run_name)

    # Create the directory this run saves into.
    args.saverootpath = osp.abspath(args.saverootpath)
    savepath = osp.join(args.saverootpath, args.run_name)
    if not osp.exists(savepath):
        os.makedirs(savepath)

    logger = set_logger(name=args.shortname, level=args.loglevel,
                        filepath=osp.join(savepath, 'log.txt'))
    logger.info("=> Training mode")
    train(args)
示例#11
0
import traceback
from xml.etree.ElementTree import fromstring
from xmljson import badgerfish as bf
from json import dumps, loads
from sys import exit

from utils.exception import *
# from utils.libvirt_util import get_pool_info, get_volume_xml, get_volume_path, get_volume_snapshots, is_pool_started, \
#     is_pool_defined
from utils.libvirt_util import get_xml, vm_state
from utils.utils import *
from utils import logger

LOG = "/var/log/kubesdvm.log"

logger = logger.set_logger(os.path.basename(__file__), LOG)


class Executor(object):
    def __init__(self, cmd, params, with_result=False):
        if cmd is None or cmd == "":
            raise Exception("plz give me right cmd.")
        if not isinstance(params, dict):
            raise Exception("plz give me right parameters.")

        self.params = params
        self.cmd = cmd
        self.with_result = with_result

    def get_cmd(self):
        cmd = self.cmd
示例#12
0
'''
Import third party libs
'''
from kubernetes import client, watch
from kubernetes.client import V1DeleteOptions
from libvirt import libvirtError
'''
Import local libs
'''
from utils.libvirt_util import destroy, \
    create, is_vm_active, is_vm_exists
from utils import logger, constants
from utils.utils import ExecuteException, \
    report_failure, randomUUID, now_to_datetime, get_hostname_in_lower_case, UserDefinedEvent

logger = logger.set_logger(os.path.basename(__file__),
                           constants.VIRTCTL_DEBUG_LOG)


class Executor():
    def __init__(self, policy, invoke_cmd, query_cmd):
        """Keep the policy and the invoke/query command lines for later
        use by execute()."""
        self.policy, self.invoke_cmd, self.query_cmd = (
            policy, invoke_cmd, query_cmd)

    def execute(self):

        if not self.invoke_cmd or not self.query_cmd:
            logger.debug('Missing the command.')
            return result('', 'Error', 'Missing the command.')

        invoke_process = subprocess.Popen(self.invoke_cmd,
示例#13
0
from utils import CUR_TIME
from utils import logger
from utils import FLAGS

# ================================
# test logging
# ================================
# utils.set_logging(stream=False)
# utils.set_logger(stream=True)
# logger.info("logger111111")
# Route log output to stdout, then exercise the helper methods.
logger.set_logger(stream=True)
logger.info(CUR_TIME)
logger.newline()
logger.error("newline beneath~")
logger.newline(2)
logger.info("haha")

# ================================
# test FLAGS
# ================================
# Flags with a concrete default keep that value unless overridden.
FLAGS.add("--aa", type=float, default=11., help="doc for dd")
logger.info("aa: {}".format(FLAGS.get('aa')))
# for flag that should be overwrite later, don't set default
FLAGS.add("--bb", type=int, default=None, help="doc for dd")
# overwrite_none presumably fills only flags still set to None — verify.
if FLAGS.get('aa') == 11:
    FLAGS.overwrite_none(bb=15)

FLAGS.add("--cc", type=bool, default=False, help="doc for dd")
FLAGS.add("--dd", type=str, default="dddddd", help="doc for dd")
# for flag that should be overwrite later, don't set default
FLAGS.add("--ff", type=str, help="doc for dd")
示例#14
0
import os
import sys

BASE_DIR = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(BASE_DIR)

from utils.logger import set_logger
logger = set_logger(__name__)

#----------------------------------------------

import numpy as np

#----------------------------------------------


def _get_filepath_vkitti3d_dataset(dataset_path):
    """
  VKITTK3Dのデータセットのファイル名を取得する
  input: 
    dataset_path: path/to/vkitti3d_dataset_v1.0/*
  output:
    ALL_FILES: ex[..., path/to/vkitti3d_dataset_v1.0/06/0020_00500.npy']
  """
    import glob
    folders = glob.glob(
        dataset_path
    )  #[... , '/Users/washizakikai/data/vkitti3d_dataset_v1.0/06']
    ALL_FILES = []
    for f in folders:
        ALL_FILES += glob.glob(f + "/*")
示例#15
0
def main(config, parser):
    """Dispatch logger-worker management actions from CLI options:
    list / add / remove workers, reset positions, or start them up."""
    import utils.logger as logger

    options, args = parser.parse_args(args=None, values=None)

    log_config = logger.set_logger(config['logger'], options.log_path, options.log_level)

    def worker_kwargs():
        # Fresh dict per call, mirroring the original inline literals.
        return {
            'logger_config': log_config,
            'logger_stash': options.logger_stash,
            'batch_rows': options.batch_rows,
            'end_time': options.end_time,
            'end_rowid': options.end_rowid
        }

    workers = {}
    if config['loggers'] and len(config['loggers']):
        workers = load_logger_workers('loggers', config['loggers'], worker_kwargs())

    if len(workers) > LOGGER_WORKERS_MAX:
        elog.error("too many logger workers. please increase LOGGER_WORKERS_MAX and try!")
        exit(-1)

    found_workers = list_logger_workers(log_config, config['loggers_abspath'])

    # --list: print every worker discovered on disk and stop.
    if options.list_logger_workers:
        for worker_name in found_workers:
            elog.info("found worker: %s (%s/%s.py)", worker_name, config['loggers_abspath'], worker_name)
        elog.force("total %d workers: %r", len(found_workers), found_workers)
        return

    # --add / --remove: mutate the configured worker set and stop.
    if options.add_logger:
        add_logger(config, found_workers)
        return

    if options.remove_logger:
        remove_logger(config, found_workers)
        return

    # --force with no configured workers: load everything found on disk.
    if len(workers) == 0 and options.force:
        workers = load_logger_workers('loggers', found_workers, worker_kwargs())

    if options.reset_logger_position:
        if len(workers):
            reset_logger_position(workers, config['loggers_abspath'], options.start_time, options.start_rowid)
        else:
            elog.error("--reset-position ignored: logger worker not found. use --force for all.")

    if options.startup:
        if len(workers):
            startup(workers, config)
        else:
            elog.error("--startup ignored: logger worker not found. use --force for all.")
示例#16
0
# Template for a translation response message. Per-request fields
# (_MSGID, _MSGIDRESPONDTO, _TRANSLATIONRESULTS) are filled in later;
# the flag starts as _FAIL_FLAG until a translation succeeds.
_TRANS_RESPONSE = {
    _MSGTYPE: _TRANS_RESP_TYPE,
    _MSGID: "",
    _MSGIDRESPONDTO: "",
    _MSGFLAG: _FAIL_FLAG,
    _TRANSLATIONRESULTS: ""
}
# Template for a health/status response message.
_STATUS_RESPONSE = {
    _MSGTYPE: _STA_MSG_TYPE,
    _MSGID: "",
    _MSGIDRESPONDTO: "",
    _MSGFLAG: _HEALTH_FLAG
}

# Module-wide logger writing to api_service.log.
logger = set_logger("api_service.log")


def create_app():
    app = Flask(__name__)
    app.config.from_object(Params)

    @app.route('/', methods=['POST'])
    def index():
        return json.dumps({"found": "ok"})

    @app.route('/translator', methods=['POST'])
    def translator():
        if request.method == 'POST':
            msgType = request.json[_MSGTYPE]
            msgID = request.json[_MSGID]
示例#17
0
import os
import time
from collections import deque
from utils.logger import set_logger

logger = set_logger()

# Ternary truth values used throughout the solver: a variable is TRUE,
# FALSE, or not yet assigned (UNASSIGN).
TRUE = 1
FALSE = 0
UNASSIGN = -1


class FileFormatError(Exception):
    """ Raised when file format is not in DIMACS CNF format """
    pass


class Solver:
    def __init__(self, filename):
        """Read the CNF file and initialise all solver bookkeeping."""
        logger.info('========= create pysat from %s =========', filename)
        self.filename = filename
        self.cnf, self.vars = Solver.read_file(filename)
        # Clauses learned while solving (populated elsewhere).
        self.learnts = set()
        # Every variable starts unassigned at decision level 0.
        self.assigns = dict.fromkeys(list(self.vars), UNASSIGN)
        self.level = 0
        self.nodes = {var: ImplicationNode(var, UNASSIGN)
                      for var in self.vars}
        self.branching_vars = set()
        self.branching_history = {}  # level -> branched variable
        self.propagate_history = {}  # level -> propagate variables list
        self.branching_count = 0
import sys
import datetime
from time import sleep
from multiprocessing import Pool, Value
from itertools import repeat

import pandas as pd
from urllib.request import urlopen
from bs4 import BeautifulSoup

sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))

from utils.folder import make_folder
from utils.logger import set_logger

# Module logger for the financial-summary crawler.
logger = set_logger('basic_finance_crawler')

# NOTE(review): abspath('..') resolves against the current working
# directory, not this file's location.
base_path = os.path.dirname(os.path.abspath('..'))
data_path = os.path.join(base_path, 'data')
stock_path = os.path.join(data_path, 'stock')
folder_path = os.path.join(stock_path, 'folder')
# Korean: "financial statements", snapshot dated 2019-03-31.
summary_path = os.path.join(folder_path, '재무제표_20190331')

# Create the whole directory chain before crawling starts.
make_folder(base_path, data_path, stock_path, folder_path, summary_path)


def get_summary_finance(code):
    url = 'http://media.kisline.com/highlight/mainHighlight.nice?nav=1&paper_stock=' + str(
        code)

    try:
분봉 데이터를 가져오는 코드
"""
import os
import sys
import time

from multiprocessing import Pool, Value
from pykrx import Krx
import pandas as pd

sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from utils.folder import make_folder
from utils.logger import set_logger


# Logger plus directory layout for the daily-price crawler.
logger = set_logger('day_crawler')
# NOTE(review): abspath('..') depends on the current working directory.
base_path = os.path.dirname(os.path.abspath('..'))
data_path = os.path.join(base_path, 'data')
save_path = os.path.join(data_path, 'stock')
folder_path = os.path.join(save_path, 'folder')
# Korean: "daily trade prices", snapshot dated 19-04-14.
day_path = os.path.join(folder_path, '일별매매가격_190414')

make_folder(base_path, data_path, save_path, folder_path, day_path)
# Shared KRX client used by the crawl functions below.
krx = Krx()


def get_day_info(code, start="20100101", end="20190414"):
    """Fetch daily OHLCV data for *code* and save it as <day_path>/<code>.csv.

    Parameters
    ----------
    code : str
        Ticker code accepted by ``Krx.get_market_ohlcv``.
    start, end : str, optional
        Date range in YYYYMMDD form. The defaults keep the previously
        hard-coded range, so existing callers behave identically.
    """
    df = krx.get_market_ohlcv(start, end, code)
    df.to_csv(os.path.join(day_path, code + '.csv'))
    logger.info("successfully saved " + str(code))
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from utils.folder import make_folder
from utils.logger import set_logger


# Directory layout for the Naver comment crawler.
# NOTE(review): abspath('..') depends on the current working directory.
base_path = os.path.dirname(os.path.abspath('..'))
data_path = os.path.join(base_path, 'data')
stock_path = os.path.join(data_path, 'stock')
folder_path = os.path.join(stock_path, 'folder')
# Korean: "Naver comments", snapshot dated 19-04-14.
comment_path = os.path.join(folder_path, '네이버댓글_190414')

code_path = os.path.join(base_path, 'code')
crawler_path = os.path.join(code_path, 'crawler')

# Create the directories, then set up the logger and shared KRX client.
make_folder(base_path, data_path, stock_path, folder_path, comment_path, code_path, crawler_path)
logger = set_logger('comment_logger')
krx = Krx()


def get_naver_comment(code):
    comments = []
    views = []
    page = 40

    for num in range(1, page):
        page = "https://finance.naver.com/item/board.nhn?code=" + str(code) + "&page=" + str(num)
        html = urlopen(page)
        soup = BeautifulSoup(html, "html.parser")
        comment = soup.select('span.tah.gray03')

        for i in range(len(comment)):
示例#21
0
文件: vnclet.py 项目: 742362144/vnc
class parser(ConfigParser.ConfigParser):
    """ConfigParser variant that preserves option-name case.

    (Python 2 code: uses the legacy ``ConfigParser`` module name.)
    """

    def __init__(self, defaults=None):
        # Bug fix: the caller-supplied *defaults* was previously
        # discarded — the base class was always given defaults=None.
        ConfigParser.ConfigParser.__init__(self, defaults=defaults)

    def optionxform(self, optionstr):
        # Return the option name unchanged instead of lower-casing it,
        # so keys in default.cfg keep their original case.
        return optionstr


# Read default.cfg sitting next to this script, using the
# case-preserving parser defined earlier in this file.
cfg = "%s/default.cfg" % os.path.dirname(os.path.realpath(__file__))
config_raw = parser()
config_raw.read(cfg)

# Kubernetes API token file path and this node's identity.
TOKEN = config_raw.get('Kubernetes', 'token_file')
HOSTNAME = socket.gethostname()
logger = logger.set_logger(os.path.basename(__file__), '/var/log/vnclet.log')


class ClientDaemon(CDaemon):
    """Daemon wrapper around CDaemon that also records a *name*.

    NOTE(review): Python 2 code — ``022`` is a legacy octal literal
    (0o22 in Python 3) passed as the daemon's umask.
    """

    def __init__(self,
                 name,
                 save_path,
                 stdin=os.devnull,
                 stdout=os.devnull,
                 stderr=os.devnull,
                 home_dir='.',
                 umask=022,
                 verbose=1):
        # Delegate daemonisation setup to the base class, then keep the
        # daemon's display name for later use.
        CDaemon.__init__(self, save_path, stdin, stdout, stderr, home_dir,
                         umask, verbose)
        self.name = name
示例#22
0
    # Some Arguments Check
    assert opt.labeled > 0. and opt.labeled < 1.0
    assert opt.unlabeled > 0. and opt.unlabeled <= 1.0
    return opt


# Parse command-line options once at module load.
opt = main()

####################### Output path, logger, device and random seed configuration #################

# In testing mode reuse the saved model's path; otherwise derive a fresh
# experiment path from the hyperparameters.
exp_path = opt.read_model_path if opt.testing else hyperparam_pseudo_method(
    opt)
if not os.path.exists(exp_path):
    os.makedirs(exp_path)

logger = set_logger(exp_path, testing=opt.testing)
logger.info("Parameters: " + str(json.dumps(vars(opt), indent=4)))
logger.info("Experiment path: %s" % (exp_path))
# Two torch devices: semantic parsing (sp) and question generation (qg).
sp_device, qg_device = set_torch_device(opt.deviceId[0]), set_torch_device(
    opt.deviceId[1])
set_random_seed(opt.seed, device='cuda')

################################ Vocab and Data Reader ###########################

# A 'copy__' marker in a model path signals a copy-mechanism vocabulary.
sp_copy, qg_copy = 'copy__' in opt.read_sp_model_path, 'copy__' in opt.read_qg_model_path
sp_vocab, qg_vocab = Vocab(opt.dataset, task='semantic_parsing',
                           copy=sp_copy), Vocab(opt.dataset,
                                                task='question_generation',
                                                copy=qg_copy)
logger.info("Semantic Parsing model vocabulary ...")
logger.info("Vocab size for input natural language sentence is: %s" %
示例#23
0
                ret[1][0]
            ])
        predictions.append(prediction)
    return predictions


if __name__ == "__main__":
    args = parse_args()
    if args.jobid is not None:
        args.run_name = args.run_name + '-' + args.jobid

    if args.shortname is None:
        args.shortname = args.run_name
    np.random.seed(args.seed)
    random.seed(args.seed)
    logger = set_logger(name=args.shortname, level=args.loglevel)
    if args.mode == "train":
        logger.info("=> Training mode")
        train(args)
    else:
        logger.info("=> Evaluation mode")

        n_features = 35 if args.no_reflex else 36
        eval_data, eval_loader = get_eval_dataset(args)

        if args.pixor_fusion:
            pixor = PixorNet_Fusion(n_features,
                                    groupnorm=args.groupnorm,
                                    resnet_type=args.resnet_type,
                                    image_downscale=args.image_downscale,
                                    resnet_chls=args.resnet_chls)