Example #1
    def __init__(self, cfg, args, video_path):

        rospy.init_node('coordinate_publisher')
        rospy.loginfo('Video stream has started')

        self.cfg = cfg
        self.args = args
        self.video_path = video_path
        self.logger = get_logger("root")

        use_cuda = args.use_cuda and torch.cuda.is_available()
        if not use_cuda:
            warnings.warn("Running in cpu mode which maybe very slow!",
                          UserWarning)

        if args.display:
            cv2.namedWindow("test", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("test", args.display_width, args.display_height)

        if args.cam != -1:
            print("Using webcam " + str(args.cam))
            self.vdo = cv2.VideoCapture(args.cam)
        else:
            self.vdo = cv2.VideoCapture()
        self.detector = build_detector(cfg, use_cuda=use_cuda)
        self.deepsort = build_tracker(cfg, use_cuda=use_cuda)
        self.class_names = self.detector.class_names
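All of the snippets on this page call some project-specific get_logger helper. As a rough sketch of what such a helper usually wraps (the name, signature, and format string here are assumptions, not code from any of these projects):

import logging

def get_logger(name, log_file=None, level=logging.INFO):
    # Hypothetical helper: return a configured logging.Logger.
    logger = logging.getLogger(name)
    if not logger.handlers:  # avoid adding duplicate handlers on repeated calls
        fmt = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler = logging.FileHandler(log_file) if log_file else logging.StreamHandler()
        handler.setFormatter(fmt)
        logger.addHandler(handler)
    logger.setLevel(level)
    return logger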
Example #2
    def __init__(self, cfg):

        self.gpu_id = cfg.SYSTEM.GPU_ID
        self.num_workers = cfg.SYSTEM.NUM_WORKERS
        self.train_dir = cfg.DATASET.TRAIN_DIR
        self.val_dir = cfg.DATASET.VAL_DIR
        self.test_dir = cfg.DATASET.TEST_DIR
        self.sub_dir = cfg.OUTPUT_DIR.SUB_DIR
        self.log_dir = cfg.OUTPUT_DIR.LOG_DIR 
        self.out_dir = cfg.OUTPUT_DIR.OUT_DIR
        self.model_name = cfg.MODEL.MODEL_NAME
        self.train_batch_size = cfg.TRAIN_PARAM.TRAIN_BATCH_SIZE
        self.val_batch_size = cfg.TRAIN_PARAM.VAL_BATCH_SIZE
        self.test_batch_size = cfg.TRAIN_PARAM.TEST_BATCH_SIZE
        self.momentum = cfg.TRAIN_PARAM.MOMENTUM
        self.weight_decay = cfg.TRAIN_PARAM.WEIGHT_DECAY
        self.num_epochs = cfg.TRAIN_PARAM.NUM_EPOCHS
        self.lr = cfg.TRAIN_PARAM.LR
        self.val_interval = cfg.TRAIN_PARAM.VAL_INTERVAL
        self.print_interval = cfg.TRAIN_PARAM.PRINT_INTERVAL
        self.min_save_epoch = cfg.TRAIN_PARAM.MIN_SAVE_EPOCH
        self.real_json = '69.json'
        
        create_dir(self.sub_dir)
        create_dir(self.out_dir)
        create_dir(self.log_dir)
        create_dir(os.path.join(self.sub_dir,self.model_name))
        create_dir(os.path.join(self.out_dir,self.model_name))
        self.logger = get_logger(os.path.join(self.log_dir, self.model_name+'.log'))
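Example #2 also depends on a create_dir helper. A minimal sketch, assuming it simply creates missing directories:

import os

def create_dir(path):
    # Hypothetical helper: create the directory if it does not exist yet.
    os.makedirs(path, exist_ok=True)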
Example #3
def tickers_command(args):
    logger = get_logger('tickers_command')

    sql = '''
            INSERT INTO tiingo_ticker_list (ticker, start_date, end_date, asset_type, exchange, price_currency, updated_on)
            VALUES (%(ticker)s, %(startDate)s, %(endDate)s, %(assetType)s, %(exchange)s, %(priceCurrency)s, current_date)
            ON CONFLICT (ticker) DO UPDATE
            SET start_date  = excluded.start_date,
                end_date    = excluded.end_date,
                asset_type  = excluded.asset_type,
                exchange    = excluded.exchange,
                price_currency = excluded.price_currency,
                updated_on  = current_date;
                '''

    stocks = tiingohelper.client.list_stock_tickers()
    logger.info('[%d] stocks are returned from Tiingo.', len(stocks))
    stocks = process_tickers_data(stocks)
    if args.yes:
        pg.insert(sql, stocks)
    logger.info('Stocks are updated.')

    etfs = tiingohelper.client.list_etf_tickers()
    logger.info('[%d] ETFs are returned from Tiingo.', len(etfs))
    etfs = process_tickers_data(etfs)
    if args.yes:
        pg.insert(sql, etfs)
    logger.info('ETFs are updated.')

    funds = tiingohelper.client.list_fund_tickers()
    logger.info('[%d] funds are returned from Tiingo.', len(funds))
    funds = process_tickers_data(funds)
    if args.yes:
        pg.insert(sql, funds)
    logger.info('Funds are updated.')
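The INSERT ... ON CONFLICT ... DO UPDATE statement above is PostgreSQL's upsert, keyed on the ticker column. The pg.insert wrapper is project-specific; a minimal sketch built on psycopg2 (the connection string is a placeholder) might look like:

import psycopg2

def insert(sql, rows, dsn='dbname=quotes'):
    # Hypothetical wrapper: run the parameterized statement once per row dict.
    with psycopg2.connect(dsn) as conn:
        with conn.cursor() as cur:
            cur.executemany(sql, rows)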
Example #4
    def __init__(self, args):
        self.logger = get_logger('root')
        self.txt_logger = txt_logger(
            os.path.join(args.save_path, args.dataset_name, 'info.txt'))

        self.dataset_name = args.dataset_name
        self.images_path = os.path.join(args.image_sequence_path,
                                        self.dataset_name, "img1")
        assert os.path.isdir(self.images_path), "Images path error"
        # Sort for a deterministic frame order (os.listdir order is arbitrary)
        self.imgs_filenames = sorted(os.listdir(self.images_path))
        self.frame_length = len(self.imgs_filenames)

        self.gt_file_path = os.path.join(args.image_sequence_path,
                                         self.dataset_name, "gt/gt.txt")
        assert os.path.isfile(self.gt_file_path), "gt file path error"

        self.save_path = os.path.join(args.save_path, self.dataset_name)

        os.makedirs(os.path.join(self.save_path, 'train'), exist_ok=True)

        self.frame_interval = args.frame_interval
        self.train_rate = args.train_rate
        self.end_frame = int(self.frame_length * self.train_rate) - 1

        first_img = cv2.imread(
            os.path.join(self.images_path, self.imgs_filenames[0]))
        self.im_width = first_img.shape[1]
        self.im_height = first_img.shape[0]
Example #5
    def __init__(self, cfg, args, video_path):
        self.cfg = cfg
        self.args = args
        self.video_path = video_path
        self.logger = get_logger("root")
        self.logger.info(f"Video rate is {self.video_path}")

        use_cuda = args.use_cuda and torch.cuda.is_available()
        if not use_cuda:
            warnings.warn("Running in cpu mode which maybe very slow!",
                          UserWarning)

        if args.display:
            cv2.namedWindow("test", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("test", args.display_width, args.display_height)

        if args.cam != -1:
            print("Using webcam " + str(args.cam))
            self.vdo = cv2.VideoCapture(args.cam)
        else:
            self.vdo = cv2.VideoCapture()

        # Uniformly sample frames to save resources
        # Copy the previous result when a frame is skipped
        self.logger.info(f"Sample rate is {args.sample_rate}")
        self.skip_frame = int(1 / args.sample_rate)

        self.logger.info(f"Detection model is set to {args.detection_model}")
        self.detector = build_detector(args.detection_model,
                                       cfg,
                                       use_cuda=use_cuda)
        self.deepsort = build_tracker(cfg, use_cuda=use_cuda)
        self.class_names = self.detector.class_names
        self.temp_result = []
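The skip_frame value above drives uniform sampling: with sample_rate 0.25, one frame in four is processed and the previous result is copied for the rest. A sketch of the loop this sets up (the run loop is not shown in the snippet, and the detector/tracker call signatures are assumptions):

# Hypothetical run loop illustrating the skip_frame sampling configured above.
idx, last = 0, None
while self.vdo.grab():
    if idx % self.skip_frame == 0:
        _, frame = self.vdo.retrieve()
        bbox, conf, _ = self.detector(frame)            # assumed detector API
        last = self.deepsort.update(bbox, conf, frame)  # assumed tracker API
    self.temp_result.append(last)  # skipped frame: copy the previous result
    idx += 1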
Example #6
def status_command(args):
    logger = get_logger('status_command')

    files = os.listdir(args.inputdir)
    files = fnmatch.filter(files, '*.csv.gz')

    ds = {}
    for f in files:
        name, rest = f.split('_', 1)
        timestamp, _ = rest.split('.', 1)

        if name not in ds:
            ds[name] = set()

        ds[name].add(timestamp)
    
    tables = dict([(x[1], x[0]) for x in dataset.DATASET])

    rst = {}
    for name in ds:

        timestamps = ds[name]
        if name not in tables:
            raise Exception('No table found for dataset [%s].' % name)
        tblname = tables[name]

        tmp = pg.select('select quote_date, count(1) as count from %(table)s group by quote_date' % {'table': tblname})

        dates = set([x[0].strftime('%Y-%m-%d') for x in tmp])

        missing = sorted(list(timestamps - dates))

        rst[name] = {'Table': tblname, 'Count': len(missing), 'Missing': '\n'.join(missing)}

    console.print_table(rst)
Example #7
    def __init__(self, args):
        self.logger = get_logger('root')
        self.txt_logger = txt_logger(
            os.path.join(args.save_path, args.dataset_name, 'info.txt'))

        self.dataset_name = args.dataset_name
        self.images_path = os.path.join(args.image_sequence_path,
                                        self.dataset_name, "img1")
        assert os.path.isdir(self.images_path), "Images path error"

        self.gt_file_path = os.path.join(args.image_sequence_path,
                                         self.dataset_name, "gt/gt.txt")
        assert os.path.isfile(self.gt_file_path), "gt file path error"

        self.save_path = os.path.join(args.save_path, self.dataset_name)
        if os.path.exists(self.save_path):
            shutil.rmtree(self.save_path)
        os.makedirs(os.path.join(self.save_path, 'train'), exist_ok=True)
        os.makedirs(os.path.join(self.save_path, 'test'), exist_ok=True)
        os.makedirs(os.path.join(self.save_path, 'query'), exist_ok=True)

        self.sampling_rate = args.sampling_rate
        self.sampled_imgs_filenames = self.sample_frames()

        self.partition_rate = args.partition_rate
        self.gt_result = []
        self.id_set = {'train_id_set': set(), 'test_id_set': set()}
        self.id_list = []
        self.id_images_details = {}

        first_img = cv2.imread(
            os.path.join(self.images_path, self.sampled_imgs_filenames[0]))
        self.im_width = first_img.shape[1]
        self.im_height = first_img.shape[0]
Example #8
    def __init__(self, trader_api, user_id, password, tradingday):
        self.logger = log.get_logger(category="TraderSpi")

        shfetraderapi.CShfeFtdcTraderSpi.__init__(self)

        self.trader_api = trader_api

        self.userId = user_id
        self.password = password
        self.tradingday = tradingday

        self.is_connected = False
        self.is_logined = False

        self.cache_md_status = dict()

        self.request_id = 0
        self.lock = threading.Lock()

        self.private_worker = False
        # Initialize a lock file (prevents multiple processes from repeatedly sending instrument status info)
        try:
            os.mknod("private_worker.con", 0o600)
            self.private_worker = True
        except OSError:
            pass
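The os.mknod call acts as a crude cross-process lock: only the first process to create private_worker.con succeeds, so only one worker pushes instrument status. A Python 3 sketch of the same idea using O_CREAT|O_EXCL (the filename is taken from the example):

import os

def acquire_private_worker_lock(path="private_worker.con"):
    # Hypothetical equivalent of the mknod-based single-worker lock above.
    try:
        fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o600)
        os.close(fd)
        return True   # this process created the lock file and owns the role
    except FileExistsError:
        return False  # another process already holds it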
Example #9
def test():
    model.load(os.path.join(save_dir, 'model.tar'))
    model.build_test_loader()
    accm = Accumulator(*model.test_metrics)
    logger = get_logger(exp_id, os.path.join(save_dir, 'test.log'))
    model.test(accm)
    logger.info(accm.info(header='test'))
Example #10
    def __init__(self, desc):

        self.desc = desc
        # A dictionary of Config Parameters
        self.config = get_default_argument(desc=self.desc)

        self.project_dir = self.config['project_dir'] if self.config['project_dir'] != "" \
            else str(BASE_DIR)

        self.project_log = self.config["project_log"]
        if not exists(self.project_log):
            self.project_log = join(os.path.dirname(self.project_dir), 'logs',
                                    'log.txt')
            create_dir(os.path.dirname(self.project_log))

        # logger interface
        self.isDebug = self.config['debug']
        self.logger = get_logger(self.desc, self.project_log, self.isDebug)

        if self.config['config'] is not None:
            with open(self.config['config']) as config_file:
                import yaml
                config_content = yaml.safe_load(config_file)
        self.data = self.config["data"]

        init_rng(seed=0)
        warnings.filterwarnings('ignore')
Example #11
    def __init__(self, bot):
        self.logger = get_logger(__name__)
        self.bot = bot

        # Override default event exception handling
        self.bot.on_error = self.on_error
        self.last_error = []
Example #12
    def __init__(self, cfg, args, video_path):
        self.cfg = cfg
        self.args = args
        self.video_path = video_path
        self.logger = get_logger("root")

        use_cuda = args.use_cuda and torch.cuda.is_available()
        if not use_cuda:
            warnings.warn("Running in cpu mode which maybe very slow!",
                          UserWarning)

        if args.display:
            cv2.namedWindow("test", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("test", args.display_width, args.display_height)

        if args.cam != -1:
            print("Using webcam " + str(args.cam))
            self.vdo = cv2.VideoCapture(args.cam)
        else:
            # Assign a cv2.VideoCapture instance to self.vdo;
            # the instance exposes an open() method for later use
            self.vdo = cv2.VideoCapture()

        # Build the YOLOv3 detector and DeepSORT tracker instances
        self.detector = build_detector(cfg, use_cuda=use_cuda)
        self.deepsort = build_tracker(cfg, use_cuda=use_cuda)
        self.class_names = self.detector.class_names  # class names exposed by the detector
Example #13
def get_current_tradingday(context, conf):
    logger = log.get_logger(category="GetTradingDay")

    logger.info("[get tradingday with %s] begin" %
                (json.dumps(conf, encoding="UTF-8", ensure_ascii=False), ))

    current_trading_day = ""

    trade_system_id = conf.get("tradeSystemId")
    mysql_pool = mysql(configs=context.get("mysql").get(conf.get("mysqlId")))
    mysql_conn = mysql_pool.get_cnx()
    mysql_conn.set_charset_collation('utf8')
    try:
        cursor = mysql_conn.cursor()

        logger.info("[get current trading day]......")
        sql = """SELECT t1.tradingday FROM siminfo.t_tradesystemtradingday t1 WHERE t1.tradesystemid = %s"""
        cursor.execute(sql, (trade_system_id, ))
        row = cursor.fetchone()

        current_trading_day = str(row[0])
        logger.info("[get current trading day] current_trading_day = %s" %
                    (current_trading_day))
    finally:
        mysql_conn.close()

    logger.info("[get tradingday with %s] end" %
                (json.dumps(conf, encoding="UTF-8", ensure_ascii=False), ))

    return current_trading_day
Example #14
def start_md_service(context, conf):
    logger = log.get_logger(category="MdService")
    logger.info("[start stock md service with %s] begin" %
                (json.dumps(conf, encoding="UTF-8", ensure_ascii=False)))

    exchange_conf = context.get("exchange").get(conf.get("targetExchangeId"))

    exchange_front_addr = str(exchange_conf["mdAddress"])

    user_id = conf["userId"]
    password = conf["password"]
    topic_id_list = conf["topicId"]

    xmq_target_conf = context.get("xmq").get(conf.get("targetMQ"))
    target_mq_addr = xmq_target_conf["address"]
    target_mq_topic = xmq_target_conf["topic"]
    msg_queue_pusher = xmq_queue_pusher(target_mq_addr, target_mq_topic)

    md_api = shfemdapi.CShfeFtdcMduserApi_CreateFtdcMduserApi()
    md_handler = MdHandler(md_api, user_id, password)

    for topic_id in topic_id_list:
        md_api.SubscribeMarketDataTopic(topic_id, shfemdapi.TERT_QUICK)

    md_api.RegisterFront(exchange_front_addr)
    md_api.RegisterSpi(md_handler)

    md_handler.set_msg_puber(msg_queue_pusher)

    md_api.Init()

    while not md_handler.is_logined:
        time.sleep(1)

    md_api.Join()
Example #15
    def __init__(self, trigger):
        super().__init__()

        if not hasattr(self, 'logger'):
            self.logger = get_logger(__class__.__name__)

        self.trigger = trigger
Example #16
    def __init__(self, cfg, args, video_path, video_bar, vt_box_dict):
        self.cfg = cfg
        self.args = args
        self.video_path = video_path
        self.video_name = os.path.splitext(os.path.split(video_path)[1])[0]
        self.logger = get_logger("root")
        self.video_bar = video_bar
        self.vt_box_dict = vt_box_dict
        use_cuda = args.use_cuda and torch.cuda.is_available()
        if not use_cuda:
            warnings.warn("Running in cpu mode which maybe very slow!",
                          UserWarning)

        if args.display:
            cv2.namedWindow("test", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("test", args.display_width, args.display_height)

        if args.cam != -1:
            print("Using webcam " + str(args.cam))
            self.vdo = cv2.VideoCapture(args.cam)
        else:
            self.vdo = cv2.VideoCapture()

        self.detector = build_detector(cfg, use_cuda=use_cuda)
        self.deepsort = build_tracker(cfg, use_cuda=use_cuda)
        self.class_names = self.detector.class_names
Example #17
def snap_data(context, conf):
    result_code = 0
    logger = log.get_logger(category="SnapData")

    broker_system_id = conf.get("brokerSystemId")

    logger.info("[snap data %s] begin" %
                (json.dumps(conf, encoding="UTF-8", ensure_ascii=False)))
    mysql_pool = mysql(configs=context.get("mysql").get(conf.get("mysqlId")))
    mysql_conn = mysql_pool.get_cnx()
    mysql_conn.set_charset_collation('utf8')
    try:
        mysql_conn.start_transaction()

        cursor = mysql_conn.cursor()

        logger.info("[snap t_activityinvestorevaluation]......")
        sql = """INSERT INTO snap.t_s_activityinvestorevaluation(TradingDay,ActivityID,TermNo,InvestorID,InitialAsset,PreAsset,CurrentAsset,TotalReturnRate,ReturnRateOf1Day,RankingStatus,PreRanking,Ranking)
                            SELECT DATE_FORMAT(NOW(), '%Y%m%d'),ActivityID,TermNo,InvestorID,InitialAsset,PreAsset,CurrentAsset,TotalReturnRate,ReturnRateOf1Day,RankingStatus,PreRanking,Ranking
                            FROM siminfo.t_activityinvestorevaluation"""
        cursor.execute(sql)

        mysql_conn.commit()
    except Exception as e:
        logger.error("[snap data] Error: %s" % e)
        result_code = -1
    finally:
        mysql_conn.close()
    logger.info("[snap data] end")
    return result_code
Example #18
def execute_command(context, conf):
    logger = log.get_logger(category="ServiceShell")

    logger.info("[service shell with %s] begin" %
                (json.dumps(conf, encoding="UTF-8", ensure_ascii=False), ))

    hosts_config = context.get("hosts")
    host_id = conf.get("host")
    command = conf.get("command")

    parameters = conf.get("parameters", None)
    if parameters is not None:
        for parameter in parameters:
            command = command.replace("@%s@" % parameter,
                                      parameters.get(parameter))

    host_config = hosts_config.get(host_id)

    rsh = rshell(host_config)
    rsh.connect()
    stdin, stdout, stderr = rsh.execute(command)

    for line in stdout.readlines():
        line = line.replace("\n", "")
        print("\033[1;32m %s \033[0m" % line)

    for line in stderr.readlines():
        os.sys.stderr.write(line)

    rsh.disconnect()

    logger.info("[service shell with %s] end" %
                (json.dumps(conf, encoding="UTF-8", ensure_ascii=False), ))
Example #19
    def __init__(self, **kwargs):
        self.logger = log.get_logger(category="MdSpi")
        shfemdapi.CShfeFtdcMduserSpi.__init__(self)

        self.api = kwargs.get("api")
        self.uid = kwargs.get("uid")
        self.pwd = kwargs.get("pwd")
        self.mysql = kwargs.get("mysql")
        self.redis_raw = kwargs.get("redis_raw")
        self.redis_adv = kwargs.get("redis_adv")
        self.exchange = kwargs.get("exchange")
        self.sgid = kwargs.get("settlementgroup")
        self.file = kwargs.get("file")

        self.is_connected = False
        self.is_login = False

        self.context = {}
        self.threadContext = {}

        self.request_id = 0
        self.lock = threading.Lock()

        # Cache the CSV data
        self.attr = dict()
        if self.file is not None:
            with open(path.convert(self.file)) as f:
                for line in f:
                    line = line.replace("\r\n", "").split(",")
                    self.attr.update({line[0]: line[1]})
Example #20
def start_server(context, conf):
    logger = log.get_logger(category="PubSubServer")

    logger.info("[start pubsub server with %s] begin" % (json.dumps(conf, encoding="UTF-8", ensure_ascii=False)))

    xmq_conf = context.get("xmq").get(conf.get("xmqServerId"))

    pub_addr = xmq_conf["pubAddress"]
    sub_addr = xmq_conf["subAddress"]

    context = zmq.Context(1)
    frontend = context.socket(zmq.XSUB)
    frontend.setsockopt(zmq.RCVHWM, 20000)
    frontend.setsockopt(zmq.SNDHWM, 20000)
    frontend.bind(pub_addr)

    backend = context.socket(zmq.XPUB)
    backend.setsockopt(zmq.RCVHWM, 20000)
    backend.setsockopt(zmq.SNDHWM, 20000)
    backend.bind(sub_addr)

    zmq.device(zmq.QUEUE, frontend, backend)

    frontend.close()
    backend.close()
    context.term()
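Publishers connect to the XSUB side of the device above and subscribers to the XPUB side. A minimal pair of clients (the addresses are placeholders for pub_addr and sub_addr):

import zmq

# Hypothetical clients for the XSUB/XPUB forwarder above.
ctx = zmq.Context.instance()

sub = ctx.socket(zmq.SUB)
sub.connect("tcp://127.0.0.1:5560")  # the device's sub_addr (XPUB side)
sub.setsockopt(zmq.SUBSCRIBE, b"topic")

pub = ctx.socket(zmq.PUB)
pub.connect("tcp://127.0.0.1:5559")  # the device's pub_addr (XSUB side)
# In practice, allow a moment for the subscription to propagate before sending.
pub.send_multipart([b"topic", b"payload"])

topic, payload = sub.recv_multipart()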
Example #21
def reserve_accounts(context, conf):
    mysql_pool = mysql(configs=context.get("mysql").get(conf.get("mysqlId")))

    logger = log.get_logger(category="ReserveAccounts")

    logger.info("[reserve accounts with %s] begin" % (json.dumps(conf, encoding="UTF-8", ensure_ascii=False)))

    mysql_conn = mysql_pool.get_cnx()
    mysql_conn.set_charset_collation('utf8')
    try:
        mysql_conn.start_transaction()

        cursor = mysql_conn.cursor()

        sql = """UPDATE siminfo.t_investor t SET t.investorstatus = '6' WHERE t.investorstatus = '0' AND t.investorid < '00001000'"""
        cursor.execute(sql)

        sql = """UPDATE siminfo.t_investor t SET t.investorstatus = '6' WHERE t.investorstatus = '0' AND t.investorid REGEXP '^0*(1{2,8}|2{2,8}|3{2,8}|5{2,8}|6{2,8}|7{2,8}|8{2,8}|9{2,8})$'"""
        cursor.execute(sql)

        mysql_conn.commit()

    except Exception as e:
        logger.error("[reserve accounts] Error: %s" % (e))
    finally:
        mysql_conn.close()

    logger.info("[reserve accounts with %s] end" % (json.dumps(conf, encoding="UTF-8", ensure_ascii=False)))
Example #22
    def __init__(self, **kwargs):
        self.logger = get_logger(__name__)
        for key, value in kwargs.items():
            setattr(self, key, value)

        s_dim = self.env.observation_space.shape[0]
        a_dim = self.env.action_space.shape[0]
        if device.type == 'cuda':
            self.actor = Actor(s_dim, 128, a_dim).cuda()
            self.actor_target = Actor(s_dim, 128, a_dim).cuda()
            self.critic = Critic(s_dim + a_dim, 128, a_dim).cuda()
            self.critic_target = Critic(s_dim + a_dim, 128, a_dim).cuda()
        else:
            assert device.type == 'cpu'
            self.actor = Actor(s_dim, 128, a_dim)
            self.actor_target = Actor(s_dim, 128, a_dim)
            self.critic = Critic(s_dim + a_dim, 128, a_dim)
            self.critic_target = Critic(s_dim + a_dim, 128, a_dim)
        self.actor_optim = optim.Adam(
            self.actor.parameters(), lr=self.actor_lr)
        self.critic_optim = optim.Adam(
            self.critic.parameters(), lr=self.critic_lr)
        self.buffer = []

        self.actor_target.load_state_dict(self.actor.state_dict())
        self.critic_target.load_state_dict(self.critic.state_dict())
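The target networks initialized above are typically kept close to the online networks with Polyak averaging during training. A sketch of that update (tau is an assumed hyperparameter, not part of the snippet):

def soft_update(target, source, tau=0.005):
    # Hypothetical soft (Polyak) update for the target networks created above.
    for t_param, s_param in zip(target.parameters(), source.parameters()):
        t_param.data.copy_(tau * s_param.data + (1.0 - tau) * t_param.data)

# soft_update(self.actor_target, self.actor)
# soft_update(self.critic_target, self.critic)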
Example #23
    def __init__(self, cfg, args, video_path):
        self.cfg = cfg
        self.args = args
        self.video_path = video_path
        self.logger = get_logger("root")

        # delete all files
        # for f in glob.glob("./data/*"):
        #     os.remove(f)

        self.status_dict = {0: "OK", 1: "ACQUIRED", 2: "VIOLATION"}

        self.telegram_bot = telegram()

        use_cuda = args.use_cuda and torch.cuda.is_available()
        if not use_cuda:
            warnings.warn("Running in cpu mode which maybe very slow!",
                          UserWarning)

        if args.display:
            cv2.namedWindow("test", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("test", args.display_width, args.display_height)

        if args.cam != -1:
            print("Using webcam " + str(args.cam))
            self.vdo = cv2.VideoCapture(args.cam)
        else:
            self.vdo = cv2.VideoCapture()
        self.detector = build_detector(cfg, use_cuda=use_cuda)
        self.deepsort = build_tracker(cfg, use_cuda=use_cuda)
        self.class_names = self.detector.class_names
        self.H = get_homography()
Example #24
    def __init__(self, cfg, args, image_path, save_filename, im_width,
                 im_height):
        self.cfg = cfg
        self.args = args
        self.image_path = image_path

        self.logger = get_logger("root")
        self.save_filename = save_filename
        self.im_width = im_width
        self.im_height = im_height

        use_cuda = args.use_cuda and torch.cuda.is_available()
        if not use_cuda:
            warnings.warn("Running in cpu mode which maybe very slow!",
                          UserWarning)

        # if args.display:
        #     cv2.namedWindow("test", cv2.WINDOW_NORMAL)
        #     cv2.resizeWindow("test", args.display_width, args.display_height)

#         if args.cam != -1:
#             print("Using webcam " + str(args.cam))
#             self.vdo = cv2.VideoCapture(args.cam)
#         else:
#             self.vdo = cv2.VideoCapture()
        self.detector = detect(is_xywh=True, use_cuda=True)
        #print(self.detector)
        self.deepsort = build_tracker(cfg, use_cuda=use_cuda)
Example #25
def load_file(filename):
    '''
    load_file(filename) -> module
    
    Loads python module with specified filename.
    '''
    dirname = os.path.dirname(filename)
    dirname = os.path.abspath(dirname)
    modulename = os.path.basename(filename)

    modulename = modulename.rsplit('.', 1)[0]

    if dirname:
        sys.path.insert(0, dirname)

    mod = None
    try:
        #print sys.path
        mod = __import__(modulename, {}, {}, [''])
        reload(mod)
    except:
        errinfo = traceback.format_exc()

        _log = get_logger('smartcube.util.load_file')
        _log.error(errinfo)

    if dirname:
        del sys.path[0]

    return mod
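Typical usage of load_file (the path and the run() attribute are illustrative, not part of the snippet):

mod = load_file('/path/to/plugin.py')
if mod is not None:
    mod.run()  # assumes the loaded module defines run()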
Example #26
def publish_future(context, conf):
    result_code = 0
    logger = log.get_logger(category="PublishfutureExchange")

    mysql_pool = mysql(configs=context.get("mysql").get(conf.get("mysqlId")))
    mysql_conn = mysql_pool.get_cnx()
    mysql_conn.set_charset_collation('utf8')

    trade_system_id = conf.get("tradeSystemId")

    logger.info("[publish future exchange %s] begin" %
                (json.dumps(conf, encoding="UTF-8", ensure_ascii=False)))

    try:
        mysql_conn.start_transaction()

        cursor = mysql_conn.cursor()

        logger.info("[get current trading day]......")
        sql = """SELECT DISTINCT t1.tradingday 
                 FROM siminfo.t_tradesystemtradingday t1
                 WHERE t1.tradesystemid = %s"""
        cursor.execute(sql, (trade_system_id, ))
        row = cursor.fetchone()

        current_trading_day = str(row[0])
        logger.info("[get current trading day] current_trading_day = %s" %
                    current_trading_day)

        logger.info("[get next trading day]......")

        # Check whether holidays should be skipped
        holiday = conf.get("holiday")
        if holiday is True or holiday is None:
            sql = """SELECT DAY FROM siminfo.t_TradingCalendar t WHERE t.day > %s AND t.tra = '1' ORDER BY DAY LIMIT 1"""
        else:
            sql = """SELECT DAY FROM siminfo.t_TradingCalendar t WHERE t.day > %s ORDER BY DAY LIMIT 1"""
        cursor.execute(sql, (current_trading_day, ))
        row = cursor.fetchone()

        next_trading_day = str(row[0])
        logger.info("[get next trading day] next_trading_day = %s" %
                    next_trading_day)

        # Update the trade system trading day
        logger.info("[update trade system tradingday]......")
        sql = """UPDATE siminfo.t_tradesystemtradingday t1
                                     SET t1.tradingday = %s, t1.lasttradingday = %s
                                   WHERE t1.tradingday = %s AND t1.tradesystemid = %s"""
        cursor.execute(sql, (next_trading_day, current_trading_day,
                             current_trading_day, trade_system_id))

        mysql_conn.commit()
    except Exception as e:
        logger.error("[publish future exchange] Error: %s" % e)
        result_code = -1
    finally:
        mysql_conn.close()
    logger.info("[publish future exchange] end")
    return result_code
Example #27
    def __init__(self, args):
        self.logger = get_logger('root')
        self.txt_logger = txt_logger(os.path.join(args.save_path, args.dataset_name, 'info.txt'))

        self.dataset_name = args.dataset_name
        self.images_path = os.path.join(args.image_sequence_path, self.dataset_name, "img1")
        assert os.path.isdir(self.images_path), "Images path error"

        self.track_result_path = os.path.join(args.track_result_path, f'{self.dataset_name}.txt')
        assert os.path.isfile(self.track_result_path), "Tracking result path error"

        self.save_path = os.path.join(args.save_path, self.dataset_name)
        if os.path.exists(self.save_path):
            shutil.rmtree(self.save_path)
        os.makedirs(os.path.join(self.save_path, 'train'), exist_ok=True)
        os.makedirs(os.path.join(self.save_path, 'test'), exist_ok=True)
        os.makedirs(os.path.join(self.save_path, 'query'), exist_ok=True)

        self.sampling_rate = args.sampling_rate
        self.sampled_imgs_filenames = self.sample_frames()

        self.partition_rate = args.partition_rate
        self.track_result = []
        self.id_set = {'train_id_set': set(), 'test_id_set': set()}
        self.id_list = []
        self.id_images_details = {}
Example #28
def start_server(context, conf):
    logger = log.get_logger(category="PushPullServer")
    logger.info("[start pushpull server with %s] begin" % (json.dumps(conf, encoding="UTF-8", ensure_ascii=False)))

    xmq_conf = context.get("xmq").get(conf.get("xmqServerId"))

    push_addr = xmq_conf["pushAddress"]
    pull_addr = xmq_conf["pullAddress"]

    context = zmq.Context()
    frontend = context.socket(zmq.PULL)
    frontend.setsockopt(zmq.RCVHWM, 20000)
    frontend.setsockopt(zmq.SNDHWM, 20000)
    frontend.bind(push_addr)

    backend = context.socket(zmq.PUSH)
    backend.setsockopt(zmq.RCVHWM, 20000)
    backend.setsockopt(zmq.SNDHWM, 20000)
    backend.bind(pull_addr)

    zmq.device(zmq.STREAMER, frontend, backend)

    frontend.close()
    backend.close()
    context.term()
Example #29
def start_md_service(context, conf):
    logger = log.get_logger(category="MdService")
    logger.info("[start stock md service with %s] begin" %
                (json.dumps(conf, encoding="UTF-8", ensure_ascii=False)))

    # Get the API connection address
    exchange_conf = context.get("exchange").get(conf.get("targetExchangeId"))
    exchange_front_addr = str(exchange_conf["mdAddress"])

    # Get the market data user
    user_id = conf["userId"]
    password = conf["password"]

    # Create the MySQL database connection
    mysqlDB = mysql(configs=context.get("mysql")[conf.get("mysqlId")])

    # Create the Redis RAW store connection
    redis_conf = context.get("redis").get(conf.get("redis_row"))
    pool = redis.ConnectionPool(host=redis_conf.get('host'),
                                port=redis_conf.get('port'),
                                password=redis_conf.get('password'),
                                db=redis_conf.get('db'))
    redis_raw = redis.Redis(connection_pool=pool)

    # Create the Redis ADV store connection
    redis_conf = context.get("redis").get(conf.get("redis_adv"))
    pool = redis.ConnectionPool(host=redis_conf.get('host'),
                                port=redis_conf.get('port'),
                                password=redis_conf.get('password'),
                                db=redis_conf.get('db'))
    redis_adv = redis.Redis(connection_pool=pool)

    # Create the API object
    md_api = shfemdapi.CShfeFtdcMduserApi_CreateFtdcMduserApi(
        str(conf.get("exchange")))
    md_handler = MdHandler(api=md_api,
                           uid=user_id,
                           pwd=password,
                           mysql=mysqlDB,
                           redis_raw=redis_raw,
                           redis_adv=redis_adv,
                           settlementgroup=conf.get("settlementgroup"),
                           exchange=conf.get("exchange"),
                           file=conf.get("filepath"))

    sql = "select TopicID from siminfo.t_marketdatatopic where SettlementGroupID = %s"
    result = mysqlDB.select(sql, (conf.get("settlementgroup"), ))
    for topic_id in result:
        md_api.SubscribeMarketDataTopic(topic_id[0], shfemdapi.TERT_RESTART)

    md_api.RegisterFront(exchange_front_addr)
    md_api.RegisterSpi(md_handler)

    md_api.Init()

    while not md_handler.is_login:
        time.sleep(1)

    md_api.Join()
Example #30
async def when_ready(bot, ext_list):
    """Wait until the bot is ready, then load extensions.

    """
    await bot.wait_until_ready()
    logger = get_logger(__name__)
    logger.info('Logged in as %s, id: %s', bot.user.name, bot.user.id)
    load_extensions(bot, ext_list)
Example #31
    def __init__(self, bot, config):
        self.logger = get_logger(__name__)
        self.bot = bot
        self.config = config
        self.client = Client(loop=bot.loop)
        self.client.event(self.on_ready)
        self.client.event(self.on_message)
        self._embed_colour_cache = {}
        bot.loop.create_task(self.client.start(config["token"]))
Example #32
    def __init__(self, widget):
        super().__init__(widget)

        # Add logger to this class (if it doesn't have one already)
        if not hasattr(self, 'logger'):
            self.logger = get_logger(__class__.__name__)
        # self.setAcceptHoverEvents(True)
        self._zoom = 0
        self.scale(0.3, 0.3)
Example #33
    def __init__(self, bot: commands.Bot):
        logger = get_logger(__name__)

        if EsiCog._esi_app_task is None:
            logger.info("Creating esipy App...")
            EsiCog._esi_app_task = bot.loop.run_in_executor(
                None, self._create_esi_app)
            EsiCog._esi_app_task.add_done_callback(
                lambda f: logger.info("esipy App created"))
Example #34
def load_module(modulename):
    mod = None
    try:
        mod = __import__(modulename, {}, {}, [''])
        reload(mod)
    except:
        errinfo = traceback.format_exc()

        _log = get_logger('smartcube.util.load_module')
        _log.error(errinfo)

    return mod
Example #35
def execute_command(args, stdin = '', **kwargs):

    logger = log.get_logger('execute_command')

    if isinstance(args, (tuple, list)):
        args = [str(e) for e in args]

    logger.debug('Execute command [%s].' % ' '.join(args))

    p = Popen(args, stdin = PIPE, stdout = PIPE, stderr = PIPE, **kwargs)
    out, err = p.communicate(stdin)
    
    return p.returncode, out, err
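A typical call to execute_command (the command is illustrative):

rc, out, err = execute_command(['ls', '-l', '/tmp'])
if rc != 0:
    print(err)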
Example #36
    def __init__(self, config:dict, filename:str):
        """Set up api and logger instances for later use.

        Arguments:
            config: Config dictionary created on start up.
            filename: The name of the file to be used during logging.
        """
        # store config, mainly for use in `log_in_out`
        self.config = config

        # store api and log references
        self.api = config.get('api')
        self.log = get_logger(config, filename)
Example #37
def main(): 
    """ 
    The main process 
    Setup logging and instantiate the gatherer 
    """
 
    # Get the commandline args 
    commandline_args = get_commandline() 

    # Get the environmental variables 
    environmental_vars = usite.get_environmental_variables() 
    site = commandline_args.site 

    # If no commandline site, look to environmental args 
    if site is None:
        site = environmental_vars["RAPD_SITE"] 

    # Determine the site 
    site_file = usite.determine_site(site_arg=site) 

    # Handle no site file 
    if site_file == False: 
        print text.error+"Could not determine a site file. Exiting."+text.stop
        sys.exit(9) 

    # Import the site settings 
    SITE = importlib.import_module(site_file) 

    # Single process lock? 
    ulock.file_lock(SITE.GATHERER_LOCK_FILE) 

    # Set up logging 
    if commandline_args.verbose: 
        log_level = 10 
    else: 
        log_level = SITE.LOG_LEVEL 
    logger = ulog.get_logger(logfile_dir="/tmp", 
                             logfile_id="rapd_gatherer", 
                             level=log_level 
                            ) 
    logger.debug("Commandline arguments:") 
    for pair in commandline_args._get_kwargs(): 
        logger.debug("  arg:%s  val:%s" % pair) 

    # Instantiate the Gatherer 
    GATHERER = Gatherer(site=SITE, 
                        overwatch_id=commandline_args.overwatch_id) 
Example #38
    def __init__(self, org, client_id=None, client_secret=None):
        """
        :param org: github org name
        :type org: unicode
        :param client_id: github app client id
        :type client_id: unicode
        :param client_secret: github app client secret
        :type client_secret: unicode
        """
        self._logger = log.get_logger('PopularGithubRepoFinder')
        self._client = Client(
            user_agent='popular-github-repo-finder',
            client_id=client_id,
            client_secret=client_secret,
        )
        self._org = org
        self._all_repos = self._get_all_repos()
Example #39
def metadata_command(args):
    logger = get_logger('metadata_command')

    counter = count.Counter()

    newtickers = pg.select('''
                            SELECT a.ticker 
                            FROM tiingo_ticker_list a 
                            LEFT OUTER JOIN tiingo_ticker_metadata b 
                            ON a.ticker = b.ticker 
                            WHERE b.ticker IS NULL AND NOT a.deprecated''')
    newtickers = [x[0] for x in newtickers]

    cutoff = datetime.date.today() - datetime.timedelta(days = args.age)
    if args.top:
        oldtickers = pg.select('SELECT ticker FROM tiingo_ticker_metadata WHERE updated_on < %s::date ORDER BY updated_on DESC LIMIT %s', (cutoff, args.top))
    else:
        oldtickers = pg.select('SELECT ticker FROM tiingo_ticker_metadata WHERE updated_on < %s::date', (cutoff, ))
    oldtickers = [x[0] for x in oldtickers]

    logger.info('Updating [%d] new tickers, and [%d] old tickers.', len(newtickers), len(oldtickers))

    sql = '''
    INSERT INTO tiingo_ticker_metadata (ticker, name, description, exchange, start_date, end_date, updated_on)
    VALUES (%(ticker)s, %(name)s, %(description)s, %(exchangeCode)s, %(startDate)s, %(endDate)s, current_date)
    ON CONFLICT (ticker) DO UPDATE
    SET name        = excluded.name,
        description = excluded.description,
        exchange    = excluded.exchange,
        start_date  = excluded.start_date,
        end_date    = excluded.end_date,
        updated_on  = current_date;
        '''
    alltickers = newtickers + oldtickers
    cnt = 0
    pct = 0.1
    total = len(alltickers)

    for ticker in alltickers:
        try:
            metadata = tiingohelper.client.get_ticker_metadata(ticker)
        except KeyboardInterrupt, e:
            raise e
        except:
Example #40
def main(*args, **kwargs):

    logger = get_logger('main')

    progname = kwargs.get('progname')
    parser = kwargs.get('argparser')

    args = parse_command(parser)

    filesys.touch(__lockfile__)

    lockfp = open(__lockfile__, 'w')
    lock = filelock.FileWriteLock(lockfp)
    if not lock.trylock():
        logger.warning('Another instance is loading data.')
        return     

    if args.command == 'tickers':
        tickers_command(args)
    elif args.command == 'metadata':
        metadata_command(args)
    elif args.command == 'quotes':
        quotes_command(args)
Example #41
def main():
    """Set up configuration and start any required scripts or tasks.

    Raises:
        SetupError
    """
    # set available command line arguments
    parser = ArgumentParser(prog='./cresbot.py')
    parser.add_argument('config',
                        help='Set path to config file.')
    parser.add_argument('-t',
                        choices=['all', 'hiscorecounts'],
                        default=[],
                        dest='tasks',
                        help='Run tasks on startup. To run all tasks on startup use `all`. To run specific tasks, add them by name delimited by a space. Allowed task names: `hiscorecounts`.',
                        metavar='task',
                        nargs='*')

    # parse arguments and convert to a dictionary
    args = parser.parse_args()
    args = vars(args)

    # load config from file
    config_path = args.pop('config', 0)

    try:
        with open(config_path) as f:
            config = yaml.safe_load(f)
    except FileNotFoundError as e:
        raise SetupError from e

    # merge args into config
    config.update(args)

    # setup logging
    try:
        log = get_logger(config, 'cresbot')
    except FileNotFoundError as e:
        raise SetupError('Log file could not be found. Please check the directory exists.') from e

    # setup api instance
    api = MediaWiki(config.get('api_url'), config.get('api_config'))

    try:
        logged_in = api.login(config.get('api_username'), config.get('api_password'))
    # @todo catch more specific exception
    #       ApiError?
    except Exception as e:
        raise SetupError('MediaWiki API URL could not be verified. Please check your config file.') from e

    # check login attempt was successful
    if not logged_in:
        raise SetupError('Incorrect password or username in config.')

    # clean up
    api.logout()

    # store in config for convenience
    config.update({'api': api})

    log.info('Setup complete!')

    try:
        tasks.start_tasks(config)
    except Exception as e:
        log.exception('Uncaught exception: %s', e)
Example #42
BETA_ABSOLUTE = 'BETA_ABSOLUTE'
GAMMA_ABSOLUTE = 'GAMMA_ABSOLUTE'
DELTA_ABSOLUTE = 'DELTA_ABSOLUTE'
THETA_ABSOLUTE = 'THETA_ABSOLUTE'


class DataType(Enum):
    EEG = 0
    ALPHA_ABSOLUTE = 1
    BETA_ABSOLUTE = 2
    GAMMA_ABSOLUTE = 3
    DELTA_ABSOLUTE = 4
    THETA_ABSOLUTE = 5


logger = get_logger('InputParser')


def extract_cues(reader):
    it = iter(reader)
    cues = []
    for row in it:
        cues.append((row[0], row[1]))
    return cues


class InputParser:
    def __init__(self, folder_path):
        self.folderPath = folder_path
        for f in listdir(folder_path):
            if isfile:
Example #43
import sqlite3

from utils.log import get_logger
from datetime import datetime
import time

logger = get_logger('ExperimentData')
META_DATA_TABLE = 'experiment_meta_data'
TYPE_COL = 'type'
ID_COL = 'id'
DATE_COL = 'date'
PROCESSING_DATE = 'process_date'
INSERT_METADATA = 'INSERT INTO {} ({},{}) values (?,?)'.format(META_DATA_TABLE, TYPE_COL, DATE_COL)
RAW_EEG_TABLE = 'raw_eeg'
EEG_TYPE_COL = 'eeg_type'
SERVER_TIMESTAMP_COL = 'server_timestamp'
DEVICE_TIMESTAMP_COL = 'device_timestamp'
TRIAL_ID_COL = 'trial_id'
TP9 = 'tp9'
AF7 = 'af7'
AF8 = 'af8'
TP10 = 'tp10'
AUXR = 'auxr'

WAVE_EEG_TABLE = 'eeg_waves'
CUES_TABLE = 'cues_tables'
CUE_NAME_COL = 'cue_name'


def adapt_datetime(ts):
    return time.mktime(ts.timetuple())
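The adapt_datetime converter above only takes effect once it is registered with sqlite3; a one-line sketch of that registration (assumed usage, not shown in the snippet):

# Register the adapter so sqlite3 can bind datetime values directly.
sqlite3.register_adapter(datetime, adapt_datetime)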
Example #44
def load_command(args):
    logger = get_logger('load_command')

    lockfp = open(__lockfile__, 'w')
    lock = filelock.FileWriteLock(lockfp)
    if not lock.trylock():
        logger.warning('Another instance is loading data.')
        return         

    if args.dateback and '-' not in args.dateback:
        args.dateback = (datetime.date.today() - datetime.timedelta(days = int(args.dateback))).strftime('%Y-%m-%d')
    #if args.dateback:
    #    logger.info('Date back to [%s].' % args.dateback)

    files = os.listdir(args.inputdir)
    files = fnmatch.filter(files, '*.csv.gz')

    ds = {}
    for f in files:
        name, rest = f.split('_', 1)
        ts, _ = rest.split('.', 1)

        if name not in ds:
            ds[name] = set()

        if not args.dateback or ts > args.dateback:
            ds[name].add(ts)
    
    tables = dict([(x[1], x[0]) for x in dataset.DATASET])

    for name in ds:

        logger.info('Processing dataset [%s].' % name)

        timestamps = ds[name]
        if name not in tables:
            raise Exception('No table found for dataset [%s].' % name)
        tblname = tables[name]

        if args.dateback:
            tmp = pg.select('select quote_date, count(1) as count from ' + tblname + ' where quote_date >= %(dateback)s group by quote_date', \
                        {'dateback': args.dateback})
        else:
            tmp = pg.select('select quote_date, count(1) as count from ' + tblname + ' group by quote_date')
        existing = set([x[0].strftime('%Y-%m-%d') for x in tmp])
        # print existing

        missing = sorted(list(timestamps - existing), reverse = args.reverse)
        # print missing

        for ts in missing:
            srcfile = os.path.join(args.inputdir, '%s_%s.csv.gz' % (name, ts))
            logger.info('  +++ Loading [%s] into [%s].' % (srcfile, tblname))
            if args.yes:
                try:
                    pg.copy_from_csv(srcfile, tblname)
                except KeyboardInterrupt, e:
                    raise e
                except:
                    errinfo = traceback.format_exc()
                    logger.error(errinfo)
Example #45
def log_unhandled_exception(exctype, value, tb):
    logger = log.get_logger('unhandled_exception')
    logger.error(''.join(traceback.format_exception(exctype, value, tb)))
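This handler only takes effect once it is installed as the global excepthook (assumed usage):

import sys

sys.excepthook = log_unhandled_exception  # route uncaught exceptions to the logger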
Example #46
# FILE_LOCATION = '/Users/shiran/out/17-01-20_13-33/OddBall.csv'
# FILE_LOCATION = '/Users/shiran/out/17-04-08_16-15_Alpha'
# FILE_LOCATION = '/Users/shiran/out/17-04-16_00-41_Alpha'
FILE_LOCATION = '/Users/shiran/out/17-05-05_11-54_OddBall_P300'  # a good alpha data


# Raw EEG is measured in microvolts.
# ~2 uV is considered the noise floor of EEG measurements (1 uV == 10^-6 V, so 2 uV == 0.000002 V).
# 10-100 uV is the typical peak-to-peak amplitude of an average EEG (0.00001 to 0.0001 V).
# 1 mV == 10^-3 V == 0.001 V.
# Muse raw signals range from 0.0 to 1682.815 microvolts, i.e. roughly 0 to 1.68 mV.
# http://alexandre.barachant.org/blog/2017/02/05/P300-with-muse.html :
# anything beyond ~100 uV is noise, such as blinks.
# The data from my app is ~800 microvolts, i.e. 800 * 10^-6 V == 0.0008 V.


def load_data():
    db = ExperimentData()
    input_parser = InputParser(FILE_LOCATION)
    ex_id = db.insert(input_parser)
    logger.info("new experiment id:" + str(ex_id))
    return ex_id


if __name__ == '__main__':
    logger = get_logger('main')
    # trial = load_data()
    trial_id = 4
    # pl.generate_graph(trial_id)
    p300.plot(trial_id)
Example #47
# handler_console = logging.StreamHandler()
# # Configure the output format and instantiate a formatter
# fmt = '%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(message)s'
# formatter = logging.Formatter(fmt)
# # Attach the format to the handlers
# handler_file.setFormatter(formatter)
# handler_console.setFormatter(formatter)
# # Get the logger named TEST
# logger = logging.getLogger('TEST')
# # Add the handlers to the logger
# logger.addHandler(handler_file)
# logger.addHandler(handler_console)
# # Set the logging level
# logger.setLevel(logging.DEBUG)
# ########################################################
logger = get_logger('test.log', 'TEST')

a = ip2int("192111111119999999999999999999")
b = int2ip(a)

logger.debug(a)
logger.debug(b)
ip_sub_mask_str = ('216.239.32.0/19 '
                   , '64.233.160.0/19'
                   , '66.249.80.0/20'
                   , '2.14.192.0/18'
                   , '209.85.128.0/17'
                   , '66.102.0.0/20'
                   , '74.125.0.0/16'
                   , '64.18.0.0/20'
                   , '207.126.144.0/20'
Example #48
def quotes_command(args):
    if args.update_tickers:
        tickers_command(args)

    logger = get_logger('quotes_command')

    counter = count.Counter()

    tickers = pg.select('''
                        SELECT ticker, start_date, end_date, asset_type, last_quote_date, quote_updated_on, full_quote_updated_on
                        FROM tiingo_ticker_list 
                        WHERE 
                            start_date IS NOT NULL AND 
                            end_date IS NOT NULL AND 
                            NOT deprecated
                        ORDER BY end_date''')
    tickers = [ValueBag({
                    'ticker':x[0], 
                    'start_date':x[1], 
                    'end_date':x[2], 
                    'asset_type':x[3],
                    'last_quote_date': x[4], 
                    'quote_updated_on': x[5],
                    'full_quote_updated_on': x[6]}) for x in tickers]
    if args.type:
        tickers = [x for x in tickers if x.asset_type == args.type]

    if args.full:
        if args.full_update_age > 0:
            cutoff_date = datetime.date.today() - datetime.timedelta(days = args.full_update_age)
            tickers = [x for x in tickers if x.full_quote_updated_on is None or x.full_quote_updated_on < cutoff_date]

        if args.full_update_splits > 1:
            salt = '%s_%d' % (datetime.datetime.now().strftime('%Y%m%d%H'), os.getpid())
            tickers = [x for x in tickers if sample_by_hash('%s_%s' % (x.ticker, salt), args.full_update_splits)]
            logger.info('Full quote update for 1/%d [%d] tickers', args.full_update_splits, len(tickers))
        else:
            logger.info('Full quote update for all [%d] tickers', len(tickers))
    else:
        # last_quote_date IS NULL OR end_date > last_quote_date
        tickers = [x for x in tickers if x.last_quote_date is None or x.end_date > x.last_quote_date]
        logger.info('Incremental quote update for [%d] tickers.', len(tickers))

    sql = '''
    INSERT INTO tiingo_equity_daily (ticker, quote_date, open, high, low, close, volume, open_adjusted, high_adjusted, low_adjusted, close_adjusted, volume_adjusted, dividen, split_factor, original_data)
    VALUES (%(ticker)s, %(date)s, %(open)s, %(high)s, %(low)s, %(close)s, %(volume)s, 
            %(adjOpen)s, %(adjHigh)s, %(adjLow)s, %(adjClose)s, %(adjVolume)s,
            %(divCash)s, %(splitFactor)s, %(originalData)s)
    ON CONFLICT (ticker, quote_date) DO UPDATE
    SET open        = excluded.open,
        high        = excluded.high,
        low         = excluded.low,
        close       = excluded.close,
        volume      = excluded.volume,
        open_adjusted   = excluded.open_adjusted,
        high_adjusted   = excluded.high_adjusted,
        low_adjusted    = excluded.low_adjusted,
        close_adjusted  = excluded.close_adjusted,
        volume_adjusted = excluded.volume_adjusted,
        dividen         = excluded.dividen,
        split_factor    = excluded.split_factor,
        original_data   = excluded.original_data;'''

    cnt = 0
    pct = 0.1
    total = len(tickers)

    logger.info('Loading quotes of [%d] tickers.', total)

    for x in tickers:
        #ticker, start_date, end_date, last_quote_date = tinfo
        # print tinfo
        full_updated = False
        try:
            if not x.last_quote_date or args.full:
                # full range
                if args.verbose:
                    logger.info(' +++ Updating ticker [%s] quotes for full date range between [%s] and [%s].', x.ticker, x.start_date, x.end_date)
                quotes = tiingohelper.client.get_ticker_price(x.ticker, 
                        startDate = x.start_date, endDate = x.end_date, 
                        fmt = 'json', frequency = 'daily')
                quotes, _, _, lq_date = process_quotes_data(x.ticker, quotes)
                full_updated = True
            else:
                if args.verbose:
                    logger.info(' +++ Updating ticker [%s] quotes since last quote date [%s] to [%s].', 
                            x.ticker, x.last_quote_date + datetime.timedelta(days = 1), x.end_date)
                quotes = tiingohelper.client.get_ticker_price(x.ticker, 
                        startDate = x.last_quote_date + datetime.timedelta(days = 1), endDate = x.end_date, 
                        fmt = 'json', frequency = 'daily')
                quotes, has_dividen, has_split, lq_date = process_quotes_data(x.ticker, quotes)

                if has_dividen or has_split:
                    # load full range when there is dividen or split since last update
                    if args.verbose:
                            logger.info(' +++ Dividen or split found, updating ticker [%s] quotes for full date range between [%s] and [%s].', x.ticker, x.start_date, x.end_date)
                    quotes = tiingohelper.client.get_ticker_price(x.ticker, 
                            startDate = x.start_date, endDate = x.end_date, 
                            fmt = 'json', frequency = 'daily')
                    quotes, _, _, lq_date = process_quotes_data(x.ticker, quotes)
                    full_updated = True

            if not quotes:
                raise Exception('No quotes returned.')
        except KeyboardInterrupt, e:
            raise e
        except:
Example #49
import json
import logging
import os
import pprint
import signal
import time
import threading
from threading import Event

from popular_github_repo_finder import PopularGithubRepoFinder
from utils import log, unhandled_exception_handler


log.LOG_FILE_NAME = 'popular_github_repo_finder.log'
unhandled_exception_handler.configure_unhandled_exception_handler()
logger = log.get_logger('main')


def _parse_args():
    parser = ArgumentParser()
    parser.add_argument(
        '--org',
        type=unicode,
        required=True,
        help='github organization name',
    )
    parser.add_argument(
        '-n',
        type=int,
        required=True,
        help='number of top repos of the org',