},
            "hue_saturation": {
                "use": True,
                "min": -10,
                "max": 10
            }
        }
    }
}
args.aug_settings = AttrDict(aug_settings)  # Add to args.

# Logging
LOGGER = logging.getLogger(__name__)
log_file = os.path.join('logs', '{}.log'.format(args.exp_name))
os.makedirs('logs', exist_ok=True)
setup_logging(log_path=log_file, logger=LOGGER)

if __name__ == '__main__':
    # Experiment directory.
    experiment_path = os.path.join('experiments', args.exp_name)
    os.makedirs(experiment_path, exist_ok=True)

    # Enable cuDNN autotuning and set the torchvision cache path.
    if torch.cuda.is_available():
        cudnn.benchmark = True
    os.environ['TORCH_HOME'] = os.path.join(
        os.path.abspath(os.path.dirname(__file__)), 'torchvision')

    # Initialise trainer.
    trainer = CNNTrainer(args, logger=LOGGER)
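
None of these examples include setup_logging itself. The sketch below is a
hypothetical helper matching the call signature used in this example
(log_path and logger keyword arguments); the real behavior may differ.

import logging

def setup_logging(log_path=None, log_level='INFO', logger=None):
    # Hypothetical implementation; only the signature is taken from the
    # calls in these examples.
    logger = logger or logging.getLogger()
    logger.setLevel(getattr(logging, str(log_level).upper(), logging.INFO))
    formatter = logging.Formatter(
        '%(asctime)s %(levelname)s %(name)s: %(message)s')
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)
    if log_path is not None:
        file_handler = logging.FileHandler(log_path)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    return logger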
Example #2
        sys.exit()


def main() -> None:
    logger.info("--- start ---")

    # Extract the differences between the CSVs
    diff_df: DifferenceDf = DifferenceDf(csv_path=path)

    # Create the diff CSV
    diff_df.execute()

    # Get the number of requests
    logger.info(f"Count: {diff_df.row_count} rows")


if __name__ == "__main__":
    try:
        # Check the OS
        check_platform()
        # Parse the command-line arguments
        path, enc, debug = parse_arg()
        # Configure the logger
        logger = setup_logging(conf_path=get_log_conf_path(is_debug=debug))

        main()
    except Exception:
        logger.error(format_exc())
    finally:
        logger.info("--- end ---")
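
The example unpacks path, enc, debug from parse_arg() without showing it; a
plausible sketch follows (the argument names and defaults are assumptions):

import argparse

def parse_arg():
    # Hypothetical helper returning the (path, enc, debug) tuple the caller
    # above expects; the real implementation is not part of the excerpt.
    parser = argparse.ArgumentParser(description='Extract CSV differences.')
    parser.add_argument('csv_path', help='Path to the input CSV file')
    parser.add_argument('--encoding', default='utf-8', help='CSV encoding')
    parser.add_argument('--debug', action='store_true',
                        help='Enable debug logging')
    args = parser.parse_args()
    return args.csv_path, args.encoding, args.debug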
Example #3
import os
import sys

import alpaca_trade_api
import pandas

wd = os.getcwd()
sys.path.append(wd)

from lib import alpaca
from lib.utils import setup_logging

credentials = alpaca.load_paper_credentials()

api = alpaca_trade_api.REST(key_id=credentials['key_id'],
                            secret_key=credentials['secret_key'],
                            base_url=credentials['base_url'])

# setup logging
log_out = sys.argv[1] if len(sys.argv) > 1 else '/var/log/apps/alpaca/paper_trade_example.log'

if log_out == 'stdout':
    logger = setup_logging()
else:
    logger = setup_logging(file_name=log_out)


def _get_prices(symbols, end_dt, max_workers=5):
    """Get the map of DataFrame price data from Alpaca's data API."""
    start_dt = end_dt - pandas.Timedelta('50 days')
    start = start_dt.strftime('%Y-%m-%d')
    end = end_dt.strftime('%Y-%m-%d')

    def get_barset(symbols):
        return api.get_barset(symbols, 'day', limit=50, start=start, end=end)

    # The maximum number of symbols we can request at once is 200.
    barset = None
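    # Hypothetical continuation; the original example is truncated here.
    # Per the comment above, request the bars in chunks of 200 symbols and
    # merge them (BarSet is dict-like, so update() is assumed to work).
    for i in range(0, len(symbols), 200):
        chunk_barset = get_barset(symbols[i:i + 200])
        if barset is None:
            barset = chunk_barset
        else:
            barset.update(chunk_barset)
    return barset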
Example #4
        if attempts_checkout:

            checkout_lst = CheckoutList()

            try:
                for attempt in attempts_checkout:
                    checkout_lst.append((attempt, client, conn))

                # Execute the checkout procedure for each item
                pool.map(checkout_procedure, checkout_lst)

                transaction.commit()
            except:
                transaction.abort()
                raise

        time.sleep(config.getint('checkout', 'mins_to_wait') * 60)

    pool.close()


if __name__ == '__main__':
    config = utils.balaio_config_from_env()
    utils.setup_logging(config)

    models.Session.configure(bind=models.create_engine_from_config(config))

    print('Start checkout process...')

    main(config)
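
The checkout_procedure passed to pool.map is not shown; a hypothetical
sketch of its shape, based on the (attempt, client, conn) tuples built in
the loop above:

def checkout_procedure(item):
    # Hypothetical worker; the real checkout logic is not in the excerpt.
    attempt, client, conn = item
    print('Checking out attempt %s' % attempt)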
Example #5
        """
        filepath = event.pathname
        if not os.path.basename(filepath).startswith('_'):
            if not zipfile.is_zipfile(filepath):
                logger.info('Invalid zipfile: %s.', filepath)
                return None

            logger.debug('Adding %s to checkin processing pool.', filepath)
            self.pool.add_job(filepath)


if __name__ == '__main__':
    # App bootstrapping:
    # Setting up the app configuration, logging and SqlAlchemy Session.
    config = utils.balaio_config_from_env()
    utils.setup_logging(config)
    models.Session.configure(bind=models.create_engine_from_config(config))

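    # `mask` is used below but never defined in this excerpt; a plausible
    # definition for a check-in watcher (an assumption) would be:
    mask = pyinotify.IN_CLOSE_WRITE | pyinotify.IN_MOVED_TO
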
    # Setting up PyInotify event watcher.
    wm = pyinotify.WatchManager()
    handler = EventHandler(config=config)
    notifier = pyinotify.Notifier(wm, handler)

    wm.add_watch(config.get('monitor', 'watch_path').split(','),
                 mask,
                 rec=config.getboolean('monitor', 'recursive'),
                 auto_add=config.getboolean('monitor', 'recursive'))

    logger.info('Watching %s.', config.get('monitor', 'watch_path'))

    notifier.loop()
Example #6
def logLevel(level=None):
    utils.setup_logging(root_folder, log_level=level)
Example #7
    """
    parsed_l = data.parse_input_line(l)
    if parsed_l is None:
        return None
    reddit, text, url = parsed_l
    if not data.is_image_url(url):
        return False, reddit, text, text, url

    doc = tokenizer(text)
    tokens = [t.lower_ for t in doc]
    text_parsed = " ".join(tokens)
    return True, reddit, text, text_parsed, url


if __name__ == "__main__":
    utils.setup_logging()

    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--data", default=data.DEFAULT_DATA_FILE,
                        help="Data file to read, default=" + data.DEFAULT_DATA_FILE)
    parser.add_argument("-o", "--output", default=data.DEFAULT_OUTPUT_PREFIX,
                        help="Output data prefix to produce. Suffixes will be added "
                             "to this path, default=" + data.DEFAULT_OUTPUT_PREFIX)
    parser.add_argument("--limit", type=int, help="Optional limit of input lines to process, default=No limit")
    args = parser.parse_args()

    # Use only spaCy's tokenizer; it is much faster than the full pipeline.
    nlp = English()
    tokenizer = nlp.Defaults.create_tokenizer(nlp)

    data_path = pathlib.Path(args.data)
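
    # Hypothetical continuation; the original example stops here. The line
    # parser defined at the top of this snippet is called `parse_line`
    # below, since its real name is cut off in the excerpt.
    with data_path.open('r', encoding='utf-8') as fd:
        for idx, line in enumerate(fd):
            if args.limit is not None and idx >= args.limit:
                break
            result = parse_line(line)
            # Each result is None or a
            # (is_image, reddit, text, text_parsed, url) tuple.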
Example #8
import os
import sys
import argparse
from datetime import datetime
import backtrader
import pandas

wd = os.getcwd()
sys.path.append(wd)

from lib import alpaca
from lib.utils import setup_logging

# setup logging
logger = setup_logging()


# Moving-average strategy.
class MovingAverageStrategy(backtrader.Strategy):

    params = dict(day_period=15, week_period=5)

    def log(self, txt, dt=None):
        dt = dt or self.datas[0].datetime.date(0)
        logger.info('%s, %s', dt.isoformat(), txt)

    def __init__(self):
        sma_day = backtrader.indicators.SMA(self.data0,
                                            period=self.p.day_period)
        sma_week = backtrader.indicators.SMA(self.data1,
                                             period=self.p.week_period)
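        # The original example stops mid-__init__; everything below is an
        # assumed completion, not the original author's code.
        self.sma_day = sma_day
        self.sma_week = sma_week

    def next(self):
        # Go long while the daily SMA is above the weekly SMA and exit when
        # it drops back below (a common moving-average rule).
        if not self.position and self.sma_day[0] > self.sma_week[0]:
            self.buy()
        elif self.position and self.sma_day[0] < self.sma_week[0]:
            self.sell()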
Example #9
    setattr(settings, 'num_input_channels', 1)
elif settings.dataset == 'kitti_2015':
    setattr(settings, 'left_img_folder', 'image_2')
    setattr(settings, 'right_img_folder', 'image_3')
    setattr(settings, 'disparity_folder', 'disp_noc_0')
    setattr(settings, 'num_val', 40)
    setattr(settings, 'num_input_channels', 3)


# Python logging.
LOGGER = logging.getLogger(__name__)
exp_dir = join('experiments', settings.exp_name)
log_file = join(exp_dir, 'log.log')
os.makedirs(exp_dir, exist_ok=True)
os.makedirs(join(exp_dir, 'qualitative_samples'), exist_ok=True)
setup_logging(log_path=log_file, log_level=settings.log_level, logger=LOGGER)
settings_file = join(exp_dir, 'settings.log')
with open(settings_file, 'w') as the_file:
    the_file.write(str(settings))


# Set the random seed.
# NOTE: The seed affects the train/val split when the patch-locations data
# is created, which is useful for reproducing results.
random.seed(settings.seed)


# Model.
device = '/cpu:0' if tfe.num_gpus() == 0 else '/gpu:0'
global_step = tf.Variable(0, trainable=False)
with tf.device(device):
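    # The example ends mid-block; the model and optimizer would typically
    # be built under this device scope, e.g. (assumed names):
    #   model = Model(settings)
    #   optimizer = tf.train.AdamOptimizer(settings.learning_rate)
    pass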