Example #1
def main():
    args = parse_arguments()
    config = get_config(args.config_file, is_test=args.test)
    np.random.seed(config.seed)
    # torch.manual_seed(config.seed)
    # torch.cuda.manual_seed_all(config.seed)
    config.use_gpu = config.use_gpu and torch.cuda.is_available()

    # log info
    log_file = os.path.join(config.save_dir,
                            "log_exp_{}.txt".format(config.run_id))
    logger = setup_logging(args.log_level, log_file)
    logger.info("Writing log file to {}".format(log_file))
    logger.info("Exp instance id = {}".format(config.run_id))
    logger.info("Exp comment = {}".format(args.comment))
    logger.info("Config =")
    print(">" * 80)
    pprint(config)
    print("<" * 80)

    # Run the experiment
    try:
        runner = eval(config.runner)(config)
        if not args.test:
            runner.train()
        else:
            runner.test()
    except Exception:  # log any failure instead of failing silently
        logger.error(traceback.format_exc())

    sys.exit(0)
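
All of the examples on this page call a project-local setup_logging helper, but with different signatures. For the two-argument form used above, which returns a configured logger, a minimal sketch might look like the following (the handler set, format string and function body are assumptions for illustration, not the project's actual implementation):

import logging


def setup_logging(log_level, log_file):
    # Hypothetical sketch: attach console and file handlers to the root
    # logger at the requested level and hand the logger back to the caller.
    logger = logging.getLogger()
    logger.setLevel(log_level)
    formatter = logging.Formatter(
        "%(asctime)s %(levelname)s %(name)s: %(message)s")
    for handler in (logging.StreamHandler(), logging.FileHandler(log_file)):
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger
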
Example #2
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
from utils.logger import setup_logging
setup_logging(os.path.join("configs", "logging.json"))


def main():
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'prs_dev.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?") from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
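
Examples #2, #3 and #4 instead configure logging from a file path before anything else runs. A minimal sketch of the JSON-driven variant, assuming a dictConfig-style mapping like the configs/logging.json passed above (the fallback behaviour is an assumption; the .conf-based examples below would typically go through logging.config.fileConfig instead):

import json
import logging
import logging.config
import os


def setup_logging(config_path, default_level=logging.INFO):
    # Hypothetical sketch: load a dictConfig mapping from a JSON file and
    # fall back to basicConfig when the file is missing.
    if os.path.exists(config_path):
        with open(config_path, 'r') as f:
            logging.config.dictConfig(json.load(f))
    else:
        logging.basicConfig(level=default_level)
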
Example #3
import os

from dotenv import find_dotenv, load_dotenv
from flask import Flask
from flask_cors import CORS

from utils.auth import jwt

app = Flask(__name__)

app.config.update({
    'SECRET_KEY': 'FOICLIENTAPP',
    'SQLALCHEMY_TRACK_MODIFICATIONS': False,
    'SQLALCHEMY_DATABASE_URI': BaseConfig.SQLALCHEMY_DATABASE_URI,
})

setup_logging(os.path.join(BaseConfig.PROJECT_ROOT,
                           'logging.conf'))  # important to do this first


def init_app(run_mode=os.getenv('FLASK_ENV', 'production')):

    app.config.from_object(CONFIG[run_mode])
    CORS(app)

    from .requestapi import REQUESTAPI_BLUEPRINT

    app.register_blueprint(REQUESTAPI_BLUEPRINT)

    ExceptionHandler(app)
    setup_jwt_manager(app, jwt)

    db.init_app(app)
Example #4
import os

import sentry_sdk
from flask import Flask, current_app
from pay_api.models import CfsAccount as CfsAccountModel
from pay_api.models import PaymentAccount as PaymentAccountModel
from pay_api.models import db, ma
from pay_api.services.cfs_service import CFSService
from pay_api.services.oauth_service import OAuthService
from pay_api.utils.constants import DEFAULT_COUNTRY, DEFAULT_CURRENCY
from pay_api.utils.enums import AuthHeaderType, ContentType, PaymentMethod
from sentry_sdk.integrations.flask import FlaskIntegration

import config
from utils.logger import setup_logging

setup_logging(
    os.path.join(os.path.abspath(os.path.dirname(__file__)),
                 'logging.conf'))  # important to do this first


def create_app(run_mode=os.getenv('FLASK_ENV', 'production')):
    """Return a configured Flask App using the Factory method."""
    app = Flask(__name__)

    app.config.from_object(config.CONFIGURATION[run_mode])
    # Configure Sentry
    if app.config.get('SENTRY_DSN', None):  # pragma: no cover
        sentry_sdk.init(dsn=app.config.get('SENTRY_DSN'),
                        integrations=[FlaskIntegration()])
    db.init_app(app)
    ma.init_app(app)
Example #5
def main():
    parser = argparse.ArgumentParser(description='Phoneme classification task')
    parser.add_argument('--load',
                        type=str,
                        help='Path of model checkpoint to load',
                        required=True)
    parser.add_argument('--config',
                        '-c',
                        type=str,
                        help='Name of config file to load',
                        required=True)

    args = parser.parse_args()
    setup_logging(args)
    cfg = parse(args)

    # use all gpus available
    num_gpu = torch.cuda.device_count()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    normalize = get_normalizer(cfg.data_type)

    # get the dataloaders
    test_data = PhonemeDataset(cfg.num_frames,
                               cfg.phn_idx_map,
                               cfg.data_type,
                               mode='test',
                               transform=normalize)
    test_loader = DataLoader(dataset=test_data,
                             batch_size=cfg.batch_size,
                             shuffle=True)

    logging.info('Cmd: python {0}'.format(' '.join(sys.argv)))

    # set up model and load checkpoint
    if not cfg.model_arch:
        raise ValueError('Model architecture not specified')

    if cfg.model_arch not in ARCH_TO_MODEL:
        raise ValueError('Model architecture {} does not exist.'.format(
            cfg.model_arch))

    model = ARCH_TO_MODEL[cfg.model_arch](cfg.num_channels, cfg.num_frames,
                                          cfg.num_classes, cfg.num_dimensions)

    with open(os.path.join(cfg.load), 'rb') as f:
        model.load_state_dict(torch.load(f))
    model = model.to(device)
    model = torch.nn.DataParallel(model)

    # test the model
    test_err, predictions = predict(model, test_loader, device, cfg)
    logging.info('Test PER: {:.2f}%'.format(test_err))

    # save the output into a pickle file in the log directory
    output_dir = os.path.join(cfg.log_dir, 'predict.pkl')
    logging.info('Prediction saved in {}'.format(output_dir))

    # pickle requires a binary file handle
    with open(output_dir, 'wb') as file:
        pkl.dump(predictions, file)

    # plot the confusion matrix
    plot_phoneme_cm(predictions, cfg.phn_idx_map)
Example #6
from api.server import run_server
from utils.logger import setup_logging

if __name__ == '__main__':
    setup_logging()
    run_server()
Example #7
def main():
    args = parse_arguments()
    config = get_bo_config(args.config_file)
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed_all(config.seed)
    config.use_gpu = config.use_gpu and torch.cuda.is_available()
    device = torch.device('cuda' if config.use_gpu else 'cpu')

    # log info
    log_file = os.path.join(config.save_dir,
                            "log_exp_{}.txt".format(config.run_id))
    logger = setup_logging(args.log_level, log_file)
    logger.info("Writing log file to {}".format(log_file))
    logger.info("Exp instance id = {}".format(config.run_id))
    logger.info("Exp comment = {}".format(args.comment))
    logger.info("Config =")
    print(">" * 80)
    pprint(config)
    print("<" * 80)

    # load model
    model = eval(config.model.name)(config.model)
    model_snapshot = torch.load(config.model.pretrained_model,
                                map_location=device)
    model.load_state_dict(model_snapshot["model"], strict=True)
    model.to(device)
    if config.use_gpu:
        model = nn.DataParallel(model, device_ids=config.gpus).cuda()
    # Run the experiment
    results_list = bq_loop(config.bq, model)
    if config.bq.is_GPY:
        if config.bq.is_sparseGP:
            result_name = (config.bq.name + '_sparseGP_init_p' +
                           str(config.bq.init_num_data) + '_inducing_p' +
                           str(config.bq.num_inducing_pts) + '_results.p')
        else:
            result_name = (config.bq.name + '_fullGP_init_p' +
                           str(config.bq.init_num_data) + '_results.p')
    else:
        if config.bq.is_ai:
            result_name = (config.bq.name + '_ai_init_p' +
                           str(config.bq.init_num_data) + '_results.p')
        else:
            result_name = (config.bq.name + '_opt_init_p' +
                           str(config.bq.init_num_data) + 'iter' +
                           str(config.bq.opt_iter) + 'lr' +
                           str(config.bq.opt_lr) + '_results.p')
    # save the results, making sure the file handle is closed
    with open(os.path.join(config.bq.save_dir, result_name), 'wb') as f:
        pickle.dump(results_list, f)
    sys.exit(0)
Example #8
def main():
    parser = argparse.ArgumentParser(description='Phoneme classification task')
    parser.add_argument('--config',
                        '-c',
                        type=str,
                        help='Name of config file to load',
                        required=True)

    args = parser.parse_args()
    setup_logging(args)
    cfg = parse(args)

    # use all gpus available
    num_gpu = torch.cuda.device_count()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if device.type == 'cuda':
        logging.info("Look ma, I'm using a GPU!")

    # get the dataloaders
    train_dataloader, valid_dataloader, test_dataloader = get_dataloaders(
        cfg.batch_size, cfg.num_frames, cfg.phn_idx_map, cfg.data_type)

    # setup hyperparameter sweeping
    if not cfg.num_sweeps:
        cfg.num_sweeps = 1

    logging.info('Cmd: python {0}'.format(' '.join(sys.argv)))
    logging.info('Running experiment <{0}> in {1} mode.'.format(
        cfg.config, cfg.mode))

    orig_cfg = cfg
    for sweep_count in range(cfg.num_sweeps):
        cfg = sample_cfg(orig_cfg)
        # create a directory for the model to be saved
        cfg.sweep_dir = os.path.join(cfg.log_dir, 's_{}'.format(sweep_count))
        os.mkdir(cfg.sweep_dir)
        logging.info('Sweep Count: {}'.format(sweep_count))
        logging.info('Config:\n {0}'.format(cfg))
        # set up model and load if checkpoint provided
        if not cfg.model_arch:
            raise ValueError('Model architecture not specified')

        if cfg.model_arch not in ARCH_TO_MODEL:
            raise ValueError('Model architecture {} does not exist.'.format(
                cfg.model_arch))

        model = ARCH_TO_MODEL[cfg.model_arch](cfg.num_channels, cfg.num_frames,
                                              cfg.num_classes,
                                              cfg.num_dimensions)

        if cfg.load:
            with open(os.path.join(cfg.load), 'rb') as f:
                model.load_state_dict(torch.load(f))
        model = model.to(device)
        model = torch.nn.DataParallel(model)

        # setup negative log-likelihood loss for multi-class classification
        # (the model is expected to output log-probabilities)
        loss = NLLLoss()

        # setup the optimizer
        optim = Adam(model.parameters(),
                     lr=cfg.learning_rate,
                     weight_decay=cfg.l2_regularizer)

        # train the model
        logging.info('Commencing training')
        lowest_err = 100.
        for epoch in range(1, cfg.epochs + 1):
            train_loss, train_err = train(model, train_dataloader, loss,
                                          device, cfg, optim)
            val_loss, val_err = evaluate(model, valid_dataloader, loss, device,
                                         cfg)
            logging.info(
                'Epoch: {}\tTrain Loss: {:.6f}\tTrain PER: {:.2f}\tVal Loss: {:.6f}\tVal PER: {:.2f}%'
                .format(epoch, train_loss, train_err, val_loss, val_err))
            if val_err < lowest_err:
                logging.info(
                    'Lowest validation error achieved. Saving the model.')
                save_model(model, cfg.sweep_dir, best=True)
                lowest_err = val_err
        # save the checkpoint of the latest version of the model
        save_model(model, cfg.sweep_dir)
        # test the model
        _, test_err = evaluate(model, test_dataloader, loss, device, cfg)
        logging.info('Test PER: {:.2f}%'.format(test_err))
Example #9
            for redundant in redundants:
                use_redundant = (redundant == 'redundant')
                store_data_path = os.path.join('./data/processed', data_process_type, cut, redundant)
                if not os.path.exists(store_data_path):
                    os.makedirs(store_data_path)

                processor = RawProcessor(file_paths=file_paths, store_path=store_data_path, use_cut=use_cut,
                                         redundant=use_redundant)
                # process the raw data
                processor.process_raw_data()
                # write out the split datasets
                processor.prepare_data(data_split_dict={'train': 0.7, 'dev': 0.2, 'test': 0.1})
                # write the word-frequency statistics
                write_counter(processor.words_counter, os.path.join(store_data_path, 'word_count.json'),
                              key=lambda kv: (kv[1], kv[0]), reverse=True)
                write_counter(processor.length_counter, os.path.join(store_data_path, 'length_count.json'))


if __name__ == '__main__':
    STOPWORDS = get_stopwords('./data/stopwords.txt')
    if os.path.exists('./logs/data_info.log'):
        os.remove('./logs/data_info.log')

    setup_logging(default_path='./utils/logger_config.yaml')
    logger = logging.getLogger("data_logger")

    start()
    # clean up previously saved processed data
    subprocess.call(r"find ./data/processed/data_all -name '*_data' -type f -print -exec rm {} \;", shell=True)