Example #1
def create_app():
    """create flask-sports app"""

    app = Flask(__name__)

    app.secret_key = '4A8BF09E6732FDC682988A8SYZ666AB7CF53176D08631E'

    config = load_config(CONFIGS['2'])  # select the environment

    # load logger
    setup_log(config)

    # load config
    app.config.from_object(config)

    # register blueprint
    # app.register_blueprint(test)

    celery_app.init_app(app)  # register the Celery application
    redis_app.init_app(app)  # register the Redis application
    sms.init_app(app)  # register the Aliyun SMS service
    signal.init_app(app)  # register the verification-code signal
    db.init_app(app)  # register the MongoDB instance
    oss.init_app(app)  # register the OSS service

    with app.app_context():
        # push the application context manually
        # get_user_model(app)  # register the user model table
        pass

    got_request_exception.connect(log_exception, app)  # log exceptions raised during requests

    return app
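
All of these examples lean on a project-local configs module that exposes load_config. A minimal sketch of the environment-selection pattern Example #1 assumes (the CONFIGS mapping and the config class names here are hypothetical, not taken from the project):

# Hypothetical sketch of the configs module used in Example #1;
# the real project's implementation may differ.
class Config:
    DEBUG = False

class DevelopmentConfig(Config):
    DEBUG = True

class ProductionConfig(Config):
    pass

CONFIGS = {'1': 'development', '2': 'production'}

_CLASSES = {'development': DevelopmentConfig, 'production': ProductionConfig}

def load_config(name):
    """Map an environment key to its config class."""
    return _CLASSES[name]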
Example #2
File: __init__.py  Project: hucxgit/ppy
def create_app():

    app = Flask(__name__)

    # Load config
    config = load_config()
    app.config.from_object(config)

    # login_manager.init_app(app)
    registerblueprint(app)

    # init_views(app)

    return app
Example #3
File: app.py  Project: Pix-00/olea
def create_manager(env=os.getenv('FLASK_ENV', 'production')):
    from flask import Flask
    from flask_migrate import Migrate

    from configs import load_config
    from core.singleton import db

    app = Flask(__name__)

    app.config.from_object(load_config(env))

    db.init_app(app)
    Migrate(app, db)  # attach Flask-Migrate so the 'flask db' CLI is available

    set_shellcontext(app)
    register_commands(app)

    return app
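
Once Migrate(app, db) is attached, Flask-Migrate adds its command group to the Flask CLI; a typical workflow (assuming FLASK_APP points at this factory) looks like:

flask db init      # run once to create the migrations/ directory
flask db migrate   # autogenerate a revision from model changes
flask db upgrade   # apply pending revisions to the database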
Example #4
File: __init__.py  Project: Pix-00/olea
def create_app(env=os.getenv('FLASK_ENV', 'production')):
    from configs import load_config

    from .blueprints import register_blueprints
    from .errors import register_error_handlers

    print(f'\n- - - - - olea [{env}] - - - - -\n')
    app = Flask(__name__)
    app.env = env
    app.config.from_object(load_config(env))

    # configure_logger(app)
    register_error_handlers(app)
    hook_hooks(app)
    init_extensions(app)
    register_blueprints(app)

    return app
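
A factory like this is typically consumed from a small entry-point module; a minimal sketch (the wsgi.py file name and the olea import path are assumptions, not taken from the project):

# wsgi.py - hypothetical entry point for the factory above
from olea import create_app  # assumed package path

app = create_app()

if __name__ == '__main__':
    app.run()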
Example #5
import argparse
import logging
import os

import configs

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
parser.add_argument(
    '--logdir',
    help="Existing log directory to resume training from")
parser.add_argument(
    '--logname',
    default="default",
    help="Experiment prefix for log directory, relative to ./data/env")
parser.add_argument('--configid',
                    help="Config name string to use when "
                    "starting new training. Can be one of:\n"
                    "{}".format(list(configs.config_index.keys())))
parser.add_argument('--show-embeddings',
                    help="Project model progress using labelled embeddings",
                    action='store_true')
args = parser.parse_args()
scriptdir = os.path.dirname(os.path.realpath(__file__))
if args.logdir is not None:
    logdir = os.path.join(scriptdir, args.logdir)
    # logdir = args.logdir
    config = configs.load_config(logdir)
else:
    config = configs.get_config(args.configid)
    # logdir = os.path.join('data', config['env'], args.logname, 'train')
    logdir = os.path.join(scriptdir, 'data', args.logname, config['env'])
    logdir = increment_path(os.path.join(logdir, "run"))
    os.makedirs(logdir)
    configs.save_config(config, logdir)

import gym
import gym_mnist
import tensorflow as tf
from modellearner import ModelLearner

env = gym.make(config['env'])
logger.info("Logging results to {}".format(logdir))
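
Example #5 calls an increment_path helper that is not shown in the excerpt; a plausible sketch of such a helper (the project's real implementation may differ):

import os

def increment_path(path):
    """Return path, or path_1, path_2, ... - the first that does not exist."""
    candidate, i = path, 0
    while os.path.exists(candidate):
        i += 1
        candidate = '{}_{}'.format(path, i)
    return candidate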
Example #6
from aiohttp import web
from routes import setup_routes
import argparse
from configs import load_config
import aiohttp_jinja2
import jinja2
import asyncpgsa


parser = argparse.ArgumentParser(description='App1')
parser.add_argument("-c", "--config",type=argparse.FileType('r'))
args = parser.parse_args()
config = load_config(args.config)


app = web.Application()
app['config'] = config
# set up templates
aiohttp_jinja2.setup(
    app,
    # PackageLoader needs an importable aiohttpapp1 package; an empty
    # aiohttpapp1.py next to this script satisfies that
    loader=jinja2.PackageLoader('aiohttpapp1', 'templates'))
setup_routes(app)


async def on_start(app):
    config = app['config']
    app['db'] = await asyncpgsa.create_pool(dsn=config['database_uri'],
                                            user='******',
                                            password='******')
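
The snippet defines on_start but never registers it or starts the server; a minimal completion using aiohttp's startup-hook API (port 8080 is an arbitrary assumption) would be:

app.on_startup.append(on_start)  # run the pool setup at startup
web.run_app(app, port=8080)      # the port is an assumed value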

Example #7
        with TOKEN_PATH.open('w') as f:
            f.write(cache.serialize())

    return redirect(url_for('done'))


def _build_msal_app(cache=None):
    return msal.ConfidentialClientApplication(CLIENT_ID,
                                              authority=AUTHORITY,
                                              client_credential=CLIENT_SECRET,
                                              token_cache=cache)


if __name__ == '__main__':
    DIR = Path(__file__).parents[1]
    sys.path.append(str(DIR))

    from configs import load_config

    config = load_config()

    data_dir: Path = config['ONEDRIVE_DATA_DIR']
    data_dir.mkdir(exist_ok=True)

    TOKEN_PATH = data_dir / 'token.json'
    CLIENT_ID = config['ONEDRIVE_CLIENT_ID']
    CLIENT_SECRET = config['ONEDRIVE_CLIENT_SECRET']

    app.run()
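
Example #7 writes the token cache out but never shows the read side; a small companion sketch using msal.SerializableTokenCache (TOKEN_PATH as defined above; the helper name _load_cache is hypothetical, and msal is assumed imported at module level as in the excerpt):

def _load_cache():
    # Rebuild the MSAL token cache from the serialized state on disk
    cache = msal.SerializableTokenCache()
    if TOKEN_PATH.exists():
        cache.deserialize(TOKEN_PATH.read_text())
    return cache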
Example #8
def main(config):

    # Load checkpoint config
    old_config = config
    config_dir = os.path.dirname(os.path.dirname(config['checkpoint']))
    config_path = os.path.join(config_dir, 'config.json')
    config = configs.load_config(config_path)

    # Remove multigpu flags and adjust batch size
    config['multigpu'] = 0
    config['batch_size'] = 1

    # Overwrite config params
    config['checkpoint'] = old_config['checkpoint']
    if old_config['n_steps'] is not None:
        config['n_steps'] = old_config['n_steps']
    config['n_seqs'] = old_config['n_seqs']
    config['n_samples'] = old_config['n_samples']

    # Set up device
    local_rank = 0
    config['local_rank'] = 0
    config['device'] = 'cuda:{}'.format(local_rank)

    train_loader, val_loader = get_dataset(config)
    print('Dataset loaded')

    model = init_model(config)
    print(model)
    print('Model loaded')

    # Define output dirs
    out_dir = config_dir
    samples_dir = os.path.join(out_dir, 'samples')
    os.makedirs(samples_dir, exist_ok=True)

    # Define saving function
    def save_samples(preds, gt, ctx, out_dir, seq_id):
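        # Note: out_dir is unused; every path below derives from
        # samples_dir in the enclosing scope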

        # Build the sequence directory and read sample/timestep counts
        seq_dir = os.path.join(samples_dir, '{:0>4}'.format(seq_id))
        n_samples = len(preds)
        timesteps = gt.shape[1]

        # Save samples
        for sample_id in range(n_samples):
            sample_dir = os.path.join(seq_dir, '{:0>4}'.format(sample_id))
            os.makedirs(sample_dir, exist_ok=True)
            Parallel(n_jobs=20)(
                delayed(save_sample_png)(sample_dir, frame, f_id)
                for f_id, frame in enumerate(preds[sample_id]))

        # Save ctx
        sample_dir = os.path.join(seq_dir, 'ctx')
        os.makedirs(sample_dir, exist_ok=True)
        Parallel(n_jobs=20)(delayed(save_sample_png)(sample_dir, frame, f_id)
                            for f_id, frame in enumerate(ctx[0]))

        # Save gt
        sample_dir = os.path.join(seq_dir, 'gt')
        os.makedirs(sample_dir, exist_ok=True)
        Parallel(n_jobs=20)(delayed(save_sample_png)(sample_dir, frame, f_id)
                            for f_id, frame in enumerate(gt[0]))

    model.eval()
    n_seqs = 0
    # for batch_idx, batch in enumerate(tqdm(val_loader, desc='Sequence loop')):
    for batch_idx, batch in enumerate(val_loader):

        if n_seqs >= config['n_seqs']:
            break

        frames, idxs = train_fns.prepare_batch(batch, config)

        # Find id of the sequence and decide whether to work on it or not
        sequence_id = idxs[0]
        sequence_dir = os.path.join(samples_dir, '{:0>4}'.format(sequence_id))
        if os.path.exists(sequence_dir):
            n_seqs += frames.shape[0]
            continue
        os.makedirs(sequence_dir, exist_ok=True)

        batch_size = 1
        frames = frames.repeat(batch_size, 1, 1, 1, 1)
        samples_done = 0
        all_preds = []

        sampling_ok = True
        while samples_done < config['n_samples']:
            try:
                (preds,
                 targets), _ = train_fns.sample_step(model, config, frames)
            except Exception:
                sampling_ok = False
                break

            preds = preds[:, config['n_ctx']:].contiguous()
            preds = preds.detach()
            targets = targets.detach()
            all_preds.append(preds)
            samples_done += batch_size

        if not sampling_ok:
            continue

        # Trim extra samples
        all_preds = torch.cat(all_preds, 0)
        all_preds = all_preds[:config['n_samples']]

        # Convert to numpy
        ctx = targets[:, :config['n_ctx']]
        targets = targets[:, config['n_ctx']:]
        targets = targets.detach().cpu().numpy().transpose(0, 1, 3, 4, 2)
        ctx = ctx.detach().cpu().numpy().transpose(0, 1, 3, 4, 2)
        all_preds = all_preds.detach().cpu().numpy().transpose(0, 1, 3, 4, 2)

        # Save samples to PNG files
        save_samples(all_preds, targets, ctx, out_dir, sequence_id)

        # Update number of samples
        n_seqs += frames.shape[0]

    print('All done')
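
Example #8 relies on a save_sample_png helper that is not shown; a plausible sketch, assuming each frame is an HxWxC float array in [0, 1] (the real helper may differ):

import os
import numpy as np
from PIL import Image

def save_sample_png(sample_dir, frame, f_id):
    # Hypothetical helper: write one frame as a zero-padded PNG
    img = Image.fromarray((np.clip(frame, 0.0, 1.0) * 255).astype(np.uint8))
    img.save(os.path.join(sample_dir, '{:04d}.png'.format(f_id)))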