Esempio n. 1
0
def main_handler(*args, **kwargs):
    """Check in every configured user and push the results.

    Users whose per-user push succeeds are notified individually; the
    rest are collected and pushed in one batch via the global push
    configuration.

    :param kwargs: must contain 'user_config_path' and 'push_config_path'.
    """
    # Check-in results that still need the batch push.
    raw_info = []

    # Load the per-user configuration file.
    user_config_dict = load_config(kwargs['user_config_path'])
    for user_config in user_config_dict:
        # Skip entries without a phone number — nothing to check in.
        if not user_config['phone']:
            continue
        log.info(user_config['welcome'])

        # Check in for this single user.
        check_dict = check_in(user_config)
        # Per-user push; fall back to the batch push when not configured.
        # (Replaced the `if ...: pass / else:` anti-pattern with `if not`.)
        if not info_push(user_config['push'], check_dict):
            log.info("当前用户并未配置 push 参数,将统一进行推送")
            raw_info.extend(check_dict)

    # Batch push for users without their own push configuration.
    if raw_info:
        all_push_config = load_config(kwargs['push_config_path'])
        if not info_push(all_push_config, raw_info):
            log.info('统一推送未开启,如要开启,请修改 conf/push.json 配置文件')
    else:
        log.info('所有打卡数据已推送完毕,无需统一推送')
Esempio n. 2
0
    def _load_pretrained_model(self):
        """Build and restore the pretrained reply-matching model.

        Loads the saved config twice: once for the model and its
        preprocessor, and once (with a SentencePiece tokenizer and soynlp
        scores swapped in) for the inference-time preprocessor. Restores
        the best-loss checkpoint into a dedicated graph/session pair.

        Side effect: stores the inference preprocessor on
        ``self.infer_preprocessor``.

        :return: tuple of (infer_model, infer_sess).
        """
        # NOTE(review): checkpoint location is hard-coded to a local disk path.
        base_dir = "/media/scatter/scatterdisk/reply_matching_model/runs/delstm_1024_nsrandom4_lr1e-3/"
        config_dir = base_dir + "config.json"
        best_model_dir = base_dir + "best_loss/best_loss.ckpt"
        model_config = load_config(config_dir)
        model_config.add_echo = False
        preprocessor = DynamicPreprocessor(model_config)
        preprocessor.build_preprocessor()

        # Separate config for inference: different tokenizer + soynlp scores.
        infer_config = load_config(config_dir)
        setattr(infer_config, "tokenizer", "SentencePieceTokenizer")
        setattr(
            infer_config, "soynlp_scores",
            "/media/scatter/scatterdisk/tokenizer/soynlp_scores.sol.100M.txt")
        infer_preprocessor = DynamicPreprocessor(infer_config)
        infer_preprocessor.build_preprocessor()
        # Isolate the pretrained model in its own graph so its variables do
        # not collide with any other model living in this process.
        graph = tf.Graph()
        tf_config = tf.ConfigProto()
        tf_config.gpu_options.allow_growth = True

        with graph.as_default():
            Model = get_model(model_config.model)
            data = DataGenerator(preprocessor, model_config)
            infer_model = Model(data, model_config)
            infer_sess = tf.Session(config=tf_config, graph=graph)
            infer_sess.run(tf.global_variables_initializer())
            infer_sess.run(tf.local_variables_initializer())

        # Overwrite the freshly initialized variables with the checkpoint.
        infer_model.load(infer_sess, model_dir=best_model_dir)
        self.infer_preprocessor = infer_preprocessor
        return infer_model, infer_sess
Esempio n. 3
0
def main():
    """Main function for the model calibration tool.

    Parses the command line, loads the configuration, sets up logging,
    dynamically imports the configured workflow class and runs it.
    Exits with status 1 on any unrecoverable error.
    """

    parser = argparse.ArgumentParser(
        "Extract calibration parameters from CMS monitoring data.")
    parser.add_argument(
        "--conf",
        default="calibration.json",
        help="Path to the configuration file used for calibration")

    args = parser.parse_args()

    # Load configuration file
    try:
        config.load_config(args.conf)
    except ValueError as e:
        # Directly print as logging infrastructure is not set up yet
        print("Could not load configuration file. Error: {}".format(e))
        print("Exiting.")
        sys.exit(1)

    log_subdir = os.path.join(config.outputDirectory, 'log')
    setup_logging(log_subdir)
    logging.getLogger().setLevel(logging.DEBUG)

    logging.info("Starting Model Calibration.")

    logging.debug("Running with Pandas version: {}".format(pd.__version__))
    logging.debug("Running with Numpy version: {}".format(np.__version__))

    # Load workflow module from configuration file
    def import_class(name):
        # Resolve a dotted "package.module.Class" path to the class object.
        modname, classname = name.rsplit('.', 1)
        module = __import__(modname, fromlist=[classname])
        attr = getattr(module, classname)
        return attr

    try:
        workflow_class = import_class(config.workflow)
    except ImportError as e:
        logging.error("Could not import workflow class {}. Error: {}".format(
            config.workflow, str(e)))
        sys.exit(1)

    if not issubclass(workflow_class, CalibrationWorkflow):
        logging.error(
            "Workflows must implement the CalibrationWorkflow interface.")
        sys.exit(1)

    # Run workflow
    workflow = workflow_class()
    try:
        workflow.run()
    except Exception:
        # Fix: corrected "occured" -> "occurred" in the error message.
        logging.error("An error occurred during execution of the workflow!")
        logging.error(traceback.format_exc())
        sys.exit(1)

    logging.info("Model Calibration Finished")
Esempio n. 4
0
def main():
    """Evaluate a trained model described by an experiment config.

    Reads the experiment name from the command line, loads its JSON
    config, builds the data generator and model, restores the saved
    weights and runs evaluation.
    """
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
    except (Exception, SystemExit):
        # Narrowed from a bare except: still covers argparse's SystemExit
        # on bad arguments without swallowing KeyboardInterrupt.
        print("missing or invalid arguments")
        exit(0)

    exp_name = args.opt
    args.opt = 'experiments/{0}/config.json'.format(exp_name)
    config = load_config(args.opt, is_train=False, exp_name=exp_name)
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    if not config.exp_name:
        raise Exception('please specify experiment name')

    # create the data generator
    data = Data(config)

    # create an instance of the model
    Model = GetModel(config.model)
    model = Model(config)

    # compile the model with the data pipeline
    model.compile(data)

    # load model weights if a checkpoint exists
    model.load()

    # evaluate the model
    model.evaluate()
def main(args):
    """Reconstruct a single image with a trained autoencoder and save it.

    :param args: namespace with ``autoencoder_checkpoint``, ``output_dir``,
        ``device`` and ``image`` attributes.
    """
    root_dir = Path(args.autoencoder_checkpoint).parent.parent
    output_dir = root_dir / args.output_dir
    output_dir.mkdir(exist_ok=True, parents=True)

    config = load_config(args.autoencoder_checkpoint, None)
    # Reconstruct one image at a time.
    config['batch_size'] = 1
    autoencoder = get_autoencoder(config).to(args.device)
    autoencoder = load_weights(autoencoder,
                               args.autoencoder_checkpoint,
                               key='autoencoder')

    input_image = Path(args.image)
    data_loader = build_data_loader(input_image,
                                    config,
                                    config['absolute'],
                                    shuffle_off=True,
                                    dataset_class=DemoDataset)

    image = next(iter(data_loader))
    image = {k: v.to(args.device) for k, v in image.items()}

    reconstructed = Image.fromarray(
        make_image(autoencoder(image['input_image'])[0].squeeze(0)))

    # Fix: save into the directory that was actually created above
    # (root_dir / output_dir), not the raw args.output_dir path.
    output_name = output_dir / (
        f"reconstructed_{input_image.stem}_stylegan_{config['stylegan_variant']}"
        f"_{'w_only' if config['w_only'] else 'w_plus'}.png")
    reconstructed.save(output_name)
Esempio n. 6
0
def create_dirs(config):
    """Ensure the experiment checkpoint directory exists.

    Creates ``<checkpoint_dir>/<name>/`` (plus a ``summaries/`` subdir) on
    first use; when the run directory already exists, merges the best_*
    bookkeeping params from the stored config.json into *config* and
    compares the two configs.

    :param config: experiment config object with ``checkpoint_dir`` and
        ``name`` attributes.
    :return: the (mutated) config, with ``checkpoint_dir`` pointing at
        the run directory (trailing slash kept for compatibility).
    """
    try:
        checkpoint_dir = config.checkpoint_dir
        name = config.name
        # Renamed from "dir" to avoid shadowing the builtin.
        run_dir = os.path.join(checkpoint_dir, name)
        config.checkpoint_dir = run_dir + "/"
        if not os.path.exists(run_dir):
            os.makedirs(run_dir)
            os.makedirs(run_dir + "/summaries/")
        else:
            config_path = run_dir + "/config.json"
            if os.path.isfile(config_path):
                old_config = load_config(config_path)
                # Carry over best-result bookkeeping from the previous run
                # when the new config does not define it yet.
                for param in ["best_loss", "best_step", "best_epoch"]:
                    if (param not in vars(config)) and (param
                                                        in vars(old_config)):
                        value = getattr(old_config, param)
                        setattr(config, param, value)
                compare_configs(old_config, config)
            config.checkpoint_dir = run_dir + "/"
        return config

    except Exception as err:
        print("Creating directories error: {0}".format(err))
        exit(-1)
Esempio n. 7
0
async def init_app() -> web.Application:
    """Create and configure the aiohttp application.

    Loads the app config, configures logging, creates the DB engine and
    the shared HTTP client, mounts sub-apps and (optionally) swagger,
    and registers the cleanup hook.
    """
    app = await create_app()

    # read app config
    config = load_config(settings.BASE_DIR, settings.CONFIG_TRAFARET)
    app['config'] = config

    # setup logging settings
    logging_settings = import_from_string(app['config']['logging'])
    logging.config.dictConfig(logging_settings)

    # create db
    db = await create_db_engine(**config['database'])
    app['db'] = db

    # create HTTP client (shared session, closed in deinit_app)
    http_client = ClientSession()
    app['http_client'] = http_client

    # init sub apps
    await init_subapps(app)

    # init swagger only when enabled in the config
    if app['config']['swagger']:
        # Fix: plain string instead of an f-string with no placeholders.
        logger.debug('Init swagger')
        setup_swagger(app)

    app.on_cleanup.append(deinit_app)

    return app
Esempio n. 8
0
    def run(self, *args, **kwargs):
        """Service entry point: load worker limits, spawn workers, listen.

        Blocks in the channel listener; on exit, terminates any workers
        that are still alive.

        :param kwargs: must contain 'config_path' pointing at the
            directory that holds misc.json.
        """
        self.kwargs = kwargs
        cfg = config.load_config(kwargs['config_path'], 'misc.json')
        # dict.get keeps the current attribute value as the default
        # (replaces the `x if k in cfg else y` ternaries).
        self.workers_min = cfg.get('workers_min', self.workers_min)
        self.workers_max = cfg.get('workers_max', self.workers_max)

        self.logger.info('Misc Service v[{0}], name=[{1}], starting...'.format(
            config.version, self.name))

        # give pub sub some time... not using syncho notifications...
        time.sleep(1)

        # start worker processes
        for n in range(0, self.workers_min):
            self.start_worker()

        # start listening (blocks until the listener exits)
        self.listener([MiscService.MISC_SERVICE_CHANNEL], None)
        self.logger.warning('Misc Service listener exit!')

        # force kill any remaining workers
        while self.workers:
            p = self.workers.popitem()
            self.logger.warning('Terminating remaining worker {0}!'.format(
                p[0]))
            p[1].terminate()
        self.logger.warning('Misc Service process exit!')
Esempio n. 9
0
def evaluation(config_path, checkpoint_path, transforms=None):
    """Evaluate a saved object-detection checkpoint on the TEST split."""
    cfg = load_config(config_path)
    device = cfg["device"]
    dataset_option = cfg["dataset_option"]

    # Build the validation loader from the TEST split.
    val_loader, class_list = load_dataset(
        dataset_name=dataset_option["name"],
        data_dir=dataset_option["test_data_dir"],
        split="TEST",
        options=dataset_option,
        transforms=transforms["TEST"],
        batch_size=32)

    # The checkpoint stores the full model object under 'model'.
    checkpoint = torch.load(checkpoint_path, map_location=device)
    model = checkpoint['model']
    print("model", model)

    # Class ids start at 1; 0 is reserved for the background class.
    label_map = {name: idx + 1 for idx, name in enumerate(class_list)}
    label_map['background'] = 0
    rev_label_map = {idx: name for name, idx in label_map.items()}

    ret = eval_objectdetection(model=model,
                               data_loader=val_loader,
                               n_classes=len(class_list) + 1,
                               label_map=label_map,
                               rev_label_map=rev_label_map,
                               device=device)
    print(ret)
Esempio n. 10
0
def run(config_file):
    """Train the classifier described by *config_file*.

    Builds train/valid dataloaders, model, loss, optimizer and scheduler
    from the config, then launches Catalyst's SupervisedRunner, resuming
    from ``checkpoints/best_full.pth`` when a best checkpoint exists.
    """
    config = load_config(config_file)

    # Persist the effective config next to the run artifacts.
    os.makedirs(config.work_dir, exist_ok=True)
    save_config(config, config.work_dir + '/config.yml')

    # NOTE(review): GPU selection is hard-coded to device 0.
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'

    all_transforms = {}
    all_transforms['train'] = get_transforms(config.transforms.train)
    # Validation reuses the test-time transforms.
    all_transforms['valid'] = get_transforms(config.transforms.test)

    # One loader per phase, both reading the training dataframe
    # (idx_fold separates the train rows from the valid rows).
    dataloaders = {
        phase: make_loader(
            data_folder=config.data.train_dir,
            df_path=config.data.train_df_path,
            phase=phase,
            batch_size=config.train.batch_size,
            num_workers=config.num_workers,
            idx_fold=config.data.params.idx_fold,
            transforms=all_transforms[phase],
            num_classes=config.data.num_classes,
            pseudo_label_path=config.train.pseudo_label_path,
            task='cls'
        )
        for phase in ['train', 'valid']
    }

    # create model
    model = CustomNet(config.model.encoder, config.data.num_classes)

    # train setting: separate learning rates for the pretrained encoder
    # and the freshly initialized head.
    criterion = get_loss(config)
    params = [
        {'params': model.base_params(), 'lr': config.optimizer.params.encoder_lr},
        {'params': model.fresh_params(), 'lr': config.optimizer.params.decoder_lr}
    ]
    optimizer = get_optimizer(params, config)
    scheduler = get_scheduler(optimizer, config)

    # model runner
    runner = SupervisedRunner(model=model)

    callbacks = [MultiClassAccuracyCallback(threshold=0.5), F1ScoreCallback()]
    # Resume from a previous run's best checkpoint when present.
    if os.path.exists(config.work_dir + '/checkpoints/best.pth'):
        callbacks.append(CheckpointCallback(resume=config.work_dir + '/checkpoints/best_full.pth'))

    # model training (mixed precision enabled via fp16)
    runner.train(
        model=model,
        criterion=criterion,
        optimizer=optimizer,
        scheduler=scheduler,
        loaders=dataloaders,
        logdir=config.work_dir,
        num_epochs=config.train.num_epochs,
        callbacks=callbacks,
        verbose=True,
        fp16=True,
    )
Esempio n. 11
0
def main():
    """Set up config, seeding, the model and the data loaders."""
    logger = get_logger('Graph matching requester')

    arg_parser = argparse.ArgumentParser(description='Graph matching')
    arg_parser.add_argument('--config', type=str,
                            help='Path to the YAML config file',
                            default=CONFIG_PATH)
    cli_args = arg_parser.parse_args()

    # Load the experiment configuration and log it.
    config = load_config(cli_args.config)
    logger.info(config)

    # Make all RNGs deterministic when a seed is configured.
    manual_seed = config.get('manual_seed', None)
    if manual_seed is not None:
        logger.info(f'Seed the RNG for all devices with {manual_seed}')
        torch.manual_seed(manual_seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    # Build the model, wrapping it for multi-GPU when several are present.
    module_path = "models.model"
    model = _get_model(module_path, config)
    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)

    # Move the model to the configured device.
    logger.info(f"Sending the model to '{config['device']}', using {torch.cuda.device_count()} GPUs...")
    model = model.to(config['device'])

    # Build the data loaders.
    loaders = get_data_loaders(config)
Esempio n. 12
0
def main():
    """Train the seq2seq model from CLI args or a pre-saved config."""
    config = args.parse_args()
    # A saved config file, when given, replaces the CLI arguments.
    if config.config:
        config = load_config(config.config)

    # Print every config attribute as "NAME=value", pipe-separated.
    pairs = ["{}={}".format(attr.upper(), value)
             for attr, value in vars(config).items()]
    print(" | ".join(pairs))

    # Create the experiment directories.
    config = create_dirs(config)

    # TF session with incremental GPU memory allocation.
    device_config = tf.ConfigProto()
    device_config.gpu_options.allow_growth = True
    sess = tf.Session(config=device_config)

    # Preprocessing, data generation and tensorboard summaries.
    preprocessor = DynamicPreprocessor(config)
    data = DataGenerator(preprocessor, config)
    summary_writer = SummaryWriter(sess, config)

    # Assemble the trainer from the components above and train.
    trainer = Seq2SeqTrainer(sess, preprocessor, data, config, summary_writer)
    trainer.train()
Esempio n. 13
0
    def run(self, *args, **kwargs):
        """Poll the queue forever, broadcasting a publish command per item.

        Runs until an exception terminates the loop; the exception is
        logged and swallowed so the process can exit cleanly.

        :param kwargs: must contain 'config_path' pointing at the
            directory that holds queue.json.
        """
        cfg = config.load_config(kwargs['config_path'], 'queue.json')
        # dict.get with a default (replaces the membership-test ternary).
        period_s = cfg.get('period_s', 10)

        self.logger.info('Queue v[{0}], poll period=[{1}]s, starting...'.format(config.version, period_s))

        try:
            while True:
                # get the next items from the queue
                # set look ahead value to half of the wait time
                items = self.data.buffer.get_next_queue_items(period_s / 2.0)

                self.logger.info('{0} items...'.format(len(items)))

                # post notifications for each item
                for itm in items:
                    self.logger.info('Notifying: {0}'.format(itm))
                    # item format: "gid:target"
                    item = itm.split(':')
                    self.broadcast_command(S1.publisher_channel_name(item[1]), S1.msg_publish(), item[0])

                # sleep a randomized interval (±10%) to avoid lockstep polling
                # NOTE(review): randrange with float arguments is deprecated
                # and raises on Python 3.11+ — consider random.uniform.
                s = random.randrange(period_s - (period_s / 10.0), period_s + (period_s / 10.0))
                self.logger.info('Sleeping {0} seconds...'.format(s))
                time.sleep(s)

        except Exception as e:
            self.logger.warning('Queue is terminating (exception): {0}'.format(e))
            self.logger.exception(traceback.format_exc())
Esempio n. 14
0
def train_options(def_config, parser=None):
    """Parse CLI options and load the model configuration.

    :param def_config: default config filename inside MODEL_CNF_DIR.
    :param parser: optional pre-built ArgumentParser to extend.
    :return: (args, config) tuple.
    """
    if parser is None:
        parser = argparse.ArgumentParser()

    parser.add_argument('--config', default=def_config)
    parser.add_argument('-c', default="checkpoint")

    parser.add_argument('--device', default="auto")
    parser.add_argument('--cores', type=int, default=4)
    parser.add_argument('--source', nargs='*',
                        default=["models", "modules", "utils"])

    args = parser.parse_args()
    config = load_config(os.path.join(MODEL_CNF_DIR, args.config))

    # Resolve "auto" to cuda when available, otherwise cpu.
    if args.device == "auto":
        args.device = torch.device("cuda" if torch.cuda.is_available()
                                   else "cpu")

    if args.source is None:
        args.source = []

    # Renamed loop variable from "dir" to avoid shadowing the builtin.
    args.source = [os.path.join(BASE_DIR, src) for src in args.source]

    # Echo every parsed argument for reproducibility.
    for arg in vars(args):
        print("{}:{}".format(arg, getattr(args, arg)))
    print()

    return args, config
Esempio n. 15
0
async def init():
    """Build the aiohttp application with config, DB hooks and routes."""
    application = web.Application()
    application['config'] = load_config()
    application.on_startup.append(init_db)
    application.on_cleanup.append(close_db)
    application.add_routes(routes)
    return application
Esempio n. 16
0
def train_options(def_config):
    """Parse CLI options, load the model config and sync name/desc.

    :param def_config: default config filename inside MODEL_CNF_DIR.
    :return: (args, config) tuple.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default=def_config)
    parser.add_argument('--name')
    parser.add_argument('--desc')
    parser.add_argument('--device', default="auto")
    parser.add_argument('--cores', type=int, default=1)
    parser.add_argument('--source',
                        nargs='*',
                        default=["models", "modules", "utils"])

    args = parser.parse_args()
    config = load_config(os.path.join(MODEL_CNF_DIR, args.config))

    # Fall back to the config's name when none is given on the CLI.
    if args.name is None:
        args.name = config["name"]
    config["desc"] = args.desc

    # Resolve "auto" to cuda when available, otherwise cpu.
    if args.device == "auto":
        args.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")

    if args.source is None:
        args.source = []

    # Renamed loop variable from "dir" to avoid shadowing the builtin.
    args.source = [os.path.join(BASE_DIR, src) for src in args.source]

    # Echo every parsed argument for reproducibility.
    for arg in vars(args):
        print("{}:{}".format(arg, getattr(args, arg)))
    print()

    return args, config
Esempio n. 17
0
def create_db():
    """Create all tables declared on *metadata* in the configured DB."""
    config = load_config()
    # Hoist the nested section instead of five repeated dict lookups.
    db = config["db"]
    engine = sa.create_engine(
        f'postgresql://{db["user"]}:{db["password"]}'
        f'@{db["host"]}:{db["port"]}/{db["database"]}'
    )
    metadata.create_all(engine)
Esempio n. 18
0
def main():
    """Parse CLI options and launch a BCN run on the chosen dataset."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", required=False,
                        default='basic_model.yaml',
                        help="config file of input data")
    parser.add_argument('--device', default=0, type=int,
                        help='Which device to run one; 0 for CPU')
    parser.add_argument('--data', default='resources',
                        help='where to store data')
    parser.add_argument('--embeddings', default='.embeddings',
                        help='where to store embeddings')
    parser.add_argument('--dataset', default='TREC-6',
                        choices={'IWSLT', 'SST-2', 'SST-5', 'IMDB', 'TREC-6', 'TREC-50', 'SNLI'},
                        help='')
    parser.add_argument('--embeddings_type', default='all',
                        choices={'glove', 'cove_1', 'cove_2', 'all', 'decove'},
                        help='variation of embeddings to be used')
    parser.add_argument('--checkpoint', default='CoVex2_TREC-6')
    cli_args = parser.parse_args()

    # Restrict CUDA to the requested device before anything touches it.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(cli_args.device)

    # Load the model config and pick the compute device.
    config = load_config(os.path.join(MODEL_CNF_DIR, cli_args.input))
    config["device"] = 'cuda' if cli_args.device >= 0 else 'cpu'
    print("\nThis experiment runs on {}...\n".format(config["device"]))

    bcn(config, cli_args.data, cli_args.embeddings, cli_args.device,
        cli_args.checkpoint, cli_args.dataset, cli_args.embeddings_type)
Esempio n. 19
0
def main():
    """Train a model from a JSON experiment config given on the CLI."""
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
    except (Exception, SystemExit):
        # Narrowed from a bare except: still covers argparse's SystemExit
        # on bad arguments without swallowing KeyboardInterrupt.
        print("missing or invalid arguments")
        exit(0)

    # Experiment name is the config file's basename without extension.
    exp_name = args.opt.split('/')[-1].replace('.json', '')
    config = load_config(args.opt, is_train=True, exp_name=exp_name)
    os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu_to_use

    # create your data generator
    data = Data(config)

    # create an instance of the model
    Model = GetModel(config.model)
    model = Model(config)

    # compile the model with the data pipeline
    model.compile(data)

    # load model weights if a checkpoint exists
    model.load()

    # train the model
    model.fit()
Esempio n. 20
0
def train(args):
    """Train a model with the hyperparameters from the resolved config.

    Resolves warm-start / resume directories, picks the model directory
    and runs the estimator's train loop for the configured step budget.

    :param args: run configuration (directory names etc.)
    """
    config_file = infer_optional_directory(args.config, './config')
    config = load_config(config_file)

    warm_start_path = infer_optional_directory(args.warm_start, args.checkpoints)
    resume_path = infer_optional_directory(args.resume, args.checkpoints)

    # Resuming already warm-starts from the resume dir; both at once is ambiguous.
    if warm_start_path is not None and resume_path is not None:
        raise RuntimeError('When resuming there is automatic warmstart from resume dir, warm start should be empty')

    if resume_path is not None:
        warm_start_path = resume_path
        model_dir = resume_path
    else:
        model_dir = get_new_model_directory(args.checkpoints, args.name, config.dataset.name)

    # Endless-epoch input pipeline; the step budget bounds training.
    def input_fn():
        return load_dataset(config, config.train.batch_size, epochs=-1, shuffle=config.train.shuffle)

    model = make_model(model_dir, warm_start_path, config)
    hooks = [CopyConfigHook(config_file, model_dir)]

    model.train(input_fn=input_fn, steps=config.train.steps, hooks=hooks)
Esempio n. 21
0
def run_cls(config_file_cls):
    """Run classification inference on the validation fold and report
    accuracy / F1 across a sweep of thresholds.

    Restores the experiment's best checkpoint, predicts the validation
    split (optionally with TTA), prints per-threshold metrics and saves
    the raw predictions to ``valid_preds.npy``.
    """
    # NOTE(review): GPU selection is hard-coded to device 0.
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'

    # ------------------------------------------------------------------------------------------------------------
    # 1. classification inference
    # ------------------------------------------------------------------------------------------------------------
    config = load_config(config_file_cls)

    # Validation loader: test-time transforms on the training dataframe
    # (idx_fold selects the held-out rows).
    validloader = make_loader(data_folder=config.data.train_dir,
                              df_path=config.data.train_df_path,
                              phase='valid',
                              batch_size=config.train.batch_size,
                              num_workers=config.num_workers,
                              idx_fold=config.data.params.idx_fold,
                              transforms=get_transforms(
                                  config.transforms.test),
                              num_classes=config.data.num_classes,
                              task='cls')

    # Restore the best checkpoint into eval mode.
    model = CustomNet(config.model.encoder, config.data.num_classes)
    model.to(config.device)
    model.eval()
    checkpoint = load_checkpoint(f"{config.work_dir}/checkpoints/best.pth")
    model.load_state_dict(checkpoint['model_state_dict'])

    # Collect predictions and targets batch by batch, gradients disabled.
    all_predictions = []
    all_targets = []
    with torch.no_grad():
        for i, (batch_images, batch_targets) in enumerate(tqdm(validloader)):
            batch_images = batch_images.to(config.device)
            batch_preds = predict_batch(model,
                                        batch_images,
                                        tta=config.test.tta,
                                        task='cls')

            all_targets.append(batch_targets)
            all_predictions.append(batch_preds)

    all_predictions = np.concatenate(all_predictions)
    all_targets = np.concatenate(all_targets)

    # evaluation: sweep thresholds 0.1..0.9 and score each one.
    # NOTE(review): the threshold is applied to the targets as well —
    # presumably the targets are soft labels; confirm against the data.
    all_accuracy_scores = []
    all_f1_scores = []
    thresholds = np.linspace(0.1, 0.9, 9)
    for th in thresholds:
        accuracy = accuracy_score(all_targets > th, all_predictions > th)
        f1 = f1_score(all_targets > th,
                      all_predictions > th,
                      average='samples')
        all_accuracy_scores.append(accuracy)
        all_f1_scores.append(f1)

    for th, score in zip(thresholds, all_accuracy_scores):
        print('validation accuracy for threshold {} = {}'.format(th, score))
    for th, score in zip(thresholds, all_f1_scores):
        print('validation f1 score for threshold {}  = {}'.format(th, score))

    np.save('valid_preds', all_predictions)
Esempio n. 22
0
def main():
    """Wire up the web app and the bot, then run both concurrently."""
    cli_args = parse_args()
    bot_config, web_config, db_config = load_config(cli_args.config_path)

    web_app = init_web_app(web_config)
    bot = Globibot(bot_config, db_config, web_app, cli_args.plugin_path)

    run_async(web_app.run(), bot.boot())
Esempio n. 23
0
def main():
    """Train the GCN model end to end from a YAML config.

    Sets up seeding, builds the model (multi-GPU aware), the loss and
    evaluation criteria, data loaders, optimizer and LR scheduler, then
    runs the trainer and prints the best evaluation score.
    """
    # Create main logger
    logger = get_logger('GCN Trainer')

    parser = argparse.ArgumentParser(description='GCN training')
    parser.add_argument('--config', type=str, help='Path to the YAML config file', default = CONFIG_PATH)
    args = parser.parse_args()

    # Load and log experiment configuration
    config = load_config(args.config)
    logger.info(config)

    # Optional deterministic mode driven by the config's manual_seed.
    manual_seed = config.get('manual_seed', None)
    if manual_seed is not None:
        logger.info(f'Seed the RNG for all devices with {manual_seed}')
        torch.manual_seed(manual_seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    # Create the model; wrap in DataParallel when several GPUs are present.
    module_path = "models.gcn.model"
    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(_get_model(module_path, config))
    else:
        model = _get_model(module_path, config)

    # put the model on GPUs
    logger.info(f"Sending the model to '{config['device']}', using {torch.cuda.device_count()} GPUs...")
    model = model.to(config['device'])
    # weights initialization
    model.apply(weights_init)

    # Log the number of learnable parameters
    logger.info(f'Number of learnable params {get_number_of_learnable_parameters(model)}')

    # Create loss criterion
    loss_criterion = get_loss_criterion(config)
    # Create evaluation metric
    eval_criterion = get_evaluation_metric(config)

    # Create data loaders
    loaders = get_data_loaders(config)

    # Create the optimizer
    optimizer = _create_optimizer(config, model)

    # Create learning rate adjustment strategy
    lr_scheduler = _create_lr_scheduler(config, optimizer)

    # Create model trainer
    trainer = _create_trainer(config, model=model, optimizer=optimizer, lr_scheduler=lr_scheduler,
                              loss_criterion=loss_criterion, eval_criterion=eval_criterion, loaders=loaders,
                              logger=logger)
    # Start training
    trainer.fit()
    print('best evaluation score is:', trainer.best_eval_score)
Esempio n. 24
0
def main():
    """Evaluate phoneme error rate (PER) on the dev and test sets.

    Loads the model once — on the first dataset iteration, after the
    class count is known — and reuses it for both splits; logs the PER
    per split.
    """

    args = parser.parse_args()

    # Load a config file (.yml)
    params = load_config(join(args.model_path, 'config.yml'), is_eval=True)

    # Setting for logging
    logger = set_logger(args.model_path)

    for i, data_type in enumerate(['dev', 'test']):
        # Load dataset
        dataset = Dataset(data_save_path=args.data_save_path,
                          backend=params['backend'],
                          input_freq=params['input_freq'],
                          use_delta=params['use_delta'],
                          use_double_delta=params['use_double_delta'],
                          data_type=data_type,
                          label_type=params['label_type'],
                          batch_size=args.eval_batch_size,
                          splice=params['splice'],
                          num_stack=params['num_stack'],
                          num_skip=params['num_skip'],
                          sort_utt=False,
                          tool=params['tool'])

        if i == 0:
            # The output class count is only known once the first dataset
            # is built, so the model is created inside the loop, once.
            params['num_classes'] = dataset.num_classes

            # Load model
            model = load(model_type=params['model_type'],
                         params=params,
                         backend=params['backend'])

            # Restore the saved parameters
            epoch, _, _, _ = model.load_checkpoint(save_path=args.model_path,
                                                   epoch=args.epoch)

            # GPU setting
            model.set_cuda(deterministic=False, benchmark=True)

            logger.info('beam width: %d' % args.beam_width)
            logger.info('epoch: %d' % (epoch - 1))

        # Beam-search decode the split and score it against the phone map.
        per, df = eval_phone(model=model,
                             dataset=dataset,
                             map_file_path='./conf/phones.60-48-39.map',
                             eval_batch_size=args.eval_batch_size,
                             beam_width=args.beam_width,
                             max_decode_len=MAX_DECODE_LEN_PHONE,
                             min_decode_len=MIN_DECODE_LEN_PHONE,
                             length_penalty=args.length_penalty,
                             coverage_penalty=args.coverage_penalty,
                             progressbar=True)
        logger.info('  PER (%s): %.3f %%' % (data_type, (per * 100)))
        logger.info(df)
Esempio n. 25
0
def main_handler(*args, **kwargs):
    """Check in every configured user, pushing per-user and in batch.

    Unlike the conditional variant, this always attempts the per-user
    push AND collects every result for the batch push at the end.

    :param kwargs: must contain 'user_config_path' and 'push_config_path'.
    """
    # Accumulated check-in data for the batch push
    raw_info = []

    # Load the per-user configuration file
    user_config_dict = load_config(kwargs['user_config_path'])
    for user_config in user_config_dict:
        # Skip users without a phone number
        if not user_config['phone']:
            continue
        log.info(user_config['welcome'])

        # Check in for this single user
        check_dict = check_in(user_config)
        # Per-user push
        info_push(user_config['push'], check_dict)
        raw_info.extend(check_dict)

    # Batch push of all collected results
    all_push_config = load_config(kwargs['push_config_path'])
    info_push(all_push_config, raw_info)
def main():
    """Visualize attention weights for a trained model on the eval1 set."""

    args = parser.parse_args()

    # Load a config file (.yml)
    params = load_config(join(args.model_path, 'config.yml'), is_eval=True)

    # Load dataset (eval1 split; switch the data_type lines for eval2/3)
    test_data = Dataset(
        data_save_path=args.data_save_path,
        backend=params['backend'],
        input_freq=params['input_freq'],
        use_delta=params['use_delta'],
        use_double_delta=params['use_double_delta'],
        data_type='eval1',
        # data_type='eval2',
        # data_type='eval3',
        data_size=params['data_size'],
        label_type=params['label_type'],
        label_type_sub=params['label_type_sub'],
        batch_size=args.eval_batch_size,
        splice=params['splice'],
        num_stack=params['num_stack'],
        num_skip=params['num_skip'],
        sort_utt=False,
        reverse=False,
        tool=params['tool'])

    # The dataset determines the main and sub output vocabulary sizes.
    params['num_classes'] = test_data.num_classes
    params['num_classes_sub'] = test_data.num_classes_sub

    # Load model
    model = load(model_type=params['model_type'],
                 params=params,
                 backend=params['backend'])

    # Restore the saved parameters
    model.load_checkpoint(save_path=args.model_path, epoch=args.epoch)

    # GPU setting
    model.set_cuda(deterministic=False, benchmark=True)

    # Do not use oracle attention-to-character alignments.
    a2c_oracle = False

    # Plot attention weights and save them under att_weights/.
    plot(model=model,
         dataset=test_data,
         eval_batch_size=args.eval_batch_size,
         beam_width=args.beam_width,
         beam_width_sub=args.beam_width_sub,
         length_penalty=args.length_penalty,
         a2c_oracle=a2c_oracle,
         save_path=mkdir_join(args.model_path, 'att_weights'))
Esempio n. 27
0
def main():
    """Restore a trained model and decode the Librispeech test_clean split."""
    args = parser.parse_args()

    # Experiment configuration saved next to the checkpoint
    params = load_config(join(args.model_path, 'config.yml'), is_eval=True)

    # Vocabulary files are keyed by label type and data size
    vocab_file_path = '../metrics/vocab_files/' + \
        params['label_type'] + '_' + params['data_size'] + '.txt'
    vocab_file_path_sub = '../metrics/vocab_files/' + \
        params['label_type_sub'] + '_' + params['data_size'] + '.txt'

    # Evaluation dataset (test_clean; test_other is the alternative split)
    test_data = Dataset(
        backend=params['backend'],
        input_channel=params['input_channel'],
        use_delta=params['use_delta'],
        use_double_delta=params['use_double_delta'],
        data_type='test_clean',
        data_size=params['data_size'],
        label_type=params['label_type'],
        label_type_sub=params['label_type_sub'],
        vocab_file_path=vocab_file_path,
        vocab_file_path_sub=vocab_file_path_sub,
        batch_size=args.eval_batch_size,
        splice=params['splice'],
        num_stack=params['num_stack'],
        num_skip=params['num_skip'],
        sort_utt=True,
        reverse=True,
        save_format=params['save_format'])

    # The output sizes depend on the dataset's vocabularies
    params['num_classes'] = test_data.num_classes
    params['num_classes_sub'] = test_data.num_classes_sub

    # Build the model, restore the checkpointed weights, and move it to GPU
    model = load(model_type=params['model_type'],
                 params=params,
                 backend=params['backend'])
    model.load_checkpoint(save_path=args.model_path, epoch=args.epoch)
    model.set_cuda(deterministic=False, benchmark=True)

    # Print decoding results to stdout (save_path=None: nothing written to disk)
    decode(model=model,
           dataset=test_data,
           beam_width=args.beam_width,
           max_decode_len=args.max_decode_len,
           max_decode_len_sub=args.max_decode_len_sub,
           eval_batch_size=args.eval_batch_size,
           save_path=None)
Esempio n. 28
0
    def __init__(self, args, abort_condition=None):
        """Store CLI options, load the checkpointed config, and prepare the generator.

        Args:
            args: parsed CLI namespace; must provide ``device``, ``ckpt``,
                ``config`` and ``debug_step``.
            abort_condition: optional callable/flag checked by the caller to
                stop long-running work early (stored as-is).
        """
        self.args = args
        self.abort_condition = abort_condition
        self.device = args.device
        self.debug_step = args.debug_step

        # Restore the saved configuration and build the generator in eval mode
        self.config = load_config(args.ckpt, args.config)
        self.generator = self.load_generator()
        self.generator.eval()

        # Quality metric and per-run log accumulator
        self.psnr = PSNR()
        self.log = []
Esempio n. 29
0
def main_handler(*args, **kwargs):
    """Run check-in for every configured user and push the results.

    Users with a working personal ``push`` config are notified individually;
    everyone else's results are collected and sent once through the global
    push configuration.

    Keyword Args:
        user_config_path: path to the user config JSON (default ./conf/user.json).
        push_config_path: path to the global push config JSON (default ./conf/push.json).
    """
    # Check-in results pending the unified push
    raw_info = []

    # Fall back to the default config locations when not supplied by the caller
    user_config_path = kwargs.get('user_config_path') or './conf/user.json'
    push_config_path = kwargs.get('push_config_path') or './conf/push.json'

    user_config_dict = load_config(user_config_path)
    for user_config in user_config_dict:
        # Entries without a phone number are treated as disabled accounts
        if not user_config['phone']:
            continue
        log.info(user_config.get('welcome'))

        # Per-user check-in
        check_dict = check_in(user_config)
        # BUGFIX: only queue the data for the unified push when the personal
        # push did NOT happen; previously it was always extended, so users with
        # a personal push config had their results pushed twice.
        if not info_push(user_config['push'], check_dict):
            log.info("当前用户并未配置 push 参数,将统一进行推送")
            raw_info.extend(check_dict)

    # Unified push for everything not delivered individually
    if raw_info:
        all_push_config = load_config(push_config_path)
        if not info_push(all_push_config, raw_info):
            log.info('统一推送未开启,如要开启,请修改 conf/push.json 配置文件')
    else:
        log.info('所有打卡数据已推送完毕,无需统一推送')
Esempio n. 30
0
def main():
    """Build model, criteria and loaders from a YAML config, then train UNet3D."""
    # Main logger for the whole training run
    logger = get_logger('UNet3DTrainer')

    # CLI: a single --config option pointing at the training YAML
    arg_parser = argparse.ArgumentParser(description='UNet3D training')
    arg_parser.add_argument('--config', type=str, help='Path to the YAML config file', default='/home/SENSETIME/shenrui/Dropbox/SenseTime/edgeDL/resources/train_config_unet.yaml')
    parsed = arg_parser.parse_args()

    # Load and log the experiment configuration
    config = load_config(parsed.config)
    logger.info(config)

    manual_seed = config.get('manual_seed', None)
    if manual_seed is not None:
        logger.info(f'Seed the RNG for all devices with {manual_seed}')
        torch.manual_seed(manual_seed)
        # Trade speed for reproducibility
        # (see https://pytorch.org/docs/stable/notes/randomness.html)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    # Build the model and move it to the configured device
    model = get_model(config)
    logger.info(f"Sending the model to '{config['device']}'")
    model = model.to(config['device'])
    logger.info(f'Number of learnable params {get_number_of_learnable_parameters(model)}')

    # Loss, evaluation metric, and the train/val data loaders
    loss_criterion = get_loss_criterion(config)
    eval_criterion = get_evaluation_metric(config)
    loaders = get_train_loaders(config)

    # Optimizer and learning-rate schedule
    optimizer = _create_optimizer(config, model)
    lr_scheduler = _create_lr_scheduler(config, optimizer)

    # Assemble the trainer and start training
    trainer = _create_trainer(config, model=model, optimizer=optimizer, lr_scheduler=lr_scheduler,
                              loss_criterion=loss_criterion, eval_criterion=eval_criterion, loaders=loaders,
                              logger=logger)
    trainer.fit()
Esempio n. 31
0
def main():
    """Bootstrap the bot: parse CLI args, load configs, run web app and bot together."""
    args = parse_args()

    # The single config file yields the bot, web and database sections
    bot_config, web_config, db_config = load_config(args.config_path)

    web_app = init_web_app(web_config)

    bot = Globibot(
        bot_config,
        db_config,
        web_app,
        args.plugin_path
    )

    # Run the web server and the bot concurrently on the event loop
    run_async(web_app.run(), bot.boot())
Esempio n. 32
0
# -*- coding:utf-8 -*-

if __name__ == "__main__":
    # Load the configuration BEFORE importing the app: the deferred imports
    # below appear to rely on config being populated at import time.
    from utils import config

    config.load_config()

    # Imported late on purpose — keep this after config.load_config().
    from app import app

    app.main()
Esempio n. 33
0
import sys
import argparse
import logging
# BUGFIX: run/PIPE/CalledProcessError are used further down in this script
# but were never imported, which would raise NameError at runtime.
from subprocess import PIPE, CalledProcessError, run

from utils.config import load_config, get_commands, get_log_path, get_delete_limit
from utils.alert_email import send_email, add_message
from utils.logger import setupLogger

# Parse the input params
parser = argparse.ArgumentParser()
parser.add_argument("--verbose", help="Run in debug mode", action="store_true")
parser.add_argument("--config-file", help="Config file", type=str, default="../etc/config.yml")
args = parser.parse_args()

# Load the config first: the log path used below comes from it
config_file = args.config_file
load_config(config_file)
log_path = get_log_path()
setupLogger(log_path, args.verbose)
logger = logging.getLogger('rsync_backup')

# Generate the command pairs (logged below as Noexec/Exec) from the config
commands = get_commands()

for command_pair in commands:
    logger.debug("Noexec: %s", command_pair[0])
    logger.debug("Exec: %s", command_pair[1])

    # Work out how many files there are to delete
    try:
        status = run(command_pair[0], stdout=PIPE, universal_newlines=True )
    except CalledProcessError as e: