Example #1
def main():
    # Load configuration, set up logging, and open the MongoDB connection
    load_options()
    set_logger()
    mongo_connect()

    # Import the API modules only once configuration is loaded, then start
    # the Tornado-backed server
    from api import user, report, utils
    run(server='tornado', host=options.api_host, port=options.api_port)
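
Every example on this page begins by calling a set_logger helper whose body is not shown. As a reference point, here is a minimal sketch of the usual pattern, assuming only the standard-library logging module; the optional file-path argument mirrors the utils.set_logger(path) calls in Examples #3 and #5, and each project's real implementation may differ.

import logging

def set_logger(log_path=None):
    """Configure the root logger to print to the console and, if a path is
    given, to append to a log file as well."""
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    if not logger.handlers:  # avoid duplicate handlers on repeated calls
        if log_path is not None:
            file_handler = logging.FileHandler(log_path)
            file_handler.setFormatter(logging.Formatter(
                "%(asctime)s:%(levelname)s: %(message)s"))
            logger.addHandler(file_handler)
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(logging.Formatter("%(message)s"))
        logger.addHandler(stream_handler)
    return logger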
Example #2
def main():
    # Same bootstrap sequence as Example #1: options, logging, MongoDB
    load_options()
    set_logger()
    mongo_connect()

    # Deferred import so the bot is created against the loaded configuration
    from bot.nemesis import Nemesis
    Nemesis().read()
Example #3
File: train.py Project: lhaippp/GyroFlow
def main(params):
    # Cap GPU memory via MegEngine's dynamic tensor rematerialization (DTR)
    mge.dtr.eviction_threshold = "5GB"
    mge.dtr.enable()

    rank = dist.get_rank()

    # Set the logger
    logger = utils.set_logger(os.path.join(params.model_dir, 'train.log'))

    # Set the tensorboard writer
    tb_dir = os.path.join(params.model_dir, "summary")
    os.makedirs(tb_dir, exist_ok=True)
    writer = SummaryWriter(log_dir=tb_dir)

    # Create the input data pipeline
    if rank == 0:
        logger.info("Loading the datasets from {}".format(params.data_dir))

    # fetch dataloaders
    dataloaders = data_loader.fetch_dataloader(params)

    # Define the model and optimizer
    model = net.fetch_net(params)

    # Apply weight decay to weights only; biases are left unregularized
    param_groups = [
        {
            "params": utils.bias_parameters(model)
        },
        {
            "params": utils.weight_parameters(model),
            "weight_decay": 1e-6
        },
    ]

    optimizer = Adam(param_groups, lr=params.learning_rate, eps=1e-7)
    # Halve the learning rate at each milestone epoch
    milestones = [50, 150, 250, 350, 450]
    scheduler = MultiStepLR(optimizer, milestones, 0.5)

    # initial status for checkpoint manager
    manager = Manager(model=model,
                      optimizer=optimizer,
                      scheduler=scheduler,
                      params=params,
                      dataloaders=dataloaders,
                      writer=writer,
                      logger=logger)

    # Train the model
    if rank == 0:
        logger.info("Starting training for {} epoch(s)".format(
            params.num_epochs))

    train_and_evaluate(model, manager)
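
The param_groups above route weight decay to the weight group only; biases get none. utils.bias_parameters and utils.weight_parameters are not shown in this snippet, so the following is a plausible sketch that splits parameters by name; GyroFlow's actual helpers may differ.

def bias_parameters(module):
    # Parameters whose name contains 'bias': excluded from weight decay
    return [p for name, p in module.named_parameters() if 'bias' in name]

def weight_parameters(module):
    # Parameters whose name contains 'weight': these receive weight decay
    return [p for name, p in module.named_parameters() if 'weight' in name]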
Example #4
        raise ValueError("Model keyword argument is None!")
    dataset = params.data['dataset']
    trainloader_kwargs = params.data['trainloader-kwargs']
    trainset_kwargs = params.data['trainset-kwargs']
    valloader_kwargs = params.data['valloader-kwargs']
    valset_kwargs = params.data['valset-kwargs']
    optim_type = params.optimizer['type']
    optim_kwargs = params.optimizer['kwargs']
    lr_type = params.scheduler['type']
    lr_kwargs = params.scheduler['kwargs']

    # tensorboard
    writer = SummaryWriter(args.run_dir)

    # set the logger
    set_logger(os.path.join(args.run_dir, 'train.log'))

    # use GPU if available
    params.cuda = torch.cuda.is_available()
    device = torch.device('cuda:0' if params.cuda else 'cpu')

    # set random seed for reproducible experiments
    torch.manual_seed(200)
    if params.cuda:
        torch.cuda.manual_seed(200)

    ### ------ instantiations ----- ###
    # build model
    model = get_network_builder(
        params.model['network'])(**model_kwargs).to(device)
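
get_network_builder resolves a model constructor from the 'network' name in the config. A common way to implement that lookup is a decorator-based registry; the sketch below is one such implementation under that assumption, not necessarily the project's own.

_BUILDERS = {}

def register(name):
    # Decorator that files a network constructor under a string name
    def decorator(builder):
        _BUILDERS[name] = builder
        return builder
    return decorator

def get_network_builder(name):
    if name not in _BUILDERS:
        raise ValueError("Unknown network: {}".format(name))
    return _BUILDERS[name]

Decorating a class with, say, @register('resnet18') then makes get_network_builder('resnet18')(**kwargs) construct it.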
Example #5
if __name__ == '__main__':
    # Load the parameters
    args = parser.parse_args()
    json_path = os.path.join(args.model_dir, 'params.json')
    assert os.path.isfile(
        json_path), "No json configuration file found at {}".format(json_path)
    with open(json_path) as f:
        params = EasyDict(json.load(f))
    # Only load model weights
    params.only_weights = True

    # Update args into params
    params.update(vars(args))

    # Get the logger
    logger = utils.set_logger(os.path.join(args.model_dir, 'evaluate.log'))

    # Create the input data pipeline
    logging.info("Creating the dataset...")

    # Fetch dataloaders
    params.eval_type = 'test'
    dataloaders = data_loader.fetch_dataloader(params)

    # Define the model and optimizer
    model = net.fetch_net(params)

    # Initial status for checkpoint manager
    manager = Manager(model=model,
                      optimizer=None,
                      scheduler=None,
                      params=params,
                      dataloaders=dataloaders,
                      writer=None,
                      logger=logger)
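
Here params is an EasyDict built from params.json and then overlaid with the parsed CLI arguments via params.update(vars(args)). The standalone sketch below replays that merge; the JSON keys and the model_dir value are hypothetical.

import json
from easydict import EasyDict

# Hypothetical params.json contents, for illustration only
params = EasyDict(json.loads('{"learning_rate": 1e-4, "num_epochs": 100}'))

# Overlay command-line arguments, as params.update(vars(args)) does above
params.update({"model_dir": "experiments/base_model"})

print(params.learning_rate)   # 0.0001 -- attribute access on EasyDict
print(params.model_dir)       # experiments/base_model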