Code example #1
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)

    # Set the logger
    log_dir = os.path.join(args.model_dir_transfer, "logs")
    if not os.path.exists(log_dir):
        print("Making log directory {}".format(log_dir))
        os.mkdir(log_dir)
    utils.set_logger(os.path.join(log_dir, "train.log"))

    # Create the input data pipeline
    logging.info("Loading the datasets...")

    # fetch dataloaders
    params_transfer.encoding = params_transfer.encoding_source
    train_dl = dataloader.fetch_dataloader(
        args.data_dir, args.txt_train, 'train', params_transfer)

    val_dl_source = dataloader.fetch_dataloader(
        args.data_dir, args.txt_val_source, 'val', params_transfer)

    params_transfer.encoding = params_transfer.encoding_target
    val_dl_target = dataloader.fetch_dataloader(
        args.data_dir, args.txt_val_target, 'val', params_transfer)

    logging.info("- done.")

    # Define the model and optimizer
    model_source = get_network(params_source).to(params_transfer.device)
    model_target = get_network(params_target).to(params_transfer.device)
    transfer = get_transfer(params_transfer).to(params_transfer.device)
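
The snippet ends right after the three networks are built; a typical next step would be to construct the optimizer. A minimal sketch, assuming (this is not shown in the excerpt) that only the transfer module is optimized and that params_transfer carries a learning_rate:

import torch.optim as optim

# Hypothetical continuation: optimize only the transfer module's parameters.
opt_transfer = optim.AdamW(transfer.parameters(),
                           lr=params_transfer.learning_rate)
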
Code example #2
    args = parser.parse_args()
    json_path = os.path.join(args.model_dir, 'params.json')
    assert os.path.isfile(
        json_path), "No json configuration file found at {}".format(json_path)
    params = utils.Params(json_path)

    # use GPU if available
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    # Set the random seed for reproducible experiments
    seed = 42
    torch.manual_seed(seed)
    random.seed(seed)
    torch.backends.cudnn.deterministic = True
    np.random.seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)

    train_dl = data_loader.fetch_dataloader(args.data_dir, args.txt_train, 'train', params)

    # Define the model and optimizer
    model = get_network(params).to(device)
    opt = optim.AdamW(model.parameters(), lr=params.learning_rate)
    loss_fn = get_loss_fn(loss_name=params.loss_fn)

    if args.checkpoint_dir:
        model = utils.load_checkpoint(model, is_best=False, ckpt_dir=args.checkpoint_dir)[0]
    
    log_lrs, losses = find_lr(train_dl, opt, model, loss_fn, device)
    plot_lr(log_lrs, losses)
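
Here find_lr plays the role of a learning-rate range test. The project's own implementation is not shown, but a minimal sketch of such a test (assuming the loader yields (inputs, labels) pairs) could look like this:

import math
import torch

def lr_range_test(train_dl, opt, model, loss_fn, device,
                  init_lr=1e-8, final_lr=10.0):
    """Exponentially increase the learning rate each batch and record the loss."""
    num_steps = max(len(train_dl) - 1, 1)
    mult = (final_lr / init_lr) ** (1.0 / num_steps)
    lr = init_lr
    log_lrs, losses = [], []
    model.train()
    for inputs, labels in train_dl:
        for group in opt.param_groups:
            group["lr"] = lr
        inputs, labels = inputs.to(device), labels.to(device)
        opt.zero_grad()
        loss = loss_fn(model(inputs), labels)
        loss.backward()
        opt.step()
        log_lrs.append(math.log10(lr))
        losses.append(loss.item())
        lr *= mult
    return log_lrs, losses

plot_lr can then plot losses against log_lrs; a learning rate just below the point where the loss starts to diverge is usually chosen.
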
Code example #3
    # use GPU if available
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    params_transfer.device = device

    # Set the random seed for reproducible experiments
    seed = 42
    torch.manual_seed(seed)
    random.seed(seed)
    torch.backends.cudnn.deterministic = True
    np.random.seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)

    params_transfer.encoding = params_transfer.encoding_target
    val_dl = dataloader.fetch_dataloader(args.data_dir, args.txt_val, 'val',
                                         params_transfer)

    # logging.info("- done.")

    # Define the model and optimizer
    model_source = get_network(params_source).to(params_transfer.device)
    model_target = get_network(params_target).to(params_transfer.device)
    transfer = get_transfer(params_transfer).to(params_transfer.device)

    # Load source and target models before training and extract backbones
    ckpt_source_file_path = os.path.join(args.checkpoint_dir_source,
                                         best_ckpt_filename)
    if os.path.exists(ckpt_source_file_path):
        model_source = utils.load_checkpoint(
            model_source,
            ckpt_dir=args.checkpoint_dir_source,
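
The load_checkpoint call is cut off in this excerpt. Once both the source and target checkpoints are restored, "extract backbones" presumably means the pretrained networks are kept fixed while the transfer module is trained; a hypothetical sketch of that freezing step:

# Hypothetical: freeze the pretrained source and target networks so that only
# the transfer module receives gradient updates.
for net in (model_source, model_target):
    for p in net.parameters():
        p.requires_grad = False
    net.eval()
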
Code example #4
    # use GPU if available
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    params.device = device

    # Set the random seed for reproducible experiments
    seed = 42
    torch.manual_seed(seed)
    random.seed(seed)
    torch.backends.cudnn.deterministic = True
    np.random.seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)

    # fetch dataloaders
    val_dl = dataloader.fetch_dataloader(args.data_dir, args.txt_val, "val",
                                         params)
    # Define the model
    model = get_network(params).to(params.device)

    # num_classes+1 for background.
    metrics = OrderedDict({})
    for metric in params.metrics:
        metrics[metric] = get_metrics(metric, params)

    # Reload weights from the saved file
    model = utils.load_checkpoint(model,
                                  is_best=True,
                                  checkpoint_dir=args.checkpoint_dir)[0]

    # Evaluate
    eval_loss, val_metrics = evaluate(model,
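
The call to evaluate is truncated in this excerpt. The project's evaluate is not shown, but a generic evaluation loop of the kind it likely wraps (assuming the loader yields (inputs, labels) pairs, a loss_fn is available, and each metric returns a scalar) would be:

import torch

def evaluate_sketch(model, loss_fn, val_dl, metrics, params):
    """Average loss and metrics over the validation set (illustrative only)."""
    model.eval()
    total_loss, num_batches = 0.0, 0
    metric_sums = {name: 0.0 for name in metrics}
    with torch.no_grad():
        for inputs, labels in val_dl:
            inputs = inputs.to(params.device)
            labels = labels.to(params.device)
            outputs = model(inputs)
            total_loss += loss_fn(outputs, labels).item()
            for name, metric_fn in metrics.items():
                metric_sums[name] += metric_fn(outputs, labels)
            num_batches += 1
    val_metrics = {name: s / num_batches for name, s in metric_sums.items()}
    return total_loss / num_batches, val_metrics
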
Code example #5
    # Set the logger
    log_dir = os.path.join(args.model_dir_transfer, "logs")
    if not os.path.exists(log_dir):
        print("Making log directory {}".format(log_dir))
        os.mkdir(log_dir)
    utils.set_logger(os.path.join(log_dir, "train.log"))

    # Create the input data pipeline
    logging.info("Loading the datasets...")

    # fetch dataloaders
    params_transfer.encoding = params_transfer.encoding_source
    train_dl_all = dataloader.fetch_dataloader(args.data_dir,
                                               args.txt_train_carla,
                                               'train',
                                               params_transfer,
                                               sem_depth=True,
                                               txt_file2=args.txt_train_cs)

    # train_dl_depth_target = dataloader.fetch_dataloader(
    #     args.data_dir, args.txt_train_cs, 'train', params_source)

    val_dl_all = dataloader.fetch_dataloader(args.data_dir,
                                             args.txt_val_source,
                                             'val',
                                             params_transfer,
                                             sem_depth=True,
                                             txt_file2=args.txt_val_target)

    params_transfer.encoding = params_transfer.encoding_target
    val_dl_target = dataloader.fetch_dataloader(args.data_dir,
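
The sem_depth=True and txt_file2 arguments suggest the loader pairs CARLA and Cityscapes samples internally, replacing the commented-out alternative of two separate loaders above. If one did keep two independent loaders instead, a hypothetical way to pair their batches (dummy tensors used only to make the sketch self-contained):

from itertools import cycle
import torch
from torch.utils.data import DataLoader, TensorDataset

# Hypothetical pairing of two independent loaders: cycle the shorter one so
# every CARLA batch is matched with a Cityscapes batch.
dl_carla = DataLoader(TensorDataset(torch.randn(8, 3, 32, 32)), batch_size=2)
dl_cs = DataLoader(TensorDataset(torch.randn(6, 3, 32, 32)), batch_size=2)

for (x_carla,), (x_cs,) in zip(dl_carla, cycle(dl_cs)):
    paired_batch = (x_carla, x_cs)  # one combined training step per pair
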
Code example #6
    np.random.seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)

    # Set the logger
    log_dir = os.path.join(args.model_dir, "logs")
    if not os.path.exists(log_dir):
        print("Making log directory {}".format(log_dir))
        os.mkdir(log_dir)
    utils.set_logger(os.path.join(log_dir, "train.log"))

    # Create the input data pipeline
    logging.info("Loading the datasets...")

    # fetch dataloaders
    train_dl = dataloader.fetch_dataloader(args.data_dir, args.txt_train,
                                           "train", params)
    val_dl = dataloader.fetch_dataloader(args.data_dir, args.txt_val, "val",
                                         params)

    logging.info("- done.")

    # Define the model and optimizer
    model = get_network(params).to(params.device)
    opt = optim.AdamW(model.parameters(), lr=params.learning_rate)
    lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(
        opt,
        max_lr=params.learning_rate,
        steps_per_epoch=len(train_dl),
        epochs=params.num_epochs,
        div_factor=20)
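
OneCycleLR needs steps_per_epoch and epochs because it is meant to be stepped once per batch rather than once per epoch. A minimal sketch of the matching training loop, reusing the names above and assuming a loss_fn obtained via get_loss_fn as in the other examples:

# Sketch: advance OneCycleLR once per optimizer step (i.e. per batch).
for epoch in range(params.num_epochs):
    model.train()
    for inputs, labels in train_dl:
        inputs = inputs.to(params.device)
        labels = labels.to(params.device)
        opt.zero_grad()
        loss = loss_fn(model(inputs), labels)
        loss.backward()
        opt.step()
        lr_scheduler.step()  # total steps = steps_per_epoch * num_epochs
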