def train(model, criterion, train_loader, query_loader, gallery_loader, optimizer, experiment_name):
    """Run the full training loop for a single model, then persist it.

    Trains for ``args.n_epochs`` epochs; after each epoch the loss and
    metric are logged, and every ``args.print_every`` epochs (including
    epoch 0) retrieval performance is evaluated on the query/gallery
    split. Relies on module-level ``args`` and helpers (``train_epoch``,
    ``print_train_progress``, ``evaluation``, ``save_model``) defined
    elsewhere in this file.
    """
    for current_epoch in range(args.n_epochs):
        epoch_loss, epoch_metric = train_epoch(
            model, criterion, optimizer, train_loader)
        print_train_progress(current_epoch, epoch_loss, epoch_metric)
        # Periodic mid-training evaluation.
        if current_epoch % args.print_every == 0:
            evaluation(model, query_loader, gallery_loader)
    save_model(model, experiment_name)
# Running aggregate of evaluation results across all training partitions.
metrics = initialize_metrics()

# Train one independent model per partition id, evaluate it on the shared
# query/gallery split, and fold the scores into `metrics`.
for p_id in partitions_train:
    utils.print_log("Starting partition training for id: {}".format(p_id))

    # Fresh model and optimizer for every partition — no weight sharing
    # between partitions at this stage.
    model = initialize_model(args.model, args.embedding_dim)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    train_loader = get_train_loader(train_partitions[p_id])

    # Per-partition checkpoint name: "<experiment_name>_<p_id>".
    model_name = experiment_name + "_{}".format(p_id)
    train(model, criterion, train_loader, query_loader, gallery_loader,
          optimizer, model_name)

    utils.print_log(model_name)
    # NOTE(review): `ks` is presumably rank-k retrieval scores — confirm
    # against the evaluation() implementation.
    ks = evaluation(model, query_loader, gallery_loader)
    metrics = update_metrics(ks, *metrics)

# Optionally wrap the base triplet criterion with a continual-learning
# regularizer. The three method strings are mutually exclusive, so an
# if/elif chain is used (the original ran three independent `if` tests,
# which performed redundant comparisons; behavior is unchanged since at
# most one branch can match).
if args.continuous_learning_method == "lfl":
    # "lfl" — presumably Less-Forgetful Learning; confirm in Triplet_LFL.
    criterion = Triplet_LFL(triplet_criterion=criterion, lamb=args.lambda_lfl)
elif args.continuous_learning_method == "lwf":
    # "lwf" — presumably Learning without Forgetting; confirm in Triplet_LWF.
    criterion = Triplet_LWF(triplet_criterion=criterion, lamb=args.lambda_lwf)
elif args.continuous_learning_method == "ewc":
    # "ewc" — presumably Elastic Weight Consolidation; confirm in Triplet_EWC.
    criterion = Triplet_EWC(triplet_criterion=criterion, lamb=args.lambda_ewc)

# Fine-Tune
if partitions_tune:
    for idx, continuous_set in enumerate(partitions_tune):
        utils.print_log(
            "Loading model trained on continual batch {} and training on batch {}"
            .format(continuous_set["trained"], continuous_set["tune"]))
# --- Example #3 ("示例#3": snippet-site separator; the stray non-Python
# --- text here marked the start of an unrelated SegNet training fragment)
                                        shuffle=False,
                                        num_workers=10,
                                        pin_memory=True)

# Number of segmentation classes (dataset-specific constant).
n_classes = 12

# Build SegNet and initialize its encoder (presumably from pretrained
# weights inside init_encoder() — confirm in the model definition).
model = SegNet(in_channels=3, n_classes=n_classes)
model.init_encoder()
# model = torch.nn.DataParallel(model,
#                              device_ids=range(torch.cuda.device_count()))
model.cuda()

# Hyperparameter grids consumed by the epoch/learning-rate loops below.
epochs = [400]
lrs = [0.0001]

metrics = evaluation(n_classes=n_classes,
                     lr=lrs[0],
                     modelstr="SegNet",
                     textfile="newlog.txt")

# Median-frequency class-balancing weights computed from the label masks,
# moved to the GPU as float32 for use in the loss.
weights = NormalizedWeightComputationMedian(labels_path=label_path,
                                            n_classes=n_classes)

weights = torch.from_numpy(weights).float().cuda()

# FIX: `reduce` and `size_average` have been deprecated since PyTorch 0.4
# and are rejected by recent releases. reduce=True + size_average=True is
# exactly reduction='mean' (the default), so behavior is unchanged.
criterion = nn.CrossEntropyLoss(weight=weights, reduction='mean').cuda()

for ep in epochs:

    for lr in lrs: