Example 1
    old_savename = args.savename
    args.savename = infn.split("-")[0]
    print("savename is %s (original savename %s was overwritten)" %
          (args.savename, old_savename))

    checkpoint = torch.load(args.resume)
    args = checkpoint['args']  # Load args!
    [model_g1, model_g2], model_f1 = get_multichannel_model(
        net_name=args.net,
        input_ch_list=args.inch_list,
        n_class=args.n_class,
        method=detailed_method,
        res=args.res,
        is_data_parallel=args.is_data_parallel)

    optimizer = get_optimizer(list(model_g1.parameters()) +
                              list(model_g2.parameters()) +
                              list(model_f1.parameters()),
                              opt=args.opt,
                              lr=args.lr,
                              momentum=args.momentum,
                              weight_decay=args.weight_decay)
    model_g1.load_state_dict(checkpoint['g1_state_dict'])
    model_g2.load_state_dict(checkpoint['g2_state_dict'])
    model_f1.load_state_dict(checkpoint['f1_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    print("=> loaded checkpoint '{}'".format(args.resume))

    json_fn = os.path.join(args.outdir, "param_%s_resume.json" % args.savename)
    check_if_done(json_fn)
    args.machine = os.uname()[1]
    save_dic_to_json(args.__dict__, json_fn)

    start_epoch = checkpoint['epoch']
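
For context, the resume branch above reads six keys from the checkpoint: 'args', 'g1_state_dict', 'g2_state_dict', 'f1_state_dict', 'optimizer', and 'epoch'. A matching save call, as a sketch only (the original training loop is not part of this listing, and the file-name pattern is an assumption consistent with the infn.split("-")[0] parsing used elsewhere):

# Hypothetical save call mirroring exactly what the resume branch reads.
torch.save({
    'epoch': epoch + 1,
    'args': args,
    'g1_state_dict': model_g1.state_dict(),
    'g2_state_dict': model_g2.state_dict(),
    'f1_state_dict': model_f1.state_dict(),
    'optimizer': optimizer.state_dict(),
}, os.path.join(args.outdir, "%s-%d.pth.tar" % (args.savename, epoch)))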
Example 2

args.savename = get_model_name_from_path(
    args.checkpoint) + "AND" + get_model_name_from_path(args.extra_checkpoint)

print("savename is %s " % (args.savename))

checkpoint = torch.load(args.checkpoint)
model_g_3ch, model_g_1ch, model_f1, model_f2 = get_models(
    net_name=args.net,
    res=args.res,
    input_ch=args.input_ch,
    n_class=args.n_class,
    method=detailed_method,
    is_data_parallel=args.is_data_parallel)
optimizer_g = get_optimizer(list(model_g_3ch.parameters()) +
                            list(model_g_1ch.parameters()),
                            lr=args.lr,
                            opt=args.opt,
                            momentum=args.momentum,
                            weight_decay=args.weight_decay)
optimizer_f = get_optimizer(list(model_f1.parameters()) +
                            list(model_f2.parameters()),
                            lr=args.lr,
                            opt=args.opt,
                            momentum=args.momentum,
                            weight_decay=args.weight_decay)

model_g_3ch.load_state_dict(checkpoint['g_state_dict'])

print("=> loading checkpoint '{}'".format(args.extra_checkpoint))
model_g_1ch.load_state_dict(torch.load(args.extra_checkpoint)['g_state_dict'])

if args.uses_one_classifier:
    model_f2 = model_f1  # assumed continuation: share a single classifier head
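
get_model_name_from_path is not defined in this listing. A plausible sketch, assuming it derives a name from the checkpoint file name by dropping the directory and extensions (this exact behavior is an assumption):

import os

def get_model_name_from_path(path):
    # Hypothetical sketch: "outdir/drn_d_38-20.pth.tar" -> "drn_d_38-20"
    return os.path.basename(path).split(".")[0]

With this, args.savename above becomes "<name1>AND<name2>", one name per checkpoint.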
Example 3
    old_savename = args.savename
    args.savename = infn.split("-")[0]
    print("savename is %s (original savename %s was overwritten)" %
          (args.savename, old_savename))

    checkpoint = torch.load(args.resume)
    args = checkpoint['args']  # Load args!

    model = get_full_model(net=args.net,
                           res=args.res,
                           n_class=args.n_class,
                           input_ch=args.input_ch)
    optimizer = get_optimizer(model.parameters(),
                              opt=args.opt,
                              lr=args.lr,
                              momentum=args.momentum,
                              weight_decay=args.weight_decay)
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    print("=> loaded checkpoint '{}'".format(args.resume))

    json_fn = os.path.join(args.outdir, "param_%s_resume.json" % args.savename)
    check_if_done(json_fn)
    args.machine = os.uname()[1]
    save_dic_to_json(args.__dict__, json_fn)

else:
    model = get_full_model(net=args.net,
                           res=args.res,
                           n_class=args.n_class,
                           input_ch=args.input_ch)
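
check_if_done and save_dic_to_json are also undefined in this listing. Minimal sketches, assuming the first guards against overwriting an existing parameter file and the second dumps the argument namespace as JSON:

import json
import os

def check_if_done(json_fn):
    # Hypothetical guard: refuse to clobber a finished run's parameter file.
    if os.path.exists(json_fn):
        raise RuntimeError("%s already exists" % json_fn)

def save_dic_to_json(dic, json_fn):
    # Hypothetical dump of the args namespace for reproducibility.
    with open(json_fn, "w") as f:
        json.dump(dic, f, indent=4, default=str)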

Example 4

    checkpoint = torch.load(args.resume)
    epochs = args.epochs  # keep the requested epoch count before args is replaced
    args = checkpoint['args']  # Load args!
    # -------------------------------------- #
    args.epochs = epochs
    model_enc, model_dec = get_segbd_multitask_models(
        net_name=args.net,
        input_ch=args.input_ch,
        n_class=args.n_class,
        is_data_parallel=args.is_data_parallel,
        semseg_criterion=criterion,
        discrepancy_criterion=criterion_d)

    model_enc.load_state_dict(checkpoint['enc_state_dict'])
    model_dec.load_state_dict(checkpoint['dec_state_dict'])

    optimizer_enc = get_optimizer(model_enc.parameters(),
                                  lr=args.lr,
                                  momentum=args.momentum,
                                  opt=args.opt,
                                  weight_decay=args.weight_decay)
    optimizer_dec = get_optimizer(model_dec.parameters(),
                                  opt=args.opt,
                                  lr=args.lr,
                                  momentum=args.momentum,
                                  weight_decay=args.weight_decay)
    optimizer_enc.load_state_dict(checkpoint["optimizer_enc"])
    optimizer_dec.load_state_dict(checkpoint["optimizer_dec"])
    print("=> loaded checkpoint '{}'".format(args.resume))

else:
    model_enc, model_dec = get_segbd_multitask_models(
        net_name=args.net,
        input_ch=args.input_ch,
        n_class=args.n_class,
        is_data_parallel=args.is_data_parallel,
        semseg_criterion=criterion,
        discrepancy_criterion=criterion_d)

Example 5

# if args.opt == 'sgd':
#     optimizer_g = torch.optim.SGD(model_g.parameters(), lr=args.lr, momentum=args.momentum,
#                                   weight_decay=args.weight_decay)
#     optimizer_d = torch.optim.SGD(model_d.parameters(), lr=args.lr, momentum=args.momentum,
#                                   weight_decay=args.weight_decay)
#     optimizer_f = torch.optim.SGD(list(model_f.parameters()), lr=args.lr,
#                                   momentum=args.momentum,
#                                   weight_decay=args.weight_decay)
# if args.opt == 'adam':
#     optimizer_g = torch.optim.Adam(model_g.parameters(), lr=args.lr, betas=[0.5, 0.999],
#                                    weight_decay=args.weight_decay)
#     optimizer_f = torch.optim.Adam(list(model_f.parameters()) + list(model_f2.parameters()), lr=args.lr,
#                                    betas=[0.5, 0.999],
#                                    weight_decay=args.weight_decay)
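
# get_optimizer itself is not shown in this listing. A minimal sketch, assuming
# it simply dispatches on the optimizer name and mirrors the commented-out
# SGD/Adam branches above (including the (0.5, 0.999) betas for Adam):
def get_optimizer(params, opt="sgd", lr=1e-3, momentum=0.9, weight_decay=0.0):
    if opt == "sgd":
        return torch.optim.SGD(params, lr=lr, momentum=momentum,
                               weight_decay=weight_decay)
    if opt == "adam":
        return torch.optim.Adam(params, lr=lr, betas=(0.5, 0.999),
                                weight_decay=weight_decay)
    raise NotImplementedError("opt must be 'sgd' or 'adam', got %s" % opt)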

optimizer_g = get_optimizer(model_g.parameters(), args.opt, args.lr,
                            args.momentum, args.weight_decay)
optimizer_d = get_optimizer(model_d.parameters(), args.opt, args.lr,
                            args.momentum, args.weight_decay)
optimizer_f = get_optimizer(model_f.parameters(), args.opt, args.lr,
                            args.momentum, args.weight_decay)

args.start_epoch = 0
if args.resume:
    print("=> loading checkpoint '{}'".format(args.resume))
    if not os.path.exists(args.resume):
        raise OSError("%s does not exist!" % args.resume)

    indir, infn = os.path.split(args.resume)
    savename = infn.split("-")[0]
    print("savename is %s (%s was overwritten)" % (savename, args.savename))