Example #1
import sys

import torch
from torch.utils.data import DataLoader

# Project-local helpers (check_dir, BuildDataSet, Transpose, TensorFlip,
# WGAN_SACNN_AE, train_model) are assumed to be importable from this repo.

def main(args):
    if args.use_cuda:
        print("Using GPU")
        torch.cuda.set_device(args.gpu_id)
    else:
        print("Using CPU")

    check_dir(args.loss_path)
    check_dir(args.model_path)
    check_dir(args.optimizer_path)

    pre_trans_img = [Transpose(), TensorFlip(0), TensorFlip(1)]
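    # The flip/transpose augmentations are applied to the training split only;
    # the validation split is built without augmentation.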
    datasets = {"train": BuildDataSet(args.data_root_path, args.train_folder, pre_trans_img, args.data_length["train"], "train"),
            "val": BuildDataSet(args.data_root_path, args.val_folder, None, args.data_length["val"], "val")}

    data_length = {x: len(datasets[x]) for x in ["train", "val"]}
    print("Data length: Train: {} Val: {}".format(data_length["train"], data_length["val"]))
    
    kwargs = {"num_workers": args.num_workers, "pin_memory": True if args.mode is "train" else False}
    dataloaders = {x: DataLoader(datasets[x], args.batch_size[x], shuffle=args.is_shuffle, **kwargs) for x in ["train", "val"]}

    ## *********************************************************************************************************
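    # WGAN_SACNN_AE is this repo's WGAN-based autoencoder; "v2" presumably
    # selects a model variant.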
    model = WGAN_SACNN_AE(args.batch_size[args.mode], args.root_path, "v2")
    if args.mode == "train":
        train_model(model=model,
                    dataloaders=dataloaders,
                    args=args)
        print("Run train.py Success!")
    else:
        print("\nargs.mode is wrong!\n")
        sys.exit(1)
Example #2
import os
import sys

import torch
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader

# Project-local helpers (check_dir, build_geo, BuildDataSet, Transpose,
# TensorFlip, ModelInit, UnetDa, train_model, test_model) are assumed to
# come from this repo.

def main(args):
    if args.use_cuda:
        print("Using GPU")
        torch.cuda.set_device(args.gpu_id)
    else:
        print("Using CPU")

    check_dir(args.result_path)
    check_dir(args.loss_path)
    check_dir(args.model_path)
    check_dir(args.optimizer_path)

    # Build projection geometries for the full-view and sparse-view scans.
    geo_full = build_geo(args.full_view)
    geo_sparse = build_geo(args.sparse_view)

    pre_trans_img = [Transpose(), TensorFlip(0), TensorFlip(1)]
    datasets_v = {
        "train":
        BuildDataSet(args.data_root_path, args.train_folder, geo_full,
                     geo_sparse, pre_trans_img, "train"),
        "val":
        BuildDataSet(args.data_root_path, args.val_folder, geo_full,
                     geo_sparse, None, "val"),
        "test":
        BuildDataSet(args.data_root_path, args.test_folder, geo_full,
                     geo_sparse, None, "test")
    }

    data_length = {x: len(datasets_v[x]) for x in ["train", "val", "test"]}
    print("Data length:Train:{} Val:{} Test:{}".format(data_length["train"],
                                                       data_length["val"],
                                                       data_length["test"]))
    if data_length != args.data_length:
        print("args.data_length is wrong!")
        sys.exit(1)

    # Number of full batches per epoch for each split.
    batch_num = {
        x: data_length[x] // args.batch_size[x]
        for x in ["train", "val", "test"]
    }
    kwargs = {
        "num_workers": args.num_workers,
        # pin_memory speeds up host-to-GPU copies; only useful when training.
        "pin_memory": args.mode == "train",
    }
    dataloaders = {
        x: DataLoader(datasets_v[x],
                      args.batch_size[x],
                      shuffle=args.is_shuffle,
                      **kwargs)
        for x in ["train", "val", "test"]
    }

    ## *********************************************************************************************************
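    # ModelInit presumably holds the model's hyperparameters; UnetDa is this
    # repo's U-Net-style reconstruction network.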
    model_parser = ModelInit()
    model = UnetDa(model_parser)
    criterion = torch.nn.MSELoss()
    if args.use_cuda:
        criterion = criterion.cuda()
        model = model.cuda()
    optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999))
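    # Optionally decay the learning rate by `gamma` every `step_size` epochs.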
    scheduler = lr_scheduler.StepLR(
        optimizer, step_size=args.step_size,
        gamma=args.gamma) if args.is_lr_scheduler else None
    ## *********************************************************************************************************
    if args.mode == "train":
        train_model(model=model,
                    optimizer=optimizer,
                    geo_full=geo_full,
                    geo_sparse=geo_sparse,
                    dataloaders=dataloaders,
                    batch_num=batch_num,
                    criterion=criterion,
                    scheduler=scheduler,
                    args=args)
        print("Run train_function.py Success!")
    elif args.mode == "test":
        model_reload_path = os.path.join(args.old_modle_path,
                                         args.old_modle_name + ".pkl")
        if os.path.isfile(model_reload_path):
            print("Loading previously trained network...")
            print(model_reload_path)
            checkpoint = torch.load(model_reload_path,
                                    map_location=lambda storage, loc: storage)
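            # map_location loads all tensors onto the CPU first, so a checkpoint
            # saved on GPU can be restored on any machine. Keeping only the keys
            # present in the current model tolerates extra or renamed layers.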
            model_dict = model.state_dict()
            checkpoint = {
                k: v
                for k, v in checkpoint.items() if k in model_dict
            }
            model_dict.update(checkpoint)
            model.load_state_dict(model_dict)
            del checkpoint
            torch.cuda.empty_cache()
            if args.use_cuda:
                model = model.cuda()
            print("Loading Done!")
        else:
            print("Loading failed...")
            sys.exit(1)
        test_model(model=model,
                   dataloaders=dataloaders,
                   criterion=criterion,
                   batch_num=batch_num,
                   args=args)
        print("Run test_function.py Success!")
    else:
        print("\nPlease go to 'exhibit_main.py' to get more information!!\n")
Example #3
# The opening of this snippet is truncated in the original. The imports below
# are assumed; `imgTransform` is presumably a torchvision transforms.Compose
# whose transform list is not shown.
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms

# Project-local modules assumed: cf (config), dataLoader, modified_resnet, train.

imgTransform = transforms.Compose([
    # ... (transform list truncated)
])
trainLoader, valLoader = dataLoader.get_train_valid_loader(
    cf.photo_url, 50, 32, 'all', imgTransform, 0.1, -1)

# Define the learning rate.
learningRate = 1e-3

# Definition of our loss: mean squared error.
criterion = nn.MSELoss()

# Load a pretrained ResNet-50 and replace its final fully connected layer
# (512 * 4 = 2048 inputs) with a single-output head for regression.
new_pretrained_model = modified_resnet.resnet50(pretrained=True)
new_pretrained_model.fc = nn.Linear(512 * 4, 1)

# Definition of optimization strategy.
optimizer = optim.SGD(new_pretrained_model.parameters(), lr=learningRate)

# Train the previously defined model.
result = train.train_model(new_pretrained_model,
                           criterion,
                           optimizer,
                           trainLoader,
                           valLoader,
                           n_epochs=10,
                           use_gpu=True,
                           batch_size=50,
                           notebook=False,
                           save_name="resnet_all_category")
print(result)

torch.save(new_pretrained_model.state_dict(), "./resnet_all_category_final")
Example #4
from unet import UNet
import torch
import torch.optim as optim
from train_function import train_model

# Select the GPU if one is available, otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
    torch.cuda.set_device(device)

model = UNet().to(device)
optimizer_ft = optim.Adam(model.parameters(), lr=1e-3)
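# Multiply the learning rate by 0.9 every 4 epochs.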
exp_lr_scheduler = optim.lr_scheduler.StepLR(optimizer_ft,
                                             step_size=4,
                                             gamma=0.9)

model = train_model(model, optimizer_ft, exp_lr_scheduler, num_epochs=80)
torch.save(model.state_dict(), "./save_model")
Example #5

# Snippet truncated at the top; `network` is presumably a VGG-style model whose
# classifier head is being replaced (`learningRate`, `trainLoader`, `valLoader`,
# and the train/utils modules come from the truncated portion). The opening
# line below is a hypothetical reconstruction:
network.classifier = nn.Sequential(
    nn.Linear(512 * 7 * 7, 4096),
    nn.ReLU(True),
    nn.Dropout(),
    nn.Linear(4096, 4096),
    nn.ReLU(True),
    nn.Dropout(),
    nn.Linear(4096, 1),
)

# Definition of our loss: mean squared error.
criterion = nn.MSELoss()

# Definition of optimization strategy.
optimizer = optim.SGD(network.parameters(), lr=learningRate)

# Train the previously defined model.
result = train.train_model(network,
                           criterion,
                           optimizer,
                           trainLoader,
                           valLoader,
                           n_epochs=10,
                           use_gpu=True,
                           notebook=False)
print(result)

utils.save_loss(result[2], result[3], './test_loss.png')
utils.save_accuracy(result[0], result[1], './test_accu.png')
Example #6
# Snippet truncated at the top; the loop header below is a hypothetical
# reconstruction that separates bias parameters from the rest of `cnn`'s
# parameters (`cnn`, `dataloader`, `dataset_sizes`, and `device` come from
# the truncated portion):
biases, not_biases = [], []
for param_name, param in cnn.named_parameters():
    if param.requires_grad:
        if param_name.endswith('.bias'):
            biases.append(param)
        else:
            not_biases.append(param)

lr = 1e-4
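# Two parameter groups: biases are trained at twice the base learning rate;
# per-group settings that are not specified (momentum, weight decay) fall
# back to the top-level defaults below.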
optimizer = torch.optim.SGD(params=[{
    'params': biases,
    'lr': 2 * lr
}, {
    'params': not_biases
}],
                            lr=lr,
                            momentum=0.9,
                            weight_decay=5e-4)

exp_lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=7,
                                                   gamma=0.1)

train_model(cnn,
            optimizer,
            exp_lr_scheduler,
            dataloader,
            dataset_sizes,
            device,
            lr,
            loadModel=True,
            num_epochs=1000)
Example #7
# Gather the parameters to be optimized/updated in this run. If we are
# fine-tuning we update all parameters; if we are feature extracting we
# only update the newly initialized parameters, i.e. those whose
# requires_grad is True.
params_to_update = model_ft.parameters()
print("Params to learn:")
if feature_extract:
    params_to_update = []
    for name, param in model_ft.named_parameters():
        if param.requires_grad:
            params_to_update.append(param)
            print("\t", name)
else:
    for name, param in model_ft.named_parameters():
        if param.requires_grad:
            print("\t", name)

# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(params_to_update, lr=0.001, momentum=0.9)

# Set up the loss function.
# TODO: modify the loss function.

criterion = nn.CrossEntropyLoss()

# Train and evaluate
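# is_inception flags the Inception v3 model, which needs special handling for
# its auxiliary output during training.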
model_ft, hist = train_model(model_ft,
                             dataloaders_dict,
                             criterion,
                             optimizer_ft,
                             num_epochs=num_epochs,
                             device=device,
                             is_inception=(model_name == "inception"))