Code Example #1
def main():
    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5("data/train.h5")
    training_data_loader = DataLoader(dataset=train_set,
                                      num_workers=opt.threads,
                                      batch_size=opt.batchSize,
                                      shuffle=True)

    print("===> Building model")
    model = Net()
    criterion = nn.MSELoss(size_average=False)

    print("===> Setting GPU")
    if cuda:
        model = torch.nn.DataParallel(model).cuda()
        criterion = criterion.cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    print("===> Setting Optimizer")
    optimizer = optim.SGD(model.parameters(),
                          lr=opt.lr,
                          momentum=opt.momentum,
                          weight_decay=opt.weight_decay)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch)
        save_checkpoint(model, epoch)
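Note: the examples on this page call a save_checkpoint helper that is not shown here. A minimal sketch consistent with how the resume branch above reads checkpoint["epoch"] and checkpoint["model"] could look like the following (the checkpoint/ directory and the filename pattern are assumptions, not taken from the original projects):

import os
import torch

def save_checkpoint(model, epoch):
    # Save the whole model object so that checkpoint["model"].state_dict()
    # can be read back later, matching the resume logic above.
    model_out_path = "checkpoint/model_epoch_{}.pth".format(epoch)
    state = {"epoch": epoch, "model": model}
    if not os.path.exists("checkpoint/"):
        os.makedirs("checkpoint/")
    torch.save(state, model_out_path)
    print("Checkpoint saved to {}".format(model_out_path))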
Code Example #2
def main():
    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda:
        print("=> use gpu id: '{}'".format(opt.gpus))
        os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus
        if not torch.cuda.is_available():
                raise Exception("No GPU found or Wrong gpu id, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5("data/BSD300.h5")
    training_data_loader = DataLoader(dataset=train_set, batch_size=opt.batchSize, shuffle=True)

    print("===> Building model")
    model = Net()
    criterion = nn.MSELoss(size_average=False)

    print("===> Setting GPU")
    if cuda:
        model = model.cuda()
        criterion = criterion.cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            print("=> no model found at '{}'".format(opt.pretrained))  

    print("===> Setting Optimizer")
    optimizer = optim.SGD(model.parameters(), lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch)
        if epoch % 100 == 0:
            save_checkpoint(model, epoch)
Code Example #3
def main():
    warnings.filterwarnings("ignore")
    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5(opt.dataset)
    training_data_loader = DataLoader(dataset=train_set,
                                      num_workers=opt.threads,
                                      batch_size=opt.batchSize,
                                      shuffle=True)

    print("===> Building model")
    # load the pre-trained teacher model and the lightweight model
    model = Net()

    criterion = nn.MSELoss(size_average=False)

    print("===> Setting GPU")
    if cuda:
        model = torch.nn.DataParallel(model).cuda()
        criterion = criterion.cuda()

    print("===> Setting Optimizer")
    optimizer = optim.SGD([{
        'params': model.module.parameters()
    }],
                          lr=opt.lr,
                          momentum=opt.momentum,
                          weight_decay=opt.weight_decay)

    print("===> Training")
    num = 0
    lossAarry = np.zeros(opt.nEpochs)
    pbar = tqdm(range(opt.start_epoch, opt.nEpochs + 1))
    for epoch in pbar:
        lossAarry[num] = train(training_data_loader, optimizer, model,
                               criterion, epoch)
        pbar.set_description("loss: %.8f" % (lossAarry[num]))
        # tqdm advances the bar automatically when iterating over pbar, so no explicit update() call is needed here
        num = num + 1
    pbar.close()
    save_checkpoint(model)
Code Example #4
File: valid.py  Project: juingzhou/SAM
def main():
    pickle.load = partial(pickle.load, encoding="latin1")
    pickle.Unpickler = partial(pickle.Unpickler, encoding="latin1")
    weights = torch.load(opt.model)
    model = Net()
    model.load_state_dict(weights['model'].state_dict())

    weights_sam = torch.load(opt.model_sam)
    model_sam = Net_SAM(n_intervals=[6,12], n_blocks=18, inchannels=1, nfeats=64, outchannels=1)
    model_sam.load_state_dict(weights_sam['model'].state_dict())

    if opt.cuda:
        model.cuda()
        model_sam.cuda()

    test_set = TestSetLoader(dataset_dir=opt.testset_dir + '/' + opt.dataset, scale_factor=opt.scale)
    test_loader = DataLoader(dataset=test_set, num_workers=1, batch_size=1, shuffle=False)

    import datetime
    oldtime = datetime.datetime.now()
    valid(test_loader, model)
    wotime = datetime.datetime.now()
    print('Time consuming: ', wotime - oldtime)
    valid_sam(test_loader, model_sam)
    wtime = datetime.datetime.now()
    print('Time consuming: ', wtime-wotime)
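The two partial() calls at the top force latin1 decoding so that checkpoints pickled under Python 2 load under Python 3. torch.load also accepts the encoding directly and forwards it to pickle, which avoids monkey-patching; a sketch reusing opt and Net from the example above:

# Same effect without patching pickle globally (sketch, not from the original script):
weights = torch.load(opt.model, encoding="latin1")
model = Net()
model.load_state_dict(weights["model"].state_dict())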
Code Example #5
def main():
    # Training settings
    parser = argparse.ArgumentParser(description="PyTorch VDSR")
    parser.add_argument("--batchSize",
                        type=int,
                        default=128,
                        help="Training batch size")
    parser.add_argument("--nEpochs",
                        type=int,
                        default=50,
                        help="Number of epochs to train for")
    parser.add_argument("--lr",
                        type=float,
                        default=0.1,
                        help="Learning Rate. Default=0.1")
    parser.add_argument(
        "--step",
        type=int,
        default=10,
        help=
        "Sets the learning rate to the initial LR decayed by momentum every n epochs, Default: n=10"
    )
    parser.add_argument("--cuda", action="store_true", help="Use cuda?")
    parser.add_argument("--resume",
                        default="",
                        type=str,
                        help="Path to checkpoint (default: none)")
    parser.add_argument("--start-epoch",
                        default=1,
                        type=int,
                        help="Manual epoch number (useful on restarts)")
    parser.add_argument("--clip",
                        type=float,
                        default=0.4,
                        help="Clipping Gradients. Default=0.4")
    parser.add_argument(
        "--threads",
        type=int,
        default=1,
        help="Number of threads for data loader to use, Default: 1")
    parser.add_argument("--momentum",
                        default=0.9,
                        type=float,
                        help="Momentum, Default: 0.9")
    parser.add_argument("--weight-decay",
                        "--wd",
                        default=1e-4,
                        type=float,
                        help="Weight decay, Default: 1e-4")
    parser.add_argument('--pretrained',
                        default='',
                        type=str,
                        help='path to pretrained model (default: none)')
    parser.add_argument("--gpu",
                        default="0",
                        type=str,
                        help="gpu ids (default: 0)")
    parser.add_argument("--num_filter", default=64, type=int)
    parser.add_argument("--num_block", default=18, type=int)
    parser.add_argument("--train_data",
                        type=str,
                        default="../Data/train_data/train.h5")
    parser.add_argument("--test_data",
                        type=str,
                        default="../Data/test_data/Set5_mat")
    parser.add_argument("-p", "--project_name", type=str)
    parser.add_argument("--debug", action="store_true")
    parser.add_argument("--sharpen", action="store_true")
    parser.add_argument("--drop_ratio", type=float, default=0)
    opt = parser.parse_args()

    # Set up directories and logs etc
    if opt.debug:
        opt.project_name = "test"
    project_path = pjoin("../Experiments", opt.project_name)
    rec_img_path = pjoin(project_path, "reconstructed_images")
    weights_path = pjoin(project_path, "weights")  # to save torch model
    if not opt.resume:
        if os.path.exists(project_path):
            respond = "Y"  # input("The appointed project name has existed. Do you want to overwrite it (everything inside will be removed)? (y/n) ")
            if str.upper(respond) in ["Y", "YES"]:
                shutil.rmtree(project_path)
            else:
                exit(1)
        if not os.path.exists(rec_img_path):
            os.makedirs(rec_img_path)
        if not os.path.exists(weights_path):
            os.makedirs(weights_path)
    TIME_ID = os.environ["SERVER"] + time.strftime("-%Y%m%d-%H%M")
    log_path = pjoin(weights_path, "log_" + TIME_ID + ".txt")
    log = sys.stdout if opt.debug else open(log_path, "w+")
    logprint(str(opt._get_kwargs()), log)

    cuda = opt.cuda
    if cuda:
        logprint("=> use gpu id: '{}'".format(opt.gpu), log)
        os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu
        if not torch.cuda.is_available():
            raise Exception(
                "No GPU found or Wrong gpu id, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    logprint("Random Seed: %s" % opt.seed, log)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    logprint("===> Loading datasets", log)
    train_set = DatasetFromHdf5(opt.train_data)
    training_data_loader = DataLoader(dataset=train_set,
                                      num_workers=opt.threads,
                                      batch_size=opt.batchSize,
                                      shuffle=True)

    logprint("===> Building model", log)
    model = Net(opt.num_filter, opt.num_block, opt.sharpen,
                opt.drop_ratio)  # create the model
    criterion = nn.MSELoss(size_average=False)

    logprint("===> Setting GPU", log)
    if cuda:
        model = model.cuda()
        criterion = criterion.cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            logprint("=> loading checkpoint '{}'".format(opt.resume), log)
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            logprint("=> no checkpoint found at '{}'".format(opt.resume), log)

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            logprint("=> loading model '{}'".format(opt.pretrained), log)
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            logprint("=> no model found at '{}'".format(opt.pretrained), log)

    logprint("===> Setting Optimizer", log)
    optimizer = optim.SGD(model.parameters(),
                          lr=opt.lr,
                          momentum=opt.momentum,
                          weight_decay=opt.weight_decay)

    logprint("===> Training", log)
    test(model, opt, log)
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch, opt,
              log)
        save_checkpoint(model, epoch, log, weights_path, TIME_ID)
        test(model, opt, log)
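This example writes all progress through a logprint helper that is not defined on this page. A minimal sketch consistent with how log is created above (sys.stdout in --debug mode, otherwise an open file handle) could be:

import sys

def logprint(message, log=sys.stdout):
    # Write to whichever stream was chosen at startup and flush immediately,
    # so progress is visible in the log file while training is still running.
    print(message, file=log)
    log.flush()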
Code Example #6
def main():
    print("in loop")
    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda:
        print("=> use gpu id: '{}'".format(opt.gpus))
        os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus
        if not torch.cuda.is_available():
            raise Exception(
                "No GPU found or Wrong gpu id, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    #train_set = DatasetFromHdf5("data/train.h5")   #改下面一行自己包的H5檔案
    #train_set = DatasetFromHdf5("D:/mytestfile_41x41_all_small_x2.h5") #自己包的
    train_set = DatasetFromHdf5("D:/train.h5")  #作者的

    training_data_loader = DataLoader(
        dataset=train_set,
        num_workers=0,
        batch_size=opt.batchSize,
        shuffle=True)  # num_workers changed from opt.threads to 0

    print("===> Building model")
    model = Net()  # train from scratch; to resume training, use the next line instead and load a .pth pretrained checkpoint

    #model = torch.load("checkpoint/model_epoch_lr01_1.pth", map_location=lambda storage, loc: storage)["model"] #預訓練檔 會報錯改下一行寫法

    criterion = nn.MSELoss(size_average=False)

    print("===> Setting GPU")
    if cuda:
        model = model.cuda()
        criterion = criterion.cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("===> Setting Optimizer")
    optimizer = optim.SGD(model.parameters(),
                          lr=opt.lr,
                          momentum=opt.momentum,
                          weight_decay=opt.weight_decay)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        #print("============train()前=========================")
        train(training_data_loader, optimizer, model, criterion, epoch)
        #print("============train()後=========================")
        save_checkpoint(model, epoch)
Code Example #7
File: main_vdsr.py  Project: HengRuiZ/BReLU
def main():
    global opt, model
    opt = parser.parse_args()
    print(opt)

    cudnn.benchmark = True

    print("===> Building model")
    model = Net()
    criterion = nn.MSELoss(reduction='sum')

    print("===> Using GPU %d" % opt.gpu)
    torch.cuda.set_device(opt.gpu)
    model = model.cuda(opt.gpu)
    criterion = criterion.cuda(opt.gpu)

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            model.load_state_dict(checkpoint["model"].state_dict(),
                                  strict=False)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    if opt.quant:
        if os.path.isfile(opt.quant_param):
            model.quantize_from(opt.quant_param)
            print('model quantized from ' + opt.quant_param)
        else:
            print("=> no quantize checkpoint found at '{}'".format(
                opt.quant_param))
            exit(1)

    if opt.blu:
        model.load_blu('blu_train.data')
        print('loaded blu from ' + 'blu_train.data')

    print("===> Setting Optimizer")
    #optimizer = optim.SGD(model.parameters(), lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay)
    optimizer = optim.Adam(model.parameters(),
                           lr=opt.lr,
                           weight_decay=opt.weight_decay)

    print("===> Loading datasets")
    #train_set = DatasetFromHdf5("data/train.h5")
    #training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)
    training_data_loader = ndarrayLoader('data\\input.data',
                                         'data\\target.data',
                                         shuffle=True,
                                         batch_size=opt.batchSize)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch, opt)
        result = eval.main(model, opt.blu, 'Set5')
        with open('result.txt', 'a') as f:
            f.write('epoch:%d\n' % epoch)
            f.write(result)
        save_checkpoint(model, epoch)
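Unlike the earlier examples, this one builds the loss as nn.MSELoss(reduction='sum'). That is the current spelling of the deprecated size_average=False argument used above; both sum the squared error over the batch instead of averaging it:

import torch.nn as nn

# These two criteria compute the same value (sum of squared errors);
# size_average=False only emits a deprecation warning on recent PyTorch.
criterion_old = nn.MSELoss(size_average=False)
criterion_new = nn.MSELoss(reduction='sum')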
Code Example #8
# # Parse arguments
# args, _ = parser.parse_known_args()

# model_module = importlib.import_module('models.' +
#                                      args.model if args.model else 'models')
# model_module.update_argparser(parser)
# params = parser.parse_args()
# model, criterion, optimizer, lr_scheduler, metrics = model_module.get_model_spec(params)

with torch.cuda.device(7):

    args = get_args()

    # net = WDSR_B(args)
    # net = EDSR(args)
    net = Net()
    # args.num_classes = 10
    # net = model
    # net = MLP(dim_in = 1024, dim_hidden = 200, dim_out = 10)
    # net = CNNMnist(args=args)
    # net = vivo.Vivo8ch29RBs()
    # net = models.densenet161()
    flops, params = get_model_complexity_info(net, (1, 1280, 720),
                                              as_strings=True,
                                              print_per_layer_stat=True)
    # flops, params = get_model_complexity_info(net, (3, 224, 224), as_strings=True, print_per_layer_stat=True)
    print('{:<30}  {:<8}'.format('Computational complexity: ', flops))
    print('{:<30}  {:<8}'.format('Number of parameters: ', params))

# 144P(256×144) 240p(426×240) 360P(640×360) 480P(854×480)
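The get_model_complexity_info helper used above is not imported in this snippet; it most likely comes from the ptflops package (an assumption, since the import is not shown), with the tuple giving the channels-first shape of a single input sample:

# Probable import for the snippet above:
from ptflops import get_model_complexity_info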
Code Example #9
File: eval.py  Project: HengRuiZ/BReLU
                psnr_predicted = PSNR(im_gt_y, im_h_y, shave_border=scale)
                avg_psnr_predicted += psnr_predicted
                #print(image_name,':Bicubic ',psnr_bicubic,'predicted:',psnr_predicted)
        result+="Scale=%d, PSNR_bicubic=%.3f PSNR_predicted=%.3f\n" % (scale,avg_psnr_bicubic/count, avg_psnr_predicted / count)
    print(result)
    return result

if __name__ == '__main__':
    model_path = 'model\\model_bias_blu_epoch_54.pth'
    gpu = 0
    quant = True
    quant_param = 'quant.data'
    blu = True
    torch.cuda.set_device(gpu)
    model = Net()
    checkpoint = torch.load(model_path, map_location='cpu')
    model.load_state_dict(checkpoint["model"].state_dict(), strict=False)
    print('loaded ' + model_path)

    #model.dump('model.data')
    #exit(0)
    model.cuda()
    if quant:
        if os.path.isfile(quant_param):
            model.quantize_from(quant_param)
            print('model quantized from ' + quant_param)
        else:
            model.quantize('quant.data')
    if blu:
        model.load_blu('blu_train.data')
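The PSNR helper called near the top of this snippet is not shown. A minimal sketch of the common VDSR-style implementation (shaving shave_border pixels from each edge before measuring, on 8-bit luminance values) is:

import math
import numpy as np

def PSNR(pred, gt, shave_border=0):
    # Crop the border region that super-resolution evaluations conventionally ignore,
    # then compute peak signal-to-noise ratio against a 255 peak value.
    height, width = pred.shape[:2]
    pred = pred[shave_border:height - shave_border, shave_border:width - shave_border]
    gt = gt[shave_border:height - shave_border, shave_border:width - shave_border]
    imdff = pred.astype(np.float64) - gt.astype(np.float64)
    rmse = math.sqrt(np.mean(imdff ** 2))
    if rmse == 0:
        return 100
    return 20 * math.log10(255.0 / rmse)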