Example #1
def train_fcnn(params):
    """Train a fully connected network from a hyperparameter spec."""
    print("Training fcnn")
    # Parse the hyperparameter spec into a plain dict.
    params_dict = file_utils.get_FCNN_hyperparams(params)
    trainer = FCNN.Trainer(params_dict)
    history = trainer.train()
    # Persist the trained weights, the hyperparameters used, and the loss history.
    trainer.save_model()
    trainer.save_params(params_dict)
    trainer.save_losses(history)
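
A minimal usage sketch; the config path and its format are assumptions, since the snippet does not show what file_utils.get_FCNN_hyperparams expects:

# Hypothetical call site; the path is a made-up example.
if __name__ == '__main__':
    train_fcnn('configs/fcnn_hyperparams.json')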
Example #2
def learn(inputs, error):
    # Backpropagate through the fully connected head first; this fills
    # net.deltas, whose first entry feeds the convolutional backward pass.
    fcnn.backprop(net.receptors, net.synapses, net.bias, net.deltas, error)
    cnn.backprop(net.filters, net.conv_bias, net.pooled, net.switches,
                 net.conv_error, net.conv_delta, net.deltas[0], inputs)
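
These snippets reference a module-level net object that is defined elsewhere. A rough sketch of the fields it would need, purely as an assumption inferred from the attribute accesses in the calls above:

# Hypothetical container inferred from the attribute accesses;
# the real definition is not part of these snippets.
class Net:
    def __init__(self):
        self.receptors = []   # FC layer activations
        self.synapses = []    # FC weight matrices
        self.bias = []        # FC biases
        self.deltas = []      # FC error terms (deltas[0] feeds the CNN)
        self.filters = []     # convolution kernels
        self.conv_bias = []   # convolution biases
        self.convolved = []   # pre-pooling feature maps
        self.pooled = []      # pooled feature maps
        self.switches = []    # max-pool argmax indices for backprop
        self.conv_error = []  # conv-layer error buffers
        self.conv_delta = []  # conv-layer weight gradients
        self.conv_layers = 0  # number of conv layers

net = Net()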
Example #3
def execute_net(inputs):
    # Forward pass: CNN layers first, then the last pooled output goes to the FC net.
    cnn.fwdPass(inputs, net.convolved, net.filters, net.conv_bias,
                net.pooled, net.switches)
    fcnn.fwdPass(net.pooled[net.conv_layers - 1], net.receptors,
                 net.synapses, net.bias)
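
Together, execute_net and learn form one training step. A minimal loop sketch; training_pairs, the output read from net.receptors[-1], and the error definition are all assumptions not shown in the snippets:

# Hypothetical training loop over (sample, target) pairs.
for sample, target in training_pairs:
    execute_net(sample)         # forward pass: CNN then FC
    output = net.receptors[-1]  # assumed: last FC activation holds the prediction
    error = output - target     # assumed: simple output-minus-target error
    learn(sample, error)        # backward pass: FC then CNN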
Example #4
        # The head of this statement is truncated in the source; reconstructed
        # here to mirror test_loader below (batch size and shuffle are assumed).
        train_loader = data_utils.DataLoader(train_set,
                                             batch_size=n,
                                             shuffle=True,
                                             num_workers=1)
        test_loader = data_utils.DataLoader(test_set,
                                            batch_size=N_test,
                                            shuffle=False,
                                            num_workers=1)

        # Grid search over upscale method and training hyperparameters.
        for u in upscale_method:
            for lr in learning_rates:
                for m in momentums:
                    for wd in weight_decays:
                        for cw in class_weights:

                            # Fixed seed so every configuration starts from
                            # identical initial weights.
                            torch.manual_seed(1984)

                            # Pick the model variant for this upscale method.
                            if u == 1:
                                model = FCNN.FCNN()
                            elif u == 2:
                                model = FCNN2.FCNN2()
                            else:
                                print("WARNING: unexpected upscale method", u)
                                continue

                            # `exp` is a running experiment counter defined
                            # outside this snippet.
                            expid = str(exp).zfill(3)
                            exp += 1
                            print("\n\n", expid, "Model LR:", lr, "Momentum:",
                                  m, "WDecay:", wd, "Class Weights:", cw,
                                  "Batch size:", n, "Imsize:", w)

                            model = train.train(expid, model, train_loader,
                                                test_loader, n, T, w, lr, m,
                                                wd, cw, lr_stepsize, lr_decay)
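
The five nested loops can be flattened with itertools.product. A sketch of the equivalent structure, with placeholder value grids (the real lists are defined outside the snippet):

import itertools

# Placeholder grids; the original lists are defined elsewhere.
upscale_method = [1, 2]
learning_rates = [1e-2, 1e-3]
momentums = [0.9]
weight_decays = [0.0, 1e-4]
class_weights = [None]

for u, lr, m, wd, cw in itertools.product(upscale_method, learning_rates,
                                          momentums, weight_decays,
                                          class_weights):
    ...  # same body as above: seed, build model, train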
Example #5
import os
import random
import time

import numpy as np
import scipy.io as sio
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data as data

# Project-local helpers used below (FCNN, DataFromMat, dataparallel, train,
# save_checkpoint) are assumed to be imported from the surrounding module.


def main():
    # Training settings
    nEpochs = 200
    lr = 0.001
    step = 80
    cuda = True
    resume = ""             # path to a checkpoint to resume from, if any
    start_epoch = 1
    clip = 0.005
    threads = 1
    momentum = 0.9
    weight_decay = 1e-4
    batchSize = 64          # assumed value; not defined in the original snippet
    method_name = 'FCNN'    # assumed tag for the loss file; not in the snippet

    # model_path = 'model_HSI_SR_DUALNET2_d2_totalloss_SAVE_s2_250/model_epoch_150.pth'

    path_hr = r'D:\data\hr.mat'
    path_lr = r'D:\data\lr_3.mat'

    global model

    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please set cuda = False")

    seed = random.randint(1, 10000)
    print("Random Seed: ", seed)
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed(seed)

    cudnn.benchmark = True

    # print("===> Loading datasets")
    # train_set = DatasetFromMat(opt.dataset, sigma)
    # train_set = DatasetFromMat7_3(opt.dataset)

    print("===> Loading datasets ")
    train_data_lr, train_data_hr = DataFromMat(path_lr, path_hr)
    _, _, S, H, W = train_data_lr.shape
    torch_dataset = data.TensorDataset(train_data_lr, train_data_hr)
    training_data_loader = data.DataLoader(dataset=torch_dataset,
                                           num_workers=threads,
                                           batch_size=batchSize,
                                           shuffle=False)

    print("===> Building model")
    #  model = torch.load(model_path)["model"]
    model = FCNN.FCNN(1, 1)
    # criterion = nn.MSELoss()
    # criterion = nn.MSELoss(size_average=False)

    print("===> Setting GPU")
    if cuda:
        # model = torch.nn.DataParallel(model).cuda()
        model = dataparallel(model, 1)  # second argument: number of parallel GPUs
        # criterion = criterion.cuda()
    if resume:
        if os.path.isfile(resume):
            print("=> loading checkpoint '{}'".format(resume))
            checkpoint = torch.load(resume)
            start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(resume))

    print("===> Setting Optimizer")
    # optimizer = optim.SGD([
    #     {'params': model.parameters()}
    # ], lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay)
    optimizer = optim.Adam(model.parameters(), lr=lr,
                           weight_decay=weight_decay)

    print("===> Training")
    lossArray = np.zeros(nEpochs)
    losspath = 'losses/'
    if not os.path.exists(losspath):
        os.makedirs(losspath)

    for epoch in range(start_epoch, nEpochs + 1):
        start_time = time.time()
        # Accumulate this epoch's training loss.
        lossArray[epoch - 1] += train(training_data_loader, optimizer, model,
                                      epoch, step, lr, cuda)
        print("===> Epoch[{}]: Loss={:.7f}, time = {:.4f}".format(
            epoch, lossArray[epoch - 1], time.time() - start_time))
        save_checkpoint(model, epoch)

    sio.savemat(os.path.join(losspath, method_name + '_lossArray.mat'),
                {'lossArray': lossArray})
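
The saved .mat file can be read back with scipy for plotting or inspection. A short sketch, assuming the method_name value guessed above:

import scipy.io as sio

# 'losses/FCNN_lossArray.mat' assumes method_name == 'FCNN' as above.
losses = sio.loadmat('losses/FCNN_lossArray.mat')['lossArray'].ravel()
print(losses[:10])  # first ten per-epoch losses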