Example #1
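    # Construct the dense encoder-decoder (DenseED) model used as the base network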
    denseED = DenseED(in_channels=args.nic,
                      out_channels=1,
                      blocks=args.blocks,
                      growth_rate=args.growth_rate,
                      init_features=args.init_features,
                      bn_size=args.bn_size,
                      drop_rate=args.drop_rate,
                      bottleneck=False,
                      out_activation=None).to(args.device)

    # Bayesian neural network
    bayes_nn = BayesNN(args, denseED)
    # Stochastic weight averaging - Gaussian (SWAG)
    swag_nn = SwagNN(args, bayes_nn, full_cov=True, max_models=30)
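    # full_cov=True keeps SWAG's low-rank covariance, built from up to max_models weight snapshots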
    # Load the pre-trained network (epoch 200 checkpoint)
    swag_nn.loadModel(200, file_dir="./networks")

    with torch.no_grad():
        uPred, betas, uTarget = testSample(args,
                                           swag_nn,
                                           testing_loader,
                                           tstep=400,
                                           n_samples=30)
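    # uPred: sampled network predictions, uTarget: reference solution;
    # betas are presumably the sampled output-noise precisions (exp of log_beta)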

    tTest = np.arange(0, 400 * args.dt + 1e-8, args.dt)  # 1e-8 offset keeps the final time step
    xTest = np.linspace(x0, x1, args.nel + 1)  # nel elements -> nel + 1 spatial nodes

    # With the neural-network predictions computed, load the numerical simulator results
    # Finite element
    uFEM1 = readSimulatorData('../solver/fenics_data_dt0.0005_T2.0',
                              test_cases,
Example #2
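    # Dense encoder-decoder (DenseED) base model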
    denseED = DenseED(in_channels=args.nic,
                      out_channels=args.noc,
                      blocks=args.blocks,
                      growth_rate=args.growth_rate,
                      init_features=args.init_features,
                      bn_size=args.bn_size,
                      drop_rate=args.drop_rate,
                      bottleneck=False,
                      out_activation=None).to(args.device)

    # Bayesian neural network
    bayes_nn = BayesNN(args, denseED)
    swag_nn = SwagNN(args, bayes_nn, full_cov=True, max_models=10)

    # First, predict with the deterministic base network
    swag_nn.loadModel(100, file_dir='networks')
    with torch.no_grad():
        mse_error = testMSE(args, swag_nn.base, testing_loader, tstep=nsteps)

    # Predict with Bayesian
    swag_nn.loadModel(200, file_dir='networks')
    with torch.no_grad():
        mse_error_bayes = testSamplesMSE(args,
                                         swag_nn,
                                         testing_loader,
                                         tstep=nsteps,
                                         n_samples=30)

    tT = np.arange(0, nsteps * args.dt + 1e-8, args.dt)
    plotError(tT, mse_error.cpu().numpy(), mse_error_bayes.cpu().numpy())
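The testMSE and testSamplesMSE helpers above are repository-specific. As a rough sketch only (the [n_samples, batch, tsteps, nel] prediction shape is an assumption, not taken from this code), a per-time-step MSE for an ensemble of Bayesian forward passes could look like:

import torch

def ensemble_mse(u_pred, u_target):
    # u_pred: [n_samples, batch, tsteps, nel], stacked forward passes (assumed shape)
    # u_target: [batch, tsteps, nel], reference solution
    u_mean = u_pred.mean(dim=0)          # posterior-mean prediction
    sq_err = (u_mean - u_target) ** 2    # pointwise squared error
    return sq_err.mean(dim=(0, 2))       # MSE at each time step -> [tsteps]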
Example #3
    # Bayesian neural network
    bayes_nn = BayesNN(args, denseED)
    # Stochastic weight averaging - Gaussian (SWAG)
    swag_nn = SwagNN(args, bayes_nn, full_cov=True, max_models=args.swag_max)
    
    # Create the optimizer; log_beta (the learned noise parameter) has its own learning rate
    parameters = [{'params': [bayes_nn.model.log_beta], 'lr': args.lr_beta},
                  {'params': bayes_nn.model.features.parameters()}]
    optimizer = torch.optim.Adam(parameters, lr=args.lr, weight_decay=0.0)
    # Exponential learning-rate decay
    scheduler = ExponentialLR(optimizer, gamma=0.995)

    # If starting from a specific epoch, attempt to load an existing checkpoint
    if args.epoch_start > 0:
        optimizer, scheduler = swag_nn.loadModel(args.epoch_start, optimizer, scheduler, file_dir=args.ckpt_dir)

    # Create the Kuramoto-Sivashinsky (KS) time integrator;
    # 4th-order finite differences are used for the spatial derivatives
    # (one grad_kernels width per finite-difference stencil, presumably)
    ksInt = KSIntegrate(args.dx, grad_kernels=[5, 5, 7], device=args.device)

    # Progressively increase the time step to help stabilize training
    dtStep = 25
    dtArr = np.linspace(np.log10(args.dt) - 3, np.log10(args.dt), dtStep)
    dtArr = 10**dtArr
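    # For example, with args.dt = 0.1 this gives 10**np.linspace(-4, -1, 25):
    # a log-spaced ramp from 1e-4 up to the target step of 1e-1.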

    # ========== Epoch loop ============
    for epoch in range(args.epoch_start + 1, args.epochs + 1):

        if epoch == args.swag_start:
            print('Starting to sample weights every {:d} epochs'.format(args.swag_freq))