# Common imports assumed by the snippets below; `fluid`, `lib`, `setConstVals`,
# `mconf`, `cuda0`, the data loaders and a few other globals come from a
# FluidNet-style codebase and are not defined in these excerpts.
import glob
import random

import torch
import torch.nn as nn
import torch.nn.functional as F

import lib
import lib.fluid as fluid


def simulate(batch_dict, res, net, sim_method):
    with torch.no_grad():
        net.eval()

        dt = 0.1
        maccormackStrength = 0.6
        sampleOutsideFluid = False

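        # Buoyancy and gravity are disabled in this snippet (scale factor 0).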
        buoyancyScale = 0 * (res / 128)
        gravityScale = 0 * (res / 128)

        # Get p, U, flags and density from batch.
        p = batch_dict['p']
        U = batch_dict['U']
        flags = batch_dict['flags']
        density = batch_dict['density']

        # First advect all scalar fields.
        density = fluid.advectScalar(dt, density, U, flags, method="maccormackFluidNet", \
                    boundary_width=1, sample_outside_fluid=sampleOutsideFluid, \
                    maccormack_strength=maccormackStrength)

        # Self-advect velocity
        U = fluid.advectVelocity(dt, U, flags, method="maccormackFluidNet", \
                boundary_width=1, maccormack_strength=maccormackStrength)

        # Set the manual BCs.
        setConstVals(batch_dict, p, U, flags, density)

        # Set the constant domain values.
        if (sim_method != 'convnet'):
            fluid.setWallBcs(U, flags)
        setConstVals(batch_dict, p, U, flags, density)

        if (sim_method == 'convnet'):
            # fprop the model to perform the pressure projection and velocity update.
            # Wall BCs are set inside the model, before and after the projection,
            # so there is no need to call setWallBcs again here.
            data = torch.cat((p, U, flags), 1)
            out_p, out_U = net(data)
            p = out_p.clone()
            U = out_U.clone()

        else:
            div = fluid.velocityDivergence(U, flags)

            is3D = (U.size(2) > 1)
            pTol = 0
            maxIter = 34

            _p, residual = fluid.solveLinearSystemJacobi(flags, div, is3D, p_tol=pTol, \
                    max_iter=maxIter)

            p = _p
            fluid.velocityUpdate(p, U, flags)

        setConstVals(batch_dict, p, U, flags, density)
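
A minimal driver for the simulate() above might look like the sketch below. It assumes the FluidNet-style fluid module and setConstVals are importable and that net is a trained projection model; run_simulation_demo, the shapes and the step count are illustrative, not part of the original snippet.

def run_simulation_demo(net, res=128, n_steps=100):
    # Field layout used throughout these snippets: (batch, chan, depth, H, W),
    # with depth = 1 in 2D. Manta flags: 1 = fluid, 2 = obstacle.
    batch_dict = {
        'p':       torch.zeros(1, 1, 1, res, res),  # pressure
        'U':       torch.zeros(1, 2, 1, res, res),  # velocity (x, y channels)
        'flags':   torch.ones(1, 1, 1, res, res),   # geometry flags
        'density': torch.zeros(1, 1, 1, res, res),  # advected scalar
    }
    if torch.cuda.is_available():
        batch_dict = {k: v.cuda() for k, v in batch_dict.items()}
    for _ in range(n_steps):
        # Fields are written back through setConstVals (cf. Example #6,
        # where batch_dict is explicitly updated in place).
        simulate(batch_dict, res, net, 'convnet')
    return batch_dict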
Example #2
    def val():
        net.eval()

        #initialise loss scores
        total_loss = 0
        p_l2_total_loss = 0
        div_l2_total_loss = 0
        p_l1_total_loss = 0
        div_l1_total_loss = 0
        div_lt_total_loss = 0

        n_batches = 0  # Number of batches processed

        # Loss types
        _pL2Loss = nn.MSELoss()
        _divL2Loss = nn.MSELoss()
        _divLTLoss = nn.MSELoss()
        _pL1Loss = nn.L1Loss()
        _divL1Loss = nn.L1Loss()

        # Loss lambdas (multiply the corresponding loss)
        pL2Lambda = mconf['pL2Lambda']
        divL2Lambda = mconf['divL2Lambda']
        pL1Lambda = mconf['pL1Lambda']
        divL1Lambda = mconf['divL1Lambda']
        divLTLambda = mconf['divLongTermLambda']

        for batch_idx, (data, target) in enumerate(test_loader):
            with torch.no_grad():
                if torch.cuda.is_available():
                    data, target = data.cuda(), target.cuda()
                if batch_idx in batch_print:
                    out_p, out_U = net(data)
                    flags = data[:, 3].unsqueeze(1).contiguous()
                    target_p = target[:, 0].unsqueeze(1)
                    out_div = fluid.velocityDivergence(\
                            out_U.contiguous(), \
                            flags)
                    target_div = torch.zeros_like(out_div)

                    # Measure loss and save it
                    pL2Loss = pL2Lambda * _pL2Loss(out_p, target_p)
                    divL2Loss = divL2Lambda * _divL2Loss(out_div, target_div)
                    pL1Loss = pL1Lambda * _pL1Loss(out_p, target_p)
                    divL1Loss = divL1Lambda * _divL1Loss(out_div, target_div)

                    loss_size = pL2Loss + divL2Loss + pL1Loss + divL1Loss

                    # Print statistics
                    p_l2_total_loss += pL2Loss.data.item()
                    div_l2_total_loss += divL2Loss.data.item()
                    p_l1_total_loss += pL1Loss.data.item()
                    div_l1_total_loss += divL1Loss.data.item()
                    #if (divLTLambda > 0):
                    #    div_lt_total_loss += divLTLoss.data.item()
                    total_loss += loss_size.data.item()

                    # Plot the output fields for inspection
                    lib.plotField(out=[out_p, out_U, out_div],
                                  tar=target,
                                  flags=flags,
                                  loss=[
                                      total_loss, p_l2_total_loss,
                                      div_l2_total_loss, div_lt_total_loss,
                                      p_l1_total_loss, div_l1_total_loss
                                  ],
                                  mconf=mconf,
                                  save=False,
                                  y_slice=8)
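
The weighted loss assembled above can be isolated into a standalone helper. The sketch below is illustrative: combined_loss is a name introduced here, the lambda keys mirror the mconf entries read in the snippet, and F is torch.nn.functional.

def combined_loss(out_p, target_p, out_div, lambdas):
    # total = pL2*MSE(p) + divL2*MSE(div) + pL1*L1(p) + divL1*L1(div),
    # with the divergence target identically zero.
    target_div = torch.zeros_like(out_div)
    return (lambdas['pL2Lambda'] * F.mse_loss(out_p, target_p)
            + lambdas['divL2Lambda'] * F.mse_loss(out_div, target_div)
            + lambdas['pL1Lambda'] * F.l1_loss(out_p, target_p)
            + lambdas['divL1Lambda'] * F.l1_loss(out_div, target_div))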
Example #3
                # [Snippet truncated above: these are the trailing arguments
                # of a matplotlib quiver()-style call plotting the velocity.]
                v1[minY:maxY:skip, minX:maxX:skip],
                scale_units='height',
                scale=scale,
                #headwidth=headwidth, headlength=headlength,
                color='black')

        # Main loop
        while (it < max_iter):
            #if it < 50:
            #    method = 'jacobi'
            #else:
            method = mconf['simMethod']
            lib.simulate(mconf, batch_dict, net, method)
            if (it % outIter == 0):
                print("It = " + str(it))
                tensor_div = fluid.velocityDivergence(
                    batch_dict['U'].clone(), batch_dict['flags'].clone())
                pressure = batch_dict['p'].clone()
                tensor_vel = fluid.getCentered(batch_dict['U'].clone())
                density = batch_dict['density'].clone()
                div = torch.squeeze(tensor_div).cpu().data.numpy()
                np_mask = torch.squeeze(
                    flags.eq(2)).cpu().data.numpy().astype(float)
                rho = torch.squeeze(density).cpu().data.numpy()
                p = torch.squeeze(pressure).cpu().data.numpy()
                img_norm_vel = torch.squeeze(
                    torch.norm(tensor_vel, dim=1,
                               keepdim=True)).cpu().data.numpy()
                img_velx = torch.squeeze(tensor_vel[:, 0]).cpu().data.numpy()
                img_vely = torch.squeeze(tensor_vel[:, 1]).cpu().data.numpy()
                img_vel_norm = img_norm_vel  # same norm as computed above
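
The arrays extracted above (p, rho, div and the velocity magnitude) are plain 2D numpy arrays ready for plotting. A minimal matplotlib sketch follows; show_fields is introduced here for illustration and is not the repository's plotField.

import matplotlib.pyplot as plt

def show_fields(p, rho, div, vel_norm):
    # Each argument is a 2D numpy array extracted via torch.squeeze(...).numpy().
    fig, axes = plt.subplots(1, 4, figsize=(16, 4))
    titles = ('pressure', 'density', 'divergence', '|U|')
    for ax, field, title in zip(axes, (p, rho, div, vel_norm), titles):
        im = ax.imshow(field, origin='lower')
        ax.set_title(title)
        fig.colorbar(im, ax=ax)
    plt.show()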
Example #4
    def forward(self, input_):

        # data indexes     |           |
        #       (dim 1)    |    2D     |    3D
        # ----------------------------------------
        #   DATA:
        #       pDiv       |    0      |    0
        #       UDiv       |    1:3    |    1:4
        #       flags      |    3      |    4
        #       densityDiv |    4      |    5
        #   TARGET:
        #       p          |    0      |    0
        #       U          |    1:3    |    1:4
        #       density    |    3      |    4

        # For now, we work ONLY in 2d

        assert not self.is3D, 'Input can only be 2D'

        assert self.mconf['inputChannels']['pDiv'] or \
                self.mconf['inputChannels']['UDiv'] or \
                self.mconf['inputChannels']['div'], 'Choose at least one field (U, div or p).'

        pDiv = None
        UDiv = None
        div = None

        # Flags are always loaded
        if self.is3D:
            flags = input_[:, 4].unsqueeze(1)
        else:
            flags = input_[:, 3].unsqueeze(1).contiguous()

        if (self.mconf['inputChannels']['pDiv'] or (self.mconf['normalizeInput'] \
            and self.mconf['normalizeInputChan'] == 'pDiv')):
            pDiv = input_[:, 0].unsqueeze(1).contiguous()

        if (self.mconf['inputChannels']['UDiv'] or self.mconf['inputChannels']['div'] \
            or (self.mconf['normalizeInput'] \
            and self.mconf['normalizeInputChan'] == 'UDiv')):
            if self.is3D:
                UDiv = input_[:, 1:4].contiguous()
            else:
                UDiv = input_[:, 1:3].contiguous()

            if 'periodic-x' in self.mconf and 'periodic-y' in self.mconf:
                U_temp = UDiv.clone()

            # Apply setWallBcs to zero out obstacle velocities on the boundary
            #UDiv = fluid.setWallBcs(UDiv, flags)
            if 'periodic-x' in self.mconf and 'periodic-y' in self.mconf:
                if self.mconf['periodic-x']:
                    UDiv[:, 1, :, :, 1] = U_temp[:, 1, :, :, UDiv.size(4) - 1]
                if self.mconf['periodic-y']:
                    UDiv[:, 0, :, 1] = U_temp[:, 0, :, UDiv.size(3) - 1]

            if self.mconf['inputChannels']['div']:
                div = fluid.velocityDivergence(UDiv, flags)

        # Apply scale to input
        if self.mconf['normalizeInput']:
            if self.mconf['normalizeInputChan'] == 'UDiv':
                s = self.scale(UDiv)
            elif self.mconf['normalizeInputChan'] == 'pDiv':
                s = self.scale(pDiv)
            elif self.mconf['normalizeInputChan'] == 'div':
                s = self.scale(div)
            else:
                raise Exception('Incorrect normalize input channel.')

            if pDiv is not None:
                pDiv = torch.div(pDiv, s)
            if UDiv is not None:
                UDiv = torch.div(UDiv, s)
            if div is not None:
                div = torch.div(div, s)

        x = torch.FloatTensor(input_.size(0), \
                              self.inDims,    \
                              input_.size(2), \
                              input_.size(3), \
                              input_.size(4)).type_as(input_)

        chan = 0
        if self.mconf['inputChannels']['pDiv']:
            x[:, chan] = pDiv[:, 0]
            chan += 1
        elif self.mconf['inputChannels']['UDiv']:
            if self.is3D:
                x[:, chan:(chan + 3)] = UDiv
                chan += 3
            else:
                x[:, chan:(chan + 2)] = UDiv
                chan += 2
        elif self.mconf['inputChannels']['div']:
            x[:, chan] = div[:, 0]
            chan += 1

        # FlagsToOccupancy creates a [0,1] grid out of the manta flags
        x[:, chan, :, :, :] = fluid.flagsToOccupancy(flags).squeeze(1)

        if not self.is3D:
            # Squeeze the singleton depth dimension as we are in 2D
            x = torch.squeeze(x, 2)

        if self.mconf['model'] == 'ScaleNet':
            p = self.multiScale(x)

        else:
            # Initial layers
            x = F.relu(self.conv1(x))

            # We divide the network into three banks, applying average pooling
            x1 = self.modDown1(x)
            x2 = self.modDown2(x)

            # Process every bank in parallel
            x0 = self.convBank(x)
            x1 = self.convBank(x1)
            x2 = self.convBank(x2)

            # Upsample banks 1 and 2 to bank 0's size and concatenate the outputs
            #x1 = self.upscale1(x1)
            #x2 = self.upscale2(x2)
            x1 = self.deconv1(x1)
            x2 = self.deconv2(x2)

            x = torch.cat((x0, x1, x2), dim=1)
            #x = x0 + x1 + x2

            # Apply last 2 convolutions
            x = F.relu(self.conv2(x))

            # Output pressure (1 chan)
            p = self.convOut(x)

        # Add back the singleton depth dimension
        if not self.is3D:
            p = torch.unsqueeze(p, 2)

        # Correct U = UDiv - grad(p)
        # flags is the one with Manta's values, not occupancy in [0,1]
        fluid.velocityUpdate(pressure=p, U=UDiv, flags=flags)

        # We now UNDO the scale factor we applied on the input.
        if self.mconf['normalizeInput']:
            p = torch.mul(p, s)       # p = p * s
            UDiv = torch.mul(UDiv, s)

        if 'periodic-x' in self.mconf and 'periodic-y' in self.mconf:
            U_temp = UDiv.clone()

        # Set BCs after velocity update.
        UDiv = fluid.setWallBcs(UDiv, flags)
        if 'periodic-x' in self.mconf and 'periodic-y' in self.mconf:
            if self.mconf['periodic-x']:
                UDiv[:, 1, :, :, 1] = U_temp[:, 1, :, :, UDiv.size(4) - 1]
            if self.mconf['periodic-y']:
                UDiv[:, 0, :, 1] = U_temp[:, 0, :, UDiv.size(3) - 1]
        return p, UDiv
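
fluid.velocityUpdate above computes U = UDiv - grad(p). Conceptually, on a collocated grid with unit cell size, it amounts to the sketch below; the repository's version operates on a staggered MAC grid and respects the geometry flags, so this is illustration only.

def velocity_update_sketch(p, U):
    # p: (H, W) pressure; U: (2, H, W) velocity; forward differences.
    grad_x = torch.zeros_like(p)
    grad_y = torch.zeros_like(p)
    grad_x[:, :-1] = p[:, 1:] - p[:, :-1]   # dp/dx
    grad_y[:-1, :] = p[1:, :] - p[:-1, :]   # dp/dy
    U[0] -= grad_x
    U[1] -= grad_y
    return U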
Example #5
    def run_epoch(epoch, loader, training=True):
        if training:
            #set model to train
            net.train()
        else:
            #otherwise, set it to eval.
            net.eval()

        #initialise loss scores
        total_loss = 0
        p_l2_total_loss = 0
        div_l2_total_loss = 0
        p_l1_total_loss = 0
        div_l1_total_loss = 0
        div_lt_total_loss = 0

        n_batches = 0  # Number of processed batches

        # Loss types
        _pL2Loss = nn.MSELoss()
        _divL2Loss = nn.MSELoss()
        _divLTLoss = nn.MSELoss()
        _pL1Loss = nn.L1Loss()
        _divL1Loss = nn.L1Loss()

        # Loss lambdas (multiply the corresponding loss)
        pL2Lambda = mconf['pL2Lambda']
        divL2Lambda = mconf['divL2Lambda']
        pL1Lambda = mconf['pL1Lambda']
        divL1Lambda = mconf['divL1Lambda']
        divLTLambda = mconf['divLongTermLambda']

        # Loop over batches of data
        for batch_idx, (data, target) in enumerate(loader):
            if torch.cuda.is_available():
                data, target = data.cuda(), target.cuda()

            if training:
                # Set gradients to zero, clearing previous batches.
                optimizer.zero_grad()

            # data indexes     |           |
            #       (dim 1)    |    2D     |    3D
            # ----------------------------------------
            #   DATA:
            #       pDiv       |    0      |    0
            #       UDiv       |    1:3    |    1:4
            #       flags      |    3      |    4
            #       densityDiv |    4      |    5
            #   TARGET:
            #       p          |    0      |    0
            #       U          |    1:3    |    1:4
            #       density    |    3      |    4

            assert data.size(1) in (5, 6), \
                "Data must have 5 input channels in 2D or 6 in 3D"
            is3D = (data.size(1) == 6)

            # Run the model forward
            flags = data[:, 3].unsqueeze(1).contiguous()
            out_p, out_U = net(data)

            # Calculate targets
            target_p = target[:, 0].unsqueeze(1)
            out_div = fluid.velocityDivergence(out_U.contiguous(), flags)
            target_div = torch.zeros_like(out_div)

            # Measure loss and save it
            pL2Loss = pL2Lambda * _pL2Loss(out_p, target_p)
            divL2Loss = divL2Lambda * _divL2Loss(out_div, target_div)
            pL1Loss = pL1Lambda * _pL1Loss(out_p, target_p)
            divL1Loss = divL1Lambda * _divL1Loss(out_div, target_div)

            loss_size = pL2Loss + divL2Loss + pL1Loss + divL1Loss

            # We calculate the divergence of a future frame.
            if (divLTLambda > 0):
                # Check whether additional buoyancy or gravity is added to future frames.
                # Buoyancy adds a source term f = delta_rho * g to the momentum
                # equation, with rho = rho_0 + delta_rho (constant term + fluctuation)
                # and delta_rho << rho_0.
                # Gravity adds a source term of the form f = rho_0 * g.
                # The source term is a vector (direction and magnitude).

                oldBuoyancyScale = mconf['buoyancyScale']
                # rand(1) is a uniform draw on the interval [0, 1)
                if torch.rand(1)[0] < mconf['trainBuoyancyProb']:
                    # Add buoyancy to this batch (only in the long term frames)
                    var = torch.tensor([1.], device=cuda0)
                    mconf['buoyancyScale'] = torch.normal(
                        mconf['trainBuoyancyScale'], var)

                oldGravityScale = mconf['gravityScale']
                # rand(1) is a uniform draw on the interval [0, 1)
                if torch.rand(1)[0] < mconf['trainGravityProb']:
                    # Add gravity to this batch (only in the long term frames)
                    var = torch.tensor([1.], device=cuda0)
                    mconf['gravityScale'] = torch.normal(
                        mconf['trainGravityScale'], var)

                oldGravity = mconf['gravityVec']
                if mconf['buoyancyScale'] > 0 or mconf['gravityScale'] > 0:
                    # Zero the gravity vector (direction of gravity)
                    mconf['gravityVec']['x'] = 0
                    mconf['gravityVec']['y'] = 0
                    mconf['gravityVec']['z'] = 0

                    # Choose one cardinal direction at random, with a random +/- sign
                    card_dir = 0
                    if is3D:
                        card_dir = random.randint(0, 2)
                    else:
                        card_dir = random.randint(0, 1)

                    updown = random.randint(0, 1) * 2 - 1
                    if card_dir == 0:
                        mconf['gravityVec']['x'] = updown
                    elif card_dir == 1:
                        mconf['gravityVec']['y'] = updown
                    elif card_dir == 2:
                        mconf['gravityVec']['z'] = updown

                base_dt = mconf['dt']

                if mconf['timeScaleSigma'] > 0:
                    # FluidNet: randn() samples a standard normal (mean 0, var 1).
                    # E[abs(randn)] = sqrt(2/pi) ~ 0.797, so the 0.2028 offset
                    # keeps the mean of scale_dt near 1 when timeScaleSigma = 1.
                    scale_dt = 0.2028 + torch.abs(torch.randn(1))[0] * \
                            mconf['timeScaleSigma']
                    mconf['dt'] = base_dt * scale_dt

                num_future_steps = mconf['longTermDivNumSteps'][0]
                # rand(1) is a uniform draw on the interval [0, 1).
                # longTermDivNumSteps[0] is taken with prob longTermDivProbability,
                # otherwise longTermDivNumSteps[1] is taken.
                if torch.rand(1)[0] > mconf['longTermDivProbability']:
                    num_future_steps = mconf['longTermDivNumSteps'][1]

                batch_dict = {}
                batch_dict['p'] = out_p
                batch_dict['U'] = out_U
                batch_dict['flags'] = flags

                # Set the simulation forward n steps (using model, no grad calculation),
                # but on the last do not perform a pressure projection.
                # We then input last state to model with grad calculation and add to global loss.
                with torch.no_grad():
                    for i in range(num_future_steps):
                        # True only on the last step, which skips the projection.
                        output_div = (i == num_future_steps - 1)
                        lib.simulate(mconf, batch_dict, net, \
                                'convnet', output_div=output_div)

                data_lt = torch.zeros_like(data)
                data_lt[:, 0] = batch_dict['p'].squeeze(1)
                data_lt[:, 1:3] = batch_dict['U']
                data_lt[:, 3] = batch_dict['flags'].squeeze(1)
                data_lt = data_lt.contiguous()

                mconf['dt'] = base_dt

                out_p_LT, out_U_LT = net(data_lt)
                out_div_LT = fluid.velocityDivergence(out_U_LT.contiguous(),
                                                      flags)
                target_div_LT = torch.zeros_like(out_div_LT)
                divLTLoss = divLTLambda * _divLTLoss(out_div_LT, target_div_LT)

                loss_size += divLTLoss

            # Print statistics
            p_l2_total_loss += pL2Loss.data.item()
            div_l2_total_loss += divL2Loss.data.item()
            p_l1_total_loss += pL1Loss.data.item()
            div_l1_total_loss += divL1Loss.data.item()
            if (divLTLambda > 0):
                div_lt_total_loss += divLTLoss.data.item()
            total_loss += loss_size.data.item()

            # The current loader is shuffled iff shuffle_training == training;
            # fields below are only plotted from an unshuffled loader, so the
            # same samples are tracked across epochs.
            shuffled = (shuffle_training == training)

            # Print fields for debug
            if print_training and (not shuffled) and (batch_idx*len(data) in list_to_plot) \
                and ((epoch-1) % 5 == 0):
                print_list = [batch_idx * len(data), epoch]
                filename_p = 'output_p_{0:05d}_ep_{1:03d}.png'.format(
                    *print_list)
                filename_vel = 'output_v_{0:05d}_ep_{1:03d}.png'.format(
                    *print_list)
                filename_div = 'output_div_{0:05d}_ep_{1:03d}.png'.format(
                    *print_list)
                file_plot_p = glob.os.path.join(m_path, filename_p)
                file_plot_vel = glob.os.path.join(m_path, filename_vel)
                file_plot_div = glob.os.path.join(m_path, filename_div)
                with torch.no_grad():
                    lib.plotField(out=[
                        out_p[0].unsqueeze(0), out_U[0].unsqueeze(0),
                        out_div[0].unsqueeze(0)
                    ],
                                  tar=target[0].unsqueeze(0),
                                  flags=flags[0].unsqueeze(0),
                                  loss=[
                                      total_loss, p_l2_total_loss,
                                      div_l2_total_loss, div_lt_total_loss,
                                      p_l1_total_loss, div_l1_total_loss
                                  ],
                                  mconf=mconf,
                                  epoch=epoch,
                                  filename=file_plot_p,
                                  save=save_or_show,
                                  plotPres=True,
                                  plotVel=False,
                                  plotDiv=False,
                                  title=False,
                                  x_slice=104)
                    lib.plotField(out=[
                        out_p[0].unsqueeze(0), out_U[0].unsqueeze(0),
                        out_div[0].unsqueeze(0)
                    ],
                                  tar=target[0].unsqueeze(0),
                                  flags=flags[0].unsqueeze(0),
                                  loss=[
                                      total_loss, p_l2_total_loss,
                                      div_l2_total_loss, div_lt_total_loss,
                                      p_l1_total_loss, div_l1_total_loss
                                  ],
                                  mconf=mconf,
                                  epoch=epoch,
                                  filename=file_plot_vel,
                                  save=save_or_show,
                                  plotPres=False,
                                  plotVel=True,
                                  plotDiv=False,
                                  title=False,
                                  x_slice=104)
                    lib.plotField(out=[
                        out_p[0].unsqueeze(0), out_U[0].unsqueeze(0),
                        out_div[0].unsqueeze(0)
                    ],
                                  tar=target[0].unsqueeze(0),
                                  flags=flags[0].unsqueeze(0),
                                  loss=[
                                      total_loss, p_l2_total_loss,
                                      div_l2_total_loss, div_lt_total_loss,
                                      p_l1_total_loss, div_l1_total_loss
                                  ],
                                  mconf=mconf,
                                  epoch=epoch,
                                  filename=file_plot_div,
                                  save=save_or_show,
                                  plotPres=False,
                                  plotVel=False,
                                  plotDiv=True,
                                  title=False,
                                  x_slice=104)

            if training:
                # Run the backpropagation for all the losses.
                loss_size.backward()

                # Step the optimizer
                optimizer.step()

            n_batches += 1

            if training:
                # Print every 20th batch of an epoch
                if batch_idx % 20 == 0:
                    print('Train Epoch: {} [{}/{} ({:.0f}%)] \t'.format(
                        epoch, batch_idx * len(data), len(loader.dataset),
                        100. * batch_idx / len(loader)))

        # Average the losses over the number of batches
        p_l2_total_loss /= n_batches
        div_l2_total_loss /= n_batches
        p_l1_total_loss /= n_batches
        div_l1_total_loss /= n_batches
        div_lt_total_loss /= n_batches
        total_loss /= n_batches

        # Print for the whole dataset
        if training:
            sstring = 'Train'
        else:
            sstring = 'Validation'
        print('\n{} set: Avg total loss: {:.6f} (L2(p): {:.6f}; L2(div): {:.6f}; '
              'L1(p): {:.6f}; L1(div): {:.6f}; LTDiv: {:.6f})'.format(
                  sstring,
                  total_loss, p_l2_total_loss, div_l2_total_loss,
                  p_l1_total_loss, div_l1_total_loss, div_lt_total_loss))

        # Return loss scores
        return total_loss, p_l2_total_loss, div_l2_total_loss, \
                p_l1_total_loss, div_l1_total_loss, div_lt_total_loss
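
run_epoch returns six averaged scores, with the total loss first, so a caller can drive training and validation and checkpoint on the validation total. The driver below is a sketch; fit, n_epochs and the two loaders are illustrative names, while net and optimizer are the closure variables used above.

def fit(train_loader, val_loader, n_epochs):
    best_val = float('inf')
    for epoch in range(1, n_epochs + 1):
        run_epoch(epoch, train_loader, training=True)
        scores = run_epoch(epoch, val_loader, training=False)
        if scores[0] < best_val:  # scores[0] is the averaged total loss
            best_val = scores[0]
            torch.save(net.state_dict(), 'best_model.pth')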
Example #6
def simulate(conf, mconf, batch_dict, net, sim_method, output_div=False):
    r"""Top level simulation loop.

    Arguments:
        conf (dict): Configuration dictionary.
        mconf (dict): Model configuration dictionary.
        batch_dict (dict): Dictionary of torch Tensors.
            Keys must be 'U', 'flags', 'p', 'density'.
            Simulations are done IN PLACE.
        net (nn.Module): convNet model.
        sim_method (string): Options are 'convnet' and 'jacobi'.
        output_div (bool, optional): if True, return just before solving for
            pressure, i.e. leave the state as UDiv and pDiv (before
            subtracting the divergent part).

    """
    cuda = torch.device('cuda')
    assert sim_method in ('convnet', 'jacobi'), \
        'Simulation method not supported. Choose either convnet or jacobi.'

    dt = float(mconf['dt'])
    maccormackStrength = mconf['maccormackStrength']
    sampleOutsideFluid = mconf['sampleOutsideFluid']

    buoyancyScale = mconf['buoyancyScale']
    gravityScale = mconf['gravityScale']

    viscosity = mconf['viscosity']
    assert viscosity >= 0, 'Viscosity must be non-negative'

    # Get p, U, flags and density from batch.
    p = batch_dict['p']
    U = batch_dict['U']

    flags = batch_dict['flags']
    stick = False
    if 'flags_stick' in batch_dict:
        stick = True
        flags_stick = batch_dict['flags_stick']

    # If viscous model, add viscosity
    if (viscosity > 0):
        orig = U.clone()
        fluid.addViscosity(dt, orig, flags, viscosity)

    if 'density' in batch_dict:
        density = batch_dict['density']

        # First advect all scalar fields.
        density = fluid.advectScalar(dt, density, U, flags, \
                method="maccormackFluidNet", \
                boundary_width=1, sample_outside_fluid=sampleOutsideFluid, \
                maccormack_strength=maccormackStrength)
        if mconf['correctScalar']:
            div = fluid.velocityDivergence(U, flags)
            fluid.correctScalar(dt, density, div, flags)
    else:
        density = torch.zeros_like(flags)

    if viscosity == 0:
        # Self-advect velocity if inviscid
        U = fluid.advectVelocity(dt=dt, orig=U, U=U, flags=flags, method="maccormackFluidNet", \
            boundary_width=1, maccormack_strength=maccormackStrength)
    else:
        # Advect viscous velocity field orig by the non-divergent
        # velocity field U.
        U = fluid.advectVelocity(dt=dt, orig=orig, U=U, flags=flags, method="maccormackFluidNet", \
            boundary_width=1, maccormack_strength=maccormackStrength)

    # Set the manual BCs.
    setConstVals(batch_dict, p, U, flags, density)

    if 'density' in batch_dict:
        if buoyancyScale > 0:
            # Add external forces: buoyancy.
            gravity = torch.FloatTensor(3).fill_(0).cuda()
            gravity[0] = mconf['gravityVec']['x']
            gravity[1] = mconf['gravityVec']['y']
            gravity[2] = mconf['gravityVec']['z']
            gravity.mul_(-buoyancyScale)
            rho_star = mconf['operatingDensity']
            U = fluid.addBuoyancy(U, flags, density, gravity, rho_star, dt)
        if gravityScale > 0:
            gravity = torch.FloatTensor(3).fill_(0).cuda()
            gravity[0] = mconf['gravityVec']['x']
            gravity[1] = mconf['gravityVec']['y']
            gravity[2] = mconf['gravityVec']['z']
            # Add external forces: gravity.
            gravity.mul_(-gravityScale)
            U = fluid.addGravity(U, flags, gravity, dt)

    if (output_div):
        return

    if sim_method != 'convnet':
        if 'periodic-x' in mconf and 'periodic-y' in mconf:
            U_temp = U.clone()
        U = fluid.setWallBcs(U, flags)
        if 'periodic-x' in mconf and 'periodic-y' in mconf:
            if mconf['periodic-x']:
                U[:, 1, :, :, 1] = U_temp[:, 1, :, :, U.size(4) - 1]
            if mconf['periodic-y']:
                U[:, 0, :, 1] = U_temp[:, 0, :, U.size(3) - 1]
    elif stick:
        fluid.setWallBcsStick(U, flags, flags_stick)

    # Set the constant domain values.
    setConstVals(batch_dict, p, U, flags, density)

    if (sim_method == 'convnet'):
        # fprop the model to perform the pressure projection and velocity update.
        # Wall BCs are set inside the model, before and after the projection,
        # so there is no need to call setWallBcs again here.
        net.eval()
        data = torch.cat((p, U, flags, density), 1)
        p, U = net(data)

    elif (sim_method == 'jacobi'):
        div = fluid.velocityDivergence(U, flags)

        is3D = (U.size(2) > 1)
        pTol = mconf['pTol']
        maxIter = mconf['jacobiIter']

        p, residual = fluid.solveLinearSystemJacobi( \
                flags=flags, div=div, is_3d=is3D, p_tol=pTol, \
                max_iter=maxIter)
        fluid.velocityUpdate(pressure=p, U=U, flags=flags)

    if sim_method != 'convnet':
        if 'periodic-x' in mconf and 'periodic-y' in mconf:
            U_temp = U.clone()
        U = fluid.setWallBcs(U, flags)
        if 'periodic-x' in mconf and 'periodic-y' in mconf:
            if mconf['periodic-x']:
                U[:, 1, :, :, 1] = U_temp[:, 1, :, :, U.size(4) - 1]
            if mconf['periodic-y']:
                U[:, 0, :, 1] = U_temp[:, 0, :, U.size(3) - 1]
    elif stick:
        fluid.setWallBcsStick(U, flags, flags_stick)

    setConstVals(batch_dict, p, U, flags, density)
    batch_dict['U'] = U
    batch_dict['density'] = density
    batch_dict['p'] = p
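
Every mconf key that simulate() reads above can be collected in one place. The defaults below are an illustrative sketch, not the repository's configuration: the values echo constants used elsewhere in these snippets (dt = 0.1, maccormackStrength = 0.6, jacobiIter = 34), and the optional 'periodic-x'/'periodic-y' keys are only added when periodic BCs are wanted.

def default_mconf():
    return {
        'dt': 0.1,
        'maccormackStrength': 0.6,
        'sampleOutsideFluid': False,
        'buoyancyScale': 0.0,
        'gravityScale': 0.0,
        'viscosity': 0.0,
        'correctScalar': False,
        'pTol': 0.0,
        'jacobiIter': 34,
        'gravityVec': {'x': 0.0, 'y': 0.0, 'z': 0.0},
        'operatingDensity': 0.0,
    }

# Usage: simulate(conf, default_mconf(), batch_dict, net, 'jacobi')
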
Example #7
    def val(data, target, it):
        net.eval()
        loss = nn.MSELoss()
        total_val_loss = 0
        p_l2_total_loss = 0
        div_l2_total_loss = 0
        p_l1_total_loss = 0
        div_l1_total_loss = 0

        # Loss types
        _pL2Loss = nn.MSELoss()
        _divL2Loss = nn.MSELoss()
        _pL1Loss = nn.L1Loss()
        _divL1Loss = nn.L1Loss()

        # Loss lambdas (multiply the corresponding loss)
        pL2Lambda = mconf['pL2Lambda']
        divL2Lambda = mconf['divL2Lambda']
        pL1Lambda = mconf['pL1Lambda']
        divL1Lambda = mconf['divL1Lambda']

        dt = 0.1
        maccormackStrength = 0.6
        with torch.no_grad():
            if torch.cuda.is_available():
                data, target = data.cuda(), target.cuda()
            U = data[:, 1:3].clone()
            flags = data[:, 3].unsqueeze(1)
            U = fluid.advectVelocity(dt, U, flags, \
                    method="maccormackFluidNet", \
                    boundary_width=1, maccormack_strength=maccormackStrength)
            data[:, 1:3] = U
            out_p, out_U = net(data)
            target_p = target[:, 0].unsqueeze(1)
            out_div = fluid.velocityDivergence(\
                    out_U.contiguous(), \
                    data[:,3].unsqueeze(1).contiguous())
            target_div = torch.zeros_like(out_div)

            # Measure loss and save it
            pL2Loss = pL2Lambda * _pL2Loss(out_p, target_p)
            divL2Loss = divL2Lambda * _divL2Loss(out_div, target_div)
            pL1Loss = pL1Lambda * _pL1Loss(out_p, target_p)
            divL1Loss = divL1Lambda * _divL1Loss(out_div, target_div)

            loss_size = pL2Loss + divL2Loss + pL1Loss + divL1Loss

            # Just 1 batch
            p_l2_total_loss += pL2Loss.data.item()
            div_l2_total_loss += divL2Loss.data.item()
            p_l1_total_loss += pL1Loss.data.item()
            div_l1_total_loss += divL1Loss.data.item()
            total_val_loss += loss_size.item()

            flags = data[:, 3].unsqueeze(1).contiguous()
            out_list = [out_p, out_U, out_div]
            loss = [total_val_loss, p_l2_total_loss, div_l2_total_loss, \
                    p_l1_total_loss, div_l1_total_loss]
            filename = 'figures/fig_' + str(it) + '.png'
            #if (it % 8 == 0):
            #    plotField(out_list, target, flags, loss, mconf, filename)
            data[:, 1:3] = out_U.clone()

        return data, target, loss