Example #1
        # Tail of a velocity-field arrow plot; `ax` (a matplotlib Axes) is
        # assumed to be created above this truncated fragment.
        ax.quiver(
                X[:maxX_win:skip],
                Y[:maxY_win:skip],
                u1[minY:maxY:skip, minX:maxX:skip],
                v1[minY:maxY:skip, minX:maxX:skip],
                scale_units='height',
                scale=scale,
                #headwidth=headwidth, headlength=headlength,
                color='black')

        # Main loop
        while it < max_iter:
            # A warm-start variant could use 'jacobi' for the first iterations
            # before switching to the configured method.
            method = mconf['simMethod']
            lib.simulate(mconf, batch_dict, net, method)
            if it % outIter == 0:
                print("It = " + str(it))
                tensor_div = fluid.velocityDivergence(
                    batch_dict['U'].clone(), batch_dict['flags'].clone())
                pressure = batch_dict['p'].clone()
                tensor_vel = fluid.getCentered(batch_dict['U'].clone())
                density = batch_dict['density'].clone()
                div = torch.squeeze(tensor_div).cpu().data.numpy()
                # Obstacle mask: flag value 2 marks obstacle cells.
                np_mask = torch.squeeze(
                    batch_dict['flags'].eq(2)).cpu().data.numpy().astype(float)
                rho = torch.squeeze(density).cpu().data.numpy()
                p = torch.squeeze(pressure).cpu().data.numpy()
                img_norm_vel = torch.squeeze(
                    torch.norm(tensor_vel, dim=1,
                               keepdim=True)).cpu().data.numpy()
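The fragment above extracts divergence, density, pressure, and velocity-norm
fields as numpy arrays but is truncated before they are drawn. A minimal
sketch of how they could be displayed, assuming a 2D simulation (so the
squeezed fields are 2-D arrays) and matplotlib; the layout and colormap are
illustrative, not taken from the original:

    import matplotlib.pyplot as plt
    import numpy as np

    fields = {'divergence': div, 'density': rho,
              'pressure': p, '|U|': img_norm_vel}
    fig, axes = plt.subplots(1, len(fields), figsize=(16, 4))
    for ax_i, (name, field) in zip(axes, fields.items()):
        # Hide obstacle cells (np_mask == 1) before plotting.
        im = ax_i.imshow(np.ma.masked_where(np_mask > 0, field),
                         origin='lower', cmap='viridis')
        ax_i.set_title(name)
        fig.colorbar(im, ax=ax_i)
    plt.show()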
Example #2
    def run_epoch(epoch, loader, training=True):
        if training:
            # Set the model to training mode.
            net.train()
        else:
            # Otherwise, set it to evaluation mode.
            net.eval()

        # Initialise the loss accumulators
        total_loss = 0
        p_l2_total_loss = 0
        div_l2_total_loss = 0
        p_l1_total_loss = 0
        div_l1_total_loss = 0
        div_lt_total_loss = 0

        n_batches = 0  # Number of processed batches

        # Loss types
        _pL2Loss = nn.MSELoss()
        _divL2Loss = nn.MSELoss()
        _divLTLoss = nn.MSELoss()
        _pL1Loss = nn.L1Loss()
        _divL1Loss = nn.L1Loss()

        # Loss lambdas (multiply the corresponding loss)
        pL2Lambda = mconf['pL2Lambda']
        divL2Lambda = mconf['divL2Lambda']
        pL1Lambda = mconf['pL1Lambda']
        divL1Lambda = mconf['divL1Lambda']
        divLTLambda = mconf['divLongTermLambda']
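
        # The total objective assembled below (a summary, not extra code):
        #   L =  pL2Lambda   * MSE(out_p, target_p)
        #      + divL2Lambda * MSE(div(out_U), 0)
        #      + pL1Lambda   * MAE(out_p, target_p)
        #      + divL1Lambda * MAE(div(out_U), 0)
        #      + divLongTermLambda * MSE(div(U_future), 0)   (when enabled)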

        # Loop over the batches produced by the loader
        for batch_idx, (data, target) in enumerate(loader):
            if torch.cuda.is_available():
                data, target = data.cuda(), target.cuda()

            if training:
                # Set gradients to zero, clearing previous batches.
                optimizer.zero_grad()

            # data indexes     |           |
            #       (dim 1)    |    2D     |    3D
            # ----------------------------------------
            #   DATA:
            #       pDiv       |    0      |    0
            #       UDiv       |    1:3    |    1:4
            #       flags      |    3      |    4
            #       densityDiv |    4      |    5
            #   TARGET:
            #       p          |    0      |    0
            #       U          |    1:3    |    1:4
            #       density    |    3      |    4
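            # For example, in the 2D case the table unpacks as:
            #   pDiv       = data[:, 0].unsqueeze(1)
            #   UDiv       = data[:, 1:3]
            #   flags      = data[:, 3].unsqueeze(1)
            #   densityDiv = data[:, 4].unsqueeze(1)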

            is3D = data.size(1) == 6
            assert data.size(1) in (5, 6), \
                "Data must have 5 input channels for 2D or 6 for 3D"

            # Run the model forward
            # Flags are channel 3 in 2D and channel 4 in 3D (see the table above).
            flags = data[:, 4 if is3D else 3].unsqueeze(1).contiguous()
            out_p, out_U = net(data)

            # Calculate targets
            target_p = target[:, 0].unsqueeze(1)
            out_div = fluid.velocityDivergence(out_U.contiguous(), flags)
            target_div = torch.zeros_like(out_div)

            # Measure loss and save it
            pL2Loss = pL2Lambda * _pL2Loss(out_p, target_p)
            divL2Loss = divL2Lambda * _divL2Loss(out_div, target_div)
            pL1Loss = pL1Lambda * _pL1Loss(out_p, target_p)
            divL1Loss = divL1Lambda * _divL1Loss(out_div, target_div)

            loss_size = pL2Loss + divL2Loss + pL1Loss + divL1Loss

            # We calculate the divergence of a future frame.
            if (divLTLambda > 0):
                # Check whether additional buoyancy or gravity is added to future frames.
                # Adding buoyancy means adding a source term f = delta_rho * g to the
                # momentum equation, with rho = rho_0 + delta_rho (constant term plus
                # fluctuation) and delta_rho << rho_0.
                # Adding gravity means a source term of the type f = rho_0 * g.
                # The source term is a vector (direction and magnitude).
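                # Worked example with hypothetical numbers (not from mconf):
                # with rho_0 = 1.0, delta_rho = 0.01 and g = (0, -9.81),
                # buoyancy contributes f = delta_rho * g = (0, -0.0981), while
                # gravity contributes f = rho_0 * g = (0, -9.81).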

                oldBuoyancyScale = mconf['buoyancyScale']
                # rand(1) draws from a uniform distribution on [0, 1).
                if torch.rand(1)[0] < mconf['trainBuoyancyProb']:
                    # Add buoyancy to this batch (only in the long term frames)
                    var = torch.tensor([1.], device=cuda0)
                    mconf['buoyancyScale'] = torch.normal(
                        mconf['trainBuoyancyScale'], var)

                oldGravityScale = mconf['gravityScale']
                # rand(1) draws from a uniform distribution on [0, 1).
                if torch.rand(1)[0] < mconf['trainGravityProb']:
                    # Add gravity to this batch (only in the long term frames)
                    var = torch.tensor([1.], device=cuda0)
                    mconf['gravityScale'] = torch.normal(
                        mconf['trainGravityScale'], var)

                oldGravity = mconf['gravityVec']
                if mconf['buoyancyScale'] > 0 or mconf['gravityScale'] > 0:
                    # Zero out the gravity vector (direction of gravity).
                    mconf['gravityVec']['x'] = 0
                    mconf['gravityVec']['y'] = 0
                    mconf['gravityVec']['z'] = 0

                    # Randomly choose a cardinal direction (two options in 2D,
                    # three in 3D) and a random +/- sign.
                    card_dir = random.randint(0, 2) if is3D else random.randint(0, 1)

                    updown = random.randint(0, 1) * 2 - 1  # Uniformly -1 or +1.
                    if card_dir == 0:
                        mconf['gravityVec']['x'] = updown
                    elif card_dir == 1:
                        mconf['gravityVec']['y'] = updown
                    elif card_dir == 2:
                        mconf['gravityVec']['z'] = updown

                base_dt = mconf['dt']

                if mconf['timeScaleSigma'] > 0:
                    # FluidNet: randn() samples a normal distribution with mean 0
                    # and variance 1. The mean of abs(randn) is ~0.7972, hence the
                    # 0.2028 offset below (so E[scale_dt] ~= 1 when timeScaleSigma == 1).
                    scale_dt = 0.2028 + torch.abs(torch.randn(1))[0] * \
                        mconf['timeScaleSigma']
                    mconf['dt'] = base_dt * scale_dt

                num_future_steps = mconf['longTermDivNumSteps'][0]
                # rand(1) draws from a uniform distribution on [0, 1).
                # longTermDivProbability is the probability that longTermDivNumSteps[0]
                # is taken; otherwise longTermDivNumSteps[1] is taken with probability
                # 1 - longTermDivProbability.
                if torch.rand(1)[0] > mconf['longTermDivProbability']:
                    num_future_steps = mconf['longTermDivNumSteps'][1]
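                # E.g. with longTermDivNumSteps = [4, 16] and
                # longTermDivProbability = 0.9 (hypothetical values): 4 future
                # steps are simulated 90% of the time, 16 steps otherwise.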

                batch_dict = {}
                batch_dict['p'] = out_p
                batch_dict['U'] = out_U
                batch_dict['flags'] = flags

                # Set the simulation forward n steps (using model, no grad calculation),
                # but on the last do not perform a pressure projection.
                # We then input last state to model with grad calculation and add to global loss.
                with torch.no_grad():
                    for i in range(0, num_future_steps):
                        # True only on the last step: skip its pressure projection.
                        output_div = (i == num_future_steps - 1)
                        lib.simulate(mconf, batch_dict, net,
                                     'convnet', output_div=output_div)

                # Assemble the long-term input (2D layout: U fills channels 1:3;
                # a 3D run would use channels 1:4 with flags in channel 4).
                data_lt = torch.zeros_like(data)
                data_lt[:, 0] = batch_dict['p'].squeeze(1)
                data_lt[:, 1:3] = batch_dict['U']
                data_lt[:, 3] = batch_dict['flags'].squeeze(1)
                data_lt = data_lt.contiguous()

                mconf['dt'] = base_dt

                out_p_LT, out_U_LT = net(data_lt)
                out_div_LT = fluid.velocityDivergence(out_U_LT.contiguous(),
                                                      flags)
                target_div_LT = torch.zeros_like(out_div_LT)
                divLTLoss = divLTLambda * _divLTLoss(out_div_LT, target_div_LT)

                loss_size += divLTLoss

            # Print statistics
            p_l2_total_loss += pL2Loss.data.item()
            div_l2_total_loss += divL2Loss.data.item()
            p_l1_total_loss += pL1Loss.data.item()
            div_l1_total_loss += divL1Loss.data.item()
            if (divLTLambda > 0):
                div_lt_total_loss += divLTLoss.data.item()
            total_loss += loss_size.data.item()

            # Plotting below needs a deterministic batch order; the batches are
            # treated as unshuffled exactly when shuffle_training differs from
            # the current phase.
            shuffled = (shuffle_training == training)

            # Print fields for debug
            if print_training and (not shuffled) and (batch_idx*len(data) in list_to_plot) \
                and ((epoch-1) % 5 == 0):
                print_list = [batch_idx * len(data), epoch]
                filename_p = 'output_p_{0:05d}_ep_{1:03d}.png'.format(
                    *print_list)
                filename_vel = 'output_v_{0:05d}_ep_{1:03d}.png'.format(
                    *print_list)
                filename_div = 'output_div_{0:05d}_ep_{1:03d}.png'.format(
                    *print_list)
                file_plot_p = glob.os.path.join(m_path, filename_p)
                file_plot_vel = glob.os.path.join(m_path, filename_vel)
                file_plot_div = glob.os.path.join(m_path, filename_div)
                with torch.no_grad():
                    out_list = [out_p[0].unsqueeze(0), out_U[0].unsqueeze(0),
                                out_div[0].unsqueeze(0)]
                    loss_list = [total_loss, p_l2_total_loss,
                                 div_l2_total_loss, div_lt_total_loss,
                                 p_l1_total_loss, div_l1_total_loss]
                    # One plot per field: pressure, velocity, divergence.
                    for filename, plot_flags in [
                            (file_plot_p, dict(plotPres=True, plotVel=False, plotDiv=False)),
                            (file_plot_vel, dict(plotPres=False, plotVel=True, plotDiv=False)),
                            (file_plot_div, dict(plotPres=False, plotVel=False, plotDiv=True))]:
                        lib.plotField(out=out_list,
                                      tar=target[0].unsqueeze(0),
                                      flags=flags[0].unsqueeze(0),
                                      loss=loss_list,
                                      mconf=mconf,
                                      epoch=epoch,
                                      filename=filename,
                                      save=save_or_show,
                                      title=False,
                                      x_slice=104,
                                      **plot_flags)

            if training:
                # Run the backpropagation for all the losses.
                loss_size.backward()

                # Step the optimizer
                optimizer.step()

            n_batches += 1

            if training:
                # Print every 20th batch of an epoch
                if batch_idx % 20 == 0:
                    print('Train Epoch: {} [{}/{} ({:.0f}%)] \t'.format(
                        epoch, batch_idx * len(data), len(loader.dataset),
                        100. * batch_idx / len(loader)))

        # Average the losses over the number of processed batches
        p_l2_total_loss /= n_batches
        div_l2_total_loss /= n_batches
        p_l1_total_loss /= n_batches
        div_l1_total_loss /= n_batches
        div_lt_total_loss /= n_batches
        total_loss /= n_batches

        # Print for the whole dataset
        if training:
            sstring = 'Train'
        else:
            sstring = 'Validation'
        print('\n{} set: Avg total loss: {:.6f} (L2(p): {:.6f}; L2(div): {:.6f}; '
              'L1(p): {:.6f}; L1(div): {:.6f}; LTDiv: {:.6f})'.format(
                  sstring, total_loss, p_l2_total_loss, div_l2_total_loss,
                  p_l1_total_loss, div_l1_total_loss, div_lt_total_loss))

        # Return loss scores
        return total_loss, p_l2_total_loss, div_l2_total_loss, \
                p_l1_total_loss, div_l1_total_loss, div_lt_total_loss
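
A sketch of how run_epoch might be driven from an outer loop. This is a
hypothetical harness, not part of the original: net, optimizer, train_loader
and val_loader are assumed to be constructed elsewhere, and the epoch count
and checkpoint path are illustrative.

    num_epochs = 100  # assumed
    best_val_loss = float('inf')
    for epoch in range(1, num_epochs + 1):
        run_epoch(epoch, train_loader, training=True)
        val_losses = run_epoch(epoch, val_loader, training=False)
        # val_losses[0] is the average total validation loss.
        if val_losses[0] < best_val_loss:
            best_val_loss = val_losses[0]
            torch.save(net.state_dict(), 'best_model.pth')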