Example #1
def train(train_loader, model, optimizer, epoch, train_writer):
    global n_iter, args
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    flow2_EPEs = AverageMeter()

    epoch_size = len(train_loader) if args.epoch_size == 0 else min(
        len(train_loader), args.epoch_size)

    # switch to train mode
    model.train()

    end = time.time()

    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        target = target.to(device)
        input = torch.cat(input, 1).to(device)

        # compute output
        output = model(input)
        if args.sparse:
            # Since Target pooling is not very precise when sparse,
            # take the highest resolution prediction and upsample it instead of downsampling target
            h, w = target.size()[-2:]
            output = [F.interpolate(output[0], (h, w)), *output[1:]]

        loss = multiscaleEPE(output,
                             target,
                             weights=args.multiscale_weights,
                             sparse=args.sparse)
        flow2_EPE = args.div_flow * realEPE(
            output[0], target, sparse=args.sparse)
        # record loss and EPE
        losses.update(loss.item(), target.size(0))
        train_writer.add_scalar('train_loss', loss.item(), n_iter)
        flow2_EPEs.update(flow2_EPE.item(), target.size(0))

        # compute gradient and do optimization step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print(
                'Epoch: [{0}][{1}/{2}]\t Time {3}\t Data {4}\t Loss {5}\t EPE {6}'
                .format(epoch, i, epoch_size, batch_time, data_time, losses,
                        flow2_EPEs))
        n_iter += 1
        if i >= epoch_size:
            break

    return losses.avg, flow2_EPEs.avg
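
Every example below leans on an AverageMeter helper that the listing does not show. A minimal sketch consistent with how it is used — update(value, n) plus direct formatting of the meter objects in the print statements — might look like this; the actual helper in the source repository may differ:

class AverageMeter(object):
    """Tracks the latest value, running sum, count, and average of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __repr__(self):
        # lets a meter be dropped straight into the training-progress print calls
        return '{:.3f} ({:.3f})'.format(self.val, self.avg)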
Example #2
def train(train_loader, model, optimizer, epoch, train_writer):
    global n_iter, args
    batch_time = AverageMeter()  # personal coding habit?
    data_time = AverageMeter()
    losses = AverageMeter()
    flow2_EPEs = AverageMeter()

    epoch_size = len(train_loader) if args.epoch_size == 0 else min(len(train_loader), args.epoch_size)

    # switch to train mode
    model.train()

    end = time.time()

    for i, (input, target) in enumerate(train_loader):  # e.g. len(train_loader) = 22 (170 samples / batch_size, rounded up)
        # measure data loading time
        data_time.update(time.time() - end)
        target = target.to(device)
        input = torch.cat(input,1).to(device)

        # compute output
        output = model(input)  # during training: a list of n tensors, each [b, 2, h, w]
        if args.sparse:  # the ground truth is sparse (e.g. KITTI, captured from real scenes rather than a synthetic dataset)
            # Since target pooling is not very precise when sparse,
            # take the highest-resolution prediction and upsample it instead of downsampling the target.
            # output[0] is flow2, output[1] is flow3, and so on
            h, w = target.size()[-2:]
            output = [F.interpolate(output[0], (h, w)), *output[1:]]
        # in training mode, the output is a list
        loss = multiscaleEPE(output, target, weights=args.multiscale_weights, sparse=args.sparse)
        # args.div_flow defaults to 20
        flow2_EPE = args.div_flow * realEPE(output[0], target, sparse=args.sparse)

        # record loss and EPE
        losses.update(loss.item(), target.size(0))
        train_writer.add_scalar('train_loss', loss.item(), n_iter)
        flow2_EPEs.update(flow2_EPE.item(), target.size(0))

        # compute gradient and do optimization step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print('Epoch: [{0}]\nTrain:[{1}/{2}]\t Time {3}\t Data {4}\t Loss {5}\t EPE {6}'
                  .format(epoch, i, epoch_size, batch_time,
                          data_time, losses, flow2_EPEs))
        n_iter += 1
        if i >= epoch_size:  # args.epoch_size defaults to 1000
            break

    return losses.avg, flow2_EPEs.avg  # each epoch returns its average loss and average EPE
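
The loss helpers multiscaleEPE and realEPE are likewise defined outside these snippets. A dense-only sketch consistent with how they are called here (the sparse=... handling for KITTI-style ground truth is omitted, and the real implementations may differ in detail):

import torch
import torch.nn.functional as F

def EPE(pred, target, mean=True):
    # endpoint error: per-pixel L2 distance between predicted and true flow vectors
    epe_map = torch.norm(target - pred, p=2, dim=1)
    return epe_map.mean() if mean else epe_map.sum() / pred.size(0)

def multiscaleEPE(outputs, target, weights):
    # weighted sum of per-scale EPEs; the target is downsampled (area
    # interpolation) to each prediction's resolution
    if not isinstance(outputs, (list, tuple)):
        outputs = [outputs]
    loss = 0
    for output, weight in zip(outputs, weights):
        h, w = output.size()[-2:]
        scaled_target = F.interpolate(target, (h, w), mode='area')
        loss += weight * EPE(output, scaled_target, mean=False)
    return loss

def realEPE(output, target):
    # upsample the finest prediction to ground-truth resolution, then report the mean EPE
    h, w = target.size()[-2:]
    upsampled = F.interpolate(output, (h, w), mode='bilinear', align_corners=False)
    return EPE(upsampled, target, mean=True)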
Example #3
def train(train_loader, model, optimizer, epoch, train_writer):
    global n_iter, args
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    flow2_EPEs = AverageMeter()

    epoch_size = len(train_loader) if args.epoch_size == 0 else min(
        len(train_loader), args.epoch_size)

    # switch to train mode
    model.train()

    end = time.time()

    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        target = target.cuda(non_blocking=True)  # `async` is a reserved word since Python 3.7
        input = [j.cuda() for j in input]
        input_var = torch.autograd.Variable(torch.cat(input, 1))  # Variable is a no-op since PyTorch 0.4
        target_var = torch.autograd.Variable(target)

        # compute output
        output = model(input_var)

        loss = multiscaleEPE(output,
                             target_var,
                             weights=args.multiscale_weights)
        flow2_EPE = args.div_flow * realEPE(output[0], target_var)
        # record loss and EPE
        losses.update(loss.item(), target.size(0))  # loss.data[0] fails on 0-dim tensors in modern PyTorch
        train_writer.add_scalar('train_loss', loss.item(), n_iter)
        flow2_EPEs.update(flow2_EPE.item(), target.size(0))

        # compute gradient and do optimization step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print(
                'Epoch: [{0}][{1}/{2}]\t Time {3}\t Data {4}\t Loss {5}\t EPE {6}'
                .format(epoch, i, epoch_size, batch_time, data_time, losses,
                        flow2_EPEs))
        n_iter += 1
        if i >= epoch_size:
            break

    return losses.avg, flow2_EPEs.avg
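
A note on the non_blocking=True fix above: asynchronous host-to-GPU copies only overlap with computation when the batches come from pinned host memory, which is a DataLoader flag. Dataset and argument names below are illustrative, not taken from the source:

train_loader = torch.utils.data.DataLoader(
    train_dataset,                # hypothetical dataset object
    batch_size=args.batch_size,   # hypothetical args
    num_workers=args.workers,
    pin_memory=True,              # pin host memory so non_blocking copies can overlap
    shuffle=True)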
Example #4
def train(train_loader, model, optimizer, epoch, train_writer):
    global n_iter, args
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    flow2_EPEs = AverageMeter()

    if args.data_loader == "torch":
        epoch_size = (len(train_loader) if args.epoch_size == 0 else min(
            len(train_loader), args.epoch_size))
    elif args.data_loader == "dali":
        epoch_size = (9999 if args.epoch_size == 0 else min(
            len(train_loader), args.epoch_size))

    # switch to train mode
    model.train()

    end = time.time()

    for i, data in enumerate(train_loader):
        if args.data_loader == "torch":
            (input, target) = data
        elif args.data_loader == "dali":
            input = [
                data[0]["images"][:, 0:3, :, :],
                data[0]["images"][:, 3:6, :, :],
            ]
            target = data[0]["flow"]

        if args.show_train_images:
            for k in range(len(input[0].cpu())):
                f, axarr = plt.subplots(2, 2)
                axarr[0, 0].imshow(np.moveaxis(np.array(input[0].cpu()[k]), 0, 2))
                axarr[0, 1].imshow(np.moveaxis(np.array(input[1].cpu()[k]), 0, 2))
                axarr[1, 0].imshow(
                    np.moveaxis(
                        flow2rgb(args.div_flow * np.squeeze(target.cpu()[k]),
                                 max_value=10),
                        0,
                        2,
                    ))
                plt.show()

        # measure data loading time
        data_time.update(time.time() - end)
        target = target.to(device)
        input = torch.cat(input, 1).to(device)

        # compute output
        output = model(input)
        if args.sparse:
            # Since Target pooling is not very precise when sparse,
            # take the highest resolution prediction and upsample it instead of downsampling target
            h, w = target.size()[-2:]
            output = [F.interpolate(output[0], (h, w)), *output[1:]]

        loss = multiscaleEPE(output,
                             target,
                             weights=args.multiscale_weights,
                             sparse=args.sparse)
        flow2_EPE = args.div_flow * realEPE(
            output, target,
            sparse=args.sparse)  # pass output[0] instead when the model returns a multi-scale list
        # record loss and EPE
        losses.update(loss.item(), target.size(0))
        train_writer.add_scalar("train_loss", loss.item(), n_iter)
        flow2_EPEs.update(flow2_EPE.item(), target.size(0))

        # compute gradient and do optimization step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print(
                "Epoch: [{0}][{1}/{2}]\t Time {3}\t Data {4}\t Loss {5}\t EPE {6}"
                .format(epoch, i, epoch_size, batch_time, data_time, losses,
                        flow2_EPEs))
        n_iter += 1
        if i >= epoch_size:
            break

    return losses.avg, flow2_EPEs.avg
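
Example #4 visualizes the ground-truth flow with a flow2rgb helper. A minimal sketch matching how it is called there (a channel-first 2 x H x W flow field and a scalar max_value); the repository's own helper may choose a different color mapping:

import numpy as np

def flow2rgb(flow_map, max_value=None):
    # flow_map: 2 x H x W array of (u, v) displacements; returns 3 x H x W RGB in [0, 1]
    flow = np.asarray(flow_map, dtype=np.float32)
    scale = max_value if max_value is not None else np.abs(flow).max() + 1e-8
    u, v = flow[0] / scale, flow[1] / scale
    rgb = np.ones((3,) + u.shape, dtype=np.float32)
    rgb[0] += u               # horizontal motion tints red
    rgb[1] -= 0.5 * (u + v)   # mixed motion dims green
    rgb[2] += v               # vertical motion tints blue
    return rgb.clip(0, 1)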
Example #5
def train(train_loader, model, optimizer, epoch, train_writer):
    global n_iter, args
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    flow2_EPEs = AverageMeter()

    epoch_size = len(train_loader) if args.epoch_size == 0 else min(len(train_loader), args.epoch_size)

    # switch to train mode
    model.train()

    end = time.time()

    #rot = flow_transforms.RandomRotate(30)
    #trans = flow_transforms.RandomTranslate(5)

    for i, (input, target) in enumerate(train_loader):

        ######Data Augmentation#############
        #input, target = rot(input, target)
        #input, target = trans(input, target)


        # measure data loading time
        data_time.update(time.time() - end)
        target = target.to(device)
        # unlike the other examples, the loader here already returns a single stacked tensor
        # input = torch.cat(input, 1).to(device)
        input = input.to(device)

        # compute output
        output = model(input)
        if args.sparse:
            # Since Target pooling is not very precise when sparse,
            # take the highest resolution prediction and upsample it instead of downsampling target
            h, w = target.size()[-2:]
            output = [F.interpolate(output[0], (h,w)), *output[1:]]

        loss = multiscaleEPE(output, target, weights=args.multiscale_weights, sparse=args.sparse)
        flow2_EPE = args.div_flow * realEPE(output[0], target, sparse=args.sparse)


        # record loss and EPE
        losses.update(loss.item(), target.size(0))
        train_writer.add_scalar('train_loss', loss.item(), n_iter)
        flow2_EPEs.update(flow2_EPE.item(), target.size(0))

        # compute gradient and do optimization step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t Time {3}\t Data {4}\t Loss {5}\t EPE {6}'
                  .format(epoch, i, epoch_size, batch_time,
                          data_time, losses, flow2_EPEs))
        n_iter += 1
        if i >= epoch_size:
            break

    return losses.avg, flow2_EPEs.avg
Example #6 (fragment)
        patht = patht.to(device)
        # unsqueeze would be needed here if the input lacked a batch dimension:
        # inputv = Variable(input.unsqueeze(0))
        inputv = Variable(input)

        target = Variable(patht.unsqueeze(2).unsqueeze(1))
        output = netD(inputv)
        loss = multiscaleEPE(output, target, multi_scale_weight, False)

        #output = output.view(Batch_size,Path_length).squeeze(1)
        save_out = output
        netD.zero_grad()

        # errD_real = criterion(output, labelv)
        # errD_real.backward()
        loss.backward()

        D_x = loss.detach().mean()  # .data is legacy; detach() is the modern equivalent
        optimizerD.step()
        # allow an early exit with 'q' when an OpenCV window is open
        if cv2.waitKey(12) & 0xFF == ord('q'):
            break
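
Note that this fragment calls multiscaleEPE positionally. Assuming the same signature as in the other examples, the equivalent keyword form would be:

loss = multiscaleEPE(output, target, weights=multi_scale_weight, sparse=False)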
Example #7
def train(train_loader, model, optimizer, epoch, train_writer, config):
    global n_iter

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    flow2_EPEs = AverageMeter()

    epoch_size = len(train_loader) if args.epoch_size == 0 else min(len(train_loader), args.epoch_size)

    # switch to train mode
    model.train()

    end = time.time()

    if not args.self_supervised_loss:
        # use old loss
        for i, (input, target) in enumerate(train_loader):
            # measure data loading time
            data_time.update(time.time() - end)
            target = target.to(device)
            input = torch.cat(input,1).to(device)

            # compute output
            output = model(input)
            if args.sparse:
                # Since Target pooling is not very precise when sparse,
                # take the highest resolution prediction and upsample it instead of downsampling target
                h, w = target.size()[-2:]
                output = [F.interpolate(output[0], (h,w)), *output[1:]]

            loss = multiscaleEPE(output, target, weights=args.multiscale_weights, sparse=args.sparse)
            flow2_EPE = args.div_flow * realEPE(output[0], target, sparse=args.sparse)
            # record loss and EPE
            losses.update(loss.item(), target.size(0))
            train_writer.add_scalar('train_loss', loss.item(), n_iter)
            flow2_EPEs.update(flow2_EPE.item(), target.size(0))

            # compute gradient and do optimization step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0:
                print('Epoch: [{0}][{1}/{2}]\t Time {3}\t Data {4}\t Loss {5}\t EPE {6}'
                      .format(epoch, i, epoch_size, batch_time,
                              data_time, losses, flow2_EPEs))
            n_iter += 1
            if i >= epoch_size:
                break

        return losses.avg, flow2_EPEs.avg
    elif args.unflow:
        weights = [0.005, 0.01, 0.02, 0.08, 0.32]

        for it, (input, target) in enumerate(train_loader):
            # measure data loading time
            data_time.update(time.time() - end)
            target = target.to(device)
            im1 = input[0].to(device)
            im2 = input[1].to(device)
            input_fw = torch.cat(input, 1).to(device)
            pred_fw = model(input_fw)
            input_bw = torch.cat((im2, im1), 1).to(device)
            pred_bw = model(input_bw)

            census_loss = 0
            census_loss_list = []
            if config['census']:
                #weights = [1, 0.34, 0.31, 0.27, 0.09]
                #max_dist = [3, 2, 2, 1, 1]
                for i in range(len(pred_fw)):
                    flow_fw = pred_fw[i] * args.div_flow
                    flow_bw = pred_bw[i] * args.div_flow
                    loss = ternary_loss(im2, im1, flow_fw, max_distance=1) + \
                        ternary_loss(im1, im2, flow_bw, max_distance=1)
                    census_loss += loss
                    census_loss_list.append(loss.item())
                    if not config['multiscale_census_loss']:
                        break
                train_writer.add_scalar('train_loss_census', census_loss.item(), n_iter)

            sl_loss = 0
            sl_loss_list = []
            if config['sl']:
                for i in range(len(pred_fw)):
                    flow_fw = pred_fw[i] * args.div_flow
                    flow_bw = pred_bw[i] * args.div_flow
                    loss = smoothness_loss(flow_fw, config) + smoothness_loss(flow_bw, config)
                    sl_loss += loss
                    sl_loss_list.append(loss.item())
                    if not config['multiscale_sl_loss']:
                        break
                train_writer.add_scalar('train_loss_sl', sl_loss.item(), n_iter)

            ssim_loss = 0
            ssim_loss_list = []
            if config['ssim']:
                for i in range(len(pred_bw)):
                    flow_bw = pred_bw[i] * args.div_flow
                    loss = ssim(im1, im2, flow_bw)
                    ssim_loss += loss
                    ssim_loss_list.append(loss.item())
                    if not config['multiscale_ssim_loss']:
                        break
                train_writer.add_scalar('train_loss_ssim', ssim_loss.item(), n_iter)

            fb_loss = 0
            fb_loss_list = []
            if config['fb']:
                for i in range(len(pred_bw)):
                    flow_fw = pred_fw[i] * args.div_flow
                    flow_bw = pred_bw[i] * args.div_flow
                    loss = forward_backward_loss(im1=im1, im2=im2, flow_fw=flow_fw, flow_bw=flow_bw, config=config)
                    fb_loss += loss
                    fb_loss_list.append(loss.item())
                    if not config['multiscale_fb_loss']:
                        break
                train_writer.add_scalar('train_loss_fb', fb_loss.item(), n_iter)

            # check the relative magnitudes of the individual loss terms
            if it % 500 == 0:
                print("[DEBUG] census_loss:", str(census_loss_list))
                print("[DEBUG] sl_loss:", str(sl_loss_list))
                print("[DEBUG] ssim_loss:", str(ssim_loss_list))
                print("[DEBUG] fb_loss:", str(fb_loss_list))

            loss = census_loss + sl_loss + ssim_loss + 0.001*fb_loss

            # record loss and EPE
            flow = pred_bw[0]
            losses.update(loss.item(), target.size(0))
            flow2_EPE = args.div_flow * realEPE(flow, target, sparse=args.sparse)
            train_writer.add_scalar('train_loss', loss.item(), n_iter)
            flow2_EPEs.update(flow2_EPE.item(), target.size(0))

            # compute gradient and do optimization step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if it % args.print_freq == 0:
                print('Epoch: [{0}][{1}/{2}]\t Time {3}\t Data {4}\t Loss {5}\t EPE {6}'
                      .format(epoch, it, epoch_size, batch_time,
                              data_time, losses, flow2_EPEs))
            n_iter += 1
            if it >= epoch_size:
                break

        return losses.avg, flow2_EPEs.avg


    else:
        # use self-supervised loss
        for it, (input, target) in enumerate(train_loader):
            # measure data loading time
            data_time.update(time.time() - end)
            target = target.to(device)
            im1 = input[0].to(device)
            im2 = input[1].to(device)
            input = torch.cat(input,1).to(device)
            pred = model(input)

            pl_loss = 0
            pl_loss_list = []
            for i in range(len(pred)):
                flow = pred[i] * args.div_flow
                loss = photometric_loss(im1, im2, flow, config)
                pl_loss += loss
                pl_loss_list.append(loss.item())

                if not config['multiscale_pl_loss']:
                    break

            sl_loss = 0
            sl_loss_list = []
            if config['weighted_sl_loss']:
                for i in range(len(pred)):
                    flow = pred[i] * args.div_flow
                    loss = weighted_smoothness_loss(im1, im2, flow, config)
                    sl_loss += loss
                    sl_loss_list.append(loss.item())

                    if not config['multiscale_sl_loss']:
                        break

            else:
                # smoothness loss for multi resolution flow pyramid
                for i in range(len(pred)):
                    flow = pred[i] * args.div_flow
                    loss = smoothness_loss(flow, config)
                    sl_loss += loss
                    sl_loss_list.append(loss.item())

                    if not config['multiscale_sl_loss']:
                        break
            # to check the magnitude of both losses
            if it % 500 == 0:
                print("[DEBUG] pl_loss:", str(pl_loss_list))
                print("[DEBUG] sl_loss:", str(sl_loss_list))

            loss = pl_loss + sl_loss

            # record loss and EPE
            flow = pred[0]
            losses.update(loss.item(), target.size(0))
            flow2_EPE = args.div_flow * realEPE(flow, target, sparse=args.sparse)
            train_writer.add_scalar('train_loss', loss.item(), n_iter)
            train_writer.add_scalar('train_loss_pl', pl_loss.item(), n_iter)
            train_writer.add_scalar('train_loss_sl', sl_loss.item(), n_iter)
            flow2_EPEs.update(flow2_EPE.item(), target.size(0))

            # compute gradient and do optimization step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if it % args.print_freq == 0:
                print('Epoch: [{0}][{1}/{2}]\t Time {3}\t Data {4}\t Loss {5}\t EPE {6}'
                      .format(epoch, it, epoch_size, batch_time,
                              data_time, losses, flow2_EPEs))
            n_iter += 1
            if it >= epoch_size:
                break

        return losses.avg, flow2_EPEs.avg
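
The self-supervised helpers (photometric_loss, smoothness_loss, ternary_loss, forward_backward_loss, ssim) are defined elsewhere and additionally take a config dict. As a rough sketch of the first two — assuming backward warping with bilinear sampling, a Charbonnier penalty, and first-order smoothness, and ignoring the per-scale resolution handling the real code presumably does:

import torch
import torch.nn.functional as F

def warp(im, flow):
    # backward-warp im (B x C x H x W) by flow (B x 2 x H x W, in pixels)
    b, _, h, w = im.size()
    ys, xs = torch.meshgrid(torch.arange(h, device=im.device),
                            torch.arange(w, device=im.device), indexing='ij')
    base = torch.stack((xs, ys), dim=0).float()    # 2 x H x W, (x, y) order
    coords = base.unsqueeze(0) + flow              # B x 2 x H x W
    # normalize coordinates to [-1, 1] as grid_sample expects
    gx = 2.0 * coords[:, 0] / (w - 1) - 1.0
    gy = 2.0 * coords[:, 1] / (h - 1) - 1.0
    grid = torch.stack((gx, gy), dim=3)            # B x H x W x 2
    return F.grid_sample(im, grid, align_corners=True)

def photometric_loss(im1, im2, flow, eps=1e-2, q=0.4):
    # Charbonnier penalty between im1 and im2 warped back to im1 by the flow
    diff = im1 - warp(im2, flow)
    return ((diff ** 2 + eps ** 2) ** q).mean()

def smoothness_loss(flow):
    # first-order smoothness: penalize spatial gradients of the flow field
    dx = (flow[:, :, :, 1:] - flow[:, :, :, :-1]).abs().mean()
    dy = (flow[:, :, 1:, :] - flow[:, :, :-1, :]).abs().mean()
    return dx + dy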