Example No. 1
def train(args, epoch, model, data_loader, optimizer):
    """
    TRAINING PROCEDURE

    Parameters:
    -----------
        - args: various arguments
        - epoch: current epoch number
        - model: model to train
        - data_loader: training data loader
        - optimizer: optimizer to use

    Returns:
    --------
        - average_loss: average loss per batch

    """

    statistics = []
    total_loss = 0

    model.train()
    title = 'Training Epoch {}'.format(epoch)
    progress = tqdm(tools.IteratorTimer(data_loader),
                    ncols=120,
                    total=len(data_loader),
                    smoothing=.9,
                    miniters=1,
                    leave=True,
                    desc=title)

    sys.stdout.flush()

    for batch_idx, (data, target) in enumerate(progress):

        #data, target = data.to(args.device), target.to(args.device)

        optimizer.zero_grad()
        d = model(data[0].to(args.device), im_2=data[1].to(args.device))
        loss = _apply_loss(d, target).mean()

        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        assert not np.isnan(total_loss)

        # Print out statistics
        statistics.append(loss.item())
        title = '{} Epoch {}'.format('Training', epoch)

        progress.set_description(title + '\tLoss:\t' + str(statistics[-1]))
        sys.stdout.flush()

    progress.close()

    return total_loss / float(batch_idx + 1)
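The loss helper _apply_loss used above (and in the next example) is not shown. Below is a minimal sketch of a per-sample loss that fits the call site, since the caller reduces it with .mean(); the choice of MSE and the exact reduction are assumptions, not the original implementation.

import torch.nn.functional as F

def _apply_loss(output, target):
    # Hypothetical per-sample loss: returns one value per batch element,
    # which train()/test() then reduce with .mean(). The real helper is not shown here.
    target = target.to(output.device)
    return F.mse_loss(output, target, reduction='none').flatten(1).mean(dim=1)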
Example No. 2
def test(args, epoch, model, data_loader):
    """
    TESTING PROCEDURE

    Parameters:
    -----------
        - args: various arguments
        - epoch: current epoch number
        - model: model to evaluate
        - data_loader: test data loader

    Returns:
    --------
        - average_loss: average loss per batch
        - pck: Percentage of Correct Keypoints metric

    """

    statistics = []
    total_loss = 0

    model.eval()
    title = 'Validating Epoch {}'.format(epoch)
    progress = tqdm(tools.IteratorTimer(data_loader),
                    ncols=120,
                    total=len(data_loader),
                    smoothing=.9,
                    miniters=1,
                    leave=True,
                    desc=title)
    predictions = []
    gt = []

    sys.stdout.flush()
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(progress):

            d = model(data[0].to(args.device), im_2=data[1].to(args.device))
            loss = _apply_loss(d, target).mean()
            total_loss += loss.item()
            predictions.extend(d.cpu().numpy())  # move model outputs back to the CPU before converting
            gt.extend(target.numpy())

            # Print out statistics
            statistics.append(loss.item())
            title = '{} Epoch {}'.format('Validating', epoch)

            progress.set_description(title + '\tLoss:\t' + str(statistics[-1]))
            sys.stdout.flush()

    progress.close()
    pck = tools.calc_pck(np.asarray(predictions), np.asarray(gt))
    print('PCK for epoch %d is %f' % (epoch, pck))

    return total_loss / float(batch_idx + 1), pck
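tools.calc_pck is external to this snippet. Below is a minimal sketch of a generic Percentage of Correct Keypoints metric, assuming predictions and ground truth are (N, K, 2) arrays of keypoint coordinates and a normalized distance threshold; the real implementation may normalize distances differently.

import numpy as np

def calc_pck(pred, gt, threshold=0.1):
    # A keypoint counts as correct when its Euclidean distance to the ground truth
    # is below the threshold; PCK is the fraction of correct keypoints.
    dists = np.linalg.norm(np.asarray(pred) - np.asarray(gt), axis=-1)
    return float((dists < threshold).mean())

print(calc_pck(np.zeros((4, 17, 2)), np.zeros((4, 17, 2))))  # -> 1.0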
Example No. 3
    def train(args,
              epoch,
              start_iteration,
              data_loader,
              model,
              optimizer,
              loss,
              logger,
              is_validate=False,
              offset=0):
        statistics = []
        total_loss = 0
        gpu_mem = tools.gpumemusage()

        if is_validate:
            model.eval()
            title = 'Validating {} Epoch {}'.format(gpu_mem, epoch)
            args.validation_n_batches = np.inf if args.validation_n_batches < 0 else args.validation_n_batches
            progress = tqdm(tools.IteratorTimer(data_loader),
                            ncols=100,
                            total=np.minimum(len(data_loader),
                                             args.validation_n_batches),
                            leave=True,
                            position=offset,
                            desc=title)
        else:
            model.train()
            title = 'Training {} Epoch {}'.format(tools.gpumemusage(), epoch)
            args.train_n_batches = np.inf if args.train_n_batches < 0 else args.train_n_batches
            progress = tqdm(tools.IteratorTimer(data_loader),
                            ncols=120,
                            total=np.minimum(len(data_loader),
                                             args.train_n_batches),
                            smoothing=.9,
                            miniters=1,
                            leave=True,
                            position=offset,
                            desc=title)

        last_log_time = progress._time()
        for batch_idx, (data, target) in enumerate(progress):

            data, target = [Variable(d, volatile=is_validate) for d in data], [
                Variable(t, volatile=is_validate) for t in target
            ]
            if args.cuda:
                # non_blocking replaces the async= keyword, which is reserved in Python 3.7+
                data = [d.cuda(non_blocking=True) for d in data]
                target = [t.cuda(non_blocking=True) for t in target]

            optimizer.zero_grad() if not is_validate else None

            output = model(data[0])

            loss_labels, loss_values = loss(output, target[0])

            loss_val = loss_values[0]
            total_loss += loss_val.data[0]
            loss_values = [v.data[0] for v in loss_values]

            assert not np.isnan(total_loss)

            if not is_validate and args.fp16:
                loss_val.backward()
                if args.gradient_clip:
                    torch.nn.utils.clip_grad_norm(model.parameters(),
                                                  args.gradient_clip)

                params = list(model.parameters())
                for i in range(len(params)):
                    param_copy[i].grad = params[i].grad.clone().type_as(
                        params[i]).detach()
                    param_copy[i].grad.mul_(1. / args.loss_scale)
                optimizer.step()
                for i in range(len(params)):
                    params[i].data.copy_(param_copy[i].data)

            elif not is_validate:
                loss_val.backward()
                if args.gradient_clip:
                    torch.nn.utils.clip_grad_norm(model.parameters(),
                                                  args.gradient_clip)
                optimizer.step()

            # Update hyperparameters if needed
            global_iteration = start_iteration + batch_idx
            if not is_validate:
                tools.update_hyperparameter_schedule(args, epoch,
                                                     global_iteration,
                                                     optimizer)
                loss_labels.append('lr')
                loss_values.append(optimizer.param_groups[0]['lr'])

            loss_labels.append('load')
            loss_values.append(progress.iterable.last_duration)

            # Print out statistics
            statistics.append(loss_values)
            title = '{} {} Epoch {}'.format(
                'Validating' if is_validate else 'Training',
                tools.gpumemusage(), epoch)

            progress.set_description(
                title + ' ' +
                tools.format_dictionary_of_losses(loss_labels, statistics[-1]))

            if ((((global_iteration + 1) % args.log_frequency) == 0
                 and not is_validate) or
                (is_validate and batch_idx == args.validation_n_batches - 1)):

                global_iteration = global_iteration if not is_validate else start_iteration

                logger.add_scalar(
                    'batch logs per second',
                    len(statistics) / (progress._time() - last_log_time),
                    global_iteration)
                last_log_time = progress._time()

                all_losses = np.array(statistics)

                for i, key in enumerate(loss_labels):
                    logger.add_scalar('average batch ' + key,
                                      all_losses[:,
                                                 i].mean(), global_iteration)
                    logger.add_histogram(key, all_losses[:, i],
                                         global_iteration)

            # Reset Summary
            statistics = []

            if (is_validate and (batch_idx == args.validation_n_batches)):
                break

            if ((not is_validate) and (batch_idx == (args.train_n_batches))):
                break

        progress.close()

        return total_loss / float(batch_idx + 1), (batch_idx + 1)
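The fp16 branch above updates a param_copy list that the snippet never defines. In this style of mixed-precision training it is typically an fp32 master copy of the fp16 model parameters that the optimizer actually steps; below is a minimal sketch of how such a copy is usually set up (the variable name and the surrounding setup are assumptions, not part of the original).

import torch

# Hypothetical setup for the fp16 branch: the model holds fp16 weights, while the optimizer
# steps an fp32 master copy (param_copy). In the training loop, gradients are copied from the
# fp16 parameters, unscaled by 1/loss_scale, applied to the fp32 copy, and the updated values
# are copied back -- exactly the per-parameter loop the snippet performs.
model = torch.nn.Linear(8, 2).half()
param_copy = [p.clone().float().detach() for p in model.parameters()]
for p in param_copy:
    p.requires_grad = True
optimizer = torch.optim.SGD(param_copy, lr=0.01)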
Example No. 4
def main(args):
    dataset = args.dataset
    bsize = args.batch_size
    root = args.data_root
    cache_root = args.cache
    prediction_root = args.pre

    train_root = root + dataset + '/train'
    val_root = root + dataset + '/val'  # validation dataset

    # os.mkdir(path[, mode]): creates a single directory (relative or absolute path); the default
    # mode is 0o777. Only the last path component is created; if a parent directory is missing,
    # an OSError is raised.
    # os.makedirs(path[, mode]): recursively creates the directory tree (default mode 0o777).
    # An OSError is raised if a subdirectory cannot be created or already exists (on Windows,
    # Error 183 means the directory already exists). For a single-level path it behaves like os.mkdir.
    check_root_opti = cache_root + '/opti'  # save checkpoint parameters
    if not os.path.exists(check_root_opti):
        os.makedirs(check_root_opti)

    check_root_feature = cache_root + '/feature'  # save checkpoint parameters
    if not os.path.exists(check_root_feature):
        os.makedirs(check_root_feature)

    # Build the data loaders over the transformed datasets
    train_loader = torch.utils.data.DataLoader(
        MyData(train_root, transform=True),
        batch_size=bsize, shuffle=True, num_workers=4, pin_memory=True
    )
    val_loader = torch.utils.data.DataLoader(
        MyTestData(val_root, transform=True),
        batch_size=bsize, shuffle=True, num_workers=4, pin_memory=True
    )

    model = densenet169(pretrained=True, new_block=RCL_Module).cuda()

    criterion = nn.BCELoss()
    optimizer_feature = torch.optim.Adam(model.parameters(), lr=args.lr)
    # http://www.spytensor.com/index.php/archives/32/
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer_feature, 'max', verbose=1, patience=10
    )

    progress = tqdm(
        range(args.start_epoch, args.total_epochs + 1), miniters=1,
        ncols=100, desc='Overall Progress', leave=True, position=0
    )

    offset = 1
    best = 0
    result = {'epoch': [], 'F_measure': [], 'MAE': []}
    for epoch in progress:
        # ===============================TRAIN=================================
        title = 'Training Epoch {}'.format(epoch)
        progress_epoch = tqdm(
            tools.IteratorTimer(train_loader), ncols=120,
            total=len(train_loader), smoothing=0.9, miniters=1,
            leave=True, position=offset, desc=title
        )

        train(model, progress_epoch, criterion, optimizer_feature, epoch, args)

        # ==============================TEST===================================
        if epoch % args.val_rate == 0:
            epoch, F_measure, mae = test(
                model, val_loader, epoch,
                prediction_root, check_root_feature, check_root_opti, val_root
            )

            result['epoch'].append(int(epoch))
            result['F_measure'].append(round(float(F_measure), 3))
            result['MAE'].append(round(float(mae), 3))
            df = pd.DataFrame(result).set_index('epoch')
            df.to_csv('./lart/result.csv')

            if epoch == 0:
                best = F_measure - mae
            elif (F_measure - mae) > best:
                best = F_measure - mae
                # save the best model weights
                filename = ('%s/feature-best.pth' % check_root_feature)
                torch.save(model.state_dict(), filename)
                # save the best optimizer state
                filename_opti = ('%s/opti-best.pth' % check_root_opti)
                torch.save(optimizer_feature.state_dict(), filename_opti)

            # only adjust the learning rate during validation epochs
            scheduler.step(best)
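The scheduler above runs in 'max' mode on the combined validation score F_measure - MAE, so the learning rate is lowered only when that score stops improving. Below is a minimal, self-contained sketch of the same idea with hypothetical scores.

import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'max', patience=10)

for score in [0.50, 0.52, 0.52, 0.51]:   # hypothetical per-validation F_measure - MAE values
    scheduler.step(score)                # LR drops after patience steps without improvement
print(optimizer.param_groups[0]['lr'])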
Example No. 5
    def train(args,
              epoch,
              start_iteration,
              data_loader,
              model,
              optimizer,
              logger,
              is_validate=False,
              offset=0):
        statistics = []
        total_loss = 0

        if is_validate:
            model.eval()
            title = 'Validating Epoch {}'.format(epoch)
            args.validation_n_batches = len(
                data_loader
            ) - 1 if args.validation_n_batches < 0 else args.validation_n_batches
            progress = tqdm(tools.IteratorTimer(data_loader),
                            ncols=100,
                            total=np.minimum(len(data_loader),
                                             args.validation_n_batches),
                            leave=True,
                            position=offset,
                            desc=title)
        else:
            model.train()
            title = 'Training Epoch {}'.format(epoch)
            args.train_n_batches = len(
                data_loader
            ) - 1 if args.train_n_batches < 0 else args.train_n_batches
            progress = tqdm(tools.IteratorTimer(data_loader),
                            ncols=120,
                            total=np.minimum(len(data_loader),
                                             args.train_n_batches),
                            smoothing=.9,
                            miniters=1,
                            leave=True,
                            position=offset,
                            desc=title)

        last_log_time = progress._time()
        for batch_idx, (data, target) in enumerate(progress):

            data, target = [Variable(d, volatile=is_validate) for d in data], [
                Variable(t, volatile=is_validate) for t in target
            ]
            if args.cuda and args.number_gpus == 1:
                # non_blocking replaces the async= keyword, which is reserved in Python 3.7+
                data = [d.cuda(non_blocking=True) for d in data]
                target = [t.cuda(non_blocking=True) for t in target]

            optimizer.zero_grad() if not is_validate else None
            losses = model(data[0], target[0])
            losses = [torch.mean(loss_value) for loss_value in losses]
            loss_val = losses[1]  # use the second loss term for the weight update
            total_loss += loss_val.data[0]
            loss_values = [v.data[0] for v in losses]

            # gather loss_labels; returning them directly hits a recursion limit error as it looks for variables to gather
            loss_labels = list(model.module.loss.loss_labels)

            assert not np.isnan(total_loss)

            if not is_validate and args.fp16:
                loss_val.backward()
                if args.gradient_clip:
                    torch.nn.utils.clip_grad_norm(model.parameters(),
                                                  args.gradient_clip)

                params = list(model.parameters())
                for i in range(len(params)):
                    param_copy[i].grad = params[i].grad.clone().type_as(
                        params[i]).detach()
                    param_copy[i].grad.mul_(1. / args.loss_scale)
                optimizer.step()
                for i in range(len(params)):
                    params[i].data.copy_(param_copy[i].data)

            elif not is_validate:
                loss_val.backward()
                if args.gradient_clip:
                    torch.nn.utils.clip_grad_norm(model.parameters(),
                                                  args.gradient_clip)
                optimizer.step()

            # Update hyperparameters if needed
            global_iteration = start_iteration + batch_idx
            if not is_validate:
                tools.update_hyperparameter_schedule(args, epoch,
                                                     global_iteration,
                                                     optimizer)
                loss_labels.append('lr')
                loss_values.append(optimizer.param_groups[0]['lr'])

            loss_labels.append('load')
            loss_values.append(progress.iterable.last_duration)

            # Print out statistics
            statistics.append(loss_values)
            title = '{} Epoch {}'.format(
                'Validating' if is_validate else 'Training', epoch)

            if isinstance(loss_labels[0], (list, tuple)):
                progress.set_description(title + ' ' + tools.format_dictionary_of_losses(
                    loss_labels[0], statistics[-1]))
            else:
                progress.set_description(title + ' ' + tools.format_dictionary_of_losses(
                    loss_labels, statistics[-1]))

            if ((((global_iteration + 1) % args.log_frequency) == 0
                 and not is_validate) or
                (is_validate and batch_idx == args.validation_n_batches - 1)):

                global_iteration = global_iteration if not is_validate else start_iteration

                logger.add_scalar(
                    'batch logs per second',
                    len(statistics) / (progress._time() - last_log_time),
                    global_iteration)
                last_log_time = progress._time()

                all_losses = np.array(statistics)

                keys = loss_labels[0] if isinstance(loss_labels[0], (list, tuple)) else loss_labels
                for i, key in enumerate(keys):
                    logger.add_scalar('average batch ' + str(key),
                                      all_losses[:, i].mean(), global_iteration)
                    #logger.add_histogram(str(key), all_losses[:, i], global_iteration)
                if is_validate:
                    _, output = model(data[0], target[0], inference=True)
                    render_flow = output[0].data.cpu().numpy().transpose(
                        1, 2, 0)
                    ground_truth = target[0][0].data.cpu().numpy().transpose(
                        1, 2, 0)
                    render_img = tools.flow_to_image(render_flow).transpose(
                        2, 0, 1)
                    true_img = tools.flow_to_image(ground_truth).transpose(
                        2, 0, 1)
                    render_img = torch.Tensor(render_img) / 255.0
                    true_img = torch.Tensor(true_img) / 255.0
                    input_img = data[0][0, :, 0, :, :].data.cpu() / 255.0
                    logger.add_image('renderimg',
                                     torchvision.utils.make_grid(render_img),
                                     global_iteration)
                    logger.add_image('ground_truth',
                                     torchvision.utils.make_grid(true_img),
                                     global_iteration)
                    logger.add_image('input_img',
                                     torchvision.utils.make_grid(input_img),
                                     global_iteration)

            # Reset Summary
            statistics = []

            if (is_validate and (batch_idx == args.validation_n_batches)):
                break

            if ((not is_validate) and (batch_idx == (args.train_n_batches))):
                break

        progress.close()

        return total_loss / float(batch_idx + 1), (batch_idx + 1)
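The validation branch converts predicted and ground-truth flows to RGB with tools.flow_to_image and writes them with logger.add_image. Below is a minimal sketch of that logging step using torch.utils.tensorboard (the original logger may be a different writer, and the tensors here are placeholders), assuming CHW float images in [0, 1].

import torch
import torchvision
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter('runs/flow_debug')   # hypothetical log directory
render_img = torch.rand(3, 64, 64)          # stands in for flow_to_image(render_flow) / 255.0
true_img = torch.rand(3, 64, 64)            # stands in for flow_to_image(ground_truth) / 255.0
writer.add_image('renderimg', torchvision.utils.make_grid([render_img, true_img]), 0)
writer.close()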
Example No. 6
    def train(input_args,
              train_epoch,
              start_iteration,
              files_loader,
              model,
              model_optimizer,
              logger,
              is_validate=False,
              offset=0):
        statistics = []
        total_loss = 0

        if is_validate:
            model.eval()
            title = 'Validating Epoch {}'.format(train_epoch)
            input_args.validation_n_batches = np.inf if input_args.validation_n_batches < 0 else input_args.validation_n_batches
            file_progress = tqdm(tools.IteratorTimer(files_loader),
                                 ncols=100,
                                 total=np.minimum(
                                     len(files_loader),
                                     input_args.validation_n_batches),
                                 leave=True,
                                 position=offset,
                                 desc=title)
        else:
            model.train()
            title = 'Training Epoch {}'.format(train_epoch)
            input_args.train_n_batches = np.inf if input_args.train_n_batches < 0 else input_args.train_n_batches
            file_progress = tqdm(tools.IteratorTimer(files_loader),
                                 ncols=120,
                                 total=np.minimum(len(files_loader),
                                                  input_args.train_n_batches),
                                 smoothing=.9,
                                 miniters=1,
                                 leave=True,
                                 position=offset,
                                 desc=title)

        last_log_time = file_progress._time()
        for batch_idx, (data_file) in enumerate(file_progress):
            video_dataset = datasets_video.VideoFileDataJIT(
                input_args, data_file[0])
            video_loader = DataLoader(video_dataset,
                                      batch_size=input_args.effective_batch_size,
                                      shuffle=True,
                                      **gpuargs)

            global_iteration = start_iteration + batch_idx

            # note~ for debugging purposes
            # video_frame_progress = tqdm(tools.IteratorTimer(video_loader), ncols=120,
            #                            total=len(video_loader), smoothing=0.9, miniters=1,
            #                            leave=True, desc=data_file[0])

            for i_batch, (data, target) in enumerate(video_loader):
                data, target = [Variable(d)
                                for d in data], [Variable(t) for t in target]
                if input_args.cuda and input_args.number_gpus == 1:
                    # non_blocking replaces the async= keyword, which is reserved in Python 3.7+
                    data = [d.cuda(non_blocking=True) for d in data]
                    target = [t.cuda(non_blocking=True) for t in target]

                model_optimizer.zero_grad() if not is_validate else None
                losses = model(data[0], target[0])
                losses = [torch.mean(loss_value) for loss_value in losses]
                loss_val = losses[0]  # Collect first loss for weight update
                total_loss += loss_val.data
                loss_values = [v.data for v in losses]

                # gather loss_labels; returning them directly hits a recursion limit error as it looks for variables to gather
                loss_labels = list(model.module.loss.loss_labels)

                assert not np.isnan(total_loss.cpu().numpy())

                if not is_validate and input_args.fp16:
                    loss_val.backward()
                    if input_args.gradient_clip:
                        torch.nn.utils.clip_grad_norm(model.parameters(),
                                                      input_args.gradient_clip)

                    params = list(model.parameters())
                    for i in range(len(params)):
                        param_copy[i].grad = params[i].grad.clone().type_as(
                            params[i]).detach()
                        param_copy[i].grad.mul_(1. / input_args.loss_scale)
                    model_optimizer.step()
                    for i in range(len(params)):
                        params[i].data.copy_(param_copy[i].data)
                elif not is_validate:
                    loss_val.backward()
                    if input_args.gradient_clip:
                        torch.nn.utils.clip_grad_norm(model.parameters(),
                                                      input_args.gradient_clip)
                    model_optimizer.step()

                # Update hyperparameters if needed
                if not is_validate:
                    tools.update_hyperparameter_schedule(
                        input_args, train_epoch, global_iteration,
                        model_optimizer)
                    loss_labels.append('lr')
                    loss_values.append(model_optimizer.param_groups[0]['lr'])

                    loss_labels.append('load')
                    loss_values.append(file_progress.iterable.last_duration)

            # Print out statistics
            statistics.append(loss_values)
            title = '{} Epoch {}'.format(
                'Validating' if is_validate else 'Training', train_epoch)

            file_progress.set_description(
                title + ' ' + tools.format_dictionary_of_losses(
                    tools.flatten_list(loss_labels), statistics[-1]))

            if ((((global_iteration + 1) % input_args.log_frequency) == 0
                 and not is_validate)
                    or (is_validate
                        and batch_idx == input_args.validation_n_batches - 1)):

                global_iteration = global_iteration if not is_validate else start_iteration

                logger.add_scalar(
                    'batch logs per second',
                    len(statistics) / (file_progress._time() - last_log_time),
                    global_iteration)
                last_log_time = file_progress._time()

                all_losses = np.array(statistics)

                for i, key in enumerate(tools.flatten_list(loss_labels)):
                    if isinstance(all_losses[:, i].item(), torch.Tensor):
                        average_batch = all_losses[:, i].item().mean()
                    else:
                        average_batch = all_losses[:, i].item()

                    logger.add_scalar('average batch ' + str(key),
                                      average_batch, global_iteration)
                    logger.add_histogram(str(key), all_losses[:, i],
                                         global_iteration)

            # Reset Summary
            statistics = []

            if is_validate and (batch_idx == input_args.validation_n_batches):
                break

            if (not is_validate) and (batch_idx
                                      == (input_args.train_n_batches)):
                break

        file_progress.close()

        return total_loss / float(batch_idx + 1), (batch_idx + 1)
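This example wraps an extra loop around the usual training step: the outer progress bar iterates over video files, and a fresh DataLoader is built just in time for each file's frames. Below is a minimal, self-contained sketch of that nested-loader pattern (the dataset class and file list are stand-ins for datasets_video.VideoFileDataJIT and the real file loader).

import torch
from torch.utils.data import DataLoader, TensorDataset

files = ['clip_a', 'clip_b']   # hypothetical file list
for data_file in files:
    frames = TensorDataset(torch.randn(16, 3, 32, 32))   # stands in for VideoFileDataJIT(input_args, data_file)
    video_loader = DataLoader(frames, batch_size=4, shuffle=True)
    for (batch,) in video_loader:
        pass  # the per-batch training step from the snippet goes here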
Example No. 7
    def train(args, epoch, start_iteration, data_loader, model, optimizer, logger, is_validate=False, offset=0):
        statistics = []
        total_loss = 0

        if is_validate:
            model.eval()
            title = "Validating Epoch {}".format(epoch)
            args.validation_n_batches = np.inf if args.validation_n_batches < 0 else args.validation_n_batches
            progress = tqdm(
                tools.IteratorTimer(data_loader),
                ncols=100,
                total=np.minimum(len(data_loader), args.validation_n_batches),
                leave=True,
                position=offset,
                desc=title,
            )
        else:
            model.train()
            title = "Training Epoch {}".format(epoch)
            args.train_n_batches = np.inf if args.train_n_batches < 0 else args.train_n_batches
            progress = tqdm(
                tools.IteratorTimer(data_loader),
                ncols=120,
                total=np.minimum(len(data_loader), args.train_n_batches),
                smoothing=0.9,
                miniters=1,
                leave=True,
                position=offset,
                desc=title,
            )

        last_log_time = progress._time()
        for batch_idx, (data, target) in enumerate(progress):

            data, target = [Variable(d) for d in data], [Variable(t) for t in target]
            if args.cuda and args.number_gpus == 1:
                data, target = [d.cuda(non_blocking=True) for d in data], [t.cuda(non_blocking=True) for t in target]

            optimizer.zero_grad() if not is_validate else None
            losses = model(data[0], target[0])
            losses = [torch.mean(loss_value) for loss_value in losses]
            loss_val = losses[0]  # Collect first loss for weight update
            total_loss += loss_val.item()
            loss_values = [v.item() for v in losses]

            # gather loss_labels; returning them directly hits a recursion limit error as it looks for variables to gather
            loss_labels = list(model.module.loss.loss_labels)

            assert not np.isnan(total_loss)

            if not is_validate and args.fp16:
                loss_val.backward()
                if args.gradient_clip:
                    torch.nn.utils.clip_grad_norm(model.parameters(), args.gradient_clip)

                params = list(model.parameters())
                for i in range(len(params)):
                    param_copy[i].grad = params[i].grad.clone().type_as(params[i]).detach()
                    param_copy[i].grad.mul_(1.0 / args.loss_scale)
                optimizer.step()
                for i in range(len(params)):
                    params[i].data.copy_(param_copy[i].data)

            elif not is_validate:
                loss_val.backward()
                if args.gradient_clip:
                    torch.nn.utils.clip_grad_norm(model.parameters(), args.gradient_clip)
                optimizer.step()

            # Update hyperparameters if needed
            global_iteration = start_iteration + batch_idx
            if not is_validate:
                tools.update_hyperparameter_schedule(args, epoch, global_iteration, optimizer)
                loss_labels.append("lr")
                loss_values.append(optimizer.param_groups[0]["lr"])

            loss_labels.append("load")
            loss_values.append(progress.iterable.last_duration)

            # Print out statistics
            statistics.append(loss_values)
            title = "{} Epoch {}".format("Validating" if is_validate else "Training", epoch)

            progress.set_description(title + " " + tools.format_dictionary_of_losses(loss_labels, statistics[-1]))

            if (((global_iteration + 1) % args.log_frequency) == 0 and not is_validate) or (
                is_validate and batch_idx == args.validation_n_batches - 1
            ):

                global_iteration = global_iteration if not is_validate else start_iteration

                logger.add_scalar(
                    "batch logs per second", len(statistics) / (progress._time() - last_log_time), global_iteration
                )
                last_log_time = progress._time()

                all_losses = np.array(statistics)

                for i, key in enumerate(loss_labels):
                    logger.add_scalar("average batch " + str(key), all_losses[:, i].mean(), global_iteration)
                    logger.add_histogram(str(key), all_losses[:, i], global_iteration)

            # Reset Summary
            statistics = []

            if is_validate and (batch_idx == args.validation_n_batches):
                break

            if (not is_validate) and (batch_idx == (args.train_n_batches)):
                break

        progress.close()

        return total_loss / float(batch_idx + 1), (batch_idx + 1)
Example No. 8
def main(args):
    dataset = args.dataset
    bsize = args.batch_size
    root = args.data_root
    cache_root = args.cache
    prediction_root = args.pre

    train_root = root + dataset + '/train'
    val_root = root + dataset + '/val'  # validation dataset

    check_root_opti = cache_root + '/opti'  # save checkpoint parameters
    if not os.path.exists(check_root_opti):
        os.mkdir(check_root_opti)

    check_root_feature = cache_root + '/feature'  # save checkpoint parameters
    if not os.path.exists(check_root_feature):
        os.mkdir(check_root_feature)

    train_loader = torch.utils.data.DataLoader(MyData(train_root,
                                                      transform=True),
                                               batch_size=bsize,
                                               shuffle=True,
                                               num_workers=4,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(MyTestData(val_root,
                                                        transform=True),
                                             batch_size=bsize,
                                             shuffle=True,
                                             num_workers=4,
                                             pin_memory=True)

    model = Feature(RCL_Module)
    model.cuda()
    criterion = nn.BCELoss()
    optimizer_feature = torch.optim.Adam(model.parameters(), lr=args.lr)

    train_losses = []

    progress = tqdm(range(args.start_epoch, args.total_epochs + 1),
                    miniters=1,
                    ncols=100,
                    desc='Overall Progress',
                    leave=True,
                    position=0)
    offset = 1

    best = 0
    evaluation = []
    result = {'epoch': [], 'F_measure': [], 'MAE': []}
    for epoch in progress:
        if (epoch != 0):
            print("\nloading parameters")
            model.load_state_dict(
                torch.load(check_root_feature + '/feature-current.pth'))
            optimizer_feature.load_state_dict(
                torch.load(check_root_opti + '/opti-current.pth'))
            #
        title = 'Training Epoch {}'.format(epoch)
        progress_epoch = tqdm(tools.IteratorTimer(train_loader),
                              ncols=120,
                              total=len(train_loader),
                              smoothing=.9,
                              miniters=1,
                              leave=True,
                              position=offset,
                              desc=title)

        for ib, (input, gt) in enumerate(progress_epoch):
            inputs = Variable(input).cuda()
            gt = Variable(gt.unsqueeze(1)).cuda()
            gt_28 = functional.interpolate(gt, size=28, mode='bilinear')
            gt_56 = functional.interpolate(gt, size=56, mode='bilinear')
            gt_112 = functional.interpolate(gt, size=112, mode='bilinear')

            msk1, msk2, msk3, msk4, msk5 = model.forward(inputs)

            loss = criterion(msk1, gt_28) + criterion(msk2, gt_28) + criterion(
                msk3, gt_56) + criterion(msk4, gt_112) + criterion(msk5, gt)
            model.zero_grad()
            loss.backward()
            optimizer_feature.step()

            train_losses.append(round(float(loss.data.cpu()), 3))
            title = '{} Epoch {}/{}'.format('Training', epoch,
                                            args.total_epochs)
            progress_epoch.set_description(title + ' ' + 'loss:' +
                                           str(loss.data.cpu().numpy()))

        filename = ('%s/feature-current.pth' % (check_root_feature))
        filename_opti = ('%s/opti-current.pth' % (check_root_opti))
        torch.save(model.state_dict(), filename)
        torch.save(optimizer_feature.state_dict(), filename_opti)

        #--------------------------validation on the test set every n epoch--------------
        if (epoch % args.val_rate == 0):
            fileroot = ('%s/feature-current.pth' % (check_root_feature))
            model.load_state_dict(torch.load(fileroot))
            val_output_root = (prediction_root + '/epoch_current')
            if not os.path.exists(val_output_root):
                os.mkdir(val_output_root)
            print("\ngenerating output images")
            for ib, (input, img_name, _) in enumerate(val_loader):
                inputs = Variable(input).cuda()
                _, _, _, _, output = model.forward(inputs)
                output = torch.sigmoid(output)  # functional.sigmoid is deprecated
                out = output.data.cpu().numpy()
                for i in range(len(img_name)):
                    imsave(os.path.join(val_output_root, img_name[i] + '.png'),
                           out[i, 0],
                           cmap='gray')

            print("\nevaluating mae....")
            F_measure, mae = get_FM(salpath=val_output_root + '/',
                                    gtpath=val_root + '/gt/')
            evaluation.append([int(epoch), float(F_measure), float(mae)])
            result['epoch'].append(int(epoch))
            result['F_measure'].append(round(float(F_measure), 3))
            result['MAE'].append(round(float(mae), 3))
            df = pd.DataFrame(result).set_index('epoch')
            df.to_csv('./result.csv')

            if (epoch == 0): best = F_measure - mae
            elif ((F_measure - mae) > best):
                best = F_measure - mae
                filename = ('%s/feature-best.pth' % (check_root_feature))
                filename_opti = ('%s/opti-best.pth' % (check_root_opti))
                torch.save(model.state_dict(), filename)
                torch.save(optimizer_feature.state_dict(), filename_opti)
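get_FM reads the saved saliency maps and ground-truth masks from directories and returns the F-measure and MAE. Below is a minimal sketch of the two metrics on in-memory arrays, assuming the common saliency conventions (beta^2 = 0.3 and an adaptive threshold of twice the mean saliency); the real implementation and its file I/O are not shown here.

import numpy as np

def f_measure_and_mae(sal, gt, beta2=0.3):
    # sal, gt: float arrays in [0, 1] of the same shape (hypothetical stand-in for get_FM).
    sal, gt = np.asarray(sal, dtype=np.float64), np.asarray(gt, dtype=np.float64)
    mae = np.abs(sal - gt).mean()
    thresh = min(2.0 * sal.mean(), 1.0)           # common adaptive threshold
    pred = (sal >= thresh).astype(np.float64)
    tp = (pred * gt).sum()
    precision = tp / (pred.sum() + 1e-8)
    recall = tp / (gt.sum() + 1e-8)
    f_measure = (1 + beta2) * precision * recall / (beta2 * precision + recall + 1e-8)
    return f_measure, mae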
Example No. 9
    def train(args,
              epoch,
              start_iteration,
              data_loader,
              model,
              optimizer,
              logger,
              is_validate=False,
              offset=0):
        statistics = []
        all_gradient_norms = []
        total_loss = 0

        if is_validate:
            model.eval()
            title = 'Validating Epoch {}'.format(epoch)
            args.validation_n_batches = np.inf if args.validation_n_batches < 0 else args.validation_n_batches
            progress = tqdm(tools.IteratorTimer(data_loader),
                            ncols=200,
                            total=np.minimum(len(data_loader),
                                             args.validation_n_batches),
                            leave=True,
                            position=offset,
                            desc=title)
        else:
            model.train()
            title = 'Training Epoch {}'.format(epoch)
            args.train_n_batches = np.inf if args.train_n_batches < 0 else args.train_n_batches
            progress = tqdm(tools.IteratorTimer(data_loader),
                            ncols=200,
                            total=np.minimum(len(data_loader),
                                             args.train_n_batches),
                            smoothing=.9,
                            miniters=1,
                            leave=True,
                            position=offset,
                            desc=title)

        last_log_time = progress._time()
        for batch_idx, (data, target) in enumerate(progress):

            data, target = [Variable(d)
                            for d in data], [Variable(t) for t in target]
            if args.cuda and args.number_gpus == 1:
                data, target = [d.cuda(non_blocking=True) for d in data
                                ], [t.cuda(non_blocking=True) for t in target]

            optimizer.zero_grad() if not is_validate else None

            losses, flow = model(data[0], target[0])
            #print('Losses shape {} {}'.format(losses[0].shape, losses[1].shape))

            losses = [torch.mean(loss_value) for loss_value in losses]
            loss_val = losses[0]  # Collect first loss for weight update
            total_loss += loss_val.item()
            loss_values = [v.item() for v in losses]
            loss_labels = list(model.module.loss.loss_labels)

            assert not np.isnan(total_loss)

            if not is_validate and args.fp16:
                loss_val.backward()
                if args.gradient_clip:
                    torch.nn.utils.clip_grad_norm(model.parameters(),
                                                  args.gradient_clip)

                params = list(model.parameters())
                for i in range(len(params)):
                    param_copy[i].grad = params[i].grad.clone().type_as(
                        params[i]).detach()
                    param_copy[i].grad.mul_(1. / args.loss_scale)
                optimizer.step()
                for i in range(len(params)):
                    params[i].data.copy_(param_copy[i].data)

            elif not is_validate:
                loss_val.backward()
                if args.gradient_clip:
                    gradient_norm = torch.nn.utils.clip_grad_norm(
                        model.parameters(), args.gradient_clip)
                    all_gradient_norms.append(gradient_norm)

                optimizer.step()

            # Update hyperparameters if needed
            global_iteration = start_iteration + batch_idx
            if not is_validate:
                tools.update_hyperparameter_schedule(args, epoch,
                                                     global_iteration,
                                                     optimizer)
                loss_labels.append('lr')
                loss_values.append(optimizer.param_groups[0]['lr'])

            loss_labels.append('load')
            loss_values.append(progress.iterable.last_duration)

            # Print out statistics
            statistics.append(loss_values)
            title = '{} Epoch {}'.format(
                'Validating' if is_validate else 'Training', epoch)

            progress.set_description(
                title + ' ' +
                tools.format_dictionary_of_losses(loss_labels, statistics[-1]))

            if ((((global_iteration + 1) % args.log_frequency) == 0
                 and not is_validate) or
                (is_validate and batch_idx == args.validation_n_batches - 1)):

                global_iteration = global_iteration if not is_validate else start_iteration

                logger.add_scalar(
                    'batch logs per second',
                    len(statistics) / (progress._time() - last_log_time),
                    global_iteration)
                last_log_time = progress._time()

                all_losses = np.array(statistics)

                for i, key in enumerate(loss_labels):
                    logger.add_scalar('average batch ' + str(key),
                                      all_losses[:,
                                                 i].mean(), global_iteration)
                    logger.add_histogram(str(key), all_losses[:, i],
                                         global_iteration)

                if args.gradient_clip:
                    logger.add_scalar('average batch gradient_norm',
                                      np.array(all_gradient_norms).mean(),
                                      global_iteration)
                    all_gradient_norms = []

                # Returns multiscale flow, get largest scale and first element in batch
                if args.multiframe or args.multiframe_two_output:
                    flow = flow_utils.flow_postprocess(flow)[0][0]

                    num_flows = len(args.frame_weights)
                    flows_scaled = [
                        cv2.resize(flow[:, :, i:i + 2], None, fx=4.0, fy=4.0)
                        for i in range(0, 2 * num_flows, 2)
                    ]

                    target = target[0].detach().cpu().numpy()
                    target_flow = np.transpose(target[0], (1, 2, 3, 0))

                    results_images = [
                        visualize_results(flows_scaled[i], target_flow[i],
                                          data[0][0] if i == 0 else None)
                        for i in range(0, num_flows)
                    ]

                    for i in range(0, num_flows):
                        logger.add_image('flow{} and target'.format(i),
                                         ToTensor()(results_images[i]),
                                         global_iteration)

                else:
                    flow = flow_utils.flow_postprocess(flow)[0][0]
                    flow_scaled = cv2.resize(flow, None, fx=4.0, fy=4.0)
                    target_flow = flow_utils.flow_postprocess(target)[0][0]
                    results_image = visualize_results(flow_scaled, target_flow,
                                                      data[0][0])
                    logger.add_image('flow and target',
                                     ToTensor()(results_image),
                                     global_iteration)

                # logger.add_histogram('flow_values', flow[0], global_iteration)

            # Reset Summary
            statistics = []

            if (is_validate and (batch_idx == args.validation_n_batches)):
                break

            if ((not is_validate) and (batch_idx == (args.train_n_batches))):
                break

        progress.close()

        return total_loss / float(batch_idx + 1), (batch_idx + 1)
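Unlike the other examples, this one keeps the value returned by gradient clipping (all_gradient_norms) and logs its mean. Below is a minimal, self-contained sketch of that pattern: torch.nn.utils.clip_grad_norm_ clips in place and returns the total gradient norm measured before clipping, which can be averaged over a logging window.

import numpy as np
import torch

model = torch.nn.Linear(8, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
all_gradient_norms = []
for _ in range(10):
    x, y = torch.randn(4, 8), torch.randn(4, 1)
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()
    norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)  # pre-clip total norm
    all_gradient_norms.append(float(norm))
    optimizer.step()
print('average batch gradient_norm:', np.mean(all_gradient_norms))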
Example No. 10
    def train(args,
              epoch,
              start_iteration,
              data_loader,
              model,
              optimizer,
              logger,
              is_validate=False,
              offset=0):
        statistics = []
        total_loss = 0

        if is_validate:
            model.eval()
            title = 'Validating Epoch {}'.format(epoch)
            #print("validation_n_batches", args.validation_n_batches)
            args.validation_n_batches = np.inf if args.validation_n_batches < 0 else args.validation_n_batches
            #print("validation_n_batches", args.validation_n_batches)
            progress = tqdm(tools.IteratorTimer(data_loader),
                            ncols=100,
                            total=np.minimum(len(data_loader),
                                             args.validation_n_batches),
                            leave=True,
                            position=offset,
                            desc=title)
        else:
            model.train()
            title = 'Training Epoch {}'.format(epoch)
            args.train_n_batches = np.inf if args.train_n_batches < 0 else args.train_n_batches
            progress = tqdm(tools.IteratorTimer(data_loader),
                            ncols=120,
                            total=np.minimum(len(data_loader),
                                             args.train_n_batches),
                            smoothing=.9,
                            miniters=1,
                            leave=True,
                            position=offset,
                            desc=title)

        last_log_time = progress._time()
        for batch_idx, (data, target) in enumerate(progress):

            data, target = [Variable(d)
                            for d in data], [Variable(t) for t in target]
            if args.cuda and args.number_gpus == 1:
                # non_blocking replaces the async= keyword, which is reserved in Python 3.7+
                data = [d.cuda(non_blocking=True) for d in data]
                target = [t.cuda(non_blocking=True) for t in target]

            optimizer.zero_grad() if not is_validate else None
            #print("this is data type",data[0].type())
            #print("\n")
            #print("this is target type",target[0].type())
            #print("\n")
            losses = model(data[0], target[0])
            losses = [torch.mean(loss_value) for loss_value in losses]  # average over the batch
            loss_val = losses[0]  # first loss is used for the weight update; the second entry is the EPE
            total_loss += loss_val.data.cpu()
            loss_values = [v.data.cpu() for v in losses]  # collect loss values

            # gather loss_labels; returning them directly hits a recursion limit error as it looks for variables to gather
            #loss_labels = [y for x in model.module.loss.loss_labels for y in x] #list(model.module.loss.loss_labels)
            loss_labels = list(model.module.loss.loss_labels)

            assert not np.isnan(total_loss.cpu())

            if not is_validate and args.fp16:
                loss_val.backward()
                if args.gradient_clip:
                    torch.nn.utils.clip_grad_norm(model.parameters(),
                                                  args.gradient_clip)

                params = list(model.parameters())
                for i in range(len(params)):
                    param_copy[i].grad = params[i].grad.clone().type_as(
                        params[i]).detach()
                    param_copy[i].grad.mul_(1. / args.loss_scale)
                optimizer.step()
                for i in range(len(params)):
                    params[i].data.copy_(param_copy[i].data)

            elif not is_validate:
                loss_val.backward()
                if args.gradient_clip:
                    torch.nn.utils.clip_grad_norm(model.parameters(),
                                                  args.gradient_clip)
                optimizer.step()

            # Update hyperparameters if needed
            global_iteration = start_iteration + batch_idx
            if not is_validate:
                tools.update_hyperparameter_schedule(args, epoch,
                                                     global_iteration,
                                                     optimizer)
                loss_labels.append('lr')
                loss_values.append(optimizer.param_groups[0]['lr'])

            loss_labels.append('load')
            loss_values.append(progress.iterable.last_duration)  #add load

            #if is_validate:
            #    print("this is EPE length", len(loss_values[:,1]))
            # Print out statistics
            #if is_validate:
            #    print(statistics)
            statistics.append(loss_values)
            #if is_validate:
            #    print(statistics)
            title = '{} Epoch {}'.format(
                'Validating' if is_validate else 'Training', epoch)

            progress.set_description(
                title + ' ' +
                tools.format_dictionary_of_losses(loss_labels, statistics[-1]))

            #if is_validate:
            #    print(batch_idx)
            # args.log_frequency == 1 by default
            if ((((global_iteration + 1) % args.log_frequency) == 0
                 and not is_validate) or is_validate
                    and batch_idx == min(args.validation_n_batches,
                                         len(data_loader) - 1)):
                #if ((((global_iteration + 1) % args.log_frequency) == 0 and not is_validate) or (is_validate and batch_idx == args.validation_n_batches - 1)):

                global_iteration = global_iteration if not is_validate else start_iteration

                logger.add_scalar(
                    'batch logs per second',
                    len(statistics) / (progress._time() - last_log_time),
                    global_iteration)
                last_log_time = progress._time()

                all_losses = np.array(statistics)
                #if is_validate:
                #    print(all_losses)

                for i, key in enumerate(loss_labels):
                    logger.add_scalar('average batch ' + str(key),
                                      all_losses[:,
                                                 i].mean(), global_iteration)
                    logger.add_histogram(str(key), all_losses[:, i],
                                         global_iteration)

                # Reset Summary
                statistics = []

            if (is_validate and (batch_idx == args.validation_n_batches)):
                break

            if ((not is_validate) and (batch_idx == (args.train_n_batches))):
                break

        progress.close()

        return total_loss / float(batch_idx + 1), (batch_idx + 1)
Example No. 11
    def train(args,
              epoch,
              data_loader,
              model,
              optimizer,
              is_validate=False,
              offset=0):
        total_loss = 0

        if is_validate:
            model.eval()
            title = 'Validating Epoch {}'.format(epoch)
            args.validation_n_batches = np.inf if args.validation_n_batches < 0 else args.validation_n_batches
            progress = tqdm(tools.IteratorTimer(data_loader),
                            ncols=100,
                            total=np.minimum(len(data_loader),
                                             args.validation_n_batches),
                            leave=True,
                            position=offset,
                            desc=title)
        else:
            model.train()
            title = 'Training Epoch {}'.format(epoch)
            args.train_n_batches = np.inf if args.train_n_batches < 0 else args.train_n_batches
            progress = tqdm(tools.IteratorTimer(data_loader),
                            ncols=120,
                            total=np.minimum(len(data_loader),
                                             args.train_n_batches),
                            smoothing=.9,
                            miniters=1,
                            leave=True,
                            position=offset,
                            desc=title)

        def torch2numpy(i):
            return i[0].numpy()

        for batch_idx, datas in enumerate(progress):
            data = np.array([list(map(down_scailing, d)) for d in datas])
            target = np.array([list(map(torch2numpy, d))[1] for d in datas])
            high_frames = np.array([list(map(torch2numpy, d)) for d in datas])

            # if args.cuda and args.number_gpus >= 1:
            #    data, target, high_frames = [d.cuda for d in data], [t.cuda for t in target], [hf.cuda for hf in high_frames]

            estimated_image = None
            for x, y in zip(data, target):
                optimizer.zero_grad() if not is_validate else None
                output, losses = model(x, y, high_frames, estimated_image)
                estimated_image = output
                loss_val = torch.mean(losses)
                total_loss += loss_val.item()

                if not is_validate:
                    loss_val.backward()
                    optimizer.step()

            title = '{} Epoch {}'.format(
                'Validating' if is_validate else 'Training', epoch)
            progress.set_description(title)

            if (is_validate and (batch_idx == args.validation_n_batches)) or \
                    ((not is_validate) and (batch_idx == (args.train_n_batches))):
                progress.close()
                break

        return total_loss / float(batch_idx + 1), (batch_idx + 1)
Example No. 12
    def train(args,
              epoch,
              start_iteration,
              data_loader,
              model,
              optimizer,
              logger,
              is_validate=False,
              offset=0):
        #print(str(model))
        statistics = []
        total_loss = 0
        debug = False
        if is_validate:
            model.eval()
            title = 'Validating Epoch {}'.format(epoch)
            args.validation_n_batches = np.inf if args.validation_n_batches < 0 else args.validation_n_batches
            progress = tqdm(tools.IteratorTimer(data_loader),
                            ncols=100,
                            total=np.minimum(len(data_loader),
                                             args.validation_n_batches),
                            leave=True,
                            position=offset,
                            desc=title)
        else:
            model.train()
            title = 'Training Epoch {}'.format(epoch)
            args.train_n_batches = np.inf if args.train_n_batches < 0 else args.train_n_batches
            progress = tqdm(tools.IteratorTimer(data_loader),
                            ncols=120,
                            total=np.minimum(len(data_loader),
                                             args.train_n_batches),
                            smoothing=.9,
                            miniters=1,
                            leave=True,
                            position=offset,
                            desc=title)

        last_log_time = progress._time()

        for batch_idx, (data, target, cdm) in enumerate(progress):
            data, target, cdm = [
                Variable(d, volatile=is_validate) for d in data
            ], [Variable(t, volatile=is_validate) for t in target
                ], [Variable(q, volatile=is_validate) for q in cdm]

            if args.cuda and args.number_gpus == 1:
                # non_blocking replaces the async= keyword, which is reserved in Python 3.7+
                data = [d.cuda(non_blocking=True) for d in data]
                target = [t.cuda(non_blocking=True) for t in target]
                cdm = [q.cuda(non_blocking=True) for q in cdm]

            if debug:
                print(
                    '****************************************************************'
                )
                print('data_0')
                print(data[0])
                print('target_0')
                print(target[0])
                print('cdm')
                print(type(cdm))
                temp1 = cdm[0].data.cpu().numpy()
                print(np.max(temp1))
                print(temp1.shape)
                print(
                    '****************************************************************'
                )

            if not is_validate:
                optimizer.zero_grad()
            losses = model(data[0], target[0])
            losses = [torch.mean(loss_value) for loss_value in losses]

            loss_val = losses[0]  # Collect first loss for weight update
            #A[batch_idx] =  loss_val.data[0]
            #np.savetxt('test_loss.out', np.array(A) , delimiter=',' , newline='\r\n'  )

            total_loss += loss_val.item()
            loss_values = [v.item() for v in losses]

            # Gather loss_labels here; returning them directly leads to a recursion limit error as autograd looks for variables to gather
            loss_labels = list(model.module.loss.loss_labels)

            assert not np.isnan(total_loss)

            if not is_validate and args.fp16:
                loss_val.backward()
                if args.gradient_clip:
                    torch.nn.utils.clip_grad_norm_(model.parameters(),
                                                   args.gradient_clip)

                # param_copy is the master copy of the parameters created
                # outside this function; copy the gradients into it, undo the
                # loss scaling, step the optimizer on the copy, then write the
                # updated weights back into the model.
                params = list(model.parameters())
                for i in range(len(params)):
                    param_copy[i].grad = params[i].grad.clone().type_as(
                        params[i]).detach()
                    param_copy[i].grad.mul_(1. / args.loss_scale)
                optimizer.step()
                for i in range(len(params)):
                    params[i].data.copy_(param_copy[i].data)

            elif not is_validate:
                loss_val.backward()
                if args.gradient_clip:
                    torch.nn.utils.clip_grad_norm_(model.parameters(),
                                                   args.gradient_clip)
                optimizer.step()

            # Update hyperparameters if needed
            global_iteration = start_iteration + batch_idx
            if not is_validate:
                tools.update_hyperparameter_schedule(args, epoch,
                                                     global_iteration,
                                                     optimizer)
                loss_labels.append('lr')
                loss_values.append(optimizer.param_groups[0]['lr'])

            loss_labels.append('load')
            loss_values.append(progress.iterable.last_duration)

            # Print out statistics
            statistics.append(loss_values)
            title = '{} Epoch {}'.format(
                'Validating' if is_validate else 'Training', epoch)

            progress.set_description(
                title + ' ' +
                tools.format_dictionary_of_losses(loss_labels, statistics[-1]))

            if ((((global_iteration + 1) % args.log_frequency) == 0
                 and not is_validate) or
                (is_validate and batch_idx == args.validation_n_batches - 1)):

                global_iteration = global_iteration if not is_validate else start_iteration

                logger.add_scalar(
                    'batch logs per second',
                    len(statistics) / (progress._time() - last_log_time),
                    global_iteration)
                last_log_time = progress._time()

                all_losses = np.array(statistics)

                for i, key in enumerate(loss_labels):
                    logger.add_scalar('average batch ' + str(key),
                                      all_losses[:,
                                                 i].mean(), global_iteration)
                    logger.add_histogram(str(key), all_losses[:, i],
                                         global_iteration)

                # Reset the statistics window after each log write
                statistics = []

            if (is_validate and (batch_idx == args.validation_n_batches)):
                break

            if ((not is_validate) and (batch_idx == (args.train_n_batches))):
                break

        progress.close()

        return total_loss / float(batch_idx + 1), (batch_idx + 1)
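
Exemplo n.º 12 still leans on Variable(volatile=...) and .cuda(async=True), both of which have been removed from current PyTorch (async is a reserved keyword since Python 3.7). A minimal sketch of the modern equivalent is shown below, assuming the model returns a list of per-scale losses as in the example; the helper names are illustrative.

import torch

def to_device(tensors, device):
    # Plain tensors replace Variable; non_blocking overlaps the host-to-device
    # copy with compute when the DataLoader uses pinned memory.
    return [t.to(device, non_blocking=True) for t in tensors]

def eval_step(model, data, target, device):
    # torch.no_grad() replaces volatile=True: no autograd graph is built.
    with torch.no_grad():
        data, target = to_device(data, device), to_device(target, device)
        losses = model(data[0], target[0])
        return torch.mean(losses[0]).item()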
Exemplo n.º 13
0
    def train(args,
              epoch,
              start_iteration,
              data_loader,
              model,
              optimizer,
              scheduler,
              logger,
              is_validate=False,
              offset=0,
              max_flows_to_show=8):
        running_statistics = None  # Initialize below when the first losses are collected
        all_losses = None  # Initialize below when the first losses are collected
        total_loss = 0

        if is_validate:
            model.eval()
            title = 'Validating Epoch {}'.format(epoch)
            args.validation_n_batches = np.inf if args.validation_n_batches < 0 else args.validation_n_batches
            progress = tqdm(tools.IteratorTimer(data_loader),
                            ncols=100,
                            total=np.minimum(len(data_loader),
                                             args.validation_n_batches),
                            leave=True,
                            position=offset,
                            desc=title)
        else:
            model.train()
            title = 'Training Epoch {}'.format(epoch)
            args.train_n_batches = np.inf if args.train_n_batches < 0 else args.train_n_batches
            progress = tqdm(tools.IteratorTimer(data_loader),
                            ncols=120,
                            total=np.minimum(len(data_loader),
                                             args.train_n_batches),
                            smoothing=.9,
                            miniters=1,
                            leave=True,
                            position=offset,
                            desc=title)

        def convert_flow_to_image(flow_converter, flows_viz):
            imgs = []
            for flow_pair in flows_viz:
                for flow in flow_pair:
                    flow = flow.numpy().transpose((1, 2, 0))
                    img = flow_converter._flowToColor(flow)
                    imgs.append(torch.from_numpy(img.transpose((2, 0, 1))))
                epe_img = torch.sqrt(
                    torch.sum(torch.pow(flow_pair[0] - flow_pair[1], 2),
                              dim=0))
                max_epe = torch.max(epe_img)
                if max_epe == 0:
                    max_epe = torch.ones(1)
                normalized_epe_img = epe_img / max_epe
                normalized_epe_img = (255 * normalized_epe_img).type(
                    torch.uint8)
                normalized_epe_img = torch.stack(
                    (normalized_epe_img, normalized_epe_img,
                     normalized_epe_img),
                    dim=0)
                imgs.append(normalized_epe_img)

                saturated_epe_img = torch.min(epe_img,
                                              5.0 * torch.ones_like(epe_img))
                saturated_epe_img = (51 * saturated_epe_img).type(torch.uint8)
                saturated_epe_img = torch.stack(
                    (saturated_epe_img, saturated_epe_img, saturated_epe_img),
                    dim=0)
                imgs.append(saturated_epe_img)
            return imgs

        max_iters = min(len(data_loader),
                        (args.validation_n_batches if
                         (is_validate and args.validation_n_batches > 0) else
                         len(data_loader)),
                        (args.train_n_batches if
                         (not is_validate
                          and args.train_n_batches > 0) else len(data_loader)))

        if is_validate:
            flow_converter = f2i.Flow()
            collect_flow_interval = int(
                np.ceil(float(max_iters) / max_flows_to_show))
            flows_viz = []

        last_log_batch_idx = 0
        last_log_time = progress._time()
        for batch_idx, (data, target) in enumerate(progress):
            global_iteration = start_iteration + batch_idx

            # Variable is a no-op alias of Tensor in modern PyTorch, so the
            # wrapping is dropped; just move the tensors onto the GPU.
            if args.cuda and args.number_gpus == 1:
                data = [d.cuda() for d in data]
                target = [t.cuda() for t in target]

            if not is_validate:
                optimizer.zero_grad()
            losses, output = model(data[0], target[0], inference=True)
            losses = [torch.mean(loss_value) for loss_value in losses]
            loss_val = losses[0]  # Collect first loss for weight update
            total_loss += loss_val.item()
            loss_values = [v.item() for v in losses]

            if is_validate and batch_idx % collect_flow_interval == 0:
                flows_viz.append(
                    (target[0][0].detach().cpu(), output[0].detach().cpu()))

            if is_validate and args.validation_log_images and batch_idx == (
                    max_iters - 1):
                imgs = convert_flow_to_image(flow_converter, flows_viz)
                imgs = torchvision_utils.make_grid(imgs,
                                                   nrow=4,
                                                   normalize=False,
                                                   scale_each=False)
                logger.add_image('target/predicted flows', imgs,
                                 global_iteration)

            # Gather loss_labels here; returning them directly leads to a recursion limit error as autograd looks for variables to gather
            loss_labels = list(model.module.loss.loss_labels)

            assert not np.isnan(total_loss)

            if not is_validate and args.fp16:
                loss_val.backward()
                if args.gradient_clip:
                    torch.nn.utils.clip_grad_norm_(model.parameters(),
                                                   args.gradient_clip)

                # param_copy is the master copy of the parameters created
                # outside this function; copy the gradients into it, undo the
                # loss scaling, step the optimizer on the copy, then write the
                # updated weights back into the model.
                params = list(model.parameters())
                for i in range(len(params)):
                    param_copy[i].grad = params[i].grad.clone().type_as(
                        params[i]).detach()
                    param_copy[i].grad.mul_(1. / args.loss_scale)
                optimizer.step()
                for i in range(len(params)):
                    params[i].data.copy_(param_copy[i].data)

            elif not is_validate:
                loss_val.backward()
                if args.gradient_clip:
                    torch.nn.utils.clip_grad_norm_(model.parameters(),
                                                   args.gradient_clip)
                optimizer.step()

            # Update hyperparameters if needed
            if not is_validate:
                scheduler.step()
                loss_labels.append('lr')
                loss_values.append(optimizer.param_groups[0]['lr'])

            loss_labels.append('load')
            loss_values.append(progress.iterable.last_duration)

            if running_statistics is None:
                running_statistics = np.array(loss_values)
                all_losses = np.zeros((len(data_loader), len(loss_values)),
                                      np.float32)
            else:
                running_statistics += np.array(loss_values)
            all_losses[batch_idx] = loss_values.copy()
            title = '{} Epoch {}'.format(
                'Validating' if is_validate else 'Training', epoch)

            progress.set_description(title + ' ' +
                                     tools.format_dictionary_of_losses(
                                         loss_labels, running_statistics /
                                         (batch_idx + 1)))

            if ((((global_iteration + 1) % args.log_frequency) == 0
                 and not is_validate) or (batch_idx == max_iters - 1)):

                global_iteration = global_iteration if not is_validate else start_iteration

                logger.add_scalar('batch logs per second',
                                  (batch_idx - last_log_batch_idx) /
                                  (progress._time() - last_log_time),
                                  global_iteration)
                last_log_time = progress._time()
                last_log_batch_idx = batch_idx

                for i, key in enumerate(loss_labels):
                    logger.add_scalar('average batch ' + str(key),
                                      all_losses[:batch_idx + 1,
                                                 i].mean(), global_iteration)
                    logger.add_histogram(str(key), all_losses[:batch_idx + 1,
                                                              i],
                                         global_iteration)

            if (is_validate and (batch_idx == args.validation_n_batches)):
                break

            if ((not is_validate) and (batch_idx == (args.train_n_batches))):
                break

        progress.close()

        return total_loss / float(batch_idx + 1), (batch_idx + 1)
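
The two preceding examples both rely on a param_copy list created outside the function for FP16 training with static loss scaling. A rough sketch of how such a master-weight copy is usually set up follows; it is illustrative only (the optimizer is assumed to have been built over param_copy), and modern code would use torch.cuda.amp instead.

import torch

def make_fp32_master_copy(model):
    # Keep an FP32 copy of every (FP16) model parameter for the optimizer to update.
    param_copy = [p.detach().clone().float() for p in model.parameters()]
    for p in param_copy:
        p.requires_grad = True
    return param_copy

def fp16_step(model, param_copy, optimizer, loss, loss_scale=1024.0):
    # Scale the loss so small gradients survive FP16, backprop, then unscale
    # into the FP32 master copy and step the optimizer on that copy.
    (loss * loss_scale).backward()
    for p, master in zip(model.parameters(), param_copy):
        master.grad = p.grad.detach().float() / loss_scale
    optimizer.step()
    # Copy the updated FP32 weights back into the FP16 model.
    with torch.no_grad():
        for p, master in zip(model.parameters(), param_copy):
            p.copy_(master)
    model.zero_grad()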
Exemplo n.º 14
0
def main(args):
    dataset = args.dataset
    bsize = args.batch_size
    root = args.data_root
    cache_root = args.cache
    prediction_root = args.pre
    
    train_root = root + dataset + '/Train'
    val_root = root + dataset + '/Test'  # validation dataset
    
    # mkdir(path[, mode]): creates a single directory (relative or absolute
    # path; the default mode is 0777). Only the last path component is
    # created, and an OSError is raised if a parent directory is missing.
    # makedirs(path[, mode]): creates the directory tree recursively (default
    # mode 0777). An OSError is raised if the leaf directory already exists or
    # cannot be created; on Windows, error 183 means it already exists. With a
    # single-level path it behaves like mkdir.
    check_root_opti = cache_root + '/opti'  # save checkpoint parameters
    if not os.path.exists(check_root_opti):
        os.makedirs(check_root_opti)
    
    check_root_feature = cache_root + '/feature'  # save checkpoint parameters
    if not os.path.exists(check_root_feature):
        os.makedirs(check_root_feature)

    check_root_model = cache_root + '/model'  # save checkpoint parameters
    if not os.path.exists(check_root_model):
        os.makedirs(check_root_model)
        
    # Build the training and validation data loaders (with transforms applied)
    train_loader = torch.utils.data.DataLoader(
        MyData(train_root, transform=True),
        batch_size=bsize, shuffle=True, num_workers=4, pin_memory=True
    )
    val_loader = torch.utils.data.DataLoader(
        MyTestData(val_root, transform=True),
        batch_size=bsize, shuffle=True, num_workers=4, pin_memory=True
    )
    
    model = Vgg(RCL_Module)
    model.cuda()
    
    criterion = nn.BCELoss()
    optimizer_feature = torch.optim.Adam(model.parameters(), lr=args.lr)
    # http://www.spytensor.com/index.php/archives/32/
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer_feature, 'max', verbose=1, patience=10
    )
    progress = tqdm(
        range(args.start_epoch, args.total_epochs + 1), miniters=1,
        ncols=100, desc='Overall Progress', leave=True, position=0
    )
    offset = 1
    
    best = 0
    evaluation = []
    result = {'epoch': [], 'F_measure': [], 'MAE': []}
    for epoch in progress:
        # ===============================TRAIN=================================
        # if epoch != 0:
        #     print("\nloading parameters")
        #     # Reload the previous run's weights and continue training
        #     model.load_state_dict(
        #         torch.load(check_root_feature + '/feature-current.pth')
        #     )
        #     # Reload the optimizer state
        #     optimizer_feature.load_state_dict(
        #         torch.load(check_root_opti + '/opti-current.pth')
        #     )
        
        title = 'Training Epoch {}'.format(epoch)
        progress_epoch = tqdm(
            tools.IteratorTimer(train_loader), ncols=120,
            total=len(train_loader), smoothing=0.9, miniters=1,
            leave=True, position=offset, desc=title
        )
        
        # Iterate over the batches within one epoch
        for ib, (input_, gt) in enumerate(progress_epoch):
            # Get the five mask predictions at different scales
            inputs = Variable(input_).cuda()
            msk1, msk2, msk3, msk4, msk5 = model.forward(inputs)
            
            gt = Variable(gt.unsqueeze(1)).cuda()
            gt_28 = functional.interpolate(gt, size=28, mode='bilinear')
            gt_56 = functional.interpolate(gt, size=56, mode='bilinear')
            gt_112 = functional.interpolate(gt, size=112, mode='bilinear')

            loss = criterion(msk1, gt_28) + criterion(msk2, gt_28) \
                   + criterion(msk3, gt_56) + criterion(msk4, gt_112) \
                   + criterion(msk5, gt)
            
            model.zero_grad()
            loss.backward()
            optimizer_feature.step()
            
            title = '{} Epoch {}/{}'.format(
                'Training', epoch, args.total_epochs
            )
            progress_epoch.set_description(
                title + ' ' + 'loss:' + str(loss.data.cpu().numpy())
            )
        
        # Save the model weights after this epoch for later use
        filename = ('%s/feature-current.pth' % check_root_feature)
        torch.save(model.state_dict(), filename)
        # Save the optimizer state
        filename_opti = ('%s/opti-current.pth' % check_root_opti)
        torch.save(optimizer_feature.state_dict(), filename_opti)
             
        # ==============================TEST===================================
        if epoch % args.val_rate == 0:
            fileroot = ('%s/feature-current.pth' % check_root_feature)
            # Restore the weights saved with torch.save(model.state_dict(), filename)
            model.load_state_dict(torch.load(fileroot))
            val_output_root = (prediction_root + '/epoch_current')
            if not os.path.exists(val_output_root):
                os.makedirs(val_output_root)
        
            print("\ngenerating output images")
            for ib, (input_, img_name, _) in enumerate(val_loader):
                inputs = Variable(input_).cuda()
                _, _, _, _, output = model.forward(inputs)
                out = output.data.cpu().numpy()
                for i in range(len(img_name)):
                    print(out[i])
                    imsave(os.path.join(val_output_root, img_name[i] + '.png'),
                           out[i, 0], cmap='gray')
            print("\nevaluating mae....")    
            
#             mean = np.array([0.485, 0.456, 0.406])
#             std = np.array([0.229, 0.224, 0.225])
#             img = Image.open("./data/ILSVRC2012_test_00000004_224x224.jpg")
#             img = np.array(img)
#             img = img.astype(np.float64) / 255
#             img -= mean
#             img /= std
#             img = img.transpose(2, 0, 1)
#             img = np.array(img)[np.newaxis, :, :, :].astype(np.float32)
#             img = torch.from_numpy(img).float()
#             inputs = Variable(img).cuda()
#             _, _, _, _, output = model.forward(inputs)
#             out = output.data.cpu().numpy()
#             print(out)
#             imsave(os.path.join(val_output_root, 'caffe2_test' + '.png'),
#                        out[0, 0], cmap='gray')
      
            # Compute the F-measure and the mean absolute error (MAE)
            F_measure, mae = get_FM(
                salpath=val_output_root + '/', gtpath=val_root + '/masks/'
            )
            evaluation.append([int(epoch), float(F_measure), float(mae)])
            result['epoch'].append(int(epoch))
            result['F_measure'].append(round(float(F_measure), 3))
            result['MAE'].append(round(float(mae), 3))
            df = pd.DataFrame(result).set_index('epoch')
            df.to_csv('./result.csv')
        
            if epoch == 0:
                best = F_measure - mae
            elif (F_measure - mae) > best:
                best = F_measure - mae
                # Save the best weights
                filename = ('%s/feature-best.pth' % check_root_feature)
                torch.save(model.state_dict(), filename)
                # Save the best optimizer state
                filename_opti = ('%s/opti-best.pth' % check_root_opti)
                torch.save(optimizer_feature.state_dict(), filename_opti)
#                 # Save the best full model
#                 filename_opti = ('%s/model-best.pth' % check_root_model)
#                 torch.save(model, filename_opti)
                print("Saved a new best checkpoint")
            # Only adjust the learning rate during the validation step
            scheduler.step(best)
            print("Finished one validation pass")