Example #1
def load_model(params, checkpoint=None, device=None):
    if params["encoder"] == "mobilenet":
        model = models.MobileNetSkipAdd(output_size=(224, 224),
                                        pretrained=True)
    elif params["encoder"] == "resnet50":
        model = models.ResNetSkipAdd(layers=50,
                                     output_size=(224, 224),
                                     pretrained=True)
    elif params["encoder"] == "resnet18":
        model = models.ResNetSkipAdd(layers=18,
                                     output_size=(224, 224),
                                     pretrained=True)
    else:
        model = models.MobileNetSkipAdd(output_size=(224, 224),
                                        pretrained=True)

    # Load model checkpoint if specified
    model_state_dict, optimizer_state_dict, params["start_epoch"] = \
        load_checkpoint(checkpoint, device)
    model_state_dict = convert_state_dict_from_gpu(model_state_dict)
    if model_state_dict:
        model.load_state_dict(model_state_dict)

    return model, optimizer_state_dict
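A minimal usage sketch for load_model, assuming the caller rebuilds an SGD optimizer and restores its state; the params keys and hyperparameters shown here are illustrative, not from the original source:

model, optimizer_state_dict = load_model(params,
                                         checkpoint=params.get("checkpoint"),
                                         device=params["device"])
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
if optimizer_state_dict:
    # resume the optimizer state saved alongside the model weights
    optimizer.load_state_dict(optimizer_state_dict)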
Example #2
def main():
    global args, best_result, output_directory, train_csv, test_csv

    # Data loading code
    print("=> creating data loaders...")
    # TODO: Test - change later
    valdir = os.path.join('..', 'data', args.data, 'val')
    testdir = args.test_path

    if args.data == 'nyudepthv2':
        from dataloaders.nyu import NYUDataset
        val_dataset = NYUDataset(valdir, split='val', modality=args.modality)
    elif args.data == 'sun3d':
        from dataloaders.sun3d import Sun3DDataset
        val_dataset = Sun3DDataset(testdir, split='val', modality=args.modality)
    else:
        raise RuntimeError('Dataset not found.')

    # set batch size to be 1 for validation
    val_loader = torch.utils.data.DataLoader(val_dataset,
        batch_size=1, shuffle=False, num_workers=args.workers, pin_memory=True)
    print("=> data loaders created.")

    # evaluation mode
    if args.evaluate:
        assert os.path.isfile(args.evaluate), \
            "=> no model found at '{}'".format(args.evaluate)
        print("=> loading model '{}'".format(args.evaluate))
        checkpoint = torch.load(args.evaluate, map_location='cpu')
        if type(checkpoint) is dict:
            args.start_epoch = checkpoint['epoch']
            best_result = checkpoint['best_result']
            model = checkpoint['model']
            print("=> loaded best model (epoch {})".format(checkpoint['epoch']))
        else:
            model = checkpoint
            args.start_epoch = 0
        output_directory = os.path.dirname(args.evaluate)
        validate(val_loader, model, args.start_epoch, write_to_file=False)
        return

    # Train from start
    if args.train:
        assert os.path.isfile(args.train), \
            "=> no model found at '{}'".format(args.train)
        print("=> loading model '{}'".format(args.train))
        model = models.MobileNetSkipAdd(output_size=10)
        args.start_epoch = 0
        output_directory = os.path.dirname(args.train)
        validate(val_loader, model, args.start_epoch, write_to_file=False)
        return
Example #3
def main():
    global args, best_result, output_directory, train_csv, test_csv

    # Data loading code
    print("=> creating data loaders...")
    valdir = os.path.join('..', 'data', args.data, 'val')

    # load a single RGB image, resize it, and scale pixel values to [0, 1]
    # (plain numpy division instead of cv2.divide, which rounds uint8 inputs)
    img = cv2.resize(cv2.imread("data/nyudepthv2/val/0000000140.jpg"), (128, 128))
    val_dataset = torch.Tensor(img / 255.)
    # convert HWC to CHW and add a batch dimension: conv layers expect NCHW
    val_dataset = val_dataset.permute(2, 0, 1).unsqueeze(0)

    # set batch size to be 1 for validation
    val_loader = torch.utils.data.DataLoader(val_dataset,
        batch_size=1, shuffle=False, num_workers=args.workers, pin_memory=True)
    print("=> data loaders created.")

    # evaluation mode
    model = models.MobileNetSkipAdd((640, 480))
    # model = model.load_state_dict(torch.load('model_best.pth.tar')['state_dict'])
    args.start_epoch = 0
    validate(val_loader, model, args.start_epoch, write_to_file=False)
Example #4
def main():
    global args, best_result, output_directory, train_csv, test_csv

    # evaluation mode
    if args.evaluate:

        # Data loading code
        print("=> creating data loaders...")
        valdir = os.path.join('..', 'data', args.data, 'val')

        if args.data == 'nyudepthv2':
            from dataloaders.nyu import NYUDataset
            val_dataset = NYUDataset(valdir,
                                     split='val',
                                     modality=args.modality)
        else:
            raise RuntimeError('Dataset not found.')

        # set batch size to be 1 for validation
        val_loader = torch.utils.data.DataLoader(val_dataset,
                                                 batch_size=1,
                                                 shuffle=False,
                                                 num_workers=args.workers,
                                                 pin_memory=True)
        print("=> data loaders created.")

        assert os.path.isfile(args.evaluate), \
            "=> no model found at '{}'".format(args.evaluate)
        print("=> loading model '{}'".format(args.evaluate))
        checkpoint = torch.load(args.evaluate)
        if type(checkpoint) is dict:
            args.start_epoch = checkpoint['epoch']
            best_result = checkpoint['best_result']
            model = checkpoint['model']
            print("=> loaded best model (epoch {})".format(
                checkpoint['epoch']))
        else:
            model = checkpoint
            args.start_epoch = 0
        output_directory = os.path.dirname(args.evaluate)
        validate(val_loader, model, args.start_epoch, write_to_file=False)
        return

    start_epoch = 0
    if args.train:
        train_loader, val_loader = create_data_loaders(args)
        print("=> creating Model ({}-{}) ...".format(args.arch, args.decoder))

        model = models.MobileNetSkipAdd(
            output_size=train_loader.dataset.output_size)
        print("=> model created.")
        optimizer = torch.optim.SGD(model.parameters(),
                                    args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)

        # model = torch.nn.DataParallel(model).cuda() # for multi-gpu training
        model = model.cuda()

    # define loss function (criterion) and optimizer
    if args.criterion == 'l2':
        criterion = criteria.MaskedMSELoss().cuda()
    elif args.criterion == 'l1':
        criterion = criteria.MaskedL1Loss().cuda()

    # create the results folder if it does not already exist
    output_directory = utils.get_output_directory(args)
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    train_csv = os.path.join(output_directory, 'train.csv')
    test_csv = os.path.join(output_directory, 'test.csv')
    best_txt = os.path.join(output_directory, 'best.txt')

    # create new csv files with only header
    if not args.resume:
        with open(train_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
        with open(test_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()

    # training loop runs regardless of args.resume (it was mistakenly
    # nested under the csv-creation branch above)
    for epoch in range(start_epoch, args.epochs):
        utils.adjust_learning_rate(optimizer, epoch, args.lr)
        train(train_loader, model, criterion, optimizer,
              epoch)  # train for one epoch
        result, img_merge = validate(val_loader, model,
                                     epoch)  # evaluate on validation set

        # remember best rmse and save checkpoint
        is_best = result.rmse < best_result.rmse
        if is_best:
            best_result = result
            with open(best_txt, 'w') as txtfile:
                txtfile.write(
                    "epoch={}\nmse={:.3f}\nrmse={:.3f}\nabsrel={:.3f}\nlg10={:.3f}\nmae={:.3f}\ndelta1={:.3f}\nt_gpu={:.4f}\n"
                    .format(epoch, result.mse, result.rmse, result.absrel,
                            result.lg10, result.mae, result.delta1,
                            result.gpu_time))
            if img_merge is not None:
                img_filename = output_directory + '/comparison_best.png'
                utils.save_image(img_merge, img_filename)

        utils.save_checkpoint(
            {
                'args': args,
                'epoch': epoch,
                'arch': args.arch,
                'model': model,
                'best_result': best_result,
                'optimizer': optimizer,
            }, is_best, epoch, output_directory)
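The csv.DictWriter calls above reference a module-level fieldnames list that none of these snippets define. A plausible definition, inferred from the metrics written to best.txt (an assumption, not necessarily the repository's exact list):

# assumed CSV header, mirroring the metrics logged to best.txt;
# the exact field list may differ in the source repository
fieldnames = ['mse', 'rmse', 'absrel', 'lg10', 'mae', 'delta1', 'gpu_time']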
Example #5
def main(args):

    print("Loading config file: ", args.config)
    params = utils.load_config_file(args.config)
    params["test_dataset_paths"] = utils.format_dataset_path(
        params["test_dataset_paths"])

    if args.existing_experiment:
        experiment = ExistingExperiment(
            api_key="jBFVYFo9VUsy0kb0lioKXfTmM",
            previous_experiment=args.existing_experiment)
    else:
        experiment = Experiment(api_key="jBFVYFo9VUsy0kb0lioKXfTmM",
                                project_name="fastdepth")

    # Data loading code
    print("Creating data loaders...")
    if args.nyu:
        from dataloaders.nyu import NYUDataset
        val_dataset = NYUDataset(params["test_dataset_paths"], split='val')
    else:
        val_dataset = Datasets.FastDepthDataset(params["test_dataset_paths"],
                                                split='val',
                                                depth_min=params["depth_min"],
                                                depth_max=params["depth_max"],
                                                input_shape_model=(224, 224))

    # set batch size to be 1 for validation
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=1,
                                             shuffle=True,
                                             num_workers=params["num_workers"],
                                             pin_memory=True)

    # Set GPU
    params["device"] = torch.device(
        "cuda:{}".format(params["device"])
        if params["device"] >= 0 and torch.cuda.is_available() else "cpu")
    print("Using device", params["device"])

    print("Loading model '{}'".format(args.model))
    if not args.nyu:
        model, _ = utils.load_model(params, args.model, params["device"])
    else:
        # Maintain compatibility for fastdepth NYU model format
        state_dict = torch.load(args.model, map_location=params["device"])
        model = models.MobileNetSkipAdd(output_size=(224, 224),
                                        pretrained=True)
        model.load_state_dict(state_dict)
        params["start_epoch"] = 0

    model.to(params["device"])

    # Create output directory
    output_directory = os.path.join(os.path.dirname(args.model), "images")
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    params["experiment_dir"] = output_directory
    print("Saving results to " + output_directory)

    evaluate(params, val_loader, model, experiment)
Example #6
args = parser.parse_args()

model_path = args.model

if args.nyu:
    checkpoint = torch.load(args.model)
    model = checkpoint['model']
else:
    model_state_dict, _, _ = utils.load_checkpoint(args.model)
    model_state_dict = utils.convert_state_dict_from_gpu(model_state_dict)
    if args.resnet18:
        model = models.ResNetSkipAdd(layers=18,
                                     output_size=(224, 224),
                                     pretrained=True)
    else:
        model = models.MobileNetSkipAdd(output_size=(224, 224),
                                        pretrained=True)
    if model_state_dict:
        model.load_state_dict(model_state_dict)

if args.save_gpu:
    print("Saving model on GPU")
    model.to(torch.device("cuda:0"))
    example = torch.rand(1, 3, 224, 224).cuda()
else:
    print("Saving model on CPU")
    model.to(torch.device("cpu"))
    example = torch.rand(1, 3, 224, 224)

model.eval()

# Use torch.jit.trace to generate a torch.jit.ScriptModule via tracing.
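# The snippet ends just before the tracing call. A minimal completion
# sketch (the output filename below is illustrative, not from the source):
traced_script_module = torch.jit.trace(model, example)
traced_script_module.save("fastdepth_traced.pt")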
Example #7
def main():
    global args, best_result, output_directory, train_csv, test_csv


    # evaluation mode
    start_epoch = 0
    if args.evaluate:
        assert os.path.isfile(args.evaluate), \
            "=> no model found at '{}'".format(args.evaluate)
        print("=> loading model '{}'".format(args.evaluate))
        checkpoint = torch.load(args.evaluate)
        if type(checkpoint) is dict:
            args = checkpoint['args']
            args.start_epoch = checkpoint['epoch']
            best_result = checkpoint['best_result']
            model = checkpoint['model']
            print("=> loaded best model (epoch {})".format(checkpoint['epoch']))
        else:
            model = checkpoint
            args.start_epoch = 0
        output_directory = os.path.dirname(args.evaluate)
        _, val_loader = create_data_loaders(args)
        validate(val_loader, model, args.start_epoch, write_to_file=False)
        return

    # optionally resume from a checkpoint
    elif args.resume:
        chkpt_path = args.resume
        assert os.path.isfile(chkpt_path), \
            "=> no checkpoint found at '{}'".format(chkpt_path)
        print("=> loading checkpoint '{}'".format(chkpt_path))
        checkpoint = torch.load(chkpt_path)
        if type(checkpoint) is dict:
            args = checkpoint['args']
            start_epoch = checkpoint['epoch']
            best_result = checkpoint['best_result']
            model = checkpoint['model']
            optimizer = checkpoint['optimizer']
        else:
            model = checkpoint
            args.start_epoch = 0
            # a bare model checkpoint carries no optimizer state, so rebuild it
            optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                        momentum=args.momentum,
                                        weight_decay=args.weight_decay)
        output_directory = os.path.dirname(chkpt_path)
        print("=> loaded checkpoint (epoch {})".format(checkpoint['epoch']))
        train_loader, val_loader = create_data_loaders(args)
        #args.resume = True

    # create new model
    else:
        train_loader, val_loader = create_data_loaders(args)
        print("=> creating Model ({}) ...".format(args.arch))
        in_channels = len(args.modality)
        if args.arch == 'MobileNet':
            model = models.MobileNet(decoder=args.decoder, output_size=train_loader.dataset.output_size,
                in_channels=in_channels, pretrained=args.pretrained)
            print(model)
        elif args.arch == 'MobileNetSkipAdd':
            model = models.MobileNetSkipAdd(output_size=train_loader.dataset.output_size, pretrained=args.pretrained)
            #print(model)
        print("=> model created.")
        optimizer = torch.optim.SGD(model.parameters(), args.lr, \
            momentum=args.momentum, weight_decay=args.weight_decay)

        # model = torch.nn.DataParallel(model).cuda() # for multi-gpu training
        model = model.cuda()
        #summary(model,(3,224,224))
    # define loss function (criterion) and optimizer
    if args.criterion == 'l2':
        criterion = criteria.MaskedMSELoss().cuda()
    elif args.criterion == 'l1':
        criterion = criteria.MaskedL1Loss().cuda()

    # create results folder, if not already exists
    output_directory = utils.get_output_directory(args)
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    train_csv = os.path.join(output_directory, 'train.csv')
    test_csv = os.path.join(output_directory, 'test.csv')
    best_txt = os.path.join(output_directory, 'best.txt')

    # create new csv files with only header
    if not args.resume:
        with open(train_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
        with open(test_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()

    for epoch in range(start_epoch, args.epochs):
        utils.adjust_learning_rate(optimizer, epoch, args.lr)
        train(train_loader, model, criterion, optimizer, epoch) # train for one epoch
        result, img_merge = validate(val_loader, model, epoch) # evaluate on validation set

        # remember best rmse and save checkpoint
        is_best = result.rmse < best_result.rmse
        if is_best:
            best_result = result
            with open(best_txt, 'w') as txtfile:
                txtfile.write("epoch={}\nmse={:.3f}\nrmse={:.3f}\nabsrel={:.3f}\nlg10={:.3f}\nmae={:.3f}\ndelta1={:.3f}\nt_gpu={:.4f}\n".
                    format(epoch, result.mse, result.rmse, result.absrel, result.lg10, result.mae, result.delta1, result.gpu_time))
            if img_merge is not None:
                img_filename = output_directory + '/comparison_best.png'
                utils.save_image(img_merge, img_filename)

        utils.save_checkpoint({
            'args': args,
            'epoch': epoch,
            'arch': args.arch,
            'model': model,
            'best_result': best_result,
            'optimizer' : optimizer,
        }, is_best, epoch, output_directory)
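utils.save_checkpoint is called by most of these training loops but never shown. A minimal sketch of the usual pattern; the filenames and the copy-on-best step are assumptions:

import os
import shutil

import torch


def save_checkpoint(state, is_best, epoch, output_directory):
    # sketch: persist the full training state each epoch and keep a
    # separate copy of the best-performing model
    checkpoint_filename = os.path.join(
        output_directory, 'checkpoint-{}.pth.tar'.format(epoch))
    torch.save(state, checkpoint_filename)
    if is_best:
        shutil.copyfile(checkpoint_filename,
                        os.path.join(output_directory, 'model_best.pth.tar'))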
Example #8
def main():
    global args, best_result, output_directory, train_csv, test_csv
    # Random seed setting
    torch.manual_seed(16)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # Data loading code
    print("=> creating data loaders...")
    data_dir = '/media/vasp/Data2/Users/vmhp806/depth-estimation'
    valdir = os.path.join(data_dir, 'data', args.data, 'val')
    traindir = os.path.join(data_dir, 'data', args.data, 'train')

    if args.data == 'nyu' or args.data == 'uow_dataset':
        from dataloaders.nyu import NYUDataset
        val_dataset = NYUDataset(valdir, split='val', modality=args.modality)
        #val_dataset = nc.SafeDataset(val_dataset)
        train_dataset = NYUDataset(traindir,
                                   split='train',
                                   modality=args.modality)
        #train_dataset = nc.SafeDataset(train_dataset)
    else:
        raise RuntimeError('Dataset not found.')

    # set batch size to be 1 for validation
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True,
                                             collate_fn=my_collate)
    if not args.evaluate:
        train_loader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=args.batch_size,
                                                   shuffle=False,
                                                   num_workers=args.workers,
                                                   pin_memory=True,
                                                   collate_fn=my_collate)
    print("=> data loaders created.")

    # evaluation mode
    if args.evaluate:
        assert os.path.isfile(args.evaluate), \
        "=> no model found at '{}'".format(args.evaluate)
        print("=> loading model '{}'".format(args.evaluate))
        checkpoint = torch.load(args.evaluate)
        if type(checkpoint) is dict:
            args.start_epoch = checkpoint['epoch']
            best_result = checkpoint['best_result']
            model = checkpoint['model']
            print("=> loaded best model (epoch {})".format(
                checkpoint['epoch']))
        else:
            model = checkpoint
            args.start_epoch = 0
        output_directory = os.path.dirname(args.evaluate)
        if args.predict:
            predict(val_loader, model, output_directory)
        else:
            validate(val_loader, model, args.start_epoch, write_to_file=False)
        return
    # optionally resume from a checkpoint
    elif args.resume:
        chkpt_path = args.resume
        assert os.path.isfile(chkpt_path), \
            "=> no checkpoint found at '{}'".format(chkpt_path)
        print("=> loading checkpoint " "'{}'".format(chkpt_path))
        checkpoint = torch.load(chkpt_path)
        #args = checkpoint['args']
        start_epoch = checkpoint['epoch'] + 1
        best_result = checkpoint['best_result']
        model = checkpoint['model']
        # the lr passed here is a placeholder; load_state_dict below
        # restores the learning rate saved in the checkpoint
        optimizer = torch.optim.SGD(model.parameters(), lr=0.9)
        optimizer.load_state_dict(checkpoint['optimizer'])
        #optimizer = checkpoint['optimizer']
        output_directory = os.path.dirname(os.path.abspath(chkpt_path))
        print("=> loaded checkpoint (epoch {})".format(checkpoint['epoch']))
        args.resume = True
    else:
        print("=> creating Model ({} - {}) ...".format(args.arch,
                                                       args.decoder))
        #in_channels = len(args.modality)
        if args.arch == 'mobilenet-skipconcat':
            model = models.MobileNetSkipConcat(
                decoder=args.decoder,
                output_size=train_loader.dataset.output_size)
        elif args.arch == 'mobilenet-skipadd':
            model = models.MobileNetSkipAdd(
                decoder=args.decoder,
                output_size=train_loader.dataset.output_size)
        elif args.arch == 'resnet18-skipconcat':
            model = models.ResNetSkipConcat(
                layers=18,
                decoder=args.decoder,
                output_size=train_loader.dataset.output_size)
        elif args.arch == 'resnet18-skipadd':
            model = models.ResNetSkipAdd(
                layers=18, output_size=train_loader.dataset.output_size)
        else:
            raise Exception('Invalid architecture')
        print("=> model created.")
        optimizer = torch.optim.SGD(model.parameters(),
                                    args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)

        # model = torch.nn.DataParallel(model).cuda() # for multi-gpu training
        model = model.cuda()
        start_epoch = 0
    # define loss function (criterion) and optimizer
    if args.criterion == 'l2':
        criterion = criteria.MaskedMSELoss().cuda()
    elif args.criterion == 'l1':
        criterion = criteria.MaskedL1Loss().cuda()

    # create results folder, if not already exists
    output_directory = utils.get_output_directory(args)
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    train_csv = os.path.join(output_directory, 'train.csv')
    test_csv = os.path.join(output_directory, 'test.csv')
    best_txt = os.path.join(output_directory, 'best.txt')

    # create new csv files with only header
    if not args.resume:
        with open(train_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
        with open(test_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
    #start_epoch = 0
    for epoch in range(start_epoch, args.epochs):
        utils.adjust_learning_rate(optimizer, epoch, args.lr)
        train(train_loader, model, criterion, optimizer,
              epoch)  # train for one epoch
        result, img_merge = validate(
            val_loader, model, epoch,
            write_to_file=True)  # evaluate on validation set

        # remember best rmse and save checkpoint
        is_best = result.rmse < best_result.rmse
        if is_best:
            best_result = result
            with open(best_txt, 'w') as txtfile:
                txtfile.write(
                    "epoch={}\nmse={:.3f}\nrmse={:.3f}\nabsrel={:.3f}\nlg10={:.3f}\nmae={:.3f}\ndelta1={:.3f}\nt_gpu={:.4f}\n"
                    .format(epoch, result.mse, result.rmse, result.absrel,
                            result.lg10, result.mae, result.delta1,
                            result.gpu_time))
            if img_merge is not None:
                img_filename = output_directory + '/comparison_best.png'
                utils.save_image(img_merge, img_filename)

        utils.save_checkpoint(
            {
                'args': args,
                'epoch': epoch,
                #'arch': args.arch,
                'model': model,
                'best_result': best_result,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            epoch,
            output_directory)
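my_collate is passed as collate_fn above but not defined in the snippet; together with the commented-out nc.SafeDataset wrappers it suggests corrupt samples are filtered out. A sketch under that assumption:

from torch.utils.data.dataloader import default_collate


def my_collate(batch):
    # assumed behavior: drop samples the dataset returned as None,
    # then fall back to the default batching logic
    batch = [sample for sample in batch if sample is not None]
    return default_collate(batch)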
Example #9
def main():
    global args, best_result, output_directory, train_csv, test_csv
    print(args)
    start = 0

    # evaluation mode
    if args.evaluate:
        datasets = configuration_file.datasets_path
        valdir = os.path.join(datasets, args.data, 'val')
        print("Validation directory ", valdir)
        if args.data == 'nyudepthv2':
            from dataloaders.nyu import NYUDataset
            val_dataset = NYUDataset(valdir,
                                     split='val',
                                     modality=args.modality)
        else:
            raise RuntimeError('Dataset not found.')

        #set batch size to be 1 for validation
        val_loader = torch.utils.data.DataLoader(val_dataset,
                                                 batch_size=1,
                                                 shuffle=False,
                                                 num_workers=args.workers,
                                                 pin_memory=True)
        print("=> validation loaders created.")
        assert os.path.isfile(args.evaluate), \
            "=> no model found at '{}'".format(args.evaluate)
        print("=> loading model '{}'".format(args.evaluate))
        checkpoint = torch.load(args.evaluate)
        if type(checkpoint) is dict:
            args.start_epoch = checkpoint['epoch']
            best_result = checkpoint['best_result']
            model = checkpoint['model']
            print("=> loaded best model (epoch {})".format(
                checkpoint['epoch']))
        else:
            model = checkpoint
            args.start_epoch = 0
        output_directory = os.path.dirname(args.evaluate)
        validate(val_loader, model, args.start_epoch, write_to_file=False)

        return

    # resume from a particular check point
    elif args.resume:
        chkpt_path = args.resume
        assert os.path.isfile(
            chkpt_path), "=> no checkpoint found at '{}'".format(chkpt_path)
        print("=> loading checkpoint '{}'".format(chkpt_path))
        checkpoint = torch.load(chkpt_path)
        args = checkpoint['args']
        start_epoch = checkpoint['epoch'] + 1  # load epoch number
        start = start_epoch  # resume from the checkpoint epoch
        best_result = checkpoint['best_result']  # load best result
        model = checkpoint['model']  # load model
        optimizer = checkpoint['optimizer']  # load optimizer
        output_directory = os.path.dirname(os.path.abspath(chkpt_path))
        print("=> loaded checkpoint (epoch {})".format(checkpoint['epoch']))
        train_loader, val_loader = create_data_loaders(
            args)  # create data loader
        args.resume = True

    # create new model if checkpoint does not exist
    elif args.train:
        train_loader, val_loader = create_data_loaders(
            args)  # load train and validation data
        print("=> creating Model ({}-{}) ...".format(args.arch, args.decoder))
        in_channels = len(args.modality)
        if args.arch == 'MobileNet':  # if encoder is MobileNet
            model = models.MobileNetSkipAdd(
                output_size=train_loader.dataset.output_size
            )  # MobileNet model is created
        else:
            model = models.MobileNetSkipAdd(
                output_size=train_loader.dataset.output_size
            )  # by default MobileNet

        print("=> model created.")
        optimizer = torch.optim.SGD(model.parameters(), args.lr, \
                                    momentum=args.momentum, weight_decay=args.weight_decay) # configure optimizer

        if configuration_file.GPU:
            if configuration_file.MULTI_GPU:  # training on multiple GPUs
                model = torch.nn.DataParallel(model).cuda()
            else:  # training on a single GPU
                model = model.cuda()

    # define loss function (criterion)
    if args.criterion == 'l2':
        if configuration_file.GPU:
            criterion = MaskedMSELoss().cuda()
        else:
            criterion = MaskedMSELoss()
    elif args.criterion == 'l1':
        if configuration_file.GPU:
            criterion = MaskedL1Loss().cuda()
        else:
            criterion = MaskedL1Loss()

    # create results folder, if not already exists
    output_directory = utils.get_output_directory(args)

    if not os.path.exists(output_directory):  # create new directory
        os.makedirs(output_directory)
    train_csv = os.path.join(output_directory,
                             'train.csv')  # store training result
    test_csv = os.path.join(output_directory, 'test.csv')  # store test result
    best_txt = os.path.join(output_directory, 'best.txt')  # store best result

    # create new csv files with only header
    if not args.resume:
        with open(train_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
        with open(test_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()

    # training starts from here
    for epoch in range(start, args.epochs):
        utils.adjust_learning_rate(optimizer, epoch, args.lr)
        train(train_loader, model, criterion, optimizer,
              epoch)  # train for one epoch
        result, img_merge = validate(val_loader, model,
                                     epoch)  # evaluate on validation set

        # remember best rmse and save checkpoint
        is_best = result.rmse < best_result.rmse  # compare result of the current epoch and best result
        if is_best:
            best_result = result
            with open(best_txt, 'w') as txtfile:
                txtfile.write(
                    "epoch={}\nmse={:.3f}\nrmse={:.3f}\nabsrel={:.3f}\nlg10={:.3f}\nmae={:.3f}\ndelta1={:.3f}\nt_gpu={:.4f}\n"
                    .format(epoch, result.mse, result.rmse, result.absrel,
                            result.lg10, result.mae, result.delta1,
                            result.gpu_time))
            if img_merge is not None:
                img_filename = output_directory + '/comparison_best.png'
                utils.save_image(img_merge, img_filename)

        utils.save_checkpoint(
            {
                'args': args,
                'epoch': epoch,
                'arch': args.arch,
                'model': model,
                'best_result': best_result,
                'optimizer': optimizer,
            }, is_best, epoch, output_directory)
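utils.adjust_learning_rate is called at the top of every epoch in these loops. A common step-decay sketch; the decay factor and interval are assumptions, not the repository's exact schedule:

def adjust_learning_rate(optimizer, epoch, lr_init):
    # assumed schedule: decay the initial learning rate by 10x every 5 epochs
    lr = lr_init * (0.1 ** (epoch // 5))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr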
Example #10
def main():
    global args, best_result, output_directory, train_csv, test_csv
    # Data loading code
    datasets = '/content/drive/MyDrive'
    #valdir = os.path.join(datasets, 'Datasets', args.data, 'val')
    #valdir = '/content/drive/MyDrive/Datasets/Nyudepthv2Previous/nyudepthv2/val/official/'
    valdir = os.path.join(datasets, 'Datasets', 'Nyudepthv2Previous',
                          args.data, 'val')

    if args.data == 'nyudepthv2':
        from dataloaders.nyu import NYUDataset
        val_dataset = NYUDataset(valdir, split='val', modality=args.modality)

    elif args.data == 'kitti':
        from dataloaders.kitti import KITTIDataset
        val_dataset = KITTIDataset(valdir, type='val', modality=args.modality)
    else:
        raise RuntimeError('Dataset not found.')

    # set batch size to be 1 for validation
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)
    print("=> validation loaders created.")

    # evaluation mode
    if args.evaluate:
        assert os.path.isfile(args.evaluate), \
        "=> no model found at '{}'".format(args.evaluate)
        print("=> loading model '{}'".format(args.evaluate))
        checkpoint = torch.load(args.evaluate)
        if type(checkpoint) is dict:
            args.start_epoch = checkpoint['epoch']
            best_result = checkpoint['best_result']
            model = checkpoint['model']
            print("=> loaded best model (epoch {})".format(
                checkpoint['epoch']))
        else:
            model = checkpoint
            args.start_epoch = 0
        output_directory = os.path.dirname(args.evaluate)
        validate(val_loader, model, args.start_epoch, write_to_file=False)

        return

    # training mode
    # resume from a check point
    elif args.resume:
        print("Resume")
        chkpt_path = args.resume
        assert os.path.isfile(
            chkpt_path), "=> no checkpoint found at '{}'".format(chkpt_path)
        print("=> loading checkpoint '{}'".format(chkpt_path))
        checkpoint = torch.load(chkpt_path)
        args = checkpoint['args']
        start_epoch = checkpoint['epoch'] + 1
        best_result = checkpoint['best_result']
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']
        output_directory = os.path.dirname(os.path.abspath(chkpt_path))
        print("=> loaded checkpoint (epoch {})".format(checkpoint['epoch']))
        train_loader, val_loader = create_data_loaders(args)
        args.resume = True

    # create new model
    elif args.train:
        print("Inside Train 1----------->")
        train_loader, val_loader = create_data_loaders(args)
        print("=> creating Model ({}-{}) ...".format(args.arch, args.decoder))
        in_channels = len(args.modality)
        if args.arch == 'MobileNet':
            #model = models.MobileNetSkipAdd(output_size=train_loader.dataset.output_size)
            model = ResNetSkipAdd(layers=50,
                                  output_size=train_loader.dataset.output_size,
                                  in_channels=in_channels,
                                  pretrained=args.pretrained)
            #print("Mobile Net model ",str(train_loader.dataset.output_size)
        elif args.arch == 'resnet50':
            model = ResNet(layers=50,
                           decoder=args.decoder,
                           output_size=train_loader.dataset.output_size,
                           in_channels=in_channels,
                           pretrained=args.pretrained)

        else:
            model = models.MobileNetSkipAdd(
                output_size=train_loader.dataset.output_size)  # by default MobileNet

        print("=> model created.")
        optimizer = torch.optim.SGD(model.parameters(), args.lr, \
                                    momentum=args.momentum, weight_decay=args.weight_decay)

        # model = torch.nn.DataParallel(model).cuda() # for multi-gpu training
        model = model.cuda()

    # define loss function (criterion) and optimizer
    if args.criterion == 'l2':
        criterion = MaskedMSELoss().cuda()
    elif args.criterion == 'l1':
        criterion = MaskedL1Loss().cuda()

    # create the results folder if it does not already exist
    print("Arguments ")
    print(args)
    output_directory = utils.get_output_directory(args)

    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    train_csv = os.path.join(output_directory, 'train.csv')
    test_csv = os.path.join(output_directory, 'test.csv')
    best_txt = os.path.join(output_directory, 'best.txt')

    # create new csv files with only header
    if not args.resume:
        with open(train_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
        with open(test_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()

    # training starts from here
    if args.train:
        print("Training...........(args.train)", args.train)
        start = 0
        for epoch in range(start, args.epochs):
            utils.adjust_learning_rate(optimizer, epoch, args.lr)
            train(train_loader, model, criterion, optimizer,
                  epoch)  # train for one epoch
            result, img_merge = validate(val_loader, model,
                                         epoch)  # evaluate on validation set

            # remember best rmse and save checkpoint
            is_best = result.rmse < best_result.rmse
            if is_best:
                best_result = result
                with open(best_txt, 'w') as txtfile:
                    txtfile.write(
                        "epoch={}\nmse={:.3f}\nrmse={:.3f}\nabsrel={:.3f}\nlg10={:.3f}\nmae={:.3f}\ndelta1={:.3f}\nt_gpu={:.4f}\n"
                        .format(epoch, result.mse, result.rmse, result.absrel,
                                result.lg10, result.mae, result.delta1,
                                result.gpu_time))
                if img_merge is not None:
                    img_filename = output_directory + '/comparison_best.png'
                    utils.save_image(img_merge, img_filename)

            utils.save_checkpoint(
                {
                    'args': args,
                    'epoch': epoch,
                    'arch': args.arch,
                    'model': model,
                    'best_result': best_result,
                    'optimizer': optimizer,
                }, is_best, epoch, output_directory)

    elif args.resume:
        print("Resume......................")
        start = start_epoch
        for epoch in range(start, args.epochs):
            print("Epoch inside resume ", epoch)
            utils.adjust_learning_rate(optimizer, epoch, args.lr)
            train(train_loader, model, criterion, optimizer,
                  epoch)  # train for one epoch
            result, img_merge = validate(val_loader, model,
                                         epoch)  # evaluate on validation set

            # remember best rmse and save checkpoint
            is_best = result.rmse < best_result.rmse
            if is_best:
                best_result = result
                with open(best_txt, 'w') as txtfile:
                    txtfile.write(
                        "epoch={}\nmse={:.3f}\nrmse={:.3f}\nabsrel={:.3f}\nlg10={:.3f}\nmae={:.3f}\ndelta1={:.3f}\nt_gpu={:.4f}\n"
                        .format(epoch, result.mse, result.rmse, result.absrel,
                                result.lg10, result.mae, result.delta1,
                                result.gpu_time))
                if img_merge is not None:
                    img_filename = output_directory + '/comparison_best.png'
                    utils.save_image(img_merge, img_filename)

            utils.save_checkpoint(
                {
                    'args': args,
                    'epoch': epoch,
                    'arch': args.arch,
                    'model': model,
                    'best_result': best_result,
                    'optimizer': optimizer,
                }, is_best, epoch, output_directory)