Code example #1
def get_model(model_path, num_classes):
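    """Build the model, wrap it in DataParallel, and load weights from the checkpoint at model_path."""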

    model = params.model(3, num_classes)
    model = nn.DataParallel(model, device_ids=params.device_ids).cuda()

    state = torch.load(str(model_path))['model']
    model.load_state_dict(state)

    return model
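
For context, here is a minimal usage sketch of get_model; the checkpoint path, class count, and input size are illustrative assumptions, and it expects params and the checkpoint format shown in the snippet above.

# Hypothetical usage; the path and tensor shape are illustrative only.
import torch

model = get_model('runs/fold_0/best_model.pt', num_classes=2)
model.eval()                                 # switch to inference mode
dummy = torch.rand(1, 3, 1024, 1280).cuda()  # one RGB frame
with torch.no_grad():
    logits = model(dummy)                    # (1, num_classes, h, w) class scores
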
Code example #2
File: train.py Project: carrenD/MF-TAPNet
def main(fold):
    # check cuda available
    assert torch.cuda.is_available()

    # when the input dimensions do not change, enable this flag to speed up training
    cudnn.benchmark = True

    num_classes = config.problem_class[params.problem_type]
    # inputs are RGB images of size 3 * h * w
    # outputs are binary or multi-class segmentation masks
    model = params.model(in_channels=3, num_classes=num_classes)
    # data parallel
    model = nn.DataParallel(model, device_ids=params.device_ids).cuda()
    # loss function
    if num_classes == 2:
        loss = LossBinary(jaccard_weight=params.jaccard_weight)
        valid_metric = validation_binary
    else:
        loss = LossMulti(num_classes=num_classes, jaccard_weight=params.jaccard_weight)
        valid_metric = validation_multi


    # trainset transform
    train_transform = Compose([
        Resize(height=params.train_height, width=params.train_width, p=1),
        Normalize(p=1)
    ], p=1)

    # validset transform
    valid_transform = Compose([
        Resize(height=params.valid_height, width=params.valid_width, p=1),
        Normalize(p=1)
    ], p=1)

    # train/valid filenames
    train_filenames, valid_filenames = trainval_split(fold)
    print('num of train / validation files = {} / {}'.format(len(train_filenames), len(valid_filenames)))

    # train dataloader
    train_loader = DataLoader(
        dataset=RobotSegDataset(train_filenames, transform=train_transform),
        shuffle=True,
        num_workers=params.num_workers,
        batch_size=params.batch_size,
        pin_memory=True
    )
    # valid dataloader
    valid_loader = DataLoader(
        dataset=RobotSegDataset(valid_filenames, transform=valid_transform),
        shuffle=True,
        num_workers=params.num_workers,
        batch_size=len(params.device_ids),  # at validation time, one image per GPU
        pin_memory=True
    )

    train(
        model=model,
        loss_func=loss,
        train_loader=train_loader,
        valid_loader=valid_loader,
        valid_metric=valid_metric,
        fold=fold,
        num_classes=num_classes
    )
Code example #3
File: kalman.py Project: hbishop1/e2e_self_driving
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

    transforms = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor()
    ])
    
    test_dataset = params.test_dset(os.path.join(params.data_directory, 'valid'),
                                    transform=transforms, return_restart=True)
    train_dataset = params.train_dset(os.path.join(params.data_directory, 'train'),
                                      transform=transforms)
    dataloader = torch.utils.data.DataLoader(
        test_dataset,
        shuffle=False,
        batch_size=1)

    # note: inspect.getargspec was removed in Python 3.11; getfullargspec is the drop-in replacement
    if 'stereo' in inspect.getfullargspec(params.model).args:
        Model = params.model(stereo=params.stereo, dropout_conv=params.dropout_conv,
                             dropout_fc=params.dropout_fc).to(device)
    else:
        Model = params.model(dropout_conv=params.dropout_conv,
                             dropout_fc=params.dropout_fc).to(device)

    Model.load_state_dict(torch.load(params.kalman_model_path, map_location=device))

    print(f'Time started: {time}')  
    print(f'Number of network parameters: {len(torch.nn.utils.parameters_to_vector(Model.parameters()))}')

    logs = {
        'mae_kalman': [], 'rmse_kalman': [], 'whiteness_kalman': [],
        'mae_no_kalman': [], 'rmse_no_kalman': [], 'whiteness_no_kalman': [],
        'kalman_prediction': [], 'no_kalman_prediction': [],
        'gt': [], 'sigma': []
    }

    Model.eval()   # Set model to evaluate mode

    previous_output = 0
    previous_mut = train_dataset.steering_mean
    m = train_dataset.steering_mean
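
The excerpt stops just before the evaluation loop itself. As a rough, hypothetical sketch only (not the project's actual Kalman-filter loop, whose update step is not shown here), iterating the test loader in inference mode might look like the following; the assumption that each batch starts with an image tensor followed by a steering target is illustrative.

# Hypothetical continuation, not taken from the project; the batch layout is an assumption.
with torch.no_grad():
    for batch in dataloader:
        inputs, target = batch[0].to(device), float(batch[1])
        prediction = Model(inputs).item()        # assumes a scalar steering output
        logs['no_kalman_prediction'].append(prediction)
        logs['gt'].append(target)
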
Code example #4
def main(fold):
    # check cuda available
    assert torch.cuda.is_available()

    # when the input dimensions do not change, enable this flag to speed up training
    cudnn.benchmark = True

    num_classes = config.problem_class[params.problem_type]
    # inputs are RGB images of size 3 * h * w
    # outputs are binary or multi-class segmentation masks
    model = params.model(in_channels=3, num_classes=num_classes)
    # data parallel
    model = nn.DataParallel(model, device_ids=params.device_ids).cuda()
    # loss function
    if num_classes == 2:
        loss = LossBinary(jaccard_weight=params.jaccard_weight)
        valid_metric = validation_binary
    else:
        loss = LossMulti(num_classes=num_classes,
                         jaccard_weight=params.jaccard_weight)
        valid_metric = validation_multi

    # trainset transform
    train_transform = Compose([
        Resize(height=params.train_height, width=params.train_width, p=1),
        Normalize(p=1),
        PadIfNeeded(min_height=params.train_height, min_width=params.train_width, p=1),
    ], p=1)

    # validset transform
    valid_transform = Compose([
        PadIfNeeded(min_height=params.valid_height, min_width=params.valid_width, p=1),
        Resize(height=params.train_height, width=params.train_width, p=1),
        Normalize(p=1)
    ], p=1)

    # train/valid filenames
    train_filenames, valid_filenames = trainval_split(fold)
    print('fold {}, {} train / {} validation files'.format(
        fold, len(train_filenames), len(valid_filenames)))

    # train dataloader
    train_loader = DataLoader(
        dataset=RobotSegDataset(train_filenames,
                                transform=train_transform,
                                schedule="ordered",
                                batch_size=params.batch_size,
                                problem_type=params.problem_type,
                                semi_percentage=params.semi_percentage),
        shuffle=False,  # disable the DataLoader's own shuffling
        num_workers=params.num_workers,
        batch_size=params.batch_size,
        pin_memory=True
    )
    # valid dataloader
    valid_loader = DataLoader(
        dataset=RobotSegDataset(valid_filenames,
                                transform=valid_transform,
                                problem_type=params.problem_type,
                                mode='valid'),
        shuffle=False,  # disable the DataLoader's own shuffling
        num_workers=0,  # params.num_workers,
        batch_size=1,  # at validation time, process one image at a time
        pin_memory=True)

    train(model=model,
          loss_func=loss,
          train_loader=train_loader,
          valid_loader=valid_loader,
          valid_metric=valid_metric,
          fold=fold,
          num_classes=num_classes)
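
Neither excerpt shows how main(fold) is invoked. A minimal, hypothetical entry point (the --fold flag and its default are assumptions, not taken from the projects) could look like this:

# Hypothetical entry point; the --fold argument is an assumption.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='train one cross-validation fold')
    parser.add_argument('--fold', type=int, default=0, help='cross-validation fold index')
    args = parser.parse_args()
    main(args.fold)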