Example #1
# Standard PyTorch imports; StereoNet, SceneflowDataset, KittiDataset, loadPretrain
# and the transform classes (Compose, RandomCrop, RandomHSV, ToTensor, Normalize)
# are assumed to come from the project's own modules.
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader

# exp_prefix, batch and Lr are assumed to be defined earlier in the original script.
snapshot = 10000
paramName = 'models/' + exp_prefix + 'stereo_2'
predModel = 'models/9-3_stereo_2_100000.pkl'
lossfilename = exp_prefix + 'loss'
SceneTurn = 5
ImgHeight = 320
ImgWidth = 640

stereonet = StereoNet()
stereonet.cuda()
loadPretrain(stereonet,predModel)

normalize = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

sceneDataset = SceneflowDataset(transform=Compose([
    RandomCrop(size=(ImgHeight, ImgWidth)),
    RandomHSV((10, 80, 80)),
    ToTensor(), normalize
]))
kittiDataset = KittiDataset(transform=Compose([
    RandomCrop(size=(ImgHeight, ImgWidth)),
    RandomHSV((7, 50, 50)),
    ToTensor(), normalize
]), surfix='train')

sceneDataloader = DataLoader(sceneDataset, batch_size=batch, shuffle=True, num_workers=4)
kittiDataloader = DataLoader(kittiDataset, batch_size=batch, shuffle=True, num_workers=4)
sceneiter = iter(sceneDataloader)
kittiiter = iter(kittiDataloader)

criterion = nn.SmoothL1Loss()
# stereoOptimizer = optim.Adam(stereonet.parameters(), lr = Lr)
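The snippet ends before the training loop, but SceneTurn together with the two iterators suggests alternating between Sceneflow and KITTI batches. Below is a minimal sketch of such a loop, assuming the commented-out Adam optimizer, a step budget trainstep as in Example #2, and that each sample is a dict with 'leftImg', 'rightImg' and 'disp' entries; the sample keys and the network's call signature are assumptions, not the original code.

stereoOptimizer = optim.Adam(stereonet.parameters(), lr=Lr)

for step in range(1, trainstep + 1):
    # every SceneTurn-th step draws a KITTI batch, the rest come from Sceneflow;
    # iterators are re-created once a dataloader is exhausted
    use_kitti = (step % SceneTurn == 0)
    try:
        sample = next(kittiiter) if use_kitti else next(sceneiter)
    except StopIteration:
        if use_kitti:
            kittiiter = iter(kittiDataloader)
            sample = next(kittiiter)
        else:
            sceneiter = iter(sceneDataloader)
            sample = next(sceneiter)

    left = sample['leftImg'].cuda()
    right = sample['rightImg'].cuda()
    target = sample['disp'].cuda()

    output = stereonet(left, right)
    loss = criterion(output, target)

    stereoOptimizer.zero_grad()
    loss.backward()
    stereoOptimizer.step()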
Example #2
batch = 1
trainstep = 100000
showiter = 20
snapshot = 10000
paramName = 'models/' + exp_prefix + 'stereo_2'
predModel = 'models/9-2-2_stereo_2_50000.pkl'
lossfilename = exp_prefix + 'loss'

stereonet = StereoNet()
stereonet.cuda()
# loadPretrain(stereonet,predModel)

normalize = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
sceneDataset = SceneflowDataset(transform=Compose([
    RandomCrop(size=(320, 640)),
    RandomHSV((7, 37, 37)),
    ToTensor(), normalize
]))
dataloader = DataLoader(sceneDataset,
                        batch_size=batch,
                        shuffle=True,
                        num_workers=8)

criterion = nn.SmoothL1Loss()
# stereoOptimizer = optim.Adam(stereonet.parameters(), lr = Lr)
stereoOptimizer = optim.Adam([{
    'params': stereonet.preLoadedParams,
    'lr': Lr
}, {
    'params': stereonet.params
}], lr=Lr)  # the closing of this call is truncated in the source; lr=Lr is an assumption
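The two parameter groups let the layers restored from the pretrained snapshot (preLoadedParams) and the newly added layers (params) be trained with different learning rates. A quick, purely illustrative way to check what ended up in each group:

for i, group in enumerate(stereoOptimizer.param_groups):
    n_params = sum(p.numel() for p in group['params'])
    print('group %d: lr=%g, %d tensors / %d parameters'
          % (i, group['lr'], len(group['params']), n_params))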
Example #3
predModel = 'models/1-4_stereo_2_45000.pkl'
dnetPreModel = 'models/4-2_dnet_2000.pkl'

stereonet = StereoNet()
stereonet.cuda()
loadPretrain(stereonet, predModel)

print('---')
dnet = DNet()
dnet.cuda()
loadPretrain(dnet, dnetPreModel)

normalize = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
sceneDataset = SceneflowDataset(transform=Compose([
    RandomCrop(size=(320, 640)),
    RandomHSV((10, 100, 100)),
    ToTensor(), normalize
]))
dataloader = DataLoader(sceneDataset,
                        batch_size=batch,
                        shuffle=True,
                        num_workers=8)

criterion = nn.BCELoss()
# stereoOptimizer = optim.Adam(stereonet.parameters(), lr = Lr)
dnetOptimizer = optim.Adam(dnet.parameters(), lr=Lr)

# pre-allocated (uninitialized) buffers and GAN-style labels for training the
# discriminator (DNet)
label = torch.FloatTensor(batch)
input = torch.FloatTensor(batch, 320, 640)
real_label = 1
fake_label = 0
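The BCE criterion and the real/fake labels indicate a GAN-style setup in which DNet is trained to distinguish ground-truth disparity from StereoNet's predictions. A minimal sketch of one discriminator update under that reading; the sample keys, the input DNet expects, and the network call signatures are assumptions:

sample = next(iter(dataloader))
left = sample['leftImg'].cuda()
right = sample['rightImg'].cuda()
disp_gt = sample['disp'].cuda()

dnet.zero_grad()

# real pass: ground-truth disparity should be classified as real
out_real = dnet(disp_gt)
loss_real = criterion(out_real, torch.full_like(out_real, real_label))

# fake pass: disparity predicted by the (frozen) StereoNet should be classified as fake
with torch.no_grad():
    disp_pred = stereonet(left, right)
out_fake = dnet(disp_pred)
loss_fake = criterion(out_fake, torch.full_like(out_fake, fake_label))

(loss_real + loss_fake).backward()
dnetOptimizer.step()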
Example #4
    # (only the tail of a gradient-penalty helper survives here;
    #  the snippet starts mid-file)
    return gradient_penalty
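# For illustration only: a standard WGAN-GP style gradient penalty that a helper
# like the truncated one above typically computes.  The function name, signature,
# lambda weight and the assumption of 4-D (N, C, H, W) inputs are all guesses,
# not the original code.
def calc_gradient_penalty(netD, real_data, fake_data, lambda_gp=10.0):
    # random interpolation between real and generated samples
    alpha = torch.rand(real_data.size(0), 1, 1, 1, device=real_data.device)
    interpolates = (alpha * real_data + (1.0 - alpha) * fake_data).requires_grad_(True)
    disc_out = netD(interpolates)
    # gradient of the critic output w.r.t. the interpolated input
    gradients = torch.autograd.grad(outputs=disc_out, inputs=interpolates,
                                    grad_outputs=torch.ones_like(disc_out),
                                    create_graph=True, retain_graph=True)[0]
    gradients = gradients.view(gradients.size(0), -1)
    gradient_penalty = ((gradients.norm(2, dim=1) - 1.0) ** 2).mean() * lambda_gp
    return gradient_penalty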


stereonet = StereoNet()
stereonet.cuda()
loadPretrain(stereonet, predModel)

print('---')
dnet = DNet()
dnet.cuda()
# loadPretrain(dnet,dnetPreModel)

normalize = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
sceneDataset = SceneflowDataset(transform=Compose([
    RandomCrop(size=(320, 640)),
    RandomHSV((5, 30, 30)),
    ToTensor(), normalize
]))

dataloader = DataLoader(sceneDataset,
                        batch_size=batch,
                        shuffle=True,
                        num_workers=8)
dataiter = iter(dataloader)

criterion1 = nn.SmoothL1Loss()
# stereoOptimizer = optim.Adam(stereonet.parameters(), lr = Lr)
# stereoOptimizer = optim.Adam([{'params': stereonet.preLoadedParams, 'lr': 0},
#                               {'params': stereonet.params}], lr=stereo_lr)

# dnetOptimizer = optim.Adam(dnet.parameters(), lr = dnet_lr)
# paramName = 'models/'+exp_prefix+'stereo_2'
# --- evaluation setup below: reload the trained model and run it on a validation split ---
predModel = 'models/12-3-4_stereo_gan_80000.pkl'

dataset = 'scene'

stereonet = StereoNet()
stereonet.cuda()
loadPretrain(stereonet, predModel)

normalize = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
if dataset == 'scene':
    sceneDataset = SceneflowDataset(filename='val.txt',
                                    transform=Compose([
                                        RandomCrop(size=(320, 640)),
                                        RandomHSV((0, 0, 0)),
                                        ToTensor(), normalize
                                    ]))
else:
    sceneDataset = KittiDataset(transform=Compose([
        RandomCrop(size=(320, 640)),
        RandomHSV((0, 0, 0)),
        ToTensor(), normalize
    ]))

dataloader = DataLoader(sceneDataset,
                        batch_size=1,
                        shuffle=True,
                        num_workers=8)

# criterion = nn.SmoothL1Loss()
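The listing stops before the evaluation loop itself. A minimal sketch of how the objects above could be used to report an average error over the validation split; the sample keys and the network's call signature are assumptions:

criterion = nn.SmoothL1Loss()

stereonet.eval()
total_loss, count = 0.0, 0
with torch.no_grad():
    for sample in dataloader:
        left = sample['leftImg'].cuda()
        right = sample['rightImg'].cuda()
        disp_gt = sample['disp'].cuda()
        disp_pred = stereonet(left, right)
        total_loss += criterion(disp_pred, disp_gt).item()
        count += 1
print('mean validation loss over %d samples: %f' % (count, total_loss / count))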