def get_initial_model():
    tmp_resnet = resnet18(pretrained=True, efficient=False, mean=mean, std=std, scale=scale)
    tmp_model = SemsegModel(tmp_resnet, num_classes)
    tmp_model.load_state_dict(torch.load('weights/rn18_single_scale/model_best.pt'))
    tmp_model.criterion = SemsegCrossEntropy(num_classes=num_classes, ignore_id=ignore_id)
    return tmp_model
def get_initial_model():
    backbone = resnet18(pretrained=True, pyramid_levels=num_levels, k_upsample=3, scale=scale,
                        mean=mean, std=std, k_bneck=1, output_stride=ostride, efficient=False)
    model_tmp = SemsegModel(backbone, num_classes, k=1, bias=True)
    model_tmp.load_state_dict(torch.load('weights/rn18_pyramid/model_best.pt'), strict=False)
    model_tmp.criterion = BoundaryAwareFocalLoss(gamma=.5, num_classes=num_classes, ignore_id=ignore_id)
    return model_tmp
        ignore_id=num_classes, mean=mean_rgb),
    SetTargetSize(target_size=target_size_crops, target_size_feats=target_size_crops_feats),
    Tensor(),
])

dataset_train = Cityscapes(root, transforms=trans_train, subset='train')
dataset_val = Cityscapes(root, transforms=trans_val, subset='val')

resnet = resnet18(pretrained=True, efficient=False, mean=mean, std=std, scale=scale)
model = SemsegModel(resnet, num_classes)
if pruning:
    model.load_state_dict(torch.load('weights/rn18_single_scale/model_best.pt'))
if evaluating:
    model.load_state_dict(torch.load('weights/rn18_single_scale/model_best.pt'))
else:
    model.criterion = SemsegCrossEntropy(num_classes=num_classes, ignore_id=ignore_id)

optim_params = [
    {
        'params': model.random_init_params(),
    LabelDistanceTransform(num_classes=num_classes, ignore_id=ignore_id, reduce=True,
                           bins=dist_trans_bins, alphas=dist_trans_alphas),
    Tensor(),
    SetTargetSize(None, None),
])

dataset_train = Vistas(root, transforms=trans_train, subset='training', epoch=epoch)
dataset_val = Vistas(root, transforms=trans_val, subset='validation', epoch=epoch)

backbone = resnet18(pretrained=True, k_up=3, scale=scale, mean=mean, std=std,
                    output_stride=output_stride, efficient=False)
model = SemsegModel(backbone, num_classes, k=1, bias=True)
if evaluating:
    model.load_state_dict(torch.load(f'{dir_path}/stored/model_best.pt'), strict=False)
else:
    model.criterion = BoundaryAwareFocalLoss(gamma=.5, num_classes=num_classes, ignore_id=ignore_id)

bn_count = 0
for m in model.modules():
    if isinstance(m, nn.BatchNorm2d):
        bn_count += 1
print(f'Num BN layers: {bn_count}')

if not evaluating:
    lr = 8e-4
    lr_min = 1e-6
    fine_tune_factor = 4
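    # Hedged sketch, not taken from this config: one plausible way the
    # fine_tune_factor above gets applied, assuming the model exposes
    # random_init_params() / fine_tune_params() groups (as the single-scale
    # config does) and assuming Adam with cosine annealing; `weight_decay`
    # and `epochs` are placeholder names introduced here.
    optim_params = [
        {'params': model.random_init_params(), 'lr': lr, 'weight_decay': weight_decay},
        {'params': model.fine_tune_params(), 'lr': lr / fine_tune_factor,
         'weight_decay': weight_decay / fine_tune_factor},
    ]
    optimizer = torch.optim.Adam(optim_params, betas=(0.9, 0.99))
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, epochs, eta_min=lr_min)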
def get_initial_model():
    tmp_resnet = resnet34(pretrained=True, k_up=3, scale=scale, mean=mean, std=std,
                          output_stride=8, efficient=False)
    tmp_model = SemsegModel(tmp_resnet, num_classes, k=1, bias=True)
    tmp_model.load_state_dict(torch.load('weights/76-66_resnet34x8/stored/model_best.pt'), strict=False)
    # Attach the criterion to the freshly built model before returning it.
    tmp_model.criterion = BoundaryAwareFocalLoss(gamma=.5, num_classes=num_classes, ignore_id=ignore_id)
    return tmp_model
import torch
import torch.onnx

from models.resnet.resnet_single_scale import *
from models.semseg import SemsegModel

# Export resolution (NCHW): a single full-resolution Cityscapes image.
IN_RESOLUTION = [1, 3, 1024, 2048]

if __name__ == '__main__':
    use_bn = True
    resnet = resnet18(pretrained=False, efficient=False, use_bn=use_bn)
    model = SemsegModel(resnet, 19, use_bn=use_bn)
    model.load_state_dict(
        torch.load('/home/smocilac/dipl_seminar/swiftnet/weights/swiftnet_ss_cs.pt'),
        strict=True)
    model.to('cuda')

    input_ = torch.ones(IN_RESOLUTION).to('cuda')
    torch.onnx.export(model, input_, 'swiftnet.onnx', verbose=True, do_constant_folding=True)
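# Hedged follow-up sketch (not part of the original script): one way to
# sanity-check the exported graph, assuming an onnxruntime build is installed;
# 'swiftnet.onnx' is the file written by the export call above.
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession('swiftnet.onnx')
input_name = session.get_inputs()[0].name
dummy = np.ones((1, 3, 1024, 2048), dtype=np.float32)
outputs = session.run(None, {input_name: dummy})
print([o.shape for o in outputs])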
    Tensor(),
])

dataset_train = Cityscapes(root, transforms=trans_train, subset='train')
dataset_val = Cityscapes(root, transforms=trans_val, subset='val')

backbone = resnet18(pretrained=True, pyramid_levels=num_levels, k_upsample=3, scale=scale,
                    mean=mean, std=std, k_bneck=1, output_stride=ostride, efficient=True)
model = SemsegModel(backbone, num_classes, k=1, bias=True)
if evaluating:
    model.load_state_dict(torch.load('weights/rn18_pyramid/model_best.pt'), strict=False)
else:
    model.criterion = BoundaryAwareFocalLoss(gamma=.5, num_classes=num_classes, ignore_id=ignore_id)

bn_count = 0
for m in model.modules():
    if isinstance(m, nn.BatchNorm2d):
        bn_count += 1
print(f'Num BN layers: {bn_count}')

if not evaluating:
transforms = Compose([
    Open(),
    RemapLabels(map_to_id, num_classes),
    Pyramid(alphas=alphas),
    SetTargetSize(target_size=target_size, target_size_feats=target_size_feats),
    Normalize(scale, mean, std),
    Tensor(),
])

use_bn = True
if single_scale:
    from models.resnet.resnet_single_scale import resnet18
    resnet = resnet18(pretrained=False, efficient=False, use_bn=use_bn)
    # Adjust num_classes here if the target dataset has a different number of classes.
    model = SemsegModel(resnet, num_classes, use_bn=use_bn)
    model.load_state_dict(torch.load('weights/swiftnet_ss_cs.pt'), strict=True)
else:
    from models.resnet.resnet_pyramid import resnet18
    resnet = resnet18(pretrained=False, pyramid_levels=num_levels, efficient=False, use_bn=use_bn)
    model = SemsegPyramidModel(resnet, num_classes)
    model.load_state_dict(torch.load('weights/swiftnet_pyr_cs.pt'), strict=True)
model = model.cuda()

ret_dict = {
    'image': '000025.png',
}
batch = transforms(ret_dict)
trans_train = trans_val = Compose([
    Open(),
    RemapLabels(Cityscapes.map_to_id, Cityscapes.num_classes),
    Pyramid(alphas=alphas),
    SetTargetSize(target_size=target_size, target_size_feats=target_size_feats),
    Normalize(scale, mean, std),
    Tensor(),
])

dataset_train = Cityscapes(root, transforms=trans_train, subset='train')
dataset_val = Cityscapes(root, transforms=trans_val, subset='val')

use_bn = True
resnet = resnet18(pretrained=False, efficient=False, use_bn=use_bn)
model = SemsegModel(resnet, Cityscapes.num_classes, use_bn=use_bn)
model.load_state_dict(torch.load('weights/swiftnet_ss_cs.pt'), strict=True)

batch_size = 1
nw = 8
loader_train = DataLoader(dataset_train, batch_size=batch_size, collate_fn=custom_collate, num_workers=nw)
loader_val = DataLoader(dataset_val, batch_size=batch_size, collate_fn=custom_collate, num_workers=nw)

total_params = get_n_params(model.parameters())
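# Hedged sketch (an assumption, not the repo's definition): a helper with the
# same shape as get_n_params above would typically just sum element counts
# over the iterable of parameter tensors it receives.
def count_params(parameters):
    """Total number of scalar values across all parameter tensors."""
    return sum(p.numel() for p in parameters)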