Example #1
import os
import sys

import torch.nn as nn

import utils
from models.ilsvrc import mobilenetv2_skip, mobilenetv2
#from models.cifar100 import mobilenetv2_skip
dic_model = {'MobileNetV2_skip': mobilenetv2_skip.MobileNetV2_skip}

if args.model not in dic_model:
    print("The model is currently not supported")
    sys.exit()

trainloader = utils.get_traindata('ILSVRC2012',
                                  args.dataset_path,
                                  batch_size=args.batch_size,
                                  download=True,
                                  num_workers=16)
testloader = utils.get_testdata('ILSVRC2012',
                                args.dataset_path,
                                batch_size=args.batch_size,
                                num_workers=16)

#args.visible_device sets which CUDA devices are used
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_device
device = 'cuda'
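
# Note: CUDA_VISIBLE_DEVICES takes effect only if it is set before the first
# CUDA call, which is why the script configures it here, before any tensors
# or models are moved to the GPU.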


# parallelize
class MyDataParallel(nn.DataParallel):
    def __getattr__(self, name):
        try:
            return super().__getattr__(name)
        except AttributeError:
            # nn.DataParallel stores the wrapped network as self.module and
            # hides its attributes; fall back to looking them up there.
            return getattr(self.module, name)
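
# Why the override matters: nn.DataParallel keeps the wrapped network in
# self.module, so attributes defined on the original model are otherwise
# unreachable on the wrapper. A minimal sketch using the class above
# (TinyNet is a hypothetical stand-in, not part of this repo):
import torch.nn as nn

class TinyNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.classifier = nn.Linear(8, 2)

    def forward(self, x):
        return self.classifier(x)

demo_net = MyDataParallel(TinyNet())
print(demo_net.classifier)   # resolved via demo_net.module.classifier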
Example #2
    'ResNet1202': resnet.ResNet1202,
    'ResNet56_DoubleShared': resnet.ResNet56_DoubleShared,
    'ResNet32_DoubleShared': resnet.ResNet32_DoubleShared,
    'ResNet56_SingleShared': resnet.ResNet56_SingleShared,
    'ResNet32_SingleShared': resnet.ResNet32_SingleShared,
    'ResNet56_SharedOnly': resnet.ResNet56_SharedOnly,
    'ResNet32_SharedOnly': resnet.ResNet32_SharedOnly,
    'ResNet56_NonShared': resnet.ResNet56_NonShared,
    'ResNet32_NonShared': resnet.ResNet32_NonShared}

if args.model not in dic_model:
    print("The model is currently not supported")
    sys.exit()

testloader = utils.get_testdata('CIFAR10',
                                args.dataset_path,
                                batch_size=args.batch_size,
                                download=True)

#args.visible_device sets which CUDA devices are used
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_device
device = 'cuda'

if 'DoubleShared' in args.model or 'SingleShared' in args.model:
    net = dic_model[args.model](args.shared_rank, args.unique_rank)
elif 'SharedOnly' in args.model:
    net = dic_model[args.model](args.shared_rank)
elif 'NonShared' in args.model:
    net = dic_model[args.model](args.unique_rank)
else:
    net = dic_model[args.model]()
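
# The branching above is plain dictionary dispatch: substrings of the model
# name decide which constructor arguments are passed. A self-contained sketch
# of the same pattern with stand-in constructors (not the real resnet classes):
def _shared_ctor(shared_rank, unique_rank):
    return f"shared={shared_rank}, unique={unique_rank}"

def _plain_ctor():
    return "plain"

_demo = {'ResNet56_SingleShared': _shared_ctor, 'ResNet56': _plain_ctor}
_name = 'ResNet56_SingleShared'
if 'DoubleShared' in _name or 'SingleShared' in _name:
    print(_demo[_name](16, 1))   # -> shared=16, unique=1
else:
    print(_demo[_name]())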
Example #3
args = parser.parse_args()

from models.cifar100 import mobilenetv2_skip
dic_model = {'MobileNetV2_skip': mobilenetv2_skip.MobileNetV2_skip}

if args.model not in dic_model:
    print("The model is currently not supported")
    sys.exit()

trainloader = utils.get_traindata('CIFAR100',
                                  args.dataset_path,
                                  batch_size=args.batch_size,
                                  download=True)
testloader = utils.get_testdata('CIFAR100',
                                args.dataset_path,
                                batch_size=args.batch_size)
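
# utils.get_traindata / utils.get_testdata are this repository's own helpers;
# their implementation is not shown in this excerpt. For orientation only, a
# rough torchvision equivalent for the CIFAR-100 train loader (the transform
# and the demo_* names are assumptions, not the repo's API):
import torch
import torchvision
import torchvision.transforms as transforms

demo_transform = transforms.Compose([transforms.ToTensor()])
demo_trainset = torchvision.datasets.CIFAR100(root=args.dataset_path,
                                              train=True,
                                              download=True,
                                              transform=demo_transform)
demo_trainloader = torch.utils.data.DataLoader(demo_trainset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=2)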

#args.visible_device sets which CUDA devices are used
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_device
device = 'cuda'

net = dic_model[args.model](num_classes=100)
net = net.to(device)

#CrossEntropyLoss for the standard classification loss
criterion = nn.CrossEntropyLoss()

# Kullback-Leibler divergence loss, used as the distillation criterion
criterion_kd = nn.KLDivLoss(reduction='batchmean')
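
# How the two criteria are combined is not shown in this excerpt. A minimal
# sketch of the usual distillation objective (temperature T, weight alpha,
# and the kd_loss helper itself are assumptions, not the repo's code):
import torch.nn.functional as F

def kd_loss(student_logits, teacher_logits, targets, T=4.0, alpha=0.9):
    # KLDivLoss expects log-probabilities as input and probabilities as
    # target; the T*T factor keeps gradients comparable across temperatures.
    soft = criterion_kd(F.log_softmax(student_logits / T, dim=1),
                        F.softmax(teacher_logits / T, dim=1)) * (T * T)
    hard = criterion(student_logits, targets)
    return alpha * soft + (1.0 - alpha) * hard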
Example #4
                    default=False,
                    action='store_true',
                    help='Execute a scaled-down model')
args = parser.parse_args()

print('skip:', args.skip)

from models.ilsvrc import mobilenetv2_skip
dic_model = {'MobileNetV2_skip': mobilenetv2_skip.MobileNetV2_skip}

if args.model not in dic_model:
    print("The model is currently not supported")
    sys.exit()

testloader = utils.get_testdata('ILSVRC2012',
                                args.dataset_path,
                                batch_size=args.batch_size,
                                download=True)

#args.visible_device sets which CUDA devices are used
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_device
device = 'cuda'

net = dic_model[args.model](num_classes=1000)

net = net.to(device)


# parallelize
class MyDataParallel(nn.DataParallel):
    def __getattr__(self, name):
        try:
            return super().__getattr__(name)
        except AttributeError:
            # Fall back to the wrapped module's attributes, as in Example #1.
            return getattr(self.module, name)