import sys
import glob

import numpy as np
import torch
import torch.optim as optim
from sklearn.model_selection import train_test_split

# resnet18_mod, nofclasses and use_cuda come from the surrounding project code.

N = int(sys.argv[1])

poisoned_models = glob.glob(
    './poisoned_models/Triggers_01_10/*.pt')[:1000]  # Choose 1000 models
clean_models = glob.glob(
    './clean_models/train/*.pt')[:1000]  # Choose 1000 models
models = clean_models + poisoned_models
labels = np.concatenate(
    [np.zeros((len(clean_models), )),
     np.ones((len(poisoned_models), ))])
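# Binary detector labels: 0 for clean models, 1 for poisoned models.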

# split models into train and test set for ULP training
train_models, test_models, train_labels, test_labels = train_test_split(
    models, labels, test_size=.2, random_state=10)
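# 80/20 split with a fixed seed, so the patterns are evaluated on models
# never seen during their optimization.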

cnn = resnet18_mod(num_classes=nofclasses)

if use_cuda:
    device = torch.device('cuda')
    cnn.cuda()
else:
    device = torch.device('cpu')

# Trainable litmus patterns X (N images scaled to the [0, 255) pixel range)
# and a linear classifier (W, b) that scores a model's responses to the
# patterns as clean vs. poisoned. Use the device selected above so the code
# also runs without CUDA.
X = torch.rand((N, 3, 32, 32), requires_grad=True, device=device)
X.data *= 255.
W = torch.randn((200, 2), requires_grad=True, device=device)
b = torch.zeros((2, ), requires_grad=True, device=device)

optimizerX = optim.SGD(params=[X], lr=1e+2)
optimizerWb = optim.Adam(params=[W, b], lr=1e-4)
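# Hypothetical sketch of the update step these optimizers would drive (the
# training loop itself is not part of this excerpt); it assumes
# N * nofclasses == 200 so the concatenated logits match W, and a (1,)-shaped
# long tensor `label` holding the current model's clean/poisoned label:
#
#   optimizerX.zero_grad()
#   optimizerWb.zero_grad()
#   logits = cnn(X).view(1, -1)            # (1, N * nofclasses)
#   score = torch.matmul(logits, W) + b    # (1, 2)
#   loss = torch.nn.functional.cross_entropy(score, label)
#   loss.backward()
#   optimizerX.step()
#   optimizerWb.step()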
Example #2
import os
import sys
import logging

import numpy as np
import torch
import torch.optim as optim

os.makedirs(saveDirmeta, exist_ok=True)  # saveDirmeta is set earlier in the script

crossentropy = torch.nn.CrossEntropyLoss()

count = 0
clean_models = []
partition = int(sys.argv[1])
gpu = "0"
runs = 0
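# Each partition trains 50 models; model indices run from partition*50 to
# partition*50 + 49, so separate partitions cover disjoint index ranges.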
while runs < 50:
	n = partition*50 + runs
	val_temp = 0
	train_accuracy = 0
	logging.info('Training model %d' % (n))

	cnn = resnet18_mod(num_classes=200)

	logging.info(cnn)
	# Compute number of parameters
	s = sum(np.prod(list(p.size())) for p in cnn.parameters())
	print('Number of params: %d' % s)

	if use_cuda:
		device = torch.device('cuda:' + gpu)
		cnn.to(device)
	else:
		device = torch.device('cpu')
	optimizer = optim.Adam(params=cnn.parameters(), lr=0.001)
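	# Fresh optimizer per model; nof_epochs is set elsewhere in the script.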
	for epoch in range(nof_epochs):
		cnn.train()
		# adjust_learning_rate(optimizer, epoch)