def __init__(self, channel=32):
    super(BBSNet, self).__init__()

    # Backbone model
    self.resnet = ResNet50('rgb')
    self.resnet_depth = ResNet50('rgbd')

    # Decoder 1
    self.rfb2_1 = GCM(512, channel)
    self.rfb3_1 = GCM(1024, channel)
    self.rfb4_1 = GCM(2048, channel)
    self.agg1 = aggregation_init(channel)

    # Decoder 2
    self.rfb0_2 = GCM(64, channel)
    self.rfb1_2 = GCM(256, channel)
    self.rfb5_2 = GCM(512, channel)
    self.agg2 = aggregation_final(channel)

    # Upsample functions
    self.upsample = nn.Upsample(scale_factor=8, mode='bilinear', align_corners=True)
    self.upsample4 = nn.Upsample(scale_factor=4, mode='bilinear', align_corners=True)
    self.upsample2 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)

    # Refinement flow
    self.HA = Refine()

    # Components of DEM module
    self.atten_depth_channel_0 = ChannelAttention(64)
    self.atten_depth_channel_1 = ChannelAttention(256)
    self.atten_depth_channel_2 = ChannelAttention(512)
    self.atten_depth_channel_3_1 = ChannelAttention(1024)
    self.atten_depth_channel_4_1 = ChannelAttention(2048)

    self.atten_depth_spatial_0 = SpatialAttention()
    self.atten_depth_spatial_1 = SpatialAttention()
    self.atten_depth_spatial_2 = SpatialAttention()
    self.atten_depth_spatial_3_1 = SpatialAttention()
    self.atten_depth_spatial_4_1 = SpatialAttention()

    # Components of PTM module
    self.inplanes = 32 * 2
    self.deconv1 = self._make_transpose(TransBasicBlock, 32 * 2, 3, stride=2)
    self.inplanes = 32
    self.deconv2 = self._make_transpose(TransBasicBlock, 32, 3, stride=2)
    self.agant1 = self._make_agant_layer(32 * 3, 32 * 2)
    self.agant2 = self._make_agant_layer(32 * 2, 32)
    self.out0_conv = nn.Conv2d(32 * 3, 1, kernel_size=1, stride=1, bias=True)
    self.out1_conv = nn.Conv2d(32 * 2, 1, kernel_size=1, stride=1, bias=True)
    self.out2_conv = nn.Conv2d(32 * 1, 1, kernel_size=1, stride=1, bias=True)

    if self.training:
        self.initialize_weights()
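
# The constructor above relies on two helpers, _make_agant_layer and _make_transpose,
# defined elsewhere in the BBSNet class. A minimal sketch of what such RedNet-style
# decoder helpers commonly look like is given below; treat the exact layer choices as
# assumptions rather than the verified original implementation.
def _make_agant_layer(self, inplanes, planes):
    # 1x1 conv + BN + ReLU used to adapt channel counts before each deconv stage
    return nn.Sequential(
        nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, padding=0, bias=False),
        nn.BatchNorm2d(planes),
        nn.ReLU(inplace=True),
    )

def _make_transpose(self, block, planes, blocks, stride=1):
    # Stack `blocks` TransBasicBlocks; the final block upsamples via ConvTranspose2d
    upsample = None
    if stride != 1:
        upsample = nn.Sequential(
            nn.ConvTranspose2d(self.inplanes, planes, kernel_size=2, stride=stride,
                               padding=0, bias=False),
            nn.BatchNorm2d(planes),
        )
    elif self.inplanes != planes:
        upsample = nn.Sequential(
            nn.Conv2d(self.inplanes, planes, kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(planes),
        )
    layers = []
    for _ in range(1, blocks):
        layers.append(block(self.inplanes, self.inplanes))
    layers.append(block(self.inplanes, planes, stride, upsample))
    self.inplanes = planes
    return nn.Sequential(*layers)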
def attack_all():
    if args.model == 'resnet':
        model = ResNet50(enable_lat=args.enable_lat,
                         epsilon=args.lat_epsilon,
                         pro_num=args.lat_pronum,
                         batch_size=args.model_batchsize,
                         num_classes=10,
                         if_dropout=args.dropout)
    elif args.model == 'vgg':
        model = VGG16(enable_lat=args.enable_lat,
                      epsilon=args.lat_epsilon,
                      pro_num=args.lat_pronum,
                      batch_size=args.model_batchsize,
                      num_classes=10,
                      if_dropout=args.dropout)
    elif args.model == 'resnet18':
        model = ResNet18(enable_lat=args.enable_lat,
                         epsilon=args.lat_epsilon,
                         pro_num=args.lat_pronum,
                         batch_size=args.model_batchsize,
                         num_classes=10,
                         if_dropout=args.dropout)
    model.cuda()
    model.load_state_dict(torch.load(args.modelpath))

    for eps in range(4, 16 + 1):
        test_data_cln, test_data_adv, test_label, test_label_adv = attack_one(model, eps)
        if args.generate:
            save_data_label(args.savepath, eps, test_data_cln, test_data_adv,
                            test_label, test_label_adv)
def main():
    cw_attack = AttackCarliniWagnerL2()
    if MODEL == 'vgg':
        model = VGG.VGG16(enable_lat=False, epsilon=0.5, pro_num=5,
                          batch_size=BATCHSIZE, num_classes=NUM_CLASSES, if_dropout=False)
    elif MODEL == 'resnet':
        model = ResNet50(enable_lat=False, epsilon=0.5, pro_num=5,
                         batch_size=BATCHSIZE, num_classes=NUM_CLASSES, if_dropout=True)
    model.cuda()
    model.load_state_dict(torch.load(MODEL_PATH))
    dataloader = return_data()

    # batch-norm and dropout behave differently in train() and eval()
    model.eval()
    cor_cln = 0
    cor_adv = 0
    tot = 0
    for step, (x, y) in enumerate(dataloader):
        print('step {}'.format(step))
        x = Variable(x, requires_grad=True).cuda()
        y_true = Variable(y, requires_grad=False).cuda()

        # clean accuracy
        h = model(x)
        pred = torch.max(h, 1)[1]
        cor_cln += (pred == y_true.data).sum().item()

        # run the Carlini-Wagner L2 attack; the attack returns a NHWC numpy array,
        # so permute back to NCHW before feeding it to the model
        x_adv_np = cw_attack.run(model, x.detach(), y_true, step)
        x_adv = torch.from_numpy(x_adv_np).permute(0, 3, 1, 2).cuda()
        print(type(x_adv), x_adv.size())

        # adversarial accuracy
        h_adv = model(x_adv)
        pred_adv = torch.max(h_adv, 1)[1]
        cor_adv += (pred_adv == y_true.data).sum().item()
        tot += y.size(0)

        print(x.data.size(), x_adv.data.size(), y.size())
        if step == 0:
            test_data_cln = x.data.detach()
            test_data_adv = x_adv.data
            test_label = y
            test_label_adv = pred_adv
        else:
            test_data_cln = torch.cat([test_data_cln, x.data.detach()], 0)
            test_data_adv = torch.cat([test_data_adv, x_adv.data.detach()], 0)
            test_label = torch.cat([test_label, y], 0)
            test_label_adv = torch.cat([test_label_adv, pred_adv], 0)

    model.train()
    print("Before Carlini-L2 the accuracy is", float(100 * cor_cln) / tot)
    print("After Carlini-L2 the accuracy is", float(100 * cor_adv) / tot)
    return test_data_cln, test_data_adv, test_label, test_label_adv
def main():
    transform = transforms.Compose([
        transforms.ToTensor(),
    ])
    dataset = AncientSiteDataset('coordinates_train.txt', args.root_dir, transform)
    #print("Length of cross_train = {}, length of cross_test = {}".format(len(cross_train), len(cross_test)))
    #trainset, devset = torch.utils.data.random_split(dataset, [45000, 5220])

    device0 = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    res1 = ResNet34().to(device0)
    res1.load_state_dict(torch.load('models/bestModel/ResNet34Epoch:55.pt'))
    res3 = ResNet50().to(device0)
    res3.load_state_dict(torch.load('ResStack/ResNet50Epoch:75.pt'))
    res2 = ResNet34().to(device0)
    res2.load_state_dict(torch.load('ResStack/ResNet34Epoch:45.pt'))
    model = [res1, res3, res2]

    # Build the cross-validation split: every 10th sample goes to the test fold,
    # and class occurrences are counted over the training fold
    i = 0
    cross_train = []
    numCrossNegative = 0
    numCrossPositive = 0
    cross_test = []
    for sample in dataset:
        if i % 10 == 0:
            cross_test.append(sample)
        else:
            cross_train.append(sample)
            if sample['label'] == 0:
                numCrossNegative += 1
            else:
                numCrossPositive += 1
        i += 1

    # Class weights are inversely proportional to class frequency
    crossClassWeights = [1 / numCrossNegative, 1 / numCrossPositive]
    class_weights = torch.FloatTensor(crossClassWeights).to(device0)
    crossSampleWeights = [0] * len(cross_train)
    loss_function = NN.CrossEntropyLoss(weight=class_weights)

    test_loader = DataLoader(cross_test, batch_size=args.batch_size, shuffle=True, num_workers=5)
    # Evaluate the ensemble and each member individually
    test(model=model, test_loader=test_loader, device=device0)
    test2(model=res1, test_loader=test_loader, device=device0)
    test2(model=res3, test_loader=test_loader, device=device0)
    test2(model=res2, test_loader=test_loader, device=device0)
    # Tail of the pickle-saving helper; its header is elided in this snippet. Judging from
    # the call sites above, it is likely save_data_label(path, eps, test_data_cln,
    # test_data_adv, test_label, test_label_adv), and the dumps of the clean/adversarial
    # data and clean labels precede this block.
    with open(path + 'label_adv(eps_{:.3f}).p'.format(eps), 'wb') as f:
        pickle.dump(test_label_adv.cpu(), f, pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    torch.cuda.set_device(device_id)
    from ResNet import ResNet50
    from VGG import VGG16
    from denseNet import DenseNet
    from Inception_v2 import Inception_v2
    from utils import read_data_label

    if args.model == 'resnet':
        model = ResNet50(enable_lat=args.enable_lat,
                         epsilon=args.lat_epsilon,
                         pro_num=args.lat_pronum,
                         batch_size=args.model_batchsize,
                         num_classes=10,
                         if_dropout=args.dropout)
    elif args.model == 'vgg':
        model = VGG16(enable_lat=args.enable_lat,
                      epsilon=args.lat_epsilon,
                      pro_num=args.lat_pronum,
                      batch_size=args.model_batchsize,
                      num_classes=10,
                      if_dropout=args.dropout)
    elif args.model == 'densenet':
        model = DenseNet()
    elif args.model == 'inception':
        model = Inception_v2()
    model.cuda()
    model.load_state_dict(torch.load(args.modelpath))
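
# read_data_label (imported from utils above) is the counterpart of the save helper:
# it loads a pickled data tensor and label tensor and reports the dataset size, matching
# the call pattern `test_data, test_label, size = read_data_label(data_path, label_path)`
# seen elsewhere in these scripts. A minimal sketch under that assumption; the exact body
# in utils.py may differ.
import pickle

def read_data_label(data_path, label_path):
    # load the pickled tensors written by the save helper
    with open(data_path, 'rb') as f:
        data = pickle.load(f)
    with open(label_path, 'rb') as f:
        label = pickle.load(f)
    size = data.size(0)
    return data, label, size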
def main():
    transform = transforms.Compose([
        transforms.Resize((129, 129)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.ToTensor(),
    ])
    dataset = AncientSiteDataset('coordinates_train.txt', args.root_dir, transform)
    device0 = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    models = {
        "ResNet50": ResNet50(),
    }

    for modelName in models:
        initial_lr = args.lr
        model = models[modelName].to(device0)

        # Create cross-validation sets: every 10th sample goes to the test fold
        i = 0
        cross_train = []
        numCrossNegative = 0
        numCrossPositive = 0
        cross_test = []
        for sample in dataset:
            if i % 10 == 0:
                cross_test.append(sample)
            else:
                cross_train.append(sample)
                if sample['label'] == 0:
                    numCrossNegative += 1
                else:
                    numCrossPositive += 1
            i += 1

        # Calculate class weights based on occurrences in the dataset
        crossClassWeights = [1 / numCrossNegative, 1 / numCrossPositive]
        class_weights = torch.FloatTensor(crossClassWeights).to(device0)
        loss_function = NN.CrossEntropyLoss(weight=class_weights)

        train_loader = DataLoader(cross_train, batch_size=args.batch_size, num_workers=5, shuffle=True)
        test_loader = DataLoader(cross_test, batch_size=args.batch_size, shuffle=True, num_workers=5)
        optimizer = optim.Adam(model.parameters(), lr=initial_lr)

        start = time.time()
        # These lists will be used to create graphs
        trainLoss = [[], []]
        testAccuracy = [[], []]
        f1Score = [[], []]
        for epoch in range(args.num_epochs):
            # Halve the learning rate every 10 epochs
            if ((epoch + 1) % 10) == 0:
                for param_group in optimizer.param_groups:
                    param_group['lr'] /= 2
            loss = train(model=model, train_loader=train_loader, optimizer=optimizer,
                         epoch=epoch, device=device0, loss_function=loss_function)
            acc = test(model=model, test_loader=test_loader, device=device0)
            trainLoss[0].append(epoch)
            trainLoss[1].append(loss)
            testAccuracy[0].append(epoch)
            testAccuracy[1].append(acc[0])
            f1Score[0].append(epoch)
            f1Score[1].append(acc[1])

        # Call graph functions
        getTrainLoss(trainLoss)
        getTestAccuracy(testAccuracy)
        getF1(f1Score)
        end = time.time()
        # Elapsed time is end - start (the original computed start - end, which is negative)
        print("This took {test} minutes to run {name}".format(
            test=(end - start) / 60, name=modelName))
        torch.save(model.state_dict(), "ResStack/{}Epoch:{}.pt".format(modelName, args.num_epochs))
local_lable_path = ""


def receive_data(childPipe):
    # Subscribe to the data publisher and forward every received batch to the
    # parent process through the pipe
    context = zmq.Context()
    sub_recv = context.socket(zmq.SUB)
    sub_recv.connect("tcp://" + IP + ":5557")
    sub_recv.setsockopt(zmq.SUBSCRIBE, b'')
    while True:
        b_data_array = sub_recv.recv_pyobj()
        childPipe.send(b_data_array)
        continue


# Module-level model shared by the handlers below
global model
model = ResNet50(input_shape=(32, 32, 3), classes=10)
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])


def get_gradient_func(model):
    # Build a Keras backend function that returns the gradients of the total loss
    # with respect to the trainable weights
    grads = K.gradients(model.total_loss, model.trainable_weights)
    inputs = model._feed_inputs + model._feed_targets + model._feed_sample_weights
    func = K.function(inputs, grads)
    return func


def trainHandler(event, context):
    worker_ids = event["worker_ids"]  # the allocated worker ids
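
# Standalone usage sketch (separate from the truncated trainHandler above), assuming
# TF1 graph-mode Keras: the function returned by get_gradient_func expects the model
# inputs, targets and per-sample weights, and returns one gradient array per trainable
# weight. x_batch and y_batch below are hypothetical dummy values for a CIFAR-10 batch.
import numpy as np

grad_fn = grad_fn_sketch = get_gradient_func(model)
x_batch = np.zeros((8, 32, 32, 3), dtype=np.float32)   # dummy images
y_batch = np.zeros((8, 1), dtype=np.float32)           # dummy sparse labels
sample_weights = np.ones((8,), dtype=np.float32)
gradients = grad_fn([x_batch, y_batch, sample_weights])
print(len(gradients), gradients[0].shape)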
    # Tail of a test routine whose header is elided in this snippet:
    # run test_one() on every entry of data_list
    for data_cat in data_list:
        data_path = data_list[data_cat]
        test_one(model, data_cat, data_path)


if __name__ == "__main__":
    if args.model == 'vgg':
        model = VGG16(enable_lat=False, epsilon=0.6, pro_num=5,
                      batch_size=args.batch_size, if_dropout=True)
    elif args.model == 'resnet':
        model = ResNet50(enable_lat=False, epsilon=0.6, pro_num=5,
                         batch_size=args.batch_size, if_dropout=True)
    model.cuda()
    if os.path.exists(args.model_path):
        model.load_state_dict(torch.load(args.model_path))
        print('load model.')
    else:
        print("load failed.")
    if args.test_flag:
        test_all(model)
    else:
        test_op(model)
def main():
    if args.model == 'resnet':
        model = ResNet50(enable_lat=args.enable_lat,
                         epsilon=args.lat_epsilon,
                         pro_num=args.lat_pronum,
                         batch_size=args.model_batchsize,
                         num_classes=10,
                         if_dropout=args.dropout)
    elif args.model == 'vgg':
        model = VGG16(enable_lat=args.enable_lat,
                      epsilon=args.lat_epsilon,
                      pro_num=args.lat_pronum,
                      batch_size=args.model_batchsize,
                      num_classes=10,
                      if_dropout=args.dropout)
    elif args.model == 'resnet18':
        model = ResNet18(enable_lat=args.enable_lat,
                         epsilon=args.lat_epsilon,
                         pro_num=args.lat_pronum,
                         batch_size=args.model_batchsize,
                         num_classes=10,
                         if_dropout=args.dropout)
    elif args.model == 'densenet':
        model = DenseNet()
    elif args.model == 'inception':
        model = Inception_v2()
    model.cuda()
    model.load_state_dict(torch.load(args.modelpath))

    # If CIFAR, epsilon could be normalized from [0, 255] to [0, 1]:
    '''
    if args.dataset == 'cifar10':
        eps = args.attack_epsilon / 255.0
    else:
        eps = args.attack_epsilon
    '''
    eps = args.attack_epsilon

    # The last layer of DenseNet is F.log_softmax, while CrossEntropyLoss already contains Softmax()
    attack = Attack(dataroot="/media/dsg3/dsgprivate/lat/data/cifar10/",
                    dataset=args.dataset,
                    batch_size=args.attack_batchsize,
                    target_model=model,
                    criterion=nn.CrossEntropyLoss(),
                    epsilon=eps,
                    alpha=args.attack_alpha,
                    iteration=args.attack_iter)

    if args.attack == 'fgsm':
        test_data_cln, test_data_adv, test_label, test_label_adv = attack.fgsm()
    elif args.attack == 'ifgsm':
        test_data_cln, test_data_adv, test_label, test_label_adv = attack.i_fgsm()
    elif args.attack == 'stepll':
        test_data_cln, test_data_adv, test_label, test_label_adv = attack.step_ll()
    elif args.attack == 'pgd':
        test_data_cln, test_data_adv, test_label, test_label_adv = attack.PGD()
    elif args.attack == 'momentum_ifgsm':
        test_data_cln, test_data_adv, test_label, test_label_adv = attack.momentum_ifgsm()

    print(test_data_adv.size(), test_label.size(), type(test_data_adv))
    #test_data, test_label, size = read_data_label('./test_data_cln.p','./test_label.p')
    #test_data_adv, test_label_adv, size = read_data_label('./test_data_cln.p','./test_label.p')
    '''
    test_loader = attack.return_data()
    dataiter = iter(test_loader)
    images, labels = dataiter.next()
    print(images[0])
    '''
    #test_data_cln, test_data_adv, test_label, test_label_adv = attack.i_fgsm()
    #display(test_data_cln, test_data_adv, test_label, test_label_adv)
    if args.generate:
        save_data_label(args.savepath, eps, test_data_cln, test_data_adv,
                        test_label, test_label_adv)
def train(load_model=False):
    train_dataset = dataset.ImageFolder(config.train_path, transform=config.train_transform)
    train_data_loader = DataLoader(train_dataset, config.source_batch_size,
                                   sampler=ImbalancedDatasetSampler(train_dataset))
    # Note: the first test_dataset/test_data_loader pair (built from train_path) is
    # immediately overwritten by the pair built from test_path below.
    test_dataset = dataset.ImageFolder(config.train_path, transform=config.test_transform)
    test_data_loader = DataLoader(test_dataset, config.target_batch_size, shuffle=True)
    test_dataset = dataset.ImageFolder(config.test_path, transform=config.test_transform)
    test_data_loader = DataLoader(test_dataset, config.target_batch_size, shuffle=False)

    # define GPU
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # define model
    net = ResNet50(num_classes=config.class_num).to(device)
    if load_model:
        net = torch.load(config.model_path)
    cross_loss = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=1e-5, weight_decay=5e-4)

    for epoch in range(config.epoches):
        sum_loss = 0.
        correct = 0.
        total = 0.
        since = time.time()
        net.train()
        length = config.source_batch_size + config.target_batch_size
        dis_list = []
        for i, data in enumerate(train_data_loader):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs, dis = net(inputs, labels=labels)
            dis_list.append(dis)
            loss1 = cross_loss(outputs, labels)
            loss = loss1
            sum_loss += loss1
            _, pre = torch.max(outputs.data, 1)
            total += outputs.size(0)
            correct += torch.sum(pre == labels.data)
            train_acc = correct / total
            loss.backward()
            optimizer.step()
            iter_num = i + 1 + epoch * length
            # average over (i + 1) batches so the running loss is defined on the first iteration
            print('[epoch:%d, iter:%d] Loss: %f | Train_acc: %f | Time: %f'
                  % (epoch + 1, iter_num, sum_loss / (i + 1), train_acc, time.time() - since))
        mean_dis = np.mean(dis_list)

        # start to test
        if epoch % 1 == 0:
            print("start to test:")
            with torch.no_grad():
                correct = 0.
                total = 0.
                loss = 0.
                for i, data in enumerate(test_data_loader):
                    net.eval()
                    inputs_test, labels_test = data
                    inputs_test, labels_test = inputs_test.to(device), labels_test.to(device)
                    outputs_test, _ = net(inputs_test, mean_dis)
                    loss += cross_loss(outputs_test, labels_test)
                    # present_max, pred = torch.max(outputs.data, 1)
                    _, pred = torch.max(outputs_test.data, 1)
                    total += labels_test.size(0)
                    correct += torch.sum(pred == labels_test.data)
                test_acc = correct / total
                print('test_acc:', test_acc, '| time', time.time() - since)
##################################################### Hyperparameters #####################################################
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
network = 8
batch_size = 25
stop_epoch = 300
model_state = "submit"  # train, eval, submit
model_weight = "epoch_85.pkl"
tensorboard_path = "./tensorboard/loss_nolr"
ckpt_path = "ckpt"
##################################################### Hyperparameters #####################################################

# Select the backbone; the "restnet18"/"restnet50" spellings are passed through as-is
# and must match the keys expected by get_torch_model
if network == 1:
    net = ResNet18()
elif network == 2:
    net = ResNet50()
elif network == 3:
    net = get_torch_model("restnet18")
elif network == 4:
    net = get_torch_model("restnet50")
elif network == 5:
    net = get_torch_model("restnet18", fix_weight=True)
elif network == 6:
    net = get_torch_model("restnet50", fix_weight=True)
elif network == 7:
    net = get_torch_model("wide_resnet50_2")
elif network == 8:
    net = get_torch_model("efficientnet")
net.cuda()
def training(path="/", batch_size=10, epochs=30, save_dir="Saved/",
             save_file="dataSaved", sess=tf.Session()):
    # get image count, height, width, channels
    batche_num, height, width, channels = data.shape
    print(OKBLUE + "Input image size :" + ENDC, height, width, channels)

    if not PATH.isdir(save_dir):
        makedirs(save_dir)
        print(OKGREEN + save_dir, "is created" + ENDC)

    with sess:
        if PATH.isdir(save_dir) and PATH.isfile(save_dir + save_file + ".meta") \
                and PATH.isfile(save_dir + "checkpoint"):
            print(OKGREEN + "checkpoint files exist" + ENDC)
            saver = tf.train.import_meta_graph(save_dir + save_file + ".meta")
            # restore from save_dir (the original pointed at 'Save/', which does not
            # match the directory the checkpoint is written to below)
            saver.restore(sess, tf.train.latest_checkpoint(save_dir))
            print(OKGREEN + "data restored" + ENDC)
            graph = tf.get_default_graph()
            x = graph.get_tensor_by_name("t_picture:0")      # correct
            y = graph.get_tensor_by_name("t_labels:0")       # correct
            train = tf.get_collection('train_op')            # correct
            loss = tf.get_collection('loss_op')              # correct
            logists = tf.get_collection('logits_op')         # correct
            errors = tf.get_collection('errors')             # correct
        else:
            print(FAIL + "checkpoint files do not exist" + ENDC)
            # number of classes
            num_classes = len(classes)
            x = tf.placeholder(tf.float32, [None, height, width, channels], name='t_picture')
            y = tf.placeholder(tf.float32, [None, num_classes], name='t_labels')
            # sess = tf.InteractiveSession()
            logits = ResNet50(x, num_classes)
            softmax = tf.nn.softmax(logits)
            tf.add_to_collection('logits_op', logits)
            # Define a loss function (mean absolute error between labels and logits)
            loss = tf.reduce_mean(tf.abs(y - logits), name='Loss')
            tf.add_to_collection('loss_op', loss)
            # loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits)
            # loss = tf.nn.l2_loss(logits - y)
            # loss = tf.reduce_mean(-tf.reduce_sum(y*tf.log(logits), reduction_indices=1))
            # train = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(loss)
            train = tf.train.MomentumOptimizer(learning_rate=0.1,
                                               momentum=0.9).minimize(loss, name='Train_op')
            tf.add_to_collection('train_op', train)
            # train = tf.train.AdadeltaOptimizer().minimize(loss)
            # compare per-example argmax of prediction and label (axis=1; the original
            # omitted the axis, which reduces over the whole batch)
            correct_prediction = tf.equal(tf.argmax(softmax, 1), tf.argmax(y, 1))
            tf.add_to_collection('prediction_op', correct_prediction)
            acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            tf.add_to_collection('acc_op', acc)
            sess.run(tf.global_variables_initializer())
            errors = []
            tf.add_to_collection('errors', errors)

        print(OKGREEN + "training start" + ENDC)
        for _ in range(epochs):
            print(OKBLUE + "******************* ", _, " *******************" + ENDC)
            indice = np.random.permutation(batche_num)
            # note: this only covers batch_size - 1 mini-batches per epoch
            for i in range(batch_size - 1):
                min_batch = indice[i * batch_size:(i + 1) * batch_size]
                curr_loss, curr_train = sess.run([loss, train], {
                    x: data[min_batch],
                    y: labels[min_batch]
                })
                print("Iteration %d loss:\n%s" % (i, curr_loss))
                errors.append(curr_loss)

        print(OKGREEN + "training is finished" + ENDC)
        saver = tf.train.Saver()
        saver.save(sess, save_dir + save_file)
        print(OKGREEN + "files saved in :" + ENDC, save_dir)
        plt.plot(errors, label="loss")
        plt.xlabel('# iterations')
        plt.ylabel('loss (MAE)')
        plt.show()
        sess.close()
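
# Hedged usage sketch: restore the checkpoint written by training() and run the stored
# logits op on a batch of images. Placeholder and collection names ('t_picture:0',
# 'logits_op') and the default save_dir/save_file come from the function above; the
# predict() helper itself is an illustrative assumption, not part of the original code.
import tensorflow as tf

def predict(images, save_dir="Saved/", save_file="dataSaved"):
    with tf.Session() as sess:
        saver = tf.train.import_meta_graph(save_dir + save_file + ".meta")
        saver.restore(sess, tf.train.latest_checkpoint(save_dir))
        graph = tf.get_default_graph()
        x = graph.get_tensor_by_name("t_picture:0")
        logits = tf.get_collection('logits_op')[0]
        # return per-class probabilities for the given batch
        return sess.run(tf.nn.softmax(logits), feed_dict={x: images})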
def test_all():
    if args.model == 'resnet':
        model = ResNet50(enable_lat=args.enable_lat, epsilon=args.epsilon,
                         pro_num=args.pro_num, batch_size=args.batchsize,
                         if_dropout=args.dropout)
    elif args.model == 'vgg':
        model = VGG16(enable_lat=args.enable_lat, epsilon=args.epsilon,
                      pro_num=args.pro_num, batch_size=args.batchsize,
                      if_dropout=args.dropout)
    model.cuda()

    resnet_model_list = {
        'naive-resnet': "/media/dsg3/dsgprivate/yuhang/model/resnet50/naive/naive_param.pkl",
        'new-AT-resnet': "/media/dsg3/dsgprivate/yuhang/model/resnet50/nat/naive_param.pkl",
        'origin-AT-resnet': "/media/dsg3/dsgprivate/yuhang/model/resnet50/oat/naive_param.pkl",
        'ensemble-AT-resnet': "/media/dsg3/dsgprivate/yuhang/model/resnet50/eat/naive_param.pkl",
        'LAT-aaai-resnet': "/media/dsg3/dsgprivate/yuhang/model/resnet50/aaai/naive_param.pkl",
        'DPLAT-resnet50': "/media/dsg3/dsgprivate/yuhang/model/resnet50/dplat/lat_param.pkl",
        'DPLAT-resnet18': "/media/dsg3/dsgprivate/yuhang/model/resnet50/dplat-18/lat_param.pkl",
    }
    vgg_model_list = {
        'naive-vgg': "/media/dsg3/dsgprivate/yuhang/model/vgg16/naive/naive_param.pkl",
        'new-AT-vgg': "/media/dsg3/dsgprivate/yuhang/model/vgg16/nat/naive_param.pkl",
        'origin-AT-vgg': "/media/dsg3/dsgprivate/yuhang/model/vgg16/oat/naive_param.pkl",
        'ensemble-AT-vgg': "/media/dsg3/dsgprivate/yuhang/model/vgg16/eat/naive_param.pkl",
        'LAT-aaai-vgg': "/media/dsg3/dsgprivate/yuhang/model/vgg16/aaai/naive_param.pkl",
        'PLAT-vgg': "/media/dsg3/dsgprivate/yuhang/model/vgg16/plat/lat_param.pkl",
        'DPLAT-vgg': "/media/dsg3/dsgprivate/yuhang/model/vgg16/dplat/lat_param.pkl",
    }
    data_list = {
        'k3c0.03-vgg': "/media/dsg3/dsgprivate/lat/test_cw/test_adv(k3c0.030).p",
        'k5c0.05-vgg': "/media/dsg3/dsgprivate/lat/test_cw/test_adv(k5c0.050).p",
        'k3c0.03-resnet': "/media/dsg3/dsgprivate/lat/test_cw/resnet/test_adv(k3c0.030).p",
        'k5c0.05-resnet': "/media/dsg3/dsgprivate/lat/test_cw/resnet/test_adv(k5c0.050).p",
    }

    if args.model == 'vgg':
        model_list = vgg_model_list
    elif args.model == 'resnet':
        model_list = resnet_model_list

    for target in model_list:
        print('------- Now target model is {} ------'.format(target))
        model_path = model_list[target]
        # DPLAT-resnet18 uses a different architecture, so rebuild the model for it
        if target == 'DPLAT-resnet18':
            model = ResNet18(enable_lat=args.enable_lat, epsilon=args.epsilon,
                             pro_num=args.pro_num, batch_size=args.batchsize,
                             if_dropout=args.dropout).cuda()
        model.load_state_dict(torch.load(model_path))
        for data in data_list:
            data_path = data_list[data]
            test_one(model, data, data_path)
if args.test_flag:
    args.enable_lat = False

# switch models
if args.model == 'lenet':
    cnn = LeNet(enable_lat=args.enable_lat,
                epsilon=args.epsilon,
                pro_num=args.pro_num,
                batch_size=args.batchsize,
                batch_norm=args.batchnorm,
                if_dropout=args.dropout)
elif args.model == 'resnet':
    cnn = ResNet50(enable_lat=args.enable_lat,
                   epsilon=args.epsilon,
                   pro_num=args.pro_num,
                   batch_size=args.batchsize,
                   if_dropout=args.dropout)
    cnn.apply(conv_init)
elif args.model == 'resnet18':
    cnn = ResNet18(enable_lat=args.enable_lat,
                   epsilon=args.epsilon,
                   pro_num=args.pro_num,
                   batch_size=args.batchsize,
                   if_dropout=args.dropout)
elif args.model == 'vgg':
    cnn = VGG16(enable_lat=args.enable_lat,
                epsilon=args.epsilon,
                pro_num=args.pro_num,
                batch_size=args.batchsize,
                if_dropout=args.dropout)