Example #2
def inference(input_x, input_y):
    x = input_x
    y_ = input_y
    global_step = tf.Variable(0, trainable=False)

    #regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    le_network = LeNet()
    result_train = le_network.network_fn(x, is_training=True)
    result_test = le_network.network_fn(x, is_training=False)

    #correct_train = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    correct_test = tf.equal(tf.argmax(result_test, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_test, "float"))

    # loss, learning rate, moving_average
    #variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    #variables_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=result_train, labels=y_)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean # + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        cfg.LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / cfg.BATCH_SIZE,
        cfg.LEARNING_RATE_DECAY,
        staircase=True)

    # update_ops & tf.control_dependencies are needed when introducing batch_norm
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) 
    with tf.control_dependencies(update_ops): 
        train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
        train_op = train_step

    return train_op, loss, global_step, accuracy
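
# A minimal usage sketch for inference() (not from the source; the placeholder
# shapes and the `mnist`/`cfg` objects are assumptions based on the references above):
#
#     x = tf.placeholder(tf.float32, [None, 28, 28, 1])
#     y_ = tf.placeholder(tf.float32, [None, 10])
#     train_op, loss, global_step, accuracy = inference(x, y_)
#     with tf.Session() as sess:
#         sess.run(tf.global_variables_initializer())
#         xs, ys = mnist.train.next_batch(cfg.BATCH_SIZE)
#         _, step_loss = sess.run([train_op, loss],
#                                 feed_dict={x: xs.reshape(-1, 28, 28, 1), y_: ys})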
def main(args):
    transform_SVHN = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    trainset_SVHN = torchvision.datasets.SVHN(root=args.dataset_path[0], split='train', download=True, transform=transform_SVHN)
    testset_SVHN = torchvision.datasets.SVHN(root=args.dataset_path[0], split='test', download=True, transform=transform_SVHN)

    train_labelled_size = int(0.6 * len(trainset_SVHN))
    train_unlabelled_size = len(trainset_SVHN) - train_labelled_size
    val_size = int(0.2 * len(testset_SVHN))
    test_size = len(testset_SVHN) - val_size
    trainset_labelled, trainset_unlabelled = torch.utils.data.random_split(trainset_SVHN, [train_labelled_size, train_unlabelled_size])
    valset, testset = torch.utils.data.random_split(testset_SVHN, [val_size, test_size])

    # Increasing the batch size would reduce training time. The batch sizes for the
    # LeNet and VAT loaders can differ, e.g. 32 for LeNet and 128 for VAT.
    train_labelled_loader = DataLoader(trainset_labelled, batch_size=32, shuffle=True, num_workers=2)
    train_unlabelled_loader = DataLoader(trainset_unlabelled, batch_size=32, shuffle=True, num_workers=2)
    valloader = DataLoader(valset, batch_size=1, shuffle=True, num_workers=2)
    testloader = DataLoader(testset, batch_size=1, shuffle=False, num_workers=2)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Device: " + str(device))

    lenet0 = LeNet(device)
    lenet0 = lenet0.to(device)
    print(lenet0)

    criterion = nn.CrossEntropyLoss()
    criterion_VAT = VAT(device, eps=args.eps, xi=args.xi, k=args.k, use_entmin=args.use_entmin)
    optimizer = optim.Adam(lenet0.parameters(), lr=args.lr) # Should implement lr scheduler.
    # optimizer = optim.SGD(lenet0.parameters(), lr=args.lr, momentum=0.9)
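    # One hedged way to address the lr-scheduler TODO above (a sketch, not from
    # the source): halve the learning rate every 10 epochs and call
    # scheduler.step() once per epoch inside train().
    # scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)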

    if args.eval_only:
        loadsave(lenet0, optimizer, "VATcheck", root=args.weights_path[0], mode='load')

    else:
        supervised_loss, unsupervised_loss = train(lenet0, optimizer, criterion, criterion_VAT, train_labelled_loader, train_unlabelled_loader, valloader, testloader, args.alpha, args.epochs, device, args.weights_path[0])

        plt.subplot(2,1,1)
        plt.plot(supervised_loss)
        plt.title("Supervised loss")
        plt.xlabel("Epochs")
        plt.ylabel("Loss")
        plt.grid(True)

        plt.subplot(2,1,2)
        plt.plot(unsupervised_loss)
        plt.title("Unsupervised loss")
        plt.xlabel("Epochs")
        plt.ylabel("Loss")
        plt.grid(True)

        plt.show()

        loadsave(lenet0, optimizer, "VATcheck", root=args.weights_path[0], mode='load')

    vat_acc = evaluate_classifier(lenet0, testloader, device)
    print("Accuracy of the network on SVHN is %d%%\n" % (vat_acc * 100))

    barchartplot(lenet0, testloader, device)
Example #4
def mnist_lenet(input_shape, classes):
    model = LeNet(input_shape, classes)

    optimizer = keras.optimizers.SGD(lr=0.01)
    model.compile(optimizer=optimizer,
                  loss=keras.losses.categorical_crossentropy,
                  metrics=['accuracy'])
    return model
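
# Usage sketch (an assumption, not from the source; shapes are standard MNIST):
# model = mnist_lenet(input_shape=(28, 28, 1), classes=10)
# model.fit(x_train, y_train, batch_size=128, epochs=10, validation_split=0.1)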
Example #5
 def _train(self, input_dim, num_classes, params, X_train, y_train, X_valid,
            y_valid):
     lenet_config = LeNetConfig(input_dim, num_classes, params)
     lenet = LeNet(lenet_config)
     history, best_valid_loss, best_valid_accuracy = lenet.fit(X_train,
                                                               y_train,
                                                               X_valid,
                                                               y_valid,
                                                               debug=False)
     return history, best_valid_loss, best_valid_accuracy, lenet
class Client:
    def __init__(self, name, train_data_dir, test_data_dir):
        self.name = name

        transform = transforms.ToTensor()

        trainset = datasets.ImageFolder(train_data_dir, transform=transform)
        self.trainloader = torch.utils.data.DataLoader(
            trainset,
            batch_size=BATCH_SIZE,
            shuffle=True
        )

        testset = datasets.ImageFolder(test_data_dir, transform=transform)
        self.testloader = torch.utils.data.DataLoader(
            testset,
            batch_size=BATCH_SIZE,
            shuffle=False
        )

        # number of batches per epoch; len(self.trainloader) avoids materializing
        # the whole dataset the way list(self.trainloader) would
        self.dataset_len = len(self.trainloader)

        self.net = LeNet().to(device)

        self.criterion = nn.CrossEntropyLoss()

    def update(self, net_dict, center_params_dict):
        self.net.load_state_dict(net_dict)
        # create the optimizer once; rebuilding it every batch would discard the
        # momentum buffers
        optimizer = optim.SGD(self.net.parameters(), lr=LR, momentum=0.9)

        for i in range(LOCAL_EPOCH_NUM):
            data_iter = iter(self.trainloader)
            for b in range(self.dataset_len):
                inputs, labels = next(data_iter)
                # keep only the first image channel
                inputs = torch.index_select(inputs, 1, torch.LongTensor([0]))
                inputs, labels = inputs.to(device), labels.to(device)
                outputs = self.net(inputs)
                loss = self.criterion(outputs, labels)
                optimizer.zero_grad()
                loss.backward()

                params_modules = list(self.net.named_parameters())
                for params_module in params_modules:
                    name, params = params_module
                    params.grad += MU * (params.data - center_params_dict[name])

                optimizer.step()

        return self.net.state_dict()
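
# Note on update(): adding MU * (w - w_center) to each gradient is equivalent to
# minimizing loss + (MU / 2) * ||w - w_center||^2, a FedProx-style proximal term
# that keeps the local model close to the global (center) parameters.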
Example #7
def main():

    print("\033[37mSanVik2000".center(os.get_terminal_size().columns))
    print("\033[37mLibrary of Famous Classification Algorithms".center(
        os.get_terminal_size().columns))
    print("\033[37mImplemented in PyTorch".center(
        os.get_terminal_size().columns))

    Print_Line()

    print("\033[93mSOFTWARE DETAILS\033[00m".center(
        os.get_terminal_size().columns))
    print("\033[00m================\033[00m".center(
        os.get_terminal_size().columns))
    print("Python Version       : \033[93m", sys.version)
    print("\033[00mPyTorch Verison      : \033[93m", torch.__version__)
    print("\033[00mPyTorch using Device : \033[93m", device, "\033[00m")

    Print_Line()

    print(
        "Choose from the following models to classify images using the CIFAR-10 Dataset\033[00m"
    )
    print("*\033[93m 1 \033[00m* LeNet")
    print("*\033[93m 2 \033[00m* AlexNet")
    print("*\033[93m 3 \033[00m* VGG16")
    choice = int(input("Enter model choice : "))
    if choice == 1:
        model = LeNet()
        model_name = "LeNet"
        image_size = 32
    elif choice == 2:
        model = AlexNet()
        model_name = "AlexNet"
        image_size = 256
    elif choice == 3:
        model = VGG()
        model_name = "VGG16"
        image_size = 32

    Print_Line()

    trainloader, testloader, classes = Prepare_Data(image_size)

    model = model.to(device)

    Train_Model(model, trainloader, model_name)
    Test_Model(model, testloader, model_name)
class Client:
    def __init__(self, name, train_data_dir, test_data_dir, pk, sk):
        self.name = name
        self.pk = pk
        self.sk = sk

        transform = transforms.ToTensor()

        trainset = datasets.ImageFolder(train_data_dir, transform=transform)
        self.trainloader = torch.utils.data.DataLoader(trainset,
                                                       batch_size=BATCH_SIZE,
                                                       shuffle=True)

        testset = datasets.ImageFolder(test_data_dir, transform=transform)
        self.testloader = torch.utils.data.DataLoader(testset,
                                                      batch_size=BATCH_SIZE,
                                                      shuffle=False)

        # number of batches per epoch; len(self.trainloader) avoids materializing
        # the whole dataset the way list(self.trainloader) would
        self.dataset_len = len(self.trainloader)

        self.net = LeNet().to(device)

        self.criterion = nn.CrossEntropyLoss()

    def get_encrypted_grad(self, client_inputs, client_labels, net_dict):
        self.net.load_state_dict(net_dict)
        client_outputs = self.net(client_inputs)
        client_loss = self.criterion(client_outputs, client_labels)
        client_optimizer = optim.SGD(self.net.parameters(),
                                     lr=LR,
                                     momentum=0.9)
        client_optimizer.zero_grad()
        client_loss.backward()

        params_modules = list(self.net.named_parameters())
        params_grad_list = []
        for params_module in params_modules:
            name, params = params_module
            params_grad_list.append(copy.deepcopy(params.grad).view(-1))

        # fixed-point encode: shift by `bound` to make values non-negative, scale
        # by 2**prec, and truncate to integers so they can be encrypted
        params_grad = ((torch.cat(params_grad_list, 0) + bound) *
                       2**prec).long().cuda()
        client_encrypted_grad = Enc(self.pk, params_grad)

        client_optimizer.zero_grad()

        return client_encrypted_grad
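
    # Server-side sketch of undoing the fixed-point encoding above (hypothetical:
    # `Dec` is assumed to be the decryption counterpart of Enc):
    # plain = Dec(self.sk, client_encrypted_grad)
    # grad_flat = plain.float() / 2 ** prec - bound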
Example #9
def train_student():
    ckpt_path = './train_dir/mnist_student.ckpt'
    train_data, train_labels, test_data, test_labels = preprocessing()
    # student_share = 1000
    student_share = FLAGS.stdnt_share
    train_data = test_data[:student_share]
    # train_labels = test_labels[:student_share]
    weightsPath = './weights/LeNet.hdf5'
    lenet = LeNet.build(width=28, height=28, depth=1, classes=10)
    lenet.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.Adam(),
                  metrics=['accuracy'])
    lenet.load_weights(weightsPath)
    train_labels = lenet.predict(train_data)
    # print(train_labels[:10])
    # x = input()
    train_data = train_data.reshape(train_data.shape[0], -1)
    test_data = test_data.reshape(test_data.shape[0], -1)
    test_data = test_data[student_share:]
    test_labels = test_labels[student_share:]

    print("data preprocessing done")
    assert train_with_noise_ce(train_data, train_labels, ckpt_path)

    ckpt_path_final = ckpt_path + '-' + str(FLAGS.max_steps - 1)
    logits = softmax_preds(train_data, ckpt_path_final)
    accuracy = np.sum(np.argmax(logits, -1) == np.argmax(
        train_labels, -1)) / len(train_labels)
    print("student's train accuracy is ", accuracy)
    logits = softmax_preds(test_data, ckpt_path_final)
    accuracy = np.sum(
        np.argmax(logits, -1) == np.argmax(test_labels, -1)) / len(test_labels)
    print("student's test accuracy is ", accuracy)
    return True
Example #10
def distillation():
    '''
    Distill the LeNet teacher's knowledge into a student model.
    '''
    train_data, train_labels, test_data, test_labels = preprocessing()
    student_share = 1000
    train_data = test_data[:student_share]
    # train_labels = test_labels[:student_share]
    weightsPath = './weights/LeNet.hdf5'
    lenet = LeNet.build(width=28, height=28, depth=1, classes=10)
    lenet.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.Adam(),
                  metrics=['accuracy'])
    lenet.load_weights(weightsPath)
    train_labels = lenet.predict(train_data)
    # print(train_labels[:10])
    # x = input()
    train_data = train_data.reshape(train_data.shape[0], -1)
    test_data = test_data.reshape(test_data.shape[0], -1)
    test_data = test_data[student_share:]
    test_labels = test_labels[student_share:]

    op = keras.optimizers.Adam()
    model = StudentModel.build(train_data.shape[1], 10)
    model.compile(optimizer=op,
                  loss=lambda y_true, y_pred: loss_fun(y_true, y_pred),
                  metrics=['accuracy'])
    model.fit(train_data, train_labels, batch_size=128, nb_epoch=20, verbose=1)
    print("[INFO] evaluating...")
    (loss, accuracy) = model.evaluate(test_data,
                                      test_labels,
                                      batch_size=128,
                                      verbose=1)
    print("[INFO] accuracy: {:.2f}%".format(accuracy * 100))
Example #11
class Client:
    def __init__(self, name, train_data_dir, test_data_dir):
        self.name = name

        transform = transforms.ToTensor()

        trainset = datasets.ImageFolder(train_data_dir, transform=transform)
        self.trainloader = torch.utils.data.DataLoader(trainset,
                                                       batch_size=BATCH_SIZE,
                                                       shuffle=True)

        testset = datasets.ImageFolder(test_data_dir, transform=transform)
        self.testloader = torch.utils.data.DataLoader(testset,
                                                      batch_size=BATCH_SIZE,
                                                      shuffle=False)

        # number of batches per epoch; len(self.trainloader) avoids materializing
        # the whole dataset the way list(self.trainloader) would
        self.dataset_len = len(self.trainloader)

        self.net = LeNet().to(device)

        self.criterion = nn.CrossEntropyLoss()

    def get_grad(self, client_inputs, client_labels, net_dict):
        self.net.load_state_dict(net_dict)
        client_outputs = self.net(client_inputs)
        client_loss = self.criterion(client_outputs, client_labels)
        client_optimizer = optim.SGD(self.net.parameters(),
                                     lr=LR,
                                     momentum=0.9)
        client_optimizer.zero_grad()
        client_loss.backward()

        client_grad_dict = dict()
        params_modules = list(self.net.named_parameters())
        for params_module in params_modules:
            name, params = params_module
            params_grad = copy.deepcopy(params.grad)
            client_grad_dict[name] = params_grad
        client_optimizer.zero_grad()
        return client_grad_dict
Example #12
def main():
    # image shape: N x H x W, pixel [0, 255]
    # label shape: N x 10
    with np.load(r'mnist.npz', allow_pickle=True) as f:
        x_train, y_train = f['x_train'], f['y_train']
        x_test, y_test = f['x_test'], f['y_test']

    plt.imshow(x_train[59999], cmap='gray')
    plt.show()
    print(x_train.shape, x_train[0].max(), x_train[0].min())  # (60000, 28, 28) 255 0
    print(x_test.shape, x_test[0].max(), x_test[0].min())  # (10000, 28, 28) 255 0
    
    x_train = normalize_image(x_train)
    x_test = normalize_image(x_test)
    y_train = one_hot_labels(y_train)
    y_test = one_hot_labels(y_test)

    net = LeNet()
    averagetime, _ = net.fit(x_train, y_train, x_test, y_test, epoches=10, batch_size=16, lr=1e-3)
    accuracy = net.evaluate(x_test, labels=y_test)
    print("final accuracy {}".format(accuracy))
    print(averagetime,'s')
def model_attack(data_local,
                 data_backdoor,
                 net_dict,
                 client_net,
                 local_epochs=10,
                 e=0.001,
                 batch=128,
                 c=112):
    '''
    Model (backdoor) attack.
    :param data_local: clean local data
    :param data_backdoor: backdoor data
    :param net_dict: global network parameters
    :param client_net: local network
    :param local_epochs: number of local training epochs
    :param e: loss threshold; training stops once the backdoor loss falls below it
    :param batch: number of samples fed per training step
    :param c: number of samples in each batch replaced with backdoor data
    :return: the trained model X
    '''
    client_net.load_state_dict(net_dict)
    gamma = Num_client / LR
    t_net = LeNet()
    opt_local = optim.Adam(client_net.parameters(), lr=LR, betas=(0.9, 0.99))

    for epoch in range(local_epochs):
        if get_loss(client_net, data_backdoor) < e:
            break
        for start in range(0, len(data_local), batch):
            b = [
                data_local[0][start:start + batch],
                data_local[1][start:start + batch]
            ]
            b = replace(c, b, data_backdoor)
            grad = get_client_grad(b[0], b[1], client_net.state_dict(), t_net)
            params_modules_attacker = client_net.named_parameters()
            for params_module in params_modules_attacker:
                (name_attacker, params) = params_module
                params.grad = grad[name_attacker]
            opt_local.step()
    params_modules_attacker = list(client_net.named_parameters())
    params_modules_G = list(net.named_parameters())
    params_modules_L = {}
    for i in range(len(params_modules_attacker)):
        params_modules_L[params_modules_attacker[i][0]] = -(
            gamma * (params_modules_attacker[i][1] - params_modules_G[i][1]) +
            params_modules_G[i][1])
    # client_net.load_state_dict(params_modules_L)
    return params_modules_L
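
# Note on the return value above (a hedged reading, not stated in the source):
# scaling the deviation from the global weights G by gamma = Num_client / LR is
# the "model replacement" trick, amplifying the attacker's update so the backdoor
# survives server-side averaging across clients.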
Example #14
def main():
    # Setting the processing to GPU
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)
    # Loading Training and Testing Dataset from CIFAR10
    batch_size = 128
    train_set = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transforms.ToTensor())
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True)

    test_set = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transforms.ToTensor())
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False)

    # Initializing the LeNet Object
    net = LeNet().to(device)
    # Initializing the Cross Entropy Loss Function
    loss_fn = nn.CrossEntropyLoss()
    # Initializing the Adam Optimizer
    opt = optim.Adam(net.parameters())

    start_time = time.time()
    # Training the Model
    net.fit(train_loader, test_loader, max_epochs=16, opt=opt, loss_fn=loss_fn)
    duration = time.time() - start_time
    print("Duration of Model: {:.2f} secs".format(duration))
Example #15
def main(_):
    iterations = FLAGS.iterations
    lr = FLAGS.lr
    batch_size = FLAGS.batch_size

    mu = FLAGS.mu
    sigma = FLAGS.sigma

    lenet_part = LeNet(iterations=iterations, lr=lr, batch_size=batch_size, mu=mu, sigma=sigma)

    gd = gif_drawer()

    show_all_variables()

    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth=True

    with tf.Session(config=run_config) as sess:
        mnist = input_data.read_data_sets('./data/mnist',one_hot=True)
        sess.run(tf.global_variables_initializer())
        test_images = mnist.test.images
        test_images = np.reshape(test_images,[-1,28,28,1])
        test_images = np.pad(test_images,((0,0),(2,2),(2,2),(0,0)), 'constant')
        for ii in range(lenet_part.iterations):
            batch_xs, batch_ys = mnist.train.next_batch(lenet_part.batch_size)
            batch_xs = np.reshape(batch_xs,[-1,28,28,1])
            batch_xs = np.pad(batch_xs, ((0,0),(2,2),(2,2),(0,0)), 'constant')
            sess.run(lenet_part.train_op, feed_dict={lenet_part.raw_input_image:batch_xs, lenet_part.raw_input_label: batch_ys})

            if ii % 10 == 0:
                validation_images, validation_labels = mnist.validation.next_batch(100)
                validation_images = np.reshape(validation_images,[-1,28,28,1])
                validation_images = np.pad(validation_images,((0,0),(2,2),(2,2),(0,0)), 'constant')

                acc, los = sess.run([lenet_part.accuracy, lenet_part.cross_entropy], \
                    feed_dict={lenet_part.raw_input_image:validation_images,\
                    lenet_part.raw_input_label:validation_labels})
                print("Iteration [%5d/%5d]: accuracy is: %4f loss is: %4f"%(ii,lenet_part.iterations,acc,los))
                gd.draw(ii, acc, los)
        gd.save('./train.png')
        for ii in range(10):
            acc = sess.run(lenet_part.accuracy, \
                    feed_dict={lenet_part.raw_input_image:test_images,\
                    lenet_part.raw_input_label:mnist.test.labels})
            print("Test: accuracy is %4f"%(acc))
Example #16
def test(model_save_dir, X_test, y_test):

    # rebuild the graph structure, then restore the trained variable values below
    tf.reset_default_graph()
    lenet = LeNet(img_w=32, img_h=32, img_channel=1, n_classes=43)

    # load the values of variables.
    # values only exist within a session
    # evaluate the model
    with tf.Session() as sess:
        var_list = tf.global_variables()

        saver = tf.train.Saver(var_list=var_list)
        saver.restore(sess, tf.train.latest_checkpoint('./model/lenet5/'))

        feed = {lenet.x: X_test, lenet.labels: y_test}
        test_accuracy = sess.run(lenet.accuracy, feed_dict=feed)
        print("Test Accuracy = {:.3f}".format(test_accuracy))
Example #17
def build_lenet():
    train_data, train_labels, test_data, test_labels = preprocessing()
    train_labels = perturb(train_labels)
    print(train_data.shape)
    op = keras.optimizers.Adam()
    model = LeNet.build(width=28, height=28, depth=1, classes=10)
    model.compile(loss='binary_crossentropy',
                  optimizer=op,
                  metrics=['accuracy'])

    print("[INFO] training...")
    model.fit(train_data, train_labels, batch_size=128, nb_epoch=30, verbose=1)

    print("[INFO] evaluating...")
    (loss, accuracy) = model.evaluate(test_data,
                                      test_labels,
                                      batch_size=128,
                                      verbose=1)
    print("[INFO] accuracy: {:.2f}%".format(accuracy * 100))

    weightsPath = './weights/LeNet.hdf5'
    print("[INFO] dumping weights to file...")
    model.save_weights(weightsPath, overwrite=True)
Example #18
# convert the labels from integers to vectors
trainY = to_categorical(trainY, num_classes=len(labellist))
testY = to_categorical(testY, num_classes=len(labellist))

# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=30,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         shear_range=0.2,
                         zoom_range=0.2,
                         horizontal_flip=True,
                         fill_mode="nearest")

# initialize the model
print("[INFO] compiling model...")
model = LeNet.build(width=64, height=64, depth=3, classes=len(labellist))
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="categorical_crossentropy",
              optimizer=opt,
              metrics=["accuracy"])

# train the network
print("[INFO] training network...")
H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
                        validation_data=(testX, testY),
                        steps_per_epoch=len(trainX) // BS,
                        epochs=EPOCHS,
                        verbose=1)

# save the model to disk
print("[INFO] serializing network...")
Example #19
def runCNN(datasets):
    lenet = LeNet()
    lenet.fit(datasets)
from dataset import train_ds

# Hyper parameters
epoch_num = 300
batch_size = 128
lr = 1e-4  # learning rate
workers = 2  # subprocess number for load the image
weight_decay = 1e-3

train_ds_size = 42000  # the size of your train dataset

# dataset
train_dl = DataLoader(train_ds, batch_size, True, num_workers=workers)

# use cuda if you have GPU
net = LeNet()
net = net.cuda()

# optimizer
opt = torch.optim.Adam(net.parameters(), lr=lr,
                       weight_decay=weight_decay)  # optimizer for network

# loss function
loss_func = nn.CrossEntropyLoss()

# train the network
start = time()

for epoch in range(epoch_num):

    for step, (data, target) in enumerate(train_dl, 1):
Example #21
File: train.py  Project: Dcrdn/ASL
# (head of this call is truncated in the source; reconstructed from the names used below)
(trainX, testX, trainY, testY) = train_test_split(data,
                                                  labels,
                                                  test_size=0.25,
                                                  random_state=42)
trainY = to_categorical(trainY, num_classes=2)  # change this to the 26 classes here, Diego
testY = to_categorical(testY, num_classes=2)
# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=30,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         shear_range=0.2,
                         zoom_range=0.2,
                         horizontal_flip=True,
                         fill_mode="nearest")

print("[INFO] compiling model...")
model = LeNet.build(width=28, height=28, depth=3, classes=2)
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(
    loss="binary_crossentropy",
    optimizer=opt,  # for 2 classes use binary_crossentropy; for more, categorical_crossentropy
    metrics=["accuracy"])

# train the network
print("[INFO] training network...")
H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
                        validation_data=(testX, testY),
                        steps_per_epoch=len(trainX) // BS,
                        epochs=EPOCHS,
                        verbose=1)

# save the model to disk
Example #22
if __name__ == "__main__":
    if args.enable_lat:
        real_model_path = args.model_path + "lat_param.pkl"
        print('loading the LAT model')
    else:
        real_model_path = args.model_path + "naive_param.pkl"
        print('loading the naive model')
    
    if args.test_flag:
        args.enable_lat = False
    
    # switch models
    if args.model == 'lenet':
        cnn = LeNet(enable_lat=args.enable_lat,
                    epsilon=args.epsilon,
                    pro_num=args.pro_num,
                    batch_size=args.batchsize,
                    batch_norm=args.batchnorm,
                    if_dropout=args.dropout)
    elif args.model == 'resnet':
        cnn = ResNet50(enable_lat=args.enable_lat,
                    epsilon=args.epsilon,
                    pro_num=args.pro_num,
                    batch_size=args.batchsize,
                    if_dropout=args.dropout)
        cnn.apply(conv_init)
    elif args.model == 'vgg':
        cnn = VGG16(enable_lat=args.enable_lat,
                    epsilon=args.epsilon,
                    pro_num=args.pro_num,
                    batch_size=args.batchsize,
                    if_dropout=args.dropout)
Example #23
from LeNet import LeNet
from dataset import test_ds

# Hyper parameters
batch_size = 128
workers = 2  # subprocess number for load the image
module_dir = './modle/net299-329.pth'

pred_label = []

# dataset
test_dl = DataLoader(test_ds, batch_size, num_workers=workers)

# use cuda if you have GPU
net = LeNet().cuda()
net.load_state_dict(torch.load(module_dir))
net.eval()

# run predictions on the test set
for step, data in enumerate(test_dl, 1):

    data = data.cuda()

    with torch.no_grad():
        outputs = net(data)

    outputs = torch.max(outputs, 1)[1].data.cpu().numpy().tolist()
    pred_label += outputs

# write the predictions to a csv file
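# The write-out itself is truncated in the source; a sketch (assuming pandas and
# a Kaggle-style submission format):
# import pandas as pd
# pd.DataFrame({'ImageId': range(1, len(pred_label) + 1),
#               'Label': pred_label}).to_csv('submission.csv', index=False)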
Example #24
def main():
    print("Loading data...\n")

    dataset = get_dataset()
    (train_X, train_Y), (test_X, test_Y) = process_data(dataset)

    # Convert the dataset into torch tensors
    train = data_utils.TensorDataset(
        torch.tensor(train_X).float(),
        torch.tensor(train_Y).long())
    test = data_utils.TensorDataset(
        torch.tensor(test_X).float(),
        torch.tensor(test_Y).long())

    train_loader = data_utils.DataLoader(train,
                                         batch_size=BATCH_SIZE,
                                         shuffle=True,
                                         num_workers=5,
                                         sampler=None,
                                         pin_memory=False)

    test_loader = data_utils.DataLoader(
        test,  # dataset to load from
        batch_size=BATCH_SIZE,  # examples per batch (default: 1)
        shuffle=False,
        sampler=
        None,  # if a sampling method is specified, `shuffle` must be False
        num_workers=5,  # subprocesses to use for sampling
        pin_memory=False)  # whether to return an item pinned to GPU

    # Loaders used for word-level accuracy on the training and the test set
    default_train_loader = get_default_train_loader()
    default_test_loader = get_default_test_loader()

    if args.model == "lenet":
        print("Running LeNet on OCR")
        model = LeNet()
    else:
        print("Running AlexNet on OCR")
        model = AlexNet(num_classes=26)

    model.to(device)

    criterion = nn.CrossEntropyLoss()

    optimizer = optim.LBFGS(model.parameters(), history_size=5, max_iter=5)

    if args.num_epochs is not None:
        NUM_EPOCHS = args.num_epochs
    else:
        NUM_EPOCHS = 100

    print("Starting Training...\n")

    letter_training_accuracies = []
    letter_test_accuracies = []
    word_training_accuracies = []
    word_test_accuracies = []

    for epoch in range(NUM_EPOCHS):
        print("Processing epoch {}".format(epoch + 1))
        running_loss = 0.0

        for i_batch, sample in enumerate(train_loader, 0):
            train_X = sample[0]
            train_Y = sample[1]
            train_X, train_Y = train_X.to(device), train_Y.to(device)
            train_Y_labels = torch.max(train_Y, 1)[1]

            def closure():
                optimizer.zero_grad()
                outputs = model(train_X)
                outputs.to(device)
                tr_loss = criterion(outputs, train_Y_labels)
                print('Loss at epoch {}, batch {}: {}'.format(
                    epoch + 1, i_batch, tr_loss.item()))
                tr_loss.backward()
                del outputs
                return tr_loss

            optimizer.step(closure)

            del train_X, train_Y, train_Y_labels

        # Calculate the letter-level accuracy on the training and the test set
        letter_training_accuracy = letter_accuracy(train_loader, model)
        letter_test_accuracy = letter_accuracy(test_loader, model)
        letter_training_accuracies.append(letter_training_accuracy)
        letter_test_accuracies.append(letter_test_accuracy)

        word_training_accuracy = word_accuracy(default_train_loader, model)
        word_test_accuracy = word_accuracy(default_test_loader, model)
        word_training_accuracies.append(word_training_accuracy)
        word_test_accuracies.append(word_test_accuracy)

        print('\nLetter Training Accuracy on epoch {}: {}'.format(
            epoch + 1, letter_training_accuracy))
        print('Letter Test Accuracy on epoch {}: {}'.format(
            epoch + 1, letter_test_accuracy))
        print('Word Training Accuracy on epoch {}: {}'.format(
            epoch + 1, word_training_accuracy))
        print('Word Test Accuracy on epoch {}: {}\n'.format(
            epoch + 1, word_test_accuracy))

    final_letter_test_accuracy = letter_accuracy(test_loader, model)
    final_word_test_accuracy = word_accuracy(default_test_loader, model)

    print("Letter Test accuracy of {} on OCR Data: {}".format(
        args.model, final_letter_test_accuracy))
    print("Word Test accuracy of {} on OCR Data: {}".format(
        args.model, final_word_test_accuracy))

    save_accuracies(letter_training_accuracies, letter_test_accuracies,
                    "letter", args.model, "lbfgs")
    save_accuracies(word_training_accuracies, word_test_accuracies, "word",
                    args.model, "lbfgs")

    # Save the model
    print("Saving {} model to {}".format(args.model, PATH))
    torch.save(model, PATH)
def main():
    mlp_hiddens = [1000]
    filter_sizes = [(9, 9), (5, 5), (5, 5)]
    feature_maps = [80, 50, 20]
    pooling_sizes = [(3, 3), (2, 2), (2, 2)]
    save_to = "DvC.pkl"
    image_size = (128, 128)
    output_size = 2
    learningRate = 0.1
    num_epochs = 300
    num_batches = None
    if socket.gethostname() == 'tim-X550JX':
        host_plot = 'http://*****:*****'  # plotting-server URL redacted in the source
    # (code between here and the Plot extension is lost in the source snippet)
    extensions.append(
        Plot('%s %s @ %s' %
             ('CNN ', datetime.datetime.now(), socket.gethostname()),
             channels=[['train_error_rate', 'valid_error_rate'],
                       ['train_total_gradient_norm']],
             after_epoch=True,
             server_url=host_plot))

    model = Model(cost)

    main_loop = MainLoop(algorithm,
                         stream_data_train,
                         model=model,
                         extensions=extensions)

    main_loop.run()
Example #26
def run_training(X_train,
                 y_train,
                 X_valid,
                 y_valid,
                 num_epoch,
                 batch_size,
                 learning_rate,
                 model_save_dir,
                 restorePath=None):
    log_dir = './result'

    if not os.path.exists(model_save_dir):
        os.makedirs(model_save_dir)

    # build LeNet
    lenet = LeNet(img_w=32, img_h=32, img_channel=1, n_classes=43)

    with tf.name_scope("train"):
        train_step = tf.train.AdamOptimizer(learning_rate).minimize(lenet.loss)

    num_examples = X_train.shape[0]

    print("starts training")

    with tf.Session() as sess:

        train_writer = tf.summary.FileWriter(log_dir + '/train', sess.graph)
        # initialize variables
        sess.run(tf.global_variables_initializer())

        # Create a saver.
        # The tf.train.Saver must be created after the variables that you want to restore (or save).
        # Additionally it must be created in the same graph as those variables.

        # restore training
        if restorePath:
            print("restore training")
            var_list = tf.global_variables()
            saver = tf.train.Saver(var_list=var_list)
            saver.restore(sess, tf.train.latest_checkpoint(model_save_dir))
        # start a new saver
        else:
            saver = tf.train.Saver(max_to_keep=1)

        # each epoch will shuffle the entire training data
        for ep in range(num_epoch):
            print("epoch: ", ep)
            X_train, y_train = shuffle(X_train, y_train)

            # train on each batch
            for offset in range(0, num_examples, batch_size):
                end = offset + batch_size
                batch_x, batch_y = X_train[offset:end], y_train[offset:end]

                feed = {lenet.x: batch_x, lenet.labels: batch_y}
                _, loss, summary = sess.run(
                    [train_step, lenet.loss, lenet.merged], feed_dict=feed)
                # print("summary", summary)
                # print("offset+num_examples*ep", offset+num_examples*ep)
                train_writer.add_summary(summary, offset + num_examples * ep)

            # loss on the last training batch of this epoch
            print("loss=", loss)

            # every 10 epochs, evaluate on the validation set and save the model
            if ep % 10 == 0:
                feed = {lenet.x: X_valid, lenet.labels: y_valid}
                accuracy = sess.run(lenet.accuracy, feed_dict=feed)

                print("accuracy = ", accuracy)
                # Append the step number to the checkpoint name:
                saver.save(sess, model_save_dir + '/my-model', global_step=ep)
Example #27
def evaluate(X_data, y_data):
    # (head reconstructed from the standard pattern; the source is truncated above)
    num_examples = len(X_data)
    total_accuracy = 0
    sess = tf.get_default_session()
    for offset in range(0, num_examples, BATCH_SIZE):
        batch_x, batch_y = X_data[offset:offset + BATCH_SIZE], y_data[offset:offset + BATCH_SIZE]
        accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})
        total_accuracy += (accuracy * len(batch_x))
    return total_accuracy / num_examples


EPOCHS = 90
BATCH_SIZE = 1024

x = tf.placeholder(tf.float32, (None, 32, 32, 3))
y = tf.placeholder(tf.int32, (None))
one_hot_y = tf.one_hot(y, 43)
rate = 0.001

logits = LeNet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                        labels=one_hot_y)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate=rate)
training_operation = optimizer.minimize(loss_operation)

correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()

### Train the model here.

print('PASS')
with tf.Session() as sess:
    print('PASS')
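    # The source snippet is truncated here; a standard TF1 training loop for the
    # graph above would look roughly like this (a sketch, assuming X_train/y_train
    # arrays and sklearn.utils.shuffle):
    # sess.run(tf.global_variables_initializer())
    # for epoch in range(EPOCHS):
    #     X_train, y_train = shuffle(X_train, y_train)
    #     for offset in range(0, len(X_train), BATCH_SIZE):
    #         batch_x = X_train[offset:offset + BATCH_SIZE]
    #         batch_y = y_train[offset:offset + BATCH_SIZE]
    #         sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})
    # saver.save(sess, './lenet')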
Example #28
def main(port_data):
    mlp_hiddens = [500]
    filter_sizes = [(3, 3), (3, 3)]
    feature_maps = [20, 20]
    pooling_sizes = [(3, 3), (2, 2)]
    save_to = "DvC.pkl"
    image_size = (128, 128)
    output_size = 2
    learningRate = 0.1
    num_epochs = 300
    num_batches = None
    if socket.gethostname() == 'tim-X550JX':
        host_plot = 'http://*****:*****'  # plotting-server URL redacted in the source
    # (code between here and the Plot extension is lost in the source snippet)
    extensions.append(
        Plot('%s %s @ %s' %
             ('CNN ', datetime.datetime.now(), socket.gethostname()),
             channels=[['train_error_rate', 'valid_error_rate'],
                       ['train_total_gradient_norm']],
             after_epoch=True,
             server_url=host_plot))

    model = Model(cost)

    main_loop = MainLoop(
        algorithm,
        stream_data_train,
        model=model,
        extensions=extensions)

    main_loop.run()


def weight_init(m):
    if isinstance(m, nn.Conv2d):
        # He initialization: std = sqrt(2 / n), n = fan-out of the conv kernel
        n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        m.weight.data.normal_(0, math.sqrt(2. / n))
    elif isinstance(m, nn.BatchNorm2d):
        m.weight.data.fill_(1)
        m.bias.data.zero_()

net = LeNet().to(device)
net.apply(weight_init)

optimizer_server = optim.SGD(net.parameters(), lr=LR, momentum=0.9)

client_list = []
# train_data_root = '/home/dchen/dataset/MNIST/IID/' + str(CLIENT_NUM) + '/train/'
# test_data_root  = '/home/dchen/dataset/MNIST/IID/' + str(CLIENT_NUM) + '/test/'
train_data_root = '/home/dchen/dataset/MNIST/Non-IID1/' + str(CLIENT_NUM) + '/train/'
test_data_root  = '/home/dchen/dataset/MNIST/Non-IID1/' + str(CLIENT_NUM) + '/test/'

for i in range(CLIENT_NUM):
    client_name = 'client' + str(i)
    client_list.append(Client(client_name, train_data_root + client_name + '/', test_data_root + client_name + '/'))

center_params_dict = dict()
Example #31
# (head of this call is truncated in the source; reconstructed to mirror cifar_test)
cifar_train = torchvision.datasets.CIFAR10(root='./data',
                                           train=True,
                                           transform=transform)
cifar_test = torchvision.datasets.CIFAR10(root='./data',
                                          train=False,
                                          transform=transform)

print(cifar_train)

trainloader = torch.utils.data.DataLoader(cifar_train,
                                          batch_size=32,
                                          shuffle=True)
testloader = torch.utils.data.DataLoader(cifar_test,
                                         batch_size=32,
                                         shuffle=True)

device = torch.device("cuda:0")
net = LeNet().to(device)

import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

print("START")
for epoch in range(50):
    loss100 = 0.0
    for i, data in enumerate(trainloader):
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # (the rest of this training loop is truncated in the source)

# Network parameter initialization
def weight_init(m):
    # use isinstance to check which layer type m is
    if isinstance(m, nn.Conv2d):
        n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        # torch.manual_seed(7)   # random seed; uncomment to get the same initialization every run
        m.weight.data.normal_(0, math.sqrt(2. / n))
    elif isinstance(m, nn.BatchNorm2d):
        # m's weight and bias are learnable parameters involved in backpropagation
        m.weight.data.fill_(1)
        m.bias.data.zero_()


net = LeNet()
attacker_net = LeNet()
# initialize the network parameters; apply() recursively visits every module in
# the network and applies the given function to each of them
net.apply(weight_init)
attacker_net.apply(weight_init)
# # extract the network parameters
# net_dic = net.state_dict()
# define the loss function
criterion = nn.CrossEntropyLoss()  # cross-entropy loss, commonly used for multi-class classification
optimizer_server = optim.Adam(net.parameters(), lr=LR, betas=(0.9, 0.99))
optimizer_backdoor = optim.Adam(attacker_net.parameters(),
                                lr=LR,
                                betas=(0.9, 0.99))

# distribute parameters to the clients: send_back()
client_0_net = LeNet()
Example #33
logprint = LogPrint(log, ExpID)
opt.ExpID = ExpID
opt.CodeID = get_CodeID()
logprint(opt.__dict__)

transform_train = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, ), (1.0, )),
])

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, ), (1.0, )),
])

lenet = LeNet().cuda()

train_set = dset.MNIST(root='./data',
                       train=True,
                       download=True,
                       transform=transform_train)
train_loader = torch.utils.data.DataLoader(train_set,
                                           batch_size=opt.batch_size,
                                           shuffle=True,
                                           num_workers=2)

test_set = dset.MNIST(root='./data',
                      train=False,
                      download=True,
                      transform=transform_test)
test_loader = torch.utils.data.DataLoader(test_set,